1. Introduction
1.1 Honor of Kings (王者荣耀) Skin Scraper
Starting from the Honor of Kings link, crawl every skin image of every hero and save the images locally. Analysis breaks the task into three steps (a minimal sketch of the first step follows the list):
- From the hero-list link on the home page, get every hero's name (hero_name) and its matching hero_id;
- For a single hero, use its hero_name and hero_id to look up the download link of each of that hero's skin images;
- Download each skin image from its link and save the content to a folder.
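As a quick illustration of the first step, here is a minimal sketch; it assumes the herolist.json endpoint and the ename/cname field names used in the full demo of section 2.1, and uses a trimmed user-agent header for brevity:

```python
# Minimal sketch of step 1: list every hero_id / hero_name pair.
# Assumes the herolist.json endpoint and the ename/cname fields
# used in the full demo (section 2.1).
import requests

header = {"user-agent": "Mozilla/5.0"}
hero_list = requests.get("https://pvp.qq.com/web201605/js/herolist.json",
                         headers=header).json()
for hero in hero_list:
    print(hero['ename'], hero['cname'])  # hero_id, hero_name
```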
1.2 Tencent Comics (腾讯动漫) Image Scraper
Crawl the comic images of every chapter from the Tencent Comics link and save them locally. Analysis shows that we only need the download address of each comic image; after finishing a chapter, clicking the next-chapter button yields the image download links of the following chapter. One point to note is that an action chain is needed to simulate scrolling the mouse to each image, which can be done with ActionChains(browser).scroll_to_element(pic).perform(), as the sketch below shows.
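A minimal sketch of that scrolling step, assuming Selenium 4.2+ (where ActionChains gained scroll_to_element) and a working Edge driver setup; the URL and XPath are taken from the full demo in section 2.2:

```python
# Minimal sketch: scroll each comic image into view before reading its src.
# Assumes Selenium 4.2+ and a working Edge driver; the URL and XPath
# come from the full demo (section 2.2).
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChains

browser = webdriver.Edge()
browser.get("https://ac.qq.com/ComicView/index/id/656073/cid/68282")
for pic in browser.find_elements(by=By.XPATH, value='//*[@id="comicContain"]/li/img'):
    # Scrolling the image into view lets the page's lazy loader fill in the real src
    ActionChains(browser).scroll_to_element(pic).perform()
    print(pic.get_attribute('src'))
browser.quit()
```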
1.3 m3u8 Video Scraper
Starting from a single AcFun video link, crawl the video and save it locally. Analysis divides the task into three steps (sketched after the list):
- Fetch the m3u8 playlist file;
- Extract the playback addresses (ts files) of all video segments;
- Download the video segments and merge them.
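The key fact behind the last two steps is that an m3u8 playlist is plain text: lines beginning with # are tags, and every other line is a segment address. A minimal sketch, assuming a hypothetical playlist URL whose entries are absolute segment URLs (the full demo in section 2.3 instead prepends a CDN base path):

```python
# Minimal sketch of steps 2-3: parse an m3u8 playlist, then download the
# MPEG-TS segments in order and byte-concatenate them into one file.
# The playlist URL is hypothetical and its entries are assumed absolute.
import requests

m3u8_text = requests.get("https://example.com/video.m3u8").text  # hypothetical URL
segments = [line for line in m3u8_text.splitlines()
            if line and not line.startswith('#')]
with open("video.mp4", 'wb') as f:
    for seg in segments:
        f.write(requests.get(seg).content)  # TS segments concatenate cleanly
```

Raw byte concatenation works because MPEG-TS is a streamable, self-synchronizing container; if a player rejects the result, remuxing the merged file with ffmpeg is a more robust finish.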
2. Examples
2.1 Honor of Kings Skin Scraper Demo
python">"""
@Author :江上挽风&sty
@Blog(个人博客地址):https://blog.csdn.net/weixin_56097064
@File :王者荣耀图片下载
@Time :2024/12/9 13:58
@Motto:一直努力,一直奋进,保持平常心"""
import os.path
import pprint
import reimport requests
from bs4 import BeautifulSoup
# https://game.gtimg.cn/images/yxzj/img201606/skin/hero-info/558/558-bigskin-1.jpg
# https://game.gtimg.cn/images/yxzj/img201606/skin/hero-info/577/577-bigskin-2.jpg
header = {"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36 Edg/131.0.0.0"
}# 根据英雄皮肤的连接下载并保存对应的英雄皮肤图片
def download_pic(pic_url, path, pic_name, hero_name):pic_content = requests.get(pic_url, headers=header).contentif not os.path.exists(f'{path}/{hero_name}'):os.mkdir(f'{path}/{hero_name}')with open(f'{path}/{hero_name}/{pic_name}.jpg', 'wb') as f:f.write(pic_content)print(f"{pic_name}下载成功")# 获取英雄的全部图片(单个英雄对应多个皮肤图片)
def get_hero_pics(hero_id,hero_name):hero_url = f"https://pvp.qq.com/web201605/herodetail/{hero_id}.shtml"r = requests.get(hero_url, headers=header)# apparent_encoding 是 Python requests 库中的一个属性,用于从响应内容中分析得出的编码方式r.encoding = r.apparent_encoding# print(r.text)soup = BeautifulSoup(r.text, 'html.parser')content = soup.find('ul', class_="pic-pf-list pic-pf-list3").get('data-imgname')pic_names = re.sub('&\d+', '', content).split('|')for num, pic_name in enumerate(pic_names):num += 1pic_url = f"https://game.gtimg.cn/images/yxzj/img201606/skin/hero-info/{hero_id}/{hero_id}-bigskin-{num}.jpg"download_pic(pic_url, path, pic_name, hero_name)# 获取全部英雄的名称和对应的hero_id
def get_hero(hero_url):hero_list = requests.get(hero_url,headers=header).json()# 这个函数主要用于以一种美观、格式化的方式打印复杂的数据结构,如多层嵌套的列表、元组和字典等。它能够使输出的结果显示得更加清晰和易于阅读pprint.pprint(hero_list)for hero in hero_list:hero_name = hero['cname']hero_id = hero['ename']get_hero_pics(hero_id,hero_name)if __name__ == '__main__':"""1、根据首页全部英雄列表连接获取全部英雄的名称hero_name以及对应的hero_id2、根据单个英雄的hero_name和hero_id去查找该英雄的全部皮肤碎片的数量,获取每张皮肤图片的下载连接3、根据单张皮肤图片链接地址下载并保存图片内容到文件夹中"""path = "D:\\ProjectCode\\Spider\\StudySpider07\\heros"heroes_url = "https://pvp.qq.com/web201605/js/herolist.json"get_hero(heroes_url)
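Note how little the script needs: the two sample links in the header comments show that the skin CDN follows the fixed pattern .../{hero_id}/{hero_id}-bigskin-{num}.jpg, so parsing the skin names out of the data-imgname attribute also tells the scraper how many numbered images to request.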
2.2 Tencent Comics Image Scraper Demo
python">"""
@Author :江上挽风&sty
@Blog(个人博客地址):https://blog.csdn.net/weixin_56097064
@File :腾讯动漫图片下载
@Time :2024/12/9 15:26
@Motto:一直努力,一直奋进,保持平常心"""
import os.path
import timeimport requests
from selenium import webdriver
from selenium.webdriver.edge.service import Service
from selenium.webdriver.edge.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.common.action_chains import ActionChainsservice = Service(executable_path='D:\ApplicationsSoftware\EdgeDriver\edgedriver_win32\msedgedriver.exe')
opt = Options()
opt.add_argument('--disable-blink-features=AutomationControlled')
# opt.headless = True# 下载动漫图片
def download(url ,path):browser = webdriver.Edge(service=service, options=opt)browser.maximize_window()browser.get(url)time.sleep(1)filename = browser.find_element(by=By.XPATH,value='//*[@id="comicTitle"]/span[@class="title-comicHeading"]').textpic_list = browser.find_elements(by=By.XPATH, value='//*[@id="comicContain"]/li/img')for num, pic in enumerate(pic_list):time.sleep(0.5)ActionChains(browser).scroll_to_element(pic).perform()link = pic.get_attribute('src')pic_content = requests.get(link).contentif not os.path.exists(f'{path}/{filename}'):os.mkdir(f'{path}/{filename}')with open(f'{path}/{filename}/{num}.jpg', 'wb') as f:f.write(pic_content)# print(link)print(f"已下载...{filename}....第{num+1}张图片")next_page = browser.find_element(by=By.XPATH, value='//*[@id="mainControlNext"]').get_attribute('href')browser.close()return next_pageif __name__ == '__main__':path = "D:\\ProjectCode\\Spider\\StudySpider07\\动漫"url = "https://ac.qq.com/ComicView/index/id/656073/cid/68282"while url:url = download(url, path)
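Each call to download() handles one chapter and returns the href scraped from the next-chapter button, so the while url loop in the main block walks the comic chapter by chapter and stops once no further link comes back.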
2.3 m3u8 Video Scraper Demo
python">"""
@Author :江上挽风&sty
@Blog(个人博客地址):https://blog.csdn.net/weixin_56097064
@File :视频爬虫
@Time :2024/12/9 16:37
@Motto:一直努力,一直奋进,保持平常心"""
import pprint
import re
import json
import requests
from tqdm import tqdm # 进度条模块
from bs4 import BeautifulSoupheader = {"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/131.0.0.0 Safari/537.36 Edg/131.0.0.0"
}# 获取m3u8列表文件
def get_m3u8_list(url):resp = requests.get(url,headers=header)# print(resp.text)# 正则表达式去匹配info = re.findall('window.pageInfo = window.videoInfo = (.*?) window.videoResource', resp.text, re.S)[0].strip()[:-1]# 逐层剥开找到m3u8地址info_json = json.loads(json.loads(info)['currentVideoInfo']['ksPlayJson'])['adaptationSet'][0]['representation'][0]['url']filename = json.loads(info)['title']# print(filename)# pprint.pp(info_json)return info_json,filename# 提取所有视频片段的播放地址ts文件
def get_ts_files(m3u8_url):resp = requests.get(m3u8_url, headers=header)# print(resp.text)ts_files = re.sub('#.*', '', resp.text).strip()return ts_files# 下载并合并视频片段
def download_combine(ts_files, path, filename):with open(f'{path}/{filename}.mp4', 'ab') as f:for ts in tqdm(ts_files):# 地址拼接ts = 'https://ali-safety-video.acfun.cn/mediacloud/acfun/acfun_video/' + ts# 获取地址二进制流内容ts_content = requests.get(ts, headers=header).contentf.write(ts_content)# # 获取目录页的视频链接
# def get_index_link():
# index_url = 'https://www.acfun.cn/rest/pc-direct/homePage/searchDefault'
# resp = requests.get(index_url, headers=header)
# print(resp.text)
# soup = BeautifulSoup(resp.text, 'html.parser')
# link_list = soup.findAll('a', class_="list-wrap")
# # 遍历所有的<a>标签并打印它们的href属性值
# for tag in link_list:
# href = tag.get('href')
# if href: # 确保href属性存在
# print(href)
#
# else:
# print('请求失败,状态码:', resp.status_code)
# print(link_list)def main():url = "https://www.acfun.cn/v/ac46628128"path = "D:\\ProjectCode\\Spider\\StudySpider07\\videos"m3u8_url, filename = get_m3u8_list(url)ts_files = get_ts_files(m3u8_url)download_combine(ts_files, path, filename)# get_index_link()if __name__ == '__main__':main()
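One caveat: download_combine() opens the output file in append mode ('ab'), so re-running the script after a failed attempt appends to the partial file instead of replacing it; delete the old file (or switch the mode to 'wb') before retrying.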