下面的代码是采用多线程的生产者消费者模式,下载速度比之前文章的普通下载快一点。
普通下载的链接:python爬虫–王者荣耀高清壁纸下载
代码在下载方面是没有问题的，可以直接运行。只是在收集打不开链接的图片信息时会出现问题：主线程启动工作线程后没有 join 等待它们结束，模块末尾的 print(un_download) 在下载完成之前就执行了，所以打印出的失败链接列表往往是空的或不完整的；此外 un_download_url 中 replace('/0','/200') 会把 URL 路径中间的 '/0' 也一并替换。这些都不影响下载这一主要功能的使用。
import os
import queue
import threading
from urllib import parse
from urllib import request

import requests
# Collects info records for image links that could not be downloaded
# (appended to by the consumer threads, printed after main() returns).
un_download = []

# Request headers: a browser user-agent plus the wallpaper page as referer,
# so the CDN does not reject the API/image requests.
headers = {
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36',
    'referer': 'https://pvp.qq.com/web201605/wallpaper.shtml'
}

# Producer
class Producer(threading.Thread):
    """Producer thread: fetches wallpaper-list pages and enqueues image jobs.

    Pulls page-API URLs from ``page_queue``, parses the JSON response, creates
    one directory per wallpaper under ``image/`` and puts one work item per
    image URL onto ``image_queue`` for the consumer threads.
    """

    def __init__(self, page_queue, image_queue, *args, **kwargs):
        """
        :param page_queue: queue.Queue of page-list API URLs to crawl
        :param image_queue: queue.Queue receiving dicts with keys
                            'image_url', 'image_name', 'image_path'
        """
        super(Producer, self).__init__(*args, **kwargs)
        self.page_queue = page_queue
        self.image_queue = image_queue

    def run(self) -> None:
        while True:
            # The original `while not empty(): get()` pattern is racy with
            # several producers: both can see non-empty, then one blocks
            # forever on get(). A non-blocking get with queue.Empty as the
            # exit condition is safe.
            try:
                page_url = self.page_queue.get(block=False)
            except queue.Empty:
                break
            resp = requests.get(page_url, headers=headers)
            result = resp.json()
            for data in result['List']:
                image_urls = exact_image(data)
                # Wallpaper names come percent-encoded; drop the "1:1" tag.
                image_name = parse.unquote(data['sProdName']).replace("1:1", "").strip()
                # Directory that will hold this wallpaper's images.
                dir_path = os.path.join("image", image_name)
                if os.path.exists(dir_path):
                    # Duplicate wallpaper name: suffix '2' to keep dirs distinct.
                    dir_path = dir_path + '2'
                os.mkdir(dir_path)
                for index, image_url in enumerate(image_urls):
                    self.image_queue.put({
                        "image_url": image_url,
                        "image_name": image_name,
                        "image_path": os.path.join(dir_path, "%d.jpg" % (index))
                    })

# Consumer
class Consumer(threading.Thread):
    """Consumer thread: downloads the image jobs queued by the producers."""

    def __init__(self, image_queue, *args, **kwargs):
        """
        :param image_queue: queue.Queue of dicts with keys
                            'image_url', 'image_name', 'image_path'
        """
        super(Consumer, self).__init__(*args, **kwargs)
        self.image_queue = image_queue

    def run(self) -> None:
        while True:
            # The 10s timeout doubles as the shutdown signal: once the
            # producers are done and the queue stays empty, the thread exits.
            # Catch only queue.Empty — the original bare `except: break`
            # also silently killed the consumer on any other error,
            # abandoning the remaining queued images.
            try:
                image_obj = self.image_queue.get(timeout=10)
            except queue.Empty:
                break
            image_url = image_obj.get("image_url")
            image_path = image_obj.get("image_path")
            image_name = image_obj.get("image_name")
            download(image_url, image_name, image_path)

# Collect info about images whose links cannot be opened
# (links whose trailing /200 was rewritten to /0)
def un_download_url(image_name, image_url):
    """Build a record describing an image that failed to download.

    The scraper rewrites the trailing thumbnail segment '/200' to '/0' to get
    the full-resolution URL; this reverts only the *trailing* '/0' back to
    '/200' to recover the original thumbnail URL. The original
    ``replace('/0', '/200')`` replaced every '/0' substring anywhere in the
    URL, corrupting paths that happen to contain '/0...'.

    :param image_name: wallpaper name the URL belongs to
    :param image_url: the rewritten full-resolution URL that failed
    :return: dict with keys 'name', 'original_url' (/200 thumbnail URL)
             and 'curr_url' (the failing /0 URL)
    """
    if image_url.endswith('/0'):
        original_url = image_url[:-2] + '/200'
    else:
        # URL was never rewritten; keep it as-is.
        original_url = image_url
    return {
        'name': image_name,
        'original_url': original_url,
        'curr_url': image_url,
    }

# Extract image links
def exact_image(data):
    """Return the eight image URLs (sProdImgNo_1..8) of one wallpaper record.

    Each value is percent-decoded, then its thumbnail-size segment '/200' is
    swapped for '/0' to request the full-resolution image. The slash-prefixed
    form '/200' is matched (not bare '200') so that a 200 occurring elsewhere
    in the URL is left untouched.
    """
    return [
        parse.unquote(data['sProdImgNo_%d' % n]).replace('/200', '/0')
        for n in range(1, 9)
    ]

# Download images
def download(image_url, image_name, image_path):
    """Download one image to image_path.

    Failures are recorded in the module-level ``un_download`` list instead of
    being raised, so one unreachable URL does not kill a consumer thread.
    """
    try:
        request.urlretrieve(image_url, image_path)
        print("%s下载完成!" % (image_name + image_url))
    except Exception:
        # Keep a record of the link that could not be opened.
        un_download_info = un_download_url(image_name, image_url)
        un_download.append(un_download_info)


def main():
    """Queue the 22 list pages, start producers/consumers and wait for them."""
    page_queue = queue.Queue(22)
    image_queue = queue.Queue(10000)
    for x in range(0, 22):
        page_url = 'https://apps.game.qq.com/cgi-bin/ams/module/ishow/V1.0/query/workList_inc.cgi?activityId=2735&sVerifyCode=ABCD&sDataType=JSON&iListNum=20&totalpage=0&page={page}&iOrder=0&iSortNumClose=1&iAMSActivityId=51991&_everyRead=true&iTypeId=2&iFlowId=267733&iActId=2735&iModuleId=2735&_=1587884222349'.format(page=x)
        page_queue.put(page_url)

    threads = []
    # Start 3 producer threads.
    for x in range(3):
        th = Producer(page_queue, image_queue, name="生产者%d号" % x)
        th.start()
        threads.append(th)
    # Start 5 consumer threads.
    for x in range(5):
        th = Consumer(image_queue, name="消费者%d号" % x)
        th.start()
        threads.append(th)

    # Wait for every worker to finish. Without these join()s the module-level
    # print(un_download) below ran before any download completed, which is why
    # the failed-link list came out empty/incomplete in the original version.
    for th in threads:
        th.join()


if __name__ == '__main__':
    main()
    print(un_download)