# 免责声明: 本文仅做技术交流与学习, 请勿不正当使用。
# (Disclaimer: for technical exchange and learning only; do not misuse.)
# 循环页数爬取 — iterate over listing pages and scrape each one
import pandas as pd
import requests
from lxml import etree
# Collect one dict per job posting across all pages, then build the
# DataFrame once at the end. (Re-concatenating a growing DataFrame inside
# the loop is quadratic in the total number of rows; a single
# pd.DataFrame(list_of_dicts) at the end produces the identical result.)
data_list = []

# Desktop-browser User-Agent so the site serves the normal HTML page.
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/114.0.5735.289 Safari/537.36"
}

# Pages 1..11 of the Nanchang part-time job listings.
for s in range(1, 12):
    url = f"https://nanchang.jianzhimao.com/index{s}.html"
    # Fetch the page and parse it with lxml.
    response = requests.get(url, headers=headers).text
    html = etree.HTML(response)
    # Progress banner (red, bold via ANSI escape codes).
    print("\033[31;1m", "*" * 20, "开始爬取", s, "页", "*" * 20, "\033[0m")
    # Each page appears to list up to 15 items under #content_list_wrap.
    for i in range(1, 16):
        name = html.xpath(f'//*[@id="content_list_wrap"]/li[{i}]/a/text()')  # job title
        quyu = html.xpath(f'//*[@id="content_list_wrap"]/li[{i}]/div[1]/span/text()')  # region restriction
        rens = html.xpath(f'//*[@id="content_list_wrap"]/li[{i}]/div[2]/span/text()')  # follower count
        # Skip incomplete rows: all three fields must be present.
        if name and quyu and rens:
            data_list.append({
                '岗位名称': name[0],
                '限制地区': quyu[0],
                '职位热度': rens[0],
            })

# Build the DataFrame in one shot and export to Excel.
df = pd.DataFrame(data_list)
df.to_excel('data.xlsx', index=False)
# TODO: optionally also export as CSV, e.g. df.to_csv('data.csv', index=False)