request请求参数
- headers 请求头
- params 请求参数
- proxies 代理ip
- cookie web状态信息
- 1 headers 中携带cookie
- 2 cookies参数
- 3 使用session
- verify 安全验证
- timeout 网络超时参数
- retrying 重试次数
- post/get请求 (params参数)
headers 请求头
- 伪装的更像是一个正常的浏览器在请求服务.
- headers字典是从浏览器群中的请求头信息中复制过来的.
- response = requests.get(url, headers={})
import requests

# 1. Prepare the Baidu URL.
url = "http://www.baidu.com"

# Custom request-header dict copied from a real browser's request headers,
# so the request looks like a normal browser visit.
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36 Edg/91.0.864.59"
}

# 2. Send the request to Baidu and get the response; passing headers={}
# makes requests carry our own header dict instead of its default one.
response = requests.get(url, headers=headers)

# Print the response content and the length of the decoded string.
# print(response.content.decode())
print('-' * 50)
print(f"获取到的百度字符串的长度: {len(response.content.decode())}")

# Show the request headers that were actually sent.
print(response.request.headers)
params 请求参数
- 请求参数(query_string) ?key=value&key2=value2
- response = requests.get(url, headers={}, params={})
import requests

# 1. Prepare the URL.
# If a URL carries many query parameters, putting them all in the URL
# itself makes the URL very long.
url = "https://www.baidu.com/s?wd=python"

# User-Agent and Cookie copied from the browser.
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36 Edg/91.0.864.59",
    "Cookie": "BIDUPSID=5DE38708545D8146E1F1F982BD8130D9; PSTM=1614823365; BAIDUID=5DE38708545D814671698FE674E91491:FG=1; __yjs_duid=1_69a4f382b161ec298ec56522ecf08bfd1618971394377; MCITY=-266%3A131%3A; BDORZ=B490B5EBF6F3CD402E515D22BCDA1598; ispeed_lsm=2; H_WISE_SIDS=110085_127969_128698_131862_164869_171235_174447_174661_174665_175538_175555_175756_176121_176158_176399_176677_176866_177007_177062_177224_177371_177380_177397_177412_177749_177783_178007_178261_178330_178384_178445_178498_178615_178706_178726_178896_178946_178993_179200_179309_179349_179402_179423_179480_179593_180074; plus_cv=1::m:7.94e+147; BD_UPN=12314753; H_PS_PSSID=; delPer=0; BD_CK_SAM=1; PSINO=1; BAIDUID_BFESS=5DE38708545D814671698FE674E91491:FG=1; H_PS_645EC=82fb32TSpdNDCfGulvu5ZJ3CgYjWxWSp8qPBqGaBGiw3qguoW0vTRGQb3rw; BA_HECTOR=a5848l8la125018ka81gdl0rl0r; COOKIE_SESSION=216_1_5_8_2_14_1_1_5_5_0_5_60061_0_0_0_1624932884_1624873469_1624933236%7C9%231039617_52_1624873451%7C9"
}

# 2. Send the request and get the response.
response = requests.get(url, headers=headers)
# print(response.content.decode())
print('-' * 100)

# 1. Prepare the URL again: it only needs to go up to the '?', and the '?'
# itself is optional — requests appends the query string from params.
url1 = "https://www.baidu.com/s?"

# Query-parameter dict.
params = {
    "wd": "迪迦奥特曼"
}

# Send the request carrying the params dict and get the response.
response1 = requests.get(url1, headers=headers, params=params)

# Output the response content.
print(response1.content.decode())
proxies 代理ip
- 代理ip 本质就是一个服务器,我们找了一个第三方,代替我们去请求网址的服务器
- requests.get(url, headers={}, params={}, proxies={})
- key 就是协议 http或者是 https
- value: 协议://代理IP:端口号
import requests

# 1. Prepare the URL.
url = "http://www.baidu.com"

# Proxy dict: the key is the protocol (http/https), the value is
# "protocol://proxy-ip:port".
proxies = {
    "http": "http://88.99.10.254:1080"
}
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36 Edg/91.0.864.59"
}

# 2. Send the request through the proxy and get the response.
response = requests.get(url, headers=headers, proxies=proxies)
print(response)
print(response.content.decode())
cookie web状态信息
1 headers 中携带cookie
- 先在浏览器中进行登录,登录成功后拿到cookie
- 将cookie复制到代码中的headers中,在发送请求的时候,携带上headers参数即可.
# Carry the cookie inside the request headers.
import requests

# 1. Prepare a URL that is only accessible after logging in.
url = "https://www.baidu.com/"

# Because the URL above requires a login, the request must carry the
# post-login cookie information to reach the page.
# First log in normally in the browser; after a successful login, use the
# Network tab, refresh the page, find the Cookie value in the request
# headers and copy it into the headers dict below.
headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
    "Cookie": "newportal=true; zg_did=%7B%22did%22%3A%20%221784f16add34a5-06213e365d86d8-5c3f1d4e-1fa400-1784f16add42fe%22%7D; bad_idd436e170-a6fb-11ea-9b40-b9d779181ccf=7b51d421-8963-11eb-b35a-0387929e61a0; inNewVersion=true; bad_idb91bf240-868c-11e8-beff-b3a73470030e=96f2e341-8963-11eb-9ac6-993ea0e42a06; Hm_lvt_84c8fd20cad502132fed4406f9aa22e9=1616234588,1617615394; Hm_lvt_c11880ab74b1d3cd437ca5f41060fd17=1616234588,1617615394; zg_7754eb76d2754588beed5ce8f9871edb=%7B%22sid%22%3A%201617615397413%2C%22updated%22%3A%201617615400790%2C%22info%22%3A%201617615397420%2C%22superProperty%22%3A%20%22%7B%7D%22%2C%22platform%22%3A%20%22%7B%7D%22%2C%22utm%22%3A%20%22%7B%7D%22%2C%22referrerDomain%22%3A%20%22www.boxuegu.com%22%2C%22landHref%22%3A%20%22https%3A%2F%2Fpassport.boxuegu.com%2Fuser%2Flogin%3Frefer%3Dhttps%253A%252F%252Fwww.boxuegu.com%252F%22%7D; zg_ea5fe1a9d6d94bfdbdd8a54e0ac598c2=%7B%22sid%22%3A%201617615395693%2C%22updated%22%3A%201617616017286%2C%22info%22%3A%201617615395699%2C%22superProperty%22%3A%20%22%7B%7D%22%2C%22platform%22%3A%20%22%7B%7D%22%2C%22utm%22%3A%20%22%7B%7D%22%2C%22referrerDomain%22%3A%20%22%22%2C%22landHref%22%3A%20%22https%3A%2F%2Fwww.boxuegu.com%2F%22%2C%22cuid%22%3A%20%22473061%22%7D; route=f0301929cfe766dcd7d5dca7332f6bf099f91cbb; JSESSIONID=9495DA39FA0D39CE7FF077C02EB03EFF; SESSION=MjNhMmIwN2MtZDI2MC00YzA4LWFhMzgtYmRhM2FjODEwZTg3"
}

# 2. Send the request carrying the header dict.
response = requests.get(url, headers=headers)
print(response.content.decode())
2 cookies参数
- 先在浏览器中进行登录,登录成功之后拿到cookie
- 复制到代码中,将cookie字符串转换为cookie字典
- 在发送请求的时候,将cookie字典,传递给cookies参数
import requests

# URL visited after logging in.
url = "http://www.baidu.com"

# The post-login cookie string must be converted into a dict so it can be
# passed via the cookies= parameter.
cookie_str = "JSESSIONID=7061F1768BD2172797BB1FBA631D4EFC"
# partition splits on the FIRST '=' only, so cookie values that themselves
# contain '=' are not truncated.
name, _, value = cookie_str.partition("=")
cookie_dict = {name: value}

# cookies= receives the post-login cookie dict.
response = requests.get(url, cookies=cookie_dict)
print(response.content.decode())

url = "https://www.baidu.com"
cookie_str = "newportal=true; zg_did=%7B%22did%22%3A%20%221784f16add34a5-06213e365d86d8-5c3f1d4e-1fa400-1784f16add42fe%22%7D; bad_idd436e170-a6fb-11ea-9b40-b9d779181ccf=7b51d421-8963-11eb-b35a-0387929e61a0; inNewVersion=true; bad_idb91bf240-868c-11e8-beff-b3a73470030e=96f2e341-8963-11eb-9ac6-993ea0e42a06; Hm_lvt_84c8fd20cad502132fed4406f9aa22e9=1616234588,1617615394; Hm_lvt_c11880ab74b1d3cd437ca5f41060fd17=1616234588,1617615394; zg_7754eb76d2754588beed5ce8f9871edb=%7B%22sid%22%3A%201617615397413%2C%22updated%22%3A%201617615400790%2C%22info%22%3A%201617615397420%2C%22superProperty%22%3A%20%22%7B%7D%22%2C%22platform%22%3A%20%22%7B%7D%22%2C%22utm%22%3A%20%22%7B%7D%22%2C%22referrerDomain%22%3A%20%22www.boxuegu.com%22%2C%22landHref%22%3A%20%22https%3A%2F%2Fpassport.boxuegu.com%2Fuser%2Flogin%3Frefer%3Dhttps%253A%252F%252Fwww.boxuegu.com%252F%22%7D; zg_ea5fe1a9d6d94bfdbdd8a54e0ac598c2=%7B%22sid%22%3A%201617615395693%2C%22updated%22%3A%201617616017286%2C%22info%22%3A%201617615395699%2C%22superProperty%22%3A%20%22%7B%7D%22%2C%22platform%22%3A%20%22%7B%7D%22%2C%22utm%22%3A%20%22%7B%7D%22%2C%22referrerDomain%22%3A%20%22%22%2C%22landHref%22%3A%20%22https%3A%2F%2Fwww.boxuegu.com%2F%22%2C%22cuid%22%3A%20%22473061%22%7D; JSESSIONID=9495DA39FA0D39CE7FF077C02EB03EFF; SESSION=MjNhMmIwN2MtZDI2MC00YzA4LWFhMzgtYmRhM2FjODEwZTg3; route=58a650353916f836923fb1da33f091a32d54e490"

# Convert the cookie string into a dict: split on '; ' into name=value
# pairs, then split each pair on its first '='.
cookie_dict = {}
for cookie in cookie_str.split('; '):
    key, _, val = cookie.partition('=')
    cookie_dict[key] = val

# Send the request carrying the cookie dict.
response = requests.get(url, cookies=cookie_dict)
print(response.content.decode())
3 使用session
- 创建session对象 session = requests.session()
- 使用session对象完成登录, session.post()
- 使用session登录成功之后,session对象中就会自动的保存了登录之后的cookie信息.
- 再次去使用session对象发起新的请求的时候,session对象就会自动的携带上登录之后的cookie.
import requests

# 1. Log in with code; after a successful login the session object holds
# the post-login cookie automatically.
login_url = "http://www.baidu.com"

# Username and password for the login request body.
data = {
    "username": "admin",
    "password": "admin",
}

# 2. Send the login request.
# Create the session object first.
session = requests.session()
# Use the session object to send the login request; the Set-Cookie data in
# the response is stored on the session automatically.
# session.get(url, headers=..., params=..., proxies=...) works the same way.
session.post(login_url, data=data)

# Use the same session object to request a page that requires login;
# it carries the stored post-login cookies automatically.
url = "http://manager-health-java.itheima.net/pages/main.html"
response = session.get(url)
print(response.content.decode())
verify 安全验证
import requests

# Prepare the URL.
url = "https://sam.huat.edu.cn:8443/selfservice/"

# verify defaults to True, which validates the site's CA certificate;
# pass verify=False to skip certificate validation for this request.
response = requests.get(url, verify=False)
print(response.content.decode("gbk"))
timeout 网络超时参数
import requests

# 1. Prepare the URL.
url = "http://www.google.com"
try:
    # timeout=5 raises an exception if no response arrives within 5 seconds.
    requests.get(url, timeout=5)
except Exception as e:
    print(f'连接超时.......{e}')

# Without a timeout this request would block until the OS gives up:
# requests.get(url)
url1 = "http://www.baidu.com"
requests.get(url1)
retrying 重试次数
from retrying import retry
import requests


# stop_max_attempt_number sets the maximum number of retry attempts.
@retry(stop_max_attempt_number=3)
def send_request(url):
    print('----------------')
    requests.get(url, timeout=3)


try:
    response = send_request("http://www.google.com")
except Exception as e:
    # All three attempts failed; fall back to None.
    response = None
print(response)
post/get请求 (params参数)
完整语法
get:
requests.get(url, headers={}, params={}, proxies={}, cookies={}, verify=False, timeout=秒数)
post:
requests.post(url, data={}, headers={}, params={}, proxies={}, cookies={}, verify=False, timeout=秒数)
post 比 get 请求 多了data参数