Getting the response body

print(response.content)                   # body as bytes
print(response.content.decode())          # decode (UTF-8 by default)
print(response.content.decode('utf-8'))
print(response.content.decode('GBK'))     # raises an error if decoding fails

Getting the response headers

# response headers
print(response.headers)

Getting the request headers that produced the response

# request headers of the corresponding request
print(response.request.headers)
# boolean: True when the status code is below 400
print(response.ok)

Getting JSON data

# JSON
print(response.json())  # raises an error if the body is not JSON

Simple examples:

# Case 1:
import requests
# a randomly picked image URL
url = 'https://img.zcool.cn/community/01531b570a79a132f8751b3ff9661f.jpg@1280w_1l_2o_100sh.jpg'
# send the request
response = requests.get(url)
# we know the payload is image data, so open the file in 'wb' (binary) mode
with open('dgs.jpg', 'wb') as f:
    # write the bytes to the file
    f.write(response.content)

Adding request headers

# Case 2: adding request headers
import requests
url = 'http://www.baidu.com'
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36"}
# without request headers the response contains very little data
response = requests.get(url)
print(response.content)
# with request headers the response is noticeably larger
response1 = requests.get(url, headers=headers)
print(response1.content)

Sending a request with parameters (in the URL)

# Case 3: sending a GET request with parameters
# Approach 1: parameters embedded in the URL
import requests
url = 'http://www.zishazx.com/product?page=1&size_id=0&volum_id=0&price_id=2&category_id=1001&prize_id=0&pug_id=25#views'
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36'
}
# send the GET request; the URL itself carries the parameters
response = requests.get(url, headers=headers)
data = response.content.decode()
with open('zsh.html', 'w', encoding='utf-8') as f:
    f.write(data)

Sending a request with parameters (via a dict)

# Approach 2: parameters passed as a dict
import requests
url = 'http://www.zishazx.com/product'
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36'
}
kw = {
    'page': '1',
    'size_id': '0',
    'volum_id': '0',
    'price_id': '2',
    'category_id': '1001',
    'prize_id': '0',
    'pug_id': '25'
}
response = requests.get(url, headers=headers, params=kw)
# the URL with the parameters automatically appended
print(response.url)
# http://www.zishazx.com/product?page=1&size_id=0&volum_id=0&price_id=2&category_id=1001&prize_id=0&pug_id=25
data = response.content.decode()
with open('zsh1.html', 'w', encoding='utf-8') as f:
    f.write(data)

Key takeaways:

1. What the requests module does: it helps us send requests and obtain responses.
2. Basic usage: requests.get(url).
3. Common response attributes (exercised in the sketch after this list):
   response.text                 response body, str type
   response.content              response body, bytes type
   response.status_code          response status code
   response.request.headers      request headers of the corresponding request
   response.headers              response headers
   response.request._cookies     cookies of the corresponding request
   response.cookies              cookies set by the response (via Set-Cookie)
4. The difference between response.text and response.content: text returns str, content returns bytes.
5. Solving page decoding problems:
   response.content.decode()
   response.content.decode("UTF-8")
   response.text
6. Sending a request with headers: requests.get(url, headers={}).
7. Sending a GET request with parameters: requests.get(url, params={}).
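A minimal sketch that touches each attribute listed above in one run (it reuses the baidu URL from Case 2; any reachable URL works):

import requests
response = requests.get('http://www.baidu.com')
print(response.status_code)       # response status code
print(response.ok)                # True when the status code is below 400
print(response.headers)           # response headers
print(response.request.headers)   # request headers of the corresponding request
print(response.cookies)           # cookies set by the response (Set-Cookie)
print(response.content[:100])     # body as bytes
print(response.text[:100])        # body as str, decoding guessed by requests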

Hands-on: the Xiaomi app store

# Hands-on:
import requests
url = 'https://app.mi.com/subject/115150'
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36'
}
# request once to get the page source
response = requests.get(url, headers=headers)
# HTML source
data = response.content.decode()
# write it to a file for offline analysis, to keep the number of requests low
# (think about why fewer requests matter)
# with open('xiaomi.html', 'w', encoding='utf-8') as f:
#     f.write(data)
# read the saved source back for XPath parsing
# with open('xiaomi.html', 'r', encoding='utf-8') as f:
#     data = f.read()
# import the parser
from lxml import etree
# build the element tree
tree = etree.HTML(data)
# parse: grab the li elements under the ul that holds the data
li_list = tree.xpath('//ul[@class="applist"]/li')
# print to check that these are li elements
# print(li_list)
for li in li_list:
    img = li.xpath('./a/img/@data-src')[0]
    name = li.xpath('./h5/a/text()')[0]
    name_type = li.xpath('./p[@class="app-desc"]/a/text()')[0]
    print(img, name, name_type)
# once static scraping works, you can move on to dynamic scraping

Using an IP proxy

import requests
# example proxy address; public proxies like this often stop working
proxy = {
    'http': 'http://8.219.97.248:80',
    'https': 'https://8.219.97.248:80'
}
url = 'http://httpbin.org/ip'
res = requests.get(url, proxies=proxy)
print(res.content.decode())

Requests carrying a cookie

--------------------------------------------
import requests
url = 'https://xueqiu.com/statuses/hot/listV2.json?since_id=-1&max_id=554225&size=15'
headers = {
    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36',
    'Referer': 'https://xueqiu.com/',
    'Cookie':'cookiesu=121697804017738; device_id=2cb32776fe1f32adba3aefc52173bcdc; xq_a_token=e2f0876e8fd368a0be2b6d38a49ed2dd5eec7557; xqat=e2f0876e8fd368a0be2b6d38a49ed2dd5eec7557; xq_r_token=2a5b753b2db675b4ac36c938d20120660651116d; xq_id_token=eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiJ9.eyJ1aWQiOi0xLCJpc3MiOiJ1YyIsImV4cCI6MTcwMDY5OTg3NSwiY3RtIjoxNjk4MjM4NzI4MTU4LCJjaWQiOiJkOWQwbjRBWnVwIn0.RtY0JREVs0R4s9sgP2RsybzTrLY7UD5dDElnpf16r7-F02lOLkU7mdgAm0HjvKvbcAYYeRyP6Ke6rdy3WfbFI-RlJwzxIo5wZ4ScGzy0Vj3VYKqsh7-Wx8MnzyRjVcJPtVUfBlN_Plj5nmxnQPykmZwKSRjKT02YBy2XH4OHNaN0sG1Rst37mAj2f42lTogbHdfZBsRUkweP-UezUkEyvSncUYIe9IAMZmHf7d5AQ94BK5h3nhSqy01KyyTf2aonnwWG7rNrOeuo7F28S50Wz-1JBKtbQYhRbOEZL2FVpizmpC_h98pYl3RtDBVvbiUEJPxx1-bRN6J78h3bduYu0w; u=121697804017738; Hm_lvt_1db88642e346389874251b5a1eded6e3=1697804019,1698238782;'
}
res = requests.get(url, headers=headers)
print(res.json())

The response's cookies

-----------------------------------
import requests
url = 'https://xueqiu.com/'
headers = {
    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36',
}
res = requests.get(url, headers=headers)
# get the cookies
cookies = res.cookies
# print(cookies)
# get the cookies as a dict
print(dict(cookies))

Scraping while carrying the cookies from the home-page response

--------------------------------------
import requests
# this first request exists only to obtain the cookies
index_url = 'https://xueqiu.com/'
headers = {
    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36',
    'Referer': 'https://xueqiu.com/',
}
res = requests.get(index_url, headers=headers)
# get the cookies
cookies = dict(res.cookies)
url = 'https://xueqiu.com/statuses/hot/listV2.json?since_id=-1&max_id=554225&size=15'
# send the request carrying the cookies
res = requests.get(url, headers=headers, cookies=cookies)
print(res.json())

Automatically maintaining cookies (Session)

-------------------------------------
import requests
# the session will capture the cookies for us
headers = {
    'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36',
    'Referer': 'https://xueqiu.com/',
}
# use a Session object to maintain the cookies
session = requests.Session()
# session = requests.session()  # equivalent lowercase alias
# request the home page; the cookies it returns are stored in the session
index_url = 'https://xueqiu.com/'
session.get(index_url, headers=headers)
# fetch the data
url = 'https://xueqiu.com/statuses/hot/listV2.json?since_id=-1&max_id=554225&size=15'
# the session sends the stored cookies automatically
res = session.get(url, headers=headers)
print(res.json())

Scraping data behind a login

----------------------------------------------------
import requests
# carry the cookie manually to scrape a logged-in page
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36',
    'Cookie': 'GUID=bb4eef9b-1b8f-417e-9264-3cdc3bad8eb1; Hm_lvt_9793f42b498361373512340937deb2a0=1697788773,1698241318; c_channel=0; c_csc=web; accessToken=avatarUrl%3Dhttps%253A%252F%252Fcdn.static.17k.com%252Fuser%252Favatar%252F18%252F98%252F90%252F96139098.jpg-88x88%253Fv%253D1650527904000%26id%3D96139098%26nickname%3D%25E4%25B9%25A6%25E5%258F%258BqYx51ZhI1%26e%3D1713793345%26s%3Df4633fc03250bca7; sensorsdata2015jssdkcross=%7B%22distinct_id%22%3A%2296139098%22%2C%22%24device_id%22%3A%2218b4c18c0cf39c-0a8b44b4b81a83-26031151-1866240-18b4c18c0d06e0%22%2C%22props%22%3A%7B%22%24latest_traffic_source_type%22%3A%22%E7%9B%B4%E6%8E%A5%E6%B5%81%E9%87%8F%22%2C%22%24latest_referrer%22%3A%22%22%2C%22%24latest_referrer_host%22%3A%22%22%2C%22%24latest_search_keyword%22%3A%22%E6%9C%AA%E5%8F%96%E5%88%B0%E5%80%BC_%E7%9B%B4%E6%8E%A5%E6%89%93%E5%BC%80%22%7D%2C%22first_id%22%3A%22bb4eef9b-1b8f-417e-9264-3cdc3bad8eb1%22%7D; Hm_lpvt_9793f42b498361373512340937deb2a0=1698241413',
}
url = 'https://user.17k.com/ck/user/myInfo/96139098?bindInfo=1&appKey=2406394919'
res = requests.get(url, headers=headers)
print(res.content.decode())

Manual cookies are tedious; simulate the login instead

# manual cookies are tedious, so simulate the login----------------------------------------------------------
import requests
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36',
}
# the login URL
login_url = 'https://passport.17k.com/ck/user/login'
data = {
    'loginName': '17346570232',
    'password': 'xlg17346570232',
}
# send the login request
res = requests.post(login_url, headers=headers, data=data)
cookies = dict(res.cookies)  # get the cookies returned by the login
# fetch the logged-in data
url = 'https://user.17k.com/ck/user/myInfo/96139098?bindInfo=1&appKey=2406394919'
res = requests.get(url, headers=headers, cookies=cookies)
print(res.content.decode())

Simulated login with session persistence: scrape post-login data dynamically

# simulated login, session keeps the cookies, scrape post-login data--------------------------------------------
import requests
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/118.0.0.0 Safari/537.36',
}
# the login URL
login_url = 'https://passport.17k.com/ck/user/login'
data = {
    'loginName': '17346570232',
    'password': 'xlg17346570232',
}
# send the login request through a session
session = requests.Session()
session.post(login_url, headers=headers, data=data)
# fetch the logged-in data; the session carries the cookies automatically
url = 'https://user.17k.com/ck/user/myInfo/96139098?bindInfo=1&appKey=2406394919'
res = session.get(url, headers=headers)
print(res.content.decode())

Handling certificate errors with requests

# While browsing, you sometimes see the warning below:
# "Your connection is not private" - an HTTPS site whose certificate has a problem
import requests
url = "https://www.12306.cn/mormhweb/"
response = requests.get(url)
# raises:
# ssl.CertificateError ...

# pass verify=False to skip certificate verification
import requests
url = "https://www.12306.cn/mormhweb/"
response = requests.get(url, verify=False)
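Note that verify=False makes urllib3 emit an InsecureRequestWarning on every request. If the warnings clutter the output they can be silenced explicitly; a minimal sketch using the standard urllib3 API:

import requests
import urllib3
# suppress the InsecureRequestWarning triggered by verify=False
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
response = requests.get("https://www.12306.cn/mormhweb/", verify=False)
print(response.status_code)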
To recap the session workflow:
1. First request an index page URL.
2. Locate the login form.
3. Instantiate a session: session = requests.Session().
4. Make every later request with session.get / session.post.
This way the cookies are kept inside the session (see the sketch below).
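A condensed sketch of that workflow; the URLs and form field names are placeholders, so substitute the real index page and the login form's action URL:

import requests

session = requests.Session()
# 1. request the index page; any Set-Cookie headers are stored in the session
session.get('https://example.com/')                 # placeholder index URL
# 2. post the login form found on that page; fields are placeholders
session.post('https://example.com/login',           # placeholder login URL
             data={'username': 'xxx', 'password': 'xxx'})
# 3. later requests automatically carry the session's cookies
res = session.get('https://example.com/profile')    # placeholder data URL
print(res.status_code)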
ddddocr usage tutorial
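The notes give no example for this section; here is a minimal captcha-recognition sketch, assuming ddddocr is installed (pip install ddddocr) and captcha.jpg is a captcha image already saved locally:

import ddddocr

ocr = ddddocr.DdddOcr()                  # create the OCR engine
with open('captcha.jpg', 'rb') as f:     # captcha.jpg is an assumed local file
    img_bytes = f.read()
print(ocr.classification(img_bytes))     # prints the recognized text

A typical use is downloading a login page's captcha image with the same Session (so the captcha matches the login request) and feeding the bytes to classification before posting the login form.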
Homework: Gushiwen (https://so.gushiwen.cn)
# https://movie.douban.com/review/best/ Douban best movie reviews
# https://movie.douban.com/top250?start=1 Douban Top 250 movies
# https://movie.douban.com/explore#!type=movie&tag=热门&sort=recommend&page_limit=20&page_start=0 Douban movies
# http://www.kfc.com.cn/kfccda/index.aspx KFC
# https://www.iciba.com/fy iCIBA translation
# https://www.shicimingju.com/book/sanguoyanyi.html Romance of the Three Kingdoms
# http://pic.netbian.com Netbian wallpapers
# https://sc.chinaz.com/tupian/renwutupian.html Chinaz image library
# https://xueqiu.com Xueqiu (the request must carry cookies)
# https://passport.17k.com/ 17k novels
# https://so.gushiwen.cn Gushiwen classical poetry
# https://www.pearvideo.com/ Pear Video
# http://www.mingchaonaxieshier.com/ "Those Ming Dynasty Things" full text online
# https://www.biquc.com/26/26687/ Fengye novels
# https://dig.chouti.com Chouti hot list
# https://www.vmgirls.com/special/beauty/ vmgirls
# http://www.webometrics.info/en/Asia Asian university rankings
# https://www.xbiquwx.la/ Biquge novels
# https://www.tupianzj.com/ Tupianzj images
# https://aidotu.com/bqb/oppmgyv.html Aidotu memes
# http://quote.eastmoney.com/center/gridlist.html#hs_a_board Eastmoney stocks
# https://bbs.tianya.cn/m/post-140-393974-6.shtml Tianya forum
# https://app.mi.com Xiaomi app store
# https://cang.cngold.org/c/2022-06-14/c8152503.html Cngold
# http://www.boxofficecn.com/boxoffice2019 2019 China box office
# http://www.shicimingju.com/bookmark/sidamingzhu.html The Four Great Classics
# http://www.yb21.cn Postal code lookup
# https://www.aqistudy.cn/historydata/ Air quality history
# https://www.k581.com/pc/views/games/jspk10/lishikj.html K58 lottery results
# https://www.51shucheng.net/daomu/guichuideng Ghost Blows Out the Light
# https://www.3dst.cn/t/lizhigushi/ Inspirational stories
# https://www.qqtn.com/wm/meinvtp_1.html Qqtn images
# https://qq.yh31.com/xq/wq/ Meme images
# http://www.jokeji.cn/ Jokeji jokes
# http://bbs.chinaunix.net ChinaUnix
# https://www.dytt8.net/html/gndy/dyzz/list_23_1.html Movie Heaven (dytt8)
# http://fundf10.eastmoney.com/jbgk_004400.html Tiantian Fund
# http://www.cs.ecitic.com/newsite/cpzx/jrcpxxgs/zgcp/index.html CITIC Securities
# https://www.qubook.cc/ Qubook novels
# https://www.biedoul.com/ Biedoul jokes
# https://www.xiaohua.com/ Xiaohua jokes
# https://www.dytt89.com/ Movie Heaven (dytt89)
# https://desk.zol.com.cn/dongman/ ZOL desktop wallpapers
# https://www.umei.cc/bizhitupian/ Umei wallpapers
# https://www.dushu.com/book/1188_1.html Dushu books
# http://category.dangdang.com/cp01.01.02.00.00.00.html Dangdang books
# https://www.starbucks.com.cn/menu Starbucks menu
# https://top.chinaz.com/hangyemap.html Chinaz top sites
# http://www.metalinfo.cn/mi.html Metallurgy information site
# http://www.umeituku.com/bizhitupian/diannaobizhi/15120.htm Umeituku wallpapers
# https://www.bizhizu.com/meinv/list-1.html Bizhizu wallpapers
# https://www.readnovel.com/category Readnovel
# http://www.zishazx.com/product Zishazx (Zisha Star)