Scraping Pearvideo (anti-hotlinking)
import requests

url = "https://www.pearvideo.com/video_1713901"
contId = url.split("_")[1]
print(contId)

videoStatus_url = f"https://www.pearvideo.com/videoStatus.jsp?contId={contId}&mrd=0.8770894467476524"

headers = {
    "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36",
    "Referer": url  # Anti-hotlinking: tells the server which URL this request came from
}

resp = requests.get(videoStatus_url, headers=headers)
dic = resp.json()
# print(dic)
systemTime = dic['systemTime']
videoUrl = dic["videoInfo"]['videos']['srcUrl']
videoUrl = videoUrl.replace(systemTime, "cont-" + contId)  # build the real video URL
# print(videoUrl)
# Download the video
with open(f"{contId}.mp4", mode="wb") as f:
    f.write(requests.get(videoUrl).content)
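The write above loads the whole file into memory before saving it. A minimal sketch of a streamed download instead (assuming the videoUrl, headers, and contId variables defined above; the 64 KB chunk size is an arbitrary choice):

# Streamed download: write the video in chunks instead of reading it all at once
with requests.get(videoUrl, headers=headers, stream=True) as video_resp:
    video_resp.raise_for_status()  # fail fast on a non-200 response
    with open(f"{contId}.mp4", mode="wb") as f:
        for chunk in video_resp.iter_content(chunk_size=1024 * 64):
            f.write(chunk)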
Proxies (when we scrape the same site repeatedly, the requests become so frequent that the server is likely to block our IP as an anti-scraping measure; once blocked, this machine can no longer crawl the site)
import requests

headers = {
    "User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.141 Safari/537.36",
}

proxies = {
    "https": "https://27.148.248.203:80"
}

resp = requests.get("https://www.baidu.com", headers=headers, proxies=proxies)
print(resp.text)
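A single free proxy tends to die or get blocked quickly. A minimal sketch of cycling through a small proxy pool with a timeout (the addresses in proxy_pool are placeholders rather than known working proxies, and get_with_proxy is a hypothetical helper name):

import random
import requests

proxy_pool = [
    "https://27.148.248.203:80",   # placeholder, replace with live proxies
    "https://203.0.113.5:8080",    # placeholder (documentation address range)
]

def get_with_proxy(url, headers, timeout=5):
    # Try each proxy in random order; skip any that error out or time out
    for proxy in random.sample(proxy_pool, len(proxy_pool)):
        try:
            return requests.get(url, headers=headers,
                                proxies={"https": proxy}, timeout=timeout)
        except requests.RequestException:
            continue
    raise RuntimeError("all proxies in the pool failed")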