import requests
from bs4 import BeautifulSoup
import time
import os
import random
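# Desktop User-Agent strings; one is chosen at random per request so the
# traffic does not present a single, constant client fingerprint.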
agentlist = ["Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50","Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36","Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0","Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0","Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50"]
def get_normal_headers():
    # Build request headers with a randomly chosen User-Agent.
    headers = {
        "User-Agent": random.choice(agentlist)
    }
    return headers
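# Optional hardening sketch (an assumption, not used by the rest of this
# script): a requests.Session with urllib3's Retry mounted will retry
# transient 5xx responses with exponential backoff. The retry count and
# backoff factor below are illustrative choices, not values from the source.
def make_retrying_session():
    from requests.adapters import HTTPAdapter
    from urllib3.util.retry import Retry
    session = requests.Session()
    retry = Retry(total=3, backoff_factor=0.5, status_forcelist=[500, 502, 503, 504])
    session.mount("https://", HTTPAdapter(max_retries=retry))
    session.mount("http://", HTTPAdapter(max_retries=retry))
    return session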
def get_pages():
    # Fetch the gallery index page and queue every listed image for download.
    url1 = "https://zh.qqhentai.com/g/334792/list2/cdnwp/"
    headers = get_normal_headers()
    r = requests.get(url1, headers=headers, timeout=10)
    soup = BeautifulSoup(r.text, "html.parser")
    container = soup.find("section", id="image-container")
    img_list = container.find_all("img", class_="list-img lazyload")
    for i in img_list:
        # The alt text holds the page label; strip spaces to build a filename.
        page = ''.join(i["alt"].split(" "))
        # Lazy-loaded images keep their real URL in data-src, not src.
        imgurl = i["data-src"]
        downloadimg(page, imgurl)
def downloadimg(page, imgurl):
    imgpath = page + ".jpg"
    if os.path.exists(imgpath):
        # Skip files that were already downloaded.
        print("already exists: " + imgpath)
    else:
        headers = get_normal_headers()
        r = requests.get(imgurl, headers=headers, timeout=10)
        print("downloading " + page)
        # The with-statement closes the file even if the write fails.
        with open(imgpath, "wb") as fout:
            fout.write(r.content)
        # Throttle requests slightly to be gentle on the server.
        time.sleep(0.2)
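# Variant sketch (hypothetical, not called anywhere in this script): for large
# images, streaming the response in chunks avoids holding the whole body in
# memory at once.
def downloadimg_streamed(page, imgurl):
    headers = get_normal_headers()
    with requests.get(imgurl, headers=headers, stream=True, timeout=10) as r:
        with open(page + ".jpg", "wb") as fout:
            for chunk in r.iter_content(chunk_size=8192):
                fout.write(chunk)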
if __name__ == "__main__":
    get_pages()