Crawling approach

The goal is to crawl WeChat public-account content through Sogou's WeChat search. When fetching the first-layer URLs, requesting too quickly triggers a captcha. I use the Mogu (mogumiao) proxy service here, and the program decides at runtime whether to attach a proxy, so proxy IPs are only spent when they are actually needed.

On the second-layer URLs the captcha shows up far more often (and it is a completely different captcha from the first layer's). At first I tried rotating proxies there as well, but that didn't really solve the problem. I then used selenium to open the page automatically and download the captcha image directly, only to find that the downloaded image was not the captcha actually displayed on the page, presumably because the site obfuscates it.

In the end I had selenium save a screenshot of the entire page, cropped the captcha out of the screenshot with Python's PIL package, and sent the crop to YunDaMa for recognition. That solved the problem.
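The script imports a local module dama whose yundama() function wraps the captcha-recognition call. That module is not shown in the post; below is a minimal sketch of what such a wrapper might look like, assuming a generic HTTP captcha-solving endpoint. The URL, form fields, and credentials are placeholders, not YunDaMa's actual API.

# dama.py -- hypothetical sketch of the captcha-solving helper (placeholders throughout)
import requests

def yundama(img_path=r'D:\cropped.png'):
    # upload the cropped captcha image and return the recognized text;
    # swap in your captcha service's real endpoint, fields, and credentials
    with open(img_path, 'rb') as f:
        resp = requests.post(
            'http://api.example-dama.com/recognize',   # placeholder endpoint
            data={'username': 'user', 'password': 'pass'},
            files={'image': f},
        )
    return resp.json()['text']

The crawler itself: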
import requests
from lxml import etree
import time
import json
import random
from dama import yundama
from selenium import webdriver
from PIL import Image  # image-processing package
import pymysql
import re
from requests.exceptions import ConnectionError
# ============================ proxy-IP helper ============================
def get_ip():  # fetch a fresh proxy IP
    url = 'http://piping.mogumiao.com/proxy/api/'  # proxy-IP API endpoint
    time.sleep(random.uniform(1, 3))
    response = requests.get(url=url).json()
    n = response['msg'][0]
    ip = {}
    ip['http'] = 'http://' + n['ip'] + ':' + n['port']
    return ip
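# Judging from the indexing above, the Mogu API presumably returns JSON shaped like
#   {"code": "0", "msg": [{"ip": "1.2.3.4", "port": "8080"}]}
# (shape reconstructed from this function, not taken from Mogu's documentation).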
#===================================================================
# pool of User-Agent strings, one picked at random per request
user_agent = [
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.1 (KHTML, like Gecko) Chrome/21.0.1180.71 Safari/537.1 LBBROWSER',
    'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; WOW64; Trident/5.0; SLCC2; .NET CLR 2.0.50727; .NET CLR 3.5.30729; .NET CLR 3.0.30729; Media Center PC 6.0; .NET4.0C; .NET4.0E; LBBROWSER)',
    'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; QQDownload 732; .NET4.0C; .NET4.0E; LBBROWSER)',
    'Mozilla/5.0 (Windows NT 5.1) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.84 Safari/535.11 SE 2.X MetaSr 1.0',
    'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SV1; QQDownload 732; .NET4.0C; .NET4.0E; SE 2.X MetaSr 1.0)',
    'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36 OPR/26.0.1656.60',
    'Opera/8.0 (Windows NT 5.1; U; en)',
    'Mozilla/5.0 (Windows NT 5.1; U; en; rv:1.8.1) Gecko/20061208 Firefox/2.0.0 Opera 9.50',
    'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; en) Opera 9.50'
]
#===================================================================
# connect to the database
db = pymysql.Connect(
    db='test',
    user='root',
    port=3306,
    host='localhost',
    password='mysql',
    charset='utf8'
)
cursor = db.cursor()
proxi = None  # global: the proxy currently in use (None = no proxy)
count = 0  # global: counter for inserted articles
def sougou(page):
    global proxi, count  # update the globals; change the query keyword to crawl other topics
    url = 'http://weixin.sogou.com/weixin?query=python&_sug_type_=&s_from=input&_sug_=n&type=1&ie=utf8&page=' + str(page)
    headers = {
        'Referer': 'http://weixin.sogou.com/weixin?type=1&query=python&ie=utf8&s_from=input&_sug_=n&_sug_type_=1&w=01015002&oq=&ri=5&sourceid=sugg&sut=0&sst0=1540733222633&lkt=0%2C0%2C0&p=40040108',
        'User-Agent': random.choice(user_agent),
        'Cookie': 'CXID=04C14DAB703E117FA82047F41148A82D; SUID=82F4FB723665860A5AB30BA8000211FB; SUV=1526707509991840; UM_distinctid=16376dbdee02b1-0820fe9948d64e-c343567-100200-16376dbdee4fb8; IPLOC=CN1100; usid=ue2M7rhDvZ5zfSvQ; pgv_pvi=1717965824; dt_ssuid=4873588560; ssuid=9294144357; pex=C864C03270DED3DD8A06887A372DA219231FFAC25A9D64AE09E82AED12E416AC; weixinIndexVisited=1; ld=Vkllllllll2bQuDilllllVs2PuGlllllNYkuOkllll9lllllVklll5@@@@@@@@@@; ad=vkllllllll2b8Y4nlllllVsyx@tlllllNYkuskllll9lllllpVxlw@@@@@@@@@@@; ABTEST=8|1540692132|v1; GOTO=Af71175-1502; SUIR=1AC841BDC9CDB1889FD40AC7C92328E1; SNUID=DD51FF0D787D0E234D0D8342788E1DC9; sct=38; JSESSIONID=aaaFhG5t_2zIAdtqom-Aw; Hm_lvt_dde6ba2851f3db0ddc415ce0f895822e=1540807114,1540807224,1540808537,1540816279; Hm_lpvt_dde6ba2851f3db0ddc415ce0f895822e=1540816279'
    }  # mind the cookie's lifetime: it expires and must be refreshed
    try:  # catch exceptions (mainly from bad proxy IPs)
        if proxi:  # a proxy is set: route the request through it
            response = requests.get(url=url, headers=headers, proxies=proxi)
        else:  # proxi is None: go direct, without a proxy
            response = requests.get(url=url, headers=headers)
        html = etree.HTML(response.text)
        datas = html.xpath("//p[@class='tit']/a")
        # -------------------------------------------------------------------
        if len(response.text) > 5500:  # a real result page; captcha pages are much shorter
            for i in datas:
                gongzhonghao = ''.join(i.xpath('.//text()'))  # public-account name
                gongzhonghao_url = i.xpath('.//@href')[0]  # public-account URL
                list_response = requests.get(url=gongzhonghao_url, headers=headers)
                if len(list_response.text) > 6500:  # again, a real page rather than a captcha
                    res = re.compile(r'var msgList = (.*?)};', re.S)  # regex to pull the embedded JSON out of the page
                    lists = json.loads(res.findall(list_response.text)[0] + '}')['list']  # parse it into Python objects
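                    # Shape of msgList as implied by the key accesses below (inferred
                    # from this code, not from any official spec):
                    #   {"list": [{"app_msg_ext_info": {"title": "...",
                    #                                   "content_url": "/s?...&amp;..."}}, ...]}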
                    for i in lists:
                        title_name = i['app_msg_ext_info']['title']  # article title
                        content_url = 'https://mp.weixin.qq.com' + i['app_msg_ext_info']['content_url'].replace('amp;', '')  # build and clean the article URL
                        content_response = requests.get(url=content_url, headers=headers)  # fetch the article page
                        # time.sleep(random.uniform(1, 3))  # recommended: sleep a random interval between requests
                        html1 = etree.HTML(content_response.text)
                        contents = ''.join(html1.xpath('//*[@id="js_content"]//text()')).strip()  # article body text
                        img = html1.xpath('//*[@id="js_content"]//img/@data-src')  # image URLs
                        if len(img) == 0:  # does the page contain any images?
                            imgs = 'no images'
                        else:
                            imgs = img
                        # -------------------------------------------------------------------
                        # check whether this title is already in the database
                        check = 'select * from weixin where title=%s'
                        result = cursor.execute(check, [title_name])  # returns the number of matching rows
                        if result == 0:
                            # not there yet: insert it
                            sql = 'insert into weixin VALUES (0,%s,%s)'
                            cursor.execute(sql, [title_name, re.sub(r'\W', '', contents)])  # strip non-word characters from the body
                            count += 1  # tally the insert
                            print(count, title_name)
                        else:
                            print('{}----------already exists'.format(title_name))
                        db.commit()  # commit the transaction
                else:  # captcha page: solve it through a real browser
                    web = webdriver.Chrome()
                    web.maximize_window()  # maximize the window
                    web.get(list_response.url)  # open the captcha page
                    web.save_screenshot(r"D:\quan.png")  # screenshot the whole page (raw string avoids backslash escapes)
                    captcha_el = web.find_element_by_id('verify_img')  # locate the captcha <img> element on the page
                    # step 1: read its parameters
                    place = captcha_el.location  # captcha coordinates
                    size = captcha_el.size  # captcha dimensions
                    # step 2: assemble the crop box (a tuple)
                    rangle = (int(place['x']), int(place['y']), int(place['x'] + size['width']),
                              int(place['y'] + size['height']))  # the region we need to cut out
                    # step 3: open the screenshot with PIL
                    screenshot = Image.open(r"D:\quan.png")
                    # step 4: crop out the captcha
                    frame4 = screenshot.crop(rangle)  # crop() cuts our region out of the full screenshot
                    # step 5: save the cropped captcha and send it off for recognition
                    frame4.save(r'D:\cropped.png')
                    web.find_element_by_id('input').send_keys(yundama())  # type the text returned by YunDaMa into the input box
                    time.sleep(1)
                    web.find_element_by_id('bt').click()  # click submit
                    time.sleep(2)
                    web.close()  # close the browser
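                    # As described in the write-up above, submitting the solved captcha lifts
                    # the block; note this account's articles are skipped for the current pass
                    # and only get collected on a later run.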
        else:  # the first-layer page is a captcha (text length <= 5500)
            proxi = get_ip()  # switch to a proxy from now on
            print('first-layer page: now using proxy ip {}'.format(proxi))
            sougou(page)  # retry this page recursively
    except ConnectionError:  # the proxy IP is dead
        proxi = get_ip()
        print('request failed, switched to proxy ip {}'.format(proxi))
        sougou(page)
if __name__ == '__main__':
    for page in range(1, 7):  # loop over result pages 1-6
        sougou(page)
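For reference, the weixin table is never defined in the post. Judging from the INSERT statement (a leading 0 for an auto-increment id, then title and content), its schema is presumably something along these lines:

# Hypothetical DDL inferred from `insert into weixin VALUES (0,%s,%s)`;
# adjust column sizes to taste.
cursor.execute('''
    CREATE TABLE IF NOT EXISTS weixin (
        id INT AUTO_INCREMENT PRIMARY KEY,
        title VARCHAR(255),
        content TEXT
    ) DEFAULT CHARSET=utf8
''')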