• 微信公众号爬取


    # Log in to the WeChat Official Account platform with Selenium and
    # capture the session cookies needed for the later HTTP API calls.
    driver = webdriver.Chrome()
    driver.get("https://mp.weixin.qq.com/cgi-bin/loginpage?url=%2Fcgi-bin%2Fhome%3Ft%3Dhome%2Findex%26lang%3Dzh_CN%26token%3D260217419")
    # Generous timeout: the login flow requires a manual QR-code scan.
    wait = WebDriverWait(driver, 1000)

    # Username field
    input1 = wait.until(EC.element_to_be_clickable((By.XPATH, '//*[@id="header"]/div[2]/div/div/form/div[1]/div[1]/div/span/input')))
    input1.send_keys("用户名")
    # Password field
    input2 = wait.until(EC.element_to_be_clickable((By.XPATH, '//*[@id="header"]/div[2]/div/div/form/div[1]/div[2]/div/span/input')))
    input2.send_keys("密码")

    # Click the login button.  Selenium 4 removed find_element_by_xpath;
    # use the find_element(By.XPATH, ...) form the file already imports.
    btn = driver.find_element(By.XPATH, '//*[@id="header"]/div[2]/div/div/form/div[4]/a')
    btn.click()
    # Wait until the post-login page has rendered
    wait.until(EC.element_to_be_clickable((By.XPATH, '//*[@id="m_open"]')))

    # Collect the browser cookies into a plain dict so requests can reuse
    # the authenticated session.
    cookie_items = driver.get_cookies()
    post = {cookie_item['name']: cookie_item['value'] for cookie_item in cookie_items}
    # cookie_str = json.dumps(post)

    driver.maximize_window()
    # Extract the login token from the post-login URL; every subsequent
    # API call must carry it.  BUG FIX: the original pattern r'token=(d+)'
    # was missing the backslash, so it matched a literal "d" and the [0]
    # below always raised IndexError.
    re_url = driver.current_url
    token = re.findall(r'token=(\d+)', str(re_url))[0]
    import random
    # Official-account search endpoint
    search_url = 'https://mp.weixin.qq.com/cgi-bin/searchbiz?'
    query_id = {
        'action': 'search_biz',
        'token': token,
        'lang': 'zh_CN',
        'f': 'json',
        'ajax': '1',
        'random': random.random(),  # cache-buster expected by the endpoint
        'query': '科技美学',
        'begin': '0',
        'count': '5'
    }
    header = {
        "HOST": "mp.weixin.qq.com",
        "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:53.0) Gecko/20100101 Firefox/53.0"
    }

    search_response = requests.get(search_url, cookies=post, headers=header, params=query_id)
    # Take the first official account in the search results
    lists = search_response.json().get('list')[0]
    print(lists)
    # fakeid uniquely identifies the account; the article API below needs it
    fakeid = lists.get('fakeid')
    print(search_response.url)
    # Article-list endpoint for an official account
    appmsg_url = 'https://mp.weixin.qq.com/cgi-bin/appmsg?'
    # The article query needs three key inputs: the logged-in session's
    # token, the target account's fakeid, and a random cache-buster.
    query_id_data = {
        'token': token,
        'lang': 'zh_CN',
        'f': 'json',
        'ajax': '1',
        'random': random.random(),
        'action': 'list_ex',
        'begin': '0',  # page offset; later pages advance this by 5
        'count': '5',
        'query': '',
        'fakeid': fakeid,
        'type': '9'
    }
    # Request the first page of the account's article list
    appmsg_response = requests.get(appmsg_url, cookies=post, headers=header, params=query_id_data)
    # Total article count, used below to work out how many pages to fetch
    max_num = appmsg_response.json().get('app_msg_cnt')
    print(appmsg_response.url)

    # Page through the article list, appending every title + link to a
    # local text file.  Each page holds at most 5 articles, so the number
    # of pages is max_num // 5 (plus one partial page), and `begin`
    # advances by 5 per request.
    num = int(int(max_num) / 5)
    begin = 0
    while num + 1 > 0:
        query_id_data = {
            'token': token,
            'lang': 'zh_CN',
            'f': 'json',
            'ajax': '1',
            'random': random.random(),
            'action': 'list_ex',
            'begin': '{}'.format(str(begin)),
            'count': '5',
            'query': '',
            'fakeid': fakeid,
            'type': '9'
        }
        print('正在翻页:--------------', begin)

        # Fetch one page and write its title/link pairs to the output file.
        # The file is opened once per page (append mode), not once per item.
        query_fakeid_response = requests.get(appmsg_url, cookies=post, headers=header, params=query_id_data)
        fakeid_list = query_fakeid_response.json().get('app_msg_list')
        fileName = '科技美学.txt'
        with open(fileName, 'a', encoding='utf-8') as fh:
            for item in fakeid_list:
                content_link = item.get('link')
                content_title = item.get('title')
                # NOTE(review): entries are separated by a single space, not a
                # newline — confirm this is the intended output format.
                fh.write(content_title + ": " + content_link + " ")
        num -= 1
        begin += 5
        time.sleep(2)  # throttle requests to avoid the anti-crawler ban
  • 相关阅读:
    对于数据的测试
    绕过前端,直接调用后端接口的可能性
    API接口自动化之3 同一个war包中多个接口做自动化测试
    API接口自动化之2 处理http请求的返回体,对返回体做校验
    API接口自动化之1 常见的http请求
    DB中字段为null,为空,为空字符串,为空格要怎么过滤取出有效值
    Linux 常用的压缩命令有 gzip 和 zip
    SQL 常用的命令
    JVM内存管理的机制
    Linux 常见命令
  • 原文地址:https://www.cnblogs.com/SealLiu/p/10558878.html
Copyright © 2020-2023  润新知