• python3简单实现微信爬虫


    Python爬虫视频教程零基础小白到scrapy爬虫高手-轻松入门

    https://item.taobao.com/item.htm?spm=a1z38n.10677092.0.0.482434a6EmUbbW&id=564564604865

    使用 ghost.py,通过搜搜的微信搜索来爬取微信公众账号的信息

    # -*- coding: utf-8 -*-
    """Scrape WeChat public-account entries from a Sogou WeChat search
    result page, driving a headless browser with ghost.py.

    Fixes over the original snippet:
    - ``url`` was used but never defined (NameError on ghost.open); it is
      now an explicit constant to fill in before running.
    - Python 3 ``print()``; the py2-only ``reload(sys)`` /
      ``sys.setdefaultencoding`` hack is removed (py3 str is unicode).
    - BeautifulSoup gets an explicit parser (silences the guessing warning).
    - The ``while True`` + manual counter is a bounded ``for`` loop.
    """
    from bs4 import BeautifulSoup
    from ghost import Ghost

    # TODO(review): set to the Sogou WeChat search result page to scrape.
    START_URL = "http://weixin.sogou.com/"

    # Number of times to click the "load more" link before stopping.
    MAX_PAGES = 30

    ghost = Ghost(wait_timeout=20)

    page, resources = ghost.open(START_URL)
    # Wait until the "load more" link exists before interacting with it.
    result, resources = ghost.wait_for_selector("#wxmore a")

    for _ in range(MAX_PAGES):
        soup = BeautifulSoup(ghost.content, "html.parser")

        # Each account entry title is rendered inside an <h4> element.
        for wx in soup.find_all("h4"):
            print(wx)

        # Empty the result container so the next batch of results is not
        # mixed in with the ones already printed.
        page, resources = ghost.evaluate(
            """
            var div1 = document.getElementById("wxbox");
            div1.innerHTML = '';
            """)
        ghost.click("#wxmore a")
        result, resources = ghost.wait_for_selector(".wx-rb3")

    http://www.jb51.net/article/78925.htm

    本文给大家分享的是使用python通过搜狗入口,爬取微信文章的小程序,非常的简单实用,有需要的小伙伴可以参考下

    本人想搞个采集微信文章的网站,无奈实在从微信本生无法找到入口链接,网上翻看了大量的资料,发现大家的做法总体来说大同小异,都是以搜狗为入口。下文是笔者整理的一份python爬取微信文章的代码,有兴趣的欢迎阅读

    #!/usr/bin/env python
    # -*- coding: utf-8 -*-
    """Crawl WeChat article metadata through Sogou's weixin.sogou.com
    ``gzhjs`` JSON endpoint.

    Cleanups over the original paste: stray markdown ``**`` markers were
    fused into the code (syntax errors), the shebang was buried mid-file,
    and the py2-only ``reload(sys)`` / ``sys.setdefaultencoding`` hack is
    removed (py3 strings are unicode already). Imports are grouped
    stdlib / third-party; none are deleted.
    """
    import datetime
    import json
    import os
    import re
    import sys
    import time
    import xml.etree.ElementTree as ET

    import requests

    author = 'haoning'

    # Sogou "openid" identifying the public account to crawl.
    # Previous value kept for reference:
    #OPENID = 'oIWsFtyel13ZMva1qltQ3pfejlwU'
    OPENID = 'oIWsFtw_-W2DaHwRz1oGWzL-wF9M&ext'

    # Accumulates the per-article XML snippets from every result page.
    XML_LIST = []

    # Current Unix time in milliseconds — the endpoint's cache-busting
    # ``t`` query parameter.
    current_milli_time = lambda: int(round(time.time() * 1000))
    def get_json(pageIndex):
        """Fetch one result page from Sogou's ``gzhjs`` endpoint and return
        the decoded JSON payload.

        The endpoint answers with a JSONP wrapper,
        ``sogou.weixin.gzhcb({...})``; the wrapper is stripped before the
        payload is passed to ``json.loads``.

        :param pageIndex: 1-based page number to request.
        :return: dict parsed from the payload (observed keys include
                 ``items``, ``totalPages``, ``totalItems``).
        :raises ValueError: if the response does not look like the
                 expected JSONP wrapper.
        """
        global OPENID
        the_headers = {
            'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36',
            'Referer': 'http://weixin.sogou.com/gzh?openid={0}'.format(OPENID),
            'Host': 'weixin.sogou.com'
        }

        url = 'http://weixin.sogou.com/gzhjs?cb=sogou.weixin.gzhcb&openid={0}&page={1}&t={2}'.format(
            OPENID, pageIndex, current_milli_time())
        print(url)

        response = requests.get(url, headers=the_headers)
        response_text = response.text
        print(response_text)

        # Strip the JSONP wrapper. The original used string slicing with
        # ``index(')') - 2``, which breaks as soon as the JSON payload
        # itself contains a ')'; match greedily up to the LAST closing
        # parenthesis instead.
        match = re.search(r'sogou\.weixin\.gzhcb\((.*)\)', response_text, re.S)
        if match is None:
            raise ValueError('unexpected gzhjs response: %r' % response_text[:200])
        json_str = match.group(1)

        return json.loads(json_str)
    def add_xml(jsonObj):
        """Append one page's article entries to the global ``XML_LIST``.

        :param jsonObj: decoded ``gzhjs`` payload; its ``'items'`` value is
                        a list of per-article XML strings.
        """
        global XML_LIST
        xmls = jsonObj['items']
        # extend (not append): keep XML_LIST a single flat list of entries.
        XML_LIST.extend(xmls)
    # ------------ Main ----------------
    print('play it :) ')

    # Fetch page 1 first to learn the total page count.
    default_json_obj = get_json(1)
    total_pages = 0
    total_items = 0
    if default_json_obj:
        # Record page 1's items.
        add_xml(default_json_obj)
        total_pages = default_json_obj['totalPages']
        total_items = default_json_obj['totalItems']
        print(total_pages)
        # Fetch and record the remaining pages, if any.
        if total_pages >= 2:
            for pageIndex in range(2, total_pages + 1):
                add_xml(get_json(pageIndex))
                print('load page ' + str(pageIndex))
                print(len(XML_LIST))
  • 相关阅读:
    thinkphp中<eq>标签的使用
    Thinkphp中的eq比较标签
    select取数据库值设为默认值,TP框架模板中ifelse
    fastadmin 前端根据status自定义显示不同的内容
    CMS自定义表单无法切换“是否需要登录”开关
    js获取域名
    fastadmin 页面添加编辑日期时间
    bootstrap-table给单元格添加链接
    python相关资料
    区块链共识机制 —— PoW共识的Python实现
  • 原文地址:https://www.cnblogs.com/webRobot/p/5523574.html
Copyright © 2020-2023  润新知