• How to use xpath, the web scraping power tool (part 4)


    Using xpath with multiple threads to crawl Baidu Tieba posts

    # -*- coding: utf-8 -*-
    from lxml import etree
    from multiprocessing.dummy import Pool as ThreadPool
    import requests
    import json
    import sys
    
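    # Python 2 only: reset the interpreter's default encoding so Chinese text can be written to the file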
    reload(sys)
    
    sys.setdefaultencoding('utf-8')
    
    '''Delete content.txt before re-running: the file is opened in append mode, so old output piles up.'''
    
    def towrite(contentdict):
        f.writelines(u'回帖时间:' + str(contentdict['topic_reply_time']) + '\n')
        f.writelines(u'回帖内容:' + unicode(contentdict['topic_reply_content']) + '\n')
        f.writelines(u'回帖人:' + contentdict['user_name'] + '\n\n')
    
    def spider(url):
        html = requests.get(url)
        selector = etree.HTML(html.text)
        content_field = selector.xpath('//div[@class="l_post j_l_post l_post_bright  "]')
        item = {}
        for each in content_field:
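            # each post node carries its metadata (author, reply date, ...) as JSON in its data-field attribute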
            reply_info = json.loads(each.xpath('@data-field')[0].replace('&quot',''))
            author = reply_info['author']['user_name']
            content = each.xpath('div[@class="d_post_content_main"]/div/cc/div[@class="d_post_content j_d_post_content  clearfix"]/text()')[0]
            reply_time = reply_info['content']['date']
            item['user_name'] = author
            item['topic_reply_content'] = content
            item['topic_reply_time'] = reply_time
            towrite(item)
    
    if __name__ == '__main__':
        pool = ThreadPool(4)
        f = open('content.txt','a')
        page = []
        for i in range(1,10):
            newpage = 'http://tieba.baidu.com/p/3522395718?pn=' + str(i)
            page.append(newpage)
    
        results = pool.map(spider, page)
        pool.close()
        pool.join()
        f.close()
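    The script above targets Python 2 (reload(sys), setdefaultencoding, unicode()). Below is a minimal Python 3 sketch of the same pipeline, reusing the URL and xpath expressions from the script; whether those class names still match Tieba's current markup is not verified here. It also collects results in the main thread instead of having every worker thread write to a shared file handle.

    # -*- coding: utf-8 -*-
    '''Python 3 sketch of the multithreaded Tieba spider above (same URL and xpath assumptions).'''
    from multiprocessing.dummy import Pool as ThreadPool
    from lxml import etree
    import requests
    import json

    def spider(url):
        html = requests.get(url)
        selector = etree.HTML(html.text)
        posts = selector.xpath('//div[@class="l_post j_l_post l_post_bright  "]')
        items = []
        for each in posts:
            # post metadata is carried as JSON in the data-field attribute
            reply_info = json.loads(each.xpath('@data-field')[0].replace('&quot', ''))
            content = each.xpath('div[@class="d_post_content_main"]/div/cc/div[@class="d_post_content j_d_post_content  clearfix"]/text()')
            items.append({
                'user_name': reply_info['author']['user_name'],
                'topic_reply_content': content[0].strip() if content else '',
                'topic_reply_time': reply_info['content']['date'],
            })
        return items

    if __name__ == '__main__':
        pages = ['http://tieba.baidu.com/p/3522395718?pn=%d' % i for i in range(1, 10)]
        pool = ThreadPool(4)
        results = pool.map(spider, pages)   # one page per worker thread
        pool.close()
        pool.join()
        # write everything from the main thread, so no file handle is shared across threads
        with open('content.txt', 'w', encoding='utf-8') as f:
            for page_items in results:
                for item in page_items:
                    f.write(u'回帖时间:%s\n回帖内容:%s\n回帖人:%s\n\n' % (
                        item['topic_reply_time'], item['topic_reply_content'], item['user_name']))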
• Original post: https://www.cnblogs.com/gide/p/5247146.html