• 【Mini crawler: scraping Qiushibaike posts】XPath (multi-threaded version)


    
    
    # This program has been tested and works. It is meant for learning basic crawler concepts; corrections and suggestions are welcome.
    from queue import Queue
    import requests
    from lxml import etree
    from threading import Thread
    
    """爬取目标:http://www.qiushibaike.com/8hr/page/1
        用多线程实现
    """
    
    
    class QiuShi:
    
        def __init__(self):
    
            # URL template and request headers (headers must be a dict for requests)
            self.base_url = 'http://www.qiushibaike.com/8hr/page/{}'
            self.headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.67 Safari/537.36'}
    
            # queues used to pass data between the pipeline stages
            self.url_queue = Queue()
            self.request_queue = Queue()
            self.html_queue = Queue()
    
        def get_url_list(self):
            """获取所有的url"""
            for i in range(1, 14):
                target_url = self.base_url.format(i)
                print(target_url)
                self.url_queue.put(target_url)
    
        def request_url(self):
            """向url发起请求"""
            while True:
                target_url = self.url_queue.get()
                response = requests.get(target_url, headers=self.headers)
                self.request_queue.put(response)
                self.url_queue.task_done()
    
        def get_content(self):
            """获取数据"""
            while True:
                html_text = self.request_queue.get().content.decode()
                html = etree.HTML(html_text)
                div_list = html.xpath('//div[@id="content-left"]/div')
                content_list = []
                for div in div_list:
                    item = {}
                    item['author'] = div.xpath('.//h2/text()')[0].strip()
                    item['content'] = div.xpath('.//span/text()')[0].strip()
                    print(item)
                    content_list.append(item)
                self.html_queue.put(content_list)
                self.request_queue.task_done()
    
        def save_data(self):
            """Save the extracted items to a file"""
            while True:
                data_list = self.html_queue.get()
                # open the file once per batch instead of once per item
                with open('qiushi.text', 'a+', encoding='utf-8') as f:
                    for data in data_list:
                        f.write(str(data))
                        f.write('\n')
                self.html_queue.task_done()
    
        def main(self):
            """Main program logic"""
            # a collector that holds all worker threads
            thread_list = []
            # 1. build the URL list
            self.get_url_list()
            # 2. request the URLs
            t_request_url = Thread(target=self.request_url)
            thread_list.append(t_request_url)
            # 3. parsing is the heavier job; it could be spread over four threads, e.g.:
            # for worker in range(4):
            t_get_content = Thread(target=self.get_content)
            thread_list.append(t_get_content)
            # 4. save the results
            t_save_data = Thread(target=self.save_data)
            thread_list.append(t_save_data)

            # start every collected thread; they run while-True loops,
            # so mark them as daemons or the process would never exit
            for s in thread_list:
                s.daemon = True
                s.start()

            # once every queued task is done, the main thread can finish
            for q in [self.url_queue, self.request_queue, self.html_queue]:
                q.join()
            print("main thread finished")
    
    
    if __name__ == '__main__':
        qiushi = QiuShi()
        qiushi.main()
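
The `get_content` step pairs `etree.HTML` with XPath: `//div[@id="content-left"]/div` selects one `div` per post, and the relative paths `.//h2/text()` and `.//span/text()` pull out the author and the body. Those selectors match Qiushibaike's markup at the time the post was written and may have changed since. A minimal sketch of the same extraction pattern, run against an invented HTML snippet rather than the live site:

    from lxml import etree

    # invented snippet that mimics the structure the spider expects
    html = etree.HTML('''
    <div id="content-left">
        <div><h2> alice </h2><span> first post </span></div>
        <div><h2> bob </h2><span> second post </span></div>
    </div>
    ''')

    for div in html.xpath('//div[@id="content-left"]/div'):
        item = {
            'author': div.xpath('.//h2/text()')[0].strip(),
            'content': div.xpath('.//span/text()')[0].strip(),
        }
        print(item)  # {'author': 'alice', 'content': 'first post'} ...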
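
The shutdown logic depends on `Queue`'s task counter: every `put()` increments it, every `task_done()` decrements it, and `join()` blocks until it reaches zero. Because each worker spins in a `while True` loop, the threads are marked as daemons so the interpreter can exit once the queues drain. A self-contained sketch of that producer-consumer pattern (the names `q` and `worker` are illustrative, not from the original post):

    from queue import Queue
    from threading import Thread

    q = Queue()

    def worker():
        """Consume items forever; task_done() marks one item as finished."""
        while True:
            item = q.get()
            print('processed', item)
            q.task_done()

    t = Thread(target=worker)
    t.daemon = True  # daemon threads do not block interpreter exit
    t.start()

    for i in range(5):
        q.put(i)

    q.join()  # returns once task_done() has been called for every put()
    print('all items processed, main thread exits')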
• Original post: https://www.cnblogs.com/888888CN/p/10070250.html