• 19 03 12 huanqiu.com: three-level page drill-down with pagination, scraping and printing the data (no-database version)


    This version skips the database for now; the scraped items are simply printed.
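
    Since nothing is stored, the items only need to be printed. If you prefer a pipeline over watching the spider's print() output, a print-only pipeline is enough. The sketch below is an assumption (the original post shows no pipelines.py), and the name PrintPipeline is hypothetical:

    # pipelines.py -- print-only sketch; PrintPipeline is a hypothetical name
    class PrintPipeline(object):
        def process_item(self, item, spider):
            print(item)   # no database: just print each scraped item
            return item   # return it so any later pipeline could still run

    It would be enabled in settings with ITEM_PIPELINES = {'yang_guan.pipelines.PrintPipeline': 300}.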

    The spider:

    # -*- coding: utf-8 -*-
    import scrapy
    # from yang_guan.items import YangGuanItem
    from copy import deepcopy
    
    
    class YgSpider(scrapy.Spider):
        name = 'yg'
        # allowed_domains = ['huanqiu.com']
        start_urls = ['http://www.huanqiu.com/',
                      ]
    
        def parse(self, response):  # top-level page; the first callback must be named parse so it receives the start_urls responses
            item = {}
    
            class_news_urls_li = response.xpath(".//div[@class='navCon']/ul/li/a")
            print(class_news_urls_li)
            for class_news_url in class_news_urls_li:
                item["class_title"] = class_news_url.xpath("./text()").extract_first()
                print(item)
                new_url = class_news_url.xpath("./@href").extract_first()
                print(new_url)
                yield scrapy.Request(
                    new_url,
                    callback=self.second_class,
                    meta={"item": deepcopy(item)},  # requests run concurrently, so pass a deep copy so each one gets its own item
                )
    
        def second_class(self, response):  # second-level page
            item = response.meta["item"]
            print(response.url)
    
            second_urls = response.xpath(".//div/h2/em")
    
            for second_url in second_urls:
                second_news_url = second_url.xpath("./a/@href").extract_first()
    
                yield scrapy.Request(
                    second_news_url,
                    callback=self.parse_detail_analyze,
                    meta={"item": deepcopy(item)}
                )
    
        def parse_detail_analyze(self, response):  # third-level page: scrape the details, e.g. http://china.huanqiu.com/leaders/
            item = response.meta["item"]
    
            li_list = response.xpath("//ul[@class='listPicBox']/li")
    
            for li in li_list:
                # item = YangGuanItem()
                item["title"] = li.xpath("./h3/a/text()").extract_first()
                item["img_url"] = li.xpath("./a/img/@src").extract_first()
                item["detail"] = li.xpath("./h5/text()").extract_first()
                yield item
    
            next_url = response.xpath(".//div[@class='pageBox']/div/a[last()]/@href").extract_first()  # pagination: the last link in the page box is the "next page" link
    
            if next_url:  # the last page has no next link, so guard against a None URL
                yield scrapy.Request(next_url, callback=self.parse_detail_analyze, meta={"item": response.meta["item"]})
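
    The commented-out import from yang_guan.items suggests an items.py along these lines. This is a sketch reconstructed from the field names the spider sets, not the original file:

    # -*- coding: utf-8 -*-
    # items.py -- sketch; fields mirror the keys used in the spider
    import scrapy
    
    
    class YangGuanItem(scrapy.Item):
        class_title = scrapy.Field()  # section name from the nav bar
        title = scrapy.Field()        # article title
        img_url = scrapy.Field()      # thumbnail image URL
        detail = scrapy.Field()       # article summary text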

    About settings:

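    A minimal settings.py sketch that such a spider would typically run with; the concrete values below (user agent string, log level) are assumptions rather than the original project's settings:

    # settings.py -- minimal sketch; the values are illustrative assumptions
    BOT_NAME = 'yang_guan'
    
    SPIDER_MODULES = ['yang_guan.spiders']
    NEWSPIDER_MODULE = 'yang_guan.spiders'
    
    # present a browser user agent; the exact string is an assumption
    USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64)'
    
    # the site's robots.txt could otherwise block parts of the crawl
    ROBOTSTXT_OBEY = False
    
    # keep the console quiet so the printed items stand out
    LOG_LEVEL = 'WARNING'

    Run the spider with scrapy crawl yg; the print() calls in the spider (or the PrintPipeline above) then show the scraped data in the console.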
  • Original post: https://www.cnblogs.com/fromlantianwei/p/10514627.html