• Scraping Quotes to Scrape with Scrapy


    # Main spider: quotes.py
    # -*- coding: utf-8 -*-
    import scrapy
    from quotetutorial.items import QuoteItem
    # Spider entry point
    # The initial requests are handled by Scrapy's defaults, so we only need to care about parsing the responses
    class QuotesSpider(scrapy.Spider):
        name = "quotes"
        allowed_domains = ["quotes.toscrape.com"]
        start_urls = ['http://quotes.toscrape.com/']
        # Scrapy does a lot by default: given start_urls, the built-in start_requests method
        # iterates over the URLs, issues a Request for each one, and hands each Response to
        # the parse callback (an explicit sketch of this follows below)
        # The result of each request is passed back to parse; response is the response object
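        # Illustration only (an assumption, not part of the original tutorial): defining
        # start_requests yourself is optional; this is roughly what Scrapy already does
        # with start_urls by default.
        def start_requests(self):
            for url in self.start_urls:
                yield scrapy.Request(url, callback=self.parse, dont_filter=True)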
        def parse(self, response):
            quotes = response.css('.quote')
            for quote in quotes:
                # Use extract_first() when you expect a single value; use extract() to pull out all matches as a list
                # You can try these selectors interactively with: scrapy shell quotes.toscrape.com
                item = QuoteItem()
                text = quote.css('.text::text').extract_first()
                author = quote.css('.author::text').extract_first()
                tags = quote.css('.tags .tag::text').extract()
                item['text'] = text
                item['author'] = author
                item['tags'] = tags
                yield item
            # Get the relative URL of the next page
            next_page = response.css('.pager .next a::attr(href)').extract_first()
            if next_page is not None:
                # Join it with the current URL to build an absolute URL
                url = response.urljoin(next_page)
                # Recurse: request the next page and parse it with this same callback
                yield scrapy.Request(url=url, callback=self.parse)
            # Export the results with: scrapy crawl quotes -o <filename>
            # quotes.json/quotes.jl/quotes.csv/quotes.pickle/quotes.xml/quotes.marshal export the corresponding formats
            # ftp://user:pass@ftp.example.com/path/quotes.csv saves the feed to a remote FTP server
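    # A quick way to test the selectors above (a sketch; the exact output depends on the live site):
    #     $ scrapy shell 'http://quotes.toscrape.com'
    #     >>> quote = response.css('.quote')[0]
    #     >>> quote.css('.text::text').extract_first()    # a single string
    #     >>> quote.css('.tags .tag::text').extract()     # a list of strings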
    # items.py defines the data structure of the scraped items
    # -*- coding: utf-8 -*-
    # Define here the models for your scraped items
    #
    # See documentation in:
    # http://doc.scrapy.org/en/latest/topics/items.html
    # An Item is essentially a data structure: declare one field for each piece of data you want, e.g. a title, an image, or text content
    import scrapy
    
    
    class QuoteItem(scrapy.Item):
        # define the fields for your item here like:
        # name = scrapy.Field()
        text = scrapy.Field()
        author = scrapy.Field()
        tags = scrapy.Field()
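    # Sketch (an assumption, not in the original): a QuoteItem behaves like a dict restricted
    # to its declared fields, e.g.:
    #     item = QuoteItem(text='t', author='a', tags=['x'])
    #     dict(item)           # -> {'text': 't', 'author': 'a', 'tags': ['x']}, as used by MongoPipeline
    #     item['other'] = 1    # -> raises KeyError because 'other' is not a declared Field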
    # Spider middleware hooks: middlewares.py
    # -*- coding: utf-8 -*-
    
    # Define here the models for your spider middleware
    #
    # See documentation in:
    # http://doc.scrapy.org/en/latest/topics/spider-middleware.html
    
    from scrapy import signals
    
    
    class QuotetutorialSpiderMiddleware(object):
        # Not all methods need to be defined. If a method is not defined,
        # scrapy acts as if the spider middleware does not modify the
        # passed objects.
    
        @classmethod
        def from_crawler(cls, crawler):
            # This method is used by Scrapy to create your spiders.
            s = cls()
            crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
            return s
    
        def process_spider_input(self, response, spider):
            # Called for each response that goes through the spider
            # middleware and into the spider.
    
            # Should return None or raise an exception.
            return None
    
        def process_spider_output(self, response, result, spider):
            # Called with the results returned from the Spider, after
            # it has processed the response.
    
            # Must return an iterable of Request, dict or Item objects.
            for i in result:
                yield i
    
        def process_spider_exception(self, response, exception, spider):
            # Called when a spider or process_spider_input() method
            # (from other spider middleware) raises an exception.
    
            # Should return either None or an iterable of Response, dict
            # or Item objects.
            pass
    
        def process_start_requests(self, start_requests, spider):
            # Called with the start requests of the spider, and works
            # similarly to the process_spider_output() method, except
            # that it doesn’t have a response associated.
    
            # Must return only requests (not items).
            for r in start_requests:
                yield r
    
        def spider_opened(self, spider):
            spider.logger.info('Spider opened: %s' % spider.name)
    # Item pipelines: pipelines.py decides how the scraped data is processed
    # -*- coding: utf-8 -*-
    
    # Define your item pipelines here
    #
    # Don't forget to add your pipeline to the ITEM_PIPELINES setting
    # See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
    from scrapy.exceptions import DropItem
    import pymongo
    # Truncate the quote text when it exceeds a length limit
    class TextPipeline(object):
        def __init__(self):
            self.limit = 50
    
        def process_item(self, item, spider):
            if item['text']:
                if len(item['text']) > self.limit:
                    item['text'] = item['text'][0:self.limit].rstrip() + '...'
                return item
            else:
                raise DropItem('Missing Text')
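    # Rough usage sketch (an assumption, not in the original) of what TextPipeline does to one item:
    #     pipeline = TextPipeline()
    #     pipeline.process_item({'text': 'x' * 80}, spider=None)   # text is cut to 50 chars + '...'
    #     pipeline.process_item({'text': ''}, spider=None)         # raises DropItem('Missing Text')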
    
    class MongoPipeline(object):
        def __init__(self, mongo_url, mongo_db):
            self.mongo_url = mongo_url
            self.mongo_db = mongo_db
    
        @classmethod
        def from_crawler(cls, crawler):
            return cls(
                mongo_url=crawler.settings.get('MONGO_URL'),
                mongo_db=crawler.settings.get('MONGO_DB')
            )

        # Open the MongoDB connection when the spider starts
        def open_spider(self, spider):
            self.client = pymongo.MongoClient(self.mongo_url)
            self.db = self.client[self.mongo_db]
    
        # Save each item to MongoDB
        def process_item(self, item, spider):
            name = item.__class__.__name__
            # insert_one is the current pymongo API (the old insert() was removed in PyMongo 4)
            self.db[name].insert_one(dict(item))
            return item  # Be sure to return the item here, otherwise downstream pipelines receive None

        def close_spider(self, spider):
            self.client.close()
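    # Sketch for checking the stored data afterwards (assumes the MONGO_URL / MONGO_DB values
    # from the settings below; not part of the original tutorial):
    #     import pymongo
    #     db = pymongo.MongoClient('localhost')['quotetutorial']
    #     db['QuoteItem'].count_documents({})   # number of saved quotes
    #     db['QuoteItem'].find_one()            # inspect one stored document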
    # Project settings: settings.py
    # -*- coding: utf-8 -*-
    
    # Scrapy settings for quotetutorial project
    #
    # For simplicity, this file contains only settings considered important or
    # commonly used. You can find more settings consulting the documentation:
    #
    #     http://doc.scrapy.org/en/latest/topics/settings.html
    #     http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
    #     http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
    
    BOT_NAME = 'quotetutorial'
    
    SPIDER_MODULES = ['quotetutorial.spiders']
    NEWSPIDER_MODULE = 'quotetutorial.spiders'
    MONGO_URL = 'localhost'
    MONGO_DB = 'quotetutorial'
    
    
    # Crawl responsibly by identifying yourself (and your website) on the user-agent
    #USER_AGENT = 'quotetutorial (+http://www.yourdomain.com)'
    
    # Obey robots.txt rules
    ROBOTSTXT_OBEY = True
    
    # Configure maximum concurrent requests performed by Scrapy (default: 16)
    #CONCURRENT_REQUESTS = 32
    
    # Configure a delay for requests for the same website (default: 0)
    # See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
    # See also autothrottle settings and docs
    #DOWNLOAD_DELAY = 3
    # The download delay setting will honor only one of:
    #CONCURRENT_REQUESTS_PER_DOMAIN = 16
    #CONCURRENT_REQUESTS_PER_IP = 16
    
    # Disable cookies (enabled by default)
    #COOKIES_ENABLED = False
    
    # Disable Telnet Console (enabled by default)
    #TELNETCONSOLE_ENABLED = False
    
    # Override the default request headers:
    #DEFAULT_REQUEST_HEADERS = {
    #   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    #   'Accept-Language': 'en',
    #}
    
    # Enable or disable spider middlewares
    # See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
    #SPIDER_MIDDLEWARES = {
    #    'quotetutorial.middlewares.QuotetutorialSpiderMiddleware': 543,
    #}
    
    # Enable or disable downloader middlewares
    # See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
    #DOWNLOADER_MIDDLEWARES = {
    #    'quotetutorial.middlewares.MyCustomDownloaderMiddleware': 543,
    #}
    
    # Enable or disable extensions
    # See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
    #EXTENSIONS = {
    #    'scrapy.extensions.telnet.TelnetConsole': None,
    #}
    
    # Configure item pipelines
    # See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
    ITEM_PIPELINES = {
       # 'quotetutorial.pipelines.TextPipeline': 300,
       'quotetutorial.pipelines.MongoPipeline': 400,
    }
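    # To run the text-cleaning step before the MongoDB write, enable both pipelines;
    # lower numbers run first (a sketch, not the configuration used above):
    #ITEM_PIPELINES = {
    #   'quotetutorial.pipelines.TextPipeline': 300,
    #   'quotetutorial.pipelines.MongoPipeline': 400,
    #}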
    
    # Enable and configure the AutoThrottle extension (disabled by default)
    # See http://doc.scrapy.org/en/latest/topics/autothrottle.html
    #AUTOTHROTTLE_ENABLED = True
    # The initial download delay
    #AUTOTHROTTLE_START_DELAY = 5
    # The maximum download delay to be set in case of high latencies
    #AUTOTHROTTLE_MAX_DELAY = 60
    # The average number of requests Scrapy should be sending in parallel to
    # each remote server
    #AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
    # Enable showing throttling stats for every response received:
    #AUTOTHROTTLE_DEBUG = False
    
    # Enable and configure HTTP caching (disabled by default)
    # See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
    #HTTPCACHE_ENABLED = True
    #HTTPCACHE_EXPIRATION_SECS = 0
    #HTTPCACHE_DIR = 'httpcache'
    #HTTPCACHE_IGNORE_HTTP_CODES = []
    #HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
    # Using Scrapy to crawl quotes.toscrape.com
    # Step 1: analyse the target site and decide on the data structure, i.e. the fields to scrape; declare them in items.py
    # Step 2: crawl the first page, parse it, and extract the items
    # Step 3: extract the next-page link from each page and recursively issue new requests to crawl every page
    # Step 4: adjust settings.py, pipelines.py, etc. to modify, clean, organise and store the items
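    # A possible end-to-end command sequence (a sketch; the project and spider names are assumed
    # to match the code above):
    #     scrapy startproject quotetutorial
    #     cd quotetutorial
    #     scrapy genspider quotes quotes.toscrape.com
    #     scrapy crawl quotes -o quotes.json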
  • Original article: https://www.cnblogs.com/themost/p/6994274.html