• Downloading large files with Scrapy

    Scrapy's own downloader buffers each response fully in memory before handing it to the spider, which makes it a poor fit for very large files. The approach below instead yields an item that carries the file URL, and an item pipeline streams the body to disk chunk by chunk with a raw Twisted Agent.


    1、spider.py

    # -*- coding: utf-8 -*-
    import scrapy
    from ..items import BigfileItem
    
    
    class ChoutiSpider(scrapy.Spider):
        name = "chouti"
        allowed_domains = ["chouti.com"]
        start_urls = (
            'http://www.chouti.com/',
        )
    
        def parse(self, response):
            yield BigfileItem(
                url='https://wx1.sinaimg.cn/large/8a04ef95ly1frbvy0d2opj20j00tm0vt.jpg',
                type='file',
                file_name='xx.jpg',
            )
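
    In a real spider, parse would extract file URLs from the response instead of hard-coding one. A minimal sketch of that idea (the img::attr(src) selector and the index-based file names are hypothetical; adjust them for the actual page):

        def parse(self, response):
            # hypothetical: emit one BigfileItem per <img> found on the page
            for i, src in enumerate(response.css('img::attr(src)').extract()):
                yield BigfileItem(url=response.urljoin(src),
                                  type='file',
                                  file_name='img_%d.jpg' % i)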

    2、items.py

    import scrapy
    
    
    class BigfileItem(scrapy.Item):
        # define the fields for your item here like:
        # name = scrapy.Field()
        url = scrapy.Field()
        type = scrapy.Field()
        file_name = scrapy.Field()
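
    The type field is what lets a single pipeline handle mixed items: BigfilePipeline downloads items marked 'file' and passes anything else through untouched, while file_name decides where the body lands on disk.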

    3、pipelines.py

    # -*- coding: utf-8 -*-
    
    # Define your item pipelines here
    #
    # Don't forget to add your pipeline to the ITEM_PIPELINES setting
    # See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
    
    from twisted.web.client import Agent, ResponseDone, PotentialDataLoss
    from twisted.internet import defer, reactor, protocol
    
    
    class _ResponseReader(protocol.Protocol):
        """Protocol that streams the response body straight to a file on disk."""

        def __init__(self, finished, txresponse, file_name):
            self._finished = finished  # Deferred fired when the body ends
            self._txresponse = txresponse
            self._bytes_received = 0
            self.f = open(file_name, mode='wb')
    
        def dataReceived(self, bodyBytes):
            self._bytes_received += len(bodyBytes)
            # write each chunk to disk as it arrives; nothing is buffered in memory
            self.f.write(bodyBytes)
            self.f.flush()
    
        def connectionLost(self, reason):
            # close the file first so it is released even on the early return below
            self.f.close()
            if self._finished.called:
                return
            if reason.check(ResponseDone):
                # body received completely
                self._finished.callback((self._txresponse, 'success'))
            elif reason.check(PotentialDataLoss):
                # connection closed before the full body was delivered
                self._finished.callback((self._txresponse, 'partial'))
            else:
                # transport-level failure
                self._finished.errback(reason)
    
    
    class BigfilePipeline(object):
        def process_item(self, item, spider):
            # start a download task for file items; pass other items through
            if item['type'] == 'file':
                agent = Agent(reactor)
                d = agent.request(
                    method=b'GET',
                    uri=bytes(item['url'], encoding='ascii')
                )
                # once the response headers arrive, _cb_bodyready runs automatically
                d.addCallback(self._cb_bodyready, file_name=item['file_name'])
                # returning a Deferred makes Scrapy wait until the download finishes
                return d
            else:
                return item
    
        def _cb_bodyready(self, txresponse, file_name):
            # Deferred that fires only after the whole body has been delivered,
            # which keeps the connection open until the download completes
            d = defer.Deferred()
            d.addBoth(self.download_result)  # runs on success, partial data or error
            txresponse.deliverBody(_ResponseReader(d, txresponse, file_name))
            return d

        def download_result(self, response):
            # hook for post-download handling (logging, retries, ...); a no-op here
            pass
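
    The key point is that process_item returns a Deferred rather than the item: Scrapy treats the item as still in flight until that Deferred fires, while _ResponseReader writes the body to disk one chunk at a time, so the file is never held in memory. The same Agent/deliverBody pattern can be exercised outside Scrapy, which is handy for debugging the pipeline in isolation. A minimal standalone sketch, assuming the pipeline module is importable as bigfile.pipelines (the URL and file name are placeholders, and HTTPS additionally needs the twisted[tls] extras):

    from twisted.internet import reactor
    from twisted.web.client import Agent
    from bigfile.pipelines import BigfilePipeline


    def test_download():
        agent = Agent(reactor)
        d = agent.request(b'GET', b'http://example.com/big.bin')
        # reuse the pipeline's header callback to stream the body to disk
        d.addCallback(BigfilePipeline()._cb_bodyready, file_name='big.bin')
        # stop the reactor once the download succeeds, is partial, or fails
        d.addBoth(lambda result: reactor.stop())
        return d


    if __name__ == '__main__':
        test_download()
        reactor.run()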

    4、settings.py

    # -*- coding: utf-8 -*-
    
    # Scrapy settings for bigfile project
    #
    # For simplicity, this file contains only settings considered important or
    # commonly used. You can find more settings consulting the documentation:
    #
    #     http://doc.scrapy.org/en/latest/topics/settings.html
    #     http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
    #     http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
    
    BOT_NAME = 'bigfile'
    
    SPIDER_MODULES = ['bigfile.spiders']
    NEWSPIDER_MODULE = 'bigfile.spiders'
    
    
    # Crawl responsibly by identifying yourself (and your website) on the user-agent
    #USER_AGENT = 'bigfile (+http://www.yourdomain.com)'
    
    # Obey robots.txt rules
    ROBOTSTXT_OBEY = True
    
    # Configure maximum concurrent requests performed by Scrapy (default: 16)
    #CONCURRENT_REQUESTS = 32
    
    # Configure a delay for requests for the same website (default: 0)
    # See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
    # See also autothrottle settings and docs
    #DOWNLOAD_DELAY = 3
    # The download delay setting will honor only one of:
    #CONCURRENT_REQUESTS_PER_DOMAIN = 16
    #CONCURRENT_REQUESTS_PER_IP = 16
    
    # Disable cookies (enabled by default)
    #COOKIES_ENABLED = False
    
    # Disable Telnet Console (enabled by default)
    #TELNETCONSOLE_ENABLED = False
    
    # Override the default request headers:
    #DEFAULT_REQUEST_HEADERS = {
    #   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    #   'Accept-Language': 'en',
    #}
    
    # Enable or disable spider middlewares
    # See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
    #SPIDER_MIDDLEWARES = {
    #    'bigfile.middlewares.MyCustomSpiderMiddleware': 543,
    #}
    
    # Enable or disable downloader middlewares
    # See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
    #DOWNLOADER_MIDDLEWARES = {
    #    'bigfile.middlewares.MyCustomDownloaderMiddleware': 543,
    #}
    
    # Enable or disable extensions
    # See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
    #EXTENSIONS = {
    #    'scrapy.extensions.telnet.TelnetConsole': None,
    #}
    
    # Configure item pipelines
    # See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
    ITEM_PIPELINES = {
       'bigfile.pipelines.BigfilePipeline': 300,
    }
    
    # Enable and configure the AutoThrottle extension (disabled by default)
    # See http://doc.scrapy.org/en/latest/topics/autothrottle.html
    #AUTOTHROTTLE_ENABLED = True
    # The initial download delay
    #AUTOTHROTTLE_START_DELAY = 5
    # The maximum download delay to be set in case of high latencies
    #AUTOTHROTTLE_MAX_DELAY = 60
    # The average number of requests Scrapy should be sending in parallel to
    # each remote server
    #AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
    # Enable showing throttling stats for every response received:
    #AUTOTHROTTLE_DEBUG = False
    
    # Enable and configure HTTP caching (disabled by default)
    # See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
    #HTTPCACHE_ENABLED = True
    #HTTPCACHE_EXPIRATION_SECS = 0
    #HTTPCACHE_DIR = 'httpcache'
    #HTTPCACHE_IGNORE_HTTP_CODES = []
    #HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
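
    Note that these settings only govern requests made through Scrapy's own downloader (here, the fetch of start_urls, which is what ROBOTSTXT_OBEY applies to). The file downloads in BigfilePipeline go through a raw Twisted Agent, so DOWNLOAD_DELAY, the concurrency limits, robots.txt checks and downloader middlewares do not apply to them.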

    5、scrapy.cfg

    # Automatically created by: scrapy startproject
    #
    # For more information about the [deploy] section see:
    # https://scrapyd.readthedocs.org/en/latest/deploy.html
    
    [settings]
    default = bigfile.settings
    
    [deploy]
    #url = http://localhost:6800/
    project = bigfile
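
    With all five files in place, run the spider from the project root with scrapy crawl chouti; the image is written as xx.jpg in the working directory, since the pipeline opens file_name as a relative path.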