• Commonly used settings configuration


    I. Save logging information

    import os

    # File name for saving log output
    LOG_LEVEL = "INFO"
    LOG_STDOUT = True
    LOG_ENCODING = 'utf-8'
    # Path helper: os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
    LOG_FILE = os.path.dirname(__file__) + "/SHANGSHIYAOPINGMULU_error.log"

    II. Disable redirects

    REDIRECT_ENABLED = False

    III. Set a download delay

    import random
    # Random delay between 0 and 2 seconds; with RANDOMIZE_DOWNLOAD_DELAY enabled,
    # Scrapy additionally multiplies the delay by a random factor between 0.5 and 1.5
    DOWNLOAD_DELAY = random.random() + random.random()
    RANDOMIZE_DOWNLOAD_DELAY = True

    IV. Set the USER_AGENT

    USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko'

    V. Run all of the spiders under the spiders directory

    0. Run multiple spiders

    from scrapy.crawler import CrawlerProcess
    from scrapy.utils.project import get_project_settings
    
    # run several spiders (by name) in one process, using the project settings
    process = CrawlerProcess(get_project_settings())
    process.crawl('game_top_sellers')
    process.crawl('game_online')
    process.start()

    1. Create a commands folder at the same level as the spiders directory

    mkdir commands

    2. Enter the commands folder

    cd commands

    3. Create an __init__.py file

    touch __init__.py

    4. Create a crawlall.py file

    touch crawlall.py

    5. Open crawlall.py and write the command code

    vim crawlall.py
    

      

    from scrapy.commands import ScrapyCommand
    from scrapy.exceptions import UsageError
    from scrapy.utils.conf import arglist_to_dict
    
    
    class Command(ScrapyCommand):
        requires_project = True
    
        def syntax(self):
            return '[options]'
    
        def short_desc(self):
            return 'Runs all of the spiders'
    
        def add_options(self, parser):
            ScrapyCommand.add_options(self, parser)
            parser.add_option("-a", dest="spargs", action="append", default=[], metavar="NAME=VALUE",
                              help="set spider argument (may be repeated)")
            parser.add_option("-o", "--output", metavar="FILE",
                              help="dump scraped items into FILE (use - for stdout)")
            parser.add_option("-t", "--output-format", metavar="FORMAT",
                              help="format to use for dumping items with -o")
    
        def process_options(self, args, opts):
            ScrapyCommand.process_options(self, args, opts)
            try:
                opts.spargs = arglist_to_dict(opts.spargs)
            except ValueError:
                raise UsageError("Invalid -a value, use -a NAME=VALUE", print_help=False)
    
        def run(self, args, opts):
            # settings = get_project_settings()
    
            spider_loader = self.crawler_process.spider_loader
            for spidername in args or spider_loader.list():
                print("*********cralall NewsSpider************")
                self.crawler_process.crawl(spidername, **opts.spargs)
            self.crawler_process.start()
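
    Note: the add_options method above targets the optparse-style parser used by the Scrapy version this post was written for. Newer Scrapy releases pass an argparse parser instead, so add_option is not available there; a hedged sketch of the same method for those versions (assuming an argparse parser) would be:

    def add_options(self, parser):
        ScrapyCommand.add_options(self, parser)
        # argparse equivalents of the add_option calls above
        parser.add_argument("-a", dest="spargs", action="append", default=[], metavar="NAME=VALUE",
                            help="set spider argument (may be repeated)")
        parser.add_argument("-o", "--output", metavar="FILE",
                            help="dump scraped items into FILE (use - for stdout)")
        parser.add_argument("-t", "--output-format", metavar="FORMAT",
                            help="format to use for dumping items with -o")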
    

      

    6. Configure the custom command

    COMMANDS_MODULE = 'spider.commands'   # replace 'spider' with your own project name
    
    # e.g. in a run.py at the project root:
    from scrapy import cmdline
    cmdline.execute(['scrapy', 'crawlall'])   # launch the project

    VI. Set the HTTP status codes that trigger a retry

    RETRY_HTTP_CODES = [500, 520]

    VII. Configure Redis (scrapy-redis)

    # Redis connection info
    REDIS_HOST = "192.168.1.235"
    REDIS_PORT = 6379
    REDIS_PARAMS = {
        "password": "KangCe@0608",
    }
    
    # 1 (required). Use the scrapy_redis dedup filter so deduplication is done in the Redis database
    DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"
    
    # 2 (required). Use the scrapy_redis scheduler so requests are dispatched through Redis
    SCHEDULER = "scrapy_redis.scheduler.Scheduler"
    
    # 3 (required). Keep the scrapy-redis queues in Redis so a crawl can be paused and resumed (i.e. do not clear the Redis queues)
    SCHEDULER_PERSIST = True
    
    # 4 (required). RedisPipeline writes items into a Redis list whose key is spider.name:items, for downstream distributed item processing
    # This is already implemented by scrapy-redis; no extra code is needed, just enable the pipeline
    ITEM_PIPELINES = {
        # 'AQI.pipelines.AqiJsonPipeline': 200,
        # 'AQI.pipelines.AqiCSVPipeline': 300,
        # 'AQI.pipelines.AqiRedisPipeline': 400,
        # 'AQI.pipelines.AqiMongoPipeline': 500,
        'scrapy_redis.pipelines.RedisPipeline': 100
    }
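
    With the settings above in place, a spider can consume its start URLs from Redis. A minimal sketch follows; the spider name, redis_key and item fields below are illustrative assumptions, not part of the original post:

    from scrapy_redis.spiders import RedisSpider

    class MySpider(RedisSpider):
        name = "myspider"
        # Redis list this spider reads its start URLs from, e.g. pushed with:
        #   lpush myspider:start_urls http://example.com
        redis_key = "myspider:start_urls"

        def parse(self, response):
            # items yielded here are written by RedisPipeline to the "myspider:items" list
            yield {"url": response.url, "title": response.css("title::text").get()}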

    VIII. settings.py explained in detail

    # Name of the Scrapy project. It is used to build the default User-Agent and for logging, and is set automatically when the project is created with the startproject command.
    BOT_NAME = 'demo1'
    
    # List of modules where Scrapy looks for spiders. Default: ['xxx.spiders']
    SPIDER_MODULES = ['demo1.spiders']
    # Module where new spiders created with the genspider command are placed. Default: 'xxx.spiders'
    NEWSPIDER_MODULE = 'demo1.spiders'
    
    # Default User-Agent used when crawling, unless overridden
    USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36'
    
    # File name for saving log output
    LOG_LEVEL = "INFO"
    LOG_STDOUT = True
    LOG_ENCODING = 'utf-8'
    # Path helper: os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
    LOG_FILE = "SHANGSHIYAOPINGMULU_error.log"
    
    # Disable redirects
    REDIRECT_ENABLED = False
    
    
    
    # Set a download delay
    import random
    DOWNLOAD_DELAY = random.random() + random.random()
    RANDOMIZE_DOWNLOAD_DELAY = True
    
    # If enabled, Scrapy will respect robots.txt rules
    ROBOTSTXT_OBEY = True
    
    # HTTP status codes that trigger a retry
    RETRY_HTTP_CODES = [500, 520]
    
    # Maximum number of concurrent requests performed by the Scrapy downloader. Default: 16
    CONCURRENT_REQUESTS = 32
    
    # Maximum crawl depth allowed; the current depth can be read from meta; 0 means unlimited
    DEPTH_LIMIT = 3
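    # For reference, the depth recorded by DepthMiddleware can be read inside a spider callback.
    # A small sketch (the callback below is illustrative, not from the original post):
    #     def parse(self, response):
    #         depth = response.meta.get('depth', 0)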
    
    # When crawling, 0 means depth-first (LIFO, the default); 1 means breadth-first (FIFO)
    # Last in, first out: depth-first
    DEPTH_PRIORITY = 0
    SCHEDULER_DISK_QUEUE = 'scrapy.squeues.PickleLifoDiskQueue'
    SCHEDULER_MEMORY_QUEUE = 'scrapy.squeues.LifoMemoryQueue'
    # First in, first out: breadth-first
    DEPTH_PRIORITY = 1
    SCHEDULER_DISK_QUEUE = 'scrapy.squeues.PickleFifoDiskQueue'
    SCHEDULER_MEMORY_QUEUE = 'scrapy.squeues.FifoMemoryQueue'
    
    # Scheduler queue
    from scrapy.core.scheduler import Scheduler
    SCHEDULER = 'scrapy.core.scheduler.Scheduler'
    
    # Deduplication of visited URLs
    # DUPEFILTER_CLASS = 'step8_king.duplication.RepeatUrl'
    
    # Configure a delay for requests to the same website (default: 0)
    # See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
    # See also autothrottle settings and docs
    # Time the downloader waits before fetching the next page from the same website; use it to throttle the crawl and reduce load on the server. Decimals are supported, e.g. 0.25 (seconds)
    DOWNLOAD_DELAY = 3
    
    # Only one of the two per-domain/per-IP settings below takes effect
    # Maximum number of concurrent requests to a single domain
    CONCURRENT_REQUESTS_PER_DOMAIN = 16
    # Maximum number of concurrent requests to a single IP. If non-zero, CONCURRENT_REQUESTS_PER_DOMAIN is ignored and this setting is used instead, i.e. the concurrency limit applies per IP rather than per domain. It also affects DOWNLOAD_DELAY: if CONCURRENT_REQUESTS_PER_IP is non-zero, the download delay is applied per IP rather than per domain.
    CONCURRENT_REQUESTS_PER_IP = 16
    
    # Disable cookies (enabled by default)
    COOKIES_ENABLED = False
    
    # Disable the Telnet console (enabled by default)
    TELNETCONSOLE_ENABLED = False
    
    # Override the default request headers:
    DEFAULT_REQUEST_HEADERS = {
      'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
      'Accept-Language': 'en',
    }
    
    # Enable or disable spider middlewares
    # See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
    SPIDER_MIDDLEWARES = {
       'demo1.middlewares.Demo1SpiderMiddleware': 543,
    }
    
    # Enable or disable downloader middlewares
    # See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
    DOWNLOADER_MIDDLEWARES = {
       'demo1.middlewares.MyCustomDownloaderMiddleware': 543,
    }
    
    # Enable or disable extensions
    # See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
    EXTENSIONS = {
       'scrapy.extensions.telnet.TelnetConsole': None,
    }
    
    # Configure item pipelines
    # See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
    ITEM_PIPELINES = {
       'demo1.pipelines.Demo1Pipeline': 300,
    }
    
    # Enable and configure the AutoThrottle extension (disabled by default): automatic throttling
    # See http://doc.scrapy.org/en/latest/topics/autothrottle.html
    AUTOTHROTTLE_ENABLED = True
    
    # Initial download delay
    AUTOTHROTTLE_START_DELAY = 5
    
    # Maximum download delay
    AUTOTHROTTLE_MAX_DELAY = 10
    
    # Disable retries
    RETRY_ENABLED = False
    
    # Download timeout
    DOWNLOAD_TIMEOUT = 10
    
    # Number of download retries
    RETRY_TIMES = 5
    
    # Maximum download delay to apply in case of high latency
    AUTOTHROTTLE_MAX_DELAY = 60
    
    
    # Average number of requests Scrapy should send in parallel to each remote server
    AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
    
    # Enable showing throttling stats for every response received:
    AUTOTHROTTLE_DEBUG = False
    
    # Enable and configure HTTP caching (disabled by default)
    # See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
    # Whether to enable the HTTP cache
    HTTPCACHE_ENABLED = True
    # Cache policy: cache every request; subsequent identical requests are served from the cache
    HTTPCACHE_POLICY = "scrapy.extensions.httpcache.DummyPolicy"
    # Cache policy: cache according to HTTP response headers such as Cache-Control and Last-Modified
    HTTPCACHE_POLICY = "scrapy.extensions.httpcache.RFC2616Policy"
    # Cache expiration time in seconds (0 means never expire)
    HTTPCACHE_EXPIRATION_SECS = 0
    # Cache storage directory
    HTTPCACHE_DIR = 'httpcache'
    # HTTP status codes not to cache
    HTTPCACHE_IGNORE_HTTP_CODES = []
    # Cache storage backend
    HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
    
    
    """
    19. Proxies (by default read from environment variables)
        from scrapy.contrib.downloadermiddleware.httpproxy import HttpProxyMiddleware
    
        Option 1: use the default middleware (proxies read from os.environ)
            os.environ
            {
                http_proxy:http://root:woshiniba@192.168.11.11:9999/
                https_proxy:http://192.168.11.11:9999/
            }
        Option 2: use a custom downloader middleware
    
        import base64
        import random

        def to_bytes(text, encoding=None, errors='strict'):
            # coerce str to bytes (same idea as scrapy.utils.python.to_bytes)
            if isinstance(text, bytes):
                return text
            if not isinstance(text, str):
                raise TypeError('to_bytes must receive a unicode, str or bytes '
                                'object, got %s' % type(text).__name__)
            if encoding is None:
                encoding = 'utf-8'
            return text.encode(encoding, errors)

        class ProxyMiddleware(object):
            def process_request(self, request, spider):
                PROXIES = [
                    {'ip_port': '111.11.228.75:80', 'user_pass': ''},
                    {'ip_port': '120.198.243.22:80', 'user_pass': ''},
                    {'ip_port': '111.8.60.9:8123', 'user_pass': ''},
                    {'ip_port': '101.71.27.120:80', 'user_pass': ''},
                    {'ip_port': '122.96.59.104:80', 'user_pass': ''},
                    {'ip_port': '122.224.249.122:8088', 'user_pass': ''},
                ]
                proxy = random.choice(PROXIES)
                if proxy['user_pass']:
                    # proxy that requires authentication
                    request.meta['proxy'] = "http://%s" % proxy['ip_port']
                    encoded_user_pass = base64.b64encode(to_bytes(proxy['user_pass'])).decode('ascii')
                    request.headers['Proxy-Authorization'] = to_bytes('Basic ' + encoded_user_pass)
                    print("**************ProxyMiddleware have pass************" + proxy['ip_port'])
                else:
                    # proxy without authentication
                    print("**************ProxyMiddleware no pass************" + proxy['ip_port'])
                    request.meta['proxy'] = "http://%s" % proxy['ip_port']
    
        DOWNLOADER_MIDDLEWARES = {
           'step8_king.middlewares.ProxyMiddleware': 500,
        }
    
    """
    
    """
    20. HTTPS access
        There are two cases for HTTPS access:
        1. The target site uses a trusted certificate (supported by default)
            DOWNLOADER_HTTPCLIENTFACTORY = "scrapy.core.downloader.webclient.ScrapyHTTPClientFactory"
            DOWNLOADER_CLIENTCONTEXTFACTORY = "scrapy.core.downloader.contextfactory.ScrapyClientContextFactory"
    
        2. The target site uses a custom (e.g. self-signed) certificate
            DOWNLOADER_HTTPCLIENTFACTORY = "scrapy.core.downloader.webclient.ScrapyHTTPClientFactory"
            DOWNLOADER_CLIENTCONTEXTFACTORY = "step8_king.https.MySSLFactory"
    
            # https.py
            from scrapy.core.downloader.contextfactory import ScrapyClientContextFactory
            from twisted.internet.ssl import (optionsForClientTLS, CertificateOptions, PrivateCertificate)
    
            class MySSLFactory(ScrapyClientContextFactory):
                def getCertificateOptions(self):
                    from OpenSSL import crypto
                    v1 = crypto.load_privatekey(crypto.FILETYPE_PEM, open('/Users/wupeiqi/client.key.unsecure', mode='r').read())
                    v2 = crypto.load_certificate(crypto.FILETYPE_PEM, open('/Users/wupeiqi/client.pem', mode='r').read())
                    return CertificateOptions(
                        privateKey=v1,  # PKey object
                        certificate=v2,  # X509 object
                        verify=False,
                        method=getattr(self, 'method', getattr(self, '_ssl_method', None))
                    )
        Other:
            Related classes
                scrapy.core.downloader.handlers.http.HttpDownloadHandler
                scrapy.core.downloader.webclient.ScrapyHTTPClientFactory
                scrapy.core.downloader.contextfactory.ScrapyClientContextFactory
            Related settings
                DOWNLOADER_HTTPCLIENTFACTORY
                DOWNLOADER_CLIENTCONTEXTFACTORY
    
    """
    
    
    
    """
    21. Spider middleware
        class SpiderMiddleware(object):
    
            def process_spider_input(self,response, spider):
                '''
                Called after the response has been downloaded, before it is handed to the spider for parsing
                :param response: 
                :param spider: 
                :return: 
                '''
                pass
    
            def process_spider_output(self,response, result, spider):
                '''
                Called with the result returned by the spider after it has processed the response
                :param response:
                :param result:
                :param spider:
                :return: must return an iterable containing Request or Item objects
                '''
                return result
    
            def process_spider_exception(self,response, exception, spider):
                '''
                Called when an exception is raised
                :param response:
                :param exception:
                :param spider:
                :return: None to keep passing the exception to later middlewares; or an iterable containing Response or Item objects, handed to the scheduler or pipelines
                '''
                return None
    
    
            def process_start_requests(self,start_requests, spider):
                '''
                Called when the spider starts, with its start requests
                :param start_requests:
                :param spider:
                :return: an iterable containing Request objects
                '''
                return start_requests
    
        Built-in spider middlewares:
            'scrapy.contrib.spidermiddleware.httperror.HttpErrorMiddleware': 50,
            'scrapy.contrib.spidermiddleware.offsite.OffsiteMiddleware': 500,
            'scrapy.contrib.spidermiddleware.referer.RefererMiddleware': 700,
            'scrapy.contrib.spidermiddleware.urllength.UrlLengthMiddleware': 800,
            'scrapy.contrib.spidermiddleware.depth.DepthMiddleware': 900,
    
    """
    # from scrapy.contrib.spidermiddleware.referer import RefererMiddleware
    # Enable or disable spider middlewares
    # See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
    SPIDER_MIDDLEWARES = {
       # 'step8_king.middlewares.SpiderMiddleware': 543,
    }
    
    
    """
    22. Downloader middleware
        class DownMiddleware1(object):
            def process_request(self, request, spider):
                '''
                Called when a request needs to be downloaded; it passes through every downloader middleware's process_request
                :param request:
                :param spider:
                :return:
                    None: continue to the remaining middlewares and download the request
                    Response object: stop running process_request and start running process_response
                    Request object: stop running the middlewares and send the Request back to the scheduler
                    raise IgnoreRequest: stop running process_request and start running process_exception
                '''
                pass
    
    
    
            def process_response(self, request, response, spider):
                '''
                Called with the response returned from the downloader
                :param request:
                :param response:
                :param spider:
                :return:
                    Response object: passed on to the other middlewares' process_response
                    Request object: stop the middlewares; the request is rescheduled for download
                    raise IgnoreRequest: Request.errback is called
                '''
                print('response1')
                return response
    
            def process_exception(self, request, exception, spider):
                '''
                Called when a download handler or process_request() (a downloader middleware) raises an exception
                :param request:
                :param exception:
                :param spider:
                :return:
                    None: keep passing the exception to the remaining middlewares
                    Response object: stop running the remaining process_exception methods
                    Request object: stop the middlewares; the request will be rescheduled for download
                '''
                return None
    
    
        Default downloader middlewares
        {
            'scrapy.contrib.downloadermiddleware.robotstxt.RobotsTxtMiddleware': 100,
            'scrapy.contrib.downloadermiddleware.httpauth.HttpAuthMiddleware': 300,
            'scrapy.contrib.downloadermiddleware.downloadtimeout.DownloadTimeoutMiddleware': 350,
            'scrapy.contrib.downloadermiddleware.useragent.UserAgentMiddleware': 400,
            'scrapy.contrib.downloadermiddleware.retry.RetryMiddleware': 500,
            'scrapy.contrib.downloadermiddleware.defaultheaders.DefaultHeadersMiddleware': 550,
            'scrapy.contrib.downloadermiddleware.redirect.MetaRefreshMiddleware': 580,
            'scrapy.contrib.downloadermiddleware.httpcompression.HttpCompressionMiddleware': 590,
            'scrapy.contrib.downloadermiddleware.redirect.RedirectMiddleware': 600,
            'scrapy.contrib.downloadermiddleware.cookies.CookiesMiddleware': 700,
            'scrapy.contrib.downloadermiddleware.httpproxy.HttpProxyMiddleware': 750,
            'scrapy.contrib.downloadermiddleware.chunked.ChunkedTransferMiddleware': 830,
            'scrapy.contrib.downloadermiddleware.stats.DownloaderStats': 850,
            'scrapy.contrib.downloadermiddleware.httpcache.HttpCacheMiddleware': 900,
        }
    
    """
    # from scrapy.contrib.downloadermiddleware.httpauth import HttpAuthMiddleware
    # Enable or disable downloader middlewares
    # See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
    # DOWNLOADER_MIDDLEWARES = {
    #    'step8_king.middlewares.DownMiddleware1': 100,
    #    'step8_king.middlewares.DownMiddleware2': 500,
    # }

  • Original post: https://www.cnblogs.com/yoyo1216/p/10133703.html