• Crawlers: the scrapy-redis component


     The scrapy-redis component

    scrapy-redis is a Redis-based Scrapy component that makes it easy to build a simple distributed crawler. In essence it provides three major features:

    • scheduler - the scheduler
    • dupefilter - URL dedup rules (used by the scheduler)
    • pipeline   - data persistence
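
    In practice all three are switched on from settings.py. A minimal sketch (the connection values below are placeholders, not from the original post):

        # settings.py
        SCHEDULER = "scrapy_redis.scheduler.Scheduler"              # redis-backed scheduler
        DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"  # redis-based URL dedup
        ITEM_PIPELINES = {
            'scrapy_redis.pipelines.RedisPipeline': 300,            # persist items to redis
        }
        REDIS_HOST = 'localhost'
        REDIS_PORT = 6379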

    Dedup rules based on scrapy-redis

    Approaches

    #- Fully custom
    from scrapy.dupefilters import BaseDupeFilter
    import redis
    from scrapy.utils.request import request_fingerprint
    
    class DupFilter(BaseDupeFilter):
    	def __init__(self):
    		self.conn = redis.Redis(host='140.143.227.206', port=8888, password='beta')
    
    	def request_seen(self, request):
    		"""
    		Check whether this request has already been visited.
    		:param request:
    		:return: True = visited before; False = not visited yet
    		"""
    		fid = request_fingerprint(request)
    		# sadd returns 1 only if the fingerprint was not in the set yet
    		result = self.conn.sadd('visited_urls', fid)
    		if result == 1:
    			return False
    		return True
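    To activate this fully custom filter, point Scrapy's DUPEFILTER_CLASS at it in settings.py (the module path below is hypothetical):

        # settings.py
        DUPEFILTER_CLASS = 'myproject.dupefilters.DupFilter'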
    
    #- Use scrapy-redis as-is
    # the timestamp in the dedup key keeps changing, which makes the set hard to find in redis
    
    #- Subclass scrapy-redis to customize it
    
    from scrapy_redis.dupefilter import RFPDupeFilter
    from scrapy_redis.connection import get_redis_from_settings
    from scrapy_redis import defaults
    
    class RedisDupeFilter(RFPDupeFilter):
    	@classmethod
    	def from_settings(cls, settings):
    		"""Returns an instance from given settings.
    
    		This uses by default the key ``dupefilter:<timestamp>``. When using the
    		``scrapy_redis.scheduler.Scheduler`` class, this method is not used as
    		it needs to pass the spider name in the key.
    
    		Parameters
    		----------
    		settings : scrapy.settings.Settings
    
    		Returns
    		-------
    		RFPDupeFilter
    			A RFPDupeFilter instance.
    
    
    		"""
    		server = get_redis_from_settings(settings)
    		# XXX: This creates one-time key. needed to support to use this
    		# class as standalone dupefilter with scrapy's default scheduler
    		# if scrapy passes spider on open() method this wouldn't be needed
    		# TODO: Use SCRAPY_JOB env as default and fallback to timestamp.
    		key = defaults.DUPEFILTER_KEY % {'timestamp': 'xiaodongbei'}  # fixed name instead of int(time.time()), so the key stays stable across runs
    		debug = settings.getbool('DUPEFILTER_DEBUG')
    		return cls(server, key=key, debug=debug)
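
    The subclass is registered the same way (module path hypothetical). When the scrapy-redis scheduler is enabled, SCHEDULER_DUPEFILTER_CLASS can be used instead:

        # settings.py
        DUPEFILTER_CLASS = 'myproject.dupefilters.RedisDupeFilter'
        # or, together with SCHEDULER = "scrapy_redis.scheduler.Scheduler":
        # SCHEDULER_DUPEFILTER_CLASS = 'myproject.dupefilters.RedisDupeFilter'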

    Configuration

    """
    Define the dedup rules (invoked and applied by the scheduler)
     
        a. Internally, the following settings are used to connect to Redis
     
            # REDIS_HOST = 'localhost'                            # host name
            # REDIS_PORT = 6379                                   # port
            # REDIS_URL = 'redis://user:pass@hostname:9001'       # connection URL (takes precedence over the settings above)
            # REDIS_PARAMS  = {}                                  # Redis connection parameters; default: {'socket_timeout': 30, 'socket_connect_timeout': 30, 'retry_on_timeout': True, 'encoding': REDIS_ENCODING}
            # REDIS_PARAMS['redis_cls'] = 'myproject.RedisClient' # class used for the Redis connection; default: redis.StrictRedis
            # REDIS_ENCODING = "utf-8"                            # redis encoding; default: 'utf-8'
         
        b. Dedup is implemented with a redis set, whose key is:
         
            key = defaults.DUPEFILTER_KEY % {'timestamp': int(time.time())}
            Default setting:
                DUPEFILTER_KEY = 'dupefilter:%(timestamp)s'
                  
        c. The dedup rule converts the URL into a unique fingerprint, then checks whether it already exists in the redis set
         
            from scrapy.utils import request
            from scrapy.http import Request
             
            req = Request(url='http://www.cnblogs.com/wupeiqi.html')
            result = request.request_fingerprint(req)
            print(result) # 8ea4fd67887449313ccc12e5b6b92510cc53675c
             
             
            PS:
                - the fingerprint is the same even when URL query parameters appear in a different order;
                - request headers are excluded by default; include_headers can name specific headers to include
                Example:
                    from scrapy.utils import request
                    from scrapy.http import Request
                     
                    req = Request(url='http://www.baidu.com?name=8&id=1',callback=lambda x:print(x),cookies={'k1':'vvvvv'})
                    result = request.request_fingerprint(req,include_headers=['cookies',])
                     
                    print(result)
                     
                    req = Request(url='http://www.baidu.com?id=1&name=8',callback=lambda x:print(x),cookies={'k1':666})
                     
                    result = request.request_fingerprint(req,include_headers=['cookies',])
                     
                    print(result)
             
    """
    # Ensure all spiders share same duplicates filter through redis.
    # DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"

    No methods to write: just set the options in settings.py and it works.
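    A minimal sketch of that settings-only setup, reusing the connection values from the example above:

        # settings.py
        DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"
        REDIS_HOST = '140.143.227.206'
        REDIS_PORT = 8888
        REDIS_PARAMS = {'password': 'beta'}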

    Scheduler

    """
    The scheduler stores requests in a PriorityQueue (sorted set), FifoQueue (list), or LifoQueue (list), and uses RFPDupeFilter to dedup URLs
         
        a. Scheduler settings
            SCHEDULER_QUEUE_CLASS = 'scrapy_redis.queue.PriorityQueue'          # default: PriorityQueue (sorted set); alternatives: FifoQueue (list), LifoQueue (list)
            SCHEDULER_QUEUE_KEY = '%(spider)s:requests'                         # redis key under which the scheduler stores requests
            SCHEDULER_SERIALIZER = "scrapy_redis.picklecompat"                  # serializer for data saved to redis; pickle by default
            SCHEDULER_PERSIST = True                                            # keep the queue and dedup records on close? True = keep, False = clear
            SCHEDULER_FLUSH_ON_START = True                                     # flush the queue and dedup records on start? True = clear, False = keep
            SCHEDULER_IDLE_BEFORE_CLOSE = 10                                    # max seconds to wait when popping from an empty queue (returns nothing if still empty)
            SCHEDULER_DUPEFILTER_KEY = '%(spider)s:dupefilter'                  # redis key under which dedup records are stored
            SCHEDULER_DUPEFILTER_CLASS = 'scrapy_redis.dupefilter.RFPDupeFilter'# class implementing the dedup rule
     
     
    """
    # Enables scheduling storing requests queue in redis.
    SCHEDULER = "scrapy_redis.scheduler.Scheduler"
     
    # Default requests serializer is pickle, but it can be changed to any module
    # with loads and dumps functions. Note that pickle is not compatible between
    # python versions.
    # Caveat: In python 3.x, the serializer must return strings keys and support
    # bytes as values. Because of this reason the json or msgpack module will not
    # work by default. In python 2.x there is no such issue and you can use
    # 'json' or 'msgpack' as serializers.
    # SCHEDULER_SERIALIZER = "scrapy_redis.picklecompat"
     
    # Don't cleanup redis queues, allows to pause/resume crawls.
    # SCHEDULER_PERSIST = True
     
    # Schedule requests using a priority queue. (default)
    # SCHEDULER_QUEUE_CLASS = 'scrapy_redis.queue.PriorityQueue'
     
    # Alternative queues.
    # SCHEDULER_QUEUE_CLASS = 'scrapy_redis.queue.FifoQueue'
    # SCHEDULER_QUEUE_CLASS = 'scrapy_redis.queue.LifoQueue'
     
    # Max idle time to prevent the spider from being closed when distributed crawling.
    # This only works if queue class is SpiderQueue or SpiderStack,
    # and may also block the same time when your spider start at the first time (because the queue is empty).
    # SCHEDULER_IDLE_BEFORE_CLOSE = 10  
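
    With the default key templates, a spider named myspider (hypothetical) leaves two keys in redis; a quick way to inspect them with redis-py:

        import redis

        conn = redis.Redis(host='140.143.227.206', port=8888, password='beta')
        print(conn.type('myspider:requests'))    # b'zset' for PriorityQueue, b'list' for Fifo/LifoQueue
        print(conn.scard('myspider:dupefilter')) # number of request fingerprints recorded so far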

    Example

    Find the SCHEDULER = "scrapy_redis.scheduler.Scheduler" setting and instantiate the scheduler object
    - Execute Scheduler.from_crawler
    - Execute Scheduler.from_settings
    	- Read from settings:
    		SCHEDULER_PERSIST			 # keep the queue and dedup records on close? True = keep, False = clear
    		SCHEDULER_FLUSH_ON_START     # flush the queue and dedup records on start? True = clear, False = keep
    		SCHEDULER_IDLE_BEFORE_CLOSE  # max seconds to wait when popping from an empty queue (returns nothing if still empty)
    	- Read from settings:
    		SCHEDULER_QUEUE_KEY			 # %(spider)s:requests
    		SCHEDULER_QUEUE_CLASS		 # scrapy_redis.queue.FifoQueue
    		SCHEDULER_DUPEFILTER_KEY     # '%(spider)s:dupefilter'
    		DUPEFILTER_CLASS			 # 'scrapy_redis.dupefilter.RFPDupeFilter'
    		SCHEDULER_SERIALIZER		 # "scrapy_redis.picklecompat"
    
    	- Read from settings:
    		REDIS_HOST = '140.143.227.206'       # host name
    		REDIS_PORT = 8888                    # port
    		REDIS_PARAMS  = {'password':'beta'}  # Redis connection parameters; default: {'socket_timeout': 30, 'socket_connect_timeout': 30, 'retry_on_timeout': True, 'encoding': REDIS_ENCODING}
    		REDIS_ENCODING = "utf-8"
    - Instantiate the Scheduler object
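
    A sketch of that flow driven by hand, assuming the Redis instance above is reachable (Scheduler.from_settings pings the server when it connects):

        from scrapy.settings import Settings
        from scrapy_redis.scheduler import Scheduler

        settings = Settings({
            'REDIS_HOST': '140.143.227.206',
            'REDIS_PORT': 8888,
            'REDIS_PARAMS': {'password': 'beta'},
            'SCHEDULER_PERSIST': True,
        })
        scheduler = Scheduler.from_settings(settings)  # reads the settings listed above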

    Data persistence

    Persistence: when the spider yields an Item, RedisPipeline runs
         
    a. When persisting items to redis, the key and the serializer are configurable
         
            REDIS_ITEMS_KEY = '%(spider)s:items'
            REDIS_ITEMS_SERIALIZER = 'json.dumps'
         
    b. Items are stored in a redis list
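
    A minimal sketch of using the pipeline: enable it in settings.py, then read the serialized items back from the redis list (the spider name myspider is hypothetical):

        # settings.py
        ITEM_PIPELINES = {
            'scrapy_redis.pipelines.RedisPipeline': 300,
        }

        # reading persisted items back with redis-py
        import json
        import redis

        conn = redis.Redis(host='140.143.227.206', port=8888, password='beta')
        raw = conn.lpop('myspider:items')  # items live in a list under %(spider)s:items
        if raw:
            print(json.loads(raw))         # the default serializer is JSON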

    Start URLs

    """
    Start URLs
     
        a. When fetching start URLs, read from a set or from a list? True = set; False = list
            REDIS_START_URLS_AS_SET = False    # if True, fetch with self.server.spop; if False, with self.server.lpop
        b. In the spider, start URLs are read from this redis key
            REDIS_START_URLS_KEY = '%(name)s:start_urls'
             
    """
    # If True, it uses redis' ``spop`` operation. This could be useful if you
    # want to avoid duplicates in your start urls list. In this case, urls must
    # be added via ``sadd`` command or you will get a type error from redis.
    # REDIS_START_URLS_AS_SET = False
     
    # Default start urls key for RedisSpider and RedisCrawlSpider.
    # REDIS_START_URLS_KEY = '%(name)s:start_urls'
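
    A minimal sketch of a spider that reads its start URLs from redis (the names below are hypothetical):

        # spiders/my_redis_spider.py
        from scrapy_redis.spiders import RedisSpider

        class MyRedisSpider(RedisSpider):
            name = 'myspider'
            redis_key = 'myspider:start_urls'  # matches REDIS_START_URLS_KEY

            def parse(self, response):
                yield {'url': response.url}

    Seed the crawl from any machine, e.g. redis-cli lpush myspider:start_urls http://www.cnblogs.com (use sadd instead when REDIS_START_URLS_AS_SET is True).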

    Example

    - Call scheduler.enqueue_request()
    	def enqueue_request(self, request):
    		# Does this request need filtering?
    		# Is it already in the dedup records? (If it has not been visited, it is added to the dedup records.)
    		if not request.dont_filter and self.df.request_seen(request):
    			self.df.log(request, self.spider)
    			# already visited; do not visit again
    			return False
    		
    		if self.stats:
    			self.stats.inc_value('scheduler/enqueued/redis', spider=self.spider)
    		# print('not visited yet, adding to the scheduler', request)
    		self.queue.push(request)
    		return True  
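
    As the check above shows, a request created with dont_filter=True bypasses request_seen() and is always enqueued; for example, inside a spider callback:

        from scrapy import Request

        # enqueued even if its fingerprint is already in the dedup set
        yield Request(url='http://www.cnblogs.com', callback=self.parse, dont_filter=True)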

    Downloader

    - Call scheduler.next_request()
    	def next_request(self):
    		block_pop_timeout = self.idle_before_close
    		request = self.queue.pop(block_pop_timeout)
    		if request and self.stats:
    			self.stats.inc_value('scheduler/dequeued/redis', spider=self.spider)
    		return request
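
    With FifoQueue, for example, a pop with a timeout maps to a blocking redis BRPOP; a rough redis-py equivalent (the key name is hypothetical):

        import redis

        conn = redis.Redis(host='140.143.227.206', port=8888, password='beta')
        # block up to SCHEDULER_IDLE_BEFORE_CLOSE seconds waiting for a request
        data = conn.brpop('myspider:requests', timeout=10)
        if data:
            key, serialized_request = data  # brpop returns a (key, value) pair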
    

      
