URL Deduplication
Deduplication rules are defined here and are invoked and applied by the scheduler.

a. Internally, the following settings are used to connect to Redis:

    # REDIS_HOST = 'localhost'                       # hostname
    # REDIS_PORT = 6379                              # port
    # REDIS_URL = 'redis://user:pass@hostname:9001'  # connection URL (takes precedence over the settings above)
    # REDIS_PARAMS = {}                              # Redis connection parameters
    #   Default: REDIS_PARAMS = {'socket_timeout': 30, 'socket_connect_timeout': 30, 'retry_on_timeout': True, 'encoding': REDIS_ENCODING}
    # REDIS_PARAMS['redis_cls'] = 'myproject.RedisClient'  # Python class used to connect to Redis; default: redis.StrictRedis
    # REDIS_ENCODING = "utf-8"                       # Redis encoding; default: 'utf-8'

b. Deduplication is implemented with a Redis set. The set's key is:

    key = defaults.DUPEFILTER_KEY % {'timestamp': int(time.time())}

    # Default setting: DUPEFILTER_KEY = 'dupefilter:%(timestamp)s'

c. The dedup rule converts each URL into a unique fingerprint, then checks whether that fingerprint already exists in the Redis set:

    from scrapy.utils import request
    from scrapy.http import Request

    req = Request(url='http://www.cnblogs.com/wupeiqi.html')
    result = request.request_fingerprint(req)
    print(result)  # 8ea4fd67887449313ccc12e5b6b92510cc53675c

Notes:
- The fingerprint is identical even when the URL's query parameters appear in a different order.
- Request headers are not part of the fingerprint by default; include_headers can name specific headers to include.

Example:

    from scrapy.utils import request
    from scrapy.http import Request

    req = Request(url='http://www.baidu.com?name=8&id=1', callback=lambda x: print(x), cookies={'k1': 'vvvvv'})
    result = request.request_fingerprint(req, include_headers=['cookies'])
    print(result)

    req = Request(url='http://www.baidu.com?id=1&name=8', callback=lambda x: print(x), cookies={'k1': 666})
    result = request.request_fingerprint(req, include_headers=['cookies'])
    print(result)

    # Ensure all spiders share the same duplicates filter through Redis.
    # DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"
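To make the mechanism concrete, here is a minimal sketch of a Redis-set-based dupefilter in the same spirit; it illustrates the fingerprint-plus-set idea rather than reproducing scrapy_redis's actual RFPDupeFilter code, and it assumes a Redis instance reachable on localhost:

    import time

    import redis
    from scrapy.http import Request
    from scrapy.utils import request

    conn = redis.StrictRedis(host='localhost', port=6379)
    key = 'dupefilter:%(timestamp)s' % {'timestamp': int(time.time())}

    def seen_before(req):
        # sadd returns 1 when the member is newly added and 0 when it already
        # existed, so 0 means this request's fingerprint is a duplicate.
        fp = request.request_fingerprint(req)
        return conn.sadd(key, fp) == 0

    print(seen_before(Request(url='http://www.baidu.com?name=8&id=1')))  # False: first time seen
    print(seen_before(Request(url='http://www.baidu.com?id=1&name=8')))  # True: same fingerprint, parameter order ignored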
REDIS_HOST = '192.168.16.86'                       # hostname
REDIS_PORT = 6379                                  # port
# REDIS_URL = 'redis://user:pass@hostname:9001'    # connection URL (takes precedence over the settings above)
# REDIS_PARAMS = {}                                # Redis connection parameters
#   Default: REDIS_PARAMS = {'socket_timeout': 30, 'socket_connect_timeout': 30, 'retry_on_timeout': True, 'encoding': REDIS_ENCODING}
# REDIS_PARAMS['redis_cls'] = 'redis.StrictRedis'  # Python class used to connect to Redis; default: redis.StrictRedis
REDIS_ENCODING = "utf-8"                           # Redis encoding; default: 'utf-8'

SCHEDULER_QUEUE_CLASS = 'scrapy_redis.queue.PriorityQueue'
# Queue class used by the scheduler; PriorityQueue (a sorted set) is the default.
# Alternatives: FifoQueue (a list) and LifoQueue (a list).

# Each spider has its own scrapy-redis queue, stored under a single key in Redis, e.g.:
#   renjian:requests: ['http://www.baidu.com', 'http://www.baidu.com', ...]
#   jianren:requests: ['http://www.daboa.com', 'http://www.daboa.com', ...]

SCHEDULER_QUEUE_KEY = '%(spider)s:requests'          # Redis key under which the scheduler stores pending requests
SCHEDULER_SERIALIZER = "scrapy_redis.picklecompat"   # serializer for data saved to Redis; pickle by default
SCHEDULER_PERSIST = True                             # keep the scheduler queue and dedup records on close (True = keep, False = flush)
SCHEDULER_FLUSH_ON_START = False                     # flush the scheduler queue and dedup records on start (True = flush, False = keep)
SCHEDULER_IDLE_BEFORE_CLOSE = 10                     # max seconds to wait when fetching from an empty scheduler queue

SCHEDULER_DUPEFILTER_KEY = '%(spider)s:dupefilter'   # Redis key under which dedup records are stored, e.g.:
#   renjian:dupefilter
#   jianren:dupefilter
SCHEDULER_DUPEFILTER_CLASS = 'scrapy_redis.dupefilter.RFPDupeFilter'  # class implementing the dedup rule

# Use the scrapy-redis scheduler
SCHEDULER = "scrapy_redis.scheduler.Scheduler"
# Use scrapy-redis for deduplication
DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"

# Data persistence:
# when a spider yields an Item, RedisPipeline persists it to Redis,
# using a configurable key and serializer; item data is stored in a list.
# ITEM_PIPELINES = {
#     'scrapy_redis.pipelines.RedisPipeline': 300,
# }
# REDIS_ITEMS_KEY = '%(spider)s:items'
# REDIS_ITEMS_SERIALIZER = 'json.dumps'

# Start URLs:
# fetch start URLs from a set or a list? True = fetch from a set, False = fetch from a list.
# When writing the spider, start URLs are read from this Redis key.
REDIS_START_URLS_AS_SET = False
REDIS_START_URLS_KEY = '%(name)s:start_urls'
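Given the key templates above, each spider's state can be inspected directly in Redis. A small sketch, assuming the spider name 'xiaobai' and the REDIS_HOST from the settings above:

    import redis

    conn = redis.Redis(host='192.168.16.86', port=6379)

    # With PriorityQueue, the requests key is a sorted set (zcard);
    # with FifoQueue/LifoQueue it would be a list (llen).
    print(conn.zcard('xiaobai:requests'))      # number of queued (serialized) requests

    # Dedup fingerprints live in a plain set.
    print(conn.scard('xiaobai:dupefilter'))    # number of fingerprints recorded so far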
Example
import scrapy
from scrapy.http import Request
from scrapy_redis.spiders import RedisSpider

from ..items import XiaobaiItem


class RenjianSpider(RedisSpider):
    # RedisSpider reads its start URLs from Redis (REDIS_START_URLS_KEY)
    # instead of a start_urls attribute.
    name = 'xiaobai'
    allowed_domains = ['chouti.com']

    def parse(self, response):
        # Selectors can be used directly on the response
        # (HtmlXPathSelector is deprecated).
        news_list = response.xpath('//*[@id="content-list"]/div[@class="item"]')
        for news in news_list:
            content = news.xpath('.//div[@class="part1"]/a/text()').extract_first().strip()
            url = news.xpath('.//div[@class="part1"]/a/@href').extract_first()
            yield XiaobaiItem(url=url, content=content)
        # Re-enqueue the listing page; the Redis dupefilter decides
        # whether it gets crawled again.
        yield Request(url='http://dig.chouti.com/', callback=self.parse)
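The spider imports XiaobaiItem from the project's items module, which is not shown here; a minimal definition consistent with the fields used in parse() would look like this (a sketch, since the original items.py is not part of the source):

    import scrapy

    class XiaobaiItem(scrapy.Item):
        # Fields match the keyword arguments used when the spider yields the item.
        url = scrapy.Field()
        content = scrapy.Field()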
import redis

# Push a start URL into the key named by REDIS_START_URLS_KEY ('xiaobai:start_urls').
# The host should be the same Redis instance the scheduler connects to (REDIS_HOST above).
conn = redis.Redis(host='192.168.16.86', port=6379)
conn.lpush('xiaobai:start_urls', 'http://www.chouti.com')
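After seeding, the same connection can be used to confirm that the spider has picked up the URL and, if RedisPipeline is enabled with the default REDIS_ITEMS_KEY, that items are accumulating (a verification sketch under those assumptions):

    import redis

    conn = redis.Redis(host='192.168.16.86', port=6379)

    # The start_urls list shrinks once the spider pops the seeded URL ...
    print(conn.llen('xiaobai:start_urls'))

    # ... and persisted items accumulate under '%(spider)s:items' as serialized strings.
    print(conn.lrange('xiaobai:items', 0, -1))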