• Scrapy framework notes


    Persistence

    Steps

    pipeline/items
    	a. First write the pipeline class
    		class XXXPipeline(object):
    			def process_item(self, item, spider):
    				return item
    				
    	b. Write the Item class
    		class XdbItem(scrapy.Item):
    			href = scrapy.Field()
    			title = scrapy.Field()
    					
    	c. Configuration
    		ITEM_PIPELINES = {
    		   'xdb.pipelines.XdbPipeline': 300,
    		}
    	
    	d. In the spider, each time yield hands over an Item, process_item is called once (see the sketch after these steps).
    		
    		yield the Item object
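
    A minimal spider sketch tying steps a-d together (the spider name and selectors are illustrative, and XdbItem is assumed to live in xdb/items.py):

    import scrapy
    from xdb.items import XdbItem   # assumed location of the Item class from step b

    class ChoutiSpider(scrapy.Spider):
    	name = 'chouti'
    	start_urls = ['https://dig.chouti.com/']

    	def parse(self, response):
    		# one XdbItem is yielded per link; Scrapy then calls every
    		# enabled pipeline's process_item once for each yielded item
    		for a in response.xpath('//div[@id="content-list"]//a'):
    			yield XdbItem(
    				title=a.xpath('./text()').extract_first(),
    				href=a.xpath('./@href').extract_first(),
    			)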

    Writing a pipeline

    from scrapy.exceptions import DropItem
    
    class FilePipeline(object):
    
    	def __init__(self,path):
    		self.f = None
    		self.path = path
    
    	@classmethod
    	def from_crawler(cls, crawler):
    		"""
    		Called at startup to create the pipeline object
    		:param crawler:
    		:return:
    		"""
    		print('File.from_crawler')
    		path = crawler.settings.get('HREF_FILE_PATH')
    		return cls(path)
    
    	def open_spider(self,spider):
    		"""
    		Called when the spider starts running
    		:param spider:
    		:return:
    		"""
    		print('File.open_spider')
    		self.f = open(self.path,'a+')
    
    	def process_item(self, item, spider):
    		# f = open('xx.log','a+')
    		# f.write(item['href'] + '\n')
    		# f.close()
    		print('File',item['href'])
    		self.f.write(item['href'] + '\n')
    		
    		# return item  	# pass the item on to the next pipeline's process_item
    		raise DropItem()  # stop here: later pipelines' process_item will not run for this item
    
    	def close_spider(self,spider):
    		"""
    		Called when the spider closes
    		:param spider:
    		:return:
    		"""
    		print('File.close_spider')
    		self.f.close()

    Note: a pipeline instance is shared by all spiders. To customize behavior for a particular spider, branch on the spider argument yourself.
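
    A settings sketch for wiring this up, assuming FilePipeline lives in xdb/pipelines.py and using an illustrative HREF_FILE_PATH value:

    # settings.py
    ITEM_PIPELINES = {
       'xdb.pipelines.FilePipeline': 300,
    }
    HREF_FILE_PATH = 'href.log'   # read back in FilePipeline.from_crawler via crawler.settings.get('HREF_FILE_PATH')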

    Deduplication rules

    Write the filter class

    from scrapy.dupefilters import BaseDupeFilter
    from scrapy.utils.request import request_fingerprint
    
    class XdbDupeFilter(BaseDupeFilter):
    
    	def __init__(self):
    		self.visited_fd = set()
    
    	@classmethod
    	def from_settings(cls, settings):
    		return cls()
    
    	def request_seen(self, request):
    		fd = request_fingerprint(request=request)
    		if fd in self.visited_fd:
    			return True
    		self.visited_fd.add(fd)
    
    	def open(self):  # can return deferred
    		print('dupefilter opened')
    
    	def close(self, reason):  # can return a deferred
    		print('dupefilter closed')
    
    	# def log(self, request, spider):  # log that a request has been filtered
    	#     print('filter log')

    Configuration

    # Override the default dedup filter class
    # DUPEFILTER_CLASS = 'scrapy.dupefilters.RFPDupeFilter'
    DUPEFILTER_CLASS = 'xdb.dupefilters.XdbDupeFilter'

    Using it in a spider

    class ChoutiSpider(scrapy.Spider):
    	name = 'chouti'
    	allowed_domains = ['chouti.com']
    	start_urls = ['https://dig.chouti.com/']
    
    	def parse(self, response):
    		print(response.request.url)
    		# item_list = response.xpath('//div[@id="content-list"]/div[@class="item"]')
    		# for item in item_list:
    		#     text = item.xpath('.//a/text()').extract_first()
    		#     href = item.xpath('.//a/@href').extract_first()
    
    		page_list = response.xpath('//div[@id="dig_lcpage"]//a/@href').extract()
    		for page in page_list:
    			from scrapy.http import Request
    			page = "https://dig.chouti.com" + page
    			# yield Request(url=page,callback=self.parse,dont_filter=False) # dedup filter is applied, e.g. https://dig.chouti.com/all/hot/recent/2
    			yield Request(url=page,callback=self.parse,dont_filter=True) # dont_filter=True bypasses the dedup filter

    Note:
    - implement the correct logic in request_seen
    - requests must be sent with dont_filter=False (the default) for the filter to be applied

    Depth

    Settings file:
    	# limit crawl depth
    	DEPTH_LIMIT = 3
    

    cookie

    Method 1:
    	- Attach the cookies on the request
    		Request(
    			url='https://dig.chouti.com/login',
    			method='POST',
    			body="phone=8613121758648&password=woshiniba&oneMonth=1",# # body=urlencode({})"phone=8615131255555&password=12sdf32sdf&oneMonth=1"
    			cookies=self.cookie_dict,
    			headers={
    				'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'
    			},
    			callback=self.check_login
    		)
    	
    	- Parse the cookies out of the login response:
    			from scrapy.http.cookies import CookieJar

    			cookie_dict = {}
    			cookie_jar = CookieJar()
    			cookie_jar.extract_cookies(response, response.request)

    			# walk the jar object and copy every cookie into a plain dict
    			for k, v in cookie_jar._cookies.items():
    				for i, j in v.items():
    					for m, n in j.items():
    						cookie_dict[m] = n.value
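
    A rough sketch of where that parsing code typically lives: as the check_login method referenced by the login request above, after which later requests can carry the parsed cookies. The flow is an assumption pieced together from the snippets above, not the full original spider:

    	from scrapy.http import Request
    	from scrapy.http.cookies import CookieJar

    	def check_login(self, response):
    		# fill self.cookie_dict from the login response
    		cookie_jar = CookieJar()
    		cookie_jar.extract_cookies(response, response.request)
    		for k, v in cookie_jar._cookies.items():        # per domain
    			for i, j in v.items():                      # per path
    				for m, n in j.items():                  # cookie name -> Cookie object
    					self.cookie_dict[m] = n.value
    		# a follow-up request carries the cookies explicitly
    		yield Request(url='https://dig.chouti.com/', cookies=self.cookie_dict, callback=self.parse)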

    start_url

    How it works internally

    How the Scrapy engine pulls the start URLs from the spider:
    	1. call start_requests and take its return value
    	2. v = iter(return value)
    	3. 
    		req1 = v.__next__()
    		req2 = v.__next__()
    		req3 = v.__next__()
    		...
    	4. all of the requests are handed to the scheduler
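
    A simplified illustration of that loop, assuming a spider instance is at hand (the real engine hands requests to its scheduler lazily; the list below is just a stand-in):

    	scheduler_queue = []                    # stand-in for the real scheduler
    	ret = spider.start_requests()           # 1. call start_requests
    	v = iter(ret)                           # 2. works for a generator or a list
    	while True:                             # 3. pull requests one at a time
    		try:
    			req = v.__next__()
    		except StopIteration:
    			break
    		scheduler_queue.append(req)         # 4. every request ends up in the scheduler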

    Writing start_requests

    import scrapy
    from scrapy.http import Request

    class ChoutiSpider(scrapy.Spider):
    	name = 'chouti'
    	allowed_domains = ['chouti.com']
    	start_urls = ['https://dig.chouti.com/']
    	cookie_dict = {}
    	
    	def start_requests(self):
    		# Method 1: yield the Requests (generator)
    		for url in self.start_urls:
    			yield Request(url=url)
    		# Method 2: return a list of Requests
    		# req_list = []
    		# for url in self.start_urls:
    		#     req_list.append(Request(url=url))
    		# return req_list 

    Customization: the start URLs could also be fetched from Redis (see the sketch below).
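
    A sketch of that customization, assuming a locally running Redis and a list key named 'chouti:start_urls' (both the key and the connection details are illustrative):

    import redis
    import scrapy
    from scrapy.http import Request

    class ChoutiSpider(scrapy.Spider):
    	name = 'chouti'

    	def start_requests(self):
    		conn = redis.Redis(host='127.0.0.1', port=6379)
    		while True:
    			url = conn.lpop('chouti:start_urls')   # pop the next start URL, if any
    			if not url:
    				break
    			yield Request(url=url.decode('utf-8'), callback=self.parse)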

    Depth and priority

    Depth

    - starts at 0
    - each time a request is yielded, its depth becomes the originating request's depth + 1
    Setting: DEPTH_LIMIT caps the maximum depth

    Priority

    - a request's download priority is reduced by depth * DEPTH_PRIORITY (see the sketch below)
    Setting: DEPTH_PRIORITY
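
    A simplified sketch of what Scrapy's DepthMiddleware does with both settings for each request yielded from a response (the function and its parameter defaults are illustrative, not the library's actual code):

    	def apply_depth(request, response, depth_limit=3, depth_priority=1):
    		depth = response.meta.get('depth', 0) + 1
    		request.meta['depth'] = depth
    		if depth_priority:
    			# deeper requests get a lower priority, so shallow pages download first
    			request.priority -= depth * depth_priority
    		if depth_limit and depth > depth_limit:
    			return None        # over DEPTH_LIMIT: the request is dropped, never scheduled
    		return request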

    Downloader middleware

    Setting a proxy in Scrapy

    Built-in

    Set the proxy in os.environ before the spider starts issuing requests.
    	from scrapy.http import Request

    	class ChoutiSpider(scrapy.Spider):
    		name = 'chouti'
    		allowed_domains = ['chouti.com']
    		start_urls = ['https://dig.chouti.com/']
    		cookie_dict = {}
    
    		def start_requests(self):
    			import os
    			os.environ['HTTPS_PROXY'] = "http://root:woshiniba@192.168.11.11:9999/"
    			os.environ['HTTP_PROXY'] = 'http://19.11.2.32'
    			for url in self.start_urls:
    				yield Request(url=url,callback=self.parse)

    meta (the proxy must be attached on every request yourself)

    from scrapy.http import Request

    class ChoutiSpider(scrapy.Spider):
    	name = 'chouti'
    	allowed_domains = ['chouti.com']
    	start_urls = ['https://dig.chouti.com/']
    	cookie_dict = {}
    
    	def start_requests(self):
    		for url in self.start_urls:
    			yield Request(url=url,callback=self.parse,meta={'proxy': 'http://root:woshiniba@192.168.11.11:9999/'})

    Custom proxy middleware

    import base64
    import random
    from six.moves.urllib.parse import unquote, urlunparse
    try:
    	from urllib2 import _parse_proxy
    except ImportError:
    	from urllib.request import _parse_proxy
    from scrapy.utils.python import to_bytes
    
    class XdbProxyMiddleware(object):
    
    	def _basic_auth_header(self, username, password):
    		# build the value for the Proxy-Authorization header
    		user_pass = to_bytes(
    			'%s:%s' % (unquote(username), unquote(password)),
    			encoding='latin-1')
    		return base64.b64encode(user_pass).strip()
    
    	def process_request(self, request, spider):
    		# pick a random proxy for every outgoing request
    		PROXIES = [
    			"http://root:woshiniba@192.168.11.11:9999/",
    			"http://root:woshiniba@192.168.11.12:9999/",
    			"http://root:woshiniba@192.168.11.13:9999/",
    			"http://root:woshiniba@192.168.11.14:9999/",
    			"http://root:woshiniba@192.168.11.15:9999/",
    			"http://root:woshiniba@192.168.11.16:9999/",
    		]
    		url = random.choice(PROXIES)
    
    		# split credentials from the proxy URL, then rebuild it without them
    		orig_type = ""
    		proxy_type, user, password, hostport = _parse_proxy(url)
    		proxy_url = urlunparse((proxy_type or orig_type, hostport, '', '', '', ''))
    
    		if user:
    			creds = self._basic_auth_header(user, password)
    		else:
    			creds = None
    		request.meta['proxy'] = proxy_url
    		if creds:
    			request.headers['Proxy-Authorization'] = b'Basic ' + creds
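
    To take effect, the middleware still has to be registered in settings; the module path below assumes it lives in xdb/middlewares.py:

    # settings.py
    DOWNLOADER_MIDDLEWARES = {
       'xdb.middlewares.XdbProxyMiddleware': 543,
    }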
    