• 17.splash_case06_ScrapySplashTest-master


    taobao.py

    # -*- coding: utf-8 -*-
    from scrapy import Spider, Request
    from urllib.parse import quote
    from scrapysplashtest.items import ProductItem
    from scrapy_splash import SplashRequest
    
    script = """
    function main(splash, args)
      splash.images_enabled = false
      assert(splash:go(args.url))
      assert(splash:wait(args.wait))
      js = string.format("document.querySelector('#mainsrp-pager div.form > input').value=%d;document.querySelector('#mainsrp-pager div.form > span.btn.J_Submit').click()", args.page)
      splash:evaljs(js)
      assert(splash:wait(args.wait))
      return splash:html()
    end
    """
    
    
    class TaobaoSpider(Spider):
        name = 'taobao'
        allowed_domains = ['www.taobao.com']
        base_url = 'https://s.taobao.com/search?q='
    
        def start_requests(self):
            # The search URL is the same for every page; the Lua script above
            # navigates to the requested page number after the page loads.
            for keyword in self.settings.get('KEYWORDS'):
                for page in range(1, self.settings.get('MAX_PAGE') + 1):
                    url = self.base_url + quote(keyword)
                    yield SplashRequest(url, callback=self.parse, endpoint='execute', args={'lua_source': script, 'page': page, 'wait': 7})
    
        def parse(self, response):
            # Each product card sits under the #mainsrp-itemlist container
            products = response.xpath('//div[@id="mainsrp-itemlist"]//div[@class="items"][1]//div[contains(@class, "item")]')
            for product in products:
                item = ProductItem()
                item['price'] = ''.join(
                    product.xpath('.//div[contains(@class, "price")]//text()').extract()).strip()
                item['title'] = ''.join(
                    product.xpath('.//div[contains(@class, "title")]//text()').extract()).strip()
                item['shop'] = ''.join(
                    product.xpath('.//div[contains(@class, "shop")]//text()').extract()).strip()
                item['image'] = ''.join(product.xpath(
                    './/div[@class="pic"]//img[contains(@class, "img")]/@data-src').extract()).strip()
                item['deal'] = product.xpath(
                    './/div[contains(@class, "deal-cnt")]//text()').extract_first()
                item['location'] = product.xpath(
                    './/div[contains(@class, "location")]//text()').extract_first()
                yield item
    
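    The Lua script above can be exercised on its own, without Scrapy, by posting it to Splash's /execute HTTP endpoint. A minimal sketch, assuming a Splash instance listening at http://localhost:8050 and the project root on the import path:

    import requests
    from scrapysplashtest.spiders.taobao import script

    # Render page 2 of the search results through the same Lua script
    resp = requests.post('http://localhost:8050/execute', json={
        'lua_source': script,
        'url': 'https://s.taobao.com/search?q=iPad',
        'page': 2,
        'wait': 7,
    })
    resp.raise_for_status()
    print(len(resp.text))  # size of the rendered HTML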

    items.py

    # -*- coding: utf-8 -*-
    # Define here the models for your scraped items
    #
    # See documentation in:
    # http://doc.scrapy.org/en/latest/topics/items.html
    
    from scrapy import Item, Field
    
    class ProductItem(Item):
        collection = 'products'
        
        image = Field()
        price = Field()
        deal = Field()
        title = Field()
        shop = Field()
        location = Field()
    

    middlewares.py

    # -*- coding: utf-8 -*-
    # Define here the models for your spider middleware
    #
    # See documentation in:
    # http://doc.scrapy.org/en/latest/topics/spider-middleware.html
    
    from scrapy import signals
    
    class ScrapysplashtestSpiderMiddleware(object):
        # Not all methods need to be defined. If a method is not defined,
        # scrapy acts as if the spider middleware does not modify the
        # passed objects.
    
        @classmethod
        def from_crawler(cls, crawler):
            # This method is used by Scrapy to create your spiders.
            s = cls()
            crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
            return s
    
        def process_spider_input(self, response, spider):
            # Called for each response that goes through the spider
            # middleware and into the spider.
    
            # Should return None or raise an exception.
            return None
    
        def process_spider_output(self, response, result, spider):
            # Called with the results returned from the Spider, after
            # it has processed the response.
    
            # Must return an iterable of Request, dict or Item objects.
            for i in result:
                yield i
    
        def process_spider_exception(self, response, exception, spider):
            # Called when a spider or process_spider_input() method
            # (from other spider middleware) raises an exception.
    
            # Should return either None or an iterable of Response, dict
            # or Item objects.
            pass
    
        def process_start_requests(self, start_requests, spider):
            # Called with the start requests of the spider, and works
            # similarly to the process_spider_output() method, except
            # that it doesn’t have a response associated.
    
            # Must return only requests (not items).
            for r in start_requests:
                yield r
    
        def spider_opened(self, spider):
            spider.logger.info('Spider opened: %s' % spider.name)
    

    pipelines.py

    # -*- coding: utf-8 -*-
    # Define your item pipelines here
    #
    # Don't forget to add your pipeline to the ITEM_PIPELINES setting
    # See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
    
    import pymongo
    
    class MongoPipeline(object):
        def __init__(self, mongo_uri, mongo_db):
            self.mongo_uri = mongo_uri
            self.mongo_db = mongo_db
        
        @classmethod
        def from_crawler(cls, crawler):
            return cls(mongo_uri=crawler.settings.get('MONGO_URI'), mongo_db=crawler.settings.get('MONGO_DB'))
        
        def open_spider(self, spider):
            self.client = pymongo.MongoClient(self.mongo_uri)
            self.db = self.client[self.mongo_db]
        
        def process_item(self, item, spider):
            # insert_one: Collection.insert is deprecated/removed in recent pymongo
            self.db[item.collection].insert_one(dict(item))
            return item
        
        def close_spider(self, spider):
            self.client.close()
    
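    After a crawl, the stored items can be spot-checked directly with pymongo. A minimal sketch, assuming the MONGO_URI and MONGO_DB values from settings.py below:

    import pymongo

    # Connect with the same settings the pipeline uses
    client = pymongo.MongoClient('localhost')
    products = client['taobao']['products']
    print(products.count_documents({}))       # number of items stored
    print(products.find_one({}, {'_id': 0}))  # one sample document
    client.close()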

    settings.py

    # -*- coding: utf-8 -*-
    # Scrapy settings for scrapysplashtest project
    #
    # For simplicity, this file contains only settings considered important or
    # commonly used. You can find more settings consulting the documentation:
    #
    #     http://doc.scrapy.org/en/latest/topics/settings.html
    #     http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
    #     http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
    
    BOT_NAME = 'scrapysplashtest'
    
    SPIDER_MODULES = ['scrapysplashtest.spiders']
    NEWSPIDER_MODULE = 'scrapysplashtest.spiders'
    
    # Crawl responsibly by identifying yourself (and your website) on the user-agent
    # USER_AGENT = 'scrapysplashtest (+http://www.yourdomain.com)'
    
    # Obey robots.txt rules
    ROBOTSTXT_OBEY = False
    
    # Configure maximum concurrent requests performed by Scrapy (default: 16)
    # CONCURRENT_REQUESTS = 32
    
    # Configure a delay for requests for the same website (default: 0)
    # See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
    # See also autothrottle settings and docs
    # DOWNLOAD_DELAY = 3
    # The download delay setting will honor only one of:
    # CONCURRENT_REQUESTS_PER_DOMAIN = 16
    # CONCURRENT_REQUESTS_PER_IP = 16
    
    # Disable cookies (enabled by default)
    # COOKIES_ENABLED = False
    
    # Disable Telnet Console (enabled by default)
    # TELNETCONSOLE_ENABLED = False
    
    # Override the default request headers:
    # DEFAULT_REQUEST_HEADERS = {
    #   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    #   'Accept-Language': 'en',
    # }
    
    # Enable or disable spider middlewares
    # See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
    SPIDER_MIDDLEWARES = {
        'scrapy_splash.SplashDeduplicateArgsMiddleware': 100,
    }
    
    # Enable or disable downloader middlewares
    # See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
    DOWNLOADER_MIDDLEWARES = {
        'scrapy_splash.SplashCookiesMiddleware': 723,
        'scrapy_splash.SplashMiddleware': 725,
        'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': 810
    }
    
    # Enable or disable extensions
    # See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
    # EXTENSIONS = {
    #    'scrapy.extensions.telnet.TelnetConsole': None,
    # }
    
    # Configure item pipelines
    # See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
    ITEM_PIPELINES = {
        'scrapysplashtest.pipelines.MongoPipeline': 300,
    }
    
    # Enable and configure the AutoThrottle extension (disabled by default)
    # See http://doc.scrapy.org/en/latest/topics/autothrottle.html
    # AUTOTHROTTLE_ENABLED = True
    # The initial download delay
    # AUTOTHROTTLE_START_DELAY = 5
    # The maximum download delay to be set in case of high latencies
    # AUTOTHROTTLE_MAX_DELAY = 60
    # The average number of requests Scrapy should be sending in parallel to
    # each remote server
    # AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
    # Enable showing throttling stats for every response received:
    # AUTOTHROTTLE_DEBUG = False
    
    # Enable and configure HTTP caching (disabled by default)
    # See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
    # HTTPCACHE_ENABLED = True
    # HTTPCACHE_EXPIRATION_SECS = 0
    # HTTPCACHE_DIR = 'httpcache'
    # HTTPCACHE_IGNORE_HTTP_CODES = []
    # HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
    
    KEYWORDS = ['iPad']
    
    MAX_PAGE = 100
    
    SPLASH_URL = 'http://localhost:8050'
    
    DUPEFILTER_CLASS = 'scrapy_splash.SplashAwareDupeFilter'
    HTTPCACHE_STORAGE = 'scrapy_splash.SplashAwareFSCacheStorage'
    
    MONGO_URI = 'localhost'
    MONGO_DB = 'taobao'
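
    With the settings in place, the spider needs a running Splash instance reachable at SPLASH_URL. A typical setup, assuming Docker and the official scrapinghub/splash image, followed by the usual Scrapy invocation:

    docker run -p 8050:8050 scrapinghub/splash
    scrapy crawl taobao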
    
  • Original article: https://www.cnblogs.com/hankleo/p/10808279.html