pearPro crawler (PearVideo)


    What this project covers:

    1. Uses Selenium: pass the path of a chromedriver.exe matching your Chrome version to webdriver.Chrome(executable_path='path')
    2. When requesting a video detail page such as https://www.pearvideo.com/video_1708124, the .mp4 playback address is injected by JavaScript, so the page must be loaded dynamically with Selenium before XPath parsing can see it (a minimal standalone sketch follows this list)
    3. Downloads the four videos under "Hottest" (最热) in the "LIFE" (生活) section at https://www.pearvideo.com/category_5
    4. The resulting mp4 playback URL looks like: https://video.pearvideo.com/mp4/third/20201120/cont-1708124-15454898-191438-hd.mp4
    5. Overall flow: visit each video detail page, extract the mp4 URL from the <video> tag, then download the file to a local directory
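
    The sketch below is a minimal standalone illustration of point 2, assuming a Selenium 3 setup (the executable_path era this project uses) with chromedriver on PATH; it is not part of the project code:

    from time import sleep
    from selenium import webdriver

    bro = webdriver.Chrome()  # or webdriver.Chrome(executable_path='path') as in the project
    bro.get('https://www.pearvideo.com/video_1708124')
    sleep(2)  # give the player's JavaScript time to inject the <video> tag
    # the src attribute exists only after the dynamic load, which is why a plain
    # requests.get() of the detail page never sees the .mp4 address
    print(bro.find_element_by_xpath('//*[@id="JprismPlayer"]/video').get_attribute('src'))
    bro.quit()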

    Project structure

    myPearPro
        myPearPro
            spiders
                pear.py
            items.py
            middlewares.py
            pipelines.py
            settings.py
        scrapy.cfg
    

    pear.py

    # -*- coding: utf-8 -*-
    import os

    import scrapy
    from selenium import webdriver
    from selenium.webdriver.chrome.options import Options
    
    
    class PearSpider(scrapy.Spider):
        name = 'pear'
        allowed_domains = ['pearvideo.com']
        start_urls = ['https://www.pearvideo.com/category_5']
        # __file__ must not be quoted: abspath('__file__') would resolve the
        # literal string relative to the working directory instead of this file
        path = os.path.dirname(os.path.abspath(__file__))
        download_path = os.path.join(path, 'video')
    
        def __init__(self):
            # make sure the download directory exists before parse_mp4 writes to it
            os.makedirs(self.download_path, exist_ok=True)
            chrome_options = Options()
            chrome_options.add_argument('--window-size=1500,1366')
            # author's local chromedriver path; point this at your own copy
            self.bro = webdriver.Chrome(options=chrome_options,
                                        executable_path=r'D:\python_charm\爬虫课件\我的项目文件夹\chromedriver.exe')
    
        def parse(self, response):
            # the category page markup is present in the initial HTML,
            # so Scrapy can parse it without Selenium
            li_lists = response.xpath('//*[@id="listvideoListUl"]/li')
            for li in li_lists:
                title = li.xpath('./div/a/div[2]/text()').extract_first()
                print('-----> video title:', title)
                new_url = 'https://www.pearvideo.com/' + li.xpath('./div/a/@href').extract_first()
                print('-----> video detail url:', new_url)
                yield scrapy.Request(new_url, callback=self.parse_video)
    
        # visit the video detail page (rendered by Selenium in the downloader
        # middleware below) and extract the mp4 address from the <video> tag
        def parse_video(self, response):
            video = response.xpath('//*[@id="JprismPlayer"]/video/@src').extract_first()
            print('-----> video src:', video)
            yield scrapy.Request(video, callback=self.parse_mp4)
    
        def parse_mp4(self, response):
            # file name is the last URL segment, e.g. cont-1708124-15454898-191438-hd.mp4
            name = response.url.split('/')[-1]
            print('-----> save location (project):', self.path)
            print('-----> save location (videos):', self.download_path)
            with open(os.path.join(self.download_path, name), 'wb') as f:
                f.write(response.body)
    
        def closed(self, spider):
            # shut down the shared browser when the spider finishes
            self.bro.quit()
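
    Run it from the project root (the directory containing scrapy.cfg):

        scrapy crawl pear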
    

    items.py

    # -*- coding: utf-8 -*-
    
    # Define here the models for your scraped items
    #
    # See documentation in:
    # https://docs.scrapy.org/en/latest/topics/items.html
    
    import scrapy
    
    
    class MypearproItem(scrapy.Item):
        # define the fields for your item here like:
        # name = scrapy.Field()
        name = scrapy.Field()
        url = scrapy.Field()
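
    The spider writes files directly in parse_mp4 and never populates this item, so both fields are currently unused. Below is a sketch of how parse() could yield the item instead, if you wanted to route the data through the pipeline (hypothetical, and it would also require enabling ITEM_PIPELINES in settings.py):

    from myPearPro.items import MypearproItem

    # hypothetical replacement body for PearSpider.parse(): hand the scraped
    # title and detail URL to the item pipeline instead of printing them
    def parse(self, response):
        for li in response.xpath('//*[@id="listvideoListUl"]/li'):
            item = MypearproItem()
            item['name'] = li.xpath('./div/a/div[2]/text()').extract_first()
            item['url'] = 'https://www.pearvideo.com/' + li.xpath('./div/a/@href').extract_first()
            yield item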
    

    middlewares.py

    # -*- coding: utf-8 -*-
    
    # Define here the models for your spider middleware
    #
    # See documentation in:
    # https://docs.scrapy.org/en/latest/topics/spider-middleware.html
    
    from scrapy import signals
    from scrapy.http import HtmlResponse
    from time import sleep
    
    
    class MypearproSpiderMiddleware:
        # Not all methods need to be defined. If a method is not defined,
        # scrapy acts as if the spider middleware does not modify the
        # passed objects.
    
        @classmethod
        def from_crawler(cls, crawler):
            # This method is used by Scrapy to create your spiders.
            s = cls()
            crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
            return s
    
        def process_spider_input(self, response, spider):
            # Called for each response that goes through the spider
            # middleware and into the spider.
    
            # Should return None or raise an exception.
            return None
    
        def process_spider_output(self, response, result, spider):
            # Called with the results returned from the Spider, after
            # it has processed the response.
    
            # Must return an iterable of Request, dict or Item objects.
            for i in result:
                yield i
    
        def process_spider_exception(self, response, exception, spider):
            # Called when a spider or process_spider_input() method
            # (from other spider middleware) raises an exception.
    
            # Should return either None or an iterable of Request, dict
            # or Item objects.
            pass
    
        def process_start_requests(self, start_requests, spider):
            # Called with the start requests of the spider, and works
            # similarly to the process_spider_output() method, except
            # that it doesn’t have a response associated.
    
            # Must return only requests (not items).
            for r in start_requests:
                yield r
    
        def spider_opened(self, spider):
            spider.logger.info('Spider opened: %s' % spider.name)
    
    
    class MypearproDownloaderMiddleware:
        # Not all methods need to be defined. If a method is not defined,
        # scrapy acts as if the downloader middleware does not modify the
        # passed objects.
    
        @classmethod
        def from_crawler(cls, crawler):
            # This method is used by Scrapy to create your spiders.
            s = cls()
            crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
            return s
    
        def process_request(self, request, spider):
            # Called for each request that goes through the downloader
            # middleware.
    
            # Must either:
            # - return None: continue processing this request
            # - or return a Response object
            # - or return a Request object
            # - or raise IgnoreRequest: process_exception() methods of
            #   installed downloader middleware will be called
            return None
    
        def process_response(self, request, response, spider):
            # intercept only detail-page requests (their URLs contain 'video_');
            # the category page and the .mp4 download keep using Scrapy's downloader
            bro = spider.bro
            if 'video_' in request.url:
                bro.get(request.url)
                sleep(2)  # crude wait for the player JS to inject the <video> tag
                page_text = bro.page_source
                # wrap the rendered DOM in an HtmlResponse so the spider's
                # XPath runs against the post-JavaScript markup
                new_response = HtmlResponse(url=request.url, body=page_text,
                                            encoding='utf-8', request=request)
                return new_response
            else:
                return response
    
        def process_exception(self, request, exception, spider):
            # Called when a download handler or a process_request()
            # (from other downloader middleware) raises an exception.
    
            # Must either:
            # - return None: continue processing this exception
            # - return a Response object: stops process_exception() chain
            # - return a Request object: stops process_exception() chain
            pass
    
        def spider_opened(self, spider):
            spider.logger.info('Spider opened: %s' % spider.name)
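
    The fixed sleep(2) is the fragile part of this middleware: too short and the <video> tag is still missing, too long and every detail page wastes time. A sketch of the same method with an explicit wait instead (my suggestion, not in the original project):

    from selenium.webdriver.common.by import By
    from selenium.webdriver.support import expected_conditions as EC
    from selenium.webdriver.support.wait import WebDriverWait

    def process_response(self, request, response, spider):
        bro = spider.bro
        if 'video_' in request.url:
            bro.get(request.url)
            # block for at most 10s, returning as soon as the <video> tag appears
            WebDriverWait(bro, 10).until(
                EC.presence_of_element_located((By.XPATH, '//*[@id="JprismPlayer"]/video')))
            return HtmlResponse(url=request.url, body=bro.page_source,
                                encoding='utf-8', request=request)
        return response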
    

    pipelines.py

    # -*- coding: utf-8 -*-
    
    # Define your item pipelines here
    #
    # Don't forget to add your pipeline to the ITEM_PIPELINES setting
    # See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
    
    
    class MypearproPipeline:
        def process_item(self, item, spider):
            return item
    

    settings.py

    # -*- coding: utf-8 -*-
    
    # Scrapy settings for myPearPro project
    #
    # For simplicity, this file contains only settings considered important or
    # commonly used. You can find more settings consulting the documentation:
    #
    #     https://docs.scrapy.org/en/latest/topics/settings.html
    #     https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
    #     https://docs.scrapy.org/en/latest/topics/spider-middleware.html
    
    BOT_NAME = 'myPearPro'
    
    SPIDER_MODULES = ['myPearPro.spiders']
    NEWSPIDER_MODULE = 'myPearPro.spiders'
    
    
    # Crawl responsibly by identifying yourself (and your website) on the user-agent
    
    # Obey robots.txt rules
    ROBOTSTXT_OBEY = False
    LOG_LEVEL = 'ERROR'
    USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36'
    
    # Configure maximum concurrent requests performed by Scrapy (default: 16)
    #CONCURRENT_REQUESTS = 32
    
    # Configure a delay for requests for the same website (default: 0)
    # See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
    # See also autothrottle settings and docs
    #DOWNLOAD_DELAY = 3
    # The download delay setting will honor only one of:
    #CONCURRENT_REQUESTS_PER_DOMAIN = 16
    #CONCURRENT_REQUESTS_PER_IP = 16
    
    # Disable cookies (enabled by default)
    #COOKIES_ENABLED = False
    
    # Disable Telnet Console (enabled by default)
    #TELNETCONSOLE_ENABLED = False
    
    # Override the default request headers:
    #DEFAULT_REQUEST_HEADERS = {
    #   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    #   'Accept-Language': 'en',
    #}
    
    # Enable or disable spider middlewares
    # See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
    #SPIDER_MIDDLEWARES = {
    #    'myPearPro.middlewares.MypearproSpiderMiddleware': 543,
    #}
    
    # Enable or disable downloader middlewares
    # See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
    DOWNLOADER_MIDDLEWARES = {
        'myPearPro.middlewares.MypearproDownloaderMiddleware': 543,
    }
    
    # Enable or disable extensions
    # See https://docs.scrapy.org/en/latest/topics/extensions.html
    #EXTENSIONS = {
    #    'scrapy.extensions.telnet.TelnetConsole': None,
    #}
    
    # Configure item pipelines
    # See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
    #ITEM_PIPELINES = {
    #    'myPearPro.pipelines.MypearproPipeline': 300,
    #}
    
    # Enable and configure the AutoThrottle extension (disabled by default)
    # See https://docs.scrapy.org/en/latest/topics/autothrottle.html
    #AUTOTHROTTLE_ENABLED = True
    # The initial download delay
    #AUTOTHROTTLE_START_DELAY = 5
    # The maximum download delay to be set in case of high latencies
    #AUTOTHROTTLE_MAX_DELAY = 60
    # The average number of requests Scrapy should be sending in parallel to
    # each remote server
    #AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
    # Enable showing throttling stats for every response received:
    #AUTOTHROTTLE_DEBUG = False
    
    # Enable and configure HTTP caching (disabled by default)
    # See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
    #HTTPCACHE_ENABLED = True
    #HTTPCACHE_EXPIRATION_SECS = 0
    #HTTPCACHE_DIR = 'httpcache'
    #HTTPCACHE_IGNORE_HTTP_CODES = []
    #HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
Original article: https://www.cnblogs.com/shiyishou/p/14023822.html