• A Python crawler with the Scrapy framework: scraping articles from Jobbole (伯乐在线)


    I. Preface

    1. Scrapy dependencies:
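
    The post leaves this list blank. As a sketch, the packages that the code below actually imports can be installed with pip (the exact package names, e.g. mysqlclient for the MySQLdb module and pypiwin32 on Windows, are assumptions rather than something the post pins down):

    pip install scrapy
    pip install mysqlclient    # provides the MySQLdb module used by the pipelines
    pip install pypiwin32      # Windows only; commonly needed for Twisted/Scrapy there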

    II. Creating the project

    1. Create a Scrapy project:

    scrapy startproject ArticleSpider

    2. Create a new spider:

    cd ArticleSpider
    scrapy genspider jobbole blog.jobbole.com   # created from the default 'basic' template
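
    For reference, genspider with the 'basic' template produces roughly the following skeleton (the exact output varies with the Scrapy version); sections VI and VII below replace its parse method:

    # ArticleSpider/spiders/jobbole.py (generated skeleton, approximate)
    import scrapy


    class JobboleSpider(scrapy.Spider):
        name = 'jobbole'
        allowed_domains = ['blog.jobbole.com']
        start_urls = ['http://blog.jobbole.com/']

        def parse(self, response):
            pass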

    III. Debugging Scrapy in PyCharm

    1. Create a main.py file in the ArticleSpider directory

    2. Use the execute function provided by scrapy.cmdline; calling it runs the scrapy command from a Python script

    from scrapy.cmdline import execute
    
    import sys
    import os
    
    sys.path.append(os.path.dirname(os.path.abspath(__file__)))
    execute(["scrapy", "crawl", "jobbole"])
    
    # The project directory must be on sys.path, because the scrapy command only works from inside the project directory
    # __file__: the current file (main.py)
    # os.path.abspath(__file__): absolute path of main.py
    # os.path.dirname(os.path.abspath(__file__)): directory containing main.py
    # Scrapy launch command: scrapy crawl jobbole

    3. You can now set breakpoints and run main.py in PyCharm's debug mode.

    IV. settings.py configuration

    1. In settings.py, find ROBOTSTXT_OBEY and set it to False, so the spider does not filter requests according to the site's robots.txt.

    V. XPath syntax

     Note: avoid BeautifulSoup where possible; it is much slower than XPath.

    Expression                  Meaning
    article                     Selects all child nodes of the article element
    /article                    Selects the root element article
    article/a                   Selects all a elements that are children of article
    //div                       Selects all div elements, wherever they appear in the document
    article//div                Selects all div elements that are descendants of article, at any depth
    //@class                    Selects all attributes named class

    Expression                  Meaning
    /article/div[1]             Selects the first div child of article
    /article/div[last()]        Selects the last div child of article
    /article/div[last()-1]      Selects the second-to-last div child of article
    //div[@lang]                Selects all div elements that have a lang attribute
    //div[@lang='eng']          Selects all div elements whose lang attribute equals 'eng'
    /div/*                      Selects all child nodes of the div element
    //*                         Selects all elements in the document
    //div[@*]                   Selects all div elements that have at least one attribute
    /div/a|//div/p              Selects the a children of div plus all p elements under any div
    //span|//ul                 Selects all span and ul elements in the document
    article/div/p|//span        Selects p elements inside div children of article, plus every span in the document
    //h1[@id='test']/text()     Gets the text of the h1 whose id is test
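
    These expressions are easiest to try out interactively in the Scrapy shell. A short sketch against the article page used below; the selectors are the same ones used in section VI, and the actual output depends on the live page:

    scrapy shell http://blog.jobbole.com/110287/

    >>> response.xpath("//*[@id='post-110287']/div[1]/h1/text()").extract_first()                   # article title (XPath)
    >>> response.css(".entry-header h1::text").extract_first()                                      # the same field via a CSS selector
    >>> response.xpath("//span[contains(@class, 'vote-post-up')]/h10/text()").extract_first()       # praise count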

     VI. Extracting Jobbole article page fields with XPath and CSS selectors

    # jobbole.py
    import scrapy
    import re
    
    
    class JobboleSpider(scrapy.Spider):
        name = 'jobbole'
        allowed_domains = ['blog.jobbole.com']
        start_urls = ['http://blog.jobbole.com/110287/']
    
        def parse(self, response):
            # Extract fields with XPath
    
            title = response.xpath(".//*[@id='post-110287']/div[1]/h1/text()").extract_first()
            create_date = response.xpath("//p[@class='entry-meta-hide-on-mobile']/text()").extract_first().strip().replace('·', '').strip()
            praise_nums = response.xpath("//span[contains(@class, 'vote-post-up')]/h10/text()").extract_first()
            fav_nums = response.xpath("//span[contains(@class, 'bookmark-btn')]/text()").extract_first()
            match_re = re.match(r'.*?(\d+).*', fav_nums)
            if match_re:
                fav_nums = match_re.group(1)
            comment_nums = response.xpath("//a[@href='#article-comment']/text()").extract()[0]
            match_re = re.match(r'.*?(\d+).*', comment_nums)
            if match_re:
                comment_nums = match_re.group(1)
    
            content = response.xpath("//div[@class='entry']").extract()[0]
            tag_list = response.xpath("//p[@class='entry-meta-hide-on-mobile']/a/text()").extract()
            tag_list = [element for element in tag_list if not element.strip().endswith('评论')]
            tags = ','.join(tag_list)
    
            # Extract fields with CSS selectors
    
            title = response.css(".entry-header h1::text").extract_first()
            create_date = response.css("p.entry-meta-hide-on-mobile::text").extract()[0].strip().replace('·', '').strip()
            praise_nums = response.css(".vote-post-up h10::text").extract()[0]
            fav_nums = response.css(".bookmark-btn::text").extract()[0]
            match_re = re.match(r'.*?(\d+).*', fav_nums)
            if match_re:
                fav_nums = match_re.group(1)
            comment_nums = response.css("a[href='#article-comment'] span::text").extract()[0]
            match_re = re.match(r'.*?(\d+).*', comment_nums)
            if match_re:
                comment_nums = match_re.group(1)
            content = response.css("div.entry").extract()[0]
            tag_list = response.css("p.entry-meta-hide-on-mobile a::text").extract()
            tag_list = [element for element in tag_list if not element.strip().endswith('评论')]
            tags = ','.join(tag_list)    

     VII. Extracting Jobbole article page fields with an ItemLoader

    # jobbole.py
    import scrapy
    import re
    import datetime
    from scrapy.http import Request
    from urllib import parse
    from scrapy.loader import ItemLoader
    
    from ArticleSpider.items import JobBoleArticleItem, ArticleItemLoader
    from ArticleSpider.utils.common import get_md5
    
    class JobboleSpider(scrapy.Spider):
        name = 'jobbole'
        allowed_domains = ['blog.jobbole.com']
        start_urls = ['http://blog.jobbole.com/all-posts/']
    
        def parse(self, response):
            """
            1. Extract the article URLs from the list page and hand them to Scrapy to download, then parse each article
            2. Extract the next-page URL and hand it to Scrapy to download; the downloaded response is fed back into parse
            """
    
            # Extract every article URL on the list page and schedule it for download and parsing
            post_nodes = response.css("#archive .floated-thumb .post-thumb a")
            for post_node in post_nodes:
                img_url = post_node.css("img::attr(src)").extract_first("")
                post_url = post_node.css("::attr(href)").extract_first("")
                yield Request(url=parse.urljoin(response.url, post_url), meta={"front_image_url": img_url}, callback=self.parse_detail)
    
            # Extract the next-page link and schedule it for download
            next_url = response.css(".next.page-numbers::attr(href)").extract_first()
            if next_url:
                yield Request(url=parse.urljoin(response.url, next_url), callback=self.parse)
    
        def parse_detail(self, response):
            # Load the item through an ItemLoader
            item_loader = ArticleItemLoader(item=JobBoleArticleItem(), response=response)
            item_loader.add_css('title', '.entry-header h1::text')
            item_loader.add_value('url', response.url)
            item_loader.add_value('url_object_id', get_md5(response.url))
            item_loader.add_css('create_date', 'p.entry-meta-hide-on-mobile::text')
            item_loader.add_value('front_image_url', response.meta.get("front_image_url", ""))
            item_loader.add_css('praise_nums', '.vote-post-up h10::text')
            item_loader.add_css('comment_nums', "a[href='#article-comment'] span::text")
            item_loader.add_css('fav_nums', '.bookmark-btn::text')
            item_loader.add_css('tags', 'p.entry-meta-hide-on-mobile a::text')
            item_loader.add_css('content', 'div.entry')
            article_item = item_loader.load_item()
    
            yield article_item
    # items.py
    # -*- coding: utf-8 -*-
    
    # Define here the models for your scraped items
    #
    # See documentation in:
    # http://doc.scrapy.org/en/latest/topics/items.html
    import datetime
    import re
    
    import scrapy
    from scrapy.loader import ItemLoader
    from scrapy.loader.processors import MapCompose, TakeFirst, Join
    
    
    class ArticlespiderItem(scrapy.Item):
        # define the fields for your item here like:
        # name = scrapy.Field()
        pass
    
    
    def add_jobbole(value):
        return value+'-bobby'
    
    
    def data_convert(value):
        try:
            create_date = datetime.datetime.strptime(value, '%Y/%m/%d').date()
        except Exception as e:
            create_date = datetime.datetime.now().date()
    
        return create_date
    
    
    def get_nums(value):
        match_re = re.match(r'.*?(\d+).*', value)
        if match_re:
            nums = match_re.group(1)
        else:
            nums = 0
    
        return nums
    
    
    def remove_comment_tags(value):
        # drop the '评论' (comment-count) entry that gets extracted along with the tags
        if '评论' in value:
            return ''
        return value
    
    
    def return_value(value):
        return value
    
    
    class ArticleItemLoader(ItemLoader):
        # Custom ItemLoader: take only the first extracted value by default
        default_output_processor = TakeFirst()
    
    
    class JobBoleArticleItem(scrapy.Item):
        title = scrapy.Field(
            input_processor=MapCompose(lambda x:x+'-jobbole', add_jobbole)
        )
        create_date = scrapy.Field(
            input_processor=MapCompose(data_convert)
        )
        url = scrapy.Field()
        url_object_id = scrapy.Field()
        front_image_url = scrapy.Field(
            # keep the value as a list; ImagesPipeline expects a list of URLs, so this bypasses the default TakeFirst
            output_processor=MapCompose(return_value)
        )
        front_image_path = scrapy.Field()
        praise_nums = scrapy.Field(
            input_processor=MapCompose(get_nums)
        )
        comment_nums = scrapy.Field(
            input_processor=MapCompose(get_nums)
        )
        fav_nums = scrapy.Field(
            input_processor=MapCompose(get_nums)
        )
        tags = scrapy.Field(
            input_processor=MapCompose(remove_comment_tags),
            output_processor=Join(',')
        )
        content = scrapy.Field()
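
    To make the processors concrete, here is a small standalone sketch (not part of the project files) showing how MapCompose and TakeFirst transform raw extracted values, assuming get_nums from items.py is importable:

    # processors_demo.py: illustrative only, the raw strings are made-up sample values
    from scrapy.loader.processors import MapCompose, TakeFirst

    from ArticleSpider.items import get_nums

    raw = [' 8 收藏', ' 2 评论']                    # what a ::text selector might return
    print(MapCompose(get_nums)(raw))                # input processor, applied per value -> ['8', '2']
    print(TakeFirst()(MapCompose(get_nums)(raw)))   # plus the default output processor -> '8'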
    # pipelines.py
    # -*- coding: utf-8 -*-
    
    # Define your item pipelines here
    #
    # Don't forget to add your pipeline to the ITEM_PIPELINES setting
    # See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html
    import codecs
    import json
    import MySQLdb
    import MySQLdb.cursors
    
    from scrapy.pipelines.images import ImagesPipeline
    from scrapy.exporters import JsonItemExporter
    from twisted.enterprise import adbapi
    
    
    class ArticlespiderPipeline(object):
        def process_item(self, item, spider):
            return item
    
    
    class JsonWithEncodingPipeline:
        # Custom JSON export (codecs + json)
        def __init__(self):
            self.file = codecs.open('article.json', 'w', encoding='utf-8')
    
        def process_item(self, item, spider):
            lines = json.dumps(dict(item), ensure_ascii=False) + '\n'
            self.file.write(lines)
            return item
    
        def close_spider(self, spider):
            self.file.close()
    
    
    class JsonExporterPipeline(object):
        # Export JSON with Scrapy's built-in JsonItemExporter
        def __init__(self):
            self.file = open('articleExporter.json', 'wb')
            self.exporter = JsonItemExporter(self.file, encoding='utf-8', ensure_ascii=False)
            self.exporter.start_exporting()
    
        def close_spider(self, spider):
            self.exporter.finish_exporting()
            self.file.close()
    
        def process_item(self, item, spider):
            self.exporter.export_item(item)
            return item
    
    
    class MysqlPipeline(object):
        # Write to MySQL synchronously (each insert blocks the spider)
    
        def __init__(self):
            self.conn = MySQLdb.connect('127.0.0.1', 'root', '123', 'article_spider', charset='utf8', use_unicode=True)
            self.cursor = self.conn.cursor()
    
        def process_item(self, item, spider):
            insert_sql = """
            insert into jobbole_article(url_object_id, title, url, create_date, fav_nums)
            values (%s, %s, %s, %s, %s)
            """
            self.cursor.execute(insert_sql, (item['url_object_id'], item['title'], item['url'], item['create_date'], item['fav_nums']))
            self.conn.commit()
            return item
    
    
    class MysqlTwistedPipeline(object):
        def __init__(self, dbpool):
            self.dbpool = dbpool
    
        @classmethod
        def from_settings(cls, settings):
            dbparms = dict(
                host=settings['MYSQL_HOST'],
                db=settings['MYSQL_DBNAME'],
                user=settings['MYSQL_USER'],
                passwd=settings['MYSQL_PASSWORD'],
                charset='utf8',
                cursorclass=MySQLdb.cursors.DictCursor,
                use_unicode=True
            )
    
            dbpool = adbapi.ConnectionPool('MySQLdb', **dbparms)
            return cls(dbpool)
    
        def process_item(self, item, spider):
            # Run the MySQL insert asynchronously through Twisted's connection pool
            query = self.dbpool.runInteraction(self.do_insert, item)
            query.addErrback(self.handle_error)
            return item
    
        def handle_error(self, failure):
            # Handle errors raised by the asynchronous insert
            print(failure)
    
        def do_insert(self, cursor, item):
            # Perform the actual insert
            insert_sql = """
            insert into jobbole_article(url_object_id, title, url, create_date, fav_nums)
            values (%s, %s, %s, %s, %s)
            """
            cursor.execute(insert_sql, (item['url_object_id'], item['title'], item['url'], item['create_date'], item['fav_nums']))
            # no explicit commit here: adbapi's runInteraction commits automatically when this function returns
    
    
    class ArticleImagePipeline(ImagesPipeline):
        def item_completed(self, results, item, info):
            if 'front_image_url' in item:
                for ok, value in results:
                    image_file_path = value['path']
                item['front_image_path'] = image_file_path
            return item
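
    Both MySQL pipelines assume a jobbole_article table that the post never shows. Below is a minimal sketch of a schema that would satisfy the INSERT above; the column names come from the SQL, while the types and lengths are assumptions:

    # create_table.py: one-off helper; the schema is inferred from the INSERT statement, not given by the post
    import MySQLdb

    conn = MySQLdb.connect('127.0.0.1', 'root', '123', 'article_spider', charset='utf8')
    cursor = conn.cursor()
    cursor.execute("""
        create table if not exists jobbole_article (
            url_object_id varchar(50) not null primary key,
            title varchar(200) not null,
            url varchar(300) not null,
            create_date date,
            fav_nums int(11) default 0
        ) engine=InnoDB default charset=utf8
    """)
    conn.commit()
    conn.close()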
    # utils/common.py
    
    import hashlib
    
    
    def get_md5(url):
        if isinstance(url, str):
            url = url.encode('utf-8')
        m = hashlib.md5()
        m.update(url)
        return m.hexdigest()
    
    
    if __name__ == '__main__':
        print(get_md5('http://jobbole.com'))
    # settings.py
    
    import os
    
    # Scrapy settings for ArticleSpider project
    #
    # For simplicity, this file contains only settings considered important or
    # commonly used. You can find more settings consulting the documentation:
    #
    #     http://doc.scrapy.org/en/latest/topics/settings.html
    #     http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
    #     http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
    
    BOT_NAME = 'ArticleSpider'
    
    SPIDER_MODULES = ['ArticleSpider.spiders']
    NEWSPIDER_MODULE = 'ArticleSpider.spiders'
    
    
    # Crawl responsibly by identifying yourself (and your website) on the user-agent
    #USER_AGENT = 'ArticleSpider (+http://www.yourdomain.com)'
    
    # Obey robots.txt rules
    ROBOTSTXT_OBEY = False
    
    # Configure maximum concurrent requests performed by Scrapy (default: 16)
    #CONCURRENT_REQUESTS = 32
    
    # Configure a delay for requests for the same website (default: 0)
    # See http://scrapy.readthedocs.org/en/latest/topics/settings.html#download-delay
    # See also autothrottle settings and docs
    #DOWNLOAD_DELAY = 3
    # The download delay setting will honor only one of:
    #CONCURRENT_REQUESTS_PER_DOMAIN = 16
    #CONCURRENT_REQUESTS_PER_IP = 16
    
    # Disable cookies (enabled by default)
    #COOKIES_ENABLED = False
    
    # Disable Telnet Console (enabled by default)
    #TELNETCONSOLE_ENABLED = False
    
    # Override the default request headers:
    #DEFAULT_REQUEST_HEADERS = {
    #   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    #   'Accept-Language': 'en',
    #}
    
    # Enable or disable spider middlewares
    # See http://scrapy.readthedocs.org/en/latest/topics/spider-middleware.html
    #SPIDER_MIDDLEWARES = {
    #    'ArticleSpider.middlewares.ArticlespiderSpiderMiddleware': 543,
    #}
    
    # Enable or disable downloader middlewares
    # See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html
    #DOWNLOADER_MIDDLEWARES = {
    #    'ArticleSpider.middlewares.MyCustomDownloaderMiddleware': 543,
    #}
    
    # Enable or disable extensions
    # See http://scrapy.readthedocs.org/en/latest/topics/extensions.html
    #EXTENSIONS = {
    #    'scrapy.extensions.telnet.TelnetConsole': None,
    #}
    
    # Configure item pipelines
    # See http://scrapy.readthedocs.org/en/latest/topics/item-pipeline.html
    ITEM_PIPELINES = {
        # 'ArticleSpider.pipelines.JsonExporterPipeline': 2,
        # 'scrapy.pipelines.images.ImagesPipeline': 1,
        'ArticleSpider.pipelines.ArticleImagePipeline': 2,
        'ArticleSpider.pipelines.MysqlPipeline': 1,
    }
    IMAGES_URLS_FIELD = 'front_image_url'
    project_dir = os.path.abspath(os.path.dirname(__file__))
    IMAGES_STORE = os.path.join(project_dir, "images")
    
    # IMAGES_MIN_HEIGHT = 100
    # IMAGES_MIN_WIDTH = 100
    
    # Enable and configure the AutoThrottle extension (disabled by default)
    # See http://doc.scrapy.org/en/latest/topics/autothrottle.html
    #AUTOTHROTTLE_ENABLED = True
    # The initial download delay
    #AUTOTHROTTLE_START_DELAY = 5
    # The maximum download delay to be set in case of high latencies
    #AUTOTHROTTLE_MAX_DELAY = 60
    # The average number of requests Scrapy should be sending in parallel to
    # each remote server
    #AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
    # Enable showing throttling stats for every response received:
    #AUTOTHROTTLE_DEBUG = False
    
    # Enable and configure HTTP caching (disabled by default)
    # See http://scrapy.readthedocs.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
    #HTTPCACHE_ENABLED = True
    #HTTPCACHE_EXPIRATION_SECS = 0
    #HTTPCACHE_DIR = 'httpcache'
    #HTTPCACHE_IGNORE_HTTP_CODES = []
    #HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
    
    MYSQL_HOST = '127.0.0.1'
    MYSQL_DBNAME = 'article_spider'
    MYSQL_USER = 'root'
    MYSQL_PASSWORD = '123'
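
    With the spider, items, pipelines and settings in place, the crawl can be started from the project root, either directly or through the main.py from section III:

    scrapy crawl jobbole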
  • Original post: https://www.cnblogs.com/wangchaowei/p/6921498.html