• Web Crawler Notes 8: Scrapy Practice Projects


    5.1. (Case 1) Mobile app packet-capture spider (Douyu)

    1. items.py

    import scrapy


    class DouyuspiderItem(scrapy.Item):
        # the anchor's nickname, used as the saved image's file name
        nickName = scrapy.Field()
        # URL of the image
        imageUrl = scrapy.Field()
        # local path where the image is saved
        imagePath = scrapy.Field()
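    These Item fields behave like dict keys: the spider and pipeline below read and write them with item['field'] syntax. A tiny illustrative sketch (the values are made up):

    item = DouyuspiderItem()
    item['nickName'] = u'某主播'                      # hypothetical nickname
    item['imageUrl'] = 'http://example.com/a.jpg'     # hypothetical image URL
    print(dict(item))                                 # prints the collected fields as a plain dict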

     2. spiders/douyu.py

    # -*- coding: utf-8 -*-
    import scrapy
    from douyuSpider.items import DouyuspiderItem
    import json
    import re


    class DouyuSpider(scrapy.Spider):
        name = 'douyu'
        allowed_domains = ['capi.douyucdn.cn']
        offset = 0
        url = "http://capi.douyucdn.cn/api/v1/getVerticalRoom?limit=20&offset="
        start_urls = [url + str(offset)]

        def parse(self, response):
            data = json.loads(response.body)["data"]
            for each in data:
                item = DouyuspiderItem()
                item["nickName"] = each["nickname"]
                item["imageUrl"] = each["vertical_src"]
                yield item

            self.offset += 20
            # build the next page's URL by rewriting the offset query parameter
            nextUrl = re.sub(r'offset=\d+', 'offset=' + str(self.offset), response.url)
            yield scrapy.Request(nextUrl, callback=self.parse)
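    As a side note, the pagination in parse() works by rewriting the offset query parameter of the current URL with re.sub. A minimal standalone sketch of that substitution (same API endpoint as above):

    import re

    url = "http://capi.douyucdn.cn/api/v1/getVerticalRoom?limit=20&offset=0"
    next_offset = 20
    # swap the old offset value in the query string for the new one
    next_url = re.sub(r'offset=\d+', 'offset=' + str(next_offset), url)
    print(next_url)   # http://capi.douyucdn.cn/api/v1/getVerticalRoom?limit=20&offset=20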

     3. Configure settings.py

    DEFAULT_REQUEST_HEADERS = {
        'User-Agent': 'News 6.6.5 rv:6.6.5.03 (iPhone; iOS 11.2.6; zh_CN) Cronet'
    }

    ITEM_PIPELINES = {
        'douyuSpider.pipelines.DouyuspiderPipeline': 300,
    }

    IMAGES_STORE = 'E:\\PythonSpider\\day05\\douyuSpider\\images'

    # log file name and log level
    LOG_FILE = "douyu.log"
    LOG_LEVEL = "DEBUG"

     4. pipelines.py

    # -*- coding: utf-8 -*-
    import scrapy
    from scrapy.exceptions import DropItem
    from scrapy.utils.project import get_project_settings
    from scrapy.pipelines.images import ImagesPipeline
    import os


    class DouyuspiderPipeline(ImagesPipeline):
        IMAGES_STORE = get_project_settings().get("IMAGES_STORE")

        # build an image Request from the item's image URL
        def get_media_requests(self, item, info):
            image_url = item["imageUrl"]
            yield scrapy.Request(image_url)

        # When the downloads finish, the results are passed to item_completed() as a
        # list of 2-tuples (success, image_info_or_failure): the first element says
        # whether the download succeeded, and the second is a dict of the form
        # {'url': the image URL, 'path': the storage path relative to IMAGES_STORE,
        #  'checksum': a hash of the image content}
        def item_completed(self, results, item, info):
            # collect the storage paths of the successfully downloaded images
            image_path = [x["path"] for ok, x in results if ok]
            # print image_path
            if not image_path:
                raise DropItem("Item contains no images")
            # rename the downloaded file after the anchor's nickname
            os.rename(self.IMAGES_STORE + "\\" + image_path[0],
                      self.IMAGES_STORE + "\\" + item["nickName"] + ".jpg")
            item["imagePath"] = self.IMAGES_STORE + "\\" + item["nickName"] + ".jpg"
            # print item["imagePath"]
            return item
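    To make the item_completed() comments concrete, here is an illustrative sketch of the results argument for a single successfully downloaded image; every value below is made up, and the 'full/<hash>.jpg' layout is ImagesPipeline's default naming under IMAGES_STORE:

    results = [
        (True, {
            'url': 'https://rpic.douyucdn.cn/example.jpg',                    # hypothetical image URL
            'path': 'full/0a79c46f0e2d2a0a0c2b6c8f3c1d9e7b5a4f3e2d.jpg',      # path relative to IMAGES_STORE
            'checksum': 'b9628c4ab9b595f72f280b90c4fd093d',                   # hypothetical content hash
        }),
    ]
    # the same list comprehension used in item_completed()
    image_path = [x['path'] for ok, x in results if ok]
    print(image_path)   # ['full/0a79c46f0e2d2a0a0c2b6c8f3c1d9e7b5a4f3e2d.jpg']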

     5. Run

    Create a main.py file in the project root for debugging:
    
    from scrapy import cmdline
    
    cmdline.execute("scrapy crawl douyu".split())
    
    Run the program:
    
    python main.py
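    As an alternative to shelling out through cmdline, the crawl can also be driven directly from Python with CrawlerProcess; this sketch assumes it is run from the project root so the project settings are picked up:

    from scrapy.crawler import CrawlerProcess
    from scrapy.utils.project import get_project_settings

    process = CrawlerProcess(get_project_settings())
    process.crawl('douyu')    # spider name, as registered in the project
    process.start()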

    5.2. (Case 2) Sunshine Hotline (阳光热线问政平台) complaint spider

    Target site: Sunshine Hotline (阳光热线问政平台)

    http://wz.sun0769.com/index.php/question/questionType?type=4

    Scrape each complaint post's number, URL, title, and body content.

    items.py

    import scrapy


    class DongguanspiderItem(scrapy.Item):
        # title of each post
        title = scrapy.Field()
        # number of each post
        number = scrapy.Field()
        # text content of each post
        content = scrapy.Field()
        # URL of each post
        url = scrapy.Field()

    spiders/sunwz.py

    # -*- coding: utf-8 -*-
    import scrapy
    from dongguanSpider.items import DongguanspiderItem


    class DongguanSpider(scrapy.Spider):
        name = 'dongguan'
        allowed_domains = ['sun0769.com']
        url = 'http://wz.sun0769.com/index.php/question/questionType?type=4&page='
        headers = {"User-Agent": "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0)"}
        offset = 0
        start_urls = [url + str(offset)]

        # parse each listing page
        def parse(self, response):
            # collect the links to the posts on this page
            links = response.xpath('//div[@class="greyframe"]/table//td/a[2]/@href').extract()

            for link in links:
                # request each post page
                yield scrapy.Request(link, headers=self.headers, callback=self.parse_item)

            # stop paging once the offset reaches the value of the last page
            if self.offset <= 88560:
                self.offset += 30
                # request the next listing page
                yield scrapy.Request(self.url + str(self.offset), headers=self.headers, callback=self.parse)

        # parse each individual post page
        def parse_item(self, response):
            item = DongguanspiderItem()
            titleList = response.xpath('//div[contains(@class, "pagecenter p3")]//strong/text()')[0].extract().strip().split()
            # title of the post
            item['title'] = titleList[0][3:]
            # number of the post
            item['number'] = titleList[1][3:]
            # content of the post: try the variant with images first, then the plain-text variant
            content = response.xpath('//div[@class="contentext"]/text()').extract()
            if len(content) == 0:
                content = response.xpath('//div[@class="c1 text14_2"]/text()').extract()
                item['content'] = "".join(content).strip()
            else:
                item['content'] = "".join(content).strip()
            # URL of the post
            item['url'] = response.url
            yield item
    The above is the plain Spider version; an equivalent CrawlSpider version follows:
    # -*- coding: utf-8 -*-
    import scrapy
    from scrapy.linkextractors import LinkExtractor
    from scrapy.spiders import CrawlSpider, Rule
    from dongguanSpider.items import DongguanspiderItem


    class SunSpider(CrawlSpider):
        name = 'sun'
        allowed_domains = ['sun0769.com']
        start_urls = ['http://wz.sun0769.com/index.php/question/questionType?type=4&page=']
        # matching rule for the listing-page links
        pageLink = LinkExtractor(allow=('type=4'))
        # matching rule for the individual post links
        contentLink = LinkExtractor(allow=r'/html/question/\d+/\d+\.shtml')

        rules = (
            Rule(pageLink),
            Rule(contentLink, callback='parse_item')
        )

        def parse_item(self, response):
            item = DongguanspiderItem()
            titleList = response.xpath('//div[contains(@class, "pagecenter p3")]//strong/text()')[
                0].extract().strip().split()
            # title of the post
            item['title'] = titleList[0][3:]
            # number of the post
            item['number'] = titleList[1][3:]
            # content of the post: try the variant with images first, then the plain-text variant
            content = response.xpath('//div[@class="contentext"]/text()').extract()
            if len(content) == 0:
                content = response.xpath('//div[@class="c1 text14_2"]/text()').extract()
                # content is a list; join it into a string and strip leading/trailing whitespace
                item['content'] = "".join(content).strip()
            else:
                item['content'] = "".join(content).strip()
            # URL of the post
            item['url'] = response.url
            yield item
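    A note on the rules above: when a Rule has no callback, follow defaults to True, so the matched listing pages keep being crawled for more links; when a callback is given, follow defaults to False. A sketch of the same rules with those defaults written out explicitly (behaviour unchanged):

    pageLink = LinkExtractor(allow=('type=4'))
    contentLink = LinkExtractor(allow=r'/html/question/\d+/\d+\.shtml')

    rules = (
        # no callback: follow defaults to True, keep crawling matched listing pages for links
        Rule(pageLink, follow=True),
        # callback given: follow defaults to False, post pages are parsed but not crawled further
        Rule(contentLink, callback='parse_item', follow=False),
    )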

    pipelines.py

    # -*- coding: utf-8 -*-

    # codecs lets us open the output file with an explicit encoding
    import codecs
    import json


    class DongguanspiderPipeline(object):
        def __init__(self):
            # open a writable file with utf-8 encoding
            self.filename = codecs.open('dongguan.json', 'w', encoding='utf-8')

        def process_item(self, item, spider):
            # serialize the item as one JSON object per line
            content = json.dumps(dict(item), ensure_ascii=False) + '\n'
            self.filename.write(content)
            return item

        # close_spider() is the pipeline hook Scrapy calls when the spider finishes
        def close_spider(self, spider):
            self.filename.close()
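    Each processed item therefore becomes one JSON line in dongguan.json. A small sketch of the serialization step on its own (the field values are invented for illustration):

    import json

    item = {
        'title': u'咨询公交线路问题',                                        # hypothetical title
        'number': u'191166',                                                # hypothetical post number
        'content': u'……',                                                   # body text omitted
        'url': u'http://wz.sun0769.com/html/question/201809/000000.shtml',  # hypothetical post URL
    }
    # one JSON object per line; ensure_ascii=False keeps the Chinese text readable
    line = json.dumps(item, ensure_ascii=False) + '\n'
    print(line)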

    settings.py

    ITEM_PIPELINES = {
        'dongguanSpider.pipelines.DongguanspiderPipeline': 300,
    }
    
    # log file name and log level
    LOG_FILE = "dg.log"
    LOG_LEVEL = "DEBUG"

    Create a main.py file in the project root for debugging:
    
    from scrapy import cmdline
    
    cmdline.execute("scrapy crawl sun".split())
    
    Run the program:
    
    py2 main.py

    5.3. (Case 3) Sina categorized news spider

    Crawl the Sina news navigation page: every top-level category, every sub-category under it, the article links under each sub-category, and the news content of each linked article page.


     items.py

    import scrapy


    class SinaspiderItem(scrapy.Item):
        # title of the top-level category
        parentTitle = scrapy.Field()
        # URL of the top-level category
        parentUrl = scrapy.Field()

        # title of the sub-category
        subTitle = scrapy.Field()
        # URL of the sub-category
        subUrl = scrapy.Field()
        # local directory where the sub-category is stored
        subDir = scrapy.Field()

        # URL of the article
        fileUrl = scrapy.Field()
        # title of the article
        title = scrapy.Field()
        # content of the article
        content = scrapy.Field()

    spiders/sina.py
    # -*- coding: utf-8 -*-
    import scrapy
    from sinaSpider.items import SinaspiderItem
    import os


    class SinaSpider(scrapy.Spider):
        name = 'sina'
        allowed_domains = ['sina.com.cn']
        start_urls = ['http://news.sina.com.cn/guide/']

        # parse the navigation page
        def parse(self, response):
            # list of top-level category titles
            parentTitleList = response.xpath('//div[@id="tab01"]//h3/a/text()').extract()
            # list of top-level category URLs
            parentUrlList = response.xpath('//div[@id="tab01"]//h3/a/@href').extract()
            # iterate over the top-level categories
            for i in range(len(parentTitleList)):
                # create a directory named after the top-level category
                parentDir = '.\\Data\\' + parentTitleList[i]
                if not os.path.exists(parentDir):
                    os.makedirs(parentDir)
                # list of sub-category titles under this top-level category
                subTitleList = response.xpath('//div[@id="tab01"]/div[{}]//li/a/text()'.format(i+1)).extract()
                # list of sub-category URLs under this top-level category
                subUrlList = response.xpath('//div[@id="tab01"]/div[{}]//li/a/@href'.format(i+1)).extract()
                # iterate over the sub-categories of this top-level category
                for j in range(len(subTitleList)):
                    # create a directory named after the sub-category
                    subDir = parentDir + '\\' + subTitleList[j]
                    if not os.path.exists(subDir):
                        os.makedirs(subDir)
                    item = SinaspiderItem()
                    item['parentTitle'] = parentTitleList[i]
                    item['parentUrl'] = parentUrlList[i]
                    item['subTitle'] = subTitleList[j]
                    item['subUrl'] = subUrlList[j]
                    item['subDir'] = subDir
                    # request each sub-category page; meta attached to the Request is
                    # handed to the callback through the response
                    yield scrapy.Request(item['subUrl'], meta={'meta_1': item}, callback=self.parse_news)

        # parse each sub-category page: collect the article links under it
        def parse_news(self, response):
            # retrieve the meta_1 data sent with the Request
            meta_1 = response.meta['meta_1']
            fileUrlList = response.xpath('//a/@href').re(r'.*\d+\.shtml')
            for i in range(len(fileUrlList)):
                item = SinaspiderItem()
                item['parentTitle'] = meta_1['parentTitle']
                item['parentUrl'] = meta_1['parentUrl']
                item['subTitle'] = meta_1['subTitle']
                item['subUrl'] = meta_1['subUrl']
                item['subDir'] = meta_1['subDir']
                item['fileUrl'] = fileUrlList[i]
                # request each article page, passing meta_2 on to the parse_content callback
                yield scrapy.Request(item['fileUrl'], meta={'meta_2': item}, callback=self.parse_content)

        # parse each article page: extract the title and body text
        def parse_content(self, response):
            # retrieve the meta_2 data sent with the Request
            item = response.meta['meta_2']
            # title of the article
            title = response.xpath('//h1[@class="main-title"]/text()')[0].extract()
            content = ''
            contentList = response.xpath('//div[@class="article"]/p/text()').extract()
            # concatenate the article paragraphs
            for content_one in contentList:
                content += content_one
            item['title'] = title
            item['content'] = content

            yield item
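    The subtle part of parse() above is how each sub-category list is paired with its top-level category: the div[{}] index selects the i-th child block of div#tab01 (XPath indices are 1-based). A runnable sketch against a made-up miniature of the navigation HTML:

    from scrapy import Selector

    html = '''
    <div id="tab01">
      <div><h3><a href="http://news.sina.com.cn/">新闻</a></h3>
           <ul><li><a href="http://news.sina.com.cn/china/">国内</a></li></ul></div>
      <div><h3><a href="http://sports.sina.com.cn/">体育</a></h3>
           <ul><li><a href="http://sports.sina.com.cn/nba/">NBA</a></li></ul></div>
    </div>'''
    sel = Selector(text=html)
    parents = sel.xpath('//div[@id="tab01"]//h3/a/text()').extract()
    for i in range(len(parents)):
        # div[1] is the first category block, div[2] the second, and so on
        subs = sel.xpath('//div[@id="tab01"]/div[{}]//li/a/text()'.format(i + 1)).extract()
        print(parents[i] + ': ' + ', '.join(subs))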

    pipelines.py

    class SinaspiderPipeline(object):

        def process_item(self, item, spider):
            fileUrl = item['fileUrl']
            # name the output file after the article's URL
            fileName = item['subDir'] + '\\' + fileUrl[7:-6].replace('/', '_') + '.txt'
            with open(fileName, 'w') as f:
                f.write(item['content'].encode('utf-8'))

            return item
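    A worked example of that filename slicing with a made-up article URL: [7:-6] drops the leading 'http://' (7 characters) and the trailing '.shtml' (6 characters), and the remaining slashes become underscores:

    fileUrl = 'http://news.sina.com.cn/c/2018-09-24/doc-123456.shtml'   # hypothetical URL
    fileName = fileUrl[7:-6].replace('/', '_') + '.txt'
    print(fileName)   # news.sina.com.cn_c_2018-09-24_doc-123456.txt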

     settings.py

    USER_AGENT = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36"

    ITEM_PIPELINES = {
        'sinaSpider.pipelines.SinaspiderPipeline': 300,
    }

    # log file name and log level
    LOG_FILE = "sina.log"
    LOG_LEVEL = "DEBUG"

    Create a main.py file in the project root for debugging:
    
    from scrapy import cmdline
    
    cmdline.execute("scrapy crawl sina".split())
    
    Run the program:
    
    py2 main.py