• Scraping a company's recruitment postings with Python and Scrapy


    1. Create the Scrapy project

    scrapy startproject gosuncn

    2. Generate the spider

    cd gosuncn
    scrapy genspider gaoxinxing gosuncn.zhiye.com
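
    After these two commands, the generated project typically has the following layout (the exact files can vary a little between Scrapy versions):

    gosuncn/
        scrapy.cfg                 # deploy/config entry point
        gosuncn/
            __init__.py
            items.py               # optional item definitions
            middlewares.py
            pipelines.py           # item pipelines (step 6)
            settings.py            # project settings (step 5)
            spiders/
                __init__.py
                gaoxinxing.py      # the spider created by genspider (step 4)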

    3. Run the spider

    scrapy crawl gaoxinxing
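
    If you prefer to launch the crawl from an IDE instead of a shell, a minimal launcher script can wrap the same command (the file name start.py is just an assumption, not part of the original project):

    # start.py - run the spider without typing the command in a shell
    from scrapy import cmdline

    cmdline.execute("scrapy crawl gaoxinxing".split())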

    4. Spider code (gaoxinxing.py)

    # -*- coding: utf-8 -*-
    import scrapy
    import logging

    # module-level logger; output goes to the file set by LOG_FILE in settings.py
    logger = logging.getLogger(__name__)


    class GaoxinxingSpider(scrapy.Spider):
        name = 'gaoxinxing'
        allowed_domains = ['gosuncn.zhiye.com']
        start_urls = ['http://gosuncn.zhiye.com/Social']
        next_page_num = 1  # page counter used to build the next-page URL

        def parse(self, response):
            # skip the header row of the jobs table
            tr_list = response.xpath("//table[@class='jobsTable']/tr")[1:]
            for tr in tr_list:
                item = {}
                item["position"] = tr.xpath(".//td[1]/a/text()").extract_first()
                item["platform"] = tr.xpath(".//td[3]/text()").extract_first()
                item["num"] = tr.xpath(".//td[4]/text()").extract_first()
                item["time"] = tr.xpath(".//td[6]/text()").extract_first()
                logger.warning(item)  # WARNING level so it shows up with LOG_LEVEL = "WARNING"
                yield item

            # the pager's "next" link could also be read directly:
            # next_page_url = response.xpath("//div[@class='pager2']//a[@class='next']/@href").extract_first()
            self.next_page_num += 1
            if self.next_page_num <= 4:  # the listing only has a few pages
                next_url = "http://gosuncn.zhiye.com/social/?PageIndex=" + str(self.next_page_num)
                print(next_url)  # quick visual check of the next page URL
                yield scrapy.Request(
                    next_url,
                    callback=self.parse
                )
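
    The spider yields plain Python dicts. As a sketch, an equivalent typed item could be declared in items.py; the GosuncnItem name and this field list are assumptions based on the keys used above, not something defined in the original post:

    # items.py - optional typed item mirroring the dict keys used in the spider
    import scrapy

    class GosuncnItem(scrapy.Item):
        position = scrapy.Field()  # job title
        platform = scrapy.Field()  # business unit / platform
        num = scrapy.Field()       # number of openings
        time = scrapy.Field()      # publish date

    With this in place, parse() would build item = GosuncnItem() instead of a dict; the pipeline code would not need to change.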

    5. The settings.py file

    # -*- coding: utf-8 -*-
    
    # Scrapy settings for gosuncn project
    #
    # For simplicity, this file contains only settings considered important or
    # commonly used. You can find more settings consulting the documentation:
    #
    #     https://doc.scrapy.org/en/latest/topics/settings.html
    #     https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
    #     https://doc.scrapy.org/en/latest/topics/spider-middleware.html
    
    BOT_NAME = 'gosuncn'
    
    SPIDER_MODULES = ['gosuncn.spiders']
    NEWSPIDER_MODULE = 'gosuncn.spiders'
    
    # Crawl responsibly by identifying yourself (and your website) on the user-agent
    #USER_AGENT = 'gosuncn (+http://www.yourdomain.com)'
    
    # Obey robots.txt rules
    ROBOTSTXT_OBEY = True
    
    # Configure maximum concurrent requests performed by Scrapy (default: 16)
    #CONCURRENT_REQUESTS = 32
    
    # Configure a delay for requests for the same website (default: 0)
    # See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
    # See also autothrottle settings and docs
    #DOWNLOAD_DELAY = 3
    # The download delay setting will honor only one of:
    #CONCURRENT_REQUESTS_PER_DOMAIN = 16
    #CONCURRENT_REQUESTS_PER_IP = 16
    
    # Disable cookies (enabled by default)
    #COOKIES_ENABLED = False
    
    # Disable Telnet Console (enabled by default)
    #TELNETCONSOLE_ENABLED = False
    
    # Override the default request headers:
    #DEFAULT_REQUEST_HEADERS = {
    #   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    #   'Accept-Language': 'en',
    #}
    
    # Enable or disable spider middlewares
    # See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
    #SPIDER_MIDDLEWARES = {
    #    'gosuncn.middlewares.GosuncnSpiderMiddleware': 543,
    #}
    
    # Enable or disable downloader middlewares
    # See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
    #DOWNLOADER_MIDDLEWARES = {
    #    'gosuncn.middlewares.GosuncnDownloaderMiddleware': 543,
    #}
    
    # Enable or disable extensions
    # See https://doc.scrapy.org/en/latest/topics/extensions.html
    #EXTENSIONS = {
    #    'scrapy.extensions.telnet.TelnetConsole': None,
    #}
    
    # Configure item pipelines
    # See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
    ITEM_PIPELINES = {
       'gosuncn.pipelines.GosuncnPipeline': 300,
    }
    LOG_LEVEL = "WARNING"   # only WARNING and above is written out
    LOG_FILE = "./log.log"  # send log output to a file instead of the console
    # Enable and configure the AutoThrottle extension (disabled by default)
    # See https://doc.scrapy.org/en/latest/topics/autothrottle.html
    #AUTOTHROTTLE_ENABLED = True
    # The initial download delay
    #AUTOTHROTTLE_START_DELAY = 5
    # The maximum download delay to be set in case of high latencies
    #AUTOTHROTTLE_MAX_DELAY = 60
    # The average number of requests Scrapy should be sending in parallel to
    # each remote server
    #AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
    # Enable showing throttling stats for every response received:
    #AUTOTHROTTLE_DEBUG = False
    
    # Enable and configure HTTP caching (disabled by default)
    # See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
    #HTTPCACHE_ENABLED = True
    #HTTPCACHE_EXPIRATION_SECS = 0
    #HTTPCACHE_DIR = 'httpcache'
    #HTTPCACHE_IGNORE_HTTP_CODES = []
    #HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
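
    The value 300 in ITEM_PIPELINES is a priority in the 0-1000 range, and lower numbers run first. If a second pipeline were added later (the JsonWriterPipeline name below is hypothetical; see the sketch after step 6), the ordering would be expressed like this:

    # settings.py - hypothetical ordering of two pipelines
    ITEM_PIPELINES = {
        'gosuncn.pipelines.GosuncnPipeline': 300,     # runs first
        'gosuncn.pipelines.JsonWriterPipeline': 800,  # runs after GosuncnPipeline
    }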

    6. The pipelines.py file

    # -*- coding: utf-8 -*-
    
    # Define your item pipelines here
    #
    # Don't forget to add your pipeline to the ITEM_PIPELINES setting
    # See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
    
    
    class GosuncnPipeline(object):
        def process_item(self, item, spider):
            # placeholder pipeline: print each item and pass it through unchanged
            print(item)
            return item
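
    The pipeline above only prints each item. As a sketch of a more useful variant (hypothetical, not part of the original post), the standard open_spider/close_spider hooks can append every item to a JSON-lines file:

    # pipelines.py - hypothetical pipeline that persists items as JSON lines
    import json

    class JsonWriterPipeline(object):
        def open_spider(self, spider):
            # one output file per crawl; utf-8 keeps Chinese text readable
            self.file = open("jobs.jl", "w", encoding="utf-8")

        def close_spider(self, spider):
            self.file.close()

        def process_item(self, item, spider):
            self.file.write(json.dumps(dict(item), ensure_ascii=False) + "\n")
            return item

    To activate it, register it in ITEM_PIPELINES as shown in step 5.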
• Original post: https://www.cnblogs.com/ywjfx/p/11080099.html