• Scraping Dangdang data with Python Scrapy


    scrapy startproject dangdang

    cd dangdang


    scrapy genspider dangdang "dangdang.com"
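    For reference, genspider creates a spider skeleton that looks roughly like the one below before it is edited. This is only an approximation: the exact template varies a little between Scrapy versions, and some versions refuse to create a spider whose name matches the project name.

    # dangdang/spiders/dangdang.py as generated (approximate)
    # -*- coding: utf-8 -*-
    import scrapy


    class DangdangSpider(scrapy.Spider):
        name = 'dangdang'
        allowed_domains = ['dangdang.com']
        start_urls = ['http://dangdang.com/']

        def parse(self, response):
            pass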


    Core code

    dangdang.py

    # -*- coding: utf-8 -*-
    import json
    from copy import deepcopy

    import scrapy


    class DangdangSpider(scrapy.Spider):
        name = 'dangdang'
        allowed_domains = ['dangdang.com']
        start_urls = ['http://e.dangdang.com/list-WY1-dd_sale-0-1.html']

        def parse(self, response):
            # Top-level category blocks on the e.dangdang.com listing page
            b_list = response.xpath("/html/body/div[5]/div[2]/div[1]/div/div")

            item = {}
            for first_level in b_list:
                item["b_cate_name"] = first_level.xpath(".//a/h3/text()").extract_first()
                item["b_cate_href"] = first_level.xpath(".//a/@href").extract_first()
                # print(item)
                li_list = first_level.xpath("./ul/li")
                # Special case: some categories list their sub-categories as <li> elements,
                # others wrap them directly in <a> tags (handled in the else branch below)
                if len(li_list) > 0:
                    for li in li_list:
                        item["s_cate_href"] = li.xpath(".//a/@href").extract_first()
                        item["s_cate_name"] = li.xpath(".//a/h4/text()").extract_first()
                        item["s_cate_type"] = li.xpath(".//a/h4/@data-type").extract_first()
                        item["s_books"] = (
                            "http://e.dangdang.com/media/api.go?action=mediaCategoryLeaf&promotionType=1"
                            "&deviceSerialNo=html5&macAddr=html5&channelType=html5&permanentId"
                            "=20200524135342174347312679598555233&returnType=json&channelId=70000"
                            "&clientVersionNo=5.8.4&platformSource=DDDS-P&fromPlatform=106&deviceType"
                            "=pconline&token=&start=1&end=3&category=" + item["s_cate_type"]
                            + "&dimension=dd_sale&order=0"
                        )
                        # deepcopy so each request carries its own snapshot of the shared dict
                        yield scrapy.Request(
                            item["s_books"],
                            callback=self.parse_book_list,
                            meta={"item": deepcopy(item)}
                        )
                    # print(item)
                else:
                    a_list = first_level.xpath("./ul/a")
                    for a in a_list:
                        item["s_cate_href"] = a.xpath("@href").extract_first()
                        item["s_cate_name"] = a.xpath("./li/text()").extract_first()
                        item["s_cate_type"] = a.xpath("./li/@data-type").extract_first()
                        if item["s_cate_type"] is not None:
                            item["s_books"] = (
                                "http://e.dangdang.com/media/api.go?action=mediaCategoryLeaf&promotionType=1"
                                "&deviceSerialNo=html5&macAddr=html5&channelType=html5&permanentId"
                                "=20200524135342174347312679598555233&returnType=json&channelId=70000"
                                "&clientVersionNo=5.8.4&platformSource=DDDS-P&fromPlatform=106&deviceType"
                                "=pconline&token=&start=1&end=3&category=" + item["s_cate_type"]
                                + "&dimension=dd_sale&order=0"
                            )
                            yield scrapy.Request(
                                item["s_books"],
                                callback=self.parse_book_list,
                                meta={"item": deepcopy(item)}
                            )

        def parse_book_list(self, response):
            item = response.meta["item"]
            data = json.loads(response.text)
            # The API returns JSON; books for a category sit under data -> saleList -> mediaList
            book_list = data['data'].get('saleList', [])
            # print(book_list)
            if len(book_list) > 0:
                for book in book_list:
                    item["book_author"] = book['mediaList'][0]["authorPenname"]
                    item["book_title"] = book['mediaList'][0]["title"]
                    print(item)
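
    The spider above only prints each finished item in parse_book_list. As a minimal sketch (the file name, class name, and output path below are my own choices, not from the original post), a pipeline like the following could persist the results as JSON lines; for the pipeline to receive anything, the spider would need to yield the item instead of just printing it.

    pipelines.py (a hypothetical sketch)

    # -*- coding: utf-8 -*-
    import json


    class DangdangBookPipeline:
        """Append every crawled book item to a JSON-lines file."""

        def open_spider(self, spider):
            self.fp = open("dangdang_books.jsonl", "w", encoding="utf-8")

        def close_spider(self, spider):
            self.fp.close()

        def process_item(self, item, spider):
            # item is a plain dict here, since the spider builds dicts rather than Item objects
            self.fp.write(json.dumps(dict(item), ensure_ascii=False) + "\n")
            return item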




    settings.py

    # -*- coding: utf-8 -*-

    # Scrapy settings for book project
    #
    # For simplicity, this file contains only settings considered important or
    # commonly used. You can find more settings consulting the documentation:
    #
    # https://docs.scrapy.org/en/latest/topics/settings.html
    # https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
    # https://docs.scrapy.org/en/latest/topics/spider-middleware.html
    from datetime import datetime

    BOT_NAME = 'book'

    SPIDER_MODULES = ['book.spiders']
    NEWSPIDER_MODULE = 'book.spiders'
    LOG_LEVEL = "WARNING"
    USER_AGENT = 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'

    LOG_FILE = "log.txt"


    # Crawl responsibly by identifying yourself (and your website) on the user-agent
    #USER_AGENT = 'book (+http://www.yourdomain.com)'

    # Obey robots.txt rules
    ROBOTSTXT_OBEY = False

    # Configure maximum concurrent requests performed by Scrapy (default: 16)
    #CONCURRENT_REQUESTS = 32

    # Configure a delay for requests for the same website (default: 0)
    # See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
    # See also autothrottle settings and docs
    #DOWNLOAD_DELAY = 3
    # The download delay setting will honor only one of:
    #CONCURRENT_REQUESTS_PER_DOMAIN = 16
    #CONCURRENT_REQUESTS_PER_IP = 16

    # Disable cookies (enabled by default)
    #COOKIES_ENABLED = False

    # Disable Telnet Console (enabled by default)
    #TELNETCONSOLE_ENABLED = False

    # Override the default request headers:
    #DEFAULT_REQUEST_HEADERS = {
    # 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
    # 'Accept-Language': 'en',
    #}

    # Enable or disable spider middlewares
    # See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
    #SPIDER_MIDDLEWARES = {
    # 'book.middlewares.BookSpiderMiddleware': 543,
    #}

    # Enable or disable downloader middlewares
    # See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
    #DOWNLOADER_MIDDLEWARES = {
    # 'book.middlewares.BookDownloaderMiddleware': 543,
    #}

    # Enable or disable extensions
    # See https://docs.scrapy.org/en/latest/topics/extensions.html
    #EXTENSIONS = {
    # 'scrapy.extensions.telnet.TelnetConsole': None,
    #}

    # Configure item pipelines
    # See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
    #ITEM_PIPELINES = {
    # 'book.pipelines.BookPipeline': 300,
    #}

    # Enable and configure the AutoThrottle extension (disabled by default)
    # See https://docs.scrapy.org/en/latest/topics/autothrottle.html
    #AUTOTHROTTLE_ENABLED = True
    # The initial download delay
    #AUTOTHROTTLE_START_DELAY = 5
    # The maximum download delay to be set in case of high latencies
    #AUTOTHROTTLE_MAX_DELAY = 60
    # The average number of requests Scrapy should be sending in parallel to
    # each remote server
    #AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
    # Enable showing throttling stats for every response received:
    #AUTOTHROTTLE_DEBUG = False

    # Enable and configure HTTP caching (disabled by default)
    # See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
    #HTTPCACHE_ENABLED = True
    #HTTPCACHE_EXPIRATION_SECS = 0
    #HTTPCACHE_DIR = 'httpcache'
    #HTTPCACHE_IGNORE_HTTP_CODES = []
    #HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
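
    To actually use a pipeline like the sketch above, the commented-out ITEM_PIPELINES setting would be enabled with the project's own module path and the spider run with scrapy crawl. The prefix below is an assumption: the settings shown here use 'book' as the module while the startproject command used 'dangdang', so adjust it to whatever your project is actually called.

    ITEM_PIPELINES = {
        'dangdang.pipelines.DangdangBookPipeline': 300,
    }

    scrapy crawl dangdang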
  • Original post: https://www.cnblogs.com/wchxj/p/12952534.html