• Crawling NetEase Cloud Music Playlists with Python




    1. Key points

    The crawler runs in a single thread and does not log in. There are three key points when scraping NetEase Cloud Music playlists (a minimal sketch of the pagination logic follows this list):

    • The URL is https://music.163.com/discover/playlist/?order=hot&cat=%E8%AF%B4%E5%94%B1&limit=35&offset=105
    • NetEase Cloud Music paginates through the limit and offset URL parameters
      • limit is the number of playlists shown per page (changing it has no effect)
      • offset encodes the current page: page = offset / limit + 1, so offset = (page - 1) * limit
    • The form data (froms) sent with the GET request mirrors the URL parameters
      • cat is the playlist category; the Chinese value must be URL-encoded
      • order selects the hottest (hot) or the newest (new) playlists
      • limit matches the URL parameter
      • offset matches the URL parameter
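
    Below is a minimal sketch of how the offset can be derived from a page number and how the paginated URL can be assembled; the helper name build_playlist_url and the example values are illustrative assumptions, not part of the original script.

    from urllib import parse

    # Playlists per page; the site ignores other values.
    LIMIT = 35

    def build_playlist_url(cat, order="hot", page=1):
        """Hypothetical helper: build the playlist URL for a given page."""
        offset = (page - 1) * LIMIT          # page = offset / limit + 1
        params = {
            "cat": cat,                      # category, e.g. "说唱"; urlencode handles the Chinese
            "order": order,                  # "hot" or "new"
            "limit": LIMIT,
            "offset": offset,
        }
        return "https://music.163.com/discover/playlist/?" + parse.urlencode(params)

    # Page 4 with limit 35 gives offset 105, matching the sample URL above.
    print(build_playlist_url("说唱", page=4))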



    2. Sample output



    3. Source code

    #! /usr/bin/python3
    # _*_ coding:utf-8 _*_
    
    """
    
    @ File   :网易歌单爬取.py
    @ Author :LiuHeDong
    @ Mail   :liuhedong135@163.com
    @ Date   :2019-04-15 18:28:48
    
    """
    
    from urllib import parse
    from lxml import etree
    from urllib3 import disable_warnings
    import requests
    
    class Wangyiyun(object):
    
        def __init__(self, **kwargs):
            # Playlist category (song style)
            self.types = kwargs['types']
            # Sort order of the playlists
            self.years = kwargs['years']
            # Page currently being crawled (defaults to the first page)
            self.pages = kwargs.get('pages', 1)
            # URL parameters that control pagination
            self.limit = 35
            self.offset = self.limit * self.pages - self.limit
            # Base URL of the request
            self.url = "https://music.163.com/discover/playlist/?"
    
    
        # Build the request headers (could be extended with rotating User-Agent strings; see the sketch after the script)
        def set_header(self):
            self.header = {
                "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36",
                "Referer": "https://music.163.com/",
                "Upgrade-Insecure-Requests": '1',
            }
            return self.header
    
        # Build the request form data (the same key/value pairs as the URL query string)
        def set_froms(self):
            self.froms = {
                "cat": self.types,        # raw category string; parse.urlencode() encodes the Chinese later
                "order": self.years,
                "limit": self.limit,
                "offset": self.offset,
            }
            return self.froms
    
        # Parse the page source and extract the useful fields
        def parsing_codes(self):
            page = etree.HTML(self.code)
            # Playlist title
            self.title = page.xpath('//div[@class="u-cover u-cover-1"]/a[@title]/@title')
            # Playlist author
            self.author = page.xpath('//p/a[@class="nm nm-icn f-thide s-fc3"]/text()')
            # Play count
            self.listen = page.xpath('//span[@class="nb"]/text()')
            # Playlist link
            self.link = page.xpath('//div[@class="u-cover u-cover-1"]/a[@href]/@href')

            # Print the results
            for i in zip(self.title, self.link, self.author, self.listen):
                print("[歌单名称]:{}\n[发布作者]:{}\n[总播放量]:{}\n[歌单链接]:{}\n".format(
                    i[0], i[2], i[3], "https://music.163.com/" + i[1]))
            print('第{}页'.format(self.pages).center(50, '='))
    
        # Fetch the page source
        def get_code(self):
            # Suppress the InsecureRequestWarning triggered by verify=False
            disable_warnings()
            self.new_url = self.url + parse.urlencode(self.froms)
            self.code = requests.get(
                url = self.new_url,
                headers = self.header,
                verify = False,
            ).text
    
        # Refresh the page number and offset when crawling multiple pages
        def multi(self, page):
            self.pages = page
            self.offset = self.limit * page - self.limit
    
    
    
    
    if __name__ == '__main__':
        #=======================================
        # Parameters
        # Playlist category (song style)
        types = "说唱"
        # Sort order of the playlists: hottest = hot, newest = new
        years = "hot"
        # Number of pages to crawl
        pages = 1
        #=======================================

        # =======================================
        # Example: crawl the pages given by the pages variable (multi-page)
        music = Wangyiyun(
            types = types,
            years = years,
        )

        for i in range(pages):
            page = i + 1            # there is no page 0
            music.multi(page)       # refresh the page number and offset for the current page
            music.set_header()      # build the request headers
            music.set_froms()       # build the form data
            music.get_code()        # fetch the page source
            music.parsing_codes()   # parse the source and print the data
        # =======================================

        #=======================================
        # Example: crawl a single page
        # music = Wangyiyun(
        #     types=types,
        #     years=years,
        # )
        # music.set_header()
        # music.set_froms()
        # music.get_code()
        # music.parsing_codes()
        #=======================================
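
    The set_header comment above notes that the headers could be extended with different User-Agent strings. Below is a minimal sketch of that idea, assuming a small hand-picked pool of User-Agent values; the random_header helper and the list entries are illustrative, not part of the original script.

    import random

    # Illustrative pool of desktop User-Agent strings; any realistic values would do.
    USER_AGENTS = [
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36",
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_4) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/12.1 Safari/605.1.15",
        "Mozilla/5.0 (X11; Linux x86_64; rv:66.0) Gecko/20100101 Firefox/66.0",
    ]

    def random_header():
        """Hypothetical helper: headers with a randomly chosen User-Agent."""
        return {
            "User-Agent": random.choice(USER_AGENTS),
            "Referer": "https://music.163.com/",
            "Upgrade-Insecure-Requests": "1",
        }

    Swapping set_header for something like random_header spreads requests over several browser signatures, which can make repeated crawling a little less conspicuous.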
    
  • Original article: https://www.cnblogs.com/liuhedong/p/10712784.html