• The requests module


Introduction to the requests module

    #Introduction: requests lets you simulate browser requests. Compared with the urllib we used before, the requests API is much more convenient (it is essentially a wrapper around urllib3)
    
    #Note: after requests downloads the page content, it does NOT execute JavaScript; for data loaded by JS you have to analyze the target site yourself and issue additional requests
    
    #Install: pip3 install requests
    
    #Request methods: the most commonly used are requests.get() and requests.post()
    >>> import requests
    >>> r = requests.get('https://api.github.com/events')
    >>> r = requests.post('http://httpbin.org/post', data = {'key':'value'})
    >>> r = requests.put('http://httpbin.org/put', data = {'key':'value'})
    >>> r = requests.delete('http://httpbin.org/delete')
    >>> r = requests.head('http://httpbin.org/get')
    >>> r = requests.options('http://httpbin.org/get')
    #Before diving into requests, it helps to be familiar with the HTTP protocol: https://www.cnblogs.com/liuqingzheng/p/10191056.html
    

    GET requests

    Basic request

    import requests
    response=requests.get('http://dig.chouti.com/')
    print(response.text)
    

    GET with parameters -> params

    import requests
    response=requests.get('https://www.baidu.com/s?wd=python&pn=1',
                          headers={
                            'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.75 Safari/537.36',
                          })
    print(response.text)
    # response.encoding=response.apparent_encoding
    with open('python.html','wb') as f:
        f.write(response.content)
    
    
    #If the query keyword contains Chinese or other special characters, it has to be URL-encoded
    from urllib.parse import urlencode,unquote  # URL encoding and decoding helpers
    wd='美女'  # a Chinese keyword ("beauty") that needs encoding
    encode_res=urlencode({'k':wd},encoding='utf-8')
    keyword=encode_res.split('=')[1]
    print(keyword)
    # then splice the encoded keyword into the URL
    url='https://www.baidu.com/s?wd=%s&pn=1' %keyword
    
    response=requests.get(url,
                          headers={
                            'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.75 Safari/537.36',
                          })
    
    with open('girl.html','wb') as f:
        f.write(response.content)
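    
    #Note: requests can build and encode the query string for you via the params
    #argument, so the manual urlencode above is rarely needed. A minimal sketch:
    import requests
    response=requests.get('https://www.baidu.com/s',
                          params={'wd':'美女','pn':1},  # requests URL-encodes these, Chinese included
                          headers={
                            'User-Agent':'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.75 Safari/537.36',
                          })
    print(response.url)  # the fully encoded URL that was actually requested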
    

    GET with parameters -> headers

    #We usually need to send request headers along with a request; they are the key to disguising ourselves as a browser. The commonly useful ones are:
    Host
    Referer #large sites often use this header to check where the request came from
    User-Agent #identifies the client
    Cookie #cookies travel in the request headers, but requests has a dedicated parameter for them, so don't put them inside headers={}
    
    
    #Adding headers (servers inspect request headers; without them the request may be rejected, e.g. https://www.zhihu.com/explore)
    import requests
    response=requests.get('https://www.zhihu.com/explore')
    response.status_code #500
    
    
    #Custom headers
    headers={
        'User-Agent':'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/46.0.2490.76 Mobile Safari/537.36',
    
    }
    response=requests.get('https://www.zhihu.com/explore',
                         headers=headers)
    print(response.status_code) #200
    
    

    GET with parameters -> cookies

    import requests
    
    # Option 1: pass cookies via the cookies parameter
    Cookies={   'user_session':'rzNme4L6LTH7QSresq8w0BVYhTNt5GS-asNnkOe7_FZ2CjB6',
    }
    
    response=requests.get('https://github.com/settings/emails',
                 cookies=Cookies) #github places no special restrictions on headers, so no custom User-Agent is needed; other sites may require one
    
    print('306334678@qq.com' in response.text) #True
    
    
    # Option 2: send the cookie string directly inside the headers (the cookies parameter otherwise accepts a dict or a CookieJar object)
    header = {
         'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.105 Safari/537.36',
         'cookie':'key=asdfasdfasdfsdfsaasdf;key2=asdfasdf;key3=asdfasdf'
    }
    res=requests.get('https://github.com/settings/emails', headers=header)
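    
    # A small sketch of the CookieJar variant mentioned above: RequestsCookieJar
    # scopes cookies by domain and path (the session token here is the same
    # placeholder value used in Option 1)
    import requests
    jar=requests.cookies.RequestsCookieJar()
    jar.set('user_session','rzNme4L6LTH7QSresq8w0BVYhTNt5GS-asNnkOe7_FZ2CjB6',domain='github.com',path='/')
    res=requests.get('https://github.com/settings/emails', cookies=jar)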
    

    POST requests

    #GET requests
    GET is HTTP's default request method
         * no request body
         * the data is limited to roughly 1K (a practical URL-length limit imposed by browsers and servers, not by HTTP itself)
         * GET data is exposed in the browser's address bar
    
    Common GET operations:
           1. Entering a URL directly in the browser's address bar always issues a GET
           2. Clicking a hyperlink on a page always issues a GET
           3. Submitting a form defaults to GET, but the form can be set to POST
    
    
    #POST requests
    (1). Data does not appear in the address bar
    (2). There is no upper limit on the size of the data
    (3). There is a request body
    (4). Chinese characters in the request body are URL-encoded!
    
    
    #!!! requests.post() is used exactly like requests.get(); the one special thing is that requests.post() takes a data parameter holding the request body
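    
    # A minimal POST, as a sketch against httpbin (which simply echoes the request back):
    import requests
    response=requests.post('http://httpbin.org/post', data={'name':'egon'})
    print(response.json()['form'])  # {'name': 'egon'}, the form-encoded body as the server saw it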
    

    Sending a POST request to simulate a browser login

    '''
    1. Analyzing the target site
        Open https://github.com/login in a browser,
        enter a wrong username/password, and capture the traffic.
        The login turns out to be a POST to: https://github.com/session
        The request headers carry a cookie,
        and the request body contains:
            commit:Sign in
            utf8:✓
            authenticity_token:lbI8IJCwGslZS8qJPnof5e7ZkCoSoMn6jmDTsL1r/m06NLyIbw7vCrpwrFAPzHMep3Tmf/TSJVoXWrvDZaVwxQ==
            login:egonlin
            password:123
    
    
    2. The flow
        First GET https://github.com/login to obtain the initial cookie and the authenticity_token.
        Then POST to https://github.com/session, carrying the initial cookie and the request body (authenticity_token, username, password, ...).
        Finally we receive the logged-in cookie.
    
        ps: if the password is sent in encrypted form, enter a wrong username with the correct password, then grab the encrypted password from the browser's capture. GitHub sends the password in plain text.
    '''
    
    import requests
    import re
    
    #First request
    r1=requests.get('https://github.com/login')
    r1_cookie=r1.cookies.get_dict() #grab the initial cookie (not yet authorized)
    authenticity_token=re.findall(r'name="authenticity_token".*?value="(.*?)"',r1.text)[0] #extract the CSRF token from the page
    
    #Second request: POST to the login endpoint with the initial cookie, the token, and the credentials
    data={
        'commit':'Sign in',
        'utf8':'✓',
        'authenticity_token':authenticity_token,
        'login':'317828332@qq.com',
        'password':'alex3714'
    }
    r2=requests.post('https://github.com/session',
                 data=data,
                 cookies=r1_cookie
                 )
    
    
    login_cookie=r2.cookies.get_dict()
    
    
    #Third request: from now on login_cookie is all we need, e.g. to access personal settings
    r3=requests.get('https://github.com/settings/emails',
                    cookies=login_cookie)
    
    print('317828332@qq.com' in r3.text) #True
    
    Automated GitHub login (handling cookies ourselves)
    
    
    
    
    # Use a session so cookies are handled automatically
    import requests
    import re
    
    session=requests.session()
    #First request
    r1=session.get('https://github.com/login')
    authenticity_token=re.findall(r'name="authenticity_token".*?value="(.*?)"',r1.text)[0] #extract the CSRF token from the page
    
    #Second request
    data={
        'commit':'Sign in',
        'utf8':'✓',
        'authenticity_token':authenticity_token,
        'login':'317828332@qq.com',
        'password':'alex3714'
    }
    r2=session.post('https://github.com/session',
                 data=data,
                 )
    
    #Third request
    r3=session.get('https://github.com/settings/emails')
    
    print('317828332@qq.com' in r3.text) #True
    
    requests.session() saves and resends the cookie information for us automatically
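    
    # A small sketch of the same idea against httpbin: the session keeps cookies
    # across requests (requests.Session() is the documented class; requests.session()
    # is an alias). httpbin sets a cookie on the first call and echoes it on the second.
    import requests
    with requests.Session() as s:
        s.get('http://httpbin.org/cookies/set/k/v')  # server sets cookie k=v
        r=s.get('http://httpbin.org/cookies')        # the cookie is sent back automatically
        print(r.json())  # {'cookies': {'k': 'v'}}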
    

    Sending JSON: data vs. json parameter

    requests.post(url='xxxxxxxx',
                  data={'xxx':'yyy'}) #no content-type specified; the default is application/x-www-form-urlencoded
    
    #If we set the content-type header to application/json but pass the values via data, the server cannot read them
    requests.post(url='',
                  data={'':1,},
                  headers={
                      'content-type':'application/json'
                  })
    
    
    requests.post(url='',
                  json={'':1,},
                  ) #json= serializes the body and sets the content-type to application/json automatically
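    
    # A quick sketch against httpbin showing the difference: with json= the server
    # receives a JSON body rather than form fields
    import requests
    response=requests.post('http://httpbin.org/post', json={'name':'egon'})
    print(response.json()['json'])                      # {'name': 'egon'}, parsed from the JSON body
    print(response.json()['headers']['Content-Type'])   # application/json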
    
    
    import json
    response=requests.post('http://127.0.0.1:8000/index/',data={'name':'lqz'})
    print(type(response.text))  # the response text
    print(json.loads(response.text))
    print(response.json())  # equivalent to the line above
    

    The Response object

    Response attributes

    import requests
    response=requests.get('http://www.jianshu.com')
    
    print(response.text)  # the response body as text
    print(response.content)  # the response body as bytes
    
    print(response.status_code)  # status code
    print(response.headers)    # response headers
    print(response.cookies)   # cookies
    print(response.cookies.get_dict()) # cookies as a dict
    print(response.cookies.items())  # cookies as (key, value) pairs
    
    print(response.url)        # the URL that was requested
    print(response.history)   # list of responses from before any redirects (empty if none)
    
    print(response.encoding)  # the encoding used to decode the response
    
    response.iter_content()  # images, video, large files: pull them out piece by piece in a loop
    
    #Closing: response.close(), or use contextlib.closing
    from contextlib import closing
    with closing(requests.get('xxx',stream=True)) as response:
        for line in response.iter_content():
            pass
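    
    # In requests 2.18+ the Response object is itself a context manager, so as a
    # sketch the closing() wrapper can be dropped (httpbin's /bytes endpoint is
    # just a convenient stand-in URL):
    import requests
    with requests.get('http://httpbin.org/bytes/1024', stream=True) as response:
        for chunk in response.iter_content(chunk_size=128):
            pass  # process each chunk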
    

    Encoding issues

    #Encoding issues
    import requests
    response=requests.get('http://www.autohome.com/news')
    
    # Option 1: set the encoding explicitly
    response.encoding='gbk' #Autohome returns pages encoded as gb2312, while requests falls back to ISO-8859-1; without setting gbk the Chinese text comes out garbled
    
    # Option 2: use the encoding detected from the response body
    response.encoding=response.apparent_encoding
    print(response.text)
    

    Fetching binary data

    import requests
    
    response=requests.get('https://timgsa.baidu.com/timg?image&quality=80&size=b9999_10000&sec=1509868306530&di=712e4ef3ab258b36e9f4b48e85a81c9d&imgtype=0&src=http%3A%2F%2Fc.hiphotos.baidu.com%2Fimage%2Fpic%2Fitem%2F11385343fbf2b211e1fb58a1c08065380dd78e0c.jpg')
    
    with open('a.jpg','wb') as f:
        f.write(response.content)
    
    
    # Streaming binary data
    #The stream parameter: fetch the body a piece at a time. When downloading a 100 GB video, pulling the whole response.content into memory and writing it out in one go is unreasonable
    
    import requests
    
    response=requests.get('https://gss3.baidu.com/6LZ0ej3k1Qd3ote6lo7D0j9wehsv/tieba-smallvideo-transcode/1767502_56ec685f9c7ec542eeaf6eac93a65dc7_6fe25cd1347c_3.mp4',
                          stream=True)
    
    with open('b.mp4','wb') as f:
        for chunk in response.iter_content(chunk_size=1024): # without chunk_size, iter_content yields one byte at a time
            f.write(chunk)
    

    Parsing JSON

    #Parsing JSON
    import requests
    response=requests.get('http://httpbin.org/get')
    
    import json
    res1=json.loads(response.text) #too cumbersome
    
    res2=response.json() #get the parsed JSON directly
    
    
    print(res1 == res2) #True
    

    Redirection and History

    import requests
    import re
    
    #First request
    r1=requests.get('https://github.com/login')
    r1_cookie=r1.cookies.get_dict() #grab the initial cookie (not yet authorized)
    authenticity_token=re.findall(r'name="authenticity_token".*?value="(.*?)"',r1.text)[0] #extract the CSRF token from the page
    
    #Second request: POST to the login endpoint with the initial cookie, the token, and the credentials
    data={
        'commit':'Sign in',
        'utf8':'✓',
        'authenticity_token':authenticity_token,
        'login':'317828332@qq.com',
        'password':'alex3714'
    }
    
    
    #Test 1: without allow_redirects=False, a Location response header triggers a redirect, and r2 is the response for the new page
    r2=requests.post('https://github.com/session',
                 data=data,
                 cookies=r1_cookie
                 )
    
    print(r2.status_code) #200
    print(r2.url) #the URL of the page after the redirect
    print(r2.history) #the responses from before the redirect
    print(r2.history[0].text) #the response text from before the redirect
    
    
    #Test 2: with allow_redirects=False, the Location header is ignored even if present, and r2 is still the response for the original page
    r2=requests.post('https://github.com/session',
                 data=data,
                 cookies=r1_cookie,
                 allow_redirects=False
                 )
    
    
    print(r2.status_code) #302
    print(r2.url) #the URL before the redirect: https://github.com/session
    print(r2.history) #[]
    
    Verified with the GitHub example, where a successful login redirects to the home page
    

    Advanced usage

    SSL Cert Verification

    #Certificate verification (most sites use https)
    import requests
    response=requests.get('https://www.12306.cn') #for an SSL request the certificate is checked first; if it is invalid an error is raised and the program aborts
    
    #Improvement 1: suppress the error, but a warning is still printed
    import requests
    response=requests.get('https://www.12306.cn',verify=False) #skip certificate verification; prints a warning, returns 200
    print(response.status_code)
    
    
    #Improvement 2: suppress both the error and the warning
    import requests
    from requests.packages import urllib3
    urllib3.disable_warnings() #silence the warning
    response=requests.get('https://www.12306.cn',verify=False)
    print(response.status_code)
    
    #Improvement 3: supply a client certificate
    #Many https sites can be visited without a certificate; in most cases carrying one is optional
    #Zhihu, Baidu, etc. work either way
    #Some sites make it mandatory: only designated users who hold the certificate may access them
    import requests
    response=requests.get('https://www.12306.cn',
                         cert=('/path/server.crt',
                               '/path/key'))
    print(response.status_code)
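    
    # verify= also accepts a path to a CA bundle, which helps when the server is
    # signed by a private CA. A sketch; the path below is a hypothetical placeholder:
    import requests
    response=requests.get('https://www.12306.cn', verify='/path/to/ca-bundle.crt')
    print(response.status_code)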
    

    Using proxies

    #Official docs: http://docs.python-requests.org/en/master/user/advanced/#proxies
    
    #Proxy setup: the request is first sent to the proxy, which forwards it on your behalf (getting your IP banned is a common occurrence)
    import requests
    proxies={
        'http':'http://egon:123@xxxxxxxx:9743', #proxy with credentials: user:password comes before the @
        # 'http':'27.46.20.226:8888',           #or a plain proxy without credentials (note: a dict cannot hold two 'http' keys at once)
        'https':'https://xxxxxxx:9743',
    }
    response=requests.get('https://www.12306.cn', proxies=proxies)
    
    print(response.status_code)
    
    
    
    #SOCKS proxies are supported; install with: pip install requests[socks]
    import requests
    proxies = {
        'http': 'socks5://user:pass@host:port',
        'https': 'socks5://user:pass@host:port'
    }
    response=requests.get('https://www.12306.cn', proxies=proxies)
    
    print(response.status_code)
    
    # High-anonymity vs transparent proxies: behind a high-anonymity proxy the backend has no way to obtain your real IP; behind a transparent proxy it can
    # How does the backend recover the real IP behind a transparent proxy? It reads the X-Forwarded-For request header
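    
    # A minimal stdlib sketch of the server side: a transparent proxy appends the
    # client's real IP to X-Forwarded-For, so the backend can read it from there
    # (a high-anonymity proxy omits the header entirely)
    from http.server import BaseHTTPRequestHandler, HTTPServer
    
    class Handler(BaseHTTPRequestHandler):
        def do_GET(self):
            # fall back to the direct peer address when the header is absent
            real_ip = self.headers.get('X-Forwarded-For', self.client_address[0])
            self.send_response(200)
            self.end_headers()
            self.wfile.write(real_ip.encode())
    
    # HTTPServer(('0.0.0.0', 8000), Handler).serve_forever()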
    

    Timeout settings

    #Timeout settings
    #Two forms of timeout: a float or a tuple
    #timeout=0.1        #a single float applies to both connecting and receiving data
    #timeout=(0.1,0.2)  #0.1 is the connect timeout, 0.2 is the read (receive) timeout
    
    import requests
    response=requests.get('https://www.baidu.com', timeout=0.0001)
    

    Authentication

    #Official docs: http://docs.python-requests.org/en/master/user/authentication/
    
    #Auth setup: some sites pop up a dialog asking for a username and password on login (much like alert); the HTML cannot be fetched until you authenticate
    # Under the hood this is just a request header being assembled:
    #         r.headers['Authorization'] = _basic_auth_str(self.username, self.password)
    # Most sites don't use this default scheme; they roll their own
    # In that case we have to follow the site's scheme and write our own equivalent of _basic_auth_str,
    # then put the resulting string into the request header:
    #         r.headers['Authorization'] = func('.....')
    
    #Here is the default scheme, even though most sites don't use it
    import requests
    from requests.auth import HTTPBasicAuth
    r=requests.get('xxx',auth=HTTPBasicAuth('user','password'))
    print(r.status_code)
    
    #HTTPBasicAuth can be abbreviated as follows
    import requests
    r=requests.get('xxx',auth=('user','password'))
    print(r.status_code)
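    
    # For a site with its own scheme, requests' AuthBase hook lets us set the
    # Authorization header ourselves. A sketch: CustomAuth and my_encrypt are
    # hypothetical stand-ins for whatever algorithm the target site actually uses
    import requests
    from requests.auth import AuthBase
    
    def my_encrypt(username, password):
        # placeholder for the site's real scheme
        return 'Custom %s:%s' % (username, password)
    
    class CustomAuth(AuthBase):
        def __init__(self, username, password):
            self.username, self.password = username, password
        def __call__(self, r):
            # attach the header the way the site expects, then return the request
            r.headers['Authorization'] = my_encrypt(self.username, self.password)
            return r
    
    r=requests.get('xxx', auth=CustomAuth('user','password'))
    print(r.status_code)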
    

    Exception handling

    #Exception handling
    import requests
    from requests.exceptions import * #see requests.exceptions for the available exception types
    
    try:
        r=requests.get('http://www.baidu.com',timeout=0.00001)
    except ReadTimeout:
        print('===:')
    # except ConnectionError: #network unreachable
    #     print('-----')
    # except Timeout:
    #     print('aaaaa')
    
    except RequestException:
        print('Error')
    

    Uploading files

    import requests
    with open('a.jpg','rb') as f:  # a context manager ensures the file handle is closed
        files={'file':f}
        response=requests.post('http://httpbin.org/post',files=files)
    print(response.status_code)
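    
    # To control the uploaded filename and content type, requests also accepts a
    # (filename, fileobj, content_type) tuple; a small sketch:
    import requests
    with open('a.jpg','rb') as f:
        files={'file':('a.jpg', f, 'image/jpeg')}
        response=requests.post('http://httpbin.org/post',files=files)
    print(response.json()['files'].keys())  # the uploaded field names echoed back by httpbin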
    

    Single-threaded download of Pearvideo videos

    import requests
    import re
    import time
    
    def get_page(url):
        try:
            response=requests.get(url)
            if response.status_code==200:
                return response.text
        except Exception as e:
            print(e)
    def get_detail(index_page):
        urls=re.findall('class="categoryem".*?href="(.*?)"',index_page,re.S)
        # print(urls)
        for url in urls:
            yield 'https://www.pearvideo.com/'+url
    
    def parse_detail(detail_page):
        # print(detail_page)
        down_url=re.findall('srcUrl="(.*?)"',detail_page,re.S)[0]
        print(down_url)
        return down_url
    
    def get_movie(url):
        response=requests.get(url)
        with open('movie/%s.mp4'%str(time.time()),'wb') as f:
            f.write(response.content)
            print('download finished')
    if __name__ == '__main__':
        index_page=get_page('https://www.pearvideo.com/category_loading.jsp?reqType=5&categoryId=8&start=24')
        urls=get_detail(index_page)
        for url in urls:
            print(url)
            detail_page=get_page(url)
            mp4_url=parse_detail(detail_page)
            get_movie(mp4_url)
    

    Downloading Pearvideo videos with a thread pool

    import requests
    import re
    import time
    from concurrent.futures import ThreadPoolExecutor
    pool=ThreadPoolExecutor(50)
    def get_page(url):
        try:
            response=requests.get(url)
            if response.status_code==200:
                return response.text
        except Exception as e:
            print(e)
    def parse_index(index_page):
        index_page=index_page.result()
        urls=re.findall('class="categoryem".*?href="(.*?)"',index_page,re.S)
        # print(urls)
        for url in urls:
            next_url= 'https://www.pearvideo.com/'+url
            pool.submit(get_page, next_url).add_done_callback(parse_detail)
    
    def parse_detail(detail_page):
        detail_page=detail_page.result()
        # print(detail_page)
        down_url=re.findall('srcUrl="(.*?)"',detail_page,re.S)[0]
        pool.submit(get_movie, down_url)
    
    def get_movie(url):
        print(url)
        response=requests.get(url)
        with open('movie/%s.mp4'%str(time.time()),'wb') as f:
            f.write(response.content)
            print('download finished')
    if __name__ == '__main__':
        for i in range(5):
            url='https://www.pearvideo.com/category_loading.jsp?reqType=5&categoryId=8&start=%s'%((i+1)*12)
            pool.submit(get_page, url).add_done_callback(parse_index)
    
    

    Fetching Autohome news and saving the images

    import requests
    from bs4 import BeautifulSoup
    import os
    for i in range(1, 10):
        ret = requests.get(url='https://www.autohome.com.cn/news/%s/#liststart' % (i,))
        ret.encoding = ret.apparent_encoding
        soup = BeautifulSoup(ret.text, 'lxml')
        div = soup.find(name='div', attrs={'id': 'auto-channel-lazyload-article'})
        li_list = div.find_all(name='li')
        for li in li_list:
            h3 = li.find(name='h3')
            if not h3:
                continue
            p_text = li.find(name='p').text
            href = li.find(name='a').get('href')
            img = li.find(name='img').get('src')
    
            print('''
            News title: %s
            News summary: %s
            News URL: %s
            News image: %s
            ''' % (h3.text, p_text, 'https:' + href, 'https:' + img))
            ret_photo = requests.get(url='https:' + img)
            photo_name = img.rsplit('__', 1)[-1]
            if not os.path.exists('download'):
                os.mkdir('download')
            with open('download/' + photo_name, 'wb') as f:
                f.write(ret_photo.content)
    

    Automated Chouti login, upvoting, and commenting

    import requests
    from  bs4 import BeautifulSoup
    import time
    
    
    # Open the Chouti home page
    ret = requests.get(url='https://dig.chouti.com/',
                       headers={
                           'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36',
                           # 'accept-language': 'zh-CN,zh;q=0.9'
                       }
                       )
    # print(ret.text)
    coo = ret.cookies.get_dict()
    print(coo)
    # 8613121758648
    # woshiniba
    # Log in to Chouti
    ret_login = requests.post(url='https://dig.chouti.com/login',
                              headers={
                                  'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36'
                              },
                              data={
                                  'phone': '8618953675221',
                                  'password': 'lqz123',
                                  'oneMonth': '1'
                              },
                              cookies=coo
    
                              )
    print(ret_login.text)
    # print(ret_login.cookies.get_dict())
    # https://dig.chouti.com/link/vote?linksId=21055766
    # ret_post = requests.post(url='https://dig.chouti.com/link/vote?linksId=21055766',
    #                          headers={
    #                              'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36'
    #                          },
    #                          cookies=coo
    #                          )
    # print(ret_post.text)
    
    for i in range(2,3):
        ret_index = requests.get(url='https://dig.chouti.com/all/hot/recent/%s'%i,
                                 headers={
                                     'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36',
                                 }
                                 )
        soup = BeautifulSoup(ret_index.text, 'html.parser')
        div = soup.find(name='div', attrs={'id': 'content-list'})
        # print(div)
        item_list = div.find_all(name='div', attrs={'class': 'item'})
        print(item_list)
        for item in item_list:
            part2_div = item.find(name='div', attrs={'class': 'part2'})
            nid = part2_div.get('share-linkid')
            print(nid)
            time.sleep(0.5)
            ret_post = requests.post(url='https://dig.chouti.com/link/vote?linksId=%s' % nid,
                                     headers={
                                         'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36'
                                     },
                                     cookies=coo
                                     )
            print(ret_post.text)
    # Comment on an article
    # ret_post = requests.post(url='https://dig.chouti.com/comments/create',
    #                          headers={
    #                              'user-agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36'
    #                          },
    #                          cookies=coo,
    #                          data={
    #                              'jid': 'cdu_53207078460',
    #                              'linkId': '21061426',
    #                              'isAssent': '',
    #                              'content': '写的真好',  # "well written"
    #                              'sortType':'score'
    #
    # }
    # )
    

    Scraping Bilibili videos

    '''
    
    The video and audio downloaded by this program end up as two separate files; they are not merged:
    video: <name>_video.mp4
    audio: <name>_audio.mp4
    Change the value of url to the page you want to download
    '''
    
    # requests simulates sending the HTTP requests
    import requests
    # json parses the embedded page data
    import json
    # re extracts the embedded JSON with regular expressions
    import re
    
    # Define the request headers
    headers = {
        'Accept': '*/*',
        'Accept-Language': 'en-US,en;q=0.5',
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.84 Safari/537.36'
    }
    
    
    # Match the embedded JSON out of the page with a regex
    def my_match(text, pattern):
        match = re.search(pattern, text)
        print(match.group(1))
        print()
        return json.loads(match.group(1))
    
    
    def download_video(old_video_url, video_url, audio_url, video_name):
        headers.update({"Referer": old_video_url})
        print("Starting download of video: %s" % video_name)
        video_content = requests.get(video_url, headers=headers)
        print('%s video size:' % video_name, video_content.headers['content-length'])
        audio_content = requests.get(audio_url, headers=headers)
        print('%s audio size:' % video_name, audio_content.headers['content-length'])
        # video download: start
        received_video = 0
        with open('%s_video.mp4' % video_name, 'ab') as output:
            while int(video_content.headers['content-length']) > received_video:
                headers['Range'] = 'bytes=' + str(received_video) + '-'
                response = requests.get(video_url, headers=headers)
                output.write(response.content)
                received_video += len(response.content)
        # video download: end
        # audio download: start (ranged download, piece by piece)
        received_audio = 0
        with open('%s_audio.mp4' % video_name, 'ab') as output:
            while int(audio_content.headers['content-length']) > received_audio:
                headers['Range'] = 'bytes=' + str(received_audio) + '-'
                response = requests.get(audio_url, headers=headers)
                output.write(response.content)
                received_audio += len(response.content)
        # audio download: end
        return video_name
    
    
    if __name__ == '__main__':
        # replace with the video page you want to scrape
        url = 'https://www.bilibili.com/video/av76609390'
        # fetch the page
        res = requests.get(url, headers=headers)
        # playback details JSON
        playinfo = my_match(res.text, '__playinfo__=(.*?)</script><script>')
        # page/video metadata JSON (the parentheses must be escaped in the pattern)
        initial_state = my_match(res.text, r'__INITIAL_STATE__=(.*?);\(function\(\)')
        # several formats are offered; take the highest resolution (1080p), listed first
        video_url = playinfo['data']['dash']['video'][0]['baseUrl']
        # extract the audio URL
        audio_url = playinfo['data']['dash']['audio'][0]['baseUrl']
        video_name = initial_state['videoData']['title']
        print('Video name:', video_name)
        print('Video URL:', video_url)
        print('Audio URL:', audio_url)
        download_video(url, video_url, audio_url, video_name)
    
    
    ## you-get version
    
    import requests
    from you_get.common import any_download
    from concurrent.futures import ThreadPoolExecutor
    
    import ssl
    pool = ThreadPoolExecutor(10)
    # create an SSL context (unverified = skip https certificate verification)
    ssl._create_default_https_context = ssl._create_unverified_context
    res_json=requests.get('https://api.bilibili.com/x/player/pagelist?aid=73342471').json()
    
    def download(url):
        any_download(url, output_dir='video', merge='merge')
    
    for i,video_content in enumerate(res_json['data']):
        video_name=video_content['part']
        video_url='https://www.bilibili.com/video/av73342471'+'?p=%d'%(i+1)
        print('Video URL:',video_url)
        print('Video name:',video_name)
        pool.submit(download, video_url)
    
    pool.shutdown(wait=True)
    