• 正则表达式实战


    0x00 股票数据定向爬虫


    功能描述:

    • 目标:获取上交所和深交所所有股票名称和交易信息
    • 输出:保存到文件中
    • 技术路线:requests-bs4-re

                                            

    程序结构设计:

      步骤1:从东方财富网获取股票列表

      步骤2:根据股票列表逐个到百度股票获取个股信息

      步骤3:将结果存储到文件

    import requests
    import traceback
    from bs4 import BeautifulSoup
    import re
    
    def getHTMLText(url):                         #获取页面信息经典框架
        try:
            r = requests.get(url, timeout = 30)
            r.raise_for_status()
            r.encoding = r.apparent_encoding
            return r.text
        except:
            return ""
    
    def getStockList(lst, stockUrl):               #从东方财富网获取股票代码,以便构造之后的url
        html = getHTMLText(stockUrl)
        soup = BeautifulSoup(html, 'html.parser')
        a = soup.find_all('a')
        for i in a:                                #对于每个a标签,从其href属性的值中提取s开头后面为h或z加上6位数字的代码
            try:
                href = i.attrs['href']
                lst.append(re.findall(r'[s][hz]d{6}', href)[0])
            except:
                continue
    
    def getStockInfo(lst, stockURL, fpath):        #获取之前列表中的股票信息
        for stock in lst:                          #遍历之前获得的股票代码,构造新url,访问百度股票
            url = stockURL + stock + '.html'
            html = getHTMLText(url)
            try:                                   
                if html == "":
                    continue
                infoDick = {}                      #新建字典存储股票信息
                soup = BeautifulSoup(html, 'html.parser')
                stockInfo = soup.find('div', attrs={'class' : 'stock-bets'})      
    
                name = stockInfo.find_all(attrs={'class': 'bets-name'})[0]        #获取股票名称
                infoDick.update({'股票名称' : name.text.split()[0]})
    
                keyList = stockInfo.find_all('dt')                                #获取对应股票的相关信息
                valueList = stockInfo.find_all('dd')
                for i in range(len(keyList)):
                    key = keyList[i].text
                    value = valueList[i].text
                    infoDick[key] = value
    
                with open(fpath, 'a', encoding='utf-8') as f:                      #存储在本机文件中
                    f.write(str(infoDick) + '
    ')
            except:
                traceback.print_exc()
                continue
    
    
    def main():
        stock_list_url = "http://quote.eastmoney.com/stocklist.html"
        stock_info_url = "https://gupiao.baidu.com/stock/"
        output_file = "D://BaiduStockInfo.txt"
        slist = []
        getStockList(slist, stock_list_url)
        getStockInfo(slist, stock_info_url, output_file)
    
    main()

     代码优化:

    因为我们做的是定向爬取,因此两个url页面的编码我们都可以首先经过手工确定,直接查看源码,我们在数据头部分发现百度股票的编码为utf-8,东方财富网编码为gb2312,因此在getHTMLText()函数中,不用apparent_encoding方法去动态获取头文件中的编码,可以直接给编码赋值来提速:

    def getHTMLText(url, code = 'utf-8'):
        try:
            r = requests.get(url, timeout = 30)
            r.raise_for_status()
            r.encoding = code
            return r.text
        except:
            return ""
    
    def getStockList(lst, stockUrl):               
        html = getHTMLText(stockUrl, "GB2312")
        soup = BeautifulSoup(html, 'html.parser')
        a = soup.find_all('a')
        for i in a:                               
            try:
                href = i.attrs['href']
                lst.append(re.findall(r'[s][hz]d{6}', href)[0])
            except:
                continue
    
    def getStockInfo(lst, stockURL, fpath):        
        for stock in lst:                         
            url = stockURL + stock + '.html'
            html = getHTMLText(url)

    同时我们可以打印动态进度条,来提高用户体验

    def getStockInfo(lst, stockURL, fpath):
        count = 0
        for stock in lst:
            url = stockURL + stock + '.html'
            html = getHTMLText(url)
            try:
                if html == "":
                    continue
                infoDick = {}
                soup = BeautifulSoup(html, 'html.parser')
                stockInfo = soup.find('div', attrs={'class' : 'stock-bets'})
    
                name = stockInfo.find_all(attrs={'class': 'bets-name'})[0]
                infoDick.update({'股票名称' : name.text.split()[0]})
    
                keyList = stockInfo.find_all('dt')
                valueList = stockInfo.find_all('dd')
                for i in range(len(keyList)):
                    key = keyList[i].text
                    value = valueList[i].text
                    infoDick[key] = value
    
                with open(fpath, 'a', encoding='utf-8') as f:
                    f.write(str(infoDick) + '
    ')
                    count = count + 1
                    print("
    当前进度: {:.2f}%".format(count * 100 / len(lst)), end="")
            except:
                count = count + 1
                print("
    当前进度: {:.2f}%".format(count * 100 / len(lst)), end="")
                continue

  • 相关阅读:
    ab (ApacheBench)命令
    Linux yum apt-get 方式
    Linux 作业调度器 crond
    FastDFS 注意事项
    FastDFS 搭建
    FastDFS 基础知识
    JS判断web网站访问端是PC电脑还是手机
    C# Json数据反序列化为Dictionary并根据关键字获取指定值1
    C#委托的异步调用1
    C# Json数据反序列化为Dictionary并根据关键字获取指定值
  • 原文地址:https://www.cnblogs.com/Ragd0ll/p/10252819.html
Copyright © 2020-2023  润新知