• Structuring and Saving the Data
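
The script below crawls the campus news list pages at news.gzcc.cn, parses each article into a dict (title, publish time, source and click count), gathers the dicts into a pandas DataFrame, and then filters the data and saves it to Excel, CSV, SQLite and MySQL.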


    import requests
    from bs4 import BeautifulSoup
    from datetime import datetime
    import re
    import pandas
    
    # Get the click count of a news article
    def getClickCount(newsUrl):
        # The article id is the file name of the detail page (the part before '.html')
        newsId = re.findall('_(.*).html', newsUrl)[0].split('/')[1]
        clickUrl = 'http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(newsId)
        clickStr = requests.get(clickUrl).text
        # The endpoint returns a JS snippet containing  hits').html('<count>');
        # so the capture group below is the click count
        count = int(re.search(r"hits'\)\.html\('(.*)'\);", clickStr).group(1))
        return count
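
    # Optional sanity check; the URL below is only a made-up example that follows the
    # site's detail-page pattern, not a link taken from the original post:
    # print(getClickCount('http://news.gzcc.cn/html/2018/xiaoyuanxinwen_0101/1234.html'))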
    
    # def writeNewsContent(content):
    #     f = open('getNewsDetail.txt','a',encoding='utf-8')
    #     f.write(content)
    #     f.close()
    
    
    # Get the details of a single news article
    def getNewDetail(url):
        resd = requests.get(url)
        resd.encoding = 'utf-8'
        soupd = BeautifulSoup(resd.text, 'html.parser')
    
        news = {}
        news['title'] = soupd.select('.show-title')[0].text
        info = soupd.select('.show-info')[0].text
    
        news['dt'] = datetime.strptime(info.lstrip('发布时间:')[0:19], '%Y-%m-%d %H:%M:%S')
    
        if info.find('来源:') > 0:
            news['source'] = info[info.find('来源:'):].split()[0].lstrip('来源:')
        else:
            news['source'] = 'none'
        # if info.find('作者:') > 0:
        #     author = info[info.find('作者:'):].split()[0].lstrip('作者:')
        # else:
        #     author = 'none'
        news['clickCount'] = getClickCount(url)
        return news
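
    # The returned dict has the keys 'title', 'dt', 'source' and 'clickCount';
    # these become the DataFrame column names used in the filters further down.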
    
    # Crawl one list page and return the parsed details of every news item on it
    def getListPage(listPageUrl):
        res = requests.get(listPageUrl)
        res.encoding = 'utf-8'
        soup = BeautifulSoup(res.text, 'html.parser')
        newsList = []
        for news in soup.select('li'):
            if len(news.select('.news-list-title')) > 0:
                # Link to the news detail page
                a = news.a.attrs['href']
                # Fetch and parse the detail page
                newsList.append(getNewDetail(a))
        return newsList
    
    
    
    
    # News items on the first list page
    newsTotal = []
    firstPageUrl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/'
    newsTotal.extend(getListPage(firstPageUrl))
    
    
    # Work out the total number of list pages (10 news items per page)
    def getPageNum():
        resn = requests.get('http://news.gzcc.cn/html/xiaoyuanxinwen/')
        resn.encoding = 'utf-8'
        soupn = BeautifulSoup(resn.text, 'html.parser')
        # '.a1' shows the total article count with a trailing '条' that has to be stripped
        n = int(soupn.select('.a1')[0].text.rstrip('条'))
        return (n // 10 + 1)
    
    n = getPageNum()
    # Only the last list page is crawled here; range(2, n + 1) would crawl every page
    for i in range(n, n+1):
        pageUrl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/{}.html'.format(i)
        newsTotal.extend(getListPage(pageUrl))
        # print(newsTotal)
    
    df = pandas.DataFrame(newsTotal)
    df.to_excel('gzccnews.xlsx')
    
    # First 6 rows of click count, title and source
    print(df[['clickCount', 'title', 'source']].head(6))
    # News published by '学校综合办' with more than 3000 clicks
    print(df[(df['clickCount'] > 3000) & (df['source'] == '学校综合办')])
    # All news published by '国际学院' or '学生工作处'
    sour = ['国际学院', '学生工作处']
    print(df[df['source'].isin(sour)])

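The structuring and saving steps are repeated below as a standalone snippet: besides Excel, the DataFrame is also written to CSV, indexed by publish month, and stored in SQLite and MySQL.
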
    import pandas
    df = pandas.DataFrame(newsTotal)

    df.to_excel('gzccnews.xlsx')
    df.to_csv('gzccnews.csv')

    # First 6 rows of click count, title and source
    print(df[['clickCount', 'title', 'source']].head(6))

    # News published by '学校综合办' with more than 3000 clicks
    print(df[(df['clickCount'] > 3000) & (df['source'] == '学校综合办')])

    # All news published by '国际学院' or '学生工作处'
    sou = ['国际学院', '学生工作处']
    print(df[df['source'].isin(sou)])
     
    # Make the publish time ('dt') the index so rows can be selected by month
    df1 = df.set_index('dt')
    print(df1.loc['2018-03'])
    
    import sqlite3
    # Write the DataFrame into a SQLite database table
    with sqlite3.connect('gzccnewsdb.sqlite') as db:
        df.to_sql('gzccnews05', con=db, if_exists='replace')

    # Read the table back to check the export
    with sqlite3.connect('gzccnewsdb.sqlite') as db:
        df2 = pandas.read_sql_query('SELECT * FROM gzccnews05', con=db)
    print(df2)
    
    import pymysql
    from sqlalchemy import create_engine
    # Write the DataFrame into MySQL (the gzccnews database must already exist)
    conn = create_engine('mysql+pymysql://root:root@localhost:3306/gzccnews?charset=utf8')
    df.to_sql('gzccnews', con=conn, if_exists='replace')
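
To confirm that the MySQL export worked, the table can be read back through the same engine. This is a minimal sketch and not part of the original post; it only reuses the table and engine created above.

    # Read the gzccnews table back from MySQL via the SQLAlchemy engine created above
    check = pandas.read_sql('SELECT * FROM gzccnews', con=conn)
    print(check.head())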
    

      

  • Original post: https://www.cnblogs.com/Runka/p/8870328.html