

    import re
    import requests
    from bs4 import BeautifulSoup
    from datetime import datetime
    
    def getClickCount(newsUrl):
        # Extract the numeric article id (the file name before ".html") from the news URL
        newsId = re.findall(r'\_(.*)\.html', newsUrl)[0].split('/')[-1]
        res = requests.get('http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(newsId))
        # Pull the bare count out of the wrapped text the counter API returns
        return int(res.text.split('.html')[-1].lstrip("('").rstrip("');"))
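    # Illustrative call with a made-up URL in the site's detail-page format;
    # it should return the article's click count as an int:
    # getClickCount('http://news.gzcc.cn/html/2018/xiaoyuanxinwen_0404/9183.html')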
    
    
    def getNewsDetail(newsUrl):   # Fetch the full content of one news article
        resd = requests.get(newsUrl)
        resd.encoding = 'utf-8'
        soupd = BeautifulSoup(resd.text, 'html.parser')     # open and parse the detail page

        news = {}
        news['title'] = soupd.select('.show-title')[0].text
        info = soupd.select('.show-info')[0].text
        # 'dt' parses the 19-character timestamp that follows the '发布时间:' label
        news['dt'] = datetime.strptime(info.lstrip('发布时间:')[0:19], '%Y-%m-%d %H:%M:%S')
        if info.find('来源:') > 0:
            news['source'] = info[info.find('来源:'):].split()[0].lstrip('来源:')
        else:
            news['source'] = 'none'
        news['content'] = soupd.select('.show-content')[0].text.strip()
        news['click'] = getClickCount(newsUrl)
        news['newsUrl'] = newsUrl
        return news
    
    def getListPage(pageUrl):   # Step 9: extract every news item on one list page
        res = requests.get(pageUrl)
        res.encoding = 'utf-8'
        soup = BeautifulSoup(res.text, 'html.parser')
    
        newslist = []
        for news in soup.select('li'):
            if len(news.select('.news-list-title')) > 0:
                newsUrl = news.select('a')[0].attrs['href']
                newslist.append(getNewsDetail(newsUrl))
        return newslist
    
    
    def getPageN():
        # Read the total article count from the first list page and derive
        # how many list pages the site has, at 10 articles per page
        res = requests.get('http://news.gzcc.cn/html/xiaoyuanxinwen/')
        res.encoding = 'utf-8'
        soup = BeautifulSoup(res.text, 'html.parser')
        pagenumber = int(soup.select('.a1')[0].text.rstrip('条'))
        page = pagenumber // 10 + 1
        return page
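    # Worked example with a made-up figure: if the list page reported "1083条",
    # getPageN() would return 1083 // 10 + 1 = 109 list pages.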
    
    newstotal = []
    firstPageUrl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/'
    newstotal.extend(getListPage(firstPageUrl))   # the front (unnumbered) list page
    n = getPageN()
    # f = open('gzccnews.txt','a',encoding='utf-8')
    for i in range(n, n + 1):   # crawls only the last page; use range(2, n + 1) to crawl them all
        listPageUrl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/{}.html'.format(i)
        newstotal.extend(getListPage(listPageUrl))
    
    for news in newstotal:
        print(news)
    
    import pandas
    df = pandas.DataFrame(newstotal)
    df.to_excel('gzccnews.xlsx')
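    # to_excel needs an Excel writer backend installed (openpyxl or xlsxwriter
    # for .xlsx); a dependency-free alternative is a plain CSV dump, e.g.
    # df.to_csv('gzccnews.csv', encoding='utf-8')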
    
    # print(df.tail(3))
    # print(df[2:5])
    # print(df['title'])
    # print(df[(df['click']>2000)&(df['click']<5000)])
    # sou = ['国际学院','学生工作处']
    # print(df[df['source'].isin(sou)])

    # print(df.index)
    # df[df['dt']>'2018-04-12 14:01:24']
    # df.index
    # print(df[1:5])

    # df1 = df.set_index('dt')
    # print(df1.index)
    #
    # print(df1['2018-04-12 17:27:50':'2018-04-04 09:35:00'])
    # print(df1['2018-04'])

    import sqlite3
    with sqlite3.connect('gzccnewsdb.sqlite') as db:
        df.to_sql('gzccnews', con=db, if_exists='replace')

    with sqlite3.connect('gzccnewsdb.sqlite') as db:
        df2 = pandas.read_sql_query('SELECT * FROM gzccnews', con=db)

    print(df2)
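    # Note: to_sql stores the DataFrame index as an extra "index" column by default,
    # so df2 comes back with one more column than df; pass index=False when writing
    # (or index_col='index' when reading) to round-trip cleanly.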

    import pymysql
    from sqlalchemy import create_engine
    conn = create_engine('mysql+pymysql://root:@localhost:3306/gz?charset=utf8')
    df2.to_sql('news1', con=conn, if_exists='replace')
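    # Quick sanity check, assuming the 'gz' MySQL database is reachable and the
    # write above succeeded: read the table back through the same engine.
    df3 = pandas.read_sql_query('SELECT * FROM news1', con=conn)
    print(df3.head())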


    # f.close()

      

  • Original article: https://www.cnblogs.com/qq412158152/p/8866841.html