• Fetch all campus news


    1. Extract all the news items on a single news list page, wrapped up as a function.

    2. Get the total number of news articles and compute the total number of list pages (see the sketch after the script below).

    3. Fetch the full details of every article on every list page.

    4. Pick a topic of your own interest, crawl its data, and run a word-segmentation analysis on it; it must not duplicate another student's topic (see the segmentation sketch at the end).

    import requests
    import re
    # BeautifulSoup's HTML parser turns each page into a parse tree
    from bs4 import BeautifulSoup

    url = "http://news.gzcc.cn/html/xiaoyuanxinwen/"
    listnewurl = "http://news.gzcc.cn/html/xiaoyuanxinwen/index.html"

    res = requests.get(url)
    reslist = requests.get(listnewurl)

    res.encoding = 'utf-8'
    reslist.encoding = 'utf-8'  # set the encoding before decoding .text

    soup = BeautifulSoup(res.text, 'html.parser')
    soup_list = BeautifulSoup(reslist.text, 'html.parser')


    def getlistnew(listnewurl):  # print the title and link of every news item
        reslist = requests.get(listnewurl)
        reslist.encoding = 'utf-8'

        soup_list = BeautifulSoup(reslist.text, 'html.parser')

        tou = 'http://news.gzcc.cn/html/xiaoyuanxinwen/'  # list-page URL prefix
        wei = '.html'                                     # list-page URL suffix

        for news in soup_list.select('li'):  # front page
            if len(news.select('.news-list-title')) > 0:
                # article title
                title = news.select('.news-list-title')[0].text
                # article link
                href = news.select('a')[0]['href']

                print('------------------------------------------------------------------------------')
                print("Article title: " + title)
                print("Article link: " + href)

        for i in range(2, 233):  # every list page after the front page (page count hardcoded)
            shuzi = i  # page number
            allnewurl = '%s%s%s' % (tou, shuzi, wei)
            resalllist = requests.get(allnewurl)
            resalllist.encoding = 'utf-8'
            soup_alllist = BeautifulSoup(resalllist.text, 'html.parser')
            for news in soup_alllist.select('li'):
                if len(news.select('.news-list-title')) > 0:
                    # article title
                    title = news.select('.news-list-title')[0].text
                    # article link
                    href = news.select('a')[0]['href']

                    print('------------------------------------------------------------------------------')
                    print("Article title: " + title)
                    print("Article link: " + href)


    def getClickCount(url):
        # news ID, e.g. '9183' out of .../2018/xiaoyuanxinwen_0404/9183.html
        # (the pattern only matches 2018 articles, as in the original URL scheme)
        newsId = re.match('http://news.gzcc.cn/html/2018/xiaoyuanxinwen(.*).html', url).group(1).split('/')[1]
        # the click count is served by a separate count API, keyed by the news ID
        HitUrl = 'http://oa.gzcc.cn/api.php?op=count&id=%s&modelid=80' % newsId
        # the response embeds the count as ...html('N');, so strip the wrapper
        hitNumber = requests.get(HitUrl).text.split('.html')[-1].lstrip("('").rstrip("');")
        print("Click count:", hitNumber)
        print('News ID:', newsId)


    def getNewDetail(url):  # fetch the full details of every article on one list page
        res = requests.get(url)
        res.encoding = 'utf-8'
        soup = BeautifulSoup(res.text, 'html.parser')

        for news in soup.select('li'):
            if len(news.select('.news-list-title')) > 0:
                # article title
                title = news.select('.news-list-title')[0].text
                # article summary
                description = news.select('.news-list-description')[0].text
                # article metadata shown on the list page
                info = news.select('.news-list-info')[0].text
                # article link
                href = news.select('a')[0]['href']

                # follow the link and parse the article page itself
                resd = requests.get(href)
                resd.encoding = 'utf-8'
                soupd = BeautifulSoup(resd.text, 'html.parser')

                # metadata line of the article page
                newinfo = soupd.select('.show-info')[0].text

                # body text of the article
                content = soupd.select('#content')[0].text

                # the metadata line is whitespace-separated:
                # date, time, author, checker, source, photographer
                date = newinfo.split()[0]
                time = newinfo.split()[1]
                author = newinfo.split()[2]
                checker = newinfo.split()[3]
                source = newinfo.split()[4]
                Photography = newinfo.split()[5]

                print('------------------------------------------------------------------------------')
                print("Article title: " + title)
                print("Article description: " + description)
                print("Article info: " + date + ' ' + time + ' ' + author + ' ' + checker + ' ' + source + ' ' + Photography)
                getClickCount(href)  # click count and news ID
                print("Article link: " + href)
                print(content)
                print('------------------------------------------------------------------------------')


    # getNewDetail(url)     # fetch the full details of one list page
    getlistnew(listnewurl)  # fetch the title and link of every news item
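
    The script above hardcodes 233 list pages in range(2, 233). For items 2 and 3 of the assignment, the page count can be computed from the total article count instead, and every list page fed through getNewDetail(). A minimal sketch, assuming the pager on the list page exposes the total article count in an element with class 'a1' whose text reads like '2662条', and that each list page holds 10 articles; both are assumptions about the site's markup, not verified here.

    import requests
    from bs4 import BeautifulSoup

    def getPageCount(listnewurl):
        res = requests.get(listnewurl)
        res.encoding = 'utf-8'
        soup = BeautifulSoup(res.text, 'html.parser')
        # assumed markup: the pager's '.a1' element reads like '2662条'
        total = int(soup.select('.a1')[0].text.rstrip('条'))
        return total // 10 + (1 if total % 10 else 0)  # 10 articles per list page

    # pages = getPageCount(listnewurl)
    # getNewDetail(listnewurl)  # front page
    # for i in range(2, pages + 1):  # remaining list pages
    #     getNewDetail('http://news.gzcc.cn/html/xiaoyuanxinwen/%s.html' % i)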

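    For item 4 (crawling a topic of your own and running a word-segmentation analysis), a minimal sketch of the analysis step, assuming the crawled article bodies have been concatenated into one string (all_content below is that hypothetical variable) and that the jieba package is installed; the single-character filter stands in for a real stopword list.

    import jieba
    from collections import Counter

    def wordFreq(text, topn=20):
        # segment the Chinese text into words with jieba
        words = jieba.lcut(text)
        # drop whitespace and single characters; a real stopword list would go here
        words = [w for w in words if len(w.strip()) > 1]
        return Counter(words).most_common(topn)

    # usage: all_content is the concatenated article text collected by the crawler
    # for word, count in wordFreq(all_content):
    #     print(word, count)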