• Crawling all campus news


    The requirements for this assignment come from: https://edu.cnblogs.com/campus/gzcc/GZCC-16SE1/homework/3002

    Program setup:

    # Import the required packages
    import re
    import requests
    from bs4 import BeautifulSoup
    from datetime import datetime
    import time
    import random
    import pandas as pd

    0. Get the click count from a news URL and wrap it as a function

    # Click count for a news article
    def clickCounts(url):
        # the article id is the last run of digits in the news URL
        id = re.findall(r'\d+', url)[-1]
        clickUrl = "http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80".format(id)
        clickStruct = requests.get(clickUrl).text
        # the count sits inside the last ...html('N'); call: [2:-3] strips the (' and ');
        clickCounts = int(clickStruct.split('.html')[-1][2:-3])
        return clickCounts
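
    The string slicing above assumes the counter API returns a short jQuery snippet ending in something like $('#hits').html('5423');. Under that same assumption, a regex states the intent more directly; this variant is only a sketch, not part of the original assignment:

    # Sketch: same idea as clickCounts, but with an explicit regex.
    # Assumes the API response contains jQuery calls like .html('5423');
    def clickCountsRe(url):
        id = re.findall(r'\d+', url)[-1]
        clickUrl = "http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80".format(id)
        text = requests.get(clickUrl).text
        nums = re.findall(r"\.html\('(\d+)'\)", text)
        return int(nums[-1]) if nums else 0  # last match mirrors the original slicing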

    1. Get the news details from a news URL: dict, anews

    # Publication time of a news article
    def newsdt(showInfo):
        # the first whitespace token looks like "发布时间:2019-04-04"; keep the date part
        newsDate = showInfo.split()[0].split(':')[1]
        newsTime = showInfo.split()[1]
        newsDT = newsDate + ' ' + newsTime
        dt = datetime.strptime(newsDT,'%Y-%m-%d %H:%M:%S')
        return dt
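
    # Quick sanity check for newsdt with a made-up show-info string
    # (hypothetical sample; the real page text follows the same layout)
    sampleInfo = '发布时间:2019-04-04 11:11:00 作者:admin'
    print(newsdt(sampleInfo))  # -> 2019-04-04 11:11:00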
    
    # Summary of one news article
    def anews(url):
        newsDetail = {}
        res = requests.get(url)
        res.encoding = 'utf-8'
        soup = BeautifulSoup(res.text,'html.parser')
        newsDetail['newsTitle'] = soup.select('.show-title')[0].text
        showInfo = soup.select('.show-info')[0].text
        newsDetail['newsDT'] = newsdt(showInfo)
        newsDetail['newsClick'] = clickCounts(url)
        return newsDetail
    # Test: crawl the summary of a single news article
    newsUrl = "http://news.gzcc.cn/html/2019/xiaoyuanxinwen_0404/11155.html"
    print(anews(newsUrl))

    Sample output:
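
    As written, anews raises an exception on a network error or when a page lacks the expected elements. A hardened sketch (the 10-second timeout and the None fallback are my own choices, not part of the assignment):

    # Sketch: anews with basic error handling
    def anewsSafe(url):
        try:
            res = requests.get(url, timeout=10)
            res.raise_for_status()
        except requests.RequestException:
            return None  # skip pages that fail to load
        res.encoding = 'utf-8'
        soup = BeautifulSoup(res.text, 'html.parser')
        title = soup.select('.show-title')
        info = soup.select('.show-info')
        if not title or not info:
            return None  # not a regular news page
        return {'newsTitle': title[0].text,
                'newsDT': newsdt(info[0].text),
                'newsClick': clickCounts(url)}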

    2. Get the news URLs from a list-page URL: list.append(dict), alist

    # Get the news URLs from a list-page URL
    def alist(listUrl):
        res = requests.get(listUrl)
        res.encoding = 'utf-8'
        soup = BeautifulSoup(res.text,'html.parser')
        newsList = []
        for news in soup.select('li'):
            # only <li> items that carry a news title are real stories
            if len(news.select('.news-list-title'))>0:
                newsUrl = news.select('a')[0]['href']
                newsDesc = news.select('.news-list-description')[0].text
                newsDict = anews(newsUrl)
                newsDict['newsUrl'] = newsUrl
                newsDict['description'] = newsDesc
                newsList.append(newsDict)
        return newsList
    
    # Test: crawl the summaries on one list page
    listUrl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/'
    allnews = alist(listUrl)
    for news in allnews:
        print(news)

    Sample output:
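
    List pages occasionally repeat a story (pinned or featured items), so the same URL can show up more than once. A small de-duplication pass keyed on newsUrl (my own addition, not part of the assignment):

    # Sketch: drop duplicate stories, keeping the first occurrence
    seen = set()
    uniqueNews = []
    for item in allnews:
        if item['newsUrl'] not in seen:
            seen.add(item['newsUrl'])
            uniqueNews.append(item)
    print(len(allnews), '->', len(uniqueNews))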

    3. Generate all list-page URLs and fetch all the news: list.extend(list), allnews

    *Each student crawls the 10 list pages starting from the trailing digits of their student ID

    # Crawl the 10 list pages starting from the trailing digits of my student ID
    allnews = []
    for i in range(25,35):
        listUrl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/{}.html'.format(i)
        allnews.extend(alist(listUrl))
    
    # Test: print the summaries crawled from the selected pages
    for n in allnews:
        print(n)
    print(len(allnews))  # total number of news items crawled

    Sample output:

    4. Set a reasonable crawl interval

    import time
    import random

    time.sleep(random.random()*3)
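
    In practice the pause belongs inside the page loop from step 3, so each request to the site is spaced out rather than sleeping once. A sketch combining the two, with the same 0-3 second jitter:

    # Sketch: crawl the same 10 list pages with a random pause between requests
    allnews = []
    for i in range(25, 35):
        listUrl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/{}.html'.format(i)
        allnews.extend(alist(listUrl))
        time.sleep(random.random() * 3)  # random 0-3s gap between list pages

    A stricter version would also sleep inside alist, between individual article requests.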

    5. Do simple data processing with pandas and save the result

    Save to a csv or excel file

    # Simple data processing with pandas
    newsdf = pd.DataFrame(allnews)
    for i in range(5):
        print(i)
        time.sleep(random.random() * 3)  # pause between prints, mimicking the crawl interval
        print(newsdf)
    # Save to a local csv file
    newsdf.to_csv(r'D:\gzccnews.csv', encoding='utf-8')

    Sample output:

    ......
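
    The heading promises some processing, so here is a short pandas sketch: sort by click count and write an Excel copy alongside the csv. The output path mirrors the one above, the 1000-click cutoff is arbitrary, and to_excel needs the openpyxl package installed:

    # Sketch: simple processing before saving
    sortedDf = newsdf.sort_values(by='newsClick', ascending=False)
    print(sortedDf.head(10))                       # the ten most-read stories
    hotNews = newsdf[newsdf['newsClick'] > 1000]   # stories with 1000+ clicks (arbitrary cutoff)
    sortedDf.to_excel(r'D:\gzccnews.xlsx')         # Excel copy; requires openpyxl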

     

• Original article: https://www.cnblogs.com/me-zzy/p/10698827.html