• 用requests库和BeautifulSoup4库爬取新闻列表


    1、requests库和BeautifulSoup4库,爬取校园新闻列表的时间、标题、链接、来源。

    import requests
    from bs4 import BeautifulSoup

    # Scrape the campus news list page and print each item's time, title,
    # link and thumbnail. (Indentation reconstructed: the loop/if bodies
    # were flattened in the original paste and would raise IndentationError.)
    url = 'http://news.gzcc.cn/html/xiaoyuanxinwen/'
    res = requests.get(url)
    res.encoding = 'utf-8'  # site serves UTF-8; requests may mis-guess otherwise
    soup = BeautifulSoup(res.text, 'html.parser')
    for news in soup.select('li'):
        # Only <li> elements that carry a .news-list-title are real news items.
        if len(news.select('.news-list-title')) > 0:
            title = news.select('.news-list-title')[0].text                  # title
            time = news.select('.news-list-info')[0].contents[0].text       # time
            link = news.select('a')[0]['href']                              # link (renamed from `url` to avoid shadowing the list URL)
            thumb = news.select('.news-list-thumb')[0].contents[0]          # thumbnail <img> tag
            print(time)
            print(title)
            print(link)
            print(thumb)

    结果:

    2、将其中的时间str转换成datetime类型。 

    import requests
    from bs4 import BeautifulSoup
    from datetime import datetime

    # Scrape the campus news list and convert each item's date string
    # into a datetime object. (Indentation reconstructed from the
    # flattened paste.)
    gzccurl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/'
    res = requests.get(gzccurl)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')
    for news in soup.select('li'):
        if len(news.select('.news-list-title')) > 0:
            title = news.select('.news-list-title')[0].text     # title
            url = news.select('a')[0]['href']                   # link
            time = news.select('.news-list-info')[0].contents[0].text
            # Date appears as e.g. '2017-09-28'; parse into datetime.
            dt = datetime.strptime(time, '%Y-%m-%d')
            source = news.select('.news-list-info')[0].contents[1].text  # source
            print(dt, ' ', title, ' ', url, ' ', source)

    结果:

    3、将取得详细内容的代码包装成函数。 

    import requests
    from bs4 import BeautifulSoup
    from datetime import datetime

    gzccurl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/'
    res = requests.get(gzccurl)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')

    def getdetail(url):
        """Fetch one article page and return its body text (.show-content)."""
        resd = requests.get(url)
        resd.encoding = 'utf-8'
        soupd = BeautifulSoup(resd.text, 'html.parser')
        return soupd.select('.show-content')[0].text

    # Indentation reconstructed: loop/if bodies were flattened in the paste.
    for news in soup.select('li'):
        if len(news.select('.news-list-title')) > 0:
            title = news.select('.news-list-title')[0].text                  # title
            url = news.select('a')[0]['href']                                # link
            time = news.select('.news-list-info')[0].contents[0].text        # time
            dt = datetime.strptime(time, '%Y-%m-%d')
            source = news.select('.news-list-info')[0].contents[1].text      # source
            detail = getdetail(url)                                          # article body
            print(dt, title, url, source)
            print(detail)
            break  # demo: only show the first article's detail

     结果:

    4、选一个自己感兴趣的主题,做类似的操作,为后面“爬取网络数据并进行文本分析”做准备。

    import requests
    from bs4 import BeautifulSoup
    from datetime import datetime

    # Same pipeline as above but on a different list page (domestic news),
    # as preparation for later text analysis. Indentation reconstructed.
    gzccurl = 'http://news.gzcc.cn/html/gnyw/'
    res = requests.get(gzccurl)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')

    def getdetail(url):
        """Fetch one article page and return its body text (.show-content)."""
        resd = requests.get(url)
        resd.encoding = 'utf-8'
        soupd = BeautifulSoup(resd.text, 'html.parser')
        return soupd.select('.show-content')[0].text

    for news in soup.select('li'):
        if len(news.select('.news-list-title')) > 0:
            title = news.select('.news-list-title')[0].text                # title
            url = news.select('a')[0]['href']                              # link
            time = news.select('.news-list-info')[0].contents[0].text      # time
            dt = datetime.strptime(time, '%Y-%m-%d')
            detail = getdetail(url)                                        # article body
            print(dt, title, url, detail)
            break  # demo: only show the first article

    结果:

  • 相关阅读:
    HO引擎近况20210912
    查询超时问题的处理
    ubuntu根据关键词批量杀进程
    创建notebook适用的虚拟环境
    信赖域策略优化(Trust Region Policy Optimization, TRPO)
    强化学习(Reinforcement Learning)
    生成对抗网络(GAN与W-GAN)
    卷积神经网络CNN
    循环神经网络RNN
    PyTorch自动求导
  • 原文地址:https://www.cnblogs.com/001688yzl/p/7606382.html
Copyright © 2020-2023  润新知