• Scraping a news list with the requests and BeautifulSoup4 libraries


    1. Use the requests and BeautifulSoup4 libraries to scrape the time, title, link, and source of each item in the campus news list.

    import requests
    from bs4 import BeautifulSoup

    url = 'http://news.gzcc.cn/html/xiaoyuanxinwen/'
    res = requests.get(url)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')

    for news in soup.select('li'):
        if len(news.select('.news-list-title')) > 0:
            title = news.select('.news-list-title')[0].text               # title
            time = news.select('.news-list-info')[0].contents[0].text     # time
            url = news.select('a')[0]['href']                             # link
            thumb = news.select('.news-list-thumb')[0].contents[0]        # thumbnail
            print(time)
            print(title)
            print(url)
            print(thumb)

    Result:
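    The `len(news.select(...)) > 0` test above skips the `<li>` elements on the page that are not news entries. As a side note (not from the original post), the same guard can be written with BeautifulSoup's `select_one`, which returns None when nothing matches; this sketch assumes the same page structure and the `soup` object built above.

    # Minimal variant of the same loop using select_one (not from the original post)
    for news in soup.select('li'):
        t = news.select_one('.news-list-title')
        if t is not None:                         # skip <li> elements that carry no news item
            print(t.text)                         # title
            print(news.select_one('a')['href'])   # link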

    2. Convert the time string into a datetime object.

    import requests
    from bs4 import BeautifulSoup
    from datetime import datetime

    gzccurl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/'
    res = requests.get(gzccurl)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')

    for news in soup.select('li'):
        if len(news.select('.news-list-title')) > 0:
            title = news.select('.news-list-title')[0].text                # title
            url = news.select('a')[0]['href']                              # link
            time = news.select('.news-list-info')[0].contents[0].text      # time (string)
            dt = datetime.strptime(time, '%Y-%m-%d')                       # parse into datetime
            source = news.select('.news-list-info')[0].contents[1].text    # source
            print(dt, ' ', title, ' ', url, ' ', source)

    Result:
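    As a quick aside (not from the original post), `datetime.strptime` turns the date string into a real datetime object, so it can be compared, sorted, and reformatted. A minimal demonstration, assuming the list shows dates in the same '%Y-%m-%d' layout; the sample string '2018-04-01' is only an example:

    from datetime import datetime

    s = '2018-04-01'                          # hypothetical date string in the same layout
    dt = datetime.strptime(s, '%Y-%m-%d')     # -> datetime.datetime(2018, 4, 1, 0, 0)
    print(dt.year, dt.month, dt.day)          # 2018 4 1
    print(dt.strftime('%Y/%m/%d'))            # 2018/04/01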

    3. Wrap the code that fetches the full article content into a function.

    import requests
    from bs4 import BeautifulSoup
    from datetime import datetime

    gzccurl = 'http://news.gzcc.cn/html/xiaoyuanxinwen/'
    res = requests.get(gzccurl)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')

    def getdetail(url):
        # Fetch an article page and return the text of its body
        resd = requests.get(url)
        resd.encoding = 'utf-8'
        soupd = BeautifulSoup(resd.text, 'html.parser')
        return soupd.select('.show-content')[0].text

    for news in soup.select('li'):
        if len(news.select('.news-list-title')) > 0:
            title = news.select('.news-list-title')[0].text                # title
            url = news.select('a')[0]['href']                              # link
            time = news.select('.news-list-info')[0].contents[0].text      # time
            dt = datetime.strptime(time, '%Y-%m-%d')
            source = news.select('.news-list-info')[0].contents[1].text    # source
            detail = getdetail(url)                                        # article body
            print(dt, title, url, source)
            print(detail)
            break

    Result:
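    One possible refinement (not in the original post): a defensive version of getdetail that returns an empty string when an article page has no `.show-content` block, so one unusual page does not stop the whole crawl. This is only a sketch, under the assumption that the detail pages otherwise keep the same class name, and it reuses the requests and BeautifulSoup imports from above.

    def getdetail(url):
        # Same idea as above, but tolerant of pages without a .show-content element
        resd = requests.get(url)
        resd.encoding = 'utf-8'
        soupd = BeautifulSoup(resd.text, 'html.parser')
        content = soupd.select_one('.show-content')
        return content.text if content is not None else ''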

    4. Pick a topic you are interested in and repeat the same steps, as preparation for the later exercise on "scraping web data and performing text analysis".

    import requests
    from bs4 import BeautifulSoup
    from datetime import datetime

    gzccurl = 'http://news.gzcc.cn/html/gnyw/'
    res = requests.get(gzccurl)
    res.encoding = 'utf-8'
    soup = BeautifulSoup(res.text, 'html.parser')

    def getdetail(url):
        # Fetch an article page and return the text of its body
        resd = requests.get(url)
        resd.encoding = 'utf-8'
        soupd = BeautifulSoup(resd.text, 'html.parser')
        return soupd.select('.show-content')[0].text

    for news in soup.select('li'):
        if len(news.select('.news-list-title')) > 0:
            title = news.select('.news-list-title')[0].text                # title
            url = news.select('a')[0]['href']                              # link
            time = news.select('.news-list-info')[0].contents[0].text      # time
            dt = datetime.strptime(time, '%Y-%m-%d')                       # parsed date
            detail = getdetail(url)                                        # article body
            print(dt, title, url, detail)
            break

    Result:
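    To feed the later "scrape web data and run text analysis" step, one natural extension (not shown in the original post) is to drop the break, collect every article body, and save the text to a file. A minimal sketch, assuming the soup object and getdetail function defined above; the file name news.txt is only an example:

    details = []
    for news in soup.select('li'):
        if len(news.select('.news-list-title')) > 0:
            url = news.select('a')[0]['href']
            details.append(getdetail(url))        # collect every article body on this list page

    # Save the collected text for later analysis (file name is arbitrary)
    with open('news.txt', 'w', encoding='utf-8') as f:
        f.write('\n'.join(details))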
