1. Get the title, link, time, source, content, and hit count of a single news item, and wrap the logic in a function.
import requests
from bs4 import BeautifulSoup
import re
res = requests.get("http://news.gzcc.cn/html/xibusudi/")
res.encoding = 'utf-8'
soup = BeautifulSoup(res.text,'html.parser')
li = soup.select('li')
def gethits(url_1):
    # Pull the article id out of the detail-page URL, then query the hit-count API
    li_id = re.search('_.*/(.*).html', url_1).group(1)
    hits = requests.get('http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(li_id)).text.split('.')[-1].rstrip("');").lstrip("html('")
    return hits

def getpageinfo(label):
    for title_list in label:
        if len(title_list.select('.news-list-title')) > 0:
            href = title_list.select('a')[0]['href']
            title = title_list.select('.news-list-title')[0].text
            time = title_list.select('span')[0].text
            info = title_list.select('span')[1].text
            res_list = requests.get(href)
            res_list.encoding = 'utf-8'
            soup_list = BeautifulSoup(res_list.text, 'html.parser')
            text_list = soup_list.select('.show-content')[0].text
            hits_list = gethits(href)
            print('时间:', time, '\n标题:', title, '\n链接:', href,
                  '\n来源:', info, '\n点击次数:', hits_list, '\n')
            print('正文:', text_list)
            break  # only the first news item is needed for this step
getpageinfo(li)
Result:
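A note on gethits above: the hit-count API returns a small JavaScript snippet, and the split/rstrip/lstrip chain recovers the number from a trailing fragment that looks roughly like html('1234'); (that is what the strip-based version implies). A minimal, slightly more tolerant sketch, under that same assumption about the response format, extracts the number with a regular expression instead:

import re
import requests

def gethits_re(url_1):
    # Same click-count API; the article id is the last path segment of the detail URL
    li_id = re.search(r'_.*/(.*)\.html', url_1).group(1)
    api = 'http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(li_id)
    text = requests.get(api).text
    # Assumption: the response contains fragments like .html('1234');
    nums = re.findall(r"html\('(\d+)'\)", text)
    # The original code keeps the last fragment, so do the same here
    return nums[-1] if nums else '0'

Either version takes the detail-page URL and returns the count as a string, so it can be swapped into getpageinfo unchanged.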
2. Get the above details for every news item on one news list page, and wrap it in a function.
import requests
from bs4 import BeautifulSoup
import re
url_main="http://news.gzcc.cn/html/xiaoyuanxinwen/"
res = requests.get(url_main)
res.encoding = 'utf-8'
soup = BeautifulSoup(res.text,'html.parser')
li = soup.select('li')
def gethits(url_1):
    li_id = re.search('_.*/(.*).html', url_1).group(1)
    hits = requests.get('http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(li_id)).text.split('.')[-1].rstrip("');").lstrip("html('")
    return hits

def getpageinfo(label):
    for title_list in label:
        if len(title_list.select('.news-list-title')) > 0:
            href = title_list.select('a')[0]['href']
            title = title_list.select('.news-list-title')[0].text
            time = title_list.select('span')[0].text
            info = title_list.select('span')[1].text
            res_list = requests.get(href)
            res_list.encoding = 'utf-8'
            soup_list = BeautifulSoup(res_list.text, 'html.parser')
            text_list = soup_list.select('.show-content')[0].text
            hits_list = gethits(href)
            print('时间:', time, ' 标题:', title, ' 链接:', href, ' 来源:', info, ' 点击次数:', hits_list)
            print('正文:', text_list)
getpageinfo(li)
Result:
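Since the task asks for a reusable function, a variant of getpageinfo that returns the scraped fields instead of printing them can be handier for the later steps. A minimal sketch reusing the same selectors and the gethits helper above (the dictionary keys are my own naming, not part of the original code):

def getpagedetails(label):
    details = []  # one dict per news item on the list page
    for item in label:
        if len(item.select('.news-list-title')) > 0:
            href = item.select('a')[0]['href']
            detail_res = requests.get(href)
            detail_res.encoding = 'utf-8'
            detail_soup = BeautifulSoup(detail_res.text, 'html.parser')
            details.append({
                'title': item.select('.news-list-title')[0].text,
                'link': href,
                'time': item.select('span')[0].text,
                'source': item.select('span')[1].text,
                'content': detail_soup.select('.show-content')[0].text,
                'hits': gethits(href),
            })
    return details

# Usage: news = getpagedetails(li); print(len(news), 'items on this page')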
3. Get the URLs of all news list pages and call the functions above on each one.
import requests
from bs4 import BeautifulSoup
import re

url_main = "http://news.gzcc.cn/html/xiaoyuanxinwen/"
res = requests.get(url_main)
res.encoding = 'utf-8'
soup = BeautifulSoup(res.text, 'html.parser')
li = soup.select('li')

def gethits(url_1):
    li_id = re.search('_.*/(.*).html', url_1).group(1)
    hits = requests.get('http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(li_id)).text.split('.')[-1].rstrip("');").lstrip("html('")
    return hits

def getpageinfo(label):
    for title_list in label:
        if len(title_list.select('.news-list-title')) > 0:
            href = title_list.select('a')[0]['href']
            title = title_list.select('.news-list-title')[0].text
            time = title_list.select('span')[0].text
            info = title_list.select('span')[1].text
            res_list = requests.get(href)
            res_list.encoding = 'utf-8'
            soup_list = BeautifulSoup(res_list.text, 'html.parser')
            text_list = soup_list.select('.show-content')[0].text
            hits_list = gethits(href)

getpageinfo(li)

# The .a1 element on the front page holds the total article count; 10 items per list page
pages = int(soup.select('.a1')[0].text.rstrip('条')) // 10 + 1
for i in range(2, pages + 1):
    url_page = "http://news.gzcc.cn/html/xiaoyuanxinwen/{}.html".format(i)
    res_page = requests.get(url_page)
    res_page.encoding = 'utf-8'
    soup_page = BeautifulSoup(res_page.text, 'html.parser')
    list_page = soup_page.select('li')  # parse the page just fetched, not the front page
    getpageinfo(list_page)
    print(url_page)
Result:
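The page count in step 3 comes from the total article count shown in the .a1 element: with 10 items per list page, a text of, say, '983条' (a made-up number) gives 983 // 10 + 1 = 99 pages. A small sketch that only builds the list of list-page URLs, which can then be handed to getpageinfo page by page:

def listpage_urls(base_url, total_text):
    # total_text is the .a1 text, e.g. '983条' (hypothetical value)
    pages = int(total_text.rstrip('条')) // 10 + 1
    urls = [base_url]  # page 1 is the plain index page
    for i in range(2, pages + 1):
        urls.append('{}{}.html'.format(base_url, i))
    return urls

# Usage: urls = listpage_urls("http://news.gzcc.cn/html/xiaoyuanxinwen/", soup.select('.a1')[0].text)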
4. Complete the crawl of all campus news.
import requests
from bs4 import BeautifulSoup
import re
url_main="http://news.gzcc.cn/html/xiaoyuanxinwen/"
res = requests.get(url_main)
res.encoding = 'utf-8'
soup = BeautifulSoup(res.text,'html.parser')
li = soup.select('li')
def gethits(url_1):
    li_id = re.search('_.*/(.*).html', url_1).group(1)
    hits = requests.get('http://oa.gzcc.cn/api.php?op=count&id={}&modelid=80'.format(li_id)).text.split('.')[-1].rstrip("');").lstrip("html('")
    return hits

def getpageinfo(label):
    for title_list in label:
        if len(title_list.select('.news-list-title')) > 0:
            href = title_list.select('a')[0]['href']
            title = title_list.select('.news-list-title')[0].text
            time = title_list.select('span')[0].text
            info = title_list.select('span')[1].text
            res_list = requests.get(href)
            res_list.encoding = 'utf-8'
            soup_list = BeautifulSoup(res_list.text, 'html.parser')
            text_list = soup_list.select('.show-content')[0].text
            hits_list = gethits(href)
            print('时间:', time, '\n标题:', title, '\n链接:', href,
                  '\n来源:', info, '\n点击次数:', hits_list, '\n')
            print('正文:', text_list)
            # break
getpageinfo(li)
pages = int(soup.select('.a1')[0].text.rstrip('条')) // 10 + 1
for i in range(2, pages + 1):
    url_page = "http://news.gzcc.cn/html/xiaoyuanxinwen/{}.html".format(i)
    res_page = requests.get(url_page)
    res_page.encoding = 'utf-8'
    soup_page = BeautifulSoup(res_page.text, 'html.parser')
    list_page = soup_page.select('li')  # parse the page just fetched, not the front page
    getpageinfo(list_page)
    # break
Result:
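Crawling every list page in step 4 issues a request per page plus one per article, all in a tight loop. A short pause between list pages keeps the load on the server reasonable; a minimal sketch of the same loop with a delay (the 0.5-second value is an arbitrary choice, not taken from the original code):

from time import sleep

pages = int(soup.select('.a1')[0].text.rstrip('条')) // 10 + 1
for i in range(2, pages + 1):
    url_page = "http://news.gzcc.cn/html/xiaoyuanxinwen/{}.html".format(i)
    res_page = requests.get(url_page)
    res_page.encoding = 'utf-8'
    soup_page = BeautifulSoup(res_page.text, 'html.parser')
    getpageinfo(soup_page.select('li'))
    sleep(0.5)  # arbitrary pause so the crawl does not hammer the server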
5. Complete the scrape of data for a topic of your own choosing.
import requests
from bs4 import BeautifulSoup
import re
url_main="http://www.12306.cn/mormhweb/"
res = requests.get(url_main)
res.encoding = 'utf-8'
soup = BeautifulSoup(res.text,'html.parser')
for news in soup.select('li'):
    if len(news.select('a')) > 0:
        title = news.select('a')[0].text
        url = news.select('a')[0]['href']
        # time = news.select('span')[0].contents[0].text
        # print(time, title, url)
        print(title, url)
# The loop below reuses the pagination pattern from step 4; `pages` and a
# site-specific getpageinfo() would have to be defined for gamersky first.
for i in range(2, pages + 1):
    url_page = "http://www.gamersky.com/{}.html".format(i)
    res_page = requests.get(url_page)
    res_page.encoding = 'utf-8'
    soup_page = BeautifulSoup(res_page.text, 'html.parser')
    list_page = soup_page.select('li')  # parse the page just fetched
    getpageinfo(list_page)
    print(url_page)
Result: