Assignment 1
1) Weather forecast experiment
Code
from bs4 import BeautifulSoup
from bs4 import UnicodeDammit
import urllib.request
import sqlite3

class WeatherDB:
    def openDB(self):
        self.con = sqlite3.connect("weathers.db")
        self.cursor = self.con.cursor()
        try:
            self.cursor.execute(
                "create table weathers (wCity varchar(16),wDate varchar(16),wWeather varchar(64),wTemp varchar(32),constraint pk_weather primary key (wCity,wDate))")
        except Exception:
            # The table already exists, so just clear the old rows
            self.cursor.execute("delete from weathers")

    def closeDB(self):
        self.con.commit()
        self.con.close()

    def insert(self, city, date, weather, temp):
        try:
            self.cursor.execute("insert into weathers (wCity,wDate,wWeather,wTemp) values(?,?,?,?)",
                                (city, date, weather, temp))
        except Exception as err:
            print(err)

    def show(self):
        self.cursor.execute("select * from weathers")
        rows = self.cursor.fetchall()
        print("%-18s%-20s%-34s%-16s" % ("city", "date", "weather", "temp"))
        for row in rows:
            print("%-16s%-16s%-32s%-16s" % (row[0], row[1], row[2], row[3]))

class WeatherForecast:
    def __init__(self):
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows; U; Windows NT 6.0 x64; en-US; rv:1.9pre) Gecko/2008072421 Minefield/3.0.2pre"}
        self.cityCode = {"北京": "101010100", "上海": "101020100", "广州": "101280101", "深圳": "101280601"}

    def forecastCity(self, city):
        if city not in self.cityCode.keys():
            print(city + " code cannot be found")
            return
        url = "http://www.weather.com.cn/weather/" + self.cityCode[city] + ".shtml"
        try:
            req = urllib.request.Request(url, headers=self.headers)
            data = urllib.request.urlopen(req)
            data = data.read()
            dammit = UnicodeDammit(data, ["utf-8", "gbk"])
            data = dammit.unicode_markup
            soup = BeautifulSoup(data, "lxml")
            lis = soup.select("ul[class='t clearfix'] li")
            for li in lis:
                try:
                    date = li.select('h1')[0].text
                    weather = li.select('p[class="wea"]')[0].text
                    temp = li.select('p[class="tem"] span')[0].text + "/" + li.select('p[class="tem"] i')[0].text
                    self.db.insert(city, date, weather, temp)
                except Exception:
                    # As with the textbook version, one <li> raises a
                    # "list index out of range" exception here, but the
                    # remaining days are still inserted and printed normally.
                    pass
        except Exception as err:
            print(err)

    def process(self, cities):
        self.db = WeatherDB()
        self.db.openDB()
        for city in cities:
            self.forecastCity(city)
        self.db.show()
        self.db.closeDB()

ws = WeatherForecast()
ws.process(["北京", "上海", "广州", "深圳"])
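As the comment in the inner try/except notes, one <li> raises a "list index out of range" exception while the rest of the forecast is still stored and printed. Below is a minimal defensive sketch, assuming (my guess, not confirmed by the source) that the failing piece is a missing <span> high temperature in one day's entry; extract_temp is a hypothetical replacement for the temp line in forecastCity:

from bs4 import BeautifulSoup

def extract_temp(li):
    # Hypothetical helper: fall back to the <i> element alone when the
    # <span> high temperature is absent (assumed cause of the exception)
    spans = li.select('p[class="tem"] span')
    lows = li.select('p[class="tem"] i')
    if spans and lows:
        return spans[0].text + "/" + lows[0].text
    if lows:
        return lows[0].text
    return ""

# Quick check against a snippet shaped like the forecast page's markup
li = BeautifulSoup('<li><p class="tem"><i>15℃</i></p></li>', "lxml").select("li")[0]
print(extract_temp(li))  # 15℃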
Output
2) Reflections
Assignment 2
1) Stock information scraping experiment
Code
import requests
import re

# Use a GET request to fetch the page and extract the stock data
def getHtml(url):
    r = requests.get(url)
    r.encoding = 'utf-8'
    # Note: the brackets must be escaped; otherwise "[(.*?)]" is read as a
    # character class and the JSON array after "diff": is never matched
    data = re.findall(r'"diff":\[(.*?)\]', r.text)
    return data

url = "http://69.push2.eastmoney.com/api/qt/clist/get?cb=jQuery1124034363182413478865_1601433325654&pn=1&pz=60&po=1&np=1&ut=bd1d9ddb04089700cf9c27f6f7426281&fltt=2&invt=2&fid=f3&fs=m:0+t:6,m:0+t:13,m:0+t:80,m:1+t:2,m:1+t:23&fields=f2,f3,f4,f5,f6,f7,f12,f14,f15,f16,f17,f18&_=1601433325745"
# In the URL, pn is the page number and pz is the number of stock records
# returned; the fields are f2: latest price, f3: change percent, f4: change
# amount, f5: volume, f6: turnover, f7: amplitude, f12: stock code,
# f14: stock name, f15: high, f16: low, f17: today's open, f18: previous
# close (a paginated version of this request is sketched after the
# reflections below)
data = getHtml(url)
# The matched records do not need their '{' and '}', so strip them from the
# ends of the string, then split on "},{", which separates the records while
# discarding the unneeded characters
datas = data[0].strip("{").strip("}").split("},{")
print("%-6s%-8s%-8s%-8s%-7s%-7s%-10s%-16s%-7s%-7s%-7s%-7s%-7s" % ("序号", "股票代码", "股票名称", "最新报价", "涨跌幅", "涨跌额", "成交量", "成交额", "振幅", "最高", "最低", "今开", "昨收"))

def get_data(data):
    db = []
    no = 1
    for data_line in data:  # process the records one by one
        item = data_line.split(",")
        line = []
        line.append(no)  # row number
        for it in item:
            ite = it.split(":")[1]  # each field is stored as f%d:"xx"; keep the part after the ':'
            line.append(ite)
        no = no + 1
        db.append(line)
        print("%-8s%-12s%-10s%-10s%-9s%-9s%-12s%-20s%-9s%-9s%-9s%-9s%-9s" % (line[0], line[7].strip('"'), line[8].strip('"'), line[1], line[2], line[3], line[4], line[5], line[6], line[9], line[10], line[11], line[12]))
    return db

db = get_data(datas)
Code results
2) Reflections
This experiment follows the scraping approach from the Zhihu article: inspect the page's JS requests in the browser, locate the one that carries the needed data, and learn how it is fetched, so that data refreshed dynamically like this can be scraped. I first tried fetching the stock page's HTML directly and found the stock data was empty, so this time I learned to scrape through the JS request instead, which broadened my methods for scraping web data and opened up the wider world of crawlers a bit more.
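Building on this, the endpoint can be queried page by page by varying the pn and pz parameters described in the code comment above. Here is a minimal sketch under that assumption; the helper name fetch_page and the reduced field list (f2, f3, f12, f14) are my own illustration, and dropping the cb callback parameter is an assumption (the regex extraction works on the jQuery-wrapped response as well):

import requests
import re

def fetch_page(pn, pz=20):
    # Hypothetical helper: fetch one page of the list API, assuming pn
    # (page number) and pz (page size) behave as described in the comment
    # in the code above
    url = ("http://69.push2.eastmoney.com/api/qt/clist/get"
           "?pn=" + str(pn) + "&pz=" + str(pz) +
           "&po=1&np=1&ut=bd1d9ddb04089700cf9c27f6f7426281&fltt=2&invt=2"
           "&fid=f3&fs=m:0+t:6,m:0+t:13,m:0+t:80,m:1+t:2,m:1+t:23"
           "&fields=f2,f3,f12,f14")
    r = requests.get(url)
    r.encoding = 'utf-8'
    match = re.findall(r'"diff":\[(.*?)\]', r.text)
    if not match:
        return []
    return match[0].strip("{").strip("}").split("},{")

# Print the raw code/name/price/change records of the first three pages
for pn in range(1, 4):
    for record in fetch_page(pn):
        print(record)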
Assignment 3
1) Stock watchlist scraping experiment
Code
import requests
import re

# Use a GET request to fetch the page and extract the stock data
def getHtml(url):
    r = requests.get(url)
    r.encoding = 'utf-8'
    # print(r.text)
    data = re.findall(r'"data":{(.*?)}', r.text)
    return data

share_no = "1.603118"  # secid used to query the selected stock
url = "http://push2.eastmoney.com/api/qt/stock/get?ut=fa5fd1943c7b386f172d6893dbfba10b&invt=2&fltt=2&fields=f46,f44,f45,f57,f58&secid=" + share_no + "&cb=jQuery1124016573596267024926_1602148452251&_=1602148452288"
data = getHtml(url)
# print(data)
print("%-8s%-8s%-7s%-7s%-7s" % ("股票代码", "股票名称", "今日开", "今日最高", "今日最低"))
item = data[0].split(",")
line = []
for it in item:
    ite = it.split(":")[1]  # each field is stored as f%d:value; keep the part after the ':'
    line.append(ite)
print("%-12s%-8s%-10s%-10s%-9s" % (
    line[3].strip('"'), line[4].strip('"'), line[2], line[0], line[1]))
Code results
2) Reflections
While inspecting the JS requests I found two that contain the needed data: one appears to record the stock's historical data, and the other carries the latest data, as shown in the figure above. For this task's requirements, choosing the latest-data request is much more convenient. Judging and filtering for the right JS request for each need also speeds up one's scraping implementation.
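Since this assignment targets a watchlist of self-selected stocks, the single-stock code above extends naturally to a loop over several secid values. Below is a minimal sketch under that assumption; the two secids added after 1.603118 are made-up placeholders, the cb callback parameter is dropped (the regex extraction works on the response either way), and the field order after parsing (high, low, open, code, name) is inferred from the print statement in the original code:

import requests
import re

def get_stock(secid):
    # Fetch one stock's latest data from the same endpoint used above;
    # omitting cb is an assumption, since the regex below matches the
    # "data" object with or without the jQuery wrapper
    url = ("http://push2.eastmoney.com/api/qt/stock/get"
           "?ut=fa5fd1943c7b386f172d6893dbfba10b&invt=2&fltt=2"
           "&fields=f46,f44,f45,f57,f58&secid=" + secid)
    r = requests.get(url)
    r.encoding = 'utf-8'
    data = re.findall(r'"data":{(.*?)}', r.text)
    return data[0].split(",") if data else None

watchlist = ["1.603118", "0.000001", "1.600000"]  # placeholder secids
print("%-8s%-8s%-7s%-7s%-7s" % ("股票代码", "股票名称", "今日开", "今日最高", "今日最低"))
for secid in watchlist:
    item = get_stock(secid)
    if item is None:
        continue  # skip stocks whose data could not be matched
    line = [it.split(":")[1] for it in item]
    print("%-12s%-8s%-10s%-10s%-9s" % (
        line[3].strip('"'), line[4].strip('"'), line[2], line[0], line[1]))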