Assignment 1:
Use the Selenium framework to crawl product information and images for one category of goods on JD.com.
The code is as follows:
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import urllib.request
import threading
import sqlite3
import os
import datetime
from selenium.webdriver.common.keys import Keys
import time
class MySpider:
headers = {
"User-Agent": "Mozilla/5.0 (Windows; U; Windows NT 6.0 x64; en-US; rv:1.9pre) Gecko/2008072421 Minefield/3.0.2pre"}
imagePath = "D:/data/download"
#No=0
def startUp(self, url, key):
# Initialize the Chrome browser (uncomment the options below to run headless)
#chrome_options = Options()
#chrome_options.add_argument('--headless')
#chrome_options.add_argument('--disable-gpu')
self.driver = webdriver.Chrome()  # pass chrome_options=chrome_options for headless mode
# Initialize counters
self.threads = []
self.No = 0
self.imgNo = 0
# Initializing database
try:
self.con = sqlite3.connect("phones.db")
self.cursor = self.con.cursor()
try:
# Drop the table if it already exists
self.cursor.execute("drop table phones")
except:
pass
try:
# Create a fresh table
sql = "create table phones (mNo varchar(32) primary key, mMark varchar(256),mPrice varchar(32),mNote varchar(1024),mFile varchar(256))"
self.cursor.execute(sql)
except:
pass
except Exception as err:
print(err)
try:
if not os.path.exists(MySpider.imagePath):
os.mkdir(MySpider.imagePath)
images = os.listdir(MySpider.imagePath)
# Clear previously downloaded images out of the folder
for img in images:
s = os.path.join(MySpider.imagePath, img)
os.remove(s)
except Exception as err:
print(err)
self.driver.get(url)
keyInput = self.driver.find_element_by_id("key")
keyInput.send_keys(key)
# Press Enter to submit the search
keyInput.send_keys(Keys.ENTER)
def closeUp(self):
try:
self.con.commit()
self.con.close()
self.driver.close()
except Exception as err:
print(err)
def insertDB(self, mNo, mMark, mPrice, mNote, mFile):
try:
# sqlite3 uses ? placeholders; the table is "phones", as created in startUp
self.cursor.execute(
"insert into phones (mNo,mMark,mPrice,mNote,mFile) values (?,?,?,?,?)",
(mNo, mMark, mPrice, mNote, mFile))
except Exception as err:
print(err)
def showDB(self):
try:
con = sqlite3.connect("phones.db")
cursor =con.cursor()
print("%-8s%-16s%-8s%-16s%s"%("No", "Mark", "Price", "Image", "Note"))
cursor.execute("select mNo,mMark,mPrice,mFile,mNote from phones order by mNo")
rows = cursor.fetchall()
for row in rows:
print("%-8s %-16s %-8s %-16s %s" % (row[0], row[1], row[2], row[3],row[4]))
con.close()
except Exception as err:
print(err)
def download(self, src1, src2, mFile):
data = None
if src1:
try:
req = urllib.request.Request(src1, headers=MySpider.headers)
resp = urllib.request.urlopen(req, timeout=10)
data = resp.read()
except:
pass
if not data and src2:
try:
req = urllib.request.Request(src2, headers=MySpider.headers)
resp = urllib.request.urlopen(req, timeout=10)
data = resp.read()
except:
pass
if data:
print("download begin", mFile)
fobj = open(MySpider.imagePath + "/" + mFile, "wb")
fobj.write(data)
fobj.close()
print("download finish", mFile)
def processSpider(self):
try:
time.sleep(1)
print(self.driver.current_url)
lis =self.driver.find_elements_by_xpath("//div[@id='J_goodsList']//li[@class='gl-item']")
for li in lis:
# We find that the image is either in src or in data-lazy-img attribute
try:
src1 = li.find_element_by_xpath(".//div[@class='p-img']//a//img").get_attribute("src")
except:
src1 = ""
try:
src2 = li.find_element_by_xpath(".//div[@class='p-img']//a//img").get_attribute("data-lazy-img")
except:
src2 = ""
try:
price = li.find_element_by_xpath(".//div[@class='p-price']//i").text
except:
price = "0"
try:
note = li.find_element_by_xpath(".//div[@class='p-name p-name-type-2']//em").text
mark = note.split(" ")[0]
mark = mark.replace("爱心东东
", "")
mark = mark.replace(",", "")
note = note.replace("爱心东东
", "")
note = note.replace(",", "")
except:
note = ""
mark = ""
self.No = self.No + 1
no = str(self.No).zfill(6)  # zero-pad the sequence number to 6 digits
print(no, mark, price)
if src1:
src1 = urllib.request.urljoin(self.driver.current_url, src1)
p = src1.rfind(".")
mFile = no + src1[p:]
elif src2:
src2 = urllib.request.urljoin(self.driver.current_url, src2)
p = src2.rfind(".")
mFile = no + src2[p:]
if src1 or src2:
T = threading.Thread(target=self.download, args=(src1, src2, mFile))
T.setDaemon(False)
T.start()
self.threads.append(T)
else:
mFile = ""
self.insertDB(no, mark, price, note, mFile)
# Fetch the next page until the last page is reached
try:
self.driver.find_element_by_xpath("//span[@class='p-num']//a[@class='pn-next disabled']")
except:
nextPage = self.driver.find_element_by_xpath("//span[@class='p-num']//a[@class='pn-next']")
time.sleep(10)
#if(self.No<50):
nextPage.click()
self.processSpider()
except Exception as err:
print(err)
def executeSpider(self, url, key):
starttime = datetime.datetime.now()
print("Spider starting......")
self.startUp(url, key)
print("Spider processing......")
self.processSpider()
print("Spider closing......")
self.closeUp()
for t in self.threads:
t.join()
print("Spider completed......")
endtime = datetime.datetime.now()
elapsed = (endtime - starttime).seconds
print("Total ", elapsed, " seconds elapsed")
url = "http://www.jd.com"
spider = MySpider()
while True:
print("1.爬取")
print("2.显示")
print("3.退出")
s = input("请选择(1,2,3):")
if s == "1":
spider.executeSpider(url, "诺基亚老人机")
continue
elif s == "2":
spider.showDB()
continue
elif s == "3":
break
Run results
Reflections
1. The teacher's sample code walked me through the basic steps of a Selenium crawl: start the driver, load the page, locate elements by XPath, and page through the results.
2. Selenium is probably not suited to large-scale crawling; driving a real browser makes it painfully slow (one mitigation is sketched below).
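One mitigation, already hinted at by the commented-out options in startUp, is to run Chrome headless so no window is rendered. A minimal sketch of re-enabling them, using the same Selenium 3 Options API as the code above:

from selenium import webdriver
from selenium.webdriver.chrome.options import Options

# Re-enable the headless flags that startUp leaves commented out;
# skipping UI rendering usually shortens a Selenium crawl noticeably.
chrome_options = Options()
chrome_options.add_argument('--headless')
chrome_options.add_argument('--disable-gpu')
driver = webdriver.Chrome(chrome_options=chrome_options)  # Selenium 3 keyword
driver.get("http://www.jd.com")
print(driver.title)  # the page loads even though no window is shown
driver.quit()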
Assignment 2:
Use the Selenium framework plus MySQL storage to crawl stock data from three boards: "SH & SZ A-shares" (沪深A股), "Shanghai A-shares" (上证A股), and "Shenzhen A-shares" (深证A股).
The code is as follows:
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import time
import pymysql
def spider():
global n
time.sleep(1)
lis = driver.find_elements_by_xpath('//*[@id="table_wrapper-table"]/tbody//tr')
for li in lis:
array = li.text.split(" ")
count = array[0]
id = array[1]
name = array[2]
new_price = array[6]
extent = array[7]
change_price = array[8]
number = array[9]
money = array[10]
promote = array[11]
highest = array[12]
lowest = array[13]
today_begin = array[14]
yesterday_over = array[15]
cursor.execute("insert into stock" + str(n) + "(count,id,sName,new_price,extent,change_price,sNumber,money,"
"promote,highest,lowest,today_begin,yesterday_over) "
"values( %s, % s, % s, % s, % s, % s, % s, % s, % s, % s, % s, % s, % s)",
(count, id, name, new_price, extent, change_price, number, money, promote, highest,
lowest, today_begin, yesterday_over))
# Pagination: click through to the next page
def nextpage():
try:
driver.find_element_by_xpath(
"//div[@class='dataTables_paginate paging_input']//a[@class='next paginate_button disabled']")
print("没有下一页了")
except:
nextPage = driver.find_element_by_xpath("//*[@id='main-table_paginate']/a[2]")
time.sleep(3)
nextPage.click()
spider()
def changeStock1():
try:
global n
n = 1
# On the first run, create the stock1 table (commented out afterwards)
'''sql1 = ("create table stock1(count int,id varchar(16),sName varchar(16),new_price varchar(16),"
"extent varchar(16),change_price varchar(16),sNumber varchar(16),money varchar(16),promote varchar(16),"
"highest varchar(16),lowest varchar(16),today_begin varchar(16),yesterday_over varchar(16));")
cursor.execute(sql1)'''
cursor.execute("delete from stock1")
# The page opens on the SH & SZ A-shares board by default, so no click is needed
'''hs = driver.find_element_by_xpath('//*[@id="nav_hs_a_board"]')
time.sleep(3)
hs.click()'''
# Start crawling
print("*** Start crawling SH & SZ A-share data ***")
spider()
# The full list is very long, so only one page turn is performed
nextpage()
print("***爬取结束,数据已存入数据库中***")
except Exception as err:
print(err)
def changeStock2():
try:
global n
n = 2
# Create the stock2 table (commented out after the first run)
'''sql2 = ("create table stock2(count int,id varchar(16),sName varchar(16),new_price varchar(16),extent "
"varchar(16),change_price varchar(16),sNumber varchar(16),money varchar(16),promote varchar(16),"
"highest varchar(16),lowest varchar(16),today_begin varchar(16),yesterday_over varchar(16));")
cursor.execute(sql2)'''
cursor.execute("delete from stock2")
# Switch to the Shanghai A-shares board
sh = driver.find_element_by_xpath('//*[@id="nav_sh_a_board"]')
time.sleep(3)
sh.click()
print("***开始爬取上证A股的数据***")
spider()
nextpage()
print("***爬取结束,数据已存入数据库中***")
except Exception as err:
print(err)
def changeStock3():
try:
global n
n = 3
# Create the stock3 table (commented out after the first run)
'''sql3 = ("create table stock3(count int,id varchar(16),sName varchar(16),new_price varchar(16),"
"extent varchar(16),change_price varchar(16),sNumber varchar(16),money varchar(16),promote varchar(16),"
"highest varchar(16),lowest varchar(16),today_begin varchar(16),yesterday_over varchar(16));")
cursor.execute(sql3)'''
cursor.execute("delete from stock3")
# Switch to the Shenzhen A-shares board
sz = driver.find_element_by_xpath('//*[@id="nav_sz_a_board"]')
time.sleep(3)
sz.click()
print("***开始爬取深证A股的数据***")
spider()
nextpage()
print("***爬取结束,数据已存入数据库中***")
except Exception as err:
print(err)
# The URL opens on the SH & SZ A-shares board by default
url = 'http://quote.eastmoney.com/center/gridlist.html#hs_a_board'
# Run Chrome headless so no browser window opens
chrome_options = Options()
chrome_options.add_argument('--headless')
chrome_options.add_argument('--disable-gpu')
driver = webdriver.Chrome(chrome_options=chrome_options)
driver.get(url)
# Maximize the browser window
driver.maximize_window()
# Connect to the MySQL database
con = pymysql.connect(host='127.0.0.1', port=3306, user='root', passwd='123456', db='mydb', charset='utf8')
cursor = con.cursor(pymysql.cursors.DictCursor)
# Choose a board to crawl
print("Boards:\n1. SH & SZ A-shares\n2. Shanghai A-shares\n3. Shenzhen A-shares")
n = input("Select the board to crawl: ")
if n == "1":
changeStock1()
if n == "2":
changeStock2()
if n == "3":
changeStock3()
# Commit and close the connection
con.commit()
con.close()
Run results
Reflections
1. Selenium simulates a human driving the browser, so when the window is too small a click can land on an element that is off-screen and raise an "element not clickable" error; maximizing the window with driver.maximize_window() fixes this.
2. Selenium crawling is slow, and the inserted rows live only in the connection's transaction buffer while the program runs; nothing reaches the database until con.commit() is called just before closing.
3. After clicking to a new page you must sleep briefly so the table can refresh, otherwise the rows you scrape still belong to the previous page (a more robust wait is sketched below).
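As a more robust alternative to the fixed time.sleep(3), Selenium's WebDriverWait can block until the old table has actually been replaced. A minimal sketch reusing the table XPath from spider() above; click_and_wait is a hypothetical helper, and it assumes the page turn detaches the old tbody from the DOM:

from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

def click_and_wait(driver, next_link, timeout=10):
    # keep a handle on the current table body before clicking
    old_tbody = driver.find_element_by_xpath('//*[@id="table_wrapper-table"]/tbody')
    next_link.click()
    # staleness_of fires once the old element is detached, i.e. the table re-rendered
    WebDriverWait(driver, timeout).until(EC.staleness_of(old_tbody))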
Assignment 3:
Use the Selenium framework plus MySQL to crawl course information from icourse163.org (the Chinese MOOC site).
The code is as follows:
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import time
import pymysql
def spider():
time.sleep(1)
global id
# Collect all course cards on the page as a list
lis = driver.find_elements_by_xpath('//div[@class="_1aoKr"]//div[@class="_2mbYw"]')
for li in lis:
time.sleep(1)
li.click()
# Get the window handles: index 0 is the original page, index 1 the newly opened one
window = driver.window_handles
# Switch to the new window
driver.switch_to.window(window[1])
time.sleep(1)
id += 1
course = driver.find_element_by_xpath('//*[@id="g-body"]/div[1]/div/div[3]/div/div[1]/div[1]/span[1]').text
teacher = driver.find_element_by_xpath('//*[@id="j-teacher"]//h3[@class="f-fc3"]').text
collage = driver.find_element_by_xpath('//*[@id="j-teacher"]/div/a/img').get_attribute('alt')
process = driver.find_element_by_xpath('//*[@id="course-enroll-info"]/div/div[1]/div[2]/div[1]').text
count = driver.find_element_by_xpath('//*[@id="course-enroll-info"]/div/div[2]/div[1]/span').text
brief = driver.find_element_by_xpath('//*[@id="j-rectxt2"]').text
print(id)
print(course)
print(teacher)
print(collage)
print(process)
print(count)
print(brief)
cursor.execute("insert into mooc(id, course, teacher, collage, process, count, brief) "
"values( % s, % s, % s, % s, % s, % s, % s)",
(id, course, teacher, collage, process, count, brief))
# Close the course window
driver.close()
# Switch back to the original window for the next card
driver.switch_to.window(window[0])
print('***********************************')
def all_spider():
global id
spider()
time.sleep(1)
# id<21时仅爬取两页内容
if (driver.find_elements_by_xpath('//a[@class="_3YiUU "]')[-1].text == '下一页') & (id < 21):
next = driver.find_elements_by_xpath('//a[@class="_3YiUU "]')[-1]
next.click()
# Scroll back to the top of the page via JavaScript
driver.execute_script("window.scrollTo(0, 0)")
all_spider()
url = 'https://www.icourse163.org/channel/2001.htm'
# Headless options (left disabled here so the browser window stays visible)
chrome_options = Options()
'''chrome_options.add_argument('--headless')
chrome_options.add_argument('--disable-gpu')'''
driver = webdriver.Chrome(chrome_options=chrome_options)
driver.get(url)
# Maximize the browser window
driver.maximize_window()
# Connect to the MySQL database
con = pymysql.connect(host='127.0.0.1', port=3306, user='root', passwd='123456', db='mydb', charset='utf8')
cursor = con.cursor(pymysql.cursors.DictCursor)
sql = ("create table mooc(id int,course varchar(32),teacher varchar(16),collage varchar(32),process varchar(64),"
"count varchar(64),brief text);")
# Run once on the first execution to create the table
'''cursor.execute(sql)'''
cursor.execute("delete from mooc")
id = 0
print("开始爬取")
all_spider()
print("爬取结束")
# Commit and close the connection
con.commit()
con.close()
Run results
Reflections
1. Clicking a course card opens a new browser window; before closing it you must fetch the window handles and switch to the new one, otherwise driver.close() closes the original window (see the sketch after this list).
2. The MOOC pagination is a trap: the "previous" and "next" buttons have exactly the same element attributes, so without checking the button text the crawler bounces back and forth between the last two pages forever, because an element matching the XPath always exists.
3. After turning the page the script could not locate the next course card; the exact cause is unclear, but scrolling back to the top of the page via JavaScript works around it.
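For reference, here is the window-handle pattern from point 1 pulled out into a minimal sketch (same Selenium 3 API as the code above; open_and_scrape and card are illustrative names, not part of the original script):

def open_and_scrape(driver, card):
    original = driver.current_window_handle
    card.click()  # opens the course detail page in a second window
    # switch to whichever handle is not the original one
    new = [h for h in driver.window_handles if h != original][0]
    driver.switch_to.window(new)
    # ... scrape the detail page here ...
    driver.close()                     # closes only the NEW window
    driver.switch_to.window(original)  # back to the course list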