• Assignment 5


    Assignment ①:
     Requirements:

    • Become proficient with Selenium: locating HTML elements, crawling Ajax-loaded pages, and waiting for HTML elements (a minimal explicit-wait sketch follows this list).
    • Use the Selenium framework to crawl product information and images for one category of goods on JD (jd.com).
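     The code in 1) below paces itself with time.sleep; as a sketch of the explicit-wait pattern the requirement names (assuming JD's search box id "key", the same element startUp uses), one could write:

    from selenium import webdriver
    from selenium.webdriver.common.by import By
    from selenium.webdriver.support.ui import WebDriverWait
    from selenium.webdriver.support import expected_conditions as EC

    driver = webdriver.Chrome()
    driver.get("http://www.jd.com")
    # Block for up to 10 seconds until the search box is present, instead of sleeping a fixed time
    keyInput = WebDriverWait(driver, 10).until(
        EC.presence_of_element_located((By.ID, "key")))
    keyInput.send_keys("手机")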

     Output: the MySQL output is shown below

    mNo     mMark       mPrice   mNote                           mFile
    000001  三星Galaxy   9199.00  三星Galaxy Note20 Ultra 5G...    000001.jpg
    000002  ...         ...      ...                             ...

     1) Code:

    from selenium import webdriver
    from selenium.webdriver.chrome.options import Options
    import urllib.request
    import threading
    import os
    import pymysql
    import datetime
    from selenium.webdriver.common.keys import Keys
    import time
    
    class MySpider:
        headers = {
            "User-Agent": "Mozilla/5.0 (Windows; U; Windows NT 6.0 x64; en-US; rv:1.9pre) Gecko/2008072421 Minefield/3.0.2pre"}
        imagePath = "download"
        def startUp(self, url, key):
            # Initializing Chrome browser
            chrome_options = Options()
            chrome_options.add_argument('--headless')
            chrome_options.add_argument('--disable-gpu')
            self.driver = webdriver.Chrome(chrome_options=chrome_options)
            # Initializing variables
            self.threads = []
            self.No = 0
            self.imgNo = 0
            # Initializing database
            try:
                self.con = pymysql.connect(host="127.0.0.1", port=3306, user="root",
                                           passwd="root", db="crawler", charset="utf8")
                self.cursor = self.con.cursor(pymysql.cursors.DictCursor)
                try:
                    # Drop the table if it already exists
                    self.cursor.execute("drop table if exists phones")
                except:
                    pass
                try:
                    # Create a fresh table
                    sql = "create table phones (mNo  varchar(32) primary key, mMark varchar(256),mPrice varchar(32),mNote varchar(1024),mFile varchar(256))"
                    self.cursor.execute(sql)
                except:
                    pass
            except Exception as err:
                print(err)
            # Initializing images folder
            try:
                if not os.path.exists(MySpider.imagePath):
                    os.mkdir(MySpider.imagePath)
                images = os.listdir(MySpider.imagePath)
                for img in images:
                    s = os.path.join(MySpider.imagePath, img)
                    os.remove(s)
            except Exception as err:
                print(err)
            self.driver.get(url)
            keyInput = self.driver.find_element_by_id("key")
            keyInput.send_keys(key)
            keyInput.send_keys(Keys.ENTER)
        def closeUp(self):
            try:
                self.con.commit()
                self.con.close()
                self.driver.close()
            except Exception as err:
                print(err)
        def insertDB(self, mNo, mMark, mPrice, mNote, mFile):
            try:
                sql = "insert into phones (mNo,mMark,mPrice,mNote,mFile) values (?,?,?,?,?)"
                self.cursor.execute(sql, (mNo, mMark, mPrice, mNote, mFile))
            except Exception as err:
                print(err)
        def showDB(self):
            try:
                # Read back from the same MySQL database the spider writes to
                con = pymysql.connect(host="127.0.0.1", port=3306, user="root",
                                      passwd="root", db="crawler", charset="utf8")
                cursor = con.cursor()
                print("%-8s%-16s%-8s%-16s%s" % ("No", "Mark", "Price", "Image", "Note"))
                cursor.execute("select mNo,mMark,mPrice,mFile,mNote from phones order by mNo")
                rows = cursor.fetchall()
                for row in rows:
                    print("%-8s %-16s %-8s %-16s %s" % (row[0], row[1], row[2], row[3], row[4]))
                con.close()
            except Exception as err:
                print(err)
        def download(self, src1, src2, mFile):
            data = None
            if src1:
                try:
                    req = urllib.request.Request(src1, headers=MySpider.headers)
                    resp = urllib.request.urlopen(req, timeout=10)
                    data = resp.read()
                except:
                    pass
            if not data and src2:
                try:
                    req = urllib.request.Request(src2, headers=MySpider.headers)
                    resp = urllib.request.urlopen(req, timeout=10)
                    data = resp.read()
                except:
                    pass
            if data:
                print("download begin", mFile)
                fobj = open(os.path.join(MySpider.imagePath, mFile), "wb")
                fobj.write(data)
                fobj.close()
                print("download finish", mFile)
        def processSpider(self):
            try:
                time.sleep(1)
                print(self.driver.current_url)
                lis = self.driver.find_elements_by_xpath("//div[@id='J_goodsList']//li[@class='gl-item']")
                for li in lis:
                    # We find that the image is either in the src or in the data-lazy-img attribute
                    try:
                        src1 = li.find_element_by_xpath(".//div[@class='p-img']//a//img").get_attribute("src")
                    except:
                        src1 = ""
                    try:
                        src2 = li.find_element_by_xpath(".//div[@class='p-img']//a//img").get_attribute("data-lazy-img")
                    except:
                        src2 = ""
                    try:
                        price = li.find_element_by_xpath(".//div[@class='p-price']//i").text
                    except:
                        price = "0"
                    try:
                        note = li.find_element_by_xpath(".//div[@class='p-name p-name-type-2']//em").text
                        mark = note.split(" ")[0]
                        mark = mark.replace("爱心东东\n", "")
                        mark = mark.replace(",", "")
                        note = note.replace("爱心东东\n", "")
                        note = note.replace(",", "")
                    except:
                        note = ""
                        mark = ""
                    self.No = self.No + 1
                    no = str(self.No)
                    while len(no) < 6:
                        no = "0" + no
                    print(no, mark, price)
                    if src1:
                        src1 = urllib.request.urljoin(self.driver.current_url, src1)
                        p = src1.rfind(".")
                        mFile = no + src1[p:]
                    elif src2:
                        src2 = urllib.request.urljoin(self.driver.current_url, src2)
                        p = src2.rfind(".")
                        mFile = no + src2[p:]
                    if src1 or src2:
                        T = threading.Thread(target=self.download, args=(src1, src2, mFile))
                        T.setDaemon(False)
                        T.start()
                        self.threads.append(T)
                    else:
                        mFile = ""
                    self.insertDB(no, mark, price, note, mFile)
                # Fetch the next page until the "next" button is disabled on the last page
                try:
                    self.driver.find_element_by_xpath("//span[@class='p-num']//a[@class='pn-next disabled']")
                except:
                    nextPage = self.driver.find_element_by_xpath("//span[@class='p-num']//a[@class='pn-next']")
                    time.sleep(10)
                    nextPage.click()
                    self.processSpider()
            except Exception as err:
                print(err)
        def executeSpider(self, url, key):
            starttime = datetime.datetime.now()
            print("Spider starting......")
            self.startUp(url, key)
            print("Spider processing......")
            self.processSpider()
            print("Spider closing......")
            self.closeUp()
            for t in self.threads:
                t.join()
            print("Spider completed......")
            endtime = datetime.datetime.now()
            elapsed = (endtime - starttime).seconds
            print("Total ", elapsed, " seconds elapsed")
    url = "http://www.jd.com"
    spider = MySpider()
    while True:
        print("1.爬取")
        print("2.显示")
        print("3.退出")
        s = input("请选择(1,2,3):")
        if s == "1":
            spider.executeSpider(url, "手机")
            continue
        elif s == "2":
            spider.showDB()
            continue
        elif s == "3":
            break
    
     Image: (the other two sections look the same)

     2) Reflections: This experiment was a re-creation of the teacher's PPT; it made me familiar with the Selenium workflow and how to use it.

    Assignment ②:
     Requirements:

    • Become proficient with Selenium: locating HTML elements, crawling Ajax-loaded pages, and waiting for HTML elements.
    • Use the Selenium framework + MySQL storage to crawl the stock data of the three boards "沪深A股" (SH & SZ A shares), "上证A股" (Shanghai A shares) and "深证A股" (Shenzhen A shares).

     Output: the MySQL output is shown below

    No.  Code    Name   Latest  Change%  Change  Volume   Turnover  Amplitude  High  Low    Open  Prev Close
    1    688093  N世华   28.47   62.22%   10.92   26.13万  7.6亿      22.34      32.0  28.08  30.2  17.55
    2    ...     ...    ...     ...      ...     ...      ...       ...        ...   ...    ...   ...
     1) Code

    Create the table (just one of the three; for the other two, change the hs in the table name to sh or sz, or use the scripted version after the SQL)

    CREATE TABLE hs_stock(
    	`rank` VARCHAR(5),  -- quoted because RANK is a reserved word in MySQL 8+
    	stockCode VARCHAR(10),
    	stockName VARCHAR(10),
    	latestPrice VARCHAR(10),
    	changeRange VARCHAR(10),
    	changeValue VARCHAR(10),
    	dealNumber VARCHAR(10),
    	dealTotal VARCHAR(10),
    	amplitude VARCHAR(10),
    	maxPrice VARCHAR(10),
    	minPrice VARCHAR(10),
    	today VARCHAR(10),
    	yesterday VARCHAR(10)
    )DEFAULT CHARACTER SET = utf8;
    
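     Rather than pasting the statement three times, a small sketch (assuming the same crawler database and credentials as the script below) could create all three tables in one loop; the back-quotes keep `rank` legal on MySQL 8+:

    import pymysql

    db = pymysql.connect(host="127.0.0.1", port=3306, user="root",
                         passwd="root", db="crawler", charset="utf8")
    cursor = db.cursor()
    columns = ["rank", "stockCode", "stockName", "latestPrice", "changeRange",
               "changeValue", "dealNumber", "dealTotal", "amplitude",
               "maxPrice", "minPrice", "today", "yesterday"]
    colDef = ",".join("`%s` VARCHAR(10)" % c for c in columns)
    for prefix in ("hs", "sh", "sz"):
        # One table per board: hs_stock, sh_stock, sz_stock
        cursor.execute("CREATE TABLE IF NOT EXISTS %s_stock(%s) DEFAULT CHARACTER SET = utf8"
                       % (prefix, colDef))
    db.commit()
    db.close()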

     Crawl the stocks

    from selenium import webdriver
    from time import sleep
    import pymysql,threading
    
    def getStock(url):
        # Connect to the database and get a cursor
        db = pymysql.connect(host="127.0.0.1",port=3306,user="root",passwd="root",db="crawler",charset="utf8")
        cursor = db.cursor()

        driver = webdriver.Chrome()
        driver.get(url)
        # url[-10:-7] is hs_, sh_ or sz_ for the three boards, which selects the target table
        tableName = url[-10:-7]+"stock"
        # Get the total page count: we want all of them
        page = driver.find_elements_by_xpath("//*[@id='main-table_paginate']/span[1]//a")[-1].get_attribute("data-index")

        for i in range(int(page)):
            trList = driver.find_elements_by_css_selector("#table_wrapper-table > tbody > tr")
            for tr in trList:
                d = tr.text.split()
                d = d[0:3]+d[6:16]
                # Storing the values in a list means execute() only needs the list name
                cursor.execute("insert into "+tableName+" values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)",d)
            db.commit()
            # Click "next page" and wait, unless this is already the last page
            if i < int(page) - 1:
                driver.find_element_by_xpath("//*[@id='main-table_paginate']/a[2]").click()
                sleep(3)
        db.close()
        driver.close()
    
    url = "http://quote.eastmoney.com/center/gridlist.html#"
    stockType=["hs_a_board","sh_a_board","sz_a_board"]
    # One thread per board to speed things up
    for stype in stockType:
        # args must be a tuple; a single argument needs a trailing comma
        T = threading.Thread(target=getStock, args=(url+stype,))
        T.start()
    

     Image:

     2) Reflections: This site has been crawled many times before and everyone knows it well by now. What I took away from this experiment is that for inserts you can collect the values in a list and pass the list itself to execute(), instead of spelling out lots of variables; also, starting threads apparently takes just two lines rather than the long version in the textbook.
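     Distilled, the two takeaways look like this (table and columns as above; the row values are the sample data from the output table):

    import pymysql, threading

    db = pymysql.connect(host="127.0.0.1", port=3306, user="root",
                         passwd="root", db="crawler", charset="utf8")
    cursor = db.cursor()

    # Takeaway 1: collect the values in a list and pass the list itself, one %s per column
    row = ["1", "688093", "N世华", "28.47", "62.22%", "10.92", "26.13万",
           "7.6亿", "22.34", "32.0", "28.08", "30.2", "17.55"]
    cursor.execute("insert into hs_stock values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)", row)
    db.commit()
    db.close()

    # Takeaway 2: starting a worker thread really is just two lines
    t = threading.Thread(target=print, args=("crawl one board here",))
    t.start()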

    Assignment ③:
     Requirements:

    • Become proficient with Selenium: locating HTML elements, simulating user login, crawling Ajax-loaded pages, and waiting for HTML elements.
    • Use Selenium + MySQL to crawl course information from the Chinese MOOC site (icourse163.org)

     Output: the MySQL output is shown below
    1) Code:
     Create the table

    CREATE TABLE course(
    	Id INT PRIMARY KEY AUTO_INCREMENT,
    	cCourse VARCHAR(20),
    	cCollege VARCHAR(20),
    	cTeam VARCHAR(30),
    	cCount VARCHAR(10),
    	cProcess VARCHAR(30),
    	cBrief VARCHAR(200)
    )DEFAULT CHARACTER SET = utf8;
    

     Simulated login
      The process went fairly smoothly, except that during login send_keys() on the account field had no effect while the password field worked fine, so I never suspected a frame problem.
      After searching on Baidu I picked up the frame-switching trick from a blog post, which solved a problem that had me stuck for hours.

    from selenium import webdriver
    from time import sleep
    
    url = "https://www.icourse163.org"
    driver = webdriver.Chrome()
    driver.get(url)
    sleep(1)
    driver.find_element_by_xpath("//*[@id="app"]/div/div/div[1]/div[3]/div[3]/div").click()
    sleep(1)
    driver.find_element_by_xpath("//div[starts-with(@id,'auto-id')]/div/div/div/div[2]/span").click()
    sleep(1)
    driver.find_element_by_xpath("//div[starts-with(@id,'auto-id')]/div/div/div/div/div[1]/div/div[1]/div[1]/ul/li[2]").click()
    sleep(1)
    # The login form lives inside an iframe, so switch into it before sending keys
    driver.switch_to.frame(driver.find_elements_by_tag_name("iframe")[1])
    sleep(1)
    driver.find_element_by_xpath("//input[@id='phoneipt']").send_keys("13395068352")
    sleep(1)
    driver.find_element_by_xpath("//input[@placeholder="请输入密码"]").send_keys("xxx11xxx")
    sleep(1)
    driver.find_element_by_xpath("//a[@id="submitBtn"]").click()
    sleep(3) 
    

     Crawl the course information

    from selenium import webdriver
    from time import sleep
    import pymysql
    # Windows get switched a lot below, so wrap the switching in a helper
    def switchWindow(index):
        windows = driver.window_handles 
        driver.switch_to.window(windows[index])
    # Connect to the database
    db = pymysql.connect(host="127.0.0.1",port=3306,user="root",passwd="root",db="crawler",charset="utf8")
    cursor = db.cursor()
    
    url = "https://www.icourse163.org"
    driver = webdriver.Chrome()
    driver.get(url)
    driver.maximize_window()
    
    driver.find_element_by_xpath('//div[@class="u-baseinputui"]/input[@class="j-textarea inputtxt"]').send_keys("生化")
    sleep(1)
    driver.find_element_by_xpath('//div[@class="u-search-icon"]/span[@class="u-icon-search2 j-searchBtn"]').click()
    sleep(1)
    # Get the total page count: with only one page there is no pagination widget, pageList is empty and page falls back to 1; otherwise the total is the text of the second-to-last link
    pageList = driver.find_elements_by_xpath("//a[@class='th-bk-main-gh']")
    page = int(pageList[-2].text) if len(pageList) else 1
    
    for i in range(page):
        # all courses on this page
        courseList = driver.find_elements_by_xpath('//div[@class="u-clist f-bgw f-cb f-pr j-href ga-click"]')
        # number to crawl per page: at most 3 courses, or all of them if fewer; enough to make the point
        length = min(3, len(courseList))
        for course in courseList[:length]:
            info = []  # collect the fields in a list to simplify the insert
            # switch back to the main window
            switchWindow(0)
            sleep(1)
            courseName = course.find_element_by_xpath(".//span[@class=' u-course-name f-thide']").text
            info.append(courseName)
            college = course.find_element_by_xpath(".//a[@class='t21 f-fc9']").text
            info.append(college)
            team = course.find_element_by_xpath(".//a[@class='f-fc9']").text
            info.append(team)
            count = course.find_element_by_xpath(".//span[@class='hot']").text
            count = count[:count.index("人")+1]
            info.append(count)
            # some fields are only available on the course detail page
            course.click()
            sleep(1)
            # switch to the newly opened window
            switchWindow(-1)
            sleep(2)
            time = driver.find_element_by_xpath("//div[@class='course-enroll-info_course-info_term-info_term-time']/span[2]").text
            brief = driver.find_element_by_xpath("//div[@id='j-rectxt2']").text
            driver.close()
            info.append(time)
            info.append(brief)
            cursor.execute("insert into course(cCourse,cCollege,cTeam,cCount,cProcess,cBrief) values (%s,%s,%s,%s,%s,%s)",info)
            db.commit()
        
        # the loop above ends in a child window, so remember to switch back
        switchWindow(0)
        if page > 1 and i < page - 1:  # click "next page" only when there is more than one page and this is not the last
            driver.find_elements_by_xpath("//a[@class='th-bk-main-gh']")[-1].click()
            sleep(1)
    db.close()
    

     2) Reflections:
    At first I did not adjust the window size, so clicking courses only worked for the first two and the third raised an error. 浩大将军 tapped me on the head three times ("one begets two, two begets three, three begets all things"), hinting that setting the window to full screen would solve it. The other lesson: you must switch window handles, or you get element-not-found errors.
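     Maximizing works because Selenium cannot click a course card that sits outside the viewport. An alternative I did not use here (a minimal sketch; the tag-name locator is only illustrative) is to scroll the element into view before clicking:

    from selenium import webdriver

    driver = webdriver.Chrome()
    driver.get("https://www.icourse163.org")
    # Hypothetical target element; in the script above this would be a course card
    card = driver.find_element_by_tag_name("a")
    # Scroll the element into the viewport so the click cannot miss it
    driver.execute_script("arguments[0].scrollIntoView();", card)
    card.click()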
