• Python source code for crawling the Douban Top 250 movies


    Database version (SQLite)
    # -*- coding: utf-8 -*-
    import sys
    from bs4 import BeautifulSoup
    import re
    import urllib.request,urllib.error
    import xlwt
    import sqlite3
    # Rule for the movie detail link
    findlink=re.compile(r'<a href="(.*?)">')# compiled regex object describing the rule
    # Movie poster image
    findImgsrc=re.compile(r'<img.*src="(.*?)"',re.S)# re.S lets "." match newlines too
    # Movie title
    findtitle=re.compile(r'<span class="title">(.*?)</span>')
    # Movie rating
    findRating=re.compile(r'<span class="rating_num" property="v:average">(.*?)</span>')
    # Number of ratings
    findjudge=re.compile(r'<span>(\d*)人评价</span>')
    # One-line summary
    findinq=re.compile(r'<span class="inq">(.*)</span>')
    # Related information (director, cast, year)
    findBd=re.compile(r'<p class="">(.*?)</p>',re.S)


    def main():
        baseurl="https://movie.douban.com/top250?start="
        # crawl the pages
        datalist=getDate(baseurl)
        dbpath="movie.db"
        saveData2DB(datalist,dbpath)


    # Fetch the HTML of a single url
    def askurl(url):
        head={"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.82 Safari/537.36"}
        request=urllib.request.Request(url,headers=head)
        html=""
        try:
            response=urllib.request.urlopen(request)
            html=response.read().decode("utf-8")
            # print(html)
        except Exception as e:
            print(e)
        return html

    # Crawl all 10 pages and parse each movie entry
    def getDate(baseurl):
        datalist=[]
        for i in range(0,10):
            url=baseurl+str(i*25)
            html=askurl(url)
            soup=BeautifulSoup(html,"html.parser")
            for item in soup.find_all('div',class_="item"):
                data=[]
                item=str(item)
                # movie detail link
                link=re.findall(findlink,item)[0]
                data.append(link)

                imgsrc=re.findall(findImgsrc,item)[0]
                data.append(imgsrc)

                title = re.findall(findtitle, item)
                if len(title)==2:
                    ctitle=title[0]# Chinese title
                    data.append(ctitle)
                    otitle=title[1].replace("/","")# strip the "/" separator
                    data.append(otitle)# foreign title
                else:
                    data.append(title[0])
                    data.append(' ')# leave the foreign title blank

                Rating = re.findall(findRating, item)[0]
                data.append(Rating)

                judge= re.findall(findjudge, item)[0]
                # print(judge)
                data.append(judge)

                inq = re.findall(findinq, item)
                if len(inq)!=0:
                    inq=inq[0].replace("。","")# drop the trailing full stop
                    data.append(inq)
                else:
                    data.append(" ")# leave the summary blank

                Bd = re.findall(findBd, item)[0]
                Bd=re.sub(r'<br(\s+)?/>(\s+)?'," ",Bd)# strip <br/> tags
                Bd=re.sub('/'," ",Bd)
                data.append(Bd.strip())

                datalist.append(data)
        return datalist

    # Save the data to an Excel workbook
    def saveData(datalist,savepath):
        book=xlwt.Workbook(encoding="utf-8")
        sheet=book.add_sheet('豆瓣电影top250',cell_overwrite_ok=True)
        col=('电影详情连接',"图片连接","影片中文名","影片外国名","评分","评价人数","概况","相关信息")
        for i in range(0,8):
            sheet.write(0,i,col[i])
        for i in range(0,250):
            print("第%d条"%(i+1))
            data=datalist[i]
            for j in range(0,8):
                sheet.write(i+1,j,data[j])
        book.save('豆瓣电影Top250.xls')

    def saveData2DB(datalist,dbpath):
        init_db(dbpath)
        conn=sqlite3.connect(dbpath)
        cur=conn.cursor()
        try:
            for data in datalist:
                for index in range(len(data)):
                    if index==4 or index==5:# score and rated are numeric columns; skip the quoting
                        continue
                    data[index] = '"' + str(data[index]) + '"'
                sql='''
                        insert into movie250(
                        info_link,pic_link,cname,ename,score,rated,instrduction,info)
                        values(%s)
                        '''%",".join(data)
                print(sql)
                cur.execute(sql)
                conn.commit()
            cur.close()
            conn.close()
            print("写入数据库成功")
        except Exception as e:
            print(e)

    def init_db(dbpath):
        conn = sqlite3.connect(dbpath)
        cursor=conn.cursor()
        sql = '''
            create table if not exists movie250
            (
            id integer primary key autoincrement,
            info_link text,
            pic_link text,
            cname varchar,
            ename varchar,
            score numeric,
            rated numeric,
            instrduction text,
            info text
            )
        '''  # create the data table
        cursor.execute(sql)
        conn.commit()
        conn.close()

    if __name__=="__main__":
        baseurl="https://movie.douban.com/top250?start="
        datalist=getDate(baseurl)
        dbpath="movie.db"
        saveData2DB(datalist,dbpath)
        print("爬取成功")
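
    A minimal way to sanity-check the result is a short query like the sketch below. It assumes the script above has already been run, that movie.db is in the current directory, and that the movie250 table was created by init_db; the expected count of 250 is only a rough check.

    # Rough sanity check of the SQLite output; assumes movie.db and the movie250
    # table produced by the script above (sqlite3 is in the standard library).
    import sqlite3

    conn = sqlite3.connect("movie.db")
    cur = conn.cursor()
    cur.execute("select count(*) from movie250")
    print("rows stored:", cur.fetchone()[0])  # 250 after a full crawl
    cur.execute("select cname, score from movie250 order by score desc limit 3")
    for cname, score in cur.fetchall():
        print(cname, score)
    conn.close()
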
    Excel version
    # -*- coding: utf-8 -*-
    import sys
    from bs4 import BeautifulSoup
    import re
    import urllib.request,urllib.error
    import xlwt
    import sqlite3
    # Rule for the movie detail link
    findlink=re.compile(r'<a href="(.*?)">')# compiled regex object describing the rule
    # Movie poster image
    findImgsrc=re.compile(r'<img.*src="(.*?)"',re.S)# re.S lets "." match newlines too
    # Movie title
    findtitle=re.compile(r'<span class="title">(.*?)</span>')
    # Movie rating
    findRating=re.compile(r'<span class="rating_num" property="v:average">(.*?)</span>')
    # Number of ratings
    findjudge=re.compile(r'<span>(\d*)人评价</span>')
    # One-line summary
    findinq=re.compile(r'<span class="inq">(.*)</span>')
    # Related information (director, cast, year)
    findBd=re.compile(r'<p class="">(.*?)</p>',re.S)
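
    # A quick illustration (not part of the crawl) of how these patterns behave,
    # run against hand-made fragments shaped like the real markup:
    #   re.findall(findtitle, '<span class="title">示例电影</span>')                       -> ['示例电影']
    #   re.findall(findRating, '<span class="rating_num" property="v:average">9.0</span>') -> ['9.0']
    #   re.findall(findjudge, '<span>123456人评价</span>')                                 -> ['123456']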
    
    
    def main():
        baseurl="https://movie.douban.com/top250?start="
        # crawl the pages
        datalist=getDate(baseurl)
        savepath="豆瓣电影Top250表格版.xls"
        saveData(datalist,savepath)
    
    
    # Fetch the HTML of a single url
    def askurl(url):
        head={"User-Agent":"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.82 Safari/537.36"}
        request=urllib.request.Request(url,headers=head)
        html=""
        try:
            response=urllib.request.urlopen(request)
            html=response.read().decode("utf-8")
            # print(html)
        except Exception as e:
            print(e)
        return html
    
    # Crawl all 10 pages and parse each movie entry
    def getDate(baseurl):
        datalist=[]
        for i in range(0,10):
            url=baseurl+str(i*25)
            html=askurl(url)
            soup=BeautifulSoup(html,"html.parser")
            for item in soup.find_all('div',class_="item"):
                data=[]
                item=str(item)
                # movie detail link
                link=re.findall(findlink,item)[0]
                data.append(link)
    
                imgsrc=re.findall(findImgsrc,item)[0]
                data.append(imgsrc)
    
                title = re.findall(findtitle, item)
                if len(title)==2:
                    ctitle=title[0]# Chinese title
                    data.append(ctitle)
                    otitle=title[1].replace("/","")# strip the "/" separator
                    data.append(otitle)# foreign title
                else:
                    data.append(title[0])
                    data.append(' ')# leave the foreign title blank
    
                Rating = re.findall(findRating, item)[0]
                data.append(Rating)
    
                judge= re.findall(findjudge, item)[0]
                # print(judge)
                data.append(judge)
    
                inq = re.findall(findinq, item)
                if len(inq)!=0:
                    inq=inq[0].replace("。","")# drop the trailing full stop
                    data.append(inq)
                else:
                    data.append(" ")# leave the summary blank
    
                Bd = re.findall(findBd, item)[0]
                Bd=re.sub(r'<br(\s+)?/>(\s+)?'," ",Bd)# strip <br/> tags
                Bd=re.sub('/'," ",Bd)
                data.append(Bd.strip())
    
                datalist.append(data)
        return datalist
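
    # Each record appended to datalist has 8 fields, in this order:
    # [detail link, image link, Chinese title, foreign title, rating, rating count, one-line summary, related info]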
    
    # Save the data to an Excel workbook
    def saveData(datalist,savepath):
        book=xlwt.Workbook(encoding="utf-8")
        sheet=book.add_sheet('豆瓣电影top250',cell_overwrite_ok=True)
        col=('电影详情连接',"图片连接","影片中文名","影片外国名","评分","评价人数","概况","相关信息")
        for i in range(0,8):
            sheet.write(0,i,col[i])
        for i in range(0,250):
            print("第%d条"%(i+1))
            data=datalist[i]
            for j in range(0,8):
                sheet.write(i+1,j,data[j])
        book.save(savepath)# use the path passed in by the caller
    
    
    def saveData2DB(datalist,dbpath):
        init_db(dbpath)
        conn=sqlite3.connect(dbpath)
        cur=conn.cursor()
        try:
            for data in datalist:
                for index in range(len(data)):
                    if index==4 or index==5:# score and rated are numeric columns; skip the quoting
                        continue
                    data[index] = '"' + str(data[index]) + '"'
                sql='''
                        insert into movie250(
                        info_link,pic_link,cname,ename,score,rated,instrduction,info)
                        values(%s)
                        '''%",".join(data)
                print(sql)
                cur.execute(sql)
                conn.commit()
            cur.close()
            conn.close()
            print("写入数据库成功")
        except Exception as e:
            print(e)
    
    
    
    
    def init_db(dbpath):
        conn = sqlite3.connect(dbpath)
        cursor=conn.cursor()
        sql = '''
            create table if not exists movie250
            (
            id integer primary key autoincrement,
            info_link text,
            pic_link text,
            cname varchar,
            ename varchar,
            score numeric,
            rated numeric,
            instrduction text,
            info text
    
        )
    '''  # create the data table
        cursor.execute(sql)
        conn.commit()
        conn.close()
    
    if __name__=="__main__":
        main()
        print("爬取成功")
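
    For the Excel version, a similarly rough read-back check is sketched below. It assumes the xlrd package is installed and that saveData has written 豆瓣电影Top250表格版.xls into the current directory as above.

    # Rough read-back check of the Excel output; assumes the xlrd package and the
    # .xls file written by saveData above.
    import xlrd

    book = xlrd.open_workbook("豆瓣电影Top250表格版.xls")
    sheet = book.sheet_by_index(0)
    print("rows including header:", sheet.nrows)  # 251 after a full crawl
    print(sheet.row_values(0))  # column headers
    print(sheet.row_values(1))  # first movie record
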
  • Original article: https://www.cnblogs.com/xyz315/p/15330125.html