环境:
windows,python3.4
参考链接:
https://blog.csdn.net/weixin_36604953/article/details/78156605
代码:(亲测可以运行)
# -*- coding: utf-8 -*-
"""Crawl user reviews of one car model from newcar.xcar.com.cn and then
summarize the distribution of review types (好评 / 中评 / 差评).

Reference: https://blog.csdn.net/weixin_36604953/article/details/78156605
"""
import random
import time

import requests
from bs4 import BeautifulSoup

# Browser-like User-Agent so the site serves the normal desktop pages.
HEADER = {
    "User-Agent": ("Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 "
                   "(KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36"),
}

OUT_FILE = "aika_qc_gn_1_1_1.txt"
# The site is GBK-family encoded; gb18030 is a superset and avoids mojibake.
PAGE_ENCODING = "gb18030"


def _clean(text):
    """Remove every kind of whitespace (regular/full-width spaces, NBSP,
    newlines) from *text* — replaces the original chain of .replace() calls."""
    return "".join(text.split())


def _soup_of(url):
    """GET *url* and return a BeautifulSoup of the correctly decoded page.

    NOTE: the original code forgot to set the encoding on the comment
    detail pages, which produced mojibake there; routing every request
    through this helper fixes that.
    """
    resp = requests.get(url=url, headers=HEADER)
    resp.encoding = PAGE_ENCODING  # declare the real encoding before .text
    return BeautifulSoup(resp.text, "html.parser")


def _fetch_comment(dl):
    """Fetch the full comment text from the review's detail page; fall back
    to the inline excerpt in the list page when the detail page fails.

    Returns the placeholder "sunny" when nothing can be extracted (kept
    from the original script's convention).
    """
    try:
        comment_url = dl.find("dt").findAll("a")[-1]["href"]
        soupc = _soup_of(comment_url)
        node = (soupc.find("div", id="mainNew")
                .find("div", class_="maintable")
                .findAll("form")[1]
                .find("table", class_="t_msg")
                .findAll("tr")[1])
        try:
            return _clean(node.find("font").get_text())
        except Exception:
            return "sunny"
    except Exception:
        # Detail page unreachable/unparseable: use the excerpt after the
        # last "：" in the <dd> of the list page.
        try:
            return _clean(dl.find("dd").get_text().split(" ")[-1]).split(":")[-1]
        except Exception:
            return "sunny"


def _parse_review(dl):
    """Extract one review record from its <dl> node on the list page.

    Returns a (user, region, useful, review_type, comment) tuple; any field
    that fails to parse is replaced by the placeholder "sunny".
    """
    # Review type is written as 【好评】/【中评】/【差评】 inside the <dt>.
    try:
        dt_text = _clean(dl.find("dt").get_text())
        review_type = dt_text.split("【")[1].split("】")[0]
    except Exception:
        review_type = "sunny"

    # Number of readers who found this review useful.
    try:
        useful = int(_clean(dl.find("dd").find("div", class_="useful")
                            .find("i").find("span").get_text()))
    except Exception:
        useful = "sunny"

    # Source region of the review.
    try:
        region = _clean(dl.find("dd").find("p").find("a").get_text())
    except Exception:
        region = "sunny"

    # Reviewer name: the part after the last full-width colon.
    try:
        user = _clean(dl.find("dd").find("p").get_text()).split(":")[-1]
    except Exception:
        user = "sunny"

    return user, region, useful, review_type, _fetch_comment(dl)


def mm(url):
    """Crawl every review page reachable from the first page *url* and
    append one line per review to OUT_FILE.

    Each line holds five space-separated fields:
        user  region  useful-count  review-type  comment
    (Newline separation is required: the original wrote records separated
    by a single space, which broke fenxi()'s line-by-line statistics.)
    """
    soup0 = _soup_of(url)
    # The second-to-last pager link carries the last page number.
    total_page = int(
        soup0.find("div", class_="pagers").findAll("a")[-2].get_text())

    # errors='ignore': drop characters gb18030 cannot encode, don't crash.
    with open(OUT_FILE, "a", encoding=PAGE_ENCODING,
              errors="ignore") as myfile:
        header_line = "user 来源 认为有用人数 类型 comment"
        print(header_line)
        myfile.write(header_line + "\n")

        for page in range(1, total_page + 1):
            stop = random.uniform(1, 3)  # polite random pause per page
            page_url = "http://newcar.xcar.com.cn/257/review/0/0_%d.htm" % page
            soup = _soup_of(page_url)
            reviews = soup.find("div", class_="review_comments").findAll("dl")

            for idx, dl in enumerate(reviews, start=1):
                try:
                    print("正在爬取第%d页的第%d的评论,网址为%s"
                          % (page, idx, page_url))
                    record = _parse_review(dl)
                    time.sleep(stop)
                    print(*record)
                    myfile.write(
                        " ".join(str(field) for field in record) + "\n")
                except Exception as e:
                    # Best-effort: log the failure and continue crawling.
                    print(e)
                    print("爬取第%d页的第%d的评论失败,网址为%s"
                          % (page, idx, page_url))


def fenxi():
    """Read OUT_FILE back and print the percentage distribution of
    好评 / 中评 / 差评 among the rated reviews.

    Percentages are computed over the rated records only (good+middle+bad),
    matching the original's count - nn denominator; the header line and
    unparsed records are counted as "未评论".
    """
    good = middle = bad = unrated = 0
    # Must reopen with the same encoding the file was written with.
    with open(OUT_FILE, "r", encoding=PAGE_ENCODING,
              errors="ignore") as myfile:
        for line in myfile:
            fields = line.split(" ")
            review_type = fields[3] if len(fields) > 3 else ""
            if review_type == "好评":
                good += 1
            elif review_type == "中评":
                middle += 1
            elif review_type == "差评":
                bad += 1
            else:
                unrated += 1

    rated = good + middle + bad
    if rated == 0:  # guard the original's ZeroDivisionError
        print("没有可统计的评论")
        return
    print("好评占比:", round(good / rated * 100, 2))
    print("中评占比:", round(middle / rated * 100, 2))
    print("差评占比:", round(bad / rated * 100, 2))
    print("未评论:", round(unrated / rated * 100, 2))


if __name__ == "__main__":
    # Guarded so importing this module no longer triggers a full crawl.
    mm("http://newcar.xcar.com.cn/257/review/0.htm")
    fenxi()
BeautifulSoup神器
Python的第三方库bs4中有一个BeautifulSoup模块,是用于解析html代码的,换句话说就是可以帮助你更方便地通过标签定位你需要的信息。这里只介绍两个比较关键的方法:
1、find方法和findAll方法:
首先,BeautifulSoup会先将整个html或者你所指定的html代码变成一个BeautifulSoup对象的实例(不懂对象和实例不要紧,你只要把它当作是一套你使用F12看到的树形html代码就好),这个实例可以使用很多方法,最常用的就是find和findAll,二者的功能是相同的,通过find( )的参数,即find( )括号中指定的标签名,属性名,属性值去搜索对应的标签,并获取它,不过find只获取搜索到的第一个标签,而findAll将会获取搜索到的所有符合条件的标签,放入一个迭代器(实际上是将所有符合条件的标签放入一个list),findAll常用于兄弟标签的定位,如刚才定位口碑信息,口碑都在dl标签下,而同一页的10条口碑对应于10个dl标签,这时候用find方法只能获取第一个,而findAll会获取全部的10个标签,存入一个列表,想要获取每个标签的内容,只需对这个列表使用一个for循环遍历一遍即可。
2、get_text()方法:
使用find获取的内容不仅仅是我们需要的内容,而且包括标签名、属性名、属性值等,比如使用find方法获取"<Y yy='aaa'>xxxx</Y>"
的内容xxxx,使用find后,我们会得到整个"<Y yy='aaa'>xxxx</Y>"
,十分冗长,实际我们想要的仅仅是这个标签的内容xxxx,因此,对使用find方法后的对象再使用get_text( )方法,就可以得到标签的内容了,对应到这里,我们通过get_text( )方法就可以得到xxxx了。