# -*- coding: utf-8 -*-
# @author: Tele
# @Time : 2019/04/14 3:48 PM
# Multithreaded version
import time
import requests
import os
import json
from fake_useragent import UserAgent
from lxml import etree
import threading
from concurrent.futures import ThreadPoolExecutor, wait, as_completed


class JDSplier:
    executor = ThreadPoolExecutor(max_workers=6)
    mutex = threading.Lock()
    flag = True

    @staticmethod
    def get_proxy():
        # Fetch a proxy from the local proxy-pool service
        return requests.get("http://127.0.0.1:5010/get/").content.decode()

    @staticmethod
    def get_ua():
        # Random User-Agent string
        ua = UserAgent()
        return ua.random

    def __init__(self, kw_list):
        self.kw_list = kw_list
        # Comment API url template
        self.url_temp = "https://sclub.jd.com/comment/productPageComments.action?&productId={}&score=0&sortType=5&page={}&pageSize=10&isShadowSku=0&rid=0&fold=1"
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36",
        }
        self.proxies = {
            "http": None
        }
        self.parent_dir = None
        self.file_dir = None

    # Refresh the User-Agent and proxy before each job
    def check(self):
        self.headers["User-Agent"] = JDSplier.get_ua()
        proxy = "http://" + JDSplier.get_proxy()
        self.proxies["http"] = proxy
        print("ua:", self.headers["User-Agent"])
        print("proxy:", self.proxies["http"])

    # Fetch and save one page of comments for a product
    def parse_url(self, product_id, page):
        url = self.url_temp.format(product_id, page)
        response = requests.get(url, headers=self.headers, proxies=self.proxies, verify=False)
        if response.status_code == 200:
            print(url)
            data = None
            if len(response.content) == 0:
                return
            # The response encoding is inconsistent: usually GBK, sometimes UTF-8
            try:
                data = json.loads(response.content.decode("gbk"))
            except (UnicodeDecodeError, json.JSONDecodeError):
                data = json.loads(response.content.decode())
            finally:
                # Comments
                if not data:
                    return
                comment_list = data["comments"]
                if len(comment_list) > 0:
                    item_list = list()
                    for comment in comment_list:
                        item = dict()
                        # Product name
                        item["referenceName"] = comment["referenceName"]
                        # Comment time
                        item["creationTime"] = comment["creationTime"]
                        # Comment content
                        item["content"] = comment["content"]
                        item_list.append(item)

                    # Save to file
                    with open(self.file_dir, "a", encoding="utf-8") as file:
                        file.write(json.dumps(item_list, ensure_ascii=False, indent=2))
                        file.write("\n")
                    time.sleep(5)
                else:
                    # An empty comment list means the last page has been passed
                    JDSplier.flag = False
        else:
            print("request failed!")

    # Extract product ids for each keyword
    def get_product_info(self):
        url_temp = "https://search.jd.com/Search?keyword={}&enc=utf-8"
        result_list = list()
        for kw in self.kw_list:
            url = url_temp.format(kw)
            response = requests.get(url, headers=self.headers, proxies=self.proxies, verify=False)
            if response.status_code == 200:
                item_dict = dict()
                html_element = etree.HTML(response.content)
                # Take the first 10 product ids from page one of the search results for this keyword
                id_list = html_element.xpath("//div[@id='J_goodsList']/ul/li[position()<11]/@data-sku")
                item_dict["title"] = kw
                item_dict["id_list"] = id_list
                result_list.append(item_dict)
            else:
                pass
        return result_list

    def get_comment(self, item_list):
        if len(item_list) > 0:
            for item in item_list:
                id_list = item["id_list"]
                item_title = item["title"]
                if len(id_list) > 0:
                    # Make sure the output directory exists
                    self.parent_dir = "f:/jd_comment/" + item_title + time.strftime("-%Y-%m-%d-%H-%M-%S",
                                                                                    time.localtime(time.time()))
                    if not os.path.exists(self.parent_dir):
                        os.makedirs(self.parent_dir)
                    task_list = list()
                    # One thread per product to crawl its comments
                    for product_id in id_list:
                        t = JDSplier.executor.submit(self.job, product_id)
                        time.sleep(10)
                        task_list.append(t)
                    for future in as_completed(task_list):
                        future.result(timeout=500)
                    # wait(task_list, timeout=500)
                else:
                    print("---error,empty id list---")
        else:
            print("---error,empty item list---")

    def job(self, product_id):
        self.check()
        # The lock serializes jobs, since flag and file_dir are shared across threads
        JDSplier.mutex.acquire()
        page = 0
        self.file_dir = self.parent_dir + "/" + str(product_id) + "_ratecontent.txt"
        # Crawl comment pages until the last page is reached
        while JDSplier.flag:
            self.parse_url(product_id, page)
            page += 1
        JDSplier.flag = True
        JDSplier.mutex.release()

    def run(self):
        # self.check()
        item_list = self.get_product_info()
        print(item_list)
        self.get_comment(item_list)
        JDSplier.executor.shutdown()


def main():
    # "华为p30pro", "华为mate20pro",
    # "vivoz3", "oppok1", "荣耀8x", "小米9", "小米mix3", "三星s9", "iphonexr", "iphonexs"
    kw_list = ["vivoz3"]
    splider = JDSplier(kw_list)
    splider.run()


if __name__ == '__main__':
    main()
ps: sleep between requests as much as you can. It is slow, but it won't trip JD's security system. E-commerce platforms like this are easiest to crawl on their big sale days, when they usually relax the anti-crawling measures temporarily to cope with the extremely high traffic.
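A minimal sketch of that advice: randomizing the pause makes the request timing look less mechanical than the fixed time.sleep(5)/time.sleep(10) used above. The helper below is illustrative only; the name and the jitter bounds are assumptions, not values from the script.

import random
import time


def polite_sleep(base=5.0, jitter=5.0):
    # Pause for the base interval plus a random extra so requests are not evenly spaced
    time.sleep(base + random.uniform(0, jitter))

Usage: call polite_sleep() wherever the script currently calls time.sleep(5) or time.sleep(10).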