# -*- coding: utf-8 -*-
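# Scrape qidian.com's paginated "all books" listing: create a directory per
# book, then save every chapter of that book as a .txt file inside it.
# Python 2 script (urllib2, print statements, sys.setdefaultencoding).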
import urllib2
import re
import os
import sys
from bs4 import BeautifulSoup

# Force utf-8 as the default codec so the implicit str -> unicode conversions
# in the .encode('gbk') calls below do not raise UnicodeDecodeError.
reload(sys)
sys.setdefaultencoding('utf-8')
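# The listing is paginated; the upper bound of 700 pages is hardcoded.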
for page in range(1,700):
    print '*'  # progress marker, one per listing page
    User_Agent = 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:49.0) Gecko/20100101 Firefox/49.0'
    url = "http://f.qidian.com/all?size=-1&sign=-1&tag=-1&chanId=-1&subCateId=-1&orderId=&update=-1&page=%s&month=-1&style=1&action=1" % page
    headers = {'User-Agent': User_Agent}
    request = urllib2.Request(url, headers=headers)
    html = urllib2.urlopen(request).read()
    soup = BeautifulSoup(html, 'html.parser')
    # Each book on the listing page sits inside a <div class="book-mid-info"> block.
    l = soup.find_all('div', class_='book-mid-info')
    print  # blank line between pages
    for htmltile in l:
        # Serialize the <h4> tag back to utf-8 HTML so the regex can pull out
        # the book's URL and title.
        name = htmltile.find('h4').encode('utf-8')
        reg = r'<h4><a data-bid=".*?" data-eid=".*?" href="(.*?)" target="_blank">(.*?)</a></h4>'
        text = re.finditer(reg, name)
        for i in text:
            curl = i.group(1)   # protocol-relative book URL
            title = i.group(2)  # book title
            # Create a directory per book; gbk matches the Windows filesystem encoding.
            if not os.path.isdir(title.encode('gbk')):
                os.mkdir(title.encode('gbk'))
            #os.chdir(title.encode('gbk'))  # alternative: work inside the book's directory
            # The '#Catalog' fragment points at the book's chapter list.
            html1 = urllib2.urlopen('http:' + curl + '#Catalog').read()
            reg = re.compile(r'<li data-rid=".*?"><a href="(.*?)" target="_blank" data-eid="qd_G55" data-cid=".*?" title=".*?">(.*?)</a>')
            titles = re.finditer(reg, html1)
            for i in titles:
                curl_ = i.group(1)  # protocol-relative chapter URL
                names = i.group(2)  # chapter title
                # Create one .txt file per chapter inside the book's directory.
                fd = open(title.encode('gbk') + '/' + names.encode('gbk') + '.txt', 'wb')
                #fd = open(names.encode('gbk') + '.txt', 'wb')  # use with os.chdir above
print "正在爬取%s本"%names
htmlll=urllib2.urlopen('http:'+curl_).read()
regs=re.compile(r'<div class="read-content j_readContent">s*([sS]*?)s*</div>') #正则多行时注意用s*
content=re.findall(regs,htmlll)
                for m in content:
                    contents = m.replace('<p>', '\n')  # turn paragraph tags into line breaks
                    fd.write(names + '\n' + contents)
                print "Finished %s" % names
                fd.close()