版本一:
版本二:
#coding:utf-8
import Queue
import threading
import time
import urllib2
import re
import sys
# Shared FIFO queue feeding page URLs to the worker threads.
queue = Queue.Queue()
# First page of the Tianya forum thread to scrape.
url = 'http://bbs.tianya.cn/post-16-996521-1.shtml'
# NOTE(review): main() binds its own local 'urls'; this module-level list
# appears to stay empty and unused — confirm before removing.
urls=[]
# Maps each page URL to the list of post texts extracted from that page.
Dict_txt = {}
# Output file the collected text is written to.
file_name = 'abc.txt'
class ThreadNum(threading.Thread):
    """Worker thread: consumes page URLs from the shared queue and
    downloads each page into the module-level Dict_txt."""

    def __init__(self, queue):
        threading.Thread.__init__(self)
        self.queue = queue

    def run(self):
        while True:
            # Block until the producer hands us the next page URL.
            task_url = self.queue.get()
            sys.stdout.flush()
            down_text(Dict_txt, task_url)
            print('**finished download %s**' % task_url)
            time.sleep(1)
            # Signal the queue that this work item is fully processed,
            # so queue.join() in main() can eventually return.
            self.queue.task_done()
def down_text(txt_dict, url):
    """Download one thread page and store its extracted posts.

    Fetches *url*, pulls out every (timestamp, post body) pair posted by
    the thread starter, joins each pair into one string, and stores the
    resulting list in *txt_dict* keyed by the page URL.
    """
    print('--downling %s--' % url)
    html_content = urllib2.urlopen(url, timeout=30).read()
    # Fix: the original pattern had its backslash stripped ('>s*' instead
    # of '>\s*'), so the whitespace after the content tag never matched.
    text_pattern = re.compile(
        r'<span>时间:(.*?)</span>.*?'
        r'<!-- <div class="host-ico">楼主</div> -->.*?'
        r'<div class="bbs-content.*?>\s*(.*?)</div>',
        re.DOTALL)
    text = text_pattern.findall(html_content)
    # Each match is a (time, body) tuple; merge the pair into one line.
    text_join = [' '.join(item) for item in text]
    txt_dict[url] = text_join
def page(url):
    """Return the list of page URLs for the thread whose first page is *url*.

    Downloads the first page, reads the total page count from the pager
    (the number immediately before the "下页" link), and builds one URL
    per page.  Returns an empty list when the pager cannot be found.
    """
    html_page = urllib2.urlopen(url, timeout=30).read()
    # Fix: the original pattern lost its backslashes ('S*?', 'd*', 's*'),
    # so group(1) could never capture the page number; '\d+' (not '\d*')
    # also guarantees int() never sees an empty string.
    page_pattern = re.compile(
        r'<a href="\S*?">(\d+)</a>\s*<a href="\S*?" class="\S*?">下页</a>')
    page_result = page_pattern.search(html_page)
    list1 = []
    if page_result:
        page_num = int(page_result.group(1))
        # Thread URLs end in '-<page>.shtml'; strip the trailing
        # '1.shtml' (7 chars) and append each page number in turn.
        for num in range(1, page_num + 1):
            list1.append('%s%s.shtml' % (url[:-7], num))
    return list1
def write_text(filename, dict, urls):
    """Write the downloaded pages to *filename* in the order given by *urls*.

    *dict* maps each page URL to a list of post strings.  <br> tags are
    turned into spaces, then every remaining space is stripped, and the
    entries are separated by four spaces.

    Note: the parameter name 'dict' shadows the builtin; it is kept
    unchanged for backward compatibility with existing callers.
    """
    # Fix: use 'with' so the file is closed even if a write raises
    # (the original leaked the handle on error).
    with open(filename, 'w+') as tx_file:
        for page_url in urls:
            for tx in dict[page_url]:
                tx = tx.replace('<br>', ' ').replace('<br />', ' ').replace(' ', '')
                tx_file.write(tx.strip() + ' ' * 4)
start = time.time()

def main():
    """Spawn worker threads, queue every page URL, wait for all downloads,
    then write the collected text to file_name."""
    page_urls = page(url)
    # Five concurrent downloaders (the original comment claimed ten).
    for _ in range(5):
        t = ThreadNum(queue)
        # Fix: mark workers as daemons — without this they block forever
        # on queue.get() after the queue drains, and because non-daemon
        # threads keep the interpreter alive the process never exits.
        t.setDaemon(True)
        t.start()
    # Fill the queue with one task per page URL.
    for page_url in page_urls:
        queue.put(page_url)
    # Block until every queued URL has been marked done by a worker.
    queue.join()
    print('---- start write ----')
    write_text(file_name, Dict_txt, page_urls)

main()
print("Elapsed Time: %s" % (time.time() - start))
版本三:
#coding:utf-8
import urllib
import re
import threading
import os, time
class Down_Tianya(threading.Thread):
    """Worker thread that downloads one thread page into a shared dict."""

    def __init__(self, url, num, dt):
        # url: page to fetch; num: 1-based page number used as the dict
        # key; dt: shared dict the extracted text is stored into.
        threading.Thread.__init__(self)
        self.url = url
        self.num = num
        self.txt_dict = dt

    def run(self):
        print('downling from %s' % self.url)
        self.down_text()

    def down_text(self):
        """Fetch the page and store its (time, body) posts under self.num."""
        html_content = urllib.urlopen(self.url).read()
        # Fix: the original pattern had its backslash stripped ('>s*'
        # instead of '>\s*'), so the whitespace after the content tag
        # never matched.
        text_pattern = re.compile(
            r'<span>时间:(.*?)</span>.*?'
            r'<!-- <div class="host-ico">楼主</div> -->.*?'
            r'<div class="bbs-content.*?>\s*(.*?)</div>',
            re.DOTALL)
        text = text_pattern.findall(html_content)
        # Join each (time, body) tuple into a single string per post.
        self.txt_dict[self.num] = [' '.join(item) for item in text]
def page(url):
    """Return the thread's total page count, read from the first page's pager.

    Falls back to 1 when the pager link cannot be found — the original
    implicitly returned None, which crashed the caller's range(1, None+1).
    """
    html_page = urllib.urlopen(url).read()
    # Fix: restored the stripped backslashes ('\S', '\d', '\s'); '\d+'
    # (not '\d*') guarantees int() never sees an empty string.
    page_pattern = re.compile(
        r'<a href="\S*?">(\d+)</a>\s*<a href="\S*?" class="\S*?">下页</a>')
    page_result = page_pattern.search(html_page)
    if page_result:
        return int(page_result.group(1))
    # No pager found: we did fetch this page, so there is at least one.
    return 1
def write_text(dict, fn):
    """Write pages 1..len(dict) from *dict* to file *fn* in page order.

    Each value is a list of post strings: <br> tags become spaces, every
    remaining space is then removed, and entries are separated by four
    spaces.  (The parameter name 'dict' shadows the builtin; it is kept
    unchanged for backward compatibility with existing callers.)
    """
    # Fix: use 'with' so the file is closed even if a write raises
    # (the original leaked the handle on error).
    with open(fn, 'w+') as tx_file:
        for page_no in range(1, len(dict) + 1):
            for tx in dict[page_no]:
                tx = tx.replace('<br>', ' ').replace('<br />', ' ').replace(' ', '')
                tx_file.write(tx.strip() + ' ' * 4)
def main():
    """Download every page of the thread concurrently, then save the text."""
    url = 'http://bbs.tianya.cn/post-16-996521-1.shtml'
    file_name = 'abc.txt'
    my_page = page(url)
    my_dict = {}
    print('page num is : %s' % my_page)
    # One downloader thread per page; each writes into the shared dict
    # under its own page number, so no two threads touch the same key.
    threads = []
    for page_no in range(1, my_page + 1):
        page_url = '%s%s.shtml' % (url[:-7], page_no)
        worker = Down_Tianya(page_url, page_no, my_dict)
        worker.start()
        threads.append(worker)
    # Wait for every download to finish before writing the file.
    for worker in threads:
        worker.join()
    write_text(my_dict, file_name)
    print('All download finished. Save file at directory: %s' % os.getcwd())

if __name__ == '__main__':
    main()