• Downloading Tianya forum posts with a multithreaded queue


    Version 1:


    Version 2:

    #coding:utf-8
    import Queue
    import threading
    import time
    import urllib2
    import re
    import sys

    queue = Queue.Queue()
    url = 'http://bbs.tianya.cn/post-16-996521-1.shtml'
    urls = []
    Dict_txt = {}
    file_name = 'abc.txt'

    class ThreadNum(threading.Thread):
        def __init__(self, queue):
            threading.Thread.__init__(self)
            self.queue = queue

        def run(self):
            while True:
                # consumer side: take one page url off the queue
                num = self.queue.get()
                sys.stdout.flush()
                down_text(Dict_txt, num)
                print '**finished download %s**' % num
                time.sleep(1)
                # when the work is done, signal the queue that this task is complete
                self.queue.task_done()

    def down_text(txt_dict, url):
        """Fetch the page at the given url and store its posts in the dict, keyed by url."""
        print '--downloading %s--' % url
        html_content = urllib2.urlopen(url, timeout=30).read()
        text_pattern = re.compile(r'<span>时间:(.*?)</span>.*?<!-- <div class="host-ico">楼主</div> -->.*?<div class="bbs-content.*?>\s*(.*?)</div>', re.DOTALL)
        text = text_pattern.findall(html_content)
        text_join = [' '.join(item) for item in text]
        txt_dict[url] = text_join

    def page(url):
        """Get the total number of pages from the first-page url and build the url of every page."""
        html_page = urllib2.urlopen(url, timeout=30).read()
        page_pattern = re.compile(r'<a href="\S*?">(\d*)</a>\s*<a href="\S*?" class="\S*?">下页</a>')
        page_result = page_pattern.search(html_page)
        list1 = []
        if page_result:
            page_num = int(page_result.group(1))
            page_range = range(1, page_num + 1)
            for num in page_range:
                myurl = '%s%s.shtml' % (url[:-7], num)
                list1.append(myurl)
        return list1

    def write_text(filename, dict, urls):
        """Write the dict to a text file in page order; each value is the list of posts on that page."""
        tx_file = open(filename, 'w+')
        for i in urls:
            tx_list = dict[i]
            for tx in tx_list:
                tx = tx.replace('<br>', ' ').replace('<br />', ' ').replace(' ', '')
                tx_file.write(tx.strip() + ' ' * 4)
        tx_file.close()

    start = time.time()

    def main():
        # spawn a pool of worker threads to consume urls from the queue (5 workers here)
        urls = page(url)
        for i in range(5):
            t = ThreadNum(queue)
            t.setDaemon(True)  # daemonize the workers so the process can exit once the queue is drained
            t.start()
        # producer side: fill the queue with the page urls
        for num in urls:
            queue.put(num)
        # wait on the queue until everything has been processed
        queue.join()
        print '---- start write ----'
        write_text(file_name, Dict_txt, urls)

    main()
    print "Elapsed Time: %s" % (time.time() - start)
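
    For comparison, here is the same producer/consumer pattern written for Python 3, where the Queue and urllib2 modules become queue and urllib.request. This is a minimal sketch rather than a drop-in port: the page count is hardcoded instead of calling page(), only the post-body half of the original regex is kept, and the pages are assumed to decode as UTF-8.

    # Minimal Python 3 sketch of the queue/worker pattern used in version 2.
    # Assumptions: 5 pages hardcoded instead of calling page(), UTF-8 decoding,
    # and only the post-body part of the original regex.
    import queue
    import re
    import threading
    import urllib.request

    task_queue = queue.Queue()
    results = {}

    def worker():
        while True:
            page_url = task_queue.get()   # consumer: take one url off the queue
            try:
                html = urllib.request.urlopen(page_url, timeout=30).read().decode('utf-8', 'ignore')
                results[page_url] = re.findall(r'<div class="bbs-content.*?>\s*(.*?)</div>', html, re.DOTALL)
            finally:
                task_queue.task_done()    # signal completion even if the fetch failed

    for _ in range(5):                    # 5 worker threads, as in version 2
        threading.Thread(target=worker, daemon=True).start()
    for n in range(1, 6):                 # producer: enqueue the page urls
        task_queue.put('http://bbs.tianya.cn/post-16-996521-%d.shtml' % n)
    task_queue.join()                     # block until every queued url has been processed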


    Version 3:
    #coding:utf-8
    import urllib
    import re
    import threading
    import os, time

    class Down_Tianya(threading.Thread):
        """Download one page in its own thread."""
        def __init__(self, url, num, dt):
            threading.Thread.__init__(self)
            self.url = url
            self.num = num
            self.txt_dict = dt

        def run(self):
            print 'downloading from %s' % self.url
            self.down_text()

        def down_text(self):
            """Fetch the page at self.url and store its posts in the dict, keyed by page number."""
            html_content = urllib.urlopen(self.url).read()
            text_pattern = re.compile(r'<span>时间:(.*?)</span>.*?<!-- <div class="host-ico">楼主</div> -->.*?<div class="bbs-content.*?>\s*(.*?)</div>', re.DOTALL)
            text = text_pattern.findall(html_content)
            text_join = [' '.join(item) for item in text]
            self.txt_dict[self.num] = text_join

    def page(url):
        """Get the total number of pages from the first-page url."""
        html_page = urllib.urlopen(url).read()
        page_pattern = re.compile(r'<a href="\S*?">(\d*)</a>\s*<a href="\S*?" class="\S*?">下页</a>')
        page_result = page_pattern.search(html_page)
        if page_result:
            page_num = int(page_result.group(1))
            return page_num

    def write_text(dict, fn):
        """Write the dict to a text file ordered by key (page number); each value is the list of posts on that page."""
        tx_file = open(fn, 'w+')
        pn = len(dict)
        for i in range(1, pn + 1):
            tx_list = dict[i]
            for tx in tx_list:
                tx = tx.replace('<br>', ' ').replace('<br />', ' ').replace(' ', '')
                tx_file.write(tx.strip() + ' ' * 4)
        tx_file.close()

    def main():
        url = 'http://bbs.tianya.cn/post-16-996521-1.shtml'
        file_name = 'abc.txt'
        my_page = page(url)
        my_dict = {}
        print 'page num is : %s' % my_page
        threads = []
        # build the url of every page and download each one in its own thread
        for num in range(1, my_page + 1):
            myurl = '%s%s.shtml' % (url[:-7], num)
            downlist = Down_Tianya(myurl, num, my_dict)
            downlist.start()
            threads.append(downlist)
        # wait for every download thread to finish before writing the file
        for t in threads:
            t.join()
        write_text(my_dict, file_name)
        print 'All download finished. Save file at directory: %s' % os.getcwd()

    if __name__ == '__main__':
        main()
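
    Version 3 starts one thread per page and joins them all, which is simple but unbounded: a 200-page post would open 200 simultaneous connections. For reference, here is a minimal Python 3 sketch of the same per-page download with concurrency capped by a thread pool; the base url and the page count of 5 are placeholder assumptions, not part of the original code.

    # Minimal Python 3 sketch: one result per page as in version 3, but with
    # concurrency capped by a pool instead of one thread per page.
    # The base url and page count (5) below are placeholder assumptions.
    import urllib.request
    from concurrent.futures import ThreadPoolExecutor

    def fetch(page_url):
        return urllib.request.urlopen(page_url, timeout=30).read()

    page_urls = ['http://bbs.tianya.cn/post-16-996521-%d.shtml' % n
                 for n in range(1, 6)]

    with ThreadPoolExecutor(max_workers=5) as pool:
        # map() preserves input order, so results come back already sorted by page number
        pages = dict(zip(page_urls, pool.map(fetch, page_urls)))

    pool.map yields results lazily in input order, so a failed page raises its exception when that result is collected instead of silently leaving a hole in the dict.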




