• Python 2.7 crawler exercise: crawling the Baidu Baike "python" entry


    Eclipse New Project | New PyDev Package | New PyDev Module
    Project: 0418

    Package: SpiderTest

    Module: 5 modules, spider_main (the crawler entry point) => url_manager (the URL manager) => html_downloader (fetches the HTML response, similar to C#'s HttpWebRequest) => html_parser (extracts data from the page, similar to HtmlAgilityPack) => html_outputer (writes the results to a file)
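
    For reference, a plausible PyDev package layout for these modules (the __init__.py is what makes SpiderTest importable as a package):

    0418/
        SpiderTest/
            __init__.py
            spider_main.py
            url_manager.py
            html_downloader.py
            html_parser.py
            html_outputer.py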

    1.spider_main

    # coding:utf8
    from SpiderTest import url_manager, html_downloader, html_parser, html_outputer
    
    class SpiderMain(object): 
        def __init__(self):
            self.urls = url_manager.UrlManager()
            self.downloader = html_downloader.HtmlDownLoader()
            self.parser = html_parser.HtmlParser()
            self.outputer = html_outputer.HtmlOutPuter()          
         
        
        def craw(self, root_url):
            count = 1
            self.urls.add_new_url(root_url)
            while self.urls.has_new_url():   
                try:         
                    new_url = self.urls.get_new_url()
                    print("craw %d:%s" % (count,new_url))
                    html_cont = self.downloader.download(new_url)
                    new_urls, new_data = self.parser.parse(new_url, html_cont)
                    self.urls.add_new_urls(new_urls)
                    self.outputer.collect_data(new_data)
                    if count == 100:
                        break
                    
                    count = count + 1
                except Exception as e:
                    print("craw fail: %s" % e)
                
            self.outputer.output_html()
    
    if __name__ == "__main__":
        root_url = "http://baike.baidu.com/view/21087.htm"
        obj_spider = SpiderMain()
        obj_spider.craw(root_url)
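
    With the layout above, the spider can be started by running spider_main directly in Eclipse/PyDev; since the import is absolute (from SpiderTest import ...), it can also be launched from the project root on the command line:

    python -m SpiderTest.spider_main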
    

    2.url_manager      

    # coding:utf8
    
    class UrlManager(object):
        def __init__(self):
            self.new_urls = set()
            self.old_urls = set()
        
        def add_new_url(self, url):
            if url is None:
                return
            if url not in self.new_urls and url not in self.old_urls:
                self.new_urls.add(url)       
        
        def add_new_urls(self, urls):
            if urls is None or len(urls) == 0:
                return
            for url in urls:
                self.add_new_url(url)    
        
        def has_new_url(self):
            return len(self.new_urls) != 0
        
        def get_new_url(self):
            new_url = self.new_urls.pop()
            self.old_urls.add(new_url)
            return new_url
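
    A quick interactive check of the de-duplication behavior (a hypothetical usage sketch, run from the project root):

    # coding:utf8
    from SpiderTest import url_manager

    manager = url_manager.UrlManager()
    manager.add_new_url("http://baike.baidu.com/view/21087.htm")
    manager.add_new_url("http://baike.baidu.com/view/21087.htm")  # duplicate, ignored
    print(manager.has_new_url())  # True: one URL waiting
    url = manager.get_new_url()   # pops it and records it in old_urls
    manager.add_new_url(url)      # already crawled, ignored
    print(manager.has_new_url())  # False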

    3.html_downloader

    # coding:utf8
    
    import urllib2
    
    class HtmlDownLoader(object):
        def download(self, url):
            if url is None:
                return None
                   
            response = urllib2.urlopen(url)
            if response.getcode() != 200:
                return None
            
            return response.read()
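
    Baidu may block or alter responses for the default Python user agent, and urlopen waits indefinitely without a timeout. A minimal hardening sketch (the UA string and the 10-second timeout are illustrative choices, not part of the original code):

    # coding:utf8
    import urllib2

    def download(url):
        if url is None:
            return None
        # send a browser-like User-Agent and cap the wait at 10 seconds
        request = urllib2.Request(url, headers={"User-Agent": "Mozilla/5.0"})
        response = urllib2.urlopen(request, timeout=10)
        if response.getcode() != 200:
            return None
        return response.read()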
    

    4.html_parser

    # coding:utf8
    from bs4 import BeautifulSoup
    import re
    import urlparse
    
    class HtmlParser(object):
    
        def _get_new_urls(self, page_url, soup):
            new_urls = set()
            #  /view/123.htm
            links = soup.find_all("a", href=re.compile(r"/view/\d+\.htm"))
            for link in links:
                new_url = link['href']
                new_full_url = urlparse.urljoin(page_url, new_url) 
                new_urls.add(new_full_url)
                
            return new_urls 
        
        def _get_new_data(self, page_url, soup):
            res_data = {}
            res_data['url'] = page_url        
            #  <dl class="lemmaWgt-lemmaTitle lemmaWgt-lemmaTitle-"><h1>Python</h1><dd class="lemmaWgt-lemmaTitle-title">
            title_node = soup.find('dl',class_="lemmaWgt-lemmaTitle lemmaWgt-lemmaTitle-").find("h1")
            res_data['title'] = title_node.get_text() 
            
            #  <div class="lemma-summary" label-module="lemmaSummary">
            summary_node = soup.find('div', class_="lemma-summary")
            res_data['summary'] = summary_node.get_text()
            return res_data
        
        def parse(self, page_url, html_cont):
            if page_url is None or html_cont is None:
                return None, None
            
            soup = BeautifulSoup(html_cont, "html.parser", from_encoding="utf-8")
            new_urls = self._get_new_urls(page_url, soup)
            new_data = self._get_new_data(page_url, soup)
            return new_urls, new_data
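
    A self-contained check of the corrected link pattern and of urljoin (the inline HTML snippet is made up for illustration):

    # coding:utf8
    import re
    import urlparse
    from bs4 import BeautifulSoup

    html = '<a href="/view/123.htm">A</a><a href="/history/x.htm">B</a>'
    soup = BeautifulSoup(html, "html.parser")
    for link in soup.find_all("a", href=re.compile(r"/view/\d+\.htm")):
        # the relative /view/123.htm is resolved against the page URL
        print(urlparse.urljoin("http://baike.baidu.com/view/21087.htm", link["href"]))
    # prints http://baike.baidu.com/view/123.htm; the /history/ link is filtered out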
    

    5.html_outputer

    # coding:utf-8
    
    class HtmlOutPuter(object):
        def __init__(self):
            self.datas = []
        
        def collect_data(self, data):
            if data is None:
                return
            self.datas.append(data)
        
        def output_html(self):
            fout = open('output.html','w')
            
            fout.write("<html>")
            fout.write("<body>")
            fout.write("<table>")
            for data in self.datas:
                fout.write("<tr>")
                fout.write("<td>%s</td>" % data["url"])
                fout.write("<td>%s</td>" % data["title"].encode('utf-8'))
                fout.write("<td>%s</td>" % data["summary"].encode('utf-8'))
                fout.write("</tr>")
            
            fout.write("</table>")
            fout.write("</body>")
            fout.write("</html")
            fout.close()
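
    The file is written as UTF-8 bytes but declares no charset, so browsers may garble the Chinese summaries. A small sketch of the fix, in the same write-based style (a head element inserted right after the opening <html> tag):

    fout.write("<html>")
    fout.write("<head><meta http-equiv='Content-Type' content='text/html;charset=utf-8'></head>")
    fout.write("<body>")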
    

      

  • Original article: https://www.cnblogs.com/dennysong/p/5423327.html