• Thread pool: crawling a novel


    #  This example uses a pool of 100 threads
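    # Overall flow: request the table-of-contents page, parse the book
    # metadata and chapter links with lxml, download every chapter
    # concurrently through the thread pool, then write them into one .txt file.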

    import requests
    from lxml import etree
    from multiprocessing.dummy import Pool  # thread-backed Pool (threads, not processes)
    import time
    
    start  = time.time()
    
    url = "https://www.biquge.info/41_41486/"
    headers = {
        "user-agent": "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.86 Safari/537.36"
    }
    page = requests.get(url=url, headers=headers)
    page.encoding = 'utf-8'
    page_text = page.text
    tree = etree.HTML(page_text)
    book_name = tree.xpath("//div[@id='info']/h1/text()")[0]  # book title
    author = tree.xpath("//div[@id='info']/p[1]/text()")[0].replace(' ', '').strip()
    book_type = tree.xpath("//div[@id='info']/p[2]/text()")[0].replace(' ', '')
    last_update_time = tree.xpath("//div[@id='info']/p[3]/text()")[0].replace(' ', '')
    book_info = tree.xpath("//div[@id='intro']/p[1]/text()")[0].replace(' ', '')
    info = book_name + '\n' + author + '\n' + book_type + '\n' + last_update_time + '\n' + book_info + '\n'
    dd_list = tree.xpath("//div[@id='list']/dl/dd/a/@href")
    
    
    def func(a):
        res = requests.get(url=f"https://www.biquge.info/41_41486/{a}",headers=headers)
        res.encoding = 'utf-8'
        res = res.text
        tree1 = etree.HTML(res)
        content_data = tree1.xpath('//div[@id="content"]//text()')
        name = tree1.xpath('//*[@id="wrapper"]/div[4]/div/div[2]/h1/text()')[0]  # chapter title
        content = '    ' + '\n'.join(content_data).lstrip()  # chapter body text
        return name,content 
    
    
    f = open(f'./{book_name}.txt','w',encoding='utf-8')
    def write_xs(lst):
        for i in lst:
            print(i[0])
            f.write(i[0]+'\n'+i[1])
        f.close()
    
    pool = Pool(100)  # thread pool with 100 worker threads
    lst = pool.map(func,dd_list)  # fetch every chapter page through the pool
    write_xs(lst)  # write the fetched chapters to disk
    
    
    print(time.time()-start)
    print('结束')
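
multiprocessing.dummy.Pool exposes the process-Pool API but runs the workers as threads, which suits an I/O-bound crawl like this one. As a rough equivalent, the same fan-out can be written with the standard library's concurrent.futures.ThreadPoolExecutor; the sketch below is an illustrative alternative, not part of the original script, and assumes the func, dd_list, and book_name defined above (max_workers=100 mirrors the pool size used here).

    # Minimal sketch: same crawl with ThreadPoolExecutor instead of
    # multiprocessing.dummy.Pool. executor.map preserves input order,
    # just like pool.map, so chapters are written in TOC order.
    from concurrent.futures import ThreadPoolExecutor

    with ThreadPoolExecutor(max_workers=100) as executor:
        results = list(executor.map(func, dd_list))

    with open(f'./{book_name}.txt', 'w', encoding='utf-8') as fp:
        for chapter_name, chapter_text in results:
            fp.write(chapter_name + '\n' + chapter_text)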
  • Original post: https://www.cnblogs.com/zhangchen-sx/p/10877343.html