• Python web scraping for beginners: downloading a novel


    ## Step 1: imports
    from bs4 import BeautifulSoup
    import requests
    import sys

    ## Setup
    class downloder(object):
        def __init__(self):
            self.server = 'http://www.biqukan.com'
            self.target = 'http://www.biqukan.com/1_1094/'
            self.names = []  # chapter titles
            self.urls = []   # chapter links
            self.nums = 0    # number of chapters

        def get_download_url(self):
            # Fetch the table-of-contents page and collect every chapter link
            req = requests.get(url=self.target)
            html = req.text
            div_bf = BeautifulSoup(html, 'html.parser')
            div = div_bf.find_all('div', class_='listmain')
            a_bf = BeautifulSoup(str(div[0]), 'html.parser')
            a = a_bf.find_all('a')
            self.nums = len(a[15:])  # skip the first 15 "latest chapter" links
            for each in a[15:]:
                self.names.append(each.string)
                self.urls.append(self.server + each.get('href'))

        def writer(self, name, path, text):
            # Append one chapter (title plus body) to the output file
            write_flag = True
            with open(path, 'a', encoding='utf-8') as f:
                f.write(name + '\n')
                f.writelines(text)
                f.write('\n\n')

        def get_contents(self, target):
            # Fetch a chapter page and return its plain text
            req = requests.get(url=target)
            html = req.text
            bf = BeautifulSoup(html, 'html.parser')
            texts = bf.find_all('div', class_='showtxt')
            texts = texts[0].text.replace('\xa0' * 8, '\n\n')
            return texts


    if __name__ == '__main__':
        dl = downloder()
        dl.get_download_url()
        print('开始下载')  # "starting download"
        for i in range(dl.nums):
            dl.writer(dl.names[i], '用点.txt', dl.get_contents(dl.urls[i]))
        print("下载完成")  # "download finished"




    Based on Brother Hua (华哥)'s tutorial... there is still a lot I don't understand.
    http://cuijiahua.com/blog/2017/10/spider_tutorial_1.html

• Original post: https://www.cnblogs.com/baili-luoyun/p/8437158.html