• Go crawler: Baidu Tieba (concurrent version)


    Crawl pages from Baidu Tieba.

    Non-concurrent version

    package main
    
    import (
    	"fmt"
    	"io"
    	"net/http"
    	"os"
    	"strconv"
    )
    
    func HttpGet(url string) (result string, err error) {
    	//	var result string
    	resp, err1 := http.Get(url)
    	if err1 != nil {
    		err = err1
    		return
    	}
    	defer resp.Body.Close()
    
    	buf := make([]byte, 4096) // read the response body in 4 KB chunks
    	for {
    		n, err2 := resp.Body.Read(buf)
    		if n == 0 {
    			fmt.Println("读取网页完成")
    			break
    		}
    		if err2 != nil && err2 != io.EOF {
    			err = err2
    			return
    		}
    		result += string(buf[:n])
    	}
    	return
    }
    
    func working(start, end int) {
    	fmt.Printf("正在爬取第%d到第%d页", start, end)
    	//爬取每一个网页
    	for i := start; i <= end; i++ {
    		url := "https://tieba.baidu.com/f?kw=vue&ie=utf-8&pn=" + strconv.Itoa((i-1)*50)
    		result, err := HttpGet(url)
    		if err != nil {
    			fmt.Println("httpGet err", err)
    			continue
    		}
    		//		fmt.Println("result", result)
    		f, err := os.Create("page" + strconv.Itoa(i) + ".html")
    		if err != nil {
    			fmt.Println("os.Create err", err)
    			continue
    		}
    		f.WriteString(result)
    		f.Close()
    	}
    }
    
    func main() {
    	var start, end int
    	fmt.Print("请输入起始页。。。")
    	fmt.Scan(&start)
    	fmt.Print("请输入终止页。。。")
    	fmt.Scan(&end)
    
    	working(start, end)
    }
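
    Side note on HttpGet: the manual 4 KB read loop works, but the standard library can drain the body for you. A minimal sketch of that alternative, assuming Go 1.16+ for io.ReadAll and reusing the net/http and io imports of the program above (HttpGetAll is just an illustrative name, not from the original post):

    func HttpGetAll(url string) (string, error) {
    	resp, err := http.Get(url)
    	if err != nil {
    		return "", err
    	}
    	defer resp.Body.Close()
    
    	// io.ReadAll keeps reading until EOF and returns the whole body at once.
    	data, err := io.ReadAll(resp.Body)
    	if err != nil {
    		return "", err
    	}
    	return string(data), nil
    }

    Either way the entire page ends up in memory as one string; the loop version above just makes the Read/EOF handling explicit.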
    
    

    Concurrent version

    This just builds on the version above by adding a channel and launching one goroutine per page; the channel is how main waits for every page to finish.
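
    Here is that pattern in isolation, before the full program: fan out one goroutine per page, then receive exactly one value per page from the channel so main does not exit before the work finishes. A minimal, illustrative sketch with a fixed page count and no HTTP:

    package main
    
    import "fmt"
    
    func main() {
    	done := make(chan int)
    
    	// fan out: one goroutine per "page"
    	for i := 1; i <= 3; i++ {
    		go func(page int) {
    			// ... the real crawler fetches and saves the page here ...
    			done <- page // report completion
    		}(i)
    	}
    
    	// collect: one receive per goroutine launched
    	for i := 1; i <= 3; i++ {
    		fmt.Printf("page %d done\n", <-done)
    	}
    }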

    package main
    
    import (
    	"fmt"
    	"io"
    	"net/http"
    	"os"
    	"strconv"
    )
    
    func HttpGet(url string) (result string, err error) {
    	//	var result string
    	resp, err1 := http.Get(url)
    	if err1 != nil {
    		err = err1
    		return
    	}
    	defer resp.Body.Close()
    
    	buf := make([]byte, 4096)
    	for {
    		n, err2 := resp.Body.Read(buf)
    		if n == 0 {
    			fmt.Println("读取网页完成
    ")
    			break
    		}
    		if err2 != nil && err2 != io.EOF {
    			err = err2
    			return
    		}
    		result += string(buf[:n])
    	}
    	return
    }
    
    func SpiderPage(index int, page chan int) {
    	fmt.Printf("正在爬取第%d到页
    ", index)
    	//爬取每一个网页
    	//	for i := start; i <= end; i++ {
    	url := "https://tieba.baidu.com/f?kw=vue&ie=utf-8&pn=" + strconv.Itoa((index-1)*50)
    	result, err := HttpGet(url)
    	if err != nil {
    		fmt.Println("HttpGet err", err)
    		page <- index // still signal completion so working2's receive loop does not block forever
    		return
    	}
    	//		fmt.Println("result", result)
    	f, err := os.Create("page" + strconv.Itoa(index) + ".html")
    	if err != nil {
    		fmt.Println("os.Create err", err)
    		page <- index // signal completion even on failure
    		return
    	}
    	f.WriteString(result)
    	f.Close()

    	page <- index // report this page as done; working2 expects one send per page
    }
    
    func working2(start, end int) {
    	fmt.Printf("正在爬取第%d页到%d页
    ", start, end)
    
    	page := make(chan int) // each SpiderPage sends its index here when it finishes
    
    	// fan out: launch one goroutine per page
    	for i := start; i <= end; i++ {
    		go SpiderPage(i, page)
    	}
    
    	// collect: wait for one completion message per page
    	for i := start; i <= end; i++ {
    		fmt.Printf("Page %d crawled\n", <-page)
    	}
    }
    
    func main() {
    	var start, end int
    	fmt.Print("请输入起始页。。。")
    	fmt.Scan(&start)
    	fmt.Print("请输入终止页。。。")
    	fmt.Scan(&end)
    
    	working2(start, end)
    }
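
    One caveat: working2 starts a goroutine for every page at once, so a large page range fires that many simultaneous requests. A possible way to cap this, sketched with a buffered channel used as a semaphore (working3 and the limit of 5 are illustrative assumptions, not part of the original post):

    func working3(start, end int) {
    	page := make(chan int)
    	sem := make(chan struct{}, 5) // at most 5 pages being fetched at any moment
    
    	for i := start; i <= end; i++ {
    		go func(index int) {
    			sem <- struct{}{}        // take a slot before starting the request
    			defer func() { <-sem }() // give it back when this page is done
    			SpiderPage(index, page)
    		}(i)
    	}
    
    	// the collection loop is unchanged: one receive per page
    	for i := start; i <= end; i++ {
    		fmt.Printf("Page %d crawled\n", <-page)
    	}
    }

    Each goroutine must acquire a slot before calling SpiderPage and releases it afterwards, so at most five requests are in flight while the caller still waits for one message per page.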
    
    
  • Original article: https://www.cnblogs.com/ygjzs/p/12001352.html