• Python Study Notes 6 (Getting Free Proxy IPs)


To collect free proxy IPs from the web, I put together a script that scrapes free proxies from three different proxy sites. The point of using three sources is that if one site blocks or rate-limits you, the script can still fetch proxies from whichever sites remain reachable. Tested and working! ^_^ For reference only; you can also extend the script with functions that use the harvested proxies to access other things automatically (see the sketches after the listing).

    import requests
    import urllib.request
    from bs4 import BeautifulSoup
    import random, time, re

    # Regex that validates a dotted-quad IPv4 address (each octet 0-255).
    IPRegular = r"(([1-9]?\d|1\d{2}|2[0-4]\d|25[0-5])\.){3}([1-9]?\d|1\d{2}|2[0-4]\d|25[0-5])"
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36'}
    header1 = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) '
                             'Ubuntu Chromium/44.0.2403.89 Chrome/44.0.2403.89 Safari/537.36'}
    # Lists that hold the harvested proxies.
    IPs = []
    proxy_list = []

    # Source 1: xicidaili.com
    def getIPList1():
        for i in range(1, 3):
            req = urllib.request.Request(url='http://www.xicidaili.com/nt/{0}'.format(i), headers=header1)
            r = urllib.request.urlopen(req)
            soup = BeautifulSoup(r, 'html.parser', from_encoding='utf-8')
            table = soup.find('table', attrs={'id': 'ip_list'})
            tr = table.find_all('tr')[1:]
            # Parse each row for the proxy's address and port.
            for item in tr:
                tds = item.find_all('td')
                proxy = "{0}:{1}".format(tds[1].get_text().lower(), tds[2].get_text())
                proxy_list.append(proxy)
        return proxy_list

    # Source 2: 66ip.cn
    def getIPList2():
        # Looking at this site's URLs, the per-province proxy pages are
        # selected by a trailing page number, so a {} placeholder stands in
        # for it.
        url = 'http://www.66ip.cn/areaindex_16/{}.html'
        for page in range(1, 11):
            # Fill in the placeholder to build the URL of one page
            # (assigned to a new name so the template keeps its placeholder).
            page_url = url.format(page)
            # get() returns a Response object.
            rsp = requests.get(page_url)
            # The Response object's text attribute holds the page source.
            text = rsp.text
            # Parse the source with BeautifulSoup's lxml parser.
            soup = BeautifulSoup(text, 'lxml')
            # Use find() to locate the table that holds the IPs.
            table = soup.find(name='table', attrs={'border': '2px'})
            # Use find_all() to collect all of the rows.
            ip_list = table.find_all(name='tr')
            # Loop over the rows.
            for addr in ip_list:
                # The first tr is a header row, not an IP, so skip it.
                if addr == ip_list[0]:
                    continue
                # Extract the IP...
                ip = addr.find_all(name='td')[0].string
                # ...and the port.
                port = addr.find_all(name='td')[1].string
                proxy = '%s:%s' % (ip, port)
                proxy_list.append(proxy)
        return proxy_list

    # Source 3: iphai.com
    def getIPList3():
        request_list = []
        headers = {
            'Host': 'www.iphai.com',
            'User-Agent': 'Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)',
            'Accept': r'application/json, text/javascript, */*; q=0.01',
            'Referer': r'http://www.iphai.com',
        }
        request_item = "http://www.iphai.com/free/ng"
        request_list.append(request_item)
        for req_id in request_list:
            req = urllib.request.Request(req_id, headers=headers)
            response = urllib.request.urlopen(req)
            html = response.read().decode('utf-8')

            # Pull the IPs and ports out of the page with regular expressions.
            ip_list = re.findall(r'\d+\.\d+\.\d+\.\d+', html)
            port_list = re.findall(r'(\s\d+\s)', html)
            for i in range(len(ip_list)):
                ip = ip_list[i]
                # Strip the surrounding whitespace captured with the port.
                port = port_list[i].strip()
                proxy = '%s:%s' % (ip, port)
                proxy_list.append(proxy)
        return proxy_list


    if __name__ == "__main__":
        # Try the proxy sites in turn and harvest from the first reachable one.
        list1 = ['http://www.66ip.cn/', 'https://www.xicidaili.com', 'http://www.iphai.com/free/ng']
        while True:
            for url in list1:
                try:
                    r = requests.get(url, timeout=5, headers=header1)
                    if r.status_code == 200:
                        if url == list1[0]:
                            print("OK, site reachable:", url)
                            IPs = getIPList2()
                        elif url == list1[1]:
                            print("OK, site reachable:", url)
                            IPs = getIPList1()
                        elif url == list1[2]:
                            print("OK, site reachable:", url)
                            IPs = getIPList3()
                        break
                except Exception:
                    # This site is unreachable; fall back to the next source.
                    print("Error, cannot reach:", url)
                    continue
            print("Harvested proxies:", IPs)
            time.sleep(10)
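
The IPRegular pattern is defined at the top of the script but never used anywhere in the listing. As a minimal sketch of one way to put it to work, the helper below (filter_proxies is a hypothetical name of mine, not part of the original script) keeps only the harvested "ip:port" entries whose address part is a well-formed IPv4 address. It reuses the script's re import and the IPRegular global:

    # Sketch only: filter_proxies is a hypothetical helper, not part of the
    # original script. It keeps only entries whose address part fully
    # matches the IPRegular pattern defined above.
    def filter_proxies(proxies):
        valid = []
        for proxy in proxies:
            ip = proxy.split(':')[0]            # the "ip" half of "ip:port"
            if re.fullmatch(IPRegular, ip):
                valid.append(proxy)
        return valid

    # e.g. IPs = filter_proxies(IPs)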
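
The note at the top says the script can be extended with functions that use the harvested proxies to access other things automatically. Below is a minimal sketch of what such a function might look like; fetch_via_proxy and the httpbin.org test URL are my own assumptions, not part of the original post. It reuses the script's requests and random imports and the header1 definition:

    # Sketch only: fetch a URL through a randomly chosen harvested proxy.
    # fetch_via_proxy is a hypothetical helper, not from the original script.
    def fetch_via_proxy(url, proxies):
        proxy = random.choice(proxies)          # pick one "ip:port" entry
        proxy_dict = {'http': 'http://' + proxy,
                      'https': 'http://' + proxy}
        try:
            r = requests.get(url, proxies=proxy_dict, timeout=5, headers=header1)
            return r.text
        except requests.RequestException:
            return None                         # proxy dead or too slow

    # e.g. print(fetch_via_proxy('http://httpbin.org/ip', IPs))
    # httpbin.org echoes the origin IP, so you can check the proxy took effect.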
Original post: https://www.cnblogs.com/zwh-Seeking/p/11854770.html