#!/usr/bin/env python
# -*- coding: utf-8 -*-
import redis

'''
A connection like this is opened and torn down on every use, which wastes
resources. The port defaults to 6379, so it can be omitted.

r = redis.Redis(host='127.0.0.1', port=6379, password='tianxuroot')
r.set('name', 'root')
print(r.get('name').decode('utf8'))
'''

'''
Connection pool:
When the program creates the data-source instance, a batch of database
connections is created up front and kept in the pool. When the program needs
database access, it takes an idle connection from the pool instead of
opening a new one.
'''

pool = redis.ConnectionPool(host='127.0.0.1', password='helloworld')  # create a connection pool
r = redis.Redis(connection_pool=pool)

r.set('foo', 'bar')
print(r.get('foo').decode('utf8'))
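# A minimal sketch of the same pool, assuming the same local Redis and
# password as above: redis-py's decode_responses=True option makes the
# client return str instead of bytes, so the manual .decode('utf8') calls
# become unnecessary.
pool2 = redis.ConnectionPool(host='127.0.0.1', password='helloworld',
                             decode_responses=True)
r2 = redis.Redis(connection_pool=pool2)
r2.set('greeting', 'hello')
print(r2.get('greeting'))  # already a str, no decode needed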
from bs4 import BeautifulSoup
import requests
from lxml import etree
import redis

pool = redis.ConnectionPool(host='127.0.0.1', port=6379)
r = redis.Redis(connection_pool=pool)
# r = redis.Redis.from_url("redis://127.0.0.1:6379", decode_responses=True)


def get_urls(url):
    """Collect the article links on one listing page, then recurse into the next page."""
    result = requests.get(url)
    selector = etree.HTML(result.text)
    links = selector.xpath(r'//*[@id="archive"]/div/div[2]/p[1]/a[1]/@href')
    for link in links:
        r.sadd("first_urlsss", link)  # a Redis set de-duplicates repeated links
    next_url = extract_next_url(result.text)
    if next_url:
        get_urls(next_url)


def extract_next_url(html):
    """Return the href of the 'next page' link, or None on the last page."""
    soup = BeautifulSoup(html, "lxml")
    next_link = soup.select_one('a.next.page-numbers')
    return next_link["href"] if next_link else None


if __name__ == '__main__':
    url = "http://python.jobbole.com/all-posts/"
    get_urls(url)
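# A small follow-up sketch, assuming the crawl above has populated the
# "first_urlsss" set: SCARD reports how many unique links were stored and
# SMEMBERS reads them back. The helper name dump_collected_urls is just
# illustrative, not part of the original script.
def dump_collected_urls():
    print("collected:", r.scard("first_urlsss"))
    for link in r.smembers("first_urlsss"):
        print(link.decode('utf8'))  # smembers returns bytes by default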