- Introduction to web crawlers
Crawlers:
1 Baidu: a search engine
Crawler (spider): start from seed sites, download the pages, parse out the links, and queue them as the next pages to crawl
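The crawl loop just described (seed site -> download page -> extract links -> queue new pages) can be sketched in a few lines. A minimal breadth-first version; the seed URL and the 10-page cap are arbitrary choices for illustration:

# -*- coding: utf-8 -*- #
# Minimal crawl loop: start from a seed URL, download the page, pull out its links,
# and queue the new ones as pages to fetch next. The seed URL and the 10-page cap
# are arbitrary choices for illustration.
import re
import requests

to_crawl = ["http://www.sohu.com/"]   # seed site
seen = set(to_crawl)
fetched = 0

while to_crawl and fetched < 10:
    url = to_crawl.pop(0)             # take the next page from the queue (breadth-first)
    fetched += 1
    try:
        page = requests.get(url, timeout=5).text
    except requests.RequestException:
        continue
    for link in re.findall(r'href="(http.*?)"', page):   # keep only absolute http(s) links
        if link not in seen:
            seen.add(link)
            to_crawl.append(link)     # newly found link becomes a page to crawl later

print fetched, "pages fetched,", len(seen), "links discovered"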
Word segmentation
Index: word ---> matching results (a small sketch follows below)
PageRank (1 how large and well inter-linked the site is, 2 how many of the query words it contains, 3 how often it is updated)
Cache: results are served from a cache
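To make the "word ---> result" idea concrete, here is a minimal inverted-index sketch; the pages and their word lists are made-up examples:

# -*- coding: utf-8 -*- #
# Minimal inverted index: map each word to the list of pages that contain it.
# The pages and their word lists are made-up examples.
pages = {
    "page1.html": ["basketball", "news"],
    "page2.html": ["news", "weather"],
}
index = {}
for url, words in pages.items():
    for word in words:
        index.setdefault(word, []).append(url)

print index["news"]   # e.g. ['page1.html', 'page2.html'] -- the pages that contain "news"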
Why hashing is fast:
Like a book's table of contents: jump straight to the location instead of scanning everything
Store: hash(xxx) ----> memory address ---> save the value there
Lookup: hash(xxx) ---> get the memory address ---> fetch the value
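A rough sketch of that store/lookup flow, using Python's built-in hash() and a plain list as the "memory"; the table size and contents are made up, and a real hash table also handles collisions:

# -*- coding: utf-8 -*- #
# Toy hash table: hash(key) -> slot index ("memory address") -> store / look up the value.
SIZE = 8
table = [None] * SIZE

def put(key, value):
    table[hash(key) % SIZE] = (key, value)   # save the value at the computed slot

def get(key):
    slot = table[hash(key) % SIZE]           # jump straight to the slot, no scanning
    return slot[1] if slot and slot[0] == key else None

put("xxx", "some value")
print get("xxx")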
Exercise: write a crawler script that extracts all the links from the Sohu homepage
Steps: 1 open the sohu homepage 2 parse all links out of the page 3 print every link (re.findall to grab them all) and check whether any link addresses are invalid 4 filter out the invalid link addresses
# -*- coding: utf-8 -*- #
import requests
import re

resq = requests.get("http://www.sohu.com/")
print resq.text[:500]
links = re.findall(r'href="(.*?)"', resq.text)
print len(links)
valid_link = []
invalid_link = []
for link in links:
    # First look at what the first ~200 links contain; the idea is to decide what to
    # filter out based on the printed results:
    # for link in links[:200]:
    #     print link
    if re.search(r"\.(jpg|jpeg|gif|ico|png|js|css)$", link.strip()):  # drop links ending in image/js/css extensions
        print 6, link
        invalid_link.append(link.strip())
        continue
    if link.strip() == "" or link.strip() == "#" or link.strip() == "/":  # drop empty links, "#" and "/"
        # print 1, link
        invalid_link.append(link)
        continue
    elif link.strip().startswith("//"):  # protocol-relative links: prepend the scheme
        # print 2, link
        valid_link.append("http:" + link.strip())
        continue
    elif link.strip().count("javascript") >= 1 or link.strip().count("mailto:") >= 1:  # drop javascript: and mailto: links
        # print 3, link
        invalid_link.append(link.strip())
        continue
    elif re.match(r"/\w+", link):  # relative links: prepend the site root
        # print 5, link
        if re.match(r"http://.*?/", resq.url.strip()):
            valid_link.append(re.match(r"http://.*?/", resq.url.strip()).group() + link.strip())
        else:
            valid_link.append(re.match(r"http://.*", resq.url.strip()).group() + link.strip())
        continue
    else:
        # print 7, link
        valid_link.append(link.strip())

# for link in valid_link[:100]:
#     print link
print len(valid_link)
# for link in invalid_link:
#     print link
print len(invalid_link)
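As a side note, the manual site-root pasting for relative links above can be done more robustly with the standard library's urljoin. A small sketch, reusing the resq object from the script above:

# Sketch: resolve relative links against the page URL with urljoin instead of manual string pasting.
# It handles "/path", "//host/path" and "page.html" style links alike.
from urlparse import urljoin   # Python 2; in Python 3 it lives in urllib.parse

base = resq.url                          # e.g. "http://www.sohu.com/"
print urljoin(base, "/sports/a.html")    # -> http://www.sohu.com/sports/a.html
print urljoin(base, "//m.sohu.com/x")    # -> http://m.sohu.com/x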
Variant exercise: download every page linked from the homepage whose content contains 篮球 (basketball)
# -*- coding: utf-8 -*- #
import requests
import re

resq = requests.get("http://www.sohu.com/")
# print resq.text[:500]
links = re.findall(r'href="(.*?)"', resq.text)
# print len(links)
valid_link = []
invalid_link = []
for link in links:
    # Same idea as above: first inspect the first ~200 links and decide what to filter out
    # based on the printed results.
    if re.search(r"\.(jpg|jpeg|gif|ico|png|js|css)$", link.strip()):  # drop links ending in image/js/css extensions
        # print 6, link
        invalid_link.append(link.strip())
        continue
    if link.strip() == "" or link.strip() == "#" or link.strip() == "/":  # drop empty links, "#" and "/"
        # print 1, link
        invalid_link.append(link)
        continue
    elif link.strip().startswith("//"):  # protocol-relative links: prepend the scheme
        # print 2, link
        valid_link.append("http:" + link.strip())
        continue
    elif link.strip().count("javascript") >= 1 or link.strip().count("mailto:") >= 1:  # drop javascript: and mailto: links
        # print 3, link
        invalid_link.append(link.strip())
        continue
    elif re.match(r"/\w+", link):  # relative links: prepend the site root
        # print 5, link
        if re.match(r"http://.*?/", resq.url.strip()):
            valid_link.append(re.match(r"http://.*?/", resq.url.strip()).group() + link.strip())
        else:
            valid_link.append(re.match(r"http://.*", resq.url.strip()).group() + link.strip())
        continue
    else:
        # print 7, link
        valid_link.append(link.strip())

# print len(valid_link)
# print len(invalid_link)
file_num = 1
for link in valid_link:
    # print link
    resq = requests.get(link)
    if u"篮球" in resq.text:  # for binary files, search resq.content instead of resq.text
        if u'meta charset="utf-8"' in resq.text:  # check which encoding the page declares
            with open("e:\\pic\\" + str(file_num) + ".html", "w") as fp:
                fp.write(resq.text.strip().encode("utf-8"))
        else:
            with open("e:\\pic\\" + str(file_num) + ".html", "w") as fp:
                fp.write(resq.text.strip().encode("gbk"))
        file_num += 1  # increment the file-name counter
print "Done!"
Exercise 2: download all the model photos from http://www.meituri.com
My version:
# -*- coding: utf-8 -*- #
import requests
import re

resq = requests.get("http://www.win4000.com/meinvtag34.html")
# print resq.text[:100]
# Example tag in the page: <img src="http://i.situmei.com/a/1/7215/0.jpg">
links = re.findall(r'src="(.*?)"', resq.text)
# print len(links)  # how many src attributes were found
valid_link = []
invalid_link = []
pic_num = 0
for link in links:
    # print link  # printing every src match shows some js entries that are not images and must be dropped
    if re.search(r"\.(jpg|jpeg|gif|ico|png)$", link.strip()):  # keep only the image links
        pic_num += 1
        # print link  # print the image link
        resq1 = requests.get(link.strip(), allow_redirects=True)
        print resq1
        with open("e:\\pic\\" + str(pic_num) + ".png", "wb") as fp:
            fp.write(resq1.content)
print pic_num  # total number of images
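One thing to note in this version: every file is saved as .png even when the link ends in .jpg or .gif. A small sketch of keeping the original extension with os.path.splitext; the example URL is the one from the img tag comment above:

# Sketch: keep the file extension from the image URL instead of hardcoding ".png".
import os

link = "http://i.situmei.com/a/1/7215/0.jpg"                # example URL from the page's img tags
ext = os.path.splitext(link.split("?")[0])[1] or ".jpg"     # strip any query string, fall back to .jpg
print ext   # -> .jpg
# with open("e:\\pic\\" + str(pic_num) + ext, "wb") as fp: ...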
Teacher's version:
# -*- coding: utf-8 -*- #
import requests
import re
import urllib

resq = requests.get("http://www.win4000.com/meinvtag34.html")
link1s = re.findall(r'img src="(.*?)"', resq.text)
# Downloading only the src addresses is not enough: some sites put a thumbnail in src and the
# full-size picture in data-original, lazy-loading the big image to save time.
link2s = re.findall(r'data-original="(.*?)"', resq.text)
# A single regex with alternation also works:
# print re.findall(r'src="(.*?)"|data-original="(.*?)"', 'src="1.jpg" data-original="2.jpg"')
print link2s
valid_link = []
links = link1s + link2s
print len(links)
for link in links:
    if re.search(r"\.(jpg|jpeg|gif)$", link.strip()):
        # print 6, link
        valid_link.append(link.strip())
        continue
# for link in valid_link[:100]:
#     print link
print len(valid_link)
file_num = 1
for link in list(set(valid_link)):  # set() drops duplicate links
    print link
    resq1 = requests.get(link.strip(), allow_redirects=True)
    print "final url:", resq1.url
    # print resq1.text
    with open("e:\\pic\\" + str(file_num) + ".jpg", "wb") as fp:
        fp.write(resq1.content)
    file_num += 1
print "Done!"