写了个图片蜘蛛人玩玩,抓了几个网页试试,感觉不错。核心的代码可能20行也不到,简洁明了,嘻嘻。废话少说,翠花,上代码~~
1 #coding=utf-8
2
3 import os
4 import sys
5 import re
6 import urllib
7
# Capture the scheme + host prefix of an http URL, e.g. 'http://example.com'.
URL_REG = re.compile(r'(http://[^/]+)', re.I)
# Capture an <img> tag's src value: group 1 is the opening quote character,
# group 2 the URL; the backreference \1 requires a matching closing quote.
IMG_REG = re.compile(r'<img[^>]*?src=(["\'])(.*?)\1', re.I)
10
11 def download(dir, url):
12 '''下载网页中的图片
13
14 @dir 保存到本地的路径
15 @url 网页url
16 '''
17 global URL_REG, IMG_REG
18
19 m = URL_REG.match(url)
20 if not m:
21 print '[Error]Invalid URL: ', url
22 return
23 host = m.group(1)
24
25 if not os.path.isdir(dir):
26 os.mkdir(dir)
27
28 # 获取html,提取图片url
29 html = urllib.urlopen(url).read()
30 imgs = [item[1].lower() for item in IMG_REG.findall(html)]
31 f = lambda path: path if path.startswith('http://') else /
32 host + path if path.startswith('/') else url + '/' + path
33 imgs = list(set(map(f, imgs)))
34 print '[Info]Find %d images.' % len(imgs)
35
36 # 下载图片
37 for idx, img in enumerate(imgs):
38 name = img.split('/')[-1]
39 path = os.path.join(dir, name)
40 try:
41 print '[Info]Download(%d): %s'% (idx + 1, img)
42 urllib.urlretrieve(img, path)
43 except:
44 print "[Error]Cant't download(%d): %s" % (idx + 1, img)
45
46 def main():
47 if len(sys.argv) != 3:
48 print 'Invalid argument count.'
49 return
50 dir, url = sys.argv[1:]
51 download(dir, url)
52
if __name__ == '__main__':
    # Example: download('D:\\Imgs', 'http://www.163.com')
    main()