• Python threading use case: multi-threaded downloading


    http://blog.xiayf.cn/2015/09/11/parallelism-in-one-line
    http://python.jobbole.com/84327/
    http://www.runoob.com/python/python-multithreading.html
    http://www.jianshu.com/p/544d406e0875
    http://www.jianshu.com/p/86b8e78c418a
    http://www.imooc.com/article/6969?block_id=tuijian_wz
    http://blog.csdn.net/zhaonanemail/article/details/7175449

    To implement a simple multi-threaded downloader, three things matter (a minimal sketch of these steps follows below):
    1. File size: read it from the response header, e.g. "Content-Length: 911" means the file is 911 bytes.
    2. Task splitting: tell each thread which chunk of the file to download by adding "Range: bytes=300-400" to the request header (meaning bytes 300 through 400 inclusive); for a file of size bytes, the requestable range is [0, size-1].
    3. Merging the chunks: each thread saves its chunk to a temporary file, and once all threads have finished, the temporary files are concatenated in order into the final output file.
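
    The following is a minimal Python 3 sketch of these three steps, using only the standard library (urllib.request and threading). It is an illustration rather than the article's original code: the function names (get_size, fetch_range, download) are made up here, and it assumes the server sends Content-Length and honours Range requests. The full Python 2 example further down follows the same structure.

    import os
    import threading
    import urllib.request

    def get_size(url):
        # Step 1: read the file size from the Content-Length response header.
        req = urllib.request.Request(url, method='HEAD')
        with urllib.request.urlopen(req) as resp:
            return int(resp.headers['Content-Length'])

    def fetch_range(url, start, end, partname):
        # Step 2: request one chunk with "Range: bytes=start-end" (both ends inclusive).
        req = urllib.request.Request(url, headers={'Range': 'bytes=%d-%d' % (start, end)})
        with urllib.request.urlopen(req) as resp, open(partname, 'wb') as f:
            f.write(resp.read())

    def download(url, output, blocks=4):
        size = get_size(url)
        step = size // blocks
        # Split [0, size-1] into `blocks` inclusive ranges; the last one takes the remainder.
        ranges = [(i * step, (i + 1) * step - 1) for i in range(blocks - 1)]
        ranges.append(((blocks - 1) * step, size - 1))

        parts = ['%s.part%d' % (output, i) for i in range(blocks)]
        threads = [threading.Thread(target=fetch_range, args=(url, s, e, p))
                   for (s, e), p in zip(ranges, parts)]
        for t in threads:
            t.start()
        for t in threads:
            t.join()

        # Step 3: concatenate the part files in order, then delete them.
        with open(output, 'wb') as out:
            for p in parts:
                with open(p, 'rb') as part:
                    out.write(part.read())
                os.remove(p)

    For example, download('http://dldir1.qq.com/qqfile/QQforMac/QQ_V3.1.1.dmg', 'download.file', blocks=4) downloads the same test file used at the bottom of the full example in four parallel pieces.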
    
    Below is a nice multi-threaded file download example found online, written following the idea of axel, the multi-threaded download tool on Linux (note that it is Python 2 code). The source is at: http://fayaa.com/code/view/58/full/
    
    #!/usr/bin/python
    # -*- coding: utf-8 -*-
    # filename: paxel.py
    # FROM: http://fayaa.com/code/view/58/full/
    # Jay modified it a little and save for further potential usage.
    
    '''It is a multi-thread downloading tool
    
        It was developed following axel.
            Author: volans
            E-mail: volansw [at] gmail.com
    '''
    
    import sys
    import os
    import time
    import urllib
    from threading import Thread
    
    # in case you want to use http_proxy
    local_proxies = {'http': 'http://131.139.58.200:8080'}
    
    
    class AxelPython(Thread, urllib.FancyURLopener):
        '''Multi-thread downloading class.
    
            run() is a virtual method of Thread.
        '''
        def __init__(self, threadname, url, filename, ranges=0, proxies={}):
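            # ranges is the inclusive (start, end) byte range this worker downloads; filename is its temporary part file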
            Thread.__init__(self, name=threadname)
            urllib.FancyURLopener.__init__(self, proxies)
            self.name = threadname
            self.url = url
            self.filename = filename
            self.ranges = ranges
            self.downloaded = 0
    
        def run(self):
            '''virtual function in Thread; downloads this worker's byte range'''
            try:
                self.downloaded = os.path.getsize(self.filename)
            except OSError:
                #print 'never downloaded'
                self.downloaded = 0
    
            # rebuild the start point: resume from whatever is already in the part file
            self.startpoint = self.ranges[0] + self.downloaded
    
            # This part is already completed (ranges[1] is the inclusive end byte)
            if self.startpoint > self.ranges[1]:
                print 'Part %s has been downloaded over.' % self.filename
                return
    
            self.oneTimeSize = 16384  # 16kByte/time
            print 'task %s will download from %d to %d' % (self.name, self.startpoint, self.ranges[1])
    
            self.addheader("Range", "bytes=%d-%d" % (self.startpoint, self.ranges[1]))
            self.urlhandle = self.open(self.url)
    
            data = self.urlhandle.read(self.oneTimeSize)
            while data:
                filehandle = open(self.filename, 'ab+')
                filehandle.write(data)
                filehandle.close()
    
                self.downloaded += len(data)
                #print "%s" % (self.name)
                #progress = u'\r...'
    
                data = self.urlhandle.read(self.oneTimeSize)
    
    
    def GetUrlFileSize(url, proxies={}):
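        # Point 1 above: scan the response headers for "Content-Length" and return the size as an int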
        urlHandler = urllib.urlopen(url, proxies=proxies)
        headers = urlHandler.info().headers
        length = 0
        for header in headers:
            if header.find('Length') != -1:
                length = header.split(':')[-1].strip()
                length = int(length)
        return length
    
    
    def SpliteBlocks(totalsize, blocknumber):
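        # Point 2 above: split [0, totalsize-1] into blocknumber inclusive (start, end) ranges;
        # the last range absorbs any remainder left by the integer division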
        blocksize = totalsize / blocknumber
        ranges = []
        for i in range(0, blocknumber - 1):
            ranges.append((i * blocksize, i * blocksize + blocksize - 1))
        ranges.append((blocksize * (blocknumber - 1), totalsize - 1))
    
        return ranges
    
    
    def islive(tasks):
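        # Return True while at least one worker thread is still alive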
        for task in tasks:
            if task.isAlive():
                return True
        return False
    
    
    def paxel(url, output, blocks=6, proxies=local_proxies):
        '''Download url into output using `blocks` worker threads: size the
        file, split it into byte ranges, start one AxelPython worker per
        range, report progress, then merge the temporary part files.
        '''
        size = GetUrlFileSize(url, proxies)
        ranges = SpliteBlocks(size, blocks)
    
        threadname = ["thread_%d" % i for i in range(0, blocks)]
        filename = ["tmpfile_%d" % i for i in range(0, blocks)]
    
        tasks = []
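        # start one daemon worker per byte range; each writes to its own temporary part file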
        for i in range(0, blocks):
            task = AxelPython(threadname[i], url, filename[i], ranges[i])
            task.setDaemon(True)
            task.start()
            tasks.append(task)
    
        time.sleep(2)
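        # poll the workers every half second and print overall progress until they all finish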
        while islive(tasks):
            downloaded = sum([task.downloaded for task in tasks])
            process = downloaded / float(size) * 100
            show = u'\rFilesize:%d Downloaded:%d Completed:%.2f%%' % (size, downloaded, process)
            sys.stdout.write(show)
            sys.stdout.flush()
            time.sleep(0.5)
    
        # merge the temporary part files, in order, into the final output file
        filehandle = open(output, 'wb+')
        for i in filename:
            f = open(i, 'rb')
            filehandle.write(f.read())
            f.close()
            try:
                os.remove(i)
            except OSError:
                pass

        filehandle.close()
    
    if __name__ == '__main__':
        url = 'http://dldir1.qq.com/qqfile/QQforMac/QQ_V3.1.1.dmg'
        output = 'download.file'
        paxel(url, output, blocks=4, proxies={})
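
    To try it, run the script with Python 2 (it uses print statements and urllib.FancyURLopener): it downloads the test URL above in four parts (tmpfile_0 to tmpfile_3 in the current directory) and merges them into download.file. The split download only works against servers that honour Range requests.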
    
  • Original post: https://www.cnblogs.com/liujitao79/p/5484777.html