    Thread Operations in Python Programs: Locks

    1. Synchronization Lock

    1.1 Multiple threads competing for a shared resource

    from threading import Thread
    import os, time

    def work():
        global n
        temp = n            # every thread reads the same initial value of n
        time.sleep(0.1)     # all 100 threads are asleep before any of them writes back
        n = temp - 1

    if __name__ == '__main__':
        n = 100
        l = []
        for i in range(100):
            p = Thread(target=work)
            l.append(p)
            p.start()
        for p in l:
            p.join()

        print(n)  # the result is likely 99 rather than the expected 0: the threads raced on n
    

    1.1.1 The pattern for protecting shared data

    import threading
    R=threading.Lock()
    R.acquire()
    '''
    operate on the shared data here
    '''
    R.release()
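
    Since Lock is also a context manager, the same pattern can be written with a with block, which guarantees the release even if the protected code raises an exception. A brief sketch:

    import threading

    R = threading.Lock()

    with R:
        # operate on the shared data here; the lock is released automatically on exit
        ...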
    

    1.2 Using a synchronization lock

    from threading import Thread, Lock
    import os, time
    
    
    def work():
        global n
        lock.acquire()   # only one thread at a time may run the code below
        temp = n
        time.sleep(0.1)
        n = temp - 1
        lock.release()   # let the next waiting thread proceed
    
    
    if __name__ == '__main__':
        lock = Lock()
        n = 100
        l = []
        for i in range(100):
            p = Thread(target=work)
            l.append(p)
            p.start()
        for p in l:
            p.join()
    
        print(n)  # guaranteed to be 0: the locked section runs serially, trading execution speed for data safety
    

    1.3 Mutex lock vs. join

    # Without a lock: fully concurrent, fast, but the data is unsafe
    from threading import current_thread, Thread, Lock
    import os, time
    
    
    def task():
        global n
        print('%s is running' % current_thread().getName())
        temp = n
        time.sleep(0.5)
        n = temp - 1
    
    
    if __name__ == '__main__':
        n = 100
        lock = Lock()
        threads = []
        start_time = time.time()
        for i in range(100):
            t = Thread(target=task)
            threads.append(t)
            t.start()
        for t in threads:
            t.join()
    
        stop_time = time.time()
        print('main:%s n:%s' % (stop_time - start_time, n))
    
    '''
    Thread-1 is running
    Thread-2 is running
    ......
    Thread-100 is running
    main:0.5216062068939209 n:99
    '''
    
    # With a lock: the unlocked part runs concurrently, the locked part runs serially; slower, but the data is safe
    from threading import current_thread, Thread, Lock
    import os, time
    
    
    def task():
        # the unlocked code runs concurrently
        time.sleep(3)
        print('%s start to run' % current_thread().getName())
        global n
        # the locked code runs serially
        lock.acquire()
        temp = n
        time.sleep(0.5)
        n = temp - 1
        lock.release()
    
    
    if __name__ == '__main__':
        n = 100
        lock = Lock()
        threads = []
        start_time = time.time()
        for i in range(100):
            t = Thread(target=task)
            threads.append(t)
            t.start()
        for t in threads:
            t.join()
        stop_time = time.time()
        print('main:%s n:%s' % (stop_time - start_time, n))
    
    '''
    Thread-2 start to run
    Thread-3 start to run
    Thread-1 start to run
    Thread-6 start to run
    Thread-4 start to run
    ......
    Thread-99 start to run
    Thread-96 start to run
    Thread-100 start to run
    Thread-92 start to run
    Thread-93 start to run
    main:53.294203758239746 n:0
    '''
    

    Some readers may wonder: since locking makes execution serial anyway, why not just call join immediately after start instead of using a lock? That would serialize things too.

    True: calling join right after start certainly turns the execution of the 100 tasks into a serial run, and n will indeed end up as 0, so the data is safe. The problem is this:

    With join right after start, all of the code inside each task runs serially, whereas with a lock only the locked part, i.e. the code that modifies the shared data, runs serially.

    Purely in terms of data safety both approaches work, but the lock is clearly more efficient.

    from threading import current_thread, Thread, Lock
    import os, time
    
    
    def task():
        time.sleep(3)
        print('%s start to run' % current_thread().getName())
        global n
        temp = n
        time.sleep(0.5)
        n = temp - 1
    
    
    if __name__ == '__main__':
        n = 100
        lock = Lock()
        start_time = time.time()
        for i in range(100):
            t = Thread(target=task)
            t.start()
            t.join()
        stop_time = time.time()
        print('main:%s n:%s' % (stop_time - start_time, n))
    
    '''
    Thread-1 start to run
    Thread-2 start to run
    ......
    Thread-100 start to run
    main:350.6937336921692 n:0  # a horrifying amount of time
    '''
    

    2. Deadlock and Recursive Locks

    A deadlock is a situation in which two or more processes or threads wait on each other while competing for resources during execution; without outside intervention none of them can make any progress. The system is then said to be deadlocked, and the processes that wait forever are called deadlocked processes. The following code deadlocks:

    2.1 Deadlock

    from threading import Thread,Lock
    import time
    mutexA=Lock()
    mutexB=Lock()
    
    class MyThread(Thread):
        def run(self):
            self.func1()
            self.func2()
        def func1(self):
            mutexA.acquire()
            print('\033[41m%s got lock A\033[0m' % self.name)

            mutexB.acquire()
            print('\033[42m%s got lock B\033[0m' % self.name)
            mutexB.release()

            mutexA.release()

        def func2(self):
            mutexB.acquire()
            print('\033[43m%s got lock B\033[0m' % self.name)
            time.sleep(2)

            mutexA.acquire()
            print('\033[44m%s got lock A\033[0m' % self.name)
            mutexA.release()

            mutexB.release()
    
    if __name__ == '__main__':
        for i in range(10):
            t=MyThread()
            t.start()
    
    '''
    Thread-1 got lock A
    Thread-1 got lock B
    Thread-1 got lock B
    Thread-2 got lock A
    then everything hangs: deadlock
    '''
    

    Solution: a recursive lock. To support acquiring the same resource multiple times within a single thread, Python provides the reentrant lock RLock.

    Internally an RLock maintains a Lock and a counter variable; the counter records the number of acquire calls, so the resource can be acquired repeatedly by the same thread. Only once all of that thread's acquires have been released can other threads obtain the resource. If the example above used RLock instead of Lock, no deadlock would occur.

    mutexA=mutexB=threading.RLock() # when a thread takes the lock the counter becomes 1; each further acquire inside that same thread increments it again; all other threads can only wait until the owning thread has released every acquire, i.e. until the counter drops back to 0
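
    A minimal sketch of the reentrant behaviour (not from the original post): the same thread can acquire an RLock repeatedly, and other threads are only admitted once the counter falls back to zero.

    from threading import RLock

    r = RLock()
    r.acquire()   # counter: 1
    r.acquire()   # same thread may re-acquire; counter: 2 (a plain Lock would block here)
    # ... work with the shared resource ...
    r.release()   # counter: 2 -> 1
    r.release()   # counter: 1 -> 0; only now can another thread acquire r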
    

    3. A Classic Problem: Scientists Eating Noodles

    3.1 The deadlock version

    import time
    from threading import Thread,Lock
    noodle_lock = Lock()
    fork_lock = Lock()
    def eat1(name):
        noodle_lock.acquire()
        print('%s grabbed the noodles' % name)
        fork_lock.acquire()
        print('%s grabbed the fork' % name)
        print('%s is eating noodles' % name)
        fork_lock.release()
        noodle_lock.release()

    def eat2(name):
        fork_lock.acquire()
        print('%s grabbed the fork' % name)
        time.sleep(1)
        noodle_lock.acquire()
        print('%s grabbed the noodles' % name)
        print('%s is eating noodles' % name)
        noodle_lock.release()
        fork_lock.release()
    
    for name in ['哪吒','nick','tank']:
        t1 = Thread(target=eat1,args=(name,))
        t2 = Thread(target=eat2,args=(name,))
        t1.start()
        t2.start()
    

    3.2 Solving the deadlock with a recursive lock

    import time
    from threading import Thread, RLock
    
    fork_lock = noodle_lock = RLock()
    
    
    def eat1(name):
        noodle_lock.acquire()
        print('%s grabbed the noodles' % name)
        fork_lock.acquire()
        print('%s grabbed the fork' % name)
        print('%s is eating noodles' % name)
        fork_lock.release()
        noodle_lock.release()


    def eat2(name):
        fork_lock.acquire()
        print('%s grabbed the fork' % name)
        time.sleep(1)
        noodle_lock.acquire()
        print('%s grabbed the noodles' % name)
        print('%s is eating noodles' % name)
        noodle_lock.release()
        fork_lock.release()
    
    
    for name in ['哪吒', 'nick', 'tank']:
        t1 = Thread(target=eat1, args=(name,))
        t2 = Thread(target=eat2, args=(name,))
        t1.start()
        t2.start()
    

    4. Semaphore

    It works the same way as it does for processes.

    A Semaphore manages an internal counter:
    every call to acquire() decrements the counter by 1;
    every call to release() increments it by 1;
    the counter can never go below 0; when it is 0, acquire() blocks the thread until some other thread calls release().

    Example (only 5 threads can hold the semaphore at the same time, i.e. the maximum number of concurrent connections is limited to 5):

    from threading import Thread,Semaphore
    import threading
    import time
    
    
    def task():
        sm.acquire()
        print(f"{threading.current_thread().name} get sm")
        time.sleep(3)
        sm.release()
    
    if __name__ == '__main__':
        sm = Semaphore(5)  # only 5 threads may run the guarded section at the same time
        for i in range(20):
            t = Thread(target=task)
            t.start()
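
    Semaphore, like Lock and RLock, also supports the with statement, so the task above could equally be written as in the following sketch:

    def task():
        with sm:   # acquire() on entry, release() on exit, even if an exception is raised
            print(f"{threading.current_thread().name} get sm")
            time.sleep(3)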
    

    This is a completely different concept from a process pool. Pool(4) produces at most 4 processes, and they are the same 4 from start to finish; no new ones are ever created. A semaphore, by contrast, lets a whole crowd of threads/processes be created and merely limits how many of them hold the guarded resource at any one time.
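
    For contrast, a minimal sketch (not from the original post) showing that Pool(4) really does reuse the same four worker processes for all 20 tasks:

    from multiprocessing import Pool
    import os, time


    def job(i):
        time.sleep(0.5)
        return os.getpid()   # report which worker process ran this task


    if __name__ == '__main__':
        with Pool(4) as pool:   # exactly 4 worker processes, reused from start to finish
            pids = pool.map(job, range(20))
        print(len(set(pids)))   # 4: all 20 tasks were handled by the same 4 processes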
