• Human Keypoint Detection with the Baidu API


    When I first used Baidu's AI Open Platform I was not familiar with web requests, so things did not go smoothly. Drawing on two write-ups I found online, I ended up with the more straightforward code below.

    Main program:

    '''
    Human body keypoint detection using the Baidu body_analysis API
    '''
    import base64
    import json
    import urllib.parse
    import urllib.request
    
    import joint   # local module that defines the Joint class shown later
    
    
    request_url = "https://aip.baidubce.com/rest/2.0/image-classify/v1/body_analysis"
    # To use a different Baidu capability, replace this URL with the one given in its documentation
    
    # Read the image and Base64-encode it, as the API requires
    with open('/home/zhengr/Documents/data/1.jpg', 'rb') as f:
        image = base64.b64encode(f.read())
    image64 = str(image, 'utf-8')
    
    
    params = {'image': image64, 'image_type': 'BASE64'}
    params = urllib.parse.urlencode(params).encode('utf-8')   # form-encode the POST body
    
    
    access_token = '24.fdd8df19e52da8ff449e1484aa582f42.2592000.1556250057.282335-15823849'
    request_url = request_url + "?access_token=" + access_token
    # Each account obtains its own access token; it appears to be valid for 30 days
    
    request = urllib.request.urlopen(url=request_url, data=params)   # send the request
    
    content = request.read()   # read the raw response
    print(content)             # show the raw response
    result = str(content, 'utf-8')
    res = json.loads(result)
    print(res['person_info'][0]['body_parts'])
    ress = res['person_info'][0]['body_parts']   # keypoints of the first detected person
    jo = joint.Joint(ress)                       # joint.Joint() is defined below
    jo.xunhun('/home/zhengr/Documents/data/1.jpg')   # mark the keypoints on the image
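
    The program above assumes the call succeeds and that at least one person was detected. A small check along the following lines avoids a KeyError when the API returns an error object instead of person_info (a sketch; the error_code/error_msg fields follow the usual Baidu AIP error format):

    # Hedged sketch: validate the parsed response before indexing into it.
    # Assumes `res` is the parsed JSON from the code above.
    if 'error_code' in res:
        # Typical Baidu AIP error shape: {"error_code": ..., "error_msg": ...}
        raise RuntimeError('API error %s: %s' % (res['error_code'], res.get('error_msg')))
    if not res.get('person_info'):
        raise RuntimeError('No person detected in the image')
    ress = res['person_info'][0]['body_parts']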
    

    Running the main program above directly with python produces the keypoint detection result. The code needed to obtain the access token:

    #!/bin/bash
    curl -i -k 'https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&client_id=QMLVBU3QbNA25XxawltynC1R&client_secret=GuwC9U5WTIbvWgo7ryolIB6Yy1e5H5Nx'
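
    The same token request can also be issued from Python instead of the shell, for example with the standard library (a minimal sketch; API_KEY and SECRET_KEY are placeholders for your own credentials):

    # Hedged sketch: fetch an access token without curl.
    # API_KEY and SECRET_KEY are placeholders, not real credentials.
    import json
    import urllib.parse
    import urllib.request
    
    API_KEY = 'your-api-key'
    SECRET_KEY = 'your-secret-key'
    
    token_url = 'https://aip.baidubce.com/oauth/2.0/token?' + urllib.parse.urlencode({
        'grant_type': 'client_credentials',
        'client_id': API_KEY,
        'client_secret': SECRET_KEY,
    })
    with urllib.request.urlopen(token_url) as resp:
        token = json.loads(resp.read().decode('utf-8'))['access_token']
    print(token)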
    

    Here client_id and client_secret are the API Key and Secret Key obtained when registering on the Baidu platform. Running the script above returns a field of the form "access_token":"24.fdd8df19e52da8ff449e1484aa582f42.2592000.1556250057.282335-15823849", which is your own access token. The joint.Joint() used in the code above comes from the internet; its code is as follows:

    import cv2
    
    
    class Joint(object):
        
        def __init__(self, dic):
            # dic maps body part names to {'x': ..., 'y': ...} as returned by the API
            self.dic = dic
        
        def draw_line(self, img):
            # Keypoint pairs connected to form the skeleton
            connections = [
                ('nose', 'neck'),
                ('neck', 'left_shoulder'), ('neck', 'right_shoulder'),
                ('left_shoulder', 'left_elbow'), ('left_elbow', 'left_wrist'),
                ('right_shoulder', 'right_elbow'), ('right_elbow', 'right_wrist'),
                ('neck', 'left_hip'), ('neck', 'right_hip'),
                ('left_hip', 'left_knee'), ('right_hip', 'right_knee'),
                ('left_knee', 'left_ankle'), ('right_knee', 'right_ankle'),
            ]
            for start, end in connections:
                cv2.line(img,
                         (int(self.dic[start]['x']), int(self.dic[start]['y'])),
                         (int(self.dic[end]['x']), int(self.dic[end]['y'])),
                         (0, 255, 0), 2)
            
        def xunhun(self, img):
            im1 = cv2.imread(img, cv2.IMREAD_COLOR)
            # im2 = cv2.resize(im1, (1040, 768), interpolation=cv2.INTER_CUBIC)
    
            # Mark each keypoint with a filled circle, then draw the skeleton
            for i in self.dic:
                cv2.circle(im1, (int(self.dic[i]['x']), int(self.dic[i]['y'])), 5, (0, 255, 0), -1)
    
            self.draw_line(im1)
            cv2.imshow('image', im1)
            cv2.waitKey(0)
    

    With this code the result is sometimes inaccurate: for every image I tried, some keypoints came back with coordinates of (0, 0). This mix of correct and incorrect keypoints in the returned values is probably not something I can fix on my side, so I did not pursue it further.
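
    A pragmatic workaround, rather than a fix, is to skip any keypoint the API reports at (0, 0) when drawing. The sketch below does that with standalone helpers of my own, assuming the same body_parts dictionary structure as above:

    # Hedged sketch: skip keypoints reported at (0, 0) when drawing.
    # `valid` and `draw_valid` are my own helpers, not part of the original code.
    import cv2
    
    SKELETON = [
        ('nose', 'neck'),
        ('neck', 'left_shoulder'), ('neck', 'right_shoulder'),
        ('left_shoulder', 'left_elbow'), ('left_elbow', 'left_wrist'),
        ('right_shoulder', 'right_elbow'), ('right_elbow', 'right_wrist'),
        ('neck', 'left_hip'), ('neck', 'right_hip'),
        ('left_hip', 'left_knee'), ('right_hip', 'right_knee'),
        ('left_knee', 'left_ankle'), ('right_knee', 'right_ankle'),
    ]
    
    def valid(part):
        # Treat (0, 0) as "keypoint not found"
        return int(part['x']) != 0 or int(part['y']) != 0
    
    def draw_valid(img_path, body_parts):
        img = cv2.imread(img_path, cv2.IMREAD_COLOR)
        for name, part in body_parts.items():
            if valid(part):
                cv2.circle(img, (int(part['x']), int(part['y'])), 5, (0, 255, 0), -1)
        for a, b in SKELETON:
            if a in body_parts and b in body_parts and valid(body_parts[a]) and valid(body_parts[b]):
                cv2.line(img,
                         (int(body_parts[a]['x']), int(body_parts[a]['y'])),
                         (int(body_parts[b]['x']), int(body_parts[b]['y'])),
                         (0, 255, 0), 2)
        cv2.imshow('image', img)
        cv2.waitKey(0)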

    If anything here is incorrect, corrections are welcome; if there is any infringement, please contact me and I will remove it.

  • Original post: https://www.cnblogs.com/xiaoheizi-12345/p/10658936.html