• Python Image Matching: Feature Matching


    Original article: https://blog.csdn.net/zhuisui_woxin/article/details/84400439

    Install the required versions of the libraries (note: these exact versions are needed); a quick verification snippet follows the uninstall commands below:

    pip install -i https://pypi.tuna.tsinghua.edu.cn/simple opencv-python==3.4.2.16
    pip install -i https://pypi.tuna.tsinghua.edu.cn/simple opencv-contrib-python==3.4.2.16
    

    To uninstall older versions first, if needed:

    pip uninstall opencv-python
    pip uninstall opencv-contrib-python
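
    To confirm that the pinned build is the one actually in use, the short check below may help. It is not part of the original post and only assumes the two 3.4.2.16 packages above installed correctly:

    # Sanity check (not in the original post): verify the OpenCV version and that the
    # contrib module providing SIFT is available.
    import cv2
    print(cv2.__version__)                  # expect: 3.4.2
    sift = cv2.xfeatures2d.SIFT_create()    # fails on builds without contrib or with patented algorithms disabled
    print(type(sift))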
    

    Test images used (shown as figures in the original post): template_adjust (the template) and target (the image to search in); both are ordinary small images.

    Code 1:

    # OpenCV feature matching with BFMatcher (ORB descriptors)
    import cv2
    from matplotlib import pyplot as plt
    # pip install -i https://pypi.tuna.tsinghua.edu.cn/simple opencv-python==3.4.2.16
    # pip install -i https://pypi.tuna.tsinghua.edu.cn/simple opencv-contrib-python==3.4.2.16

    # Read the two images to be matched, as grayscale.
    template = cv2.imread("template_adjust.jpg", 0)
    target = cv2.imread("target.jpg", 0)
    orb = cv2.ORB_create()                                 # create the ORB feature detector
    kp1, des1 = orb.detectAndCompute(template, None)       # keypoints and descriptors of the template
    kp2, des2 = orb.detectAndCompute(target, None)         # keypoints and descriptors of the target
    bf = cv2.BFMatcher(cv2.NORM_HAMMING, crossCheck=True)  # brute-force matcher with cross-checking
    matches = bf.match(des1, des2)                         # match the descriptors
    matches = sorted(matches, key=lambda x: x.distance)    # sort by descriptor distance, best first
    result = cv2.drawMatches(template, kp1, target, kp2, matches[:40], None, flags=2)  # draw the 40 best matches
    plt.imshow(result), plt.show()                         # display with matplotlib
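
    A common variation on the snippet above (not in the original post) is to drop crossCheck and instead use knnMatch with Lowe's ratio test, the same filtering the FLANN examples below rely on. A minimal sketch, assuming the same two images:

    # Sketch: ORB + BFMatcher with knnMatch and the ratio test instead of crossCheck.
    import cv2
    from matplotlib import pyplot as plt

    template = cv2.imread("template_adjust.jpg", 0)
    target = cv2.imread("target.jpg", 0)
    orb = cv2.ORB_create()
    kp1, des1 = orb.detectAndCompute(template, None)
    kp2, des2 = orb.detectAndCompute(target, None)
    bf = cv2.BFMatcher(cv2.NORM_HAMMING)        # crossCheck must stay False when asking for k=2 neighbours
    pairs = bf.knnMatch(des1, des2, k=2)        # two nearest neighbours per template descriptor
    # Keep a match only when it is clearly better than the second-best candidate.
    good = [p[0] for p in pairs if len(p) == 2 and p[0].distance < 0.75 * p[1].distance]
    result = cv2.drawMatches(template, kp1, target, kp2, good, None, flags=2)
    plt.imshow(result), plt.show()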
    
    

    Code 2:

    '''
    FLANN based Matcher
    1. FLANN stands for Fast Library for Approximate Nearest Neighbors. It is a collection of
       algorithms optimized for fast nearest-neighbour search in large datasets and for
       high-dimensional features.
    2. For large datasets it is faster than BFMatcher.
    3. Two dictionaries must be passed to specify the algorithm to use and its parameters.
    For SIFT or SURF descriptors, use:
    index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5)
    For ORB (binary) descriptors, use:
    index_params = dict(algorithm = FLANN_INDEX_LSH,
                        table_number = 6,      # 12  number of LSH hash tables
                        key_size = 12,         # 20  size of the hash key in bits
                        multi_probe_level = 1) # 2   number of neighbouring buckets to probe
    (The separate search_params dict, e.g. dict(checks=50), sets how many times the index trees
    are traversed recursively; higher values give better precision at the cost of speed.)
    '''
    import cv2 as cv
    from matplotlib import pyplot as plt

    # Read the two grayscale images to match.
    queryImage = cv.imread("template_adjust.jpg", 0)
    trainingImage = cv.imread("target.jpg", 0)
    sift = cv.xfeatures2d.SIFT_create()  # create the SIFT detector
    kp1, des1 = sift.detectAndCompute(queryImage, None)
    kp2, des2 = sift.detectAndCompute(trainingImage, None)
    # Set the FLANN parameters (in FLANN the kd-tree index is 1).
    FLANN_INDEX_KDTREE = 1
    indexParams = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    searchParams = dict(checks=50)
    flann = cv.FlannBasedMatcher(indexParams, searchParams)
    matches = flann.knnMatch(des1, des2, k=2)
    # Prepare an empty mask, then mark the matches that pass Lowe's ratio test (ratio < 0.5).
    matchesMask = [[0, 0] for i in range(len(matches))]
    for i, (m, n) in enumerate(matches):
        if m.distance < 0.5 * n.distance:
            matchesMask[i] = [1, 0]
    # Colours for the match lines and the single key points.
    drawParams = dict(matchColor=(0, 0, 255), singlePointColor=(255, 0, 0),
                      matchesMask=matchesMask, flags=0)
    resultimage = cv.drawMatchesKnn(queryImage, kp1, trainingImage, kp2, matches, None, **drawParams)
    plt.imshow(resultimage), plt.show()
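
    The docstring above lists LSH index parameters for ORB, yet the code itself matches SIFT descriptors with a kd-tree. For completeness, here is a minimal sketch of the binary-descriptor variant; it is not part of the original post and assumes the same two images and default ORB settings:

    # Sketch: FLANN matching of ORB (binary) descriptors through the LSH index.
    import cv2 as cv
    from matplotlib import pyplot as plt

    queryImage = cv.imread("template_adjust.jpg", 0)
    trainingImage = cv.imread("target.jpg", 0)
    orb = cv.ORB_create()
    kp1, des1 = orb.detectAndCompute(queryImage, None)
    kp2, des2 = orb.detectAndCompute(trainingImage, None)
    FLANN_INDEX_LSH = 6                          # LSH index, meant for binary descriptors
    indexParams = dict(algorithm=FLANN_INDEX_LSH, table_number=6, key_size=12, multi_probe_level=1)
    searchParams = dict(checks=50)
    flann = cv.FlannBasedMatcher(indexParams, searchParams)
    matches = flann.knnMatch(des1, des2, k=2)
    # With LSH some entries can hold fewer than two neighbours, so guard before the ratio test.
    good = [m[0] for m in matches if len(m) == 2 and m[0].distance < 0.7 * m[1].distance]
    result = cv.drawMatches(queryImage, kp1, trainingImage, kp2, good, None, flags=2)
    plt.imshow(result), plt.show()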
    

    Code 3:

    
    
    # Original: https://blog.csdn.net/zhuisui_woxin/article/details/84400439

    # Locate an object in an image with a FLANN based Matcher
    import numpy as np
    import cv2
    from matplotlib import pyplot as plt


    MIN_MATCH_COUNT = 10  # minimum number of good matches required
    template = cv2.imread('template_adjust.jpg', 0)  # queryImage
    target = cv2.imread('target.jpg', 0)             # trainImage

    # Initiate the SIFT detector
    sift = cv2.xfeatures2d.SIFT_create()
    # Find the keypoints and descriptors with SIFT
    kp1, des1 = sift.detectAndCompute(template, None)
    kp2, des2 = sift.detectAndCompute(target, None)
    # Set up the FLANN matcher (the kd-tree index is 1 in FLANN)
    FLANN_INDEX_KDTREE = 1
    index_params = dict(algorithm=FLANN_INDEX_KDTREE, trees=5)
    search_params = dict(checks=50)
    flann = cv2.FlannBasedMatcher(index_params, search_params)
    matches = flann.knnMatch(des1, des2, k=2)
    # Store all the good matches as per Lowe's ratio test (keep matches with ratio < 0.7).
    good = []
    for m, n in matches:
        if m.distance < 0.7 * n.distance:
            good.append(m)
    if len(good) > MIN_MATCH_COUNT:
        # Collect the coordinates of the matched keypoints
        src_pts = np.float32([kp1[m.queryIdx].pt for m in good]).reshape(-1, 1, 2)
        dst_pts = np.float32([kp2[m.trainIdx].pt for m in good]).reshape(-1, 1, 2)
        # Estimate the homography and an inlier mask with RANSAC
        M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
        matchesMask = mask.ravel().tolist()
        h, w = template.shape
        # Project the four corners of the template into the target image
        pts = np.float32([[0, 0], [0, h-1], [w-1, h-1], [w-1, 0]]).reshape(-1, 1, 2)
        dst = cv2.perspectiveTransform(pts, M)
        cv2.polylines(target, [np.int32(dst)], True, 0, 2, cv2.LINE_AA)
    else:
        print("Not enough matches are found - %d/%d" % (len(good), MIN_MATCH_COUNT))
        matchesMask = None
    draw_params = dict(matchColor=(0, 255, 0),
                       singlePointColor=None,
                       matchesMask=matchesMask,
                       flags=2)
    result = cv2.drawMatches(template, kp1, target, kp2, good, None, **draw_params)
    plt.imshow(result, 'gray')
    plt.show()
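
    Once the projected corners dst are available, the located region can be used directly, for example to get its centre or an axis-aligned crop. A small follow-on sketch, not from the original post, meant to run inside the if len(good) > MIN_MATCH_COUNT: branch above where dst, target, np and cv2 are in scope:

    # Follow-on sketch: centre point and bounding-box crop of the located region.
    corners = dst.reshape(-1, 2)                  # four (x, y) corners in target coordinates
    center_x, center_y = corners.mean(axis=0)     # centre of the matched quadrilateral
    print("match centre: (%.1f, %.1f)" % (center_x, center_y))
    x_min, y_min = np.int32(corners.min(axis=0))
    x_max, y_max = np.int32(corners.max(axis=0))
    crop = target[max(y_min, 0):y_max, max(x_min, 0):x_max]  # crop around the match
    cv2.imwrite("matched_region.jpg", crop)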
    

    The rest will be filled in later.

  • Original post: https://www.cnblogs.com/guxingy/p/12627338.html