1

我有两张在不同视角下拍摄的同一物体的图像。我的目标是从两张图像中找到匹配的关键点,然后从一张图像中选择关键点,并在另一张图像中绘制对应的极线。我正在使用 Python 和 OpenCV 进行实验。我已经能够从这两张图像中找到匹配的关键点,也已经能够绘制出点的对应关系。

但我无法在 Python 的 cv2 模块中找到 ComputeCorrespondEpilines() 函数。因为我使用的是 numpy 数组,所以不能通过旧的 cv 模块来调用这个函数。cv2 中似乎没有 ComputeCorrespondEpilines() 这个功能。

对 numpy 数组使用 cv.ComputeCorrespondEpilines() 会出现此错误:

  cv.ComputeCorrespondEpilines(mp_array,1, cv.fromarray(F1),cv.fromarray(liness))
TypeError: object does not have array interface

(mp_array 是一个由元组构成的 numpy 数组。)

我也尝试过把 mp_array 中的元组转换为列表,但仍然出现同样的错误。

这是我的代码:

    import cv
import cv2
import sys
import scipy as sp
import numpy as np

img1_path = sys.argv[1]
img2_path = sys.argv[2]
img1_ = cv2.imread(img1_path, cv2.CV_LOAD_IMAGE_GRAYSCALE)
img2_ = cv2.imread(img2_path, cv2.CV_LOAD_IMAGE_GRAYSCALE)
detector = cv2.FeatureDetector_create("SIFT")
descriptor = cv2.DescriptorExtractor_create("BRIEF")
matcher = cv2.DescriptorMatcher_create("BruteForce-Hamming")

detect keypoints
kp1 = detector.detect(img1)
kp2 = detector.detect(img2)

print '#keypoints in image1: %d, image2: %d' % (len(kp1), len(kp2))

# descriptors
k1, d1 = descriptor.compute(img1, kp1)
k2, d2 = descriptor.compute(img2, kp2)

print '#descriptors in image1: %d, image2: %d' % (len(d1), len(d2))

# match the keypoints
matches = matcher.match(d1, d2)
dist = [m.distance for m in matches]

print 'distance: min: %.3f' % min(dist)
print 'distance: mean: %.3f' % (sum(dist) / len(dist))
print 'distance: max: %.3f' % max(dist)

# threshold: half the mean
thres_dist = (sum(dist) / len(dist)) * 0.5

# keep only the reasonable matches
sel_matches = [m for m in matches if m.distance < thres_dist]

print '#selected matches:', len(sel_matches)
points1=[];#1
points2=[];#1

# #####################################
# visualization
h1, w1 = img1_.shape[:2]
h2, w2 = img2_.shape[:2]
view = sp.zeros((max(h1, h2), w1 + w2, 3), sp.uint8)
view[:h1, :w1, 0] = img1_
view[:h2, w1:, 0] = img2_
view[:, :, 1] = view[:, :, 0]
view[:, :, 2] = view[:, :, 0]

for m in sel_matches:
   # draw the keypoints
   # print m.queryIdx, m.trainIdx, m.distance
   match_point.append(k1[m.queryIdx].pt)
   pt1=list(k1[m.queryIdx].pt)
   pt1=[int(ii) for ii in pt1]
   pt2=list((k2[m.trainIdx].pt[0] + w1, k2[m.trainIdx].pt[1]))
   pt2=[int(ii) for ii in pt2]
   points1.append(pt1);#1
   points2.append(pt2);#1
   color = tuple([sp.random.randint(0, 255) for _ in xrange(3)])
   cv2.line(view, tuple(pt1), tuple(pt2), color)
points1=np.asarray(points1,dtype=float);
points2=np.asarray(points2,dtype=float);
liness=[]
F1,mask=cv2.findFundamentalMat(points1,
        points2,method=cv.CV_FM_RANSAC,param1=1,param2=0.99);
print F1
#points1 = cv.fromarray(points1)
#cv.ComputeCorrespondEpilines(points1,2, F1,liness)
#cv.ComputeCorrespondEpilines(cv.fromarray(points1),
         1,cv.fromarray(F1),cv.fromarray(liness))
mp_array=np.asarray(match_point,dtype=np.int)
print type(match_point)
print type(match_point[0])
print type(mp_array)
print mp_array
cv.ComputeCorrespondEpilines(match_point,1, cv.fromarray(F1),cv.fromarray(liness))
4

0 回答 0