
I'm trying to stitch drone images with OpenCV. I put together the code below, which pads the second image so the stitched result doesn't clip off any edges. The unpadded image should then be overlaid on top of the padded one, but I don't understand the stitching code well enough to make that happen, and my fiddling so far hasn't had any luck (a rough sketch of the overlay I mean is included after the code).

Any help would be much appreciated, both by me and by the mussels we're surveying!

Results

Code

import cv2
import numpy as np

# Read in images resize and convert a copy to grayscale

img1 = cv2.imread('Photos/Mussels/img1.jpg')
img2 = cv2.imread('Photos/Mussels/img2.jpg')

img1 = cv2.resize(img1, (0,0), fx=0.5, fy=0.5)
img2 = cv2.resize(img2, (0,0), fx=0.5, fy=0.5)

img1_gs = cv2.cvtColor(img1,cv2.COLOR_BGR2GRAY)
img2_gs = cv2.cvtColor(img2,cv2.COLOR_BGR2GRAY)

# Add padding to prevent stitched image cutting out parts of the image 
padding = 1000
img2 = cv2.copyMakeBorder( img2, padding, padding, padding, padding, cv2.BORDER_CONSTANT, value=(0,0,0))
img2_gs = cv2.copyMakeBorder( img2_gs, padding, padding, padding, padding, cv2.BORDER_CONSTANT, value=(0,0,0))

preview_height = 500
preview_width = 500

# Find the key points and descriptors with SIFT -------------------------------#

sift = cv2.xfeatures2d.SIFT_create()
kp1, des1 = sift.detectAndCompute(img1_gs,None)
kp2, des2 = sift.detectAndCompute(img2_gs,None)

kp_show = cv2.drawKeypoints(img1,kp1,None)
kp_show_resize = cv2.resize(kp_show, (preview_width ,preview_height))
cv2.imshow('Image 1 keypoints',kp_show_resize)
# Generate matching keypoints in images
match = cv2.BFMatcher()
matches = match.knnMatch(des1,des2,k=2)

good = [] 
for m,n in matches: # Filter for good matches
    if m.distance < 0.7*n.distance:
        good.append(m)
draw_params = dict(matchColor = (0,255,0), # draw matches in green color
                   singlePointColor = None,
                   flags = 2)
img3 = cv2.drawMatches(img1,kp1,img2,kp2,good,None,**draw_params)

matches_resize = cv2.resize(img3, (preview_width * 2 ,preview_height))
cv2.imshow("DrawMatches.jpg", matches_resize)

# Stitching ------------------------------------------------------------------#

MIN_MATCH_COUNT = 10
if len(good) > MIN_MATCH_COUNT:
    src_pts = np.float32([ kp1[m.queryIdx].pt for m in good ]).reshape(-1,1,2)
    dst_pts = np.float32([ kp2[m.trainIdx].pt for m in good ]).reshape(-1,1,2)
    M, mask = cv2.findHomography(src_pts, dst_pts, cv2.RANSAC, 5.0)
    h,w, _ = img1.shape
    pts = np.float32([ [0,0],[0,h-1],[w-1,h-1],[w-1,0] ]).reshape(-1,1,2)
    dst = cv2.perspectiveTransform(pts, M)
    img2 = cv2.polylines(img2,[np.int32(dst)],True,255,3, cv2.LINE_AA)
    img2_preview = cv2.resize(img2, (preview_width ,preview_height))
    cv2.imshow("Overlapping.jpg", img2_preview)
else:
    print("Not enought matches are found - %d/%d", (len(good)/MIN_MATCH_COUNT))
dst = cv2.warpPerspective(img1,M,(img2.shape[1] + img1.shape[1], img2.shape[0]))
dst[0:img2.shape[0],0:img2.shape[1]] = img2

dst_resize = cv2.resize(dst, (preview_width, preview_height))
cv2.imshow("Stitched.jpg", dst_resize)

cv2.waitKey(0)  # keep the preview windows open until a key is pressed
cv2.destroyAllWindows()
