
I am using the KLT (Kanade-Lucas-Tomasi) tracking algorithm to track traffic movement in India. Traffic on one side of the road is tracked correctly, but the traffic moving through the frame on the other side is not detected at all.

The algorithm is implemented with cv2.goodFeaturesToTrack and cv2.calcOpticalFlowPyrLK; the result is shown below.

[image: frame from the video with the tracked feature points drawn on it]

In the image you can see that there are no tracked features on the red and silver cars. The yellow auto-rickshaw on the left is not being tracked either. Is there a reason for this? The corners are clearly there.

Feature parameters for cv2.goodFeaturesToTrack:

feature_params = dict( maxCorners = 500,   # How many pts. to locate
                       qualityLevel = 0.1,  # b/w 0 & 1; corners below this min. quality are rejected
                       minDistance = 7,   # Min Euclidean distance b/w detected corners
                       blockSize = 3 ) # Size of the averaging block used to compute the derivative covariance matrix over each pixel neighborhood

Parameters for cv2.calcOpticalFlowPyrLK:

lk_params = dict( winSize  = (15,15),  # size of the search window at each pyramid level
                  maxLevel = 2,   # 0 means pyramids are not used (single level); 1 means two levels are used, and so on
                  criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))

The video I have to work with is 60 minutes long. It is quite long, and KLT stops tracking after about 5 minutes. Any suggestions or help would be great. Thank you.


2 Answers


Basically you are doing everything right; you just need to re-initialize the good features to track, like this:

p0 = cv2.goodFeaturesToTrack(old_gray, mask = None, **feature_params)

say after every 5 frames, or whatever interval you like. Hope it helps! Below is my code:

import cv2
import numpy as np

cap = cv2.VideoCapture('side.avi')
# params for ShiTomasi corner detection
feature_params = dict( maxCorners = 100,
                       qualityLevel = 0.3,
                       minDistance = 7,
                       blockSize = 7 )
# Parameters for lucas kanade optical flow
lk_params = dict( winSize  = (15,15),
                  maxLevel = 2,
                  criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
# Create some random colors
color = np.random.randint(0,255,(100,3))
# Take first frame and find corners in it
ret, old_frame = cap.read()
for i in range(60):
    ret, old_frame = cap.read()
old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
p0 = cv2.goodFeaturesToTrack(old_gray, mask = None, **feature_params)
print(p0)
# Create a mask image for drawing purposes
mask = np.zeros_like(old_frame)
while(1):
    ret,frame = cap.read()
    if frame is None:   # stop at the end of the video
        break
    frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    frame_no = cap.get(cv2.CAP_PROP_POS_FRAMES)
    if int(frame_no)%5 == 0:
        p0 = cv2.goodFeaturesToTrack(old_gray, mask = None, **feature_params)
    # calculate optical flow
    p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)
    # Select good points
    good_new = p1[st==1]
    good_old = p0[st==1]
    # draw the tracks
    for i,(new,old) in enumerate(zip(good_new,good_old)):
        a,b = new.ravel()
        c,d = old.ravel()
        # newer OpenCV versions require integer pixel coordinates for drawing
        mask = cv2.line(mask, (int(a),int(b)),(int(c),int(d)), color[i].tolist(), 2)
        frame = cv2.circle(frame,(int(a),int(b)),5,color[i].tolist(),-1)
    img = cv2.add(frame,mask)
    cv2.imshow('frame',img)
    k = cv2.waitKey(2000) & 0xff
    if k == 27:
        break
    # Now update the previous frame and previous points
    old_gray = frame_gray.copy()
    p0 = good_new.reshape(-1,1,2)
cv2.destroyAllWindows()
cap.release()
Answered 2018-07-05T05:37:47.877
import numpy as np
import cv2

video_path = ''
output_file = ""     
cap = cv2.VideoCapture(video_path)

fourcc = cv2.VideoWriter_fourcc(*'DIVX')

# params for ShiTomasi corner detection
feature_params = dict( maxCorners = 500,   # How many pts. to locate
                       qualityLevel = 0.1,  # b/w 0 & 1; corners below this min. quality are rejected
                       minDistance = 7,   # Min Euclidean distance b/w detected corners
                       blockSize = 3 ) # Size of the averaging block used to compute the derivative covariance matrix over each pixel neighborhood

# Parameters for lucas kanade optical flow
lk_params = dict( winSize  = (15,15),  # size of the search window at each pyramid level
                  maxLevel = 2,   # 0 means pyramids are not used (single level); 1 means two levels are used, and so on
                  criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))

''' Criteria : termination criteria for the iterative search algorithm,
    i.e. stop after maxCount { Criteria_Count } iterations,
    or when the search window moves by less than { Criteria_Epsilon } '''


# Take first frame and find corners in it
ret, old_frame = cap.read()
old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)  #use goodFeaturesToTrack to find the location of the good corner.

# Create a mask image for drawing purposes, filled with zeros
mask = np.zeros_like(old_frame)

y = 0
is_begin = True # To save the output video
count = 1  # for the frame count
n = 50  # Frames refresh rate for feature generation

while True:
    ret,frame = cap.read()
    if frame is None:
        break
    processed = frame

    #Saving the Video
    if is_begin:
        h, w, _ = processed.shape
        out = cv2.VideoWriter(output_file, fourcc, 30, (w, h), True)
        is_begin = False

    # Convert to Grey Frame
    frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    if count%n == 0:  # Refresh the tracking features after every n frames
        cv2.imwrite('img/r{0:05d}.jpg'.format(y), img)
        y += 1
        # Re-detect corners on the previous frame instead of reading (and skipping)
        # an extra frame, so the optical flow still runs previous frame -> current frame
        p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)
        mask = np.zeros_like(old_frame)

    # calculate optical flow
    p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)

    # Select good points
    good_new = p1[st==1]
    good_old = p0[st==1]

    # draw the tracks
    for i,(new,old) in enumerate(zip(good_new,good_old)):
        a,b = new.ravel() #tmp new value
        c,d = old.ravel() #tmp old value
        #draws a line connecting the old point with the new point
        mask = cv2.line(mask, (int(a),int(b)),(int(c),int(d)), (0,255,0), 1)  # newer OpenCV needs integer coords
        #draws the new point
        frame = cv2.circle(frame,(int(a),int(b)),2,(0,0,255), -1)
    img = cv2.add(frame,mask)

    out.write(img)
    cv2.imshow('frame',img)
    k = cv2.waitKey(30) & 0xff

    #Show the Output
    if k == 27:
        cv2.imshow('', img)
        break

    # Now update the previous frame and previous points
    old_gray = frame_gray.copy()
    p0 = good_new.reshape(-1,1,2)

    count += 1

# release the writer and capture, and destroy all windows
out.release()
cap.release()
cv2.destroyAllWindows()

I added a refresh rate for goodFeaturesToTrack and it is working, but we do not get the complete trajectories. I am still working on that.
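One possible way to keep longer trajectories across a refresh (instead of replacing p0 wholesale, which discards every existing track) is to detect new corners only in regions away from the points already being tracked and append them to p0. The sketch below is only an illustration of that idea, not part of the code above; the add_new_features name and the min_dist radius are assumptions:

import numpy as np
import cv2

def add_new_features(gray, p0, feature_params, min_dist=10):
    # Detect corners only where no point is currently tracked,
    # then append them to p0 so existing trajectories are preserved.
    detect_mask = np.full(gray.shape, 255, dtype=np.uint8)
    if p0 is not None:
        for x, y in p0.reshape(-1, 2):
            # block out a small disc around every point we already track
            cv2.circle(detect_mask, (int(x), int(y)), min_dist, 0, -1)
    new_pts = cv2.goodFeaturesToTrack(gray, mask=detect_mask, **feature_params)
    if new_pts is None:
        return p0
    if p0 is None or len(p0) == 0:
        return new_pts
    return np.vstack([p0, new_pts])   # both arrays have shape (N, 1, 2)

With something like this, the refresh branch would call p0 = add_new_features(frame_gray, p0, feature_params) instead of re-running cv2.goodFeaturesToTrack on its own, so surviving tracks keep drawing their full trajectories and only the empty regions get fresh corners.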

Answered 2018-07-08T09:47:38.983