我编写了这段代码来跟踪从滑槽上掉下来的小型移动物体,代码工作但使用 60 fps 1920 x 1080 镜头运行缓慢,代码仅以大约 10 fps 运行。那里的问题有点不言自明,因为我需要程序能够以实时速度准确地处理素材,并且在零件移动极快时需要非常高的 fps。我能做些什么来改善运行时间吗?我最初尝试使用一个简单的神经网络,但训练它被证明过于耗时,而这在更短的时间内产生了准确的结果。
对于代码中的任何明显错误,还请见谅:我是一名机械工程研究生(我发现自己其实更喜欢软件),并且只有大约一周的时间来学习这些。
视频片段可以在这里看到:https://www.youtube.com/watch?v=Zs5YekjqhxA&feature=youtu.be
import cv2
import numpy as np
import time
# Wall-clock reference for the final runtime report.
start_time = time.time()

# Running total of parts counted as they fall off the chute.
count = 0
# Pool of free tracker IDs; an ID returns to the pool when its track ends.
ID = list(range(10))
# Active trackers: each entry is [id, 0, tracker_object, [box_history, ...]].
TrackList = []
def nothing(x):
    """No-op placeholder callback (e.g. for cv2 trackbars); ignores its argument."""
    return None
def isolate(img, vertices):
    """Black out everything outside the polygon(s) described by vertices.

    img: BGR (H, W, C) or single-channel (H, W) image as a numpy array.
    vertices: polygon point array accepted by cv2.fillPoly,
              e.g. np.array([[(x1, y1), (x2, y2), ...]], np.int32).
    Returns a new image of the same shape, zeroed outside the polygon.
    """
    mask = np.zeros_like(img)
    # Fill value needs one element per channel; the original indexed
    # img.shape[2] unconditionally and crashed on grayscale input.
    if img.ndim == 2:
        match = 255
    else:
        match = (255,) * img.shape[2]
    cv2.fillPoly(mask, vertices, match)
    masked = cv2.bitwise_and(img, mask)
    return masked
# --- setup (loop invariants hoisted out of the frame loop) -------------------
# read video input
cap = cv2.VideoCapture('testGood.mp4')
# background removal initiation, either KNN or MOG2; KNN yielded best results in testing
back = cv2.createBackgroundSubtractorKNN()

# Region of interest polygon (half-res frame coordinates, top-left/bottom-left/
# bottom-right/top-right).  It never changes, so build the vertex array once
# instead of three times per frame as before.
tlX, tlY, blX, blY, brX, brY, trX, trY = 400, 0, 400, 800, 1480, 800, 1480, 0
region = np.array([[(tlX, tlY), (blX, blY), (brX, brY), (trX, trY)]], np.int32)

# grab initial frame pair (movement is found by differencing two frames)
_, frameCap1 = cap.read()
check, frameCap2 = cap.read()

# main loop
# BUG FIX: the original tested the bound method `cap.isOpened` (always truthy)
# instead of calling it.
while cap.isOpened():
    # ensure there are frames to read
    if check == False:
        break

    # --- image preprocessing -------------------------------------------------
    # Isolate the region of interest, then halve resolution (pyrDown) to cut
    # per-pixel work 4x.  The original ran isolate+pyrDown twice on frameCap1;
    # the drawing copy `frame` is now a cheap .copy() of the working frame.
    roi1 = isolate(frameCap1, region)
    roi2 = isolate(frameCap2, region)
    frame1 = cv2.pyrDown(roi1)
    frame2 = cv2.pyrDown(roi2)
    frame = frame1.copy()  # drawing target; keeps frame1 clean for the subtractor

    # apply background subtraction
    # NOTE(review): every physical frame passes through back.apply() twice
    # (as frame2 now and as frame1 next iteration), doubling subtractor cost.
    # Caching the previous iteration's mask would halve this, at the cost of a
    # slightly different background model — worth benchmarking.
    fgmask1 = back.apply(frame1)
    fgmask2 = back.apply(frame2)

    # remove shadow pixels (KNN marks shadows as 127) and make them black
    fgmask1[fgmask1 == 127] = 0
    fgmask2[fgmask2 == 127] = 0

    # apply a threshold; not strictly necessary but cleans up some grey noise
    _, thresh1 = cv2.threshold(fgmask1, 200, 255, cv2.THRESH_BINARY)
    _, thresh2 = cv2.threshold(fgmask2, 200, 255, cv2.THRESH_BINARY)

    # find movement: contours of the frame-to-frame mask difference
    diff = cv2.absdiff(thresh1, thresh2)
    contours, _ = cv2.findContours(diff, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    movement = False
    for contour in contours:
        area = cv2.contourArea(contour)  # compute once, not twice per contour
        if area < 1350 or area > 3500:
            continue
        movement = True

    # --- update existing tracker IDs -----------------------------------------
    for tracked in TrackList:
        success, box = tracked[2].update(frame)
        if success:
            x, y, w, h = [int(v) for v in box]
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
            # also stamp the tracked box into thresh1 so the "untracked
            # movement" pass below sees it as covered
            cv2.rectangle(thresh1, (x, y), (x + w, y + h), (255, 255, 255), 2)
            tracked[3].append([x, y, w, h])
        else:
            tracked[3].append(None)  # tracker lost the part this frame

    # --- retire trackers that stopped moving or lost their target ------------
    # BUG FIX: the original maintained a manual index `p` but the early
    # `continue` skipped `p += 1`, so delList indices drifted whenever a
    # just-created (1-entry) track preceded a finished one and the wrong
    # tracker could be deleted.  enumerate() keeps the index correct.
    delList = []
    for p, tracked in enumerate(TrackList):
        history = tracked[3]
        if len(history) == 1:
            continue  # brand-new track, nothing to judge yet
        n = len(history) - 1
        if history[n] == history[n - 1] and history[0] != history[n]:
            if history[n][1] > history[0][1]:
                # box stopped after net downward motion -> part reached bottom
                count += 1
                print('count1: ', count)
                ID.append(tracked[0])
                cv2.putText(frame, 'Counted', (history[-2][0], history[-2][1]),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 200, 255), 3)
                delList.append(p)
            else:
                # stopped without moving down -> spurious track, discard
                ID.append(tracked[0])
                delList.append(p)
                print('discard 1')
                cv2.putText(frame, 'discard 1', (history[-2][0], history[-2][1]),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 200, 255), 3)
                print(tracked)
        elif n > 5 and history[n] == history[n - 1] and history[0] == history[n]:
            # never moved at all for more than 5 frames -> spurious track
            ID.append(tracked[0])
            delList.append(p)
            cv2.putText(frame, 'discard 1', (history[-2][0], history[-2][1]),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 200, 255), 3)
            print('discard 2')
        elif history[-1] is None:
            # tracker lost the part (it left the frame) -> count it
            count += 1
            print('count2: ', count)
            ID.append(tracked[0])
            cv2.putText(frame, 'Counted', (history[-2][0], history[-2][1]),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 200, 255), 3)
            delList.append(p)

    cv2.putText(frame, 'Count: ' + str(count), (50, 50),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 200, 255), 3)

    # remove dead IDs in one pass (replaces the repeated try/remove(None) sweep)
    if delList:
        dead = set(delList)
        TrackList = [t for i, t in enumerate(TrackList) if i not in dead]

    # --- start tracking any movement that isn't already tracked --------------
    untracked = []
    if movement:
        checkContours, _ = cv2.findContours(thresh1, cv2.RETR_TREE,
                                            cv2.CHAIN_APPROX_SIMPLE)
        for contour in checkContours:
            area = cv2.contourArea(contour)
            if not (1350 < area < 3500):
                continue
            (x, y, w, h) = cv2.boundingRect(contour)
            cx, cy = x + w / 2, y + h / 2
            tracked = False
            for box in TrackList:
                last = box[3][-1]
                # is this contour's centre inside an existing tracked box?
                if last[0] < cx < last[0] + last[2] and last[1] < cy < last[1] + last[3]:
                    tracked = True
            if not tracked:
                cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 0, 0), 2)
                cv2.rectangle(frame, (x, y), (x + w, y + h), (255, 255, 255), 2)
                untracked.append([x, y, w, h])

    # assign tracking to the new, untracked boxes
    ID.sort()  # hand out the lowest free ID first
    for unt in untracked:
        idtemp = ID.pop(0)
        # NOTE(review): CSRT is the most accurate but slowest OpenCV tracker;
        # trying cv2.TrackerKCF_create() (or MOSSE) here is likely the single
        # biggest runtime win if its accuracy proves acceptable.
        temp = [idtemp, 0, cv2.TrackerCSRT_create(), [unt]]
        # initialise with a box 10% larger than the detected contour
        temp[2].init(frame, (unt[0], unt[1], 1.10 * unt[2], 1.10 * unt[3]))
        TrackList.append(temp)

    # show frames
    cv2.imshow('frame 1', frame)
    #cv2.imshow('frame 2',thresh1)

    # advance: the current second frame becomes next iteration's first frame
    frameCap1 = frameCap2
    check, frameCap2 = cap.read()

    # waitKey(1) services the GUI; Esc (27) exits early
    key = cv2.waitKey(1) & 0xFF
    if key == 27:
        break

cap.release()
cv2.destroyAllWindows()
print(count)
print("runtime: %s seconds" % (time.time() - start_time))