In my AR project, I need to detect the input as an object. I tried the code below on the Octave platform, but it fails with this error:

error: 'detectSURFFeatures' undefined near line 10, column 14
error: called from plz at line 10, column 13
I have installed the image acquisition package and the image package, and I also use rgb2gray() to convert the color image to grayscale.
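For reference, I installed and loaded the packages with the usual Octave Forge workflow, roughly like this:

% install once from Octave Forge (needs an internet connection)
pkg install -forge image
pkg install -forge image-acquisition
% load in each Octave session before use
pkg load image
pkg load image-acquisition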
However, I am unable to install the Computer Vision System Toolbox in Octave. How can I install that toolbox in Octave? Can you help me solve this? Here is my code:
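% Load the reference image and detect/extract SURF features in it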
referenceImage=imread('l.jpg');
figure(1)
imshow(referenceImage);
referenceImageGray = rgb2gray(referenceImage);
figure(2)
imshow(referenceImageGray);
referencePts=detectSURFFeatures(referenceImageGray);
referenceFeatures=extractFeatures(referenceImageGray,referencePts);
figure;
imshow(referenceImage), hold on;
plot(referencePts.selectStrongest(50));
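% Open the replacement video and skip past the first 30 frames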
video= vision.VideoFileReader('nothing.mp4','VideoOutputDataType','uint8');
for k=1:30
step(video);
end
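% Capture a frame from the webcam and detect SURF features in it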
camera=webcam('CyberLink YouCam 7');
set(camera, 'Resolution', '640x480');
cameraFrame=snapshot(camera);
cameraFrameGray=rgb2gray(cameraFrame);
cameraPts=detectSURFFeatures(cameraFrameGray);
figure(1)
imshow(cameraFrame), hold on;
plot(cameraPts.selectStrongest(50));
cameraFeatures=extractFeatures(cameraFrameGray,cameraPts);
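% Match the camera-frame features against the reference features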
idxPairs=matchFeatures(cameraFeatures,referenceFeatures);
matchedCameraPts=cameraPts(idxPairs(:,1));
matchedReferencePts=referencePts(idxPairs(:,2));
figure(1)
showMatchedFeatures(cameraFrame,referenceImage,matchedCameraPts,matchedReferencePts,'Montage');
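% Estimate the similarity transform from the reference image to the camera frame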
[referenceTransform,inlierReferencePts,inlierCameraPts]=estimateGeometricTransform(matchedReferencePts,matchedCameraPts,'Similarity');
figure(1)
showMatchedFeatures(cameraFrame,referenceImage,...
inlierCameraPts,inlierReferencePts,'Montage');
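% Read one video frame and scale it to the reference image size
% (findScaleTransform is not a built-in; it is a demo helper that must be on the path)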
videoFrame=step(video);
repDims=size(videoFrame(:,:,1));
refDims=size(referenceImage);
scaleTransform=findScaleTransform(refDims,repDims);
outputView=imref2d(size(referenceImage));
videoFrameScaled=imwarp(videoFrame,scaleTransform,'OutputView',outputView);
figure(1)
imshowpair(referenceImage,videoFrameScaled,'Montage');
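% Warp the scaled video frame into the camera frame's coordinate system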
outputView=imref2d(size(cameraFrame));
videoFrameTransformed=imwarp(videoFrameScaled,referenceTransform,'OutputView',outputView);
figure(1)
imshowpair(cameraFrame,videoFrameTransformed,'Montage');
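% Blend the warped video frame onto the camera frame through a binary mask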
alphaBlender=vision.AlphaBlender('Operation','Binary mask','MaskSource','Input port');
mask=videoFrameTransformed(:,:,1)>0 | videoFrameTransformed(:,:,2)>0 | videoFrameTransformed(:,:,3)>0;
outputFrame=step(alphaBlender,cameraFrame,videoFrameTransformed,mask);
figure(1)
imshow(outputFrame);
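% Initialize a KLT point tracker on the inlier points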
pointTracker=vision.PointTracker('MaxBidirectionalError',2);
initialize(pointTracker,inlierCameraPts.Location,cameraFrame);
trackingMarkers=insertMarker(cameraFrame,inlierCameraPts.Location,'plus','Size',7,'Color','yellow');
figure(1)
imshow(trackingMarkers);
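% Keep the previous frame, capture the next one, and track the points into it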
prevCameraFrame=cameraFrame;
cameraFrame=snapshot(camera);
[trackedPoints,isValid]=step(pointTracker,cameraFrame);
newValidLocation=trackedPoints(isValid,:);
oldValidLocation=inlierCameraPts.Location(isValid,:);
if nnz(isValid) >= 2
[trackingTransform,oldInlierLocation,newInlierLocation]=estimateGeometricTransform(oldValidLocation,newValidLocation,'similarity');
end
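% Show the tracked correspondences, update the tracker, and chain the transforms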
figure(1)
showMatchedFeatures(prevCameraFrame,cameraFrame,oldInlierLocation,newInlierLocation,'Montage');
setPoints(pointTracker,newValidLocation);
trackingTransform.T=referenceTransform.T*trackingTransform.T;
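% Warp the next video frame with the combined transform and blend it in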
videoFrame=step(video);
outputView=imref2d(size(referenceImage));
videoFrameScaled=imwarp(videoFrame,scaleTransform,'OutputView',outputView);
figure(1);
imshowpair(referenceImage,videoFrameScaled,'Montage');
outputView=imref2d(size(cameraFrame));
videoFrameTransformed=imwarp(videoFrameScaled,trackingTransform,'OutputView',outputView);
figure(1)
imshowpair(cameraFrame,videoFrameTransformed,'Montage');
mask=videoFrameTransformed(:,:,1)|videoFrameTransformed(:,:,2)|videoFrameTransformed(:,:,3)>0;
outputFrame=step(alphaBlender,cameraFrame,videoFrameTransformed,mask);
figure(1)
imshow(outputFrame);
release(video);
delete(camera);