
I'm new to EMGU CV. I want to use the camera to detect more than one pattern, like in this video. For now, though, I'm trying to develop it with just one pattern as a starting point.

I looked at EMGU CV's SURF example. When I tried to merge that code into the camera-capture example, I get an error at runtime. I searched further but couldn't find any code samples.

So, could you suggest a well-explained code snippet or tutorial?

Thanks in advance.

Below is the code I'm working on:

...........................................
FrameRaw = capture.QueryFrame();
CamImageBox.Image = FrameRaw;
Run(FrameRaw);
...........................................    
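
For reference, the Run call above sits inside my frame handler, which is wired up roughly like this (simplified; the Capture field and handler name are just what I use in my form):

    private Capture capture;

    private void Form1_Load(object sender, EventArgs e)
    {
        capture = new Capture();            // default camera
        Application.Idle += ProcessFrame;   // grab and process a frame whenever the UI is idle
    }

    private void ProcessFrame(object sender, EventArgs e)
    {
        Image<Bgr, byte> FrameRaw = capture.QueryFrame();
        if (FrameRaw == null)               // QueryFrame can return null before the camera delivers a frame
            return;

        CamImageBox.Image = FrameRaw;
        Run(FrameRaw);
    }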

            private void Run(Image<Bgr, byte> TempImage)
            {

                Image<Gray, Byte> modelImage = new Image<Gray, byte>("sample.jpg");
                Image<Gray, Byte> observedImage = TempImage.Convert<Gray, Byte>();
                // Image<Gray, Byte> observedImage = new Image<Gray,byte>("box_in_scene.png");

                Stopwatch watch;
                HomographyMatrix homography = null;

                SURFDetector surfCPU = new SURFDetector(500, false);

                VectorOfKeyPoint modelKeyPoints;
                VectorOfKeyPoint observedKeyPoints;
                Matrix<int> indices;
                Matrix<float> dist;
                Matrix<byte> mask;

                if (GpuInvoke.HasCuda)
                {
                    GpuSURFDetector surfGPU = new GpuSURFDetector(surfCPU.SURFParams, 0.01f);
                    using (GpuImage<Gray, Byte> gpuModelImage = new GpuImage<Gray, byte>(modelImage))
                    //extract features from the object image
                    using (GpuMat<float> gpuModelKeyPoints = surfGPU.DetectKeyPointsRaw(gpuModelImage, null))
                    using (GpuMat<float> gpuModelDescriptors = surfGPU.ComputeDescriptorsRaw(gpuModelImage, null, gpuModelKeyPoints))
                    using (GpuBruteForceMatcher matcher = new GpuBruteForceMatcher(GpuBruteForceMatcher.DistanceType.L2))
                    {
                        modelKeyPoints = new VectorOfKeyPoint();
                        surfGPU.DownloadKeypoints(gpuModelKeyPoints, modelKeyPoints);
                        watch = Stopwatch.StartNew();

                        // extract features from the observed image
                        using (GpuImage<Gray, Byte> gpuObservedImage = new GpuImage<Gray, byte>(observedImage))
                        using (GpuMat<float> gpuObservedKeyPoints = surfGPU.DetectKeyPointsRaw(gpuObservedImage, null))
                        using (GpuMat<float> gpuObservedDescriptors = surfGPU.ComputeDescriptorsRaw(gpuObservedImage, null, gpuObservedKeyPoints))
                        using (GpuMat<int> gpuMatchIndices = new GpuMat<int>(gpuObservedDescriptors.Size.Height, 2, 1))
                        using (GpuMat<float> gpuMatchDist = new GpuMat<float>(gpuMatchIndices.Size, 1))
                        {
                            observedKeyPoints = new VectorOfKeyPoint();
                            surfGPU.DownloadKeypoints(gpuObservedKeyPoints, observedKeyPoints);

                            matcher.KnnMatch(gpuObservedDescriptors, gpuModelDescriptors, gpuMatchIndices, gpuMatchDist, 2, null);

                            indices = new Matrix<int>(gpuMatchIndices.Size);
                            dist = new Matrix<float>(indices.Size);
                            gpuMatchIndices.Download(indices);
                            gpuMatchDist.Download(dist);

                            mask = new Matrix<byte>(dist.Rows, 1);

                            mask.SetValue(255);

                            Features2DTracker.VoteForUniqueness(dist, 0.8, mask);

                            int nonZeroCount = CvInvoke.cvCountNonZero(mask);
                            if (nonZeroCount >= 4)
                            {
                                nonZeroCount = Features2DTracker.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
                                if (nonZeroCount >= 4)
                                    homography = Features2DTracker.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, indices, mask, 3);
                            }

                            watch.Stop();
                        }
                    }
                }
                else
                {
                    //extract features from the object image
                    modelKeyPoints = surfCPU.DetectKeyPointsRaw(modelImage, null);
                    //MKeyPoint[] kpts = modelKeyPoints.ToArray();
                    Matrix<float> modelDescriptors = surfCPU.ComputeDescriptorsRaw(modelImage, null, modelKeyPoints);

                    watch = Stopwatch.StartNew();

                    // extract features from the observed image
                    observedKeyPoints = surfCPU.DetectKeyPointsRaw(observedImage, null);
                    Matrix<float> observedDescriptors = surfCPU.ComputeDescriptorsRaw(observedImage, null, observedKeyPoints);

                    BruteForceMatcher matcher = new BruteForceMatcher(BruteForceMatcher.DistanceType.L2F32);
                    matcher.Add(modelDescriptors);
                    int k = 2;
                    indices = new Matrix<int>(observedDescriptors.Rows, k);
                    dist = new Matrix<float>(observedDescriptors.Rows, k);
                    matcher.KnnMatch(observedDescriptors, indices, dist, k, null);

                    mask = new Matrix<byte>(dist.Rows, 1);

                    mask.SetValue(255);

                    // keep only matches that pass the 0.8 uniqueness (ratio) test
                    Features2DTracker.VoteForUniqueness(dist, 0.8, mask);

                    int nonZeroCount = CvInvoke.cvCountNonZero(mask);
                    if (nonZeroCount >= 4)
                    {
                        nonZeroCount = Features2DTracker.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
                        if (nonZeroCount >= 4)
                            homography = Features2DTracker.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, indices, mask, 3);
                    }

                    watch.Stop();
                }

                //Draw the matched keypoints
                Image<Bgr, Byte> result = Features2DTracker.DrawMatches(modelImage, modelKeyPoints, observedImage, observedKeyPoints,
                   indices, new Bgr(255, 255, 255), new Bgr(255, 255, 255), mask, Features2DTracker.KeypointDrawType.NOT_DRAW_SINGLE_POINTS);

                #region draw the projected region on the image
                if (homography != null)
                {  //draw a rectangle along the projected model
                    Rectangle rect = modelImage.ROI;
                    PointF[] pts = new PointF[] { 
                   new PointF(rect.Left, rect.Bottom),
                   new PointF(rect.Right, rect.Bottom),
                   new PointF(rect.Right, rect.Top),
                   new PointF(rect.Left, rect.Top)};
                    homography.ProjectPoints(pts);

                    result.DrawPolyline(Array.ConvertAll<PointF, Point>(pts, Point.Round), true, new Bgr(Color.Red), 5);
                }
                #endregion

               // ImageViewer.Show(result, String.Format("Matched using {0} in {1} milliseconds", GpuInvoke.HasCuda ? "GPU" : "CPU", watch.ElapsedMilliseconds));
            }

1 Answer


I found the SURF tutorial you are using, but I don't see why it would cause an error. Are you able to run the tutorial code on its own, without the added complexity of GPU acceleration? Also, what error are you getting?
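
As a first step, here is a minimal CPU-only sketch of that tutorial, stripped down to the calls you already have in your question (no GpuInvoke.HasCuda branch, two still images instead of camera frames; "box.png" / "box_in_scene.png" are the sample images that ship with EMGU CV, so substitute your own). If this runs, the problem is in the GPU path or in the capture integration rather than in the matching code itself:

    Image<Gray, Byte> modelImage = new Image<Gray, Byte>("box.png");
    Image<Gray, Byte> observedImage = new Image<Gray, Byte>("box_in_scene.png");

    // detect SURF keypoints and compute descriptors on both images
    SURFDetector surfCPU = new SURFDetector(500, false);
    VectorOfKeyPoint modelKeyPoints = surfCPU.DetectKeyPointsRaw(modelImage, null);
    Matrix<float> modelDescriptors = surfCPU.ComputeDescriptorsRaw(modelImage, null, modelKeyPoints);
    VectorOfKeyPoint observedKeyPoints = surfCPU.DetectKeyPointsRaw(observedImage, null);
    Matrix<float> observedDescriptors = surfCPU.ComputeDescriptorsRaw(observedImage, null, observedKeyPoints);

    // k-nearest-neighbour matching (k = 2) so the uniqueness/ratio test can be applied
    BruteForceMatcher matcher = new BruteForceMatcher(BruteForceMatcher.DistanceType.L2F32);
    matcher.Add(modelDescriptors);
    int k = 2;
    Matrix<int> indices = new Matrix<int>(observedDescriptors.Rows, k);
    Matrix<float> dist = new Matrix<float>(observedDescriptors.Rows, k);
    matcher.KnnMatch(observedDescriptors, indices, dist, k, null);

    Matrix<byte> mask = new Matrix<byte>(dist.Rows, 1);
    mask.SetValue(255);
    Features2DTracker.VoteForUniqueness(dist, 0.8, mask);

    // need at least 4 surviving matches to estimate a homography
    HomographyMatrix homography = null;
    int nonZeroCount = CvInvoke.cvCountNonZero(mask);
    if (nonZeroCount >= 4)
    {
        nonZeroCount = Features2DTracker.VoteForSizeAndOrientation(modelKeyPoints, observedKeyPoints, indices, mask, 1.5, 20);
        if (nonZeroCount >= 4)
            homography = Features2DTracker.GetHomographyMatrixFromMatchedFeatures(modelKeyPoints, observedKeyPoints, indices, mask, 3);
    }

    // draw the matched keypoints and show the result
    Image<Bgr, Byte> result = Features2DTracker.DrawMatches(modelImage, modelKeyPoints, observedImage, observedKeyPoints,
        indices, new Bgr(255, 255, 255), new Bgr(255, 255, 255), mask, Features2DTracker.KeypointDrawType.NOT_DRAW_SINGLE_POINTS);
    ImageViewer.Show(result, homography != null ? "Pattern found" : "No reliable match");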

Answered 2012-03-31T18:44:29.593