
I'm trying to use OpenCV from Java. I want to stitch two photos together. OpenCV is a C++ library that has a Java wrapper.

  1. I downloaded OpenCV for Java with the prebuilt Windows .dlls from the official site: https://sourceforge.net/projects/opencvlibrary/files/opencv-win/3.4.1/opencv-3.4.1-vc14_vc15.exe/download

  2. I'm using IntelliJ 2016.1.4.

  3. I set up my project and pointed it at the relevant .jar.

  4. I found the code below online.

  5. It didn't work out of the box, so I fixed a few things, e.g.: private static final int CV_RANSAC = 8; // this is just a guess!

  6. I ran it. It failed with the error "Error: (-5) The specified descriptor extractor type is not supported" on: fe = DescriptorExtractor.create(DescriptorExtractor.SURF);

  7. I tried a bunch of alternative algorithms (ORB, SIFT, BRIEF) and got the same error.

I'd like to get this code working. Ideally I'd end up with working code that doesn't use a bunch of deprecated functions... functions that are deprecated with no comment on what to use instead... which always annoys me.

(More generally, I'd take any working Java example code that stitches photos together into a panorama.)

Can anyone help?

import org.opencv.calib3d.Calib3d;
import org.opencv.core.*;
import org.opencv.features2d.DescriptorExtractor;
import org.opencv.features2d.DescriptorMatcher;
import org.opencv.features2d.FeatureDetector;
import org.opencv.features2d.Features2d;
import org.opencv.imgproc.Imgproc;

import java.util.LinkedList;
import java.util.List;

import static org.opencv.imgcodecs.Imgcodecs.imread;
import static org.opencv.imgcodecs.Imgcodecs.imwrite;


public class ImageStitching {

    static Mat image1;
    static Mat image2;

    static FeatureDetector fd;
    static DescriptorExtractor fe;
    static DescriptorMatcher fm;

    // The guessed constant mentioned above; it happens to match Calib3d.RANSAC (8).
    private static final int CV_RANSAC = 8;


    // Compulsory
    static{
        try {
            System.loadLibrary(Core.NATIVE_LIBRARY_NAME);
        }
        catch (UnsatisfiedLinkError e) {
            throw new RuntimeException("Couldn't find \"" + Core.NATIVE_LIBRARY_NAME + ".dll .\n"
                    +"You need to add something like this to the run configuration \"VM options\":\n"
                    +"-Djava.library.path=C:\\OpenCvPreBuilt\\opencv\\build\\java\\x64");
        }
    }


    public static void go()
    {
        //new CvException("hello");
        fd = FeatureDetector.create(FeatureDetector.BRISK);
        fe = DescriptorExtractor.create(DescriptorExtractor.SURF);
        fm = DescriptorMatcher.create(DescriptorMatcher.BRUTEFORCE);

        //images
        image1 = imread("A.jpg");
        image2 = imread("B.jpg");

        //structures for the keypoints from the 2 images
        MatOfKeyPoint keypoints1 = new MatOfKeyPoint();
        MatOfKeyPoint keypoints2 = new MatOfKeyPoint();

        //structures for the computed descriptors
        Mat descriptors1 = new Mat();
        Mat descriptors2 = new Mat();

        //structure for the matches
        MatOfDMatch matches = new MatOfDMatch();

        //getting the keypoints
        fd.detect(image1, keypoints1);
        fd.detect(image2, keypoints2); // bug fix: detect on image2, not image1

        //getting the descriptors from the keypoints
        fe.compute(image1, keypoints1, descriptors1);
        fe.compute(image2,keypoints2,descriptors2);

        //getting the matches the 2 sets of descriptors
        fm.match(descriptors1, descriptors2, matches); // query = image1, train = image2, matching the index usage below

        //turn the matches to a list
        List<DMatch> matchesList = matches.toList();

        Double maxDist = 0.0; //keep track of max distance from the matches
        Double minDist = 100.0; //keep track of min distance from the matches

        //calculate max & min distances between keypoints
        for(int i=0; i<keypoints1.rows();i++){
            Double dist = (double) matchesList.get(i).distance;
            if (dist<minDist) minDist = dist;
            if(dist>maxDist) maxDist=dist;
        }

        System.out.println("max dist: " + maxDist );
        System.out.println("min dist: " + minDist);

        //structure for the good matches
        LinkedList<DMatch> goodMatches = new LinkedList<DMatch>();

        //use only the good matches (i.e. whose distance is less than 3*min_dist)
        for(int i=0;i<descriptors1.rows();i++){
            if(matchesList.get(i).distance<3*minDist){
                goodMatches.addLast(matchesList.get(i));
            }
        }

        //structures to hold points of the good matches (coordinates)
        LinkedList<Point> objList = new LinkedList<Point>(); // image1
        LinkedList<Point> sceneList = new LinkedList<Point>(); //image 2

        List<KeyPoint> keypoints_objectList = keypoints1.toList();
        List<KeyPoint> keypoints_sceneList = keypoints2.toList();

        //putting the points of the good matches into above structures
        for(int i = 0; i<goodMatches.size(); i++){
            objList.addLast(keypoints_objectList.get(goodMatches.get(i).queryIdx).pt);
            sceneList.addLast(keypoints_sceneList.get(goodMatches.get(i).trainIdx).pt);
        }

        System.out.println("\nNum. of good matches" +goodMatches.size());

        MatOfDMatch gm = new MatOfDMatch();
        gm.fromList(goodMatches);

        //converting the points into the appropriate data structure
        MatOfPoint2f obj = new MatOfPoint2f();
        obj.fromList(objList);

        MatOfPoint2f scene = new MatOfPoint2f();
        scene.fromList(sceneList);

        //finding the homography matrix
        Mat H = Calib3d.findHomography(obj, scene, CV_RANSAC, 3); // Calib3d.RANSAC is the proper constant (also 8)

        //LinkedList<Point> cornerList = new LinkedList<Point>();
        Mat obj_corners = new Mat(4,1,CvType.CV_32FC2);
        Mat scene_corners = new Mat(4,1,CvType.CV_32FC2);

        obj_corners.put(0,0, new double[]{0,0});
        obj_corners.put(1,0, new double[]{image1.cols(),0});
        obj_corners.put(2,0, new double[]{image1.cols(),image1.rows()});
        obj_corners.put(3,0, new double[]{0,image1.rows()});

        Core.perspectiveTransform(obj_corners, scene_corners, H);

        //structure to hold the result of the homography matrix
        Mat result = new Mat();

        //size of the new image - i.e. image 1 + image 2
        Size s = new Size(image1.cols()+image2.cols(),image1.rows());

        //using the homography matrix to warp the two images
        Imgproc.warpPerspective(image1, result, H, s);
        int i = image1.cols();
        Mat m = new Mat(result,new Rect(i,0,image2.cols(), image2.rows()));

        image2.copyTo(m);

        Mat img_mat = new Mat();

        Features2d.drawMatches(image1, keypoints1, image2, keypoints2, gm, img_mat, new Scalar(254,0,0),new Scalar(254,0,0) , new MatOfByte(), 2);

        //creating the output file
        boolean imageStitched = imwrite("imageStitched.jpg",result);
        boolean imageMatched = imwrite("imageMatched.jpg",img_mat);
    }


    public static void main(String args[])
    {
        go();
    }
}

1 Answer


So, let's try this again. I'll skip all the initialization stuff, since you seem to be loading the library successfully.

About my environment:

  • I'm using OpenCV on Android with the Java wrapper from the very download you linked.
  • OpenCV is at version 3.4.1.
  • I'm developing in Android Studio 3.1.3.

I've commented the code as much as possible and added sources (Stack Overflow and elsewhere) to show where I found things.

At this point, many thanks to those people for helping me piece together something that actually works.

The following method stitches two images together (it is extracted from an AsyncTask, since stitching takes quite some time):

    protected Bitmap doInBackground(Bitmap... arg0) {

        // Base code extracted from: http://privateblog.info/sozdanie-panoramy-s-pomoshhyu-opencv-i-java/
        // https://stackoverflow.com/questions/36691050/opencv-3-list-of-available-featuredetectorcreate-and-descriptorextractorc
        // https://stackoverflow.com/questions/27681389/how-to-multiply-2-matrices-in-java-and-then-use-the-result-as-transformation-mat
        // Measuring the duration
        long startTime = System.nanoTime();

        // Abort if we got not the right amount of images...
        // Stitching more than two images is not supported.
        if (arg0.length != 2) {
            return null;
        }

        // Get the two images from the given arguments
        Bitmap bitmap1 = arg0[0];
        Bitmap bitmap2 = arg0[1];

        // If something is wrong, abort...
        if (bitmap1 == null || bitmap2 == null) {
            return null;
        }

        // Convert the two bitmaps to OpenCV mats...
        Mat img1 = new Mat();
        Mat img2 = new Mat();

        Utils.bitmapToMat(bitmap1, img1);
        Utils.bitmapToMat(bitmap2, img2);

        // ...then create greyscale versions
        Mat gray_image1 = new Mat();
        Mat gray_image2 = new Mat();

        Imgproc.cvtColor(img1, gray_image1, Imgproc.COLOR_RGB2GRAY);
        Imgproc.cvtColor(img2, gray_image2, Imgproc.COLOR_RGB2GRAY);

        // At this point search for keypoints in both images and compute the matches
        MatOfKeyPoint keyPoints1 = new MatOfKeyPoint();
        MatOfKeyPoint keyPoints2 = new MatOfKeyPoint();

        Mat descriptors1 = new Mat();
        Mat descriptors2 = new Mat();

        // Since FeatureDetector and Descriptor extractor are marked deprecated and
        // crash whatever value they get, use this construct for detecting and computing...
        // Source: https://stackoverflow.com/questions/36691050/opencv-3-list-of-available-featuredetectorcreate-and-descriptorextractorc
        KAZE kaze = KAZE.create();
        kaze.detect(gray_image1, keyPoints1);
        kaze.detect(gray_image2, keyPoints2);
        kaze.compute(gray_image1, keyPoints1, descriptors1);
        kaze.compute(gray_image2, keyPoints2, descriptors2);

        MatOfDMatch matches = new MatOfDMatch();

        DescriptorMatcher matcher = DescriptorMatcher.create(DescriptorMatcher.FLANNBASED);
        matcher.match(descriptors1, descriptors2, matches);

        // Calculate min and max distance between the keypoints in the two images.
        double max_dist = 0; double min_dist = 100;
        List<DMatch> listMatches = matches.toList();

        for( int i = 0; i < listMatches.size(); i++ ) {
            double dist = listMatches.get(i).distance;
            if( dist < min_dist ) min_dist = dist;
            if( dist > max_dist ) max_dist = dist;
        }

        Log.i(this.getClass().getSimpleName(), "Min: " + min_dist);
        Log.i(this.getClass().getSimpleName(), "Max: " + max_dist);

        // Reduce the list of matching keypoints to a list of good matches...
        LinkedList<DMatch> good_matches = new LinkedList<DMatch>();
        MatOfDMatch goodMatches = new MatOfDMatch();
        for(int i = 0; i < listMatches.size(); i++) {
            if(listMatches.get(i).distance < 2*min_dist) {
                good_matches.addLast(listMatches.get(i));
            }
        }

        goodMatches.fromList(good_matches);
        Log.i(this.getClass().getSimpleName(), "Number of matches: " + listMatches.size());
        Log.i(this.getClass().getSimpleName(), "Number of good matches: " + good_matches.size());

        // Calculate the homography between the two images...
        LinkedList<Point> imgPoints1List = new LinkedList<Point>();
        LinkedList<Point> imgPoints2List = new LinkedList<Point>();
        List<KeyPoint> keypoints1List = keyPoints1.toList();
        List<KeyPoint> keypoints2List = keyPoints2.toList();

        for(int i = 0; i<good_matches.size(); i++) {
            imgPoints1List.addLast(keypoints1List.get(good_matches.get(i).queryIdx).pt);
            imgPoints2List.addLast(keypoints2List.get(good_matches.get(i).trainIdx).pt);
        }

        MatOfPoint2f obj = new MatOfPoint2f();
        obj.fromList(imgPoints1List);
        MatOfPoint2f scene = new MatOfPoint2f();
        scene.fromList(imgPoints2List);

        Mat H = Calib3d.findHomography(obj, scene, Calib3d.RANSAC,3);

        int imageWidth = img2.cols();
        int imageHeight = img2.rows();

        // To avoid missing some of the possible stitching scenarios, we offset the homography
        // to the middle of a mat which has three times the size of one of the pictures.
        // Extracted from this: https://stackoverflow.com/questions/21618044/stitching-2-images-opencv
        Mat Offset = new Mat(3, 3, H.type());
        Offset.put(0,0, new double[]{1});
        Offset.put(0,1, new double[]{0});
        Offset.put(0,2, new double[]{imageWidth});
        Offset.put(1,0, new double[]{0});
        Offset.put(1,1, new double[]{1});
        Offset.put(1,2, new double[]{imageHeight});
        Offset.put(2,0, new double[]{0});
        Offset.put(2,1, new double[]{0});
        Offset.put(2,2, new double[]{1});

        // Multiply the homography mat with the offset.
        Core.gemm(Offset, H, 1, new Mat(), 0, H);
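
        // In matrix form the offset is a pure translation T, and gemm computes T * H:
        //     | 1 0 imageWidth  |
        // T = | 0 1 imageHeight |
        //     | 0 0 1           |
        // Every point warped by H is thus additionally shifted right by imageWidth and
        // down by imageHeight, so the warped image lands inside the 3x-sized canvas.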

        Mat obj_corners = new Mat(4,1,CvType.CV_32FC2);
        Mat scene_corners = new Mat(4,1,CvType.CV_32FC2);

        obj_corners.put(0,0, new double[]{0,0});
        obj_corners.put(1,0, new double[]{imageWidth,0});
        obj_corners.put(2,0, new double[]{imageWidth,imageHeight});
        obj_corners.put(3,0, new double[]{0,imageHeight});

        Core.perspectiveTransform(obj_corners, scene_corners, H);

        // The resulting mat will be three times the size (width and height) of one of the
        // source images. (We assume that both images have the same size.)
        Size s = new Size(imageWidth *3,imageHeight*3);
        Mat img_matches = new Mat(); // warpPerspective below allocates this to size s anyway

        // Perform the perspective warp of img1 with the given homography and place it on the large result mat.
        Imgproc.warpPerspective(img1, img_matches, H, s);

        // Create another mat which is used to hold the second image and place it in the middle of the large sized result mat.
        int m_xPos = (int)(img_matches.size().width/2 - img2.size().width/2);
        int m_yPos = (int)(img_matches.size().height/2 - img2.size().height/2);
        Mat m = new Mat(img_matches,new Rect(m_xPos, m_yPos, img2.cols(), img2.rows()));

        // Copy img2 to the mat in the middle of the large result mat
        img2.copyTo(m);

        // Some debug logging... and some duration logging following...
        Log.i(this.getClass().getSimpleName(), "Size of img2: width=" + img2.size().width + "height=" + img2.size().height);
        Log.i(this.getClass().getSimpleName(), "Size of m: width=" + m.size().width + "height=" + m.size().height);
        Log.i(this.getClass().getSimpleName(), "Size of img_matches: width=" + img_matches.size().width + "height=" + img_matches.size().height);

        long elapsedTime = System.nanoTime() - startTime;
        elapsedTime = elapsedTime / 1000000; // Milliseconds (1:1000000)
        Log.i(this.getClass().getSimpleName(), "Stitching 2 images took " + elapsedTime + "ms");
        //loadedImagesText.append("Stitching 2 images took " + elapsedTime + "ms\n");

        // The resulting mat is way too big. It holds a lot of empty "transparent" space.
        // We now crop the image so that only the "region of interest" remains.
        startTime = System.nanoTime();
        int stepping = 6;

        Rect imageBoundingBox3 = findImageBoundingBox2(img_matches, stepping, true);

        elapsedTime = System.nanoTime() - startTime;
        elapsedTime = elapsedTime / 1000000; // Milliseconds (1:1000000)
        Log.i(this.getClass().getSimpleName(), "Resulting rect has tl(x=" + imageBoundingBox3.tl().x + ", y=" + imageBoundingBox3.tl().y +") and br(x=" + imageBoundingBox3.br().x + ", y=" + imageBoundingBox3.br().y +") with stepping="+stepping+" and auto-correct=true\n");
        Log.i(this.getClass().getSimpleName(), "Cropping stitched image (v2.1) took " + elapsedTime + "ms");

        //loadedImagesText.append("Resulting rect has tl(x=" + imageBoundingBox3.tl().x + ", y=" + imageBoundingBox3.tl().y +") and br(x=" + imageBoundingBox3.br().x + ", y=" + imageBoundingBox3.br().y +") with stepping="+stepping+" and auto-correct=true\n");
        //loadedImagesText.append("Cropping stitched image (v2.1) took " + elapsedTime + "ms\n");

        // Extract the calculated region of interest from the result mat.
        Mat regionOfInterest = img_matches.submat(imageBoundingBox3);

        // Convert the end result to a bitmap and we are done!
        Bitmap resultBitmap = Bitmap.createBitmap(regionOfInterest.cols(),  regionOfInterest.rows(),Bitmap.Config.ARGB_8888);
        Utils.matToBitmap(regionOfInterest, resultBitmap);

        return resultBitmap;
    }
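
The helper findImageBoundingBox2 is not shown in this answer. For reference, here is a minimal hypothetical sketch of what it plausibly does, assuming it scans every stepping-th pixel of the RGBA result mat and returns the bounding rect of everything with non-zero alpha (the scan heuristic and the auto-correct behavior are my assumptions, not the original code):

    // Hypothetical sketch of the missing helper -- not the original implementation.
    // Assumption: content pixels are those whose alpha channel is non-zero.
    private static Rect findImageBoundingBox2(Mat img, int stepping, boolean autoCorrect) {
        int minX = img.cols(), minY = img.rows(), maxX = -1, maxY = -1;
        for (int y = 0; y < img.rows(); y += stepping) {
            for (int x = 0; x < img.cols(); x += stepping) {
                double[] px = img.get(y, x);
                if (px != null && px.length >= 4 && px[3] > 0) {
                    if (x < minX) minX = x;
                    if (y < minY) minY = y;
                    if (x > maxX) maxX = x;
                    if (y > maxY) maxY = y;
                }
            }
        }
        if (maxX < minX || maxY < minY) {
            return new Rect(0, 0, img.cols(), img.rows()); // nothing found, keep everything
        }
        if (autoCorrect) {
            // Expand by one step so the coarse scan does not clip content at the edges.
            minX = Math.max(0, minX - stepping);
            minY = Math.max(0, minY - stepping);
            maxX = Math.min(img.cols() - 1, maxX + stepping);
            maxY = Math.min(img.rows() - 1, maxY + stepping);
        }
        return new Rect(minX, minY, maxX - minX + 1, maxY - minY + 1);
    }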

As a final remark:

OpenCV itself has a high-level class called "Stitcher": https://docs.opencv.org/3.4.1/d2/d8d/classcv_1_1Stitcher.html

That class would do all the manual labor my code does, and it also allows stitching more than two images. But it doesn't seem to be included in the stock Java wrapper yet.
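
For reference, a rough sketch of what using that class could look like in a build whose Java bindings do expose it (the 4.x wrappers ship org.opencv.stitching.Stitcher; the exact package, factory method, and status codes are assumptions to verify against your OpenCV version):

    import org.opencv.core.Core;
    import org.opencv.core.Mat;
    import org.opencv.stitching.Stitcher;

    import java.util.ArrayList;
    import java.util.List;

    import static org.opencv.imgcodecs.Imgcodecs.imread;
    import static org.opencv.imgcodecs.Imgcodecs.imwrite;

    public class StitcherDemo {
        public static void main(String[] args) {
            System.loadLibrary(Core.NATIVE_LIBRARY_NAME);

            // Stitcher accepts two or more input images.
            List<Mat> images = new ArrayList<>();
            images.add(imread("A.jpg"));
            images.add(imread("B.jpg"));

            Mat pano = new Mat();
            Stitcher stitcher = Stitcher.create();
            int status = stitcher.stitch(images, pano);

            if (status == 0) { // 0 corresponds to Stitcher::OK
                imwrite("pano.jpg", pano);
            } else {
                System.err.println("Stitching failed with status " + status);
            }
        }
    }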

I hope I could help, and provide a proper answer this time.

Answered on 2018-06-18T09:03:01.807