
I've been using Processing and Cinder to modify the Kinect input on the fly. However, I'd also like to record the full stream (depth + colour + accelerometer values, and whatever else is in there). I'm recording so I can try out different effects/treatments on the same material.

Because I'm still learning Cinder, and Processing is quite slow/laggy, I've had a hard time finding advice on a strategy for capturing the stream. Anything (ideally in Cinder, oF or Processing) would be very helpful.


1 Answer


I've tried both Processing and OpenFrameworks. Processing is slower when displaying both images (depth and colour). OpenFrameworks slows down a bit while writing the data to disk, but here's the basic approach:

  1. Set up OpenFrameworks (open and compile any of the samples to make sure you're up and running).
  2. Download the ofxKinect addon and copy the example project as described on GitHub.
  3. Once you've got OF and the ofxKinect example running, it's just a matter of adding a few variables to save your data:

In this basic setup I just create a couple of ofImage instances and a boolean to toggle saving. In the example, the depth and RGB buffers are saved into ofxCvGrayscaleImage instances, but I haven't used OF and OpenCV enough to know how to do something as simple as saving an image to disk with them, which is why I use the two ofImage instances.

I don't know how comfortable you are with Processing, OF or Cinder, so, for argument's sake, I'll assume you're already familiar with Processing but still getting to grips with C++.

OF is pretty similar to Processing, but there are a few differences:

  1. In Processing you declare your variables and use them in the same file. In OF you have a .h file where you declare your variables, and a .cpp file where you initialise and use those variables.
  2. In Processing you have the setup() (initialise variables) and draw() (update variables and draw to screen) methods, while in OF you have setup() (same as in Processing), update() (update variables only, nothing visual) and draw() (draw to screen using the updated values).
  3. When working with images, since you're coding in C++, you need to allocate the memory first, as opposed to Processing/Java where memory is managed for you (see the short sketch right after this list).
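To make those differences concrete, here's a minimal sketch that has nothing to do with the Kinect yet; the class name myApp and the variables img and brightness are made up purely for illustration. Declarations go in the .h file, initialisation and use go in the .cpp file, and the image memory is allocated explicitly:

// myApp.h -- declarations only
#pragma once

#include "ofMain.h"

class myApp : public ofBaseApp {
    public:
        void setup();
        void update();
        void draw();

        ofImage img;        //declared here, allocated in setup()
        float   brightness; //updated in update(), used in draw()
};

// myApp.cpp -- initialisation and use
#include "myApp.h"

void myApp::setup() {
    img.allocate(320, 240, OF_IMAGE_GRAYSCALE);//in C++ you allocate the memory yourself
    brightness = 0;
}

void myApp::update() {
    //update variables only, nothing visual
    brightness = 127 + 127 * sin(ofGetElapsedTimef());
}

void myApp::draw() {
    //draw to screen using the updated values
    ofSetColor(brightness);
    img.draw(0, 0);
}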

There are more differences, but I won't go into detail here. Do check out OF for Processing Users on the wiki.

Back to the exampleKinect example, here's my basic setup:

the .h file:

#pragma once

#include "ofMain.h"
#include "ofxOpenCv.h"
#include "ofxKinect.h"

class testApp : public ofBaseApp {
    public:

        void setup();
        void update();
        void draw();
        void exit();

        void drawPointCloud();

        void keyPressed  (int key);
        void mouseMoved(int x, int y );
        void mouseDragged(int x, int y, int button);
        void mousePressed(int x, int y, int button);
        void mouseReleased(int x, int y, int button);
        void windowResized(int w, int h);

        ofxKinect kinect;

        ofxCvColorImage     colorImg;

        ofxCvGrayscaleImage     grayImage;
        ofxCvGrayscaleImage     grayThresh;
        ofxCvGrayscaleImage     grayThreshFar;

        ofxCvContourFinder  contourFinder;

        ofImage             colorData;//to save RGB data to disk
        ofImage             grayData;//to save depth data to disk 

        bool                bThreshWithOpenCV;
        bool                drawPC;
        bool                saveData;//save to disk toggle

        int                 nearThreshold;
        int                 farThreshold;

        int                 angle;

        int                 pointCloudRotationY;
        int                 saveCount;//counter used for naming 'frames'
};

and the .cpp file:

#include "testApp.h"


//--------------------------------------------------------------
void testApp::setup() {
    //kinect.init(true);  //shows infrared image
    kinect.init();
    kinect.setVerbose(true);
    kinect.open();

    colorImg.allocate(kinect.width, kinect.height);
    grayImage.allocate(kinect.width, kinect.height);
    grayThresh.allocate(kinect.width, kinect.height);
    grayThreshFar.allocate(kinect.width, kinect.height);
    //allocate memory for these ofImages which will be saved to disk
    colorData.allocate(kinect.width, kinect.height, OF_IMAGE_COLOR);
    grayData.allocate(kinect.width, kinect.height, OF_IMAGE_GRAYSCALE);

    nearThreshold = 230;
    farThreshold  = 70;
    bThreshWithOpenCV = true;

    ofSetFrameRate(60);

    // zero the tilt on startup
    angle = 0;
    kinect.setCameraTiltAngle(angle);

    // start from the front
    pointCloudRotationY = 180;

    drawPC = false;

    saveData = false;//don't save anything until toggled with 's'
    saveCount = 0;//init frame counter
}

//--------------------------------------------------------------
void testApp::update() {
    ofBackground(100, 100, 100);

    kinect.update();
    if(kinect.isFrameNew()) // there is a new frame and we are connected
    {

        grayImage.setFromPixels(kinect.getDepthPixels(), kinect.width, kinect.height);

        if(saveData){
            //if toggled, set depth and rgb pixels to respective ofImage, save to disk and update the 'frame' counter 
            grayData.setFromPixels(kinect.getDepthPixels(), kinect.width, kinect.height, OF_IMAGE_GRAYSCALE);
            colorData.setFromPixels(kinect.getCalibratedRGBPixels(), kinect.width, kinect.height, OF_IMAGE_COLOR);
            grayData.saveImage("depth"+ofToString(saveCount)+".png");
            colorData.saveImage("color"+ofToString(saveCount)+".png");
            saveCount++;
        }

        //we do two thresholds - one for the far plane and one for the near plane
        //we then do a cvAnd to get the pixels which are a union of the two thresholds. 
        if( bThreshWithOpenCV ){
            grayThreshFar = grayImage;
            grayThresh = grayImage;
            grayThresh.threshold(nearThreshold, true);
            grayThreshFar.threshold(farThreshold);
            cvAnd(grayThresh.getCvImage(), grayThreshFar.getCvImage(), grayImage.getCvImage(), NULL);
        }else{

            //or we do it ourselves - show people how they can work with the pixels

            unsigned char * pix = grayImage.getPixels();
            int numPixels = grayImage.getWidth() * grayImage.getHeight();

            for(int i = 0; i < numPixels; i++){
                if( pix[i] < nearThreshold && pix[i] > farThreshold ){
                    pix[i] = 255;
                }else{
                    pix[i] = 0;
                }
            }
        }

        //update the cv image
        grayImage.flagImageChanged();

        // find contours which are between the size of 10 pixels and 1/2 the w*h pixels.
        // also, find holes is set to false so we only get the outer contours.
        contourFinder.findContours(grayImage, 10, (kinect.width*kinect.height)/2, 20, false);
    }
}

//--------------------------------------------------------------
void testApp::draw() {
    ofSetColor(255, 255, 255);
    if(drawPC){
        ofPushMatrix();
        ofTranslate(420, 320);
        // we need a proper camera class
        drawPointCloud();
        ofPopMatrix();
    }else{
        kinect.drawDepth(10, 10, 400, 300);
        kinect.draw(420, 10, 400, 300);

        grayImage.draw(10, 320, 400, 300);
        contourFinder.draw(10, 320, 400, 300);
    }


    ofSetColor(255, 255, 255);
    stringstream reportStream;
    reportStream << "accel is: " << ofToString(kinect.getMksAccel().x, 2) << " / "
                                 << ofToString(kinect.getMksAccel().y, 2) << " / " 
                                 << ofToString(kinect.getMksAccel().z, 2) << endl
                 << "press p to switch between images and point cloud, rotate the point cloud with the mouse" << endl
                 << "using opencv threshold = " << bThreshWithOpenCV <<" (press spacebar)" << endl
                 << "set near threshold " << nearThreshold << " (press: + -)" << endl
                 << "set far threshold " << farThreshold << " (press: < >) num blobs found " << contourFinder.nBlobs
                    << ", fps: " << ofGetFrameRate() << endl
                 << "press c to close the connection and o to open it again, connection is: " << kinect.isConnected() << endl
                 << "press s to toggle saving depth and color data. currently saving:  " << saveData << endl
                 << "press UP and DOWN to change the tilt angle: " << angle << " degrees";
    ofDrawBitmapString(reportStream.str(),20,656);
}

void testApp::drawPointCloud() {
    ofScale(400, 400, 400);
    int w = 640;
    int h = 480;
    ofRotateY(pointCloudRotationY);
    float* distancePixels = kinect.getDistancePixels();
    glBegin(GL_POINTS);
    int step = 2;
    for(int y = 0; y < h; y += step) {
        for(int x = 0; x < w; x += step) {
            ofPoint cur = kinect.getWorldCoordinateFor(x, y);
            ofColor color = kinect.getCalibratedColorAt(x,y);
            glColor3ub((unsigned char)color.r,(unsigned char)color.g,(unsigned char)color.b);
            glVertex3f(cur.x, cur.y, cur.z);
        }
    }
    glEnd();
}

//--------------------------------------------------------------
void testApp::exit() {
    kinect.setCameraTiltAngle(0); // zero the tilt on exit
    kinect.close();
}

//--------------------------------------------------------------
void testApp::keyPressed (int key) {
    switch (key) {
        case ' ':
            bThreshWithOpenCV = !bThreshWithOpenCV;
        break;
        case'p':
            drawPC = !drawPC;
            break;

        case '>':
        case '.':
            farThreshold ++;
            if (farThreshold > 255) farThreshold = 255;
            break;
        case '<':       
        case ',':       
            farThreshold --;
            if (farThreshold < 0) farThreshold = 0;
            break;

        case '+':
        case '=':
            nearThreshold ++;
            if (nearThreshold > 255) nearThreshold = 255;
            break;
        case '-':       
            nearThreshold --;
            if (nearThreshold < 0) nearThreshold = 0;
            break;
        case 'w':
            kinect.enableDepthNearValueWhite(!kinect.isDepthNearValueWhite());
            break;
        case 'o':
            kinect.setCameraTiltAngle(angle);   // go back to prev tilt
            kinect.open();
            break;
        case 'c':
            kinect.setCameraTiltAngle(0);       // zero the tilt
            kinect.close();
            break;
        case 's'://s to toggle saving data
            saveData = !saveData;
            break;

        case OF_KEY_UP:
            angle++;
            if(angle>30) angle=30;
            kinect.setCameraTiltAngle(angle);
            break;

        case OF_KEY_DOWN:
            angle--;
            if(angle<-30) angle=-30;
            kinect.setCameraTiltAngle(angle);
            break;
    }
}

//--------------------------------------------------------------
void testApp::mouseMoved(int x, int y) {
    pointCloudRotationY = x;
}

//--------------------------------------------------------------
void testApp::mouseDragged(int x, int y, int button)
{}

//--------------------------------------------------------------
void testApp::mousePressed(int x, int y, int button)
{}

//--------------------------------------------------------------
void testApp::mouseReleased(int x, int y, int button)
{}

//--------------------------------------------------------------
void testApp::windowResized(int w, int h)
{}
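One thing worth adding straight away, since the question also asks for the accelerometer values: the code above only displays them (via kinect.getMksAccel() in draw()), but a quick way to record them as well is to append a line to a text file next to the saveImage() calls inside the if(saveData) block in update(). This is an untested sketch and accel.csv is just a made-up file name:

//log the accelerometer values for the frame that was just saved
ofstream accelLog(ofToDataPath("accel.csv").c_str(), ios::app);//append one line per saved frame
accelLog << saveCount << "," << kinect.getMksAccel().x << ","
         << kinect.getMksAccel().y << "," << kinect.getMksAccel().z << endl;
accelLog.close();

Opening the file on every saved frame isn't great for speed, but it keeps the sketch short.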

This is a very basic setup. Feel free to modify it (add the tilt angle to the saved data, etc.). I'm pretty sure there are ways to improve its speed (e.g. don't update the ofxCvGrayscaleImage instances, don't draw the images to the screen while saving, or stack up a few frames and write them to disk at an interval rather than on every single frame; a sketch of that last idea follows).
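For the "stack up a few frames" idea, here's a rough, untested sketch (depthBuffer and batchSize are placeholder names, not part of the example above): keep copies of the incoming frames in a vector and only write to disk once every batchSize frames. The RGB frames could be buffered in the same way.

//in the .h file, next to the other members:
vector<ofImage> depthBuffer;//depth frames waiting to be written to disk
int             batchSize;  //how many frames to stack before writing (e.g. 30, set in setup())

//in update(), inside if(saveData), instead of saving on every frame:
ofImage frame;
frame.setUseTexture(false);//we only need the pixels, no GPU upload
frame.setFromPixels(kinect.getDepthPixels(), kinect.width, kinect.height, OF_IMAGE_GRAYSCALE);
depthBuffer.push_back(frame);

if((int)depthBuffer.size() >= batchSize){
    //flush the whole batch in one go
    for(int i = 0; i < (int)depthBuffer.size(); i++){
        depthBuffer[i].saveImage("depth" + ofToString(saveCount) + ".png");
        saveCount++;
    }
    depthBuffer.clear();
}

The writes still block when the batch is flushed, but at least they happen in bursts rather than on every frame.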

Good luck!

answered 2011-08-31T23:02:30.173