
Hi Stack Overflow people,

I'm new to Processing but fairly comfortable with coding in general. For a school project I'm building an interactive installation in which visitors can play with their "shadow". They should be able to draw objects such as wings or a cape onto their shadow, and those drawings then need to move along with the player's skeleton.

For example, if I draw a big hat on my head, it needs to move along with my head.
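
To make this concrete, here is roughly the kind of behaviour I mean, sketched for a single ready-made image instead of a live drawing. This is only a rough sketch of the idea: hat.png is just a placeholder asset, and the skeleton calls are the same SimpleOpenNI ones used in the code further down (as far as I understand, skeleton tracking is started in the onNewUser callback):

    import SimpleOpenNI.*;

    SimpleOpenNI context;
    PImage hat;  // placeholder: any image I want to pin to the head

    void setup() {
      size(640, 480);
      context = new SimpleOpenNI(this);
      context.enableDepth();
      context.enableUser();
      hat = loadImage("hat.png");  // hypothetical asset in the data folder
    }

    void draw() {
      background(255);
      context.update();
      image(context.depthImage(), 0, 0);

      int[] users = context.getUsers();
      for (int i = 0; i < users.length; i++) {
        int uid = users[i];
        if (!context.isTrackingSkeleton(uid)) continue;

        // get the head joint in real-world coordinates...
        PVector realHead = new PVector();
        context.getJointPositionSkeleton(uid, SimpleOpenNI.SKEL_HEAD, realHead);

        // ...convert it to screen (projective) coordinates...
        PVector projHead = new PVector();
        context.convertRealWorldToProjective(realHead, projHead);

        // ...and draw the hat centred just above that point, every frame
        image(hat, projHead.x - hat.width/2, projHead.y - hat.height - 20);
      }
    }

    // start skeleton tracking as soon as a user is detected
    void onNewUser(SimpleOpenNI curContext, int userId) {
      curContext.startTrackingSkeleton(userId);
    }

That part I can picture; what I can't work out is how to do the same with something the visitor has just drawn.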

So far I have this simple sketch: it shows the player's silhouette, lets the player draw on top of it, and can save a screenshot.

    import SimpleOpenNI.*;
    SimpleOpenNI context;
    PImage userImage;
    int[] userMap;
    PImage rgbImage;
    PGraphics pg;
    color pixelColor;

    int dikte = 10;  // stroke weight for drawing ("dikte" is Dutch for thickness)

    void setup(){

      size(1024,768);
      context=new SimpleOpenNI(this);
      context.enableRGB();
      context.enableDepth();
      context.enableUser(); 
      pg = createGraphics(1024,768);
      background(255);

      userImage=createImage(640,480,RGB);
    }
    void draw(){

      pg.beginDraw();
      pg.strokeWeight(dikte);
      if (mousePressed && mouseButton == LEFT) {
        pg.stroke(0);
        pg.line(mouseX, mouseY, pmouseX, pmouseY);
      }

      if (mousePressed && mouseButton == RIGHT) {
        pg.stroke(255);
        pg.line(mouseX, mouseY, pmouseX, pmouseY);
      }

      context.update();
      rgbImage=context.rgbImage();

      userMap=context.userMap();
      userImage.loadPixels();
      for(int y=0;y<context.depthHeight();y++){
        for(int x=0;x<context.depthWidth();x++){
          int index=x+y*context.depthWidth();
          if(userMap[index]!=0){
            // user pixel: make it black so it shows up as a silhouette
            pixelColor=rgbImage.pixels[index];
            userImage.pixels[index]=color(0,0,0);
          }else{
            // background pixel: keep it white
            userImage.pixels[index]=color(255);
          }
        }
      }
      userImage.updatePixels();
      pg.endDraw();
      image(userImage, 0, 0); 
      image(pg, 0, 0);
    }

    void keyPressed() {
      if (key == CODED) {
        if (keyCode == UP) {
          // take a screenshot
          saveFrame("line-######.png");
        }
        if (keyCode == DOWN) {
          // clear the drawings
          pg.beginDraw();
          pg.clear();
          pg.endDraw();
          background(255);
        }
        if (keyCode == RIGHT && dikte<30) {
          // increase the stroke weight
          dikte++;
        }
        if (keyCode == LEFT && dikte>2) {
          // decrease the stroke weight
          dikte--;
        }
      }
    }

I have also tested and worked through the code below, but I can't figure out how to change it so that the drawings get attached to the skeleton and move along with the limbs.

void draw(){
  //clears the screen with white, this is usually a good idea
  //to avoid color artefacts from previous draw iterations
  background(255);

  //asks kinect to send new data
  context.update();

  //retrieves depth image
  PImage depthImage=context.depthImage();
  depthImage.loadPixels();

  //get user pixels - array of the same size as depthImage.pixels, that gives information about the users in the depth image:
  // if upix[i]=0, there is no user at that pixel position
  // if upix[i] > 0, upix[i] indicates which userid is at that position
  int[] upix=context.userMap();

  //colorize users
  for(int i=0; i < upix.length; i++){
    if(upix[i] > 0){
      //there is a user on that position
      //NOTE: if you need to distinguish between users, check the value of the upix[i]
      img.pixels[i]=color(0,0,255);
    }else{
      //add depth data to the image
     img.pixels[i]=depthImage.pixels[i];
    }
  }
  img.updatePixels();

  //draws the depth map data as an image to the screen 
  //at position 0(left),0(top) corner
  image(img,0,0);

  //draw significant points of users

  //get array of IDs of all users present 
  int[] users=context.getUsers();

  ellipseMode(CENTER);

  //iterate through users
  for(int i=0; i < users.length; i++){
    int uid=users[i];

    //draw center of mass of the user (simple mean across position of all user pixels that corresponds to the given user)
    PVector realCoM=new PVector();

    //get the CoM in realworld (3D) coordinates
    context.getCoM(uid,realCoM);
    PVector projCoM=new PVector();

    //convert realworld coordinates to projective (those that we can use to draw to our canvas)
    context.convertRealWorldToProjective(realCoM, projCoM);
    fill(255,0,0);
    ellipse(projCoM.x,projCoM.y,10,10);

    //check if user has a skeleton
    if(context.isTrackingSkeleton(uid)){
      //draw head
      PVector realHead=new PVector();

      //get realworld coordinates of the given joint of the user (in this case Head -> SimpleOpenNI.SKEL_HEAD)
      context.getJointPositionSkeleton(uid,SimpleOpenNI.SKEL_HEAD,realHead);
      PVector projHead=new PVector();
      context.convertRealWorldToProjective(realHead, projHead);
      fill(0,255,0);
      ellipse(projHead.x,projHead.y,10,10);

      //draw left hand
      PVector realLHand=new PVector();
      context.getJointPositionSkeleton(uid,SimpleOpenNI.SKEL_LEFT_HAND,realLHand);
      PVector projLHand=new PVector();
      context.convertRealWorldToProjective(realLHand, projLHand);
      fill(255,255,0);
      ellipse(projLHand.x,projLHand.y,10,10);

    }
  }

}
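
The closest I have come to an idea is the rough sketch below: when a key is pressed, remember where the head is on screen at that moment ("fix" the drawing), and from then on draw the whole doodle layer shifted by however far the head has moved since. The anchorHead variable and the key handling are my own invention, and this only handles translation (no rotation or scaling, and only for the head), so I'm not sure it's the right approach at all:

    import SimpleOpenNI.*;

    SimpleOpenNI context;
    PGraphics pg;               // the doodle layer, as in my first sketch
    PVector anchorHead = null;  // head position on screen at the moment the drawing was "fixed"

    void setup() {
      size(640, 480);
      context = new SimpleOpenNI(this);
      context.enableDepth();
      context.enableUser();
      pg = createGraphics(640, 480);
    }

    void draw() {
      background(255);
      context.update();
      image(context.depthImage(), 0, 0);

      // draw on the doodle layer with the mouse, as in my first sketch
      pg.beginDraw();
      if (mousePressed) {
        pg.stroke(0);
        pg.strokeWeight(10);
        pg.line(mouseX, mouseY, pmouseX, pmouseY);
      }
      pg.endDraw();

      if (anchorHead == null) {
        // drawing has not been fixed yet: show it exactly where it was drawn
        image(pg, 0, 0);
      } else {
        // shift the whole doodle layer by how far the head has moved since fixing
        int[] users = context.getUsers();
        for (int i = 0; i < users.length; i++) {
          int uid = users[i];
          if (!context.isTrackingSkeleton(uid)) continue;
          PVector realHead = new PVector();
          context.getJointPositionSkeleton(uid, SimpleOpenNI.SKEL_HEAD, realHead);
          PVector projHead = new PVector();
          context.convertRealWorldToProjective(realHead, projHead);
          image(pg, projHead.x - anchorHead.x, projHead.y - anchorHead.y);
        }
      }
    }

    void keyPressed() {
      if (key == CODED && keyCode == UP) {
        // "fix" the drawing to the head: remember where the head is right now
        int[] users = context.getUsers();
        if (users.length > 0 && context.isTrackingSkeleton(users[0])) {
          PVector realHead = new PVector();
          context.getJointPositionSkeleton(users[0], SimpleOpenNI.SKEL_HEAD, realHead);
          anchorHead = new PVector();
          context.convertRealWorldToProjective(realHead, anchorHead);
        }
      }
    }

    // start skeleton tracking as soon as a user is detected
    void onNewUser(SimpleOpenNI curContext, int userId) {
      curContext.startTrackingSkeleton(userId);
    }

Even if something like that works for the head, I would still need a way to decide which joint a given drawing belongs to (a hat to the head, wings to the shoulders) and to handle more than a simple translation.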

Can anyone help me solve this?

Kind regards

