
I am working on a platform that uses C# .NET and WPF 3D graphics. Below is the code flow:

1) I take the depth data from the Kinect and feed it into a function that computes 3D points.
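
The depthImagePixels field and the depth stream are set up elsewhere in the class, roughly along these lines, assuming 320x240 depth frames (that setup code is not part of this post):

    // setup done elsewhere (e.g. in Window_Loaded); assumes a 320x240 depth stream
    sensor.DepthStream.Enable(DepthImageFormat.Resolution320x240Fps30);
    this.depthImagePixels = new DepthImagePixel[sensor.DepthStream.FramePixelDataLength];
    sensor.Start();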

    private void display3DView()
    {
        while (loop_run)
        {
            using (DepthImageFrame depthFrame = sensor.DepthStream.OpenNextFrame(1000))
            {
                if (depthFrame == null) continue;

                Point3DCollection PointCloud;

                depthFrame.CopyDepthImagePixelDataTo(this.depthImagePixels);

                float[,] ImageArray = new float[240, 320];   // 240 rows x 320 columns

                short[,] depth = new short[240, 320];

                for (int i = 0; i < 240; i++)
                {
                    for (int j = 0; j < 320; j++)
                    {
                        depth[i, j] = depthImagePixels[j + i * 320].Depth;

                        ImageArray[i, j] = (float)depth[i, j] / 1000f;   // millimetres -> metres
                    }
                }
                PointCloud = Calculate_PointCloud(ImageArray);

                viewModel(PointCloud);
            }
        }
    }

2) I compute the 3D points from the depth data using the Kinect camera's intrinsic parameters:
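
The maths is the standard pinhole back-projection: for the pixel in column j and row i with depth Z (in metres), X = (j - PrincipalPointX) * Z / FocalLengthX and Y = (i - PrincipalPointY) * Z / FocalLengthY, where PrincipalPointX/Y and FocalLengthX/Y are the depth camera intrinsics in pixel units.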

    private Point3DCollection Calculate_PointCloud(float[,] ImageArray)
    {

   Point3DCollection PointCloud = new Point3DCollection();

    float x_coodinate;
    float y_coordinate;
    float z_coordinate;
    float thresholdvalue = 2.0f;

    for (int i = 0; i < 239; ++i)
    {
        for (int j = 0; j < 319; ++j)
        {
            if (Math.Abs(ImageArray[i, j] - ImageArray[i, j + 1]) < thresholdvalue && Math.Abs(ImageArray[i, j] - ImageArray[i + 1, j]) < thresholdvalue && Math.Abs(ImageArray[i, j + 1] - ImageArray[i + 1, j]) < thresholdvalue)
            {

                z_coordinate = ImageArray[i, j];
                x_coodinate = ((j - this.PrincipalPointX) * z_coordinate) / FocalLengthX;
                y_coordinate = ((i - this.PrincipalPointY) * z_coordinate) / FocalLengthY;
                Point3D point1 = new Point3D(x_coodinate, y_coordinate, z_coordinate);
                PointCloud.Add(point1);

                z_coordinate = ImageArray[i, j + 1];
                x_coodinate = (((j + 1) - this.PrincipalPointX) * z_coordinate) / FocalLengthX;
                y_coordinate = ((i - this.PrincipalPointY) * z_coordinate) / FocalLengthY;
                Point3D point2 = new Point3D(x_coodinate, y_coordinate, z_coordinate);
                PointCloud.Add(point2);

                z_coordinate = ImageArray[i + 1, j];
                x_coodinate = ((j - this.PrincipalPointX) * z_coordinate) / FocalLengthX;
                y_coordinate = (((i + 1) - this.PrincipalPointY) * z_coordinate) / FocalLengthY;
                Point3D point3 = new Point3D(x_coodinate, y_coordinate, z_coordinate);
                PointCloud.Add(point3);

              }
            }
        }
    return PointCloud;
    }

3) Here I convert the points into a set of triangles, with normal information for each 3D point, feed those triangles into a 3D mesh object, and render the mesh object with a Viewport3D control.

    private void viewModel(Point3DCollection points)
    {
           DirectionalLight DirLight1 = new DirectionalLight();
            DirLight1.Color = Colors.White;
            DirLight1.Direction = new Vector3D(1, 1, 1);
           PerspectiveCamera Camera1 = new PerspectiveCamera();
           Camera1.FarPlaneDistance = 8000;
          Camera1.NearPlaneDistance = 100;
          Camera1.FieldOfView = 10;
          Camera1.Position = new Point3D(0, 0, 1);
          Camera1.LookDirection = new Vector3D(-1, -1, -1);
          Camera1.UpDirection = new Vector3D(0, 1, 0);
           bool combinedvertices = true;
          TriangleModel Triatomesh = new TriangleModel();
          MeshGeometry3D tmesh = new MeshGeometry3D();
          GeometryModel3D msheet = new GeometryModel3D();
          Model3DGroup modelGroup = new Model3DGroup();
          ModelVisual3D modelsVisual = new ModelVisual3D();
          Viewport3D myViewport = new Viewport3D();

         for(int i =0; i<points.Count; i+=3)
        {
            Triatomesh.addTriangleToMesh(points[i],points[i + 1], points[i + 2], tmesh, combinedvertices); 
        }
        msheet.Geometry = tmesh;
       msheet.Material = new DiffuseMaterial(new SolidColorBrush(Colors.White));
       modelGroup.Children.Add(msheet);
       modelGroup.Children.Add(DirLight1);
       modelsVisual.Content = modelGroup;
       myViewport.IsHitTestVisible = false;
      myViewport.Camera = Camera1;
       myViewport.Children.Add(modelsVisual);
       canvas1.Children.Add(myViewport);
       myViewport.Height = canvas1.Height;
       myViewport.Width = canvas1.Width;
       Canvas.SetTop(myViewport, 0);
       Canvas.SetLeft(myViewport, 0);
    }

4) This is the function that adds three 3D points to the 3D mesh object as a triangle, computing one face normal that is assigned to each of the three points.

    public void addTriangleToMesh(Point3D p0, Point3D p1, Point3D p2,
        MeshGeometry3D mesh, bool combine_vertices)
    {
        Vector3D normal = CalculateNormal(p0, p1, p2);

        if (combine_vertices)
        {
            addPointCombined(p0, mesh, normal);
            addPointCombined(p1, mesh, normal);
            addPointCombined(p2, mesh, normal);
        }
        else
        {
            mesh.Positions.Add(p0);
            mesh.Positions.Add(p1);
            mesh.Positions.Add(p2);
              //mesh.TriangleIndices.Add(mesh.TriangleIndices.Count);
           // mesh.TriangleIndices.Add(mesh.TriangleIndices.Count);
           // mesh.TriangleIndices.Add(mesh.TriangleIndices.Count);
            mesh.Normals.Add(normal);
            mesh.Normals.Add(normal);
            mesh.Normals.Add(normal);
        }
    }

    // Face normal from the cross product; its direction depends on the winding order of P0, P1, P2.
    public Vector3D CalculateNormal(Point3D P0, Point3D P1, Point3D P2)   //static
    {
        Vector3D v0 = new Vector3D(P1.X - P0.X, P1.Y - P0.Y, P1.Z - P0.Z);

        Vector3D v1 = new Vector3D(P2.X - P1.X, P2.Y - P1.Y, P2.Z - P1.Z);

        return Vector3D.CrossProduct(v0, v1);
    }

    public void addPointCombined(Point3D point, MeshGeometry3D mesh, Vector3D normal)
    {
        bool found = false;
        int i = 0;
        foreach (Point3D p in mesh.Positions)
        {
            if (p.Equals(point))
            {
                found = true;
                mesh.TriangleIndices.Add(i);
                mesh.Positions.Add(point);
                mesh.Normals.Add(normal);
                break;
            }

            i++;
        }

        if (!found)
        {
            mesh.Positions.Add(point);
            mesh.TriangleIndices.Add(mesh.TriangleIndices.Count);
            mesh.Normals.Add(normal);
        }
}

5) This is my XAML code:

    <Window x:Class="PointCloud3DView.MainWindow"
            xmlns="http://schemas.microsoft.com/winfx/2006/xaml/presentation"
            xmlns:x="http://schemas.microsoft.com/winfx/2006/xaml"
            Title="PointCloud" Height="653" Width="993" Background="Black" Loaded="Window_Loaded">

        <Grid Height="1130" Width="1626">

            <Canvas Height="611" HorizontalAlignment="Left" Name="canvas1" VerticalAlignment="Top"
                    Width="967" Background="Black" />

        </Grid>
    </Window>

The problem is that I cannot get the 3D model to show up on the WPF screen. Could anyone please go through the whole code, help me see where I am going wrong, and suggest corrections? Thanks in advance.


1 Answer


I have been experimenting with WPF 3D for a few weeks now and have learned a few lessons the hard way :) I don't have time to check and try your whole code right now, since I am at work, but I would try three things:

  1. I am not sure about the orientation of your camera. It sits at (0, 0, 1) and looks along the vector (-1, -1, -1), which means it is aimed at the point (-1, -1, 0). That is a bit odd... Try positioning the camera further away (depending on the scale of your model), e.g. (0, 10, 0) or even further, and point it at (0, 0, 0) or whatever the centre point of your model is:

    Camera1.Position = new Point3D(0, 10, 0);
    Camera1.LookDirection = new Point3D(0,0,0) - Camera1.Position;

  2. Also remove the directional light for now (it relies on the normals, and if those are wrong nothing will show up) and try an ambient light instead. Note, too, that your directional light's vector (1, 1, 1) is exactly opposite to your look direction (-1, -1, -1).

  3. Try swapping the order of the points in the triangle indices (WPF only renders one side of a mesh, so the model may be there but you are seeing its back/inside) - instead of 0, 1, 2 try 0, 2, 1; a minimal sketch follows right after this list.
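
As a minimal sketch (keeping the rest of your addTriangleToMesh unchanged), the flipped winding in the non-combined branch would look roughly like this:

    // add the vertices as p0, p2, p1 instead of p0, p1, p2, so the other face becomes the front face
    // (if you fill TriangleIndices instead, add the indices in n, n+2, n+1 order)
    mesh.Positions.Add(p0);
    mesh.Positions.Add(p2);
    mesh.Positions.Add(p1);
    mesh.Normals.Add(normal);
    mesh.Normals.Add(normal);
    mesh.Normals.Add(normal);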

If nothing helps, I will try your code once I get home.

/LATER EDIT/ I tried your code on a simple triangle, rewrote it according to my tips above, and it works. There are a few comments in it, plus two tips on how to clean up your code :)

    private void viewModel(Point3DCollection points)
    {
        DirectionalLight DirLight1 = new DirectionalLight();
        DirLight1.Color = Colors.White;
        DirLight1.Direction = new Vector3D(1, 1, 1);

        PerspectiveCamera Camera1 = new PerspectiveCamera();
        Camera1.FarPlaneDistance = 8000;
        //Camera1.NearPlaneDistance = 100; //close object will not be displayed with this option
        Camera1.FieldOfView = 10;   
        //Camera1.Position = new Point3D(0, 0, 1);
        //Camera1.LookDirection = new Vector3D(-1, -1, -1);
        Camera1.Position = new Point3D(0, 0, 10);
        Camera1.LookDirection = new Point3D(0, 0, 0) - Camera1.Position; //focus camera on real center of your model (0,0,0) in this case
        Camera1.UpDirection = new Vector3D(0, 1, 0);
        //you can use constructor to create Camera instead of assigning its properties like:
        //PerspectiveCamera Camera1 = new PerspectiveCamera(new Point3D(0,0,10), new Vector3D(0,0,-1), new Vector3D(0,1,0), 10);


        bool combinedvertices = true;
        TriangleModel Triatomesh = new TriangleModel();
        MeshGeometry3D tmesh = new MeshGeometry3D();
        GeometryModel3D msheet = new GeometryModel3D();
        Model3DGroup modelGroup = new Model3DGroup();
        ModelVisual3D modelsVisual = new ModelVisual3D();
        Viewport3D myViewport = new Viewport3D();

        for (int i = 0; i < points.Count; i += 3)
        {
            Triatomesh.addTriangleToMesh(points[i + 2], points[i + 1], points[i], tmesh, combinedvertices);                
            //I did swap order of vertexes you may try both options with your model               
        }

        msheet.Geometry = tmesh;
        msheet.Material = new DiffuseMaterial(new SolidColorBrush(Colors.White));
        //you can use constructor to create GeometryModel3D instead of assigning its properties like:
        //msheet = new GeometryModel3D(tmesh, new DiffuseMaterial(new SolidColorBrush(Colors.White)));             

        modelGroup.Children.Add(msheet);
        // use AmbientLight instead of a directional light
        modelGroup.Children.Add(new AmbientLight(Colors.White));

        modelsVisual.Content =  modelGroup;
        myViewport.IsHitTestVisible = false;

        myViewport.Camera = Camera1;

        myViewport.Children.Add(modelsVisual);

        canvas1.Children.Add(myViewport);
        myViewport.Height = canvas1.Height;
        myViewport.Width = canvas1.Width;
        Canvas.SetTop(myViewport, 0);
        Canvas.SetLeft(myViewport, 0);
    }

The Point3DCollection I used as the parameter (instead of the Kinect input):

    Point3DCollection points = new Point3DCollection();
    points.Add(new Point3D(0.5, 0, 0.5));
    points.Add(new Point3D(0.5, -0.5, -0.5));
    points.Add(new Point3D(-0.5, -0.1, -0.5));
    viewModel(points);
Answered on 2014-02-11T10:10:37.450