2

我想从 kinect(版本 1)中获取深度和 rgb 视频流。我正在使用Python 3 版本的 pykinect,而不是 CPython。

我找到了一些示例,但 Pykinect 文档几乎不存在,我不想使用 pygame。

在带有 freenect 的 linux 上,我做了:

rgb_stream = freenect.sync_get_video()[0]
rgb_stream = rgb_stream[:, :, ::-1]
rgb_image = cv.cvtColor(rgb_stream, cv.COLOR_BGR2RGB)

depth_stream = freenect.sync_get_depth()[0]
depth_stream = np.uint8(depth_stream)
depth_image = cv.cvtColor(depth_stream, cv.COLOR_GRAY2RGB)

但是我在 Windows 上使用 pykinect,我想以类似的方式获取深度和 rgb 流,然后用 OpenCV 处理它并用 Qt 显示它。

这是我找到的示例代码:

from pykinect import nui
import numpy
import cv2

def video_handler_function(frame):
    """Callback fired for each new color frame: copy it into a numpy
    buffer and display it with OpenCV.

    frame -- a pykinect video frame event object; frame.image holds the
             640x480 BGRA pixel data.
    """
    # One 480x640 image with 4 channels (BGRA), one byte per channel.
    buffer = numpy.empty((480, 640, 4), numpy.uint8)
    # Blit the raw frame bytes straight into the numpy buffer's memory.
    frame.image.copy_bits(buffer.ctypes.data)
    cv2.imshow('KINECT Video Stream', buffer)

# Create the Kinect runtime, register the color-frame callback, then
# open the color stream (order matters: register before opening so no
# frame event is missed).
kinect = nui.Runtime()
kinect.video_frame_ready += video_handler_function
kinect.video_stream.open(nui.ImageStreamType.Video, 2,nui.ImageResolution.Resolution640x480,nui.ImageType.Color)

cv2.namedWindow('KINECT Video Stream', cv2.WINDOW_AUTOSIZE)

# Frames arrive via the callback; this loop only pumps the OpenCV GUI
# and watches for ESC (key code 27) to quit.
while True:

    key = cv2.waitKey(1)
    if key == 27: break

# Release the Kinect device and tear down all OpenCV windows.
kinect.close()
cv2.destroyAllWindows()

`video_handler_function` 是什么?`kinect.video_frame_ready += video_handler_function` 这一行的目的是什么?

我试图通过调用 `kinect.depth_stream.open(nui.ImageStreamType.Depth, 2, nui.ImageResolution.Resolution320x240, nui.ImageType.Depth)` 并对处理函数进行一些修改来获取深度图像,但无法使其工作。

4

1 回答 1

1
from pykinect import nui
import numpy
import cv2


# Create the Kinect runtime and enable the skeleton engine so that
# skeleton_frame_ready events are delivered.
kinect = nui.Runtime()
kinect.skeleton_engine.enabled = True

def getColorImage(frame):
    """Color-stream callback: copy the incoming BGRA frame into a numpy
    array and show it in the 'KINECT Video Stream' window.
    """
    # Size the buffer from the frame itself instead of hard-coding it.
    h = frame.image.height
    w = frame.image.width
    rgb = numpy.empty((h, w, 4), numpy.uint8)
    # Copy the raw frame bytes into the array's backing memory.
    frame.image.copy_bits(rgb.ctypes.data)
    cv2.imshow('KINECT Video Stream', rgb)

def getDepthImage(frame):
    """Depth-stream callback: copy the raw 16-bit depth frame into a
    numpy array, strip the player-index bits, and display it.

    Bug fixed vs. the original: copy_bits must run BEFORE the bit
    manipulation.  The original computed ``arr2d`` from an *empty*
    (uninitialized) buffer and only then copied the raw bits into it,
    so the shifts never touched real data.  The buffer must also be
    16-bit: each Kinect v1 depth pixel is 13 bits of depth plus a
    3-bit player index, which does not fit in uint8.
    """
    height, width = frame.image.height, frame.image.width  # frame dimensions
    # 16 bits per pixel: low 3 bits = player index, upper 13 = depth (mm).
    depth = numpy.empty((height, width, 1), numpy.uint16)
    # 1) Copy the raw frame bytes into the buffer FIRST.
    frame.image.copy_bits(depth.ctypes.data)
    # 2) Drop the 3 player-index bits and mask to the 12 significant bits.
    arr2d = (depth >> 3) & 4095
    # 3) Scale the 12-bit range down to 8 bits for display.
    arr2d >>= 4
    # Convert to uint8 so imshow renders the full brightness range.
    cv2.imshow('KINECT depth Stream', arr2d.astype(numpy.uint8))

def frame_ready(frame):
    """Skeleton-stream callback: print the position of every skeleton
    that the tracker currently reports as TRACKED.
    """
    tracked = nui.SkeletonTrackingState.TRACKED
    for sk in frame.SkeletonData:
        # Skip skeletons that are only position-inferred or not tracked.
        if sk.eTrackingState != tracked:
            continue
        pos = sk.Position
        print(pos.x, pos.y, pos.z, pos.w)

def main():
    """Wire up the Kinect color/depth/skeleton streams, then run the
    GUI loop until ESC is pressed.

    Bug fixed vs. the original: the handler registration and
    ``*_stream.open`` calls were INSIDE ``while True``, so every
    iteration re-registered the callbacks (``+=`` accumulates handlers,
    so each frame would eventually be processed many times) and
    re-opened the streams.  Setup must happen exactly once, before the
    loop.  ``waitKey(0)`` (block forever) is also replaced with
    ``waitKey(1)`` so the loop keeps pumping GUI events.
    """
    # --- one-time setup: register handlers, then open the streams ---
    kinect.video_frame_ready += getColorImage
    kinect.video_stream.open(nui.ImageStreamType.Video, 2, nui.ImageResolution.Resolution640x480, nui.ImageType.Color)
    cv2.namedWindow('KINECT Video Stream', cv2.WINDOW_AUTOSIZE)

    kinect.depth_frame_ready += getDepthImage
    kinect.depth_stream.open(nui.ImageStreamType.Depth, 2, nui.ImageResolution.Resolution320x240, nui.ImageType.Depth)
    cv2.namedWindow('KINECT depth Stream', cv2.WINDOW_AUTOSIZE)

    kinect.skeleton_frame_ready += frame_ready

    # --- display loop: frames arrive via callbacks; poll for ESC ---
    while True:
        if cv2.waitKey(1) == 27:  # ESC key
            break

    # Tear down windows and release the device.
    cv2.destroyAllWindows()
    kinect.close()

if __name__ == '__main__':
    main()

~~~~~
于 2021-04-28T00:08:41.703 回答