近年の画像AI技術の発展により、Kinectとかあまり注目されなくなりましたが、おちラボではデプスカメラとしての性能の良さなどに魅力を感じていることから、懲りずに使ってます。
といっても久しぶりな面もあり、最近流行りのPythonでも利用できることから、数あるライブラリの中では pykinect_azure というのが使いやすそうです。で、sampleがいくつかあるのですが、骨格情報を取得するsampleがなぜか見当たらなかったので、以下に掲載します。まあ確かにC#でやるよりはラクですね。OpenCVに対応するために画像変換もさほど気にしなくて良さそうですし。
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
import numpy as np  # kept: imported by the original post (unused here)
import cv2

import pykinect_azure as pykinect

if __name__ == "__main__":
    # Initialize the k4a/k4abt libraries; if they are not found,
    # pass the library path as an argument (per pykinect_azure docs).
    pykinect.initialize_libraries(track_body=True)

    # Camera configuration: color stream off, wide-FOV binned depth mode
    # (body tracking only needs the depth/IR streams).
    device_config = pykinect.default_configuration
    device_config.color_resolution = pykinect.K4A_COLOR_RESOLUTION_OFF
    device_config.depth_mode = pykinect.K4A_DEPTH_MODE_WFOV_2X2BINNED

    # Start the device, then the body tracker attached to it.
    device = pykinect.start_device(config=device_config)
    bodyTracker = pykinect.start_body_tracker()

    cv2.namedWindow('Depth image with skeleton', cv2.WINDOW_NORMAL)

    while True:
        # Grab the next capture and its body-tracking result.
        capture = device.update()
        body_frame = bodyTracker.update()

        # Colorized depth image + colored body-segmentation image.
        ret_depth, depth_color_image = capture.get_colored_depth_image()
        ret_color, body_image_color = body_frame.get_segmentation_image()
        if not ret_depth or not ret_color:
            # One of the images was not ready this frame; try again.
            continue

        # Overlay the segmentation on the depth image, then draw skeletons.
        combined_image = cv2.addWeighted(
            depth_color_image, 0.6, body_image_color, 0.4, 0)
        combined_image = body_frame.draw_bodies(combined_image)

        # BUG FIX vs. the original post: `get_bodies()` was called twice,
        # and the inner loop printed *every* joint's position (via
        # `joint.position`) while labelling it as the nose — the looked-up
        # `nose_joint` was never used. Read the nose joint once per body.
        bodies = body_frame.get_bodies()
        for body in bodies:
            nose_joint = body.joints[pykinect.K4ABT_JOINT_NOSE]
            position = nose_joint.position
            # position.x / position.y / position.z are the joint coordinates.
            print("nose positionだよ", position.x, position.y)

        cv2.imshow('Depth image with skeleton', combined_image)

        # Press the q key to stop.
        if cv2.waitKey(1) == ord('q'):
            break
0 件のコメント:
コメントを投稿