Introduction
The previous post covered how to read and use the joint data. This post records how to combine the joints with OpenCV to draw the skeleton on the color image.
Code
#include <iostream>
#include <opencv2/core.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
#include <Kinect.h>

using namespace std;
using namespace cv;

void draw(Mat & img, Joint & r_1, Joint & r_2, ICoordinateMapper * myMapper);

int main(void)
{
    IKinectSensor * mySensor = nullptr;
    GetDefaultKinectSensor(&mySensor);
    mySensor->Open();

    IColorFrameSource * myColorSource = nullptr;
    mySensor->get_ColorFrameSource(&myColorSource);

    IColorFrameReader * myColorReader = nullptr;
    myColorSource->OpenReader(&myColorReader);

    int colorHeight = 0, colorWidth = 0;
    IFrameDescription * myDescription = nullptr;
    myColorSource->get_FrameDescription(&myDescription);
    myDescription->get_Height(&colorHeight);
    myDescription->get_Width(&colorWidth);

    IColorFrame * myColorFrame = nullptr;
    Mat original(colorHeight, colorWidth, CV_8UC4);
    //********************** Above: preparation for reading the ColorFrame **************************

    IBodyFrameSource * myBodySource = nullptr;
    mySensor->get_BodyFrameSource(&myBodySource);

    IBodyFrameReader * myBodyReader = nullptr;
    myBodySource->OpenReader(&myBodyReader);

    int myBodyCount = 0;
    myBodySource->get_BodyCount(&myBodyCount);

    IBodyFrame * myBodyFrame = nullptr;

    ICoordinateMapper * myMapper = nullptr;
    mySensor->get_CoordinateMapper(&myMapper);
    //********************** Above: preparation of the BodyFrame and the Mapper ***********************

    while (1)
    {
        while (myColorReader->AcquireLatestFrame(&myColorFrame) != S_OK);
        myColorFrame->CopyConvertedFrameDataToArray(colorHeight * colorWidth * 4, original.data, ColorImageFormat_Bgra);
        Mat copy = original.clone();                            // Read the color frame and copy it into the matrix

        while (myBodyReader->AcquireLatestFrame(&myBodyFrame) != S_OK);     // Read the body frame

        IBody ** myBodyArr = new IBody *[myBodyCount];          // Prepare the array that will hold the body data
        for (int i = 0; i < myBodyCount; i++)
            myBodyArr[i] = nullptr;

        if (myBodyFrame->GetAndRefreshBodyData(myBodyCount, myBodyArr) == S_OK)     // Copy the body data into the array
            for (int i = 0; i < myBodyCount; i++)
            {
                BOOLEAN result = false;
                if (myBodyArr[i]->get_IsTracked(&result) == S_OK && result)         // First check whether this body is tracked
                {
                    Joint myJointArr[JointType_Count];
                    if (myBodyArr[i]->GetJoints(JointType_Count, myJointArr) == S_OK)   // If tracked, copy the joint data into the array and draw
                    {
                        draw(copy, myJointArr[JointType_Head], myJointArr[JointType_Neck], myMapper);
                        draw(copy, myJointArr[JointType_Neck], myJointArr[JointType_SpineShoulder], myMapper);
                        draw(copy, myJointArr[JointType_SpineShoulder], myJointArr[JointType_ShoulderLeft], myMapper);
                        draw(copy, myJointArr[JointType_SpineShoulder], myJointArr[JointType_SpineMid], myMapper);
                        draw(copy, myJointArr[JointType_SpineShoulder], myJointArr[JointType_ShoulderRight], myMapper);
                        draw(copy, myJointArr[JointType_ShoulderLeft], myJointArr[JointType_ElbowLeft], myMapper);
                        draw(copy, myJointArr[JointType_SpineMid], myJointArr[JointType_SpineBase], myMapper);
                        draw(copy, myJointArr[JointType_ShoulderRight], myJointArr[JointType_ElbowRight], myMapper);
                        draw(copy, myJointArr[JointType_ElbowLeft], myJointArr[JointType_WristLeft], myMapper);
                        draw(copy, myJointArr[JointType_SpineBase], myJointArr[JointType_HipLeft], myMapper);
                        draw(copy, myJointArr[JointType_SpineBase], myJointArr[JointType_HipRight], myMapper);
                        draw(copy, myJointArr[JointType_ElbowRight], myJointArr[JointType_WristRight], myMapper);
                        draw(copy, myJointArr[JointType_WristLeft], myJointArr[JointType_ThumbLeft], myMapper);
                        draw(copy, myJointArr[JointType_WristLeft], myJointArr[JointType_HandLeft], myMapper);
                        draw(copy, myJointArr[JointType_HipLeft], myJointArr[JointType_KneeLeft], myMapper);
                        draw(copy, myJointArr[JointType_HipRight], myJointArr[JointType_KneeRight], myMapper);
                        draw(copy, myJointArr[JointType_WristRight], myJointArr[JointType_ThumbRight], myMapper);
                        draw(copy, myJointArr[JointType_WristRight], myJointArr[JointType_HandRight], myMapper);
                        draw(copy, myJointArr[JointType_HandLeft], myJointArr[JointType_HandTipLeft], myMapper);
                        draw(copy, myJointArr[JointType_KneeLeft], myJointArr[JointType_FootLeft], myMapper);
                        draw(copy, myJointArr[JointType_KneeRight], myJointArr[JointType_FootRight], myMapper);
                        draw(copy, myJointArr[JointType_HandRight], myJointArr[JointType_HandTipRight], myMapper);
                    }
                }
            }
        delete[] myBodyArr;
        myBodyFrame->Release();
        myColorFrame->Release();

        imshow("TEST", copy);
        if (waitKey(30) == VK_ESCAPE)
            break;
    }
    myMapper->Release();
    myDescription->Release();
    myColorReader->Release();
    myColorSource->Release();
    myBodyReader->Release();
    myBodySource->Release();
    mySensor->Close();
    mySensor->Release();

    return 0;
}

void draw(Mat & img, Joint & r_1, Joint & r_2, ICoordinateMapper * myMapper)
{
    // Use two joints as the endpoints of a line segment, filtering by tracking state
    if (r_1.TrackingState == TrackingState_Tracked && r_2.TrackingState == TrackingState_Tracked)
    {
        ColorSpacePoint t_point;    // The joint positions are in camera space and must be mapped into color space
        Point p_1, p_2;

        myMapper->MapCameraPointToColorSpace(r_1.Position, &t_point);
        p_1.x = t_point.X;
        p_1.y = t_point.Y;
        myMapper->MapCameraPointToColorSpace(r_2.Position, &t_point);
        p_2.x = t_point.X;
        p_2.y = t_point.Y;

        line(img, p_1, p_2, Vec3b(0, 255, 0), 5);
        circle(img, p_1, 10, Vec3b(255, 0, 0), -1);
        circle(img, p_2, 10, Vec3b(255, 0, 0), -1);
    }
}
Detailed Explanation
The code is clearly very similar to the previous post's; the difference is the drawing part. First, the CameraSpace coordinates that the joints use must be converted into ColorSpace coordinates, which is done with ICoordinateMapper. Each pair of joints is then used as the two endpoints of a line segment. The joint pairs are chosen in a breadth-first manner, working layer by layer from the top of the body downward.