unsafe void UpdateFusionFrame()
{
    // Create a Fusion depth float frame from the Kinect depth image data
    reconstruction.DepthToDepthFloatFrame(depthImagePixels, depthImageFrame,
        FusionDepthProcessor.DefaultMinimumDepth,
        FusionDepthProcessor.DefaultMaximumDepth, true);

    // Smooth depthImageFrame and store the result in smoothDepthImageFrame
    reconstruction.SmoothDepthFloatFrame(depthImageFrame, smoothDepthImageFrame,
        SmoothingKernelWidth, SmoothingDistanceThreshold);

    // Compute each depth pixel's position in the color image so its color can be sampled
    ColorSpacePoint[] points = new ColorSpacePoint[depthWidth * depthHeight];
    coordinateMapper.MapDepthFrameToColorSpace(depthImagePixels, points);

    // Copy the color values into colorImageFrameBuffer
    int[] colorImageFrameBuffer = new int[depthWidth * depthHeight];
    fixed (byte* ptrColorImagePixels = colorImagePixels)
    {
        int* rawColorPixels = (int*)ptrColorImagePixels;

        Parallel.For(0, depthHeight, y =>
        {
            for (int x = 0; x < depthWidth; x++)
            {
                int index = y * depthWidth + x;
                ColorSpacePoint point = points[index];
                int colorX = (int)Math.Ceiling(point.X);
                int colorY = (int)Math.Ceiling(point.Y);

                // Depth pixels with no valid color mapping come back as infinity
                // and fail this bounds check, so they are simply skipped
                if ((colorX >= 0) && (colorX < colorWidth) &&
                    (colorY >= 0) && (colorY < colorHeight))
                {
                    colorImageFrameBuffer[index] = rawColorPixels[colorY * colorWidth + colorX];
                }
            }
        });
    }

    // Create the Fusion colorImageFrame from colorImageFrameBuffer
    colorImageFrame.CopyPixelDataFrom(colorImageFrameBuffer);

    // Refresh worldToCameraTransform so the current camera position and
    // orientation feed into the next alignment step
    worldToCameraTransform = reconstruction.GetCurrentWorldToCameraTransform();

    // Integrate the latest smoothDepthImageFrame and colorImageFrame
    // into the Fusion Reconstruction object
    float alignmentEnergy = 0.0f;
    bool ret = reconstruction.ProcessFrame(smoothDepthImageFrame, colorImageFrame,
        FusionDepthProcessor.DefaultAlignIterationCount,
        FusionDepthProcessor.DefaultIntegrationWeight,
        FusionDepthProcessor.DefaultColorIntegrationOfAllAngles,
        out alignmentEnergy, worldToCameraTransform);

    // If integration failed, count the error; after 100 consecutive failures, start over
    if (!ret)
    {
        if (++errorCount >= 100)
        {
            errorCount = 0;
            reset();
        }
    }

    // Store the result
    result();
}
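The reset() helper called above is not shown. A minimal sketch of what it might contain, assuming recovery only requires clearing the volume and restarting tracking from an identity pose; ResetReconstruction and Matrix4.Identity are actual Kinect Fusion API, the body itself is an assumption:

void reset()
{
    // Assumed recovery: restart integration from scratch with an identity camera pose.
    // ResetReconstruction clears the volume's accumulated surface data.
    worldToCameraTransform = Matrix4.Identity;
    reconstruction.ResetReconstruction(worldToCameraTransform);
}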
/// <summary>
/// Processes the depth data package into the Kinect Fusion volume
/// </summary>
/// <param name="pKdp">the data package</param>
/// <param name="pCancelToken">token used to cancel the integration</param>
void processDepthData(KinectDataPackage pKdp, System.Threading.CancellationToken pCancelToken)
{
    lock (canWorkLock)
    {
        Log.LogManager.updateAlgorithmStatus("Kinect Fusion integration");
        this.volume.ResetReconstruction(Matrix4.Identity);

        int picturesIntegrated = 0;
        foreach (ushort[] pDepth in pKdp.rawDepthData)
        {
            pCancelToken.ThrowIfCancellationRequested();

            WriteableBitmap bitmap = new WriteableBitmap(this.depthFloatFrame.Width, this.depthFloatFrame.Height,
                96.0, 96.0, PixelFormats.Bgr32, null);
            int[] voxelPixels = new int[this.depthFloatFrame.Width * this.depthFloatFrame.Height];

            // The Fusion frames wrap native memory, so dispose them each iteration
            using (FusionFloatImageFrame depthFloatBuffer = new FusionFloatImageFrame(this.depthFloatFrame.Width, this.depthFloatFrame.Height))
            using (FusionPointCloudImageFrame pointCloudBuffer = new FusionPointCloudImageFrame(this.depthFloatFrame.Width, this.depthFloatFrame.Height))
            using (FusionColorImageFrame shadedSurfaceColorFrame = new FusionColorImageFrame(this.depthFloatFrame.Width, this.depthFloatFrame.Height))
            {
                // Convert the raw depth data into a Fusion depth float frame
                this.volume.DepthToDepthFloatFrame(pDepth, depthFloatBuffer,
                    Config.ServerConfigManager._ServerConfigObject.serverKinectFusionConfig.minDepthClip,
                    Config.ServerConfigManager._ServerConfigObject.serverKinectFusionConfig.maxDepthClip,
                    false);

                // Align the frame against the volume and integrate it
                float alignmentValue;
                bool trackingSucceeded = this.volume.ProcessFrame(depthFloatBuffer,
                    Config.ServerConfigManager._ServerConfigObject.serverKinectFusionConfig.iterationCount,
                    Config.ServerConfigManager._ServerConfigObject.serverKinectFusionConfig.integrationWeight,
                    out alignmentValue,
                    volume.GetCurrentWorldToCameraTransform());

                // If camera tracking failed, no data integration or raycast for the
                // reference point cloud has taken place, and the internal camera pose
                // is unchanged.
                if (!trackingSucceeded)
                {
                    trackingErrorCount++;
                }
                else
                {
                    Matrix4 calculatedCameraPose = volume.GetCurrentWorldToCameraTransform();

                    // Set the camera pose and reset tracking errors
                    worldToCameraTransform = calculatedCameraPose;
                    trackingErrorCount = 0;
                }

                // Calculate the point cloud
                volume.CalculatePointCloud(pointCloudBuffer, worldToCameraTransform);

                // Shade the point cloud and render it into a bitmap for the preview event
                FusionDepthProcessor.ShadePointCloud(pointCloudBuffer, worldToCameraTransform,
                    null, shadedSurfaceColorFrame);
                shadedSurfaceColorFrame.CopyPixelDataTo(voxelPixels);
            }

            bitmap.WritePixels(new Int32Rect(0, 0, bitmap.PixelWidth, bitmap.PixelHeight),
                voxelPixels, bitmap.PixelWidth * sizeof(int), 0);
            bitmap.Freeze();
            OnNewFusionPictureEvent.BeginInvoke(pKdp.usedConfig.ID, bitmap, null, null);

            picturesIntegrated++;
            Log.LogManager.writeLogDebug("[DataIntegration:Reconstruction] " + picturesIntegrated
                + " of " + pKdp.rawDepthData.Count + " Pictures integrated");
        }

        // If the request was a calibration request, export meshes
        if (pKdp.usedConfig.clientRequestObject.requestType == ClientConfigObject.RequestType.calibration)
        {
            exportMesh(volume, pKdp, false);
            Log.LogManager.writeLog("[DataIntegration:Reconstruction] Mesh of " + pKdp.usedConfig.name + " exported.");
            return;
        }

        // Broadcast the new point cloud
        PointCloud p = new PointCloud(volume);
        p.ConfigObject = pKdp.usedConfig;
        OnNewPointCloudEvent.BeginInvoke(p, null, null);

        Log.LogManager.writeLog("[DataIntegration:Reconstruction] All pictures of " + pKdp.usedConfig.name + " integrated");
        Log.LogManager.updateAlgorithmStatus("Done");
    }
}
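The body of exportMesh is not shown either. A plausible sketch under stated assumptions: the third parameter is taken to toggle axis flipping, and the output format (a Wavefront .obj named after the config) is invented for illustration. Reconstruction.CalculateMesh and the Mesh accessors (GetVertices, GetTriangleIndexes) are actual Kinect Fusion API; everything else here is an assumption:

void exportMesh(Reconstruction volume, KinectDataPackage pKdp, bool flipAxes)
{
    // voxelStep of 1 extracts the mesh at full volume resolution
    using (Mesh mesh = volume.CalculateMesh(1))
    {
        var vertices = mesh.GetVertices();
        var indices = mesh.GetTriangleIndexes();

        // Hypothetical output path derived from the request's config name
        using (var writer = new System.IO.StreamWriter(pKdp.usedConfig.name + ".obj"))
        {
            foreach (var v in vertices)
            {
                // Optionally negate Y/Z to convert between coordinate conventions
                float y = flipAxes ? -v.Y : v.Y;
                float z = flipAxes ? -v.Z : v.Z;
                writer.WriteLine(string.Format(System.Globalization.CultureInfo.InvariantCulture,
                    "v {0} {1} {2}", v.X, y, z));
            }

            // Kinect Fusion emits a flat triangle list; .obj indices are 1-based
            for (int i = 0; i < indices.Count; i += 3)
            {
                writer.WriteLine("f {0} {1} {2}",
                    indices[i] + 1, indices[i + 1] + 1, indices[i + 2] + 1);
            }
        }
    }
}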