public void CreateSmoothDepthCloud(FusionFloatImageFrame downsampledDepthFloatFrame)
{
    // Smooth the depth frame
    this.volume.SmoothDepthFloatFrame(downsampledDepthFloatFrame, this.DownsampledSmoothDepthFloatFrame, SmoothingKernelWidth, SmoothingDistanceThreshold);

    // Calculate point cloud from the smoothed frame
    FusionDepthProcessor.DepthFloatFrameToPointCloud(this.DownsampledSmoothDepthFloatFrame, DownsampledDepthPointCloudFrame);
}
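// Illustrative only: a minimal sketch of how the downsampled tracking path above might be driven
// each frame, assuming the engine's Resampler exposes its downsampled float frame and the
// DownsampleDepthFrameNearestNeighbor method shown further down. Member names not present in the
// original snippets are assumptions.
public void BuildDownsampledPointCloudsSketch(ushort[] rawDepthPixels, Matrix4 currentCameraPose)
{
    var resampler = engine.Resampler;

    // Downsample raw depth into the small float frame (nearest neighbor)
    resampler.DownsampleDepthFrameNearestNeighbor(resampler.DownsampledDepthFloatFrame, rawDepthPixels);

    // Smooth it and convert to the observed point cloud (method above)
    CreateSmoothDepthCloud(resampler.DownsampledDepthFloatFrame);

    // Raycast the volume at the current pose to get the reference point cloud;
    // the two clouds can then be passed to Reconstruction.AlignPointClouds for camera tracking
    // (see FindCameraPoseAlignPointClouds further down).
    volume.CalculatePointCloud(DownsampledRaycastPointCloudFrame, currentCameraPose);
}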
private void SetupVariables()
{
    mapper = sensor.CoordinateMapper;

    // Calculate integers used for allocation
    int depthImageSize = depthWidth * depthHeight;
    downsampledWidth = depthWidth / DownsampleFactor;
    downsampledHeight = depthHeight / DownsampleFactor;
    int colorImageByteSize = colorWidth * colorHeight * sizeof(int);
    int downsampledDepthImageSize = downsampledWidth * downsampledHeight;

    // Allocate frames
    resampledColorFrameDepthAligned = new FusionColorImageFrame(depthWidth, depthHeight);
    depthFloatFrame = new FusionFloatImageFrame(depthWidth, depthHeight);
    downsampledDepthFloatFrame = new FusionFloatImageFrame(downsampledWidth, downsampledHeight);
    downsampledSmoothDepthFloatFrame = new FusionFloatImageFrame(downsampledWidth, downsampledHeight);
    downsampledDepthPointCloudFrame = new FusionPointCloudImageFrame(downsampledWidth, downsampledHeight);
    downsampledRaycastPointCloudFrame = new FusionPointCloudImageFrame(downsampledWidth, downsampledHeight);
    downsampledDeltaFromReferenceFrameColorFrame = new FusionColorImageFrame(downsampledWidth, downsampledHeight);

    // Allocate arrays
    depthImagePixels = new ushort[depthImageSize];
    downsampledDepthImagePixels = new float[downsampledDepthImageSize];
    downsampledDeltaFromReferenceColorPixels = new int[downsampledDepthImageSize];
    deltaFromReferenceFramePixelsArgb = new int[depthImageSize];
    colorCoordinates = new ColorSpacePoint[depthImageSize];
    resampledColorImagePixelsAlignedToDepth = new int[depthImageSize];
    depthVisibilityTestMapWidth = colorWidth / ColorDownsampleFactor;
    depthVisibilityTestMapHeight = colorHeight / ColorDownsampleFactor;
    depthVisibilityTestMap = new ushort[depthVisibilityTestMapWidth * depthVisibilityTestMapHeight];
    colorImagePixels = new byte[colorImageByteSize];
}
protected virtual void Dispose(bool disposing)
{
    if (!disposed)
    {
        if (depthFloatBuffer != null) { depthFloatBuffer.Dispose(); depthFloatBuffer = null; }
        if (pointCloudBuffer != null) { pointCloudBuffer.Dispose(); pointCloudBuffer = null; }
        if (shadedSurfaceColorFrame != null) { shadedSurfaceColorFrame.Dispose(); shadedSurfaceColorFrame = null; }
        if (volume != null) { volume.Dispose(); volume = null; }

        disposed = true;
    }
}
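// Illustrative only: the public Dispose entry point that typically pairs with the protected
// Dispose(bool) above, following the standard .NET dispose pattern. This wrapper is an
// assumption; it is not part of the original snippet.
public void Dispose()
{
    Dispose(true);              // release the managed Fusion frames and the volume
    GC.SuppressFinalize(this);  // nothing left for a finalizer to do once disposed
}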
public void RenderDepth(FusionFloatImageFrame df)
{
    if (depthRenderer != null)
    {
        depthRenderer.Render(df);
    }
}
public void UpdateAlignDeltas(FusionFloatImageFrame ff)
{
    if (deltaFrameRenderer != null)
    {
        deltaFrameRenderer.Render(ff);
    }
}
public ReconstructionController(KinectSensor sensor)
{
    Contract.Requires(sensor != null);

    this.syncContext = SynchronizationContext.Current;
    this.sensor = sensor;

    var rparams = new ReconstructionParameters(128, 256, 256, 256);
    reconstruction = Reconstruction.FusionCreateReconstruction(rparams, ReconstructionProcessor.Amp, -1, worldToCameraTransform);

    worldToVolumeTransform = reconstruction.GetCurrentWorldToVolumeTransform();
    worldToVolumeTransform.M43 -= MIN_DEPTH * rparams.VoxelsPerMeter;
    reconstruction.ResetReconstruction(worldToCameraTransform, worldToVolumeTransform);

    var depthFrameDesc = sensor.DepthFrameSource.FrameDescription;
    var totalPixels = depthFrameDesc.Width * depthFrameDesc.Height;
    rawDepthData = new ushort[totalPixels];
    bodyIndexData = new byte[totalPixels];
    SurfaceBitmap = new ThreadSafeBitmap(depthFrameDesc.Width, depthFrameDesc.Height);

    var intrinsics = sensor.CoordinateMapper.GetDepthCameraIntrinsics();
    var cparams = new CameraParameters(
        intrinsics.FocalLengthX / depthFrameDesc.Width,
        intrinsics.FocalLengthY / depthFrameDesc.Height,
        intrinsics.PrincipalPointX / depthFrameDesc.Width,
        intrinsics.PrincipalPointY / depthFrameDesc.Height);

    floatDepthFrame = new FusionFloatImageFrame(depthFrameDesc.Width, depthFrameDesc.Height, cparams);
    pointCloudFrame = new FusionPointCloudImageFrame(depthFrameDesc.Width, depthFrameDesc.Height, cparams);
    surfaceFrame = new FusionColorImageFrame(depthFrameDesc.Width, depthFrameDesc.Height, cparams);
}
public PointCloudProcessor(Engine e)
{
    this.engine = e;
    var resampler = e.Resampler;
    this.volume = e.FusionVolume.Reconstruction;

    DownsampledSmoothDepthFloatFrame = new FusionFloatImageFrame(resampler.DownsampledWidth, resampler.DownsampledHeight);
    DownsampledRaycastPointCloudFrame = new FusionPointCloudImageFrame(resampler.DownsampledWidth, resampler.DownsampledHeight);
    DownsampledDepthPointCloudFrame = new FusionPointCloudImageFrame(resampler.DownsampledWidth, resampler.DownsampledHeight);
    DepthPointCloudFrame = new FusionPointCloudImageFrame(KinectSettings.DEPTH_WIDTH, KinectSettings.DEPTH_HEIGHT);
    RaycastPointCloudFrame = new FusionPointCloudImageFrame(KinectSettings.DEPTH_WIDTH, KinectSettings.DEPTH_HEIGHT);
}
public FusionFloatImageFrame SmoothDepthFloatFrame(FusionFloatImageFrame depthFloatFrame)
{
    // Lock the depth operations
    lock (this.rawDataLock)
    {
        var recon = engine.FusionVolume.Reconstruction;
        recon.SmoothDepthFloatFrame(depthFloatFrame, SmoothedDepthFloatFrame, SmoothingKernelWidth, SmoothingDistanceThreshold);
        return SmoothedDepthFloatFrame;
    }
}
private void InitializeKinectFusion()
{
    // Initialize Kinect Fusion
    var volParam = new ReconstructionParameters(VoxelsPerMeter, VoxelResolutionX, VoxelResolutionY, VoxelResolutionZ);
    volume = Reconstruction.FusionCreateReconstruction(volParam, ReconstructionProcessor.Amp, -1, Matrix4.Identity);

    // Create the conversion buffers
    depthFloatBuffer = new FusionFloatImageFrame(DepthWidth, DepthHeight);
    pointCloudBuffer = new FusionPointCloudImageFrame(DepthWidth, DepthHeight);
    shadedSurfaceColorFrame = new FusionColorImageFrame(DepthWidth, DepthHeight);

    // Reset the volume
    volume.ResetReconstruction(Matrix4.Identity);
}
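// Illustrative only: a minimal per-frame update for the buffers created above, assuming a ushort[]
// depth frame (as in processDepthData further down; older SDK versions take DepthImagePixel[]
// instead). The clip distances and integration weight are example values, not taken from the
// original snippet.
private void ProcessDepthFrameSketch(ushort[] depthPixels)
{
    // Convert raw millimetre depth to a float frame in metres, clipping outside 0.5m..4.0m (example values)
    volume.DepthToDepthFloatFrame(depthPixels, depthFloatBuffer, 0.5f, 4.0f, false);

    // Align the new frame to the volume and integrate it; the pose is unchanged if tracking fails
    float alignmentEnergy;
    bool tracked = volume.ProcessFrame(
        depthFloatBuffer,
        FusionDepthProcessor.DefaultAlignIterationCount,
        7, // integration weight (example value)
        out alignmentEnergy,
        volume.GetCurrentWorldToCameraTransform());

    // Raycast the volume from the current pose and shade it for display
    volume.CalculatePointCloud(pointCloudBuffer, volume.GetCurrentWorldToCameraTransform());
    FusionDepthProcessor.ShadePointCloud(pointCloudBuffer, volume.GetCurrentWorldToCameraTransform(), null, shadedSurfaceColorFrame);
}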
/// <summary>
/// Downsample depth pixels with nearest neighbor
/// </summary>
/// <param name="dest">The destination depth image.</param>
/// <param name="factor">The downsample factor (2=x/2,y/2, 4=x/4,y/4, 8=x/8,y/8, 16=x/16,y/16).</param>
private unsafe void DownsampleDepthFrameNearestNeighbor(FusionFloatImageFrame dest, int factor)
{
    #region ErrorHandling
    if (null == dest || null == downsampledDepthImagePixels)
    {
        throw new ArgumentException("inputs null");
    }

    if (false == (2 == factor || 4 == factor || 8 == factor || 16 == factor))
    {
        throw new ArgumentException("factor != 2, 4, 8 or 16");
    }

    int downsampleWidth = depthWidth / factor;
    int downsampleHeight = depthHeight / factor;

    if (dest.Width != downsampleWidth || dest.Height != downsampleHeight)
    {
        throw new ArgumentException("dest != downsampled image size");
    }
    #endregion

    fixed (ushort* rawDepthPixelPtr = depthImagePixels)
    {
        ushort* rawDepthPixels = (ushort*)rawDepthPixelPtr;

        // Horizontally flip the image, as the standard depth image is flipped internally in Kinect Fusion
        // to give a viewpoint as though from behind the Kinect looking forward by default.
        Parallel.For(
            0,
            downsampleHeight,
            y =>
            {
                int flippedDestIndex = (y * downsampleWidth) + (downsampleWidth - 1);
                int sourceIndex = y * depthWidth * factor;

                for (int x = 0; x < downsampleWidth; ++x, --flippedDestIndex, sourceIndex += factor)
                {
                    // Copy depth value, converting millimetres to metres
                    downsampledDepthImagePixels[flippedDestIndex] = (float)rawDepthPixels[sourceIndex] * 0.001f;
                }
            });
    }

    dest.CopyPixelDataFrom(downsampledDepthImagePixels);
}
/// <summary>
/// constructor, creates new reconstruction volume
/// </summary>
internal DataIntegrationReconstruction()
{
    this.canWorkLock = new Object();
    this.worldToCameraTransform = Matrix4.Identity;

    this.volume = ColorReconstruction.FusionCreateReconstruction(
        new ReconstructionParameters(
            Config.ServerConfigManager._ServerConfigObject.serverKinectFusionConfig.VoxelsPerMeter,
            Config.ServerConfigManager._ServerConfigObject.serverKinectFusionConfig.VoxelResolutionX,
            Config.ServerConfigManager._ServerConfigObject.serverKinectFusionConfig.VoxelResolutionY,
            Config.ServerConfigManager._ServerConfigObject.serverKinectFusionConfig.VoxelResolutionZ),
        Config.ServerConfigManager._ServerConfigObject.serverKinectFusionConfig.ProcessorType,
        Config.ServerConfigManager._ServerConfigObject.serverKinectFusionConfig.DeviceToUse,
        worldToCameraTransform);

    this.defaultWorldToVolumeTransform = this.volume.GetCurrentWorldToVolumeTransform();

    depthFloatFrame = new FusionFloatImageFrame(
        Config.ServerConfigManager._ServerConfigObject.serverKinectFusionConfig.depthWidth,
        Config.ServerConfigManager._ServerConfigObject.serverKinectFusionConfig.depthHeight);
    depthFloatFrameDepthPixels = new float[depthFloatFrame.Width * depthFloatFrame.Height];
    depthFloatFramePixelsArgb = new int[depthFloatFrame.Width * depthFloatFrame.Height];
}
void InitializeFusion()
{
    // Reconstruction Parameters
    float voxelPerMeter = 256;
    int voxelsX = 512;
    int voxelsY = 384;
    int voxelsZ = 512;
    reconstructionParameters = new ReconstructionParameters(voxelPerMeter, voxelsX, voxelsY, voxelsZ);

    // Initialize the camera pose to the world origin
    worldToCameraTransform = Matrix4.Identity;

    // Create the Fusion Reconstruction object
    reconstruction = ColorReconstruction.FusionCreateReconstruction(reconstructionParameters, ReconstructionProcessor.Amp, -1, worldToCameraTransform);

    // Create the Fusion image frames
    cameraParameters = CameraParameters.Defaults;
    depthImageFrame = new FusionFloatImageFrame(depthWidth, depthHeight, cameraParameters);
    smoothDepthImageFrame = new FusionFloatImageFrame(depthWidth, depthHeight, cameraParameters);
    colorImageFrame = new FusionColorImageFrame(depthWidth, depthHeight, cameraParameters);
    pointCloudImageFrame = new FusionPointCloudImageFrame(depthWidth, depthHeight, cameraParameters);
    surfaceImageFrame = new FusionColorImageFrame(depthWidth, depthHeight, cameraParameters);
}
public VolumeBuilder(Scanner source, Dispatcher dispatcher)
{
    if (source == null)
    {
        throw new ArgumentNullException("source");
    }

    this.source = source;
    this.dispatcher = dispatcher;

    // Set the world-view transform to identity, so the world origin is the initial camera location.
    this.worldToCameraTransform = Matrix4.Identity;

    // Map world X axis to blue channel, Y axis to green channel and Z axis to red channel,
    // normalizing each to the range [0, 1]. We also add a shift of 0.5 to both X,Y channels
    // as the world origin starts located at the center of the front face of the volume,
    // hence we need to map negative x,y world vertex locations to positive color values.
    this.worldToBGRTransform = Matrix4.Identity;
    this.worldToBGRTransform.M11 = settings.VoxelsPerMeter / settings.VoxelsX;
    this.worldToBGRTransform.M22 = settings.VoxelsPerMeter / settings.VoxelsY;
    this.worldToBGRTransform.M33 = settings.VoxelsPerMeter / settings.VoxelsZ;
    this.worldToBGRTransform.M41 = 0.5f;
    this.worldToBGRTransform.M42 = 0.5f;
    this.worldToBGRTransform.M44 = 1.0f;

    var volumeParameters = new ReconstructionParameters(settings.VoxelsPerMeter, settings.VoxelsX, settings.VoxelsY, settings.VoxelsZ);
    this.volume = ColorReconstruction.FusionCreateReconstruction(volumeParameters, ReconstructionProcessor.Amp, -1, this.worldToCameraTransform);

    var depthWidth = this.source.Frame.DepthWidth;
    var depthHeight = this.source.Frame.DepthHeight;
    var depthSize = depthWidth * depthHeight;

    this.depthFloatFrame = new FusionFloatImageFrame(depthWidth, depthHeight);
    this.smoothDepthFloatFrame = new FusionFloatImageFrame(depthWidth, depthHeight);
    this.resampledColorFrame = new FusionColorImageFrame(depthWidth, depthHeight);
    this.resampledColorFrameDepthAligned = new FusionColorImageFrame(depthWidth, depthHeight);
    this.deltaFromReferenceFrame = new FusionFloatImageFrame(depthWidth, depthHeight);
    this.shadedSurfaceFrame = new FusionColorImageFrame(depthWidth, depthHeight);
    this.raycastPointCloudFrame = new FusionPointCloudImageFrame(depthWidth, depthHeight);
    this.depthPointCloudFrame = new FusionPointCloudImageFrame(depthWidth, depthHeight);

    var downsampledDepthWidth = depthWidth / settings.DownsampleFactor;
    var downsampledDepthHeight = depthHeight / settings.DownsampleFactor;
    var downsampledDepthSize = downsampledDepthWidth * downsampledDepthHeight;

    this.downsampledDepthFloatFrame = new FusionFloatImageFrame(downsampledDepthWidth, downsampledDepthHeight);
    this.downsampledSmoothDepthFloatFrame = new FusionFloatImageFrame(downsampledDepthWidth, downsampledDepthHeight);
    this.downsampledRaycastPointCloudFrame = new FusionPointCloudImageFrame(downsampledDepthWidth, downsampledDepthHeight);
    this.downsampledDepthPointCloudFrame = new FusionPointCloudImageFrame(downsampledDepthWidth, downsampledDepthHeight);
    this.downsampledDeltaFromReferenceFrameColorFrame = new FusionColorImageFrame(downsampledDepthWidth, downsampledDepthHeight);

    this.resampledColorData = new int[depthSize];
    this.downsampledDepthData = new float[downsampledDepthSize];
    this.downsampledDeltaFromReferenceColorPixels = new int[downsampledDepthSize];
    this.deltaFromReferenceFramePixelsArgb = new int[depthSize];
    this.shadedSurfaceFramePixelsArgb = new int[this.shadedSurfaceFrame.PixelDataLength];

    this.defaultWorldToVolumeTransform = this.volume.GetCurrentWorldToVolumeTransform();

    this.volumeBitmap = new WriteableBitmap(depthWidth, depthHeight, settings.DefaultSystemDPI, settings.DefaultSystemDPI, PixelFormats.Bgr32, null);

    // Create a camera pose finder with default parameters
    this.cameraPoseFinder = CameraPoseFinder.FusionCreateCameraPoseFinder(CameraPoseFinderParameters.Defaults);

    this.workerThread = new Thread(WorkerThreadProc);
    this.workerThread.Start();

    this.source.Frame.OnDataUpdate += OnFrameDataUpdate;
}
public void Evaluate(int SpreadMax)
{
    this.VoxelResolutionX = this.FInVX[0];
    this.VoxelResolutionY = this.FInVY[0];
    this.VoxelResolutionZ = this.FInVZ[0];
    this.VoxelsPerMeter = this.FInVPM[0];

    if (this.FTextureOutput[0] == null) { this.FTextureOutput[0] = new DX11Resource<DX11DynamicTexture2D>(); }
    if (this.FPCOut[0] == null) { this.FPCOut[0] = new DX11Resource<IDX11ReadableStructureBuffer>(); }
    if (this.FGeomOut[0] == null) { this.FGeomOut[0] = new DX11Resource<DX11IndexedGeometry>(); }
    if (this.FOutVoxels[0] == null) { this.FOutVoxels[0] = new DX11Resource<IDX11ReadableStructureBuffer>(); }

    if (this.FInExport[0])
    {
        this.FGeomOut[0].Dispose();
        this.FGeomOut[0] = new DX11Resource<DX11IndexedGeometry>();
    }

    if (this.FInvalidateConnect)
    {
        this.FInvalidateConnect = false;

        if (this.FInRuntime.PluginIO.IsConnected)
        {
            this.runtime = this.FInRuntime[0];
            this.runtime.DepthFrameReady += this.runtime_DepthFrameReady;

            // Depth frames generated from the depth input
            this.depthFloatBuffer = new FusionFloatImageFrame(width, height);

            // Point cloud frames generated from the depth float input
            this.pointCloudBuffer = new FusionPointCloudImageFrame(width, height);

            // Create images to raycast the Reconstruction Volume
            this.shadedSurfaceColorFrame = new FusionColorImageFrame(width, height);
        }
    }

    if (this.FInVPM.IsChanged || this.FInVX.IsChanged || this.FInVY.IsChanged || this.FInVZ.IsChanged)
    {
        if (this.volume != null)
        {
            this.volume.Dispose();
        }

        var volParam = new ReconstructionParameters(VoxelsPerMeter, VoxelResolutionX, VoxelResolutionY, VoxelResolutionZ);
        this.worldToCameraTransform = Matrix4.Identity;
        this.volume = Reconstruction.FusionCreateReconstruction(volParam, ProcessorType, 0, this.worldToCameraTransform);
        this.defaultWorldToVolumeTransform = this.volume.GetCurrentWorldToVolumeTransform();
        this.ResetReconstruction();
    }

    if (this.runtime != null)
    {
        bool needreset = this.FInReset[0];
        if (needreset)
        {
            this.ResetReconstruction();
        }
    }
}
/// <summary>
/// Execute startup tasks
/// </summary>
/// <param name="sender">object sending the event</param>
/// <param name="e">event arguments</param>
private void WindowLoaded(object sender, RoutedEventArgs e)
{
    // Look through all sensors and start the first connected one.
    // This requires that a Kinect is connected at the time of app startup.
    // To make your app robust against plug/unplug,
    // it is recommended to use KinectSensorChooser provided in Microsoft.Kinect.Toolkit
    foreach (var potentialSensor in KinectSensor.KinectSensors)
    {
        if (potentialSensor.Status == KinectStatus.Connected)
        {
            this.sensor = potentialSensor;
            break;
        }
    }

    if (null == this.sensor)
    {
        this.statusBarText.Text = Properties.Resources.NoKinectReady;
        return;
    }

    // Turn on the depth stream to receive depth frames
    this.sensor.DepthStream.Enable(DepthImageResolution);

    this.frameDataLength = this.sensor.DepthStream.FramePixelDataLength;

    // Allocate space to put the color pixels we'll create
    this.colorPixels = new int[this.frameDataLength];

    // This is the bitmap we'll display on-screen
    this.colorBitmap = new WriteableBitmap((int)ImageSize.Width, (int)ImageSize.Height, 96.0, 96.0, PixelFormats.Bgr32, null);

    // Set the image we display to point to the bitmap where we'll put the image data
    this.Image.Source = this.colorBitmap;

    // Add an event handler to be called whenever there is new depth frame data
    this.sensor.DepthFrameReady += this.SensorDepthFrameReady;

    var volParam = new ReconstructionParameters(VoxelsPerMeter, VoxelResolutionX, VoxelResolutionY, VoxelResolutionZ);

    // Set the world-view transform to identity, so the world origin is the initial camera location.
    this.worldToCameraTransform = Matrix4.Identity;

    try
    {
        // This creates a volume cube with the Kinect at center of near plane, and volume directly
        // in front of Kinect.
        this.volume = Reconstruction.FusionCreateReconstruction(volParam, ProcessorType, DeviceToUse, this.worldToCameraTransform);

        this.defaultWorldToVolumeTransform = this.volume.GetCurrentWorldToVolumeTransform();

        if (this.translateResetPoseByMinDepthThreshold)
        {
            this.ResetReconstruction();
        }
    }
    catch (InvalidOperationException ex)
    {
        this.statusBarText.Text = ex.Message;
        return;
    }
    catch (DllNotFoundException)
    {
        this.statusBarText.Text = Properties.Resources.MissingPrerequisite;
        return;
    }

    // Depth frames generated from the depth input
    this.depthFloatBuffer = new FusionFloatImageFrame((int)ImageSize.Width, (int)ImageSize.Height);

    // Point cloud frames generated from the depth float input
    this.pointCloudBuffer = new FusionPointCloudImageFrame((int)ImageSize.Width, (int)ImageSize.Height);

    // Create images to raycast the Reconstruction Volume
    this.shadedSurfaceColorFrame = new FusionColorImageFrame((int)ImageSize.Width, (int)ImageSize.Height);

    // Start the sensor!
    try
    {
        this.sensor.Start();
    }
    catch (IOException ex)
    {
        // Device is in use
        this.sensor = null;
        this.statusBarText.Text = ex.Message;
        return;
    }
    catch (InvalidOperationException ex)
    {
        // Device is not valid, not supported or hardware feature unavailable
        this.sensor = null;
        this.statusBarText.Text = ex.Message;
        return;
    }

    // Set Near Mode by default
    try
    {
        this.sensor.DepthStream.Range = DepthRange.Near;
        checkBoxNearMode.IsChecked = true;
    }
    catch
    {
        // device not near mode capable
    }

    // Initialize and start the FPS timer
    this.fpsTimer = new DispatcherTimer();
    this.fpsTimer.Tick += new EventHandler(this.FpsTimerTick);
    this.fpsTimer.Interval = new TimeSpan(0, 0, FpsInterval);
    this.fpsTimer.Start();

    // Reset the reconstruction
    this.ResetReconstruction();
}
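// Illustrative only: a plausible ResetReconstruction for the WindowLoaded handler above, based on
// the pattern used in the ReconstructionController constructor (translate the world-to-volume
// transform along Z by the minimum depth so the volume sits in front of the camera). The
// MinDepthClip name is an assumption for illustration.
private void ResetReconstruction()
{
    // Reset the camera pose to the origin
    this.worldToCameraTransform = Matrix4.Identity;

    if (this.volume == null)
    {
        return;
    }

    if (this.translateResetPoseByMinDepthThreshold)
    {
        // Shift the volume so reconstruction starts at the minimum depth rather than at the sensor
        Matrix4 worldToVolumeTransform = this.defaultWorldToVolumeTransform;
        worldToVolumeTransform.M43 -= MinDepthClip * VoxelsPerMeter; // MinDepthClip: assumed constant, in metres
        this.volume.ResetReconstruction(this.worldToCameraTransform, worldToVolumeTransform);
    }
    else
    {
        this.volume.ResetReconstruction(this.worldToCameraTransform);
    }
}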
public void UpdateDepthPointCloud(FusionFloatImageFrame smooth)
{
    FusionDepthProcessor.DepthFloatFrameToPointCloud(smooth, DepthPointCloudFrame);
}
/// <summary>
/// processes the depth data package into the kinect fusion volume
/// </summary>
/// <param name="pKdp">the data package</param>
void processDepthData(KinectDataPackage pKdp, System.Threading.CancellationToken pCancelToken)
{
    lock (canWorkLock)
    {
        Log.LogManager.updateAlgorithmStatus("Kinect Fusion integration");
        this.volume.ResetReconstruction(Matrix4.Identity);

        int picturesIntegrated = 0;
        foreach (ushort[] pDepth in pKdp.rawDepthData)
        {
            pCancelToken.ThrowIfCancellationRequested();

            WriteableBitmap bitmap = new WriteableBitmap(this.depthFloatFrame.Width, this.depthFloatFrame.Height, 96.0, 96.0, PixelFormats.Bgr32, null);
            FusionFloatImageFrame depthFloatBuffer = new FusionFloatImageFrame(this.depthFloatFrame.Width, this.depthFloatFrame.Height);
            FusionPointCloudImageFrame pointCloudBuffer = new FusionPointCloudImageFrame(this.depthFloatFrame.Width, this.depthFloatFrame.Height);
            FusionColorImageFrame shadedSurfaceColorFrame = new FusionColorImageFrame(this.depthFloatFrame.Width, this.depthFloatFrame.Height);
            int[] voxelPixels = new int[this.depthFloatFrame.Width * this.depthFloatFrame.Height];

            this.volume.DepthToDepthFloatFrame(
                pDepth,
                depthFloatBuffer,
                Config.ServerConfigManager._ServerConfigObject.serverKinectFusionConfig.minDepthClip,
                Config.ServerConfigManager._ServerConfigObject.serverKinectFusionConfig.maxDepthClip,
                false);

            float alignmentValue;
            bool trackingSucceeded = this.volume.ProcessFrame(
                depthFloatBuffer,
                Config.ServerConfigManager._ServerConfigObject.serverKinectFusionConfig.iterationCount,
                Config.ServerConfigManager._ServerConfigObject.serverKinectFusionConfig.integrationWeight,
                out alignmentValue,
                volume.GetCurrentWorldToCameraTransform());

            // If camera tracking failed, no data integration or raycast for reference
            // point cloud will have taken place, and the internal camera pose
            // will be unchanged.
            if (!trackingSucceeded)
            {
                trackingErrorCount++;
            }
            else
            {
                Matrix4 calculatedCameraPose = volume.GetCurrentWorldToCameraTransform();

                // Set the camera pose and reset tracking errors
                worldToCameraTransform = calculatedCameraPose;
                trackingErrorCount = 0;
            }

            // Calculate the point cloud
            volume.CalculatePointCloud(pointCloudBuffer, worldToCameraTransform);

            // Shade point cloud and render
            FusionDepthProcessor.ShadePointCloud(pointCloudBuffer, worldToCameraTransform, null, shadedSurfaceColorFrame);

            shadedSurfaceColorFrame.CopyPixelDataTo(voxelPixels);

            bitmap.WritePixels(
                new Int32Rect(0, 0, bitmap.PixelWidth, bitmap.PixelHeight),
                voxelPixels,
                bitmap.PixelWidth * sizeof(int),
                0);
            bitmap.Freeze();

            OnNewFusionPictureEvent.BeginInvoke(pKdp.usedConfig.ID, bitmap, null, null);

            picturesIntegrated++;
            Log.LogManager.writeLogDebug("[DataIntegration:Reconstruction] " + picturesIntegrated + " of " + pKdp.rawDepthData.Count + " Pictures integrated");
        }

        // if request was calibration request, export meshes
        if (pKdp.usedConfig.clientRequestObject.requestType == ClientConfigObject.RequestType.calibration)
        {
            exportMesh(volume, pKdp, false);
            Log.LogManager.writeLog("[DataIntegration:Reconstruction] Mesh of " + pKdp.usedConfig.name + " exported.");
            return;
        }

        // broadcast new point cloud
        PointCloud p = new PointCloud(volume);
        p.ConfigObject = pKdp.usedConfig;
        OnNewPointCloudEvent.BeginInvoke(p, null, null);

        Log.LogManager.writeLog("[DataIntegration:Reconstruction] All pictures of " + pKdp.usedConfig.name + " integrated");
        Log.LogManager.updateAlgorithmStatus("Done");
    }
}
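// Illustrative only: a plausible shape for the exportMesh helper referenced above, using
// ColorReconstruction.CalculateMesh to extract geometry. The file naming and the meaning of the
// boolean flag are assumptions, not taken from the original class.
private void exportMesh(ColorReconstruction volume, KinectDataPackage pKdp, bool flipAxes)
{
    // Extract a mesh from the reconstruction volume at full voxel resolution
    ColorMesh mesh = volume.CalculateMesh(1);

    var vertices = mesh.GetVertices();
    var indices = mesh.GetTriangleIndexes();

    // Write a minimal Wavefront OBJ file (positions and faces only)
    using (var writer = new StreamWriter("mesh_" + pKdp.usedConfig.name + ".obj"))
    {
        foreach (var v in vertices)
        {
            writer.WriteLine("v {0} {1} {2}", v.X, flipAxes ? -v.Y : v.Y, flipAxes ? -v.Z : v.Z);
        }

        for (int i = 0; i < indices.Count; i += 3)
        {
            // OBJ face indices are 1-based
            writer.WriteLine("f {0} {1} {2}", indices[i] + 1, indices[i + 1] + 1, indices[i + 2] + 1);
        }
    }
}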
/// <summary>
/// Perform camera pose finding when tracking is lost using AlignPointClouds.
/// This is typically more successful than FindCameraPoseAlignDepthFloatToReconstruction.
/// </summary>
/// <returns>Returns true if a valid camera pose was found, otherwise false.</returns>
public bool FindCameraPoseAlignPointClouds(byte[] colorPixels, FusionFloatImageFrame depthFloatFrame)
{
    if (finder == null)
    {
        return false;
    }

    var resampledColorFrame = ProcessColorForCameraPoseFinder(colorPixels);
    if (resampledColorFrame.Width == 0)
    {
        return false;
    }

    // Didn't work
    MatchCandidates matchCandidates = finder.FindCameraPose(depthFloatFrame, resampledColorFrame);
    if (null == matchCandidates)
    {
        return false;
    }

    int poseCount = matchCandidates.GetPoseCount();
    float minDistance = matchCandidates.CalculateMinimumDistance();

    if (0 == poseCount || minDistance >= CameraPoseFinderDistanceThresholdReject)
    {
        logger.Log(LogLevel.Warn, "Not enough matches");
        return false;
    }

    // Smooth the depth frame
    var smooth = volume.Engine.DepthProcessor.SmoothDepthFloatFrame(depthFloatFrame);

    // Calculate point cloud from the smoothed frame
    engine.PointCloudCalculator.UpdateDepthPointCloud(smooth);

    double smallestEnergy = double.MaxValue;
    int smallestEnergyNeighborIndex = -1;

    int bestNeighborIndex = -1;
    Matrix4 bestNeighborCameraPose = Matrix4.Identity;
    double bestNeighborAlignmentEnergy = Constants.MaxAlignPointCloudsEnergyForSuccess;

    // Run alignment with best matched poseCount (i.e. k nearest neighbors (kNN))
    int maxTests = System.Math.Min(MaxCameraPoseFinderPoseTests, poseCount);

    var neighbors = matchCandidates.GetMatchPoses();

    for (int n = 0; n < maxTests; n++)
    {
        // Run the camera tracking algorithm with the volume.
        // This uses the raycast frame and pose to find a valid camera pose by matching the raycast against the input point cloud.
        Matrix4 poseProposal = neighbors[n];

        // Get the saved pose view by raycasting the volume
        var pc = engine.PointCloudCalculator;
        this.volume.Reconstruction.CalculatePointCloud(pc.RaycastPointCloudFrame, poseProposal);

        bool success = this.volume.Reconstruction.AlignPointClouds(
            pc.RaycastPointCloudFrame,
            pc.DepthPointCloudFrame,
            FusionDepthProcessor.DefaultAlignIterationCount,
            resampledColorFrame,
            out this.alignmentEnergy,
            ref poseProposal);

        bool relocSuccess = success
            && this.alignmentEnergy < bestNeighborAlignmentEnergy
            && this.alignmentEnergy > Constants.MinAlignPointCloudsEnergyForSuccess;

        if (relocSuccess)
        {
            bestNeighborAlignmentEnergy = this.alignmentEnergy;
            bestNeighborIndex = n;

            // This is after tracking succeeds, so should be a more accurate pose to store...
            bestNeighborCameraPose = poseProposal;

            // Update the delta image
            resampledColorFrame.CopyPixelDataTo(engine.DeltaCalculator.DeltaFromReferenceFramePixelsArgb);
        }

        // Find smallest energy neighbor independent of tracking success
        if (this.alignmentEnergy < smallestEnergy)
        {
            smallestEnergy = this.alignmentEnergy;
            smallestEnergyNeighborIndex = n;
        }
    }

    matchCandidates.Dispose();

    // Use the neighbor with the smallest residual alignment energy.
    // At the cost of additional processing we could also use kNN+Mean camera pose finding here
    // by calculating the mean pose of the best n matched poses and also testing this to see if the
    // residual alignment energy is less than with kNN.
    if (bestNeighborIndex > -1)
    {
        this.WorldToCameraTransform = bestNeighborCameraPose;
        this.SetReferenceFrame(this.WorldToCameraTransform);

        // Tracking succeeded!
        engine.CameraTracker.SetTrackingSucceeded();
        engine.DeltaCalculator.UpdateAlignDeltas();

        logger.Log(LogLevel.Warn, "Camera Pose Finder SUCCESS! Residual energy= " +
            string.Format(CultureInfo.InvariantCulture, "{0:0.00000}", bestNeighborAlignmentEnergy) + ", " +
            poseCount + " frames stored, minimum distance=" + minDistance + ", best match index=" + bestNeighborIndex);

        return true;
    }
    else
    {
        this.WorldToCameraTransform = neighbors[smallestEnergyNeighborIndex];
        this.SetReferenceFrame(this.WorldToCameraTransform);

        // Camera pose finding failed - return the tracking failed error code
        engine.CameraTracker.SetTrackingFailed();

        // Tracking Failed will be set again on the next iteration in ProcessDepth
        logger.Log(LogLevel.Warn, "Camera Pose Finder FAILED! Residual energy=" +
            string.Format(CultureInfo.InvariantCulture, "{0:0.00000}", smallestEnergy) + ", " +
            poseCount + " frames stored, minimum distance=" + minDistance + ", best match index=" + smallestEnergyNeighborIndex);

        return false;
    }
}
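// Illustrative only: how a tracking loop might fall back to the pose finder above after repeated
// tracking failures. The trackingErrorCount field and MaxTrackingErrorsBeforeRelocalize threshold
// are assumptions for illustration, not members of the original class.
private void RelocalizeIfLostSketch(bool trackingSucceeded, byte[] latestColorPixels, FusionFloatImageFrame depthFloatFrame)
{
    if (trackingSucceeded)
    {
        trackingErrorCount = 0;
        return;
    }

    trackingErrorCount++;
    if (trackingErrorCount >= MaxTrackingErrorsBeforeRelocalize)
    {
        // Try to recover the camera pose from the poses stored in the CameraPoseFinder
        FindCameraPoseAlignPointClouds(latestColorPixels, depthFloatFrame);
    }
}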
public DepthProcessor(Engine engine)
{
    this.engine = engine;
    DepthFloatFrame = new FusionFloatImageFrame(KinectSettings.DEPTH_WIDTH, KinectSettings.DEPTH_HEIGHT);
    SmoothedDepthFloatFrame = new FusionFloatImageFrame(KinectSettings.DEPTH_WIDTH, KinectSettings.DEPTH_HEIGHT);
}
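// Illustrative only: a minimal conversion step this DepthProcessor could expose, turning raw
// ushort depth into DepthFloatFrame via the reconstruction (the same DepthToDepthFloatFrame call
// used in processDepthData above). The clip values are example numbers, not taken from the
// original class.
public FusionFloatImageFrame UpdateDepthFloatFrame(ushort[] rawDepthPixels)
{
    var recon = engine.FusionVolume.Reconstruction;

    // Convert millimetre depth to a float frame in metres, clipping outside 0.5m..4.0m (example values)
    recon.DepthToDepthFloatFrame(rawDepthPixels, DepthFloatFrame, 0.5f, 4.0f, false);
    return DepthFloatFrame;
}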
/// <summary>
/// Downsample depth pixels with nearest neighbor
/// </summary>
/// <param name="dest">The destination depth image.</param>
/// <param name="depthImagePixels">The source raw depth pixels.</param>
/// <param name="mirrorDepth">True to keep the mirrored (sensor) orientation, false to horizontally flip for Kinect Fusion.</param>
public unsafe void DownsampleDepthFrameNearestNeighbor(FusionFloatImageFrame dest, ushort[] depthImagePixels, bool mirrorDepth = false)
{
    // The downsample factor (2=x/2,y/2, 4=x/4,y/4, 8=x/8,y/8, 16=x/16,y/16)
    var factor = DownsampleFactor;

    if (null == dest || null == this.downsampledDepthImagePixels)
    {
        throw new ArgumentException("inputs null");
    }

    if (false == (2 == factor || 4 == factor || 8 == factor || 16 == factor))
    {
        throw new ArgumentException("factor != 2, 4, 8 or 16");
    }

    int downsampleWidth = KinectSettings.DEPTH_WIDTH / factor;
    int downsampleHeight = KinectSettings.DEPTH_HEIGHT / factor;

    if (dest.Width != downsampleWidth || dest.Height != downsampleHeight)
    {
        throw new ArgumentException("dest != downsampled image size");
    }

    if (mirrorDepth)
    {
        fixed (ushort* rawDepthPixelPtr = depthImagePixels)
        {
            ushort* rawDepthPixels = (ushort*)rawDepthPixelPtr;

            Parallel.For(
                0,
                downsampleHeight,
                y =>
                {
                    int destIndex = y * downsampleWidth;
                    int sourceIndex = y * KinectSettings.DEPTH_WIDTH * factor;

                    for (int x = 0; x < downsampleWidth; ++x, ++destIndex, sourceIndex += factor)
                    {
                        // Copy depth value, converting millimetres to metres
                        this.downsampledDepthImagePixels[destIndex] = (float)rawDepthPixels[sourceIndex] * 0.001f;
                    }
                });
        }
    }
    else
    {
        fixed (ushort* rawDepthPixelPtr = depthImagePixels)
        {
            ushort* rawDepthPixels = (ushort*)rawDepthPixelPtr;

            // Horizontally flip the image, as the standard depth image is flipped internally in Kinect Fusion
            // to give a viewpoint as though from behind the Kinect looking forward by default.
            Parallel.For(
                0,
                downsampleHeight,
                y =>
                {
                    int flippedDestIndex = (y * downsampleWidth) + (downsampleWidth - 1);
                    int sourceIndex = y * KinectSettings.DEPTH_WIDTH * factor;

                    for (int x = 0; x < downsampleWidth; ++x, --flippedDestIndex, sourceIndex += factor)
                    {
                        // Copy depth value, converting millimetres to metres
                        this.downsampledDepthImagePixels[flippedDestIndex] = (float)rawDepthPixels[sourceIndex] * 0.001f;
                    }
                });
        }
    }

    dest.CopyPixelDataFrom(this.downsampledDepthImagePixels);
}
/// <summary>
/// Execute startup tasks
/// </summary>
/// <param name="sender">object sending the event</param>
/// <param name="e">event arguments</param>
private void WindowLoaded(object sender, RoutedEventArgs e)
{
    // Check to ensure suitable DirectX11 compatible hardware exists before initializing Kinect Fusion
    try
    {
        string deviceDescription = string.Empty;
        string deviceInstancePath = string.Empty;
        int deviceMemory = 0;

        FusionDepthProcessor.GetDeviceInfo(ProcessorType, DeviceToUse, out deviceDescription, out deviceInstancePath, out deviceMemory);
    }
    catch (IndexOutOfRangeException)
    {
        // Thrown when index is out of range for processor type or there is no DirectX11 capable device installed.
        // As we set -1 (auto-select default) for the DeviceToUse above, this indicates that there is no DirectX11
        // capable device. The options for users in this case are to either install a DirectX11 capable device
        // (see documentation for recommended GPUs) or to switch to non-real-time CPU based reconstruction by
        // changing ProcessorType to ReconstructionProcessor.Cpu
        this.statusBarText.Text = Properties.Resources.NoDirectX11CompatibleDeviceOrInvalidDeviceIndex;
        return;
    }
    catch (DllNotFoundException)
    {
        this.statusBarText.Text = Properties.Resources.MissingPrerequisite;
        return;
    }
    catch (InvalidOperationException ex)
    {
        this.statusBarText.Text = ex.Message;
        return;
    }

    // Look through all sensors and start the first connected one.
    // This requires that a Kinect is connected at the time of app startup.
    // To make your app robust against plug/unplug,
    // it is recommended to use KinectSensorChooser provided in Microsoft.Kinect.Toolkit
    foreach (var potentialSensor in KinectSensor.KinectSensors)
    {
        if (potentialSensor.Status == KinectStatus.Connected)
        {
            this.sensor = potentialSensor;
            break;
        }
    }

    if (null == this.sensor)
    {
        this.statusBarText.Text = Properties.Resources.NoKinectReady;
        return;
    }

    // Turn on the depth stream to receive depth frames
    this.sensor.DepthStream.Enable(DepthImageResolution);

    this.frameDataLength = this.sensor.DepthStream.FramePixelDataLength;

    // Create local depth pixels buffer
    this.depthImagePixels = new DepthImagePixel[this.frameDataLength];

    // Allocate space to put the color pixels we'll create
    this.colorPixels = new int[this.frameDataLength];

    // This is the bitmap we'll display on-screen
    this.colorBitmap = new WriteableBitmap((int)ImageSize.Width, (int)ImageSize.Height, 96.0, 96.0, PixelFormats.Bgr32, null);

    // Set the image we display to point to the bitmap where we'll put the image data
    this.Image.Source = this.colorBitmap;

    // Add an event handler to be called whenever there is new depth frame data
    this.sensor.DepthFrameReady += this.SensorDepthFrameReady;

    var volParam = new ReconstructionParameters(VoxelsPerMeter, VoxelResolutionX, VoxelResolutionY, VoxelResolutionZ);

    // Set the world-view transform to identity, so the world origin is the initial camera location.
    this.worldToCameraTransform = Matrix4.Identity;

    try
    {
        // This creates a volume cube with the Kinect at center of near plane, and volume directly
        // in front of Kinect.
        this.volume = Reconstruction.FusionCreateReconstruction(volParam, ProcessorType, DeviceToUse, this.worldToCameraTransform);

        this.defaultWorldToVolumeTransform = this.volume.GetCurrentWorldToVolumeTransform();

        if (this.translateResetPoseByMinDepthThreshold)
        {
            // Reset the reconstruction if we need to add a custom world-volume transformation
            this.ResetReconstruction();
        }
    }
    catch (InvalidOperationException ex)
    {
        this.statusBarText.Text = ex.Message;
        return;
    }
    catch (DllNotFoundException)
    {
        this.statusBarText.Text = Properties.Resources.MissingPrerequisite;
        return;
    }

    // Depth frames generated from the depth input
    this.depthFloatBuffer = new FusionFloatImageFrame((int)ImageSize.Width, (int)ImageSize.Height);

    // Point cloud frames generated from the depth float input
    this.pointCloudBuffer = new FusionPointCloudImageFrame((int)ImageSize.Width, (int)ImageSize.Height);

    // Create images to raycast the Reconstruction Volume
    this.shadedSurfaceColorFrame = new FusionColorImageFrame((int)ImageSize.Width, (int)ImageSize.Height);

    // Start the sensor!
    try
    {
        this.sensor.Start();
    }
    catch (IOException ex)
    {
        // Device is in use
        this.sensor = null;
        this.statusBarText.Text = ex.Message;
        return;
    }
    catch (InvalidOperationException ex)
    {
        // Device is not valid, not supported or hardware feature unavailable
        this.sensor = null;
        this.statusBarText.Text = ex.Message;
        return;
    }

    // Set Near Mode by default
    try
    {
        this.sensor.DepthStream.Range = DepthRange.Near;
        this.checkBoxNearMode.IsChecked = true;
    }
    catch (InvalidOperationException)
    {
        // Near mode not supported on device, silently fail during initialization
        this.checkBoxNearMode.IsEnabled = false;
    }

    // Initialize and start the FPS timer
    this.fpsTimer = new DispatcherTimer();
    this.fpsTimer.Tick += new EventHandler(this.FpsTimerTick);
    this.fpsTimer.Interval = new TimeSpan(0, 0, FpsInterval);
    this.fpsTimer.Start();

    this.lastFPSTimestamp = DateTime.UtcNow;
}
private void InitFusion()
{
    if (_isFusionInitialized)
    {
        return;
    }

    _isFusionInitialized = true;

    var depthFormat = KinectSensor.DepthStream.Format;
    var depthSize = FormatHelper.GetDepthSize(depthFormat);

    _fusionWorkItemPool = new Pool<FusionWorkItem, DepthImageFormat>(5, depthFormat, FusionWorkItem.Create);
    _fusionWorkQueue = new WorkQueue<FusionWorkItem>(ProcessFusionFrameBackground)
    {
        CanceledCallback = ReturnFusionWorkItem,
        MaxQueueLength = 2
    };

    this.frameDataLength = KinectSensor.DepthStream.FramePixelDataLength;

    // Allocate space to put the color pixels we'll create
    this.colorPixels = new int[(int)(depthSize.Width * 2 * depthSize.Height * 2)];

    // This is the bitmap we'll display on-screen
    this.colorFusionBitmap = new WriteableBitmap(
        (int)depthSize.Width * 2,
        (int)depthSize.Height * 2,
        96.0,
        96.0,
        PixelFormats.Bgr32,
        null);
    FusionOutputImage = colorFusionBitmap;

    var volParam = new ReconstructionParameters(VoxelsPerMeter, VoxelResolutionX, VoxelResolutionY, VoxelResolutionZ);

    // Set the world-view transform to identity, so the world origin is the initial camera location.
    this.worldToCameraTransform = Matrix4.Identity;

    try
    {
        // This creates a volume cube with the Kinect at center of near plane, and volume directly
        // in front of Kinect.
        this.volume = Reconstruction.FusionCreateReconstruction(volParam, ProcessorType, DeviceToUse, this.worldToCameraTransform);
        this.defaultWorldToVolumeTransform = this.volume.GetCurrentWorldToVolumeTransform();

        if (this.translateResetPose)
        {
            this.ResetReconstruction(_currentVolumeCenter);
        }
    }
    catch (ArgumentException)
    {
        FusionStatusMessage = "ArgumentException - DX11 GPU not found?";
        return;
    }
    catch (InvalidOperationException ex)
    {
        FusionStatusMessage = ex.Message;
        return;
    }
    catch (DllNotFoundException)
    {
        FusionStatusMessage = Properties.Resources.MissingPrerequisite;
        return;
    }

    // Depth frames generated from the depth input
    this.depthFloatBuffer = new FusionFloatImageFrame((int)depthSize.Width, (int)depthSize.Height);
    this.residualFloatBuffer = new FusionFloatImageFrame((int)depthSize.Width, (int)depthSize.Height);
    _residualData = new float[(int)(depthSize.Width * depthSize.Height)];

    // Point cloud frames generated from the depth float input
    this.pointCloudBuffer = new FusionPointCloudImageFrame((int)depthSize.Width * 2, (int)depthSize.Height * 2);

    // Create images to raycast the Reconstruction Volume
    this.shadedSurfaceColorFrame = new FusionColorImageFrame((int)depthSize.Width * 2, (int)depthSize.Height * 2);

    // Reset the reconstruction
    this.ResetReconstruction(_currentVolumeCenter);

    _audioManager.Start();
}