/// <summary>
/// Menu handler: runs offline glyph-box detection over the current session's
/// first video/depth streams on a background task, then adds the detected
/// objects on the UI thread.
/// </summary>
private void sessionOfflineModeGlyphDetectToolStripMenuItem_Click(object sender, EventArgs e)
{
    // Glyph-box recognizer over the configured prototype list.
    // NOTE(review): the meaning of the constant 5 is not visible here — presumably a
    // recognizer tuning parameter; confirm against GlyphBoxObjectRecognition.
    IObjectRecogAlgo objectRecognizer = new GlyphBoxObjectRecognition(null, options.prototypeList, 5);
    var objectRecognizerIncluded = new Dictionary<IObjectRecogAlgo, bool>
    {
        [objectRecognizer] = true
    };

    // Lazily load the depth->camera-space mapping table once and cache it.
    if (mappingReader == null)
    {
        mappingReader = new DepthCoordinateMappingReader("coordinateMapping.dat");
    }

    // Hoist the readers: the original re-fetched getDepth(0)/getVideo(0) on every
    // mapping-callback invocation inside the detection loop.
    var videoReader = currentSession.getVideo(0);
    var depthReader = currentSession.getDepth(0);

    // Fire-and-forget is acceptable for a UI action, but the task's exceptions must be
    // observed — otherwise a failure in DetectObjects disappears silently.
    Task.Run(async () =>
    {
        try
        {
            List<Object> detectedObjects = await Utils.DetectObjects(
                "Progress on " + currentSession.sessionName,
                videoReader,
                depthReader,
                new List<IObjectRecogAlgo> { objectRecognizer },
                objectRecognizerIncluded,
                (depthImage, result) => mappingReader.projectDepthImageToCameraSpacePoint(
                    depthImage,
                    depthReader.depthWidth,
                    depthReader.depthHeight,
                    videoReader.frameWidth,
                    videoReader.frameHeight,
                    result));

            // Marshal back to the UI thread before touching controls.
            this.Invoke((MethodInvoker)delegate
            {
                AddDetectedObjects(detectedObjects);
            });
        }
        catch (Exception ex)
        {
            // Surface background failures instead of letting the task swallow them.
            Console.WriteLine("Offline glyph detection failed: " + ex);
        }
    });
}
/// <summary>
/// Button handler: generates the 3D representation of the selected object
/// from the session's depth stream and the saved coordinate mapping.
/// </summary>
private void generate3d_Click(object sender, EventArgs e)
{
    // Current assumption is that the depth field is the first one.
    var depthReader = o.session.getDepth(0);
    if (depthReader == null)
    {
        // No depth stream: nothing to project. The original loaded
        // "coordinateMapping.dat" even in this case and then threw the reader away.
        return;
    }

    var mappingReader = new DepthCoordinateMappingReader("coordinateMapping.dat");
    o.generate3d(depthReader, mappingReader);
}
/// <summary>
/// Projects this object's per-frame 2D location marks into 3D camera space and
/// stores one PolygonLocationMark3D per annotated frame. Only GenType.MANUAL is
/// handled; any other genType is rejected. Prompts the user via MessageBox before
/// generating.
/// </summary>
/// <param name="depthReader"> Source of raw depth frames; indexed by the video frame
/// number as an approximation of the depth frame number. </param>
/// <param name="mappingHelper"> Maps a depth frame to camera-space points addressed
/// in color-image coordinates. </param>
/// <returns> If the object is 3d-generated </returns>
internal virtual bool generate3d(BaseDepthReader depthReader, DepthCoordinateMappingReader mappingHelper)
{
    // Guard clauses: both inputs are required.
    if (depthReader == null)
    {
        Console.WriteLine("depthReader is null ");
        return false;
    }
    if (mappingHelper == null)
    {
        Console.WriteLine("mappingHelper is null ");
        return false;
    }
    switch (this.genType)
    {
        case GenType.MANUAL:
            // Look up the VoxML semantic type; if present, its concavity can refine
            // the projection (see the "Flat" branch below).
            var voxMLReader = VoxMLReader.getDefaultVoxMLReader();
            var voxMLType = voxMLReader.getVoxMLType(this.semanticType);
            string inform = "";
            if (voxMLType.HasValue)
            {
                inform = "Object 3d will be generated based on VoxML semantic type " + voxMLType.Value.pred;
            }
            else
            {
                inform = "There is no corresponding VoxML semantic type. The boundary of objects will be projected directly onto 3d. Do you want to continue?";
            }
            // Ask the user to confirm; declining leaves objectType changed to _3D anyway
            // (see the fall-through after the switch).
            // NOTE(review): returning true with no marks generated on "No" may be
            // unintended — confirm desired behavior.
            var result = System.Windows.Forms.MessageBox.Show(inform, "Generate 3D", System.Windows.Forms.MessageBoxButtons.YesNo);
            if (result == System.Windows.Forms.DialogResult.Yes)
            {
                foreach (var entry in objectMarks)
                {
                    int frameNo = entry.Key;

                    // Mapping depth image
                    // At this point we use video frameNo
                    // It's actually just an approximation for the depth frameNo
                    Point3[,] colorSpaceToCameraSpacePoint = mappingHelper.projectDepthImageToColor(
                        depthReader.readFrame(frameNo),
                        depthReader.getWidth(),
                        depthReader.getHeight(),
                        session.getVideo(videoFile).frameWidth,
                        session.getVideo(videoFile).frameHeight);

                    LocationMark objectMark = entry.Value;

                    // 2D boundary (color-image coords) and its 3D projection, built below.
                    List<PointF> boundary = new List<PointF>();
                    List<Point3> boundary3d = new List<Point3>();

                    // Collect the 2D corner points depending on the concrete mark type:
                    // rectangles contribute their four corners, polygons their vertices.
                    if (this is RectangleObject)
                    {
                        var boundingBox = ((RectangleLocationMark)objectMark).boundingBox;
                        boundary.Add(new PointF(boundingBox.X, boundingBox.Y));
                        boundary.Add(new PointF(boundingBox.X + boundingBox.Width, boundingBox.Y));
                        boundary.Add(new PointF(boundingBox.X, boundingBox.Y + boundingBox.Height));
                        boundary.Add(new PointF(boundingBox.X + boundingBox.Width, boundingBox.Y + boundingBox.Height));
                    }
                    else if (this is PolygonObject)
                    {
                        boundary.AddRange(((PolygonLocationMark2D)objectMark).boundingPolygon);
                    }

                    // Using flat information if possible
                    if (voxMLType.HasValue && voxMLType.Value.concavity == "Flat")
                    {
                        // First algorithm : has completed
                        // Using the majority of points to infer the exact locations
                        // of corner points
                        // Divide the diagonal between any two corners
                        // into noOfInner + 1 sub-segments
                        // int noOfInner = 2;
                        // Create a list of inner points for the surface
                        // by linear combination of corners
                        //List<PointF> innerPoints = new List<PointF>();
                        //for (int i = 0; i < boundary.Count; i++)
                        //    for (int j = i + 1; j < boundary.Count; j++) {
                        //        var start = boundary[i];
                        //        var end = boundary[j];
                        //        for ( int k = 0; k < noOfInner; k ++ )
                        //        {
                        //            var p = new PointF((start.X * (k + 1) + end.X * (noOfInner - k)) / (noOfInner + 1),
                        //                (start.Y * (k + 1) + end.Y * (noOfInner - k)) / (noOfInner + 1));
                        //            innerPoints.Add(p);
                        //        }
                        //    }
                        //// Add the original corner points
                        //innerPoints.AddRange(boundary);

                        int tryDivide = 10;

                        // Second algorithm, work only if one corner point is retrievable:
                        // find the first corner with a valid depth sample and use it as an
                        // anchor for extrapolating corners that fall outside the depth view.
                        PointF anchorCorner = new PointF();
                        Point3 anchorPoint = nullCameraSpacePoint;
                        foreach (PointF p in boundary)
                        {
                            Point3 cameraSpacePoint = getCameraSpacePoint(colorSpaceToCameraSpacePoint, p);
                            if (cameraSpacePoint != null && !cameraSpacePoint.Equals(nullCameraSpacePoint))
                            {
                                anchorCorner = p;
                                anchorPoint = cameraSpacePoint;
                                break;
                            }
                        }

                        if (!anchorPoint.Equals(nullCameraSpacePoint))
                        {
                            foreach (PointF corner in boundary)
                            {
                                Point3 cameraSpacePoint = getCameraSpacePoint(colorSpaceToCameraSpacePoint, corner);
                                // If point p is out of the depth view
                                if (cameraSpacePoint.Equals(nullCameraSpacePoint))
                                {
                                    var start = anchorCorner;
                                    var end = corner;
                                    var added = false;
                                    // For each value of k, try to get the point
                                    // between anchorCorner and corner
                                    // that divide the segment into 1 andk k - 1
                                    for (int k = 2; k < tryDivide; k++)
                                    {
                                        var p = new PointF((start.X + end.X * (k - 1)) / k, (start.Y + end.Y * (k - 1)) / k);
                                        Point3 middleCameraSpacePoint = getCameraSpacePoint(colorSpaceToCameraSpacePoint, p);
                                        if (middleCameraSpacePoint != null && !middleCameraSpacePoint.Equals(nullCameraSpacePoint))
                                        {
                                            // Linear extrapolation from the anchor through the
                                            // sampled interior point — valid because the surface
                                            // is assumed flat.
                                            var inferred_p = new Point3()
                                            {
                                                X = anchorPoint.X + (middleCameraSpacePoint.X - anchorPoint.X) * k,
                                                Y = anchorPoint.Y + (middleCameraSpacePoint.Y - anchorPoint.Y) * k,
                                                Z = anchorPoint.Z + (middleCameraSpacePoint.Z - anchorPoint.Z) * k,
                                            };
                                            boundary3d.Add(inferred_p);
                                            added = true;
                                            break;
                                        }
                                    }
                                    // If that doesn't work
                                    if (!added)
                                    {
                                        boundary3d.Add(nullCameraSpacePoint);
                                    }
                                }
                                else
                                {
                                    boundary3d.Add(cameraSpacePoint);
                                }
                            }
                        }
                    }
                    else
                    {
                        // Just mapping to 3d points
                        foreach (PointF p in boundary)
                        {
                            Point3 cameraSpacePoint = getCameraSpacePoint(colorSpaceToCameraSpacePoint, p);
                            if (cameraSpacePoint != null && !cameraSpacePoint.Equals(nullCameraSpacePoint))
                            {
                                boundary3d.Add(cameraSpacePoint);
                            }
                            else
                            {
                                boundary3d.Add(nullCameraSpacePoint);
                            }
                        }
                    }

                    // Persist the projected boundary for this frame.
                    set3DBounding(frameNo, new PolygonLocationMark3D(frameNo, boundary3d));
                }
            }
            break;
        default:
            // Only MANUAL generation is supported here.
            return false;
    }
    this.objectType = ObjectType._3D;
    return true;
}