Example No. 1
        public void Initialize(BaseCameraApplication capture)
        {
            DepthCameraFrame frame = capture.GetDevices()[0].GetDepthImage();
            try
            {
                StreamReader reader = new StreamReader(new MemoryStream(Perceptual.Foundation.Properties.Resources.AdaptiveTemporalFilter));
                string text = reader.ReadToEnd();

                // Prepend the device-specific preprocessor code and the HISTORY_SIZE define, then compile the embedded filter source.
                CLCalc.Program.Compile(capture.GetPrimaryDevice().GetPreprocessCode() + "\n#define HISTORY_SIZE 0\n" + text);
                reader.Close();

            }
            catch (Exception ex)
            {
                System.Console.WriteLine(ex.Message);
                System.Console.WriteLine("Could not find DoNothingFilter.cl");
                Environment.Exit(1);
            }
            updateBuffer = new CLCalc.Program.Kernel("UpdateFilter");
            copyToTemporalBuffer = new CLCalc.Program.Kernel("CopyToTemporalBuffer");
            depthBuffer = CLCalc.Program.Variable.Create(new ComputeBuffer<float>(CLCalc.Program.Context, ComputeMemoryFlags.ReadWrite, 4 * frame.Width * frame.Height));
            depthCopyBuffer = new CLCalc.Program.Variable(new float[4 * frame.Width * frame.Height]);

            depthImage = new CLCalc.Program.Image2D(new float[frame.Height * frame.Width * 4], frame.Width, frame.Height);
            uvImage = new CLCalc.Program.Image2D(new float[frame.Height * frame.Width * 4], frame.Width, frame.Height);

            kernelCopyImage = new CLCalc.Program.Kernel("CopyImage");
        }
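The Process method that pairs with this Initialize is not shown on this page. The sketch below is only an illustration of how the two compiled kernels could be driven each frame; the kernel names come from the code above, but the argument lists and work sizes are assumptions.

        // Hypothetical per-frame update; kernel names come from the Initialize above,
        // but the argument order is assumed and may not match the real AdaptiveTemporalFilter.cl.
        public void Process(BaseCameraApplication capture)
        {
            DepthCameraFrame frame = capture.GetPrimaryDevice().GetDepthImage();
            // Push the newest depth frame into the temporal history buffer.
            copyToTemporalBuffer.Execute(new CLCalc.Program.MemoryObject[] { frame.GetMemoryObject(), depthBuffer },
                new int[] { frame.Width, frame.Height });
            // Blend the history into the filtered depth image and texture-coordinate image.
            updateBuffer.Execute(new CLCalc.Program.MemoryObject[] { depthBuffer, depthImage, uvImage },
                new int[] { frame.Width, frame.Height });
        }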
Example No. 2
        public void Initialize(BaseCameraApplication app)
        {
            try
            {
                CLCalc.Program.Compile(app.GetPrimaryDevice().GetPreprocessCode() + src);
            }
            catch (BuildProgramFailureComputeException ex)
            {
                System.Console.WriteLine(ex.Message);
                Environment.Exit(1);
            }
            int width = app.GetPrimaryDevice().GetDepthImage().Width;

            int height = app.GetPrimaryDevice().GetDepthImage().Height;
            pointBuffer = CLCalc.Program.Variable.Create(new ComputeBuffer<float>(CLCalc.Program.Context, ComputeMemoryFlags.ReadWrite | ComputeMemoryFlags.CopyHostPointer, points = new float[width * height * 4]));
            colorBuffer = CLCalc.Program.Variable.Create(new ComputeBuffer<float>(CLCalc.Program.Context, ComputeMemoryFlags.ReadWrite | ComputeMemoryFlags.CopyHostPointer, colors = new float[width * height * 4]));

            kernelCopyImage = new CLCalc.Program.Kernel("CopyToPoinCloud");
        }
Example No. 3
 public void Process(BaseCameraApplication app)
 {
     bbox = app.GetPrimaryDevice().GetBoundingBox();
     GroundPlane = app.GetPrimaryDevice().GetGroundPlane();
     GroundPlane.Invert();
 }
Example No. 4
        public void Process(BaseCameraApplication capture)
        {
            const double ScaleFactor = 1.0850;
            DepthCameraFrame depthFrame = capture.GetPrimaryDevice().GetDepthImage();
            ColorCameraFrame rgbFrame = capture.GetPrimaryDevice().GetColorImage();
            TextureMapFrame uvFrame = capture.GetPrimaryDevice().GetTextureImage();
            kernelCopyIRImage.Execute(new CLCalc.Program.MemoryObject[] { depthFrame.GetMemoryObject(), irImageBuffer }, width * height);
            CLCalc.Program.CommQueues[CLCalc.Program.DefaultCQ].Read<byte>(((ComputeBuffer<byte>)irImageBuffer.VarPointer), true, 0, width * height, gray.ImageData, null);
            storage.Clear();
            //Use OpenCV for face tracking in IR image. SDK has its own face tracker, but it only operates in RGB. Either could be used for this example.
            CvSeq<CvAvgComp> faces = Cv.HaarDetectObjects(gray, faceCascade, storage, ScaleFactor, 2, 0, new CvSize(40, 40));
            if (faces.Total > 0)
            {
                CvRect face = faces[0].Value.Rect;
                Cv.SetImageROI(gray, face);
                Cv.SetImageROI(dilate, face);
                Cv.SetImageROI(erode, face);
                Cv.SetImageROI(tmp, face);
                //Filter the image to enhance contrast between eyes/face.
                Cv.Dilate(gray, tmp);
                Cv.Dilate(tmp, dilate);
                Cv.Threshold(gray, tmp, 0, 1, ThresholdType.Binary);
                Cv.Erode(gray, erode);
                Cv.Sub(gray, erode, gray);
                Cv.Mul(gray, tmp, gray);
                Cv.SetImageROI(mask, face);
                Cv.SetImageROI(imgLabel, face);
                //Threshold out peaks.
                Cv.Threshold(gray, mask, 128, 255, ThresholdType.Binary);
                blobs.Clear();
                uint result = blobs.Label(mask, imgLabel);
                double minDistLeft = 1E10;
                double minDistRight = 1E10;
                int xCenter = face.Width / 2;
                int yCenter = (int)((face.Height) * 0.35);
                CvPoint center = new CvPoint(xCenter, yCenter);
                CvPoint right = new CvPoint(-1, -1);
                CvPoint left = new CvPoint(-1, -1);

                //Assign blobs to eyes.
                foreach (KeyValuePair<uint, CvBlob> item in blobs)
                {
                    CvBlob b = item.Value;
                    double d = CvPoint.Distance(b.Centroid, center);
                    if (b.Centroid.X < center.X)
                    {
                        if (d < minDistLeft)
                        {
                            minDistLeft = d;
                            right = b.Centroid;
                        }
                    }
                    else
                    {
                        if (d < minDistRight)
                        {
                            minDistRight = d;
                            left = b.Centroid;
                        }
                    }
                }
                if (right.X >= 0 && left.X >= 0)
                {
                    rightEye2D = new int2(right.X + face.X, right.Y + face.Y);
                    leftEye2D = new int2(left.X + face.X, left.Y + face.Y);
                    boundingBox2D = new int4(face.X, face.Y, face.Width, face.Height);
                    //Find the bridge and nose. This was done in OpenCL to leverage read_imagef.
                    kernelFindFaceLandmarks.Execute(new CLCalc.Program.MemoryObject[] { rightEye2D, leftEye2D, boundingBox2D, faceDetectionBuffer, filter.GetDepthImage() }, 1);
                    ReadFaceLandmarksFromBuffer();
                    foundFace = true;
                }
                else
                {
                    foundFace = false;
                }
                Cv.ResetImageROI(gray);
                Cv.ResetImageROI(erode);
                Cv.ResetImageROI(dilate);
                Cv.ResetImageROI(tmp);
            }
            else
            {
                foundFace = false;
                WriteFaceLandmarksToBuffer();
            }
        }
Example No. 5
        public void Initialize(BaseCameraApplication capture)
        {
            DepthCameraFrame depthImage = capture.GetPrimaryDevice().GetDepthImage();
            this.width = depthImage.Width;
            this.height = depthImage.Height;
            this.filter = ((AdaptiveTemporalFilter)capture.GetImageFilter());
            CvSize sz = new CvSize(depthImage.Width, depthImage.Height);
            gray = new IplImage(sz, BitDepth.U8, 1);
            erode = new IplImage(sz, BitDepth.U8, 1);
            dilate = new IplImage(sz, BitDepth.U8, 1);
            tmp = new IplImage(sz, BitDepth.U8, 1);
            mask = new IplImage(sz, BitDepth.U8, 1);
            imgLabel = new IplImage(sz, BitDepth.F32, 1);
            faceDetectionBuffer = CLCalc.Program.Variable.Create(new ComputeBuffer<FaceLandmarks>(CLCalc.Program.Context, ComputeMemoryFlags.ReadWrite, 1));
            try
            {
                CLCalc.Program.Compile(capture.GetPrimaryDevice().GetPreprocessCode() + src);

            }
            catch (BuildProgramFailureComputeException ex)
            {
                System.Console.WriteLine(ex.Message);
                Environment.Exit(1);
            }
            irImageBuffer = CLCalc.Program.Variable.Create(new ComputeBuffer<byte>(CLCalc.Program.Context, ComputeMemoryFlags.ReadWrite | ComputeMemoryFlags.CopyHostPointer, ir = new byte[width * height]));
            kernelCopyIRImage = new CLCalc.Program.Kernel("CopyIRImage");
            kernelFindFaceLandmarks = new CLCalc.Program.Kernel("FindFaceLandmarks");
        }
Example No. 6
        public void Process(BaseCameraApplication app)
        {
            CameraDataFilter filter = app.GetImageFilter();
            kernelCopyImage.Execute(new CLCalc.Program.MemoryObject[] {
                app.GetPrimaryDevice().GetDepthImage().GetMemoryObject(),
                filter.GetTextureImage(),
                filter.GetColorImage(),
                pointBuffer,
                colorBuffer}, new int[] { filter.GetDepthImage().Width, filter.GetDepthImage().Height });
            pointBuffer.ReadFromDeviceTo(points);
            int vertexCount = points.Length / 4;
            /*
            for (int i = 0; i < points.Length; i += 4)
            {

                float4 pt = new float4(points[i], points[i + 1], points[i + 2], points[i + 3]);
                if (pt.z>200.0&&pt.z < 2000.0f&&pt.w>100.0f)
                {
                    vertexCount++;
                }
            }
             * */
            colorBuffer.ReadFromDeviceTo(colors);
            string path = outputDir + "pointcloud" + counter.ToString("0000") + ".xyz";
            MeshReaderWriter writer = new MeshReaderWriter(vertexCount, 0, path);
            float minDepth = app.GetPrimaryDevice().GetMinDepth();
            float maxDepth = app.GetPrimaryDevice().GetMaxDepth();
            for (int i = 0; i < points.Length; i += 4)
            {
                float4 pt = new float4(points[i], points[i + 1], points[i + 2], points[i + 3]);
                float4 rgb = new float4(colors[i], colors[i + 1], colors[i + 2], colors[i + 3]);
                if (pt.z > minDepth && pt.z < maxDepth && pt.w > 100.0f)
                {
                    writer.AddPoint(pt, rgb);
                }
                else
                {
                    writer.AddPoint(new float4(), new float4());
                }
            }
            writer.Close();
            counter++;
        }
Example No. 7
 public void Process(BaseCameraApplication app)
 {
     if (Visible || WireFrame)
     {
         DepthCameraFrame depthFrame = app.GetPrimaryDevice().GetDepthImage();
         ColorCameraFrame colorFrame = app.GetPrimaryDevice().GetColorImage();
         TextureMapFrame textureFrame = app.GetPrimaryDevice().GetTextureImage();
         CameraDataFilter filter = (CameraDataFilter)app.GetImageFilter();
         CLGLInteropFunctions.AcquireGLElements(new CLCalc.Program.MemoryObject[] { positionBuffer, colorBuffer, normalBuffer });
         CLCalc.Program.MemoryObject[] args = new CLCalc.Program.MemoryObject[] {
          app.GetPrimaryDevice().GetBoundingBox(),filter.GetDepthImage(),filter.GetTextureImage(),colorFrame.GetMemoryObject(),positionBuffer,colorBuffer,normalBuffer};
         kernelCopyImage.Execute(args, new int[] { depthFrame.Width, depthFrame.Height });
         CLGLInteropFunctions.ReleaseGLElements(new CLCalc.Program.MemoryObject[] { positionBuffer, colorBuffer, normalBuffer });
     }
 }
Example No. 8
        public void Initialize(BaseCameraApplication app, OpenTKWrapper.CLGLInterop.GLAdvancedRender glw)
        {
            try
            {
                CLCalc.Program.Compile(app.GetPrimaryDevice().GetPreprocessCode() + src);
            }
            catch (BuildProgramFailureComputeException ex)
            {
                System.Console.WriteLine(ex.Message);
                Environment.Exit(1);
            }
            kernelCopyImage = new CLCalc.Program.Kernel("CopyImageToMesh");
            BoundingBox bbox = app.GetPrimaryDevice().GetBoundingBox();
            int w = app.GetPrimaryDevice().GetDepthImage().Width;
            int h = app.GetPrimaryDevice().GetDepthImage().Height;
            int size = w * h;
            ColorData = new float[16 * size];
            PositionData = new float[16 * size];
            NormalData = new float[12 * size];
            for (int i = 0; i < size; i++)
            {
                PositionData[4 * i] = (i / w) - w / 2;
                PositionData[4 * i + 2] = i % w - h / 2;
                PositionData[4 * i + 1] = i % 7;
                PositionData[4 * i + 3] = 1.0f;

            }

            GL.GenBuffers(3, QuadMeshBufs);

            GL.BindBuffer(BufferTarget.ArrayBuffer, QuadMeshBufs[0]);
            GL.BufferData(BufferTarget.ArrayBuffer, (IntPtr)(ColorData.Length * sizeof(float)), ColorData, BufferUsageHint.StreamDraw);

            GL.BindBuffer(BufferTarget.ArrayBuffer, QuadMeshBufs[1]);
            GL.BufferData(BufferTarget.ArrayBuffer, (IntPtr)(PositionData.Length * sizeof(float)), PositionData, BufferUsageHint.StreamDraw);//Notice STREAM DRAW

            GL.BindBuffer(BufferTarget.ArrayBuffer, QuadMeshBufs[2]);
            GL.BufferData(BufferTarget.ArrayBuffer, (IntPtr)(NormalData.Length * sizeof(float)), NormalData, BufferUsageHint.StreamDraw);//Notice STREAM DRAW

            colorBuffer = new CLCalc.Program.Variable(QuadMeshBufs[0], typeof(float));
            positionBuffer = new CLCalc.Program.Variable(QuadMeshBufs[1], typeof(float));
            normalBuffer = new CLCalc.Program.Variable(QuadMeshBufs[2], typeof(float));

            GL.BindBuffer(BufferTarget.ArrayBuffer, 0);
            GL.Enable(EnableCap.Blend);
        }
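The draw pass for the quad mesh built here is not included in these examples. The following sketch shows one way the three VBOs could be rendered with OpenTK's legacy client-state vertex arrays; the vertex layout is inferred from the allocations above, and the quad topology is an assumption.

        // Hypothetical draw call for the quad mesh; buffer layout (4 floats per vertex for
        // position and color, 3 for normals) follows the allocations in Initialize above.
        public void Draw()
        {
            GL.EnableClientState(ArrayCap.ColorArray);
            GL.EnableClientState(ArrayCap.VertexArray);
            GL.EnableClientState(ArrayCap.NormalArray);

            GL.BindBuffer(BufferTarget.ArrayBuffer, QuadMeshBufs[0]);
            GL.ColorPointer(4, ColorPointerType.Float, 0, IntPtr.Zero);
            GL.BindBuffer(BufferTarget.ArrayBuffer, QuadMeshBufs[1]);
            GL.VertexPointer(4, VertexPointerType.Float, 0, IntPtr.Zero);
            GL.BindBuffer(BufferTarget.ArrayBuffer, QuadMeshBufs[2]);
            GL.NormalPointer(NormalPointerType.Float, 0, IntPtr.Zero);

            // 16 floats per depth pixel = 4 vertices with 4 components each, drawn as quads.
            GL.DrawArrays(BeginMode.Quads, 0, PositionData.Length / 4);

            GL.DisableClientState(ArrayCap.NormalArray);
            GL.DisableClientState(ArrayCap.VertexArray);
            GL.DisableClientState(ArrayCap.ColorArray);
            GL.BindBuffer(BufferTarget.ArrayBuffer, 0);
        }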
Example No. 9
 public void Process(BaseCameraApplication app)
 {
     this.GroundPlane = app.GetPrimaryDevice().GetGroundPlane();
 }
Example No. 10
 public void Initialize(BaseCameraApplication app, GLAdvancedRender glw)
 {
     minDepth = app.GetPrimaryDevice().GetMinDepth();
     maxDepth = app.GetPrimaryDevice().GetMaxDepth();
     bbox = app.GetPrimaryDevice().GetBoundingBox();
     screenWidth = glw.GLCtrl.Width;
     screenHeight = glw.GLCtrl.Height;
 }
Example No. 11
        public void Initialize(BaseCameraApplication capture, GLAdvancedRender glw)
        {
            try
            {
                CLCalc.Program.Compile(capture.GetPrimaryDevice().GetPreprocessCode() + src);

            }
            catch (BuildProgramFailureComputeException ex)
            {
                System.Console.WriteLine(ex.Message);
                Environment.Exit(1);
            }
            DepthCameraFrame frame = capture.GetDevices()[0].GetDepthImage();
            kernelCopyBmp = new CLCalc.Program.Kernel("CopyImageToPointCloud");
            int size = frame.Width * frame.Height;
            bufs = new int[4];

            ColorData = new float[4 * size];
            PositionData = new float[4 * size];

            GL.GenBuffers(2, bufs);

            GL.BindBuffer(BufferTarget.ArrayBuffer, bufs[0]);
            GL.BufferData(BufferTarget.ArrayBuffer, (IntPtr)(ColorData.Length * sizeof(float)), ColorData, BufferUsageHint.StreamDraw);

            GL.BindBuffer(BufferTarget.ArrayBuffer, bufs[1]);
            GL.BufferData(BufferTarget.ArrayBuffer, (IntPtr)(PositionData.Length * sizeof(float)), PositionData, BufferUsageHint.StreamDraw);//Notice STREAM DRAW
            GL.Enable(EnableCap.PointSmooth);
            GL.PointSize(4.0f);
            positions = new CLCalc.Program.Variable(bufs[1], typeof(float));
            colors = new CLCalc.Program.Variable(bufs[0], typeof(float));
        }
Example No. 12
        public void Process(BaseCameraApplication capture)
        {
            if (Visisble)
            {
                DepthCameraFrame depthFrame = capture.GetPrimaryDevice().GetDepthImage();
                ColorCameraFrame colorFrame = capture.GetPrimaryDevice().GetColorImage();
                TextureMapFrame textureFrame = capture.GetPrimaryDevice().GetTextureImage();
                if (depthFrame != null && colorFrame != null)
                {

                    CLCalc.Program.MemoryObject[] args = new CLCalc.Program.MemoryObject[] { depthFrame.GetMemoryObject(), textureFrame.GetMemoryObject(), colorFrame.GetMemoryObject(), positions, colors };
                    CLGLInteropFunctions.AcquireGLElements(args);
                    kernelCopyBmp.Execute(args, new int[] { depthFrame.Width, depthFrame.Height });
                    CLGLInteropFunctions.ReleaseGLElements(args);
                }
            }
        }
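How these point-cloud VBOs are finally rendered is not shown either. A minimal sketch, again assuming the legacy OpenTK vertex-array path and the buffer layout from Example No. 11, might look like this:

        // Hypothetical point-cloud draw; bufs[1] holds positions and bufs[0] holds colors,
        // as allocated in the Initialize shown in Example No. 11.
        public void Draw()
        {
            GL.EnableClientState(ArrayCap.ColorArray);
            GL.EnableClientState(ArrayCap.VertexArray);

            GL.BindBuffer(BufferTarget.ArrayBuffer, bufs[0]);
            GL.ColorPointer(4, ColorPointerType.Float, 0, IntPtr.Zero);
            GL.BindBuffer(BufferTarget.ArrayBuffer, bufs[1]);
            GL.VertexPointer(4, VertexPointerType.Float, 0, IntPtr.Zero);

            // One point per depth pixel, 4 floats per position.
            GL.DrawArrays(BeginMode.Points, 0, PositionData.Length / 4);

            GL.DisableClientState(ArrayCap.VertexArray);
            GL.DisableClientState(ArrayCap.ColorArray);
            GL.BindBuffer(BufferTarget.ArrayBuffer, 0);
        }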