public Image<Gray, byte> Solve(Image<Gray, byte> left, Image<Gray, byte> right)
        {
            using (var disparity16S = new Mat(left.Size, DepthType.Cv16S, 1))
            using (var disparity8U = new Mat(left.Size, DepthType.Cv8U, 1))
            {
                // Compute the raw 16-bit signed disparity map.
                algorithm.Compute(left, right, disparity16S);

                // Find the actual disparity range so it can be stretched to 0..255.
                CvInvoke.MinMaxLoc(disparity16S, ref min, ref max, ref minPosition, ref maxPosition);

                // Map [Min, Max] onto [0, 255]; the offset term shifts Min to zero
                // so the lowest (possibly negative) disparities do not simply saturate.
                disparity16S.ConvertTo(disparity8U, DepthType.Cv8U, 255.0 / (Max - Min), -Min * 255.0 / (Max - Min));

                return disparity8U.ToImage<Gray, byte>();
            }
        }
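For context, a minimal call-site sketch. The enclosing class name `StereoBmSolver` and the image paths are placeholders; the sketch only assumes `Solve` belongs to a class whose `algorithm` field is a stereo matcher such as `Emgu.CV.StereoBM`.

using Emgu.CV;
using Emgu.CV.Structure;

// Hypothetical wiring; StereoBmSolver stands in for the class that owns Solve().
var solver = new StereoBmSolver();
using (var left = new Image<Gray, byte>("left.png"))
using (var right = new Image<Gray, byte>("right.png"))
using (Image<Gray, byte> disparity = solver.Solve(left, right))
{
    disparity.Save("disparity.png");
}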
Example #2
        /// <summary>
        /// Update the motion history with the specific image and the specific timestamp
        /// </summary>
        /// <param name="foregroundMask">The foreground of the image to be added to history</param>
        /// <param name="timestamp">The time when the image is captured</param>
        public void Update(Mat foregroundMask, DateTime timestamp)
        {
            _lastTime = timestamp;
            TimeSpan ts = _lastTime.Subtract(_initTime);

            if (_mhi == null)
            {
                // Allocate the motion history image and zero it explicitly,
                // since a freshly created Mat is not guaranteed to be initialized.
                _mhi = new Mat(foregroundMask.Rows, foregroundMask.Cols, DepthType.Cv32F, 1);
                _mhi.SetTo(new MCvScalar(0));
            }

            // Fold the new silhouette into the motion history at the current timestamp.
            CvInvoke.UpdateMotionHistory(foregroundMask, _mhi, ts.TotalSeconds, _mhiDuration);

            // Map the floating-point history onto 0..255 for the 8-bit mask,
            // so the most recent motion appears brightest.
            double scale = 255.0 / _mhiDuration;
            _mhi.ConvertTo(_mask, DepthType.Cv8U, scale, (_mhiDuration - ts.TotalSeconds) * scale);

            // Compute the gradient orientation of the motion history image.
            CvInvoke.CalcMotionGradient(_mhi, _mask, _orientation, _maxTimeDelta, _minTimeDelta);
        }
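A minimal driver-loop sketch for this Update method. `MotionHistoryDetector` is a stand-in name for the enclosing class; the background subtractor is a standard Emgu CV one (in Emgu CV 3.x it lives in Emgu.CV.VideoSurveillance, in 4.x in Emgu.CV).

using System;
using Emgu.CV;
using Emgu.CV.VideoSurveillance;

// Hypothetical loop: feed each frame's foreground mask into the motion history.
var motionHistory = new MotionHistoryDetector();
var subtractor = new BackgroundSubtractorMOG2();
using (var capture = new VideoCapture(0))
using (var frame = new Mat())
using (var foregroundMask = new Mat())
{
    while (capture.Read(frame) && !frame.IsEmpty)
    {
        subtractor.Apply(frame, foregroundMask);
        motionHistory.Update(foregroundMask, DateTime.Now);
    }
}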
Example #3
      /// <summary>
      /// Add a plot of the 1D histogram. You should call the Refresh() function to update the control after all modification is complete.
      /// </summary>
      /// <param name="name">The name of the histogram</param>
      /// <param name="color">The drawing color</param>
      /// <param name="histogram">The 1D histogram to be drawn</param>
      /// <param name="binSize">The size of the bin</param>
      /// <param name="ranges">The ranges</param>
      public void AddHistogram(String name, Color color, Mat histogram, int binSize, float[] ranges)
      {
         //Debug.Assert(histogram.Dimension == 1, Properties.StringTable.Only1DHistogramSupported);

         GraphPane pane = new GraphPane();
         // Set the Title
         pane.Title.Text = name;
         pane.XAxis.Title.Text = Properties.StringTable.Value;
         pane.YAxis.Title.Text = Properties.StringTable.Count;

         #region draw the histogram
         RangeF range = new RangeF(ranges[0], ranges[1]);
         
         // Compute the left edge of each bin along the X axis.
         float step = (range.Max - range.Min) / binSize;
         float start = range.Min;
         double[] bin = new double[binSize];
         for (int binIndex = 0; binIndex < binSize; binIndex++)
         {
            bin[binIndex] = start;
            start += step;
         }

         // Pin the managed array and wrap it in a Matrix<double> header so the
         // histogram values can be copied straight into it without an extra copy.
         double[] binVal = new double[histogram.Size.Height];
         GCHandle handle = GCHandle.Alloc(binVal, GCHandleType.Pinned);
         using (Matrix<double> m = new Matrix<double>(binVal.Length, 1, handle.AddrOfPinnedObject(), sizeof(double)))
         {
            histogram.ConvertTo(m, DepthType.Cv64F);
            PointPairList pointList = new PointPairList(
               bin,
               binVal);

            pane.AddCurve(name, pointList, color);
         }
         handle.Free();
         
         #endregion

         zedGraphControl1.MasterPane.Add(pane);
      }
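A sketch of feeding this control. `histogramBox` is an assumed instance of the control hosting AddHistogram; the image path is a placeholder.

using System.Drawing;
using Emgu.CV;
using Emgu.CV.Structure;
using Emgu.CV.Util;

// Hypothetical usage: plot the 256-bin gray-level histogram of an image.
float[] ranges = { 0f, 256f };
using (var gray = new Image<Gray, byte>("input.png"))
using (var images = new VectorOfMat(gray.Mat))
using (var hist = new Mat())
{
    CvInvoke.CalcHist(images, new[] { 0 }, null, hist, new[] { 256 }, ranges, false);
    histogramBox.AddHistogram("Gray level", Color.Blue, hist, 256, ranges);
    histogramBox.Refresh();   // per the summary, refresh after all modifications
}

Example #4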
        public Image<Gray, byte> Solve(Image<Gray, byte> left, Image<Gray, byte> right)
        {
            var size = left.Size;

            // The inputs are 8-bit grayscale; only the disparity buffers are 16-bit signed.
            using (var leftGpu = new GpuMat(left.Rows, left.Cols, DepthType.Cv8U, 1))
            using (var rightGpu = new GpuMat(left.Rows, left.Cols, DepthType.Cv8U, 1))
            using (var disparityGpu = new GpuMat(left.Rows, left.Cols, DepthType.Cv16S, 1))
            using (var filteredDisparityGpu = new GpuMat(left.Rows, left.Cols, DepthType.Cv16S, 1))
            using (var filteredDisparity16S = new Mat(size, DepthType.Cv16S, 1))
            using (var filteredDisparity8U = new Mat(size, DepthType.Cv8U, 1))
            {
                leftGpu.Upload(left.Mat);
                rightGpu.Upload(right.Mat);

                // Compute the raw disparity on the GPU, then refine it with the
                // disparity bilateral filter guided by the left image.
                algorithm.FindStereoCorrespondence(leftGpu, rightGpu, disparityGpu);
                filter.Apply(disparityGpu, leftGpu, filteredDisparityGpu);

                filteredDisparityGpu.Download(filteredDisparity16S);

                // Find the actual disparity range so it can be stretched to 0..255.
                CvInvoke.MinMaxLoc(filteredDisparity16S, ref min, ref max, ref minPosition, ref maxPosition);

                // Map [Min, Max] onto [0, 255]; the offset term shifts Min to zero.
                filteredDisparity16S.ConvertTo(filteredDisparity8U, DepthType.Cv8U, 255.0 / (Max - Min), -Min * 255.0 / (Max - Min));

                return filteredDisparity8U.ToImage<Gray, byte>();
            }
        }
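A sketch of the CUDA objects this variant presumably holds, plus a call site. `CudaStereoSolver` and the parameter values are assumptions; `CudaStereoBM` and `DisparityBilateralFilter` are the Emgu.CV.Cuda types matching the `FindStereoCorrespondence` and `Apply` calls above.

using Emgu.CV;
using Emgu.CV.Cuda;
using Emgu.CV.Structure;

// Hypothetical setup: a GPU block matcher for the raw disparity and a
// disparity bilateral filter that refines it against the left image.
var algorithm = new CudaStereoBM(64, 19);            // numDisparities, blockSize
var filter = new DisparityBilateralFilter(64, 5, 1); // ndisp, radius, iters

var solver = new CudaStereoSolver();                 // hypothetical enclosing class
using (var left = new Image<Gray, byte>("left.png"))
using (var right = new Image<Gray, byte>("right.png"))
using (var disparity = solver.Solve(left, right))
{
    disparity.Save("filtered_disparity.png");
}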
Example #5
        /// <summary>
        /// TODO
        /// Perform a forward pass on the image using the current <see cref="m_interpreter"/>.
        /// Assumes a PoseNetEstimator instance built with the argument-taking constructor.
        /// Process:
        ///     -> convert inputImage to float32 precision (values 0...1)
        ///     -> resize inputImage to the input tensor dimensions and load it into the input tensor
        ///     -> invoke the interpreter to perform a forward pass; get heatmaps and offsets in the output tensor dimensions
        ///     -> get keypoints from the heatmaps in the output tensor dimensions
        ///     -> translate each keypoint to input tensor coordinates using the offsets
        ///     -> rescale the keypoints to the inputImage dimensions
        ///     -> return the keypoints
        /// </summary>
        /// <param name="inputImage">An RGB image. It will be resized during inference to match the network's input size.</param>
        /// <returns>
        /// On error: an empty array of <see cref="Keypoint"/>.
        /// On success: an array of <see cref="m_numberOfKeypoints"/> <see cref="Keypoint"/> representing human body parts, ordered as <see cref="BodyParts"/>.
        ///             If the probability <see cref="Keypoint.score"/> of a <see cref="Keypoint"/> is too low (hardcoded threshold for now, see below),
        ///             its position is set to Point(-1,-1). This value can be used in conditional statements.
        ///             The keypoints are returned in the dimensions of the inputImage; there is no need to rescale the result for display.
        /// </returns>
        public Keypoint[] Inference(Emgu.CV.Mat inputImage)
        {
            // 0 - Forward pass
            // Is the input empty?
            if (inputImage.IsEmpty)
            {
                Console.WriteLine("ERROR:");
                Console.WriteLine("Empty image given to Inference in PoseNetEstimator. " +
                                  "Returning new Keypoint[0] - an empty array of Keypoints.");
                return new Keypoint[0];
            }

            int inputWidth  = inputImage.Cols;
            int inputHeight = inputImage.Rows;

            // Normalize to float32 in [0, 1], as the network expects.
            if (inputImage.Depth != Emgu.CV.CvEnum.DepthType.Cv32F)
            {
                inputImage.ConvertTo(inputImage, Emgu.CV.CvEnum.DepthType.Cv32F);
                inputImage /= 255;
            }

            // Note: inputImage is intentionally not wrapped in a `using` block;
            // disposing it here would invalidate the caller's Mat.
            try
            {
                // Load the image into the interpreter using the ReadTensorFromMatBgr utility.
                Utils.ReadTensorFromMatBgr(
                    image: inputImage,
                    tensor: m_inputTensor,
                    inputHeight: m_inputTensor.Dims[1],
                    inputWidth: m_inputTensor.Dims[2]
                    );

                // Actually perform the inference.
                m_interpreter.Invoke();
            }
            catch
            {
                Console.WriteLine("ERROR:");
                Console.WriteLine("Unable to invoke the interpreter in DeepNetworkLite.");
                return new Keypoint[0];
            }

            // 1 - Wrap the 3D output tensors in Emgu.CV.Mat headers over the tensor data.
            // For a packed NHWC tensor, the row stride is cols * channels * sizeof(float).
            Emgu.CV.Mat heatmaps_mat              = new Emgu.CV.Mat();
            Emgu.CV.Mat offsets_mat               = new Emgu.CV.Mat();
            Emgu.CV.Mat displacement_forward_mat  = new Emgu.CV.Mat();
            Emgu.CV.Mat displacement_backward_mat = new Emgu.CV.Mat();
            try
            {
                heatmaps_mat = new Mat(
                    m_outputTensors[0].Dims[1], m_outputTensors[0].Dims[2],
                    DepthType.Cv32F, m_outputTensors[0].Dims[3], m_outputTensors[0].DataPointer,
                    sizeof(float) * m_outputTensors[0].Dims[2] * m_outputTensors[0].Dims[3]);
                offsets_mat = new Mat(
                    m_outputTensors[1].Dims[1], m_outputTensors[1].Dims[2],
                    DepthType.Cv32F, m_outputTensors[1].Dims[3], m_outputTensors[1].DataPointer,
                    sizeof(float) * m_outputTensors[1].Dims[2] * m_outputTensors[1].Dims[3]);
                displacement_forward_mat = new Mat(
                    m_outputTensors[2].Dims[1], m_outputTensors[2].Dims[2],
                    DepthType.Cv32F, m_outputTensors[2].Dims[3], m_outputTensors[2].DataPointer,
                    sizeof(float) * m_outputTensors[2].Dims[2] * m_outputTensors[2].Dims[3]);
                displacement_backward_mat = new Mat(
                    m_outputTensors[3].Dims[1], m_outputTensors[3].Dims[2],
                    DepthType.Cv32F, m_outputTensors[3].Dims[3], m_outputTensors[3].DataPointer,
                    sizeof(float) * m_outputTensors[3].Dims[2] * m_outputTensors[3].Dims[3]);
            }
            catch
            {
                Console.WriteLine("Unable to read heatmaps or offsets in PoseNetEstimator. " +
                                  "Returning new Keypoint[0] - an empty array of Keypoints.");
                return new Keypoint[0];
            }

            // 2 - Split the channels and store them in vectors of Mat.
            if (!heatmaps_mat.IsEmpty && !offsets_mat.IsEmpty)
            {
                Emgu.CV.CvInvoke.Split(heatmaps_mat, m_heatmapsChannels);
                Emgu.CV.CvInvoke.Split(offsets_mat, m_offsetsChannels);
                Emgu.CV.CvInvoke.Split(displacement_forward_mat, m_forwardDisplacementChannels);
                Emgu.CV.CvInvoke.Split(displacement_backward_mat, m_backwardDisplacementChannels);
            }
            else
            {
                Console.WriteLine("Empty heatmaps_mat or offsets_mat in Inference from PoseNetEstimator. " +
                                  "Returning new Keypoint[0] - an empty array of Keypoints.");
                return new Keypoint[0];
            }

            // 3 - Estimate the body pose and rescale the keypoints back to the input image dimensions.
            //singleBodyPoseEstimation();
            improveSingleBodyPoseEstimation();
            rescaleKeypointsPosition(inputWidth, inputHeight);

            return m_keypoints;
        }
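A call-site sketch for Inference. The constructor arguments, file paths, and the `position` field of Keypoint are assumptions inferred from the doc comment; note that Inference converts its input Mat to float in place, so drawing happens on a clone.

using System.Drawing;
using Emgu.CV;
using Emgu.CV.CvEnum;
using Emgu.CV.Structure;

// Hypothetical usage; the model path and keypoint count are placeholders.
var estimator = new PoseNetEstimator("posenet.tflite", 17);
using (Mat frame = CvInvoke.Imread("person.jpg", ImreadModes.Color))
using (Mat display = frame.Clone())   // keep an 8-bit copy for drawing
{
    Keypoint[] keypoints = estimator.Inference(frame);
    foreach (Keypoint k in keypoints)
    {
        // Low-confidence keypoints are flagged as (-1, -1) by Inference.
        if (k.position.X >= 0 && k.position.Y >= 0)
            CvInvoke.Circle(display, k.position, 4, new MCvScalar(0, 255, 0), -1);
    }
    CvInvoke.Imwrite("pose.jpg", display);
}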