Example #1
0
    /// <summary>
    /// Process video and motion frames doing further post processing after
    /// performed motion detection.
    /// </summary>
    /// 
    /// <param name="videoFrame">Original video frame.</param>
    /// <param name="motionFrame">Motion frame provided by motion detection
    /// algorithm (see <see cref="IMotionDetector"/>).</param>
    /// 
    /// <remarks><para>Processes provided motion frame and highlights motion areas
    /// on the original video frame with <see cref="HighlightColor">specified color</see>.</para>
    /// </remarks>
    ///
    public unsafe void ProcessFrame(UnmanagedImage videoFrame, UnmanagedImage motionFrame)
    {
      int frameWidth = videoFrame.Width;
      int frameHeight = videoFrame.Height;

      // both frames must have identical dimensions
      if ((motionFrame.Width != frameWidth) || (motionFrame.Height != frameHeight))
        return;

      byte* videoPtr = (byte*)videoFrame.ImageData.ToPointer();
      byte* motionPtr = (byte*)motionFrame.ImageData.ToPointer();

      // bytes to skip at the end of each row (stride padding)
      int videoPad = videoFrame.Stride - frameWidth * 3;
      int motionPad = motionFrame.Stride - frameWidth;

      byte r = highlightColor.R;
      byte g = highlightColor.G;
      byte b = highlightColor.B;

      for (int y = 0; y < frameHeight; y++)
      {
        for (int x = 0; x < frameWidth; x++, motionPtr++, videoPtr += 3)
        {
          // paint only every other pixel (checker pattern) inside motion areas
          bool onCheckerCell = ((x + y) & 1) == 0;

          if (onCheckerCell && (*motionPtr != 0))
          {
            videoPtr[RGB.R] = r;
            videoPtr[RGB.G] = g;
            videoPtr[RGB.B] = b;
          }
        }
        videoPtr += videoPad;
        motionPtr += motionPad;
      }
    }
    /// <summary>
    /// Process new video frame.
    /// </summary>
    /// 
    /// <param name="videoFrame">Video frame to process (detect motion in).</param>
    /// 
    /// <remarks><para>Processes new frame from video source and detects motion in it.</para>
    /// 
    /// <para>Check <see cref="MotionLevel"/> property to get information about amount of motion
    /// (changes) in the processed frame.</para>
    /// 
    /// <para>The very first call only allocates internal buffers and stores the frame
    /// as the previous frame; motion is actually detected starting from the second call.</para>
    /// </remarks>
    /// 
    public unsafe void ProcessFrame(UnmanagedImage videoFrame)
    {
      lock (sync)
      {
        // check previous frame: null means this is the first frame ever seen
        if (previousFrame == null)
        {
          // save image dimension
          width = videoFrame.Width;
          height = videoFrame.Height;

          // allocate memory for previous and current frames
          previousFrame = UnmanagedImage.Create(width, height, PixelFormat.Format8bppIndexed);
          motionFrame = UnmanagedImage.Create(width, height, PixelFormat.Format8bppIndexed);

          // frame size in bytes; note it includes stride padding, so the
          // per-byte loops below also visit the padding bytes
          frameSize = motionFrame.Stride * height;

          // temporary buffer, used only when noise suppression is enabled
          if (suppressNoise)
          {
            tempFrame = UnmanagedImage.Create(width, height, PixelFormat.Format8bppIndexed);
          }

          // convert source frame to grayscale
          Grayscale.CommonAlgorithms.BT709.Apply(videoFrame, previousFrame);

          return;
        }

        // silently ignore frames whose size differs from the first frame
        if ((videoFrame.Width != width) || (videoFrame.Height != height))
          return;

        // convert current image to grayscale
        Grayscale.CommonAlgorithms.BT709.Apply(videoFrame, motionFrame);

        // pointers to previous and current frames
        byte* prevFrame = (byte*)previousFrame.ImageData.ToPointer();
        byte* currFrame = (byte*)motionFrame.ImageData.ToPointer();
        // difference value
        int diff;

        // single pass doing three things:
        // 1 - get difference between frames
        // 2 - threshold the difference
        // 3 - copy current frame to previous frame
        for (int i = 0; i < frameSize; i++, prevFrame++, currFrame++)
        {
          // difference
          diff = (int)*currFrame - (int)*prevFrame;
          // copy current frame to previous; must happen BEFORE the in-place
          // thresholding below overwrites the current frame
          *prevFrame = *currFrame;
          // threshold: motion (255) if difference is large in either direction
          *currFrame = ((diff >= differenceThreshold) || (diff <= differenceThresholdNeg)) ? (byte)255 : (byte)0;
        }

        if (suppressNoise)
        {
          // suppress noise with erosion (tempFrame holds the source copy)
          UnmanagedMemoryHelper.CopyUnmanagedMemory(tempFrame.ImageData, motionFrame.ImageData, frameSize);
          erosionFilter.Apply(tempFrame, motionFrame);
        }

        // calculate amount of motion pixels; thresholded pixels are 0 or 255,
        // so testing the lowest bit counts the 255-valued (motion) pixels
        pixelsChanged = 0;
        byte* motion = (byte*)motionFrame.ImageData.ToPointer();

        for (int i = 0; i < frameSize; i++, motion++)
        {
          pixelsChanged += (*motion & 1);
        }
      }
    }
    /// <summary>
    /// Process video and motion frames doing further post processing after
    /// performed motion detection.
    /// </summary>
    /// 
    /// <param name="videoFrame">Original video frame.</param>
    /// <param name="motionFrame">Motion frame provided by motion detection
    /// algorithm (see <see cref="IMotionDetector"/>).</param>
    /// 
    /// <remarks><para>Processes provided motion frame and counts number of separate
    /// objects, which size satisfies <see cref="MinObjectsWidth"/> and <see cref="MinObjectsHeight"/>
    /// properties. In the case if <see cref="HighlightMotionRegions"/> property is
    /// set to <see langword="true"/>, the found object are also highlighted on the
    /// original video frame.
    /// </para></remarks>
    /// 
    public unsafe void ProcessFrame(UnmanagedImage videoFrame, UnmanagedImage motionFrame)
    {
      int frameWidth = videoFrame.Width;
      int frameHeight = videoFrame.Height;

      // ignore motion frames whose size differs from the video frame
      if ((motionFrame.Width != frameWidth) || (motionFrame.Height != frameHeight))
        return;

      // run blob counting under lock, since the counter keeps internal state
      lock (blobCounter)
      {
        blobCounter.ProcessImage(motionFrame);
      }

      if (!highlightMotionRegions)
        return;

      // draw a rectangle around every detected moving object
      foreach (Rectangle objectRect in blobCounter.GetObjectsRectangles())
      {
        DrawingHelper.Rectangle(videoFrame, objectRect, highlightColor);
      }
    }
Example #4
0
    /// <summary>
    /// Actual objects map building.
    /// </summary>
    /// 
    /// <param name="image">Unmanaged image to process.</param>
    /// 
    /// <remarks><para>The method supports 8 bpp indexed grayscale images and 24/32 bpp color images.</para>
    /// <para>Implements two-pass connected-component labeling: the first scan assigns
    /// provisional labels and records label equivalences in a map; a final pass remaps
    /// provisional labels into the compact range 1..objectsCount.</para></remarks>
    /// 
    /// <exception cref="UnsupportedImageFormatException">Unsupported pixel format of the source image.</exception>
    /// <exception cref="InvalidImagePropertiesException">Cannot process images that are one pixel wide. Rotate the image
    /// or use RecursiveBlobCounter.</exception>
    /// 
    protected override void BuildObjectsMap(UnmanagedImage image)
    {
      int stride = image.Stride;

      // check pixel format
      if ((image.PixelFormat != PixelFormat.Format8bppIndexed) &&
           (image.PixelFormat != PixelFormat.Format24bppRgb) &&
           (image.PixelFormat != PixelFormat.Format32bppArgb) &&
           (image.PixelFormat != PixelFormat.Format32bppPArgb))
      {
        throw new UnsupportedImageFormatException("Unsupported pixel format of the source image.");
      }

      // we don't want one pixel width images
      // (the scan below reads left/right neighbors unconditionally)
      if (imageWidth == 1)
      {
        throw new InvalidImagePropertiesException("BlobCounter cannot process images that are one pixel wide. Rotate the image or use RecursiveBlobCounter.");
      }

      // allocate labels array (one provisional label per pixel; 0 = background)
      objectLabels = new int[imageWidth * imageHeight];
      // initial labels count
      int labelsCount = 0;

      // create equivalence map; upper bound on distinct labels is the
      // checker-pattern worst case, plus one since labels start at 1
      int maxObjects = ((imageWidth / 2) + 1) * ((imageHeight / 2) + 1) + 1;
      int[] map = new int[maxObjects];

      // initially map all labels to themself
      for (int i = 0; i < maxObjects; i++)
      {
        map[i] = i;
      }

      // do the job
      unsafe
      {
        byte* src = (byte*)image.ImageData.ToPointer();
        // p is the linear pixel index into objectLabels (no stride padding)
        int p = 0;

        if (image.PixelFormat == PixelFormat.Format8bppIndexed)
        {
          // grayscale pixels are compared against the green channel threshold
          int offset = stride - imageWidth;

          // 1 - for pixels of the first row
          if (*src > backgroundThresholdG)
          {
            objectLabels[p] = ++labelsCount;
          }
          ++src;
          ++p;

          // process the rest of the first row (only the left neighbor exists)
          for (int x = 1; x < imageWidth; x++, src++, p++)
          {
            // check if we need to label current pixel
            if (*src > backgroundThresholdG)
            {
              // check if the previous pixel already was labeled
              if (src[-1] > backgroundThresholdG)
              {
                // label current pixel, as the previous
                objectLabels[p] = objectLabels[p - 1];
              }
              else
              {
                // create new label
                objectLabels[p] = ++labelsCount;
              }
            }
          }
          src += offset;

          // 2 - for other rows
          // for each row
          for (int y = 1; y < imageHeight; y++)
          {
            // for the first pixel of the row, we need to check
            // only upper and upper-right pixels
            if (*src > backgroundThresholdG)
            {
              // check surrounding pixels
              if (src[-stride] > backgroundThresholdG)
              {
                // label current pixel, as the above
                objectLabels[p] = objectLabels[p - imageWidth];
              }
              else if (src[1 - stride] > backgroundThresholdG)
              {
                // label current pixel, as the above right
                objectLabels[p] = objectLabels[p + 1 - imageWidth];
              }
              else
              {
                // create new label
                objectLabels[p] = ++labelsCount;
              }
            }
            ++src;
            ++p;

            // check left pixel and three upper pixels for the rest of pixels
            for (int x = 1; x < imageWidth - 1; x++, src++, p++)
            {
              if (*src > backgroundThresholdG)
              {
                // check surrounding pixels
                if (src[-1] > backgroundThresholdG)
                {
                  // label current pixel, as the left
                  objectLabels[p] = objectLabels[p - 1];
                }
                else if (src[-1 - stride] > backgroundThresholdG)
                {
                  // label current pixel, as the above left
                  objectLabels[p] = objectLabels[p - 1 - imageWidth];
                }
                else if (src[-stride] > backgroundThresholdG)
                {
                  // label current pixel, as the above
                  objectLabels[p] = objectLabels[p - imageWidth];
                }

                // the above-right neighbor may carry a different label than the
                // one chosen above; if so, record the equivalence in the map
                if (src[1 - stride] > backgroundThresholdG)
                {
                  if (objectLabels[p] == 0)
                  {
                    // label current pixel, as the above right
                    objectLabels[p] = objectLabels[p + 1 - imageWidth];
                  }
                  else
                  {
                    int l1 = objectLabels[p];
                    int l2 = objectLabels[p + 1 - imageWidth];

                    if ((l1 != l2) && (map[l1] != map[l2]))
                    {
                      // merge
                      if (map[l1] == l1)
                      {
                        // map left value to the right
                        map[l1] = map[l2];
                      }
                      else if (map[l2] == l2)
                      {
                        // map right value to the left
                        map[l2] = map[l1];
                      }
                      else
                      {
                        // both values already mapped
                        map[map[l1]] = map[l2];
                        map[l1] = map[l2];
                      }

                      // reindex: flatten the whole map so every label points
                      // directly at its root (full pass after every merge)
                      for (int i = 1; i <= labelsCount; i++)
                      {
                        if (map[i] != i)
                        {
                          // reindex
                          int j = map[i];
                          while (j != map[j])
                          {
                            j = map[j];
                          }
                          map[i] = j;
                        }
                      }
                    }
                  }
                }

                // label the object if it is not yet
                if (objectLabels[p] == 0)
                {
                  // create new label
                  objectLabels[p] = ++labelsCount;
                }
              }
            }

            // for the last pixel of the row, we need to check
            // only upper and upper-left pixels
            if (*src > backgroundThresholdG)
            {
              // check surrounding pixels
              if (src[-1] > backgroundThresholdG)
              {
                // label current pixel, as the left
                objectLabels[p] = objectLabels[p - 1];
              }
              else if (src[-1 - stride] > backgroundThresholdG)
              {
                // label current pixel, as the above left
                objectLabels[p] = objectLabels[p - 1 - imageWidth];
              }
              else if (src[-stride] > backgroundThresholdG)
              {
                // label current pixel, as the above
                objectLabels[p] = objectLabels[p - imageWidth];
              }
              else
              {
                // create new label
                objectLabels[p] = ++labelsCount;
              }
            }
            ++src;
            ++p;

            src += offset;
          }
        }
        else
        {
          // color images
          int pixelSize = Bitmap.GetPixelFormatSize(image.PixelFormat) / 8;
          int offset = stride - imageWidth * pixelSize;

          // byte offsets to the above-right (stride - pixel) and
          // above-left (stride + pixel) neighbors, subtracted from src below
          int strideM1 = stride - pixelSize;
          int strideP1 = stride + pixelSize;

          // 1 - for pixels of the first row
          // NOTE(review): the very first pixel uses a non-zero test on raw RGB
          // instead of the per-channel thresholds used everywhere else —
          // looks inconsistent; confirm against upstream before changing
          if ((src[RGB.R] | src[RGB.G] | src[RGB.B]) != 0)
          {
            objectLabels[p] = ++labelsCount;
          }
          src += pixelSize;
          ++p;

          // process the rest of the first row
          for (int x = 1; x < imageWidth; x++, src += pixelSize, p++)
          {
            // check if we need to label current pixel
            if ((src[RGB.R] > backgroundThresholdR) ||
                 (src[RGB.G] > backgroundThresholdG) ||
                 (src[RGB.B] > backgroundThresholdB))
            {
              // check if the previous pixel already was labeled
              if ((src[RGB.R - pixelSize] > backgroundThresholdR) ||
                   (src[RGB.G - pixelSize] > backgroundThresholdG) ||
                   (src[RGB.B - pixelSize] > backgroundThresholdB))
              {
                // label current pixel, as the previous
                objectLabels[p] = objectLabels[p - 1];
              }
              else
              {
                // create new label
                objectLabels[p] = ++labelsCount;
              }
            }
          }
          src += offset;

          // 2 - for other rows
          // for each row
          for (int y = 1; y < imageHeight; y++)
          {
            // for the first pixel of the row, we need to check
            // only upper and upper-right pixels
            if ((src[RGB.R] > backgroundThresholdR) ||
                 (src[RGB.G] > backgroundThresholdG) ||
                 (src[RGB.B] > backgroundThresholdB))
            {
              // check surrounding pixels
              if ((src[RGB.R - stride] > backgroundThresholdR) ||
                   (src[RGB.G - stride] > backgroundThresholdG) ||
                   (src[RGB.B - stride] > backgroundThresholdB))
              {
                // label current pixel, as the above
                objectLabels[p] = objectLabels[p - imageWidth];
              }
              else if ((src[RGB.R - strideM1] > backgroundThresholdR) ||
                        (src[RGB.G - strideM1] > backgroundThresholdG) ||
                        (src[RGB.B - strideM1] > backgroundThresholdB))
              {
                // label current pixel, as the above right
                objectLabels[p] = objectLabels[p + 1 - imageWidth];
              }
              else
              {
                // create new label
                objectLabels[p] = ++labelsCount;
              }
            }
            src += pixelSize;
            ++p;

            // check left pixel and three upper pixels for the rest of pixels
            for (int x = 1; x < imageWidth - 1; x++, src += pixelSize, p++)
            {
              if ((src[RGB.R] > backgroundThresholdR) ||
                   (src[RGB.G] > backgroundThresholdG) ||
                   (src[RGB.B] > backgroundThresholdB))
              {
                // check surrounding pixels
                if ((src[RGB.R - pixelSize] > backgroundThresholdR) ||
                     (src[RGB.G - pixelSize] > backgroundThresholdG) ||
                     (src[RGB.B - pixelSize] > backgroundThresholdB))
                {
                  // label current pixel, as the left
                  objectLabels[p] = objectLabels[p - 1];
                }
                else if ((src[RGB.R - strideP1] > backgroundThresholdR) ||
                          (src[RGB.G - strideP1] > backgroundThresholdG) ||
                          (src[RGB.B - strideP1] > backgroundThresholdB))
                {
                  // label current pixel, as the above left
                  objectLabels[p] = objectLabels[p - 1 - imageWidth];
                }
                else if ((src[RGB.R - stride] > backgroundThresholdR) ||
                          (src[RGB.G - stride] > backgroundThresholdG) ||
                          (src[RGB.B - stride] > backgroundThresholdB))
                {
                  // label current pixel, as the above
                  objectLabels[p] = objectLabels[p - imageWidth];
                }

                // the above-right neighbor may carry a different label than the
                // one chosen above; if so, record the equivalence in the map
                if ((src[RGB.R - strideM1] > backgroundThresholdR) ||
                     (src[RGB.G - strideM1] > backgroundThresholdG) ||
                     (src[RGB.B - strideM1] > backgroundThresholdB))
                {
                  if (objectLabels[p] == 0)
                  {
                    // label current pixel, as the above right
                    objectLabels[p] = objectLabels[p + 1 - imageWidth];
                  }
                  else
                  {
                    int l1 = objectLabels[p];
                    int l2 = objectLabels[p + 1 - imageWidth];

                    if ((l1 != l2) && (map[l1] != map[l2]))
                    {
                      // merge
                      if (map[l1] == l1)
                      {
                        // map left value to the right
                        map[l1] = map[l2];
                      }
                      else if (map[l2] == l2)
                      {
                        // map right value to the left
                        map[l2] = map[l1];
                      }
                      else
                      {
                        // both values already mapped
                        map[map[l1]] = map[l2];
                        map[l1] = map[l2];
                      }

                      // reindex: flatten the whole map so every label points
                      // directly at its root (full pass after every merge)
                      for (int i = 1; i <= labelsCount; i++)
                      {
                        if (map[i] != i)
                        {
                          // reindex
                          int j = map[i];
                          while (j != map[j])
                          {
                            j = map[j];
                          }
                          map[i] = j;
                        }
                      }
                    }
                  }
                }

                // label the object if it is not yet
                if (objectLabels[p] == 0)
                {
                  // create new label
                  objectLabels[p] = ++labelsCount;
                }
              }
            }

            // for the last pixel of the row, we need to check
            // only upper and upper-left pixels
            if ((src[RGB.R] > backgroundThresholdR) ||
                 (src[RGB.G] > backgroundThresholdG) ||
                 (src[RGB.B] > backgroundThresholdB))
            {
              // check surrounding pixels
              if ((src[RGB.R - pixelSize] > backgroundThresholdR) ||
                   (src[RGB.G - pixelSize] > backgroundThresholdG) ||
                   (src[RGB.B - pixelSize] > backgroundThresholdB))
              {
                // label current pixel, as the left
                objectLabels[p] = objectLabels[p - 1];
              }
              else if ((src[RGB.R - strideP1] > backgroundThresholdR) ||
                        (src[RGB.G - strideP1] > backgroundThresholdG) ||
                        (src[RGB.B - strideP1] > backgroundThresholdB))
              {
                // label current pixel, as the above left
                objectLabels[p] = objectLabels[p - 1 - imageWidth];
              }
              else if ((src[RGB.R - stride] > backgroundThresholdR) ||
                        (src[RGB.G - stride] > backgroundThresholdG) ||
                        (src[RGB.B - stride] > backgroundThresholdB))
              {
                // label current pixel, as the above
                objectLabels[p] = objectLabels[p - imageWidth];
              }
              else
              {
                // create new label
                objectLabels[p] = ++labelsCount;
              }
            }
            src += pixelSize;
            ++p;

            src += offset;
          }
        }
      }

      // allocate remapping array
      int[] reMap = new int[map.Length];

      // count objects and prepare remapping array:
      // root labels (map[i] == i) get consecutive final object numbers
      objectsCount = 0;
      for (int i = 1; i <= labelsCount; i++)
      {
        if (map[i] == i)
        {
          // increase objects count
          reMap[i] = ++objectsCount;
        }
      }
      // second pass to complete remapping: non-root labels inherit the final
      // number of their root
      for (int i = 1; i <= labelsCount; i++)
      {
        if (map[i] != i)
        {
          // reindex
          reMap[i] = reMap[map[i]];
        }
      }

      // repair object labels (background pixels stay 0 since reMap[0] == 0)
      for (int i = 0, n = objectLabels.Length; i < n; i++)
      {
        objectLabels[i] = reMap[objectLabels[i]];
      }
    }
Example #5
0
    /// <summary>
    /// Clone the unmanaged image.
    /// </summary>
    /// <returns>Returns clone of the unmanaged image.</returns>
    /// <remarks><para>The method does complete cloning of the object: a new unmanaged
    /// buffer is allocated and all pixel data (including stride padding) is copied into it.</para></remarks>
    public UnmanagedImage Clone()
    {
      // allocate unmanaged memory for the image copy
      IntPtr newImageData = System.Runtime.InteropServices.Marshal.AllocHGlobal(stride * height);

      // the clone owns its buffer, so it must free it when disposed
      UnmanagedImage newImage = new UnmanagedImage(newImageData, width, height, stride, pixelFormat);
      newImage.mustBeDisposed = true;

      // copy pixel data; stride * height covers row padding bytes as well
      UnmanagedMemoryHelper.CopyUnmanagedMemory(newImageData, imageData, stride * height);

      return newImage;
    }
Example #6
0
    /// <summary>
    /// Reset motion detector to initial state.
    /// </summary>
    /// 
    /// <remarks><para>The method resets motion detection and motion processing algorithms by calling
    /// their <see cref="IMotionDetector.Reset"/> and <see cref="IMotionProcessing.Reset"/> methods.</para>
    /// </remarks>
    /// 
    public void Reset()
    {
      lock (sync)
      {
        // forward the reset to the plugged-in algorithms, when present
        if (detector != null)
          detector.Reset();

        if (processor != null)
          processor.Reset();

        // forget the remembered video size
        videoWidth = 0;
        videoHeight = 0;

        // release the zones frame, if one was created
        if (zonesFrame == null)
          return;

        zonesFrame.Dispose();
        zonesFrame = null;
      }
    }
Example #7
0
        /// <summary>
        /// Replace the cached unmanaged copy of the last frame with a copy of
        /// the given managed bitmap.
        /// </summary>
        /// <param name="frame">Managed bitmap to convert and store.</param>
        private void SetUnmanagedLastFrame(Bitmap frame)
        {
            // NOTE(review): locking on 'this' is fragile (external code can take
            // the same lock) — prefer a private lock object if the class allows it.
            lock (this)
            {
                try
                {
                    // Convert FIRST, so that a failed conversion leaves the
                    // previous frame intact. The original code disposed
                    // _lastFrame before converting, which left a dangling
                    // disposed image in _lastFrame if FromManagedImage threw.
                    UnmanagedImage converted = UnmanagedImage.FromManagedImage(frame);

                    if (_lastFrame != null)
                    {
                        _lastFrame.Dispose();
                    }

                    _lastFrame = converted;
                }
                catch
                {
                    // best effort by design: conversion failures are ignored,
                    // keeping whatever frame was stored before
                }
            }
        }
Example #8
0
    /// <summary>
    /// Fill rectangle on the specified image.
    /// </summary>
    /// 
    /// <param name="image">Source image to draw on.</param>
    /// <param name="rectangle">Rectangle's coordinates to fill.</param>
    /// <param name="color">Rectangle's color.</param>
    /// 
    /// <exception cref="UnsupportedImageFormatException">The source image has incorrect pixel format.</exception>
    /// 
    public static unsafe void FillRectangle(UnmanagedImage image, Rectangle rectangle, Color color)
    {
      CheckPixelFormat(image.PixelFormat);

      int bytesPerPixel = System.Drawing.Image.GetPixelFormatSize(image.PixelFormat) / 8;

      // image dimension
      int imgW = image.Width;
      int imgH = image.Height;
      int stride = image.Stride;

      // inclusive rectangle corners
      int left = rectangle.X;
      int top = rectangle.Y;
      int right = rectangle.X + rectangle.Width - 1;
      int bottom = rectangle.Y + rectangle.Height - 1;

      // rectangle entirely outside of the image?
      if ((left >= imgW) || (top >= imgH) || (right < 0) || (bottom < 0))
      {
        // nothing to draw
        return;
      }

      // clip the rectangle to the image bounds
      int startX = Math.Max(0, left);
      int stopX = Math.Min(imgW - 1, right);
      int startY = Math.Max(0, top);
      int stopY = Math.Min(imgH - 1, bottom);

      // pointer to the first pixel of the clipped rectangle
      byte* ptr = (byte*)image.ImageData.ToPointer() + startY * stride + startX * bytesPerPixel;

      if (image.PixelFormat == PixelFormat.Format8bppIndexed)
      {
        // grayscale image: convert the color with BT.709 luma coefficients
        byte gray = (byte)(0.2125 * color.R + 0.7154 * color.G + 0.0721 * color.B);
        int rowLength = stopX - startX + 1;

        // fill each row with a single memset-style call
        for (int y = startY; y <= stopY; y++, ptr += stride)
        {
          UnmanagedMemoryHelper.SetUnmanagedMemory(ptr, gray, rowLength);
        }
      }
      else
      {
        // color image
        byte red = color.R;
        byte green = color.G;
        byte blue = color.B;

        // bytes to skip to reach the rectangle start on the next row
        int rowPad = stride - (stopX - startX + 1) * bytesPerPixel;

        for (int y = startY; y <= stopY; y++, ptr += rowPad)
        {
          for (int x = startX; x <= stopX; x++, ptr += bytesPerPixel)
          {
            ptr[RGB.R] = red;
            ptr[RGB.G] = green;
            ptr[RGB.B] = blue;
          }
        }
      }
    }
    /// <summary>
    /// Reset motion detector to initial state.
    /// </summary>
    /// 
    /// <remarks><para>Resets internal state and variables of motion detection algorithm.
    /// Usually this is required to be done before processing new video source, but
    /// may be also done at any time to restart motion detection algorithm.</para>
    /// </remarks>
    /// 
    public void Reset()
    {
      lock (sync)
      {
        // restart the background-update counter
        framesCounter = 0;

        // release all allocated frames so the next ProcessFrame() call
        // re-initializes them from scratch
        if (backgroundFrame != null)
        {
          backgroundFrame.Dispose();
          backgroundFrame = null;
        }

        if (motionFrame != null)
        {
          motionFrame.Dispose();
          motionFrame = null;
        }

        if (tempFrame != null)
        {
          tempFrame.Dispose();
          tempFrame = null;
        }
      }
    }
    /// <summary>
    /// Process new video frame.
    /// </summary>
    /// 
    /// <param name="videoFrame">Video frame to process (detect motion in).</param>
    /// 
    /// <remarks><para>Processes new frame from video source and detects motion in it.</para>
    /// 
    /// <para>Check <see cref="MotionLevel"/> property to get information about amount of motion
    /// (changes) in the processed frame.</para>
    /// 
    /// <para>The very first call only allocates internal buffers and stores the frame
    /// as the background frame; motion is actually detected starting from the second call.</para>
    /// </remarks>
    ///
    public unsafe void ProcessFrame(UnmanagedImage videoFrame)
    {
      lock (sync)
      {
        // check background frame: null means this is the first frame ever seen
        if (backgroundFrame == null)
        {
          // remember when the background was last updated (timer-based mode)
          lastTimeMeasurment = DateTime.Now;

          // save image dimension
          width = videoFrame.Width;
          height = videoFrame.Height;

          // allocate memory for previous and current frames
          backgroundFrame = UnmanagedImage.Create(width, height, PixelFormat.Format8bppIndexed);
          motionFrame = UnmanagedImage.Create(width, height, PixelFormat.Format8bppIndexed);

          // frame size in bytes; note it includes stride padding, so the
          // per-byte loops below also visit the padding bytes
          frameSize = motionFrame.Stride * height;

          // temporary buffer, used only when noise suppression is enabled
          if (suppressNoise)
          {
            tempFrame = UnmanagedImage.Create(width, height, PixelFormat.Format8bppIndexed);
          }

          // convert source frame to grayscale
          Grayscale.CommonAlgorithms.BT709.Apply(videoFrame, backgroundFrame);

          return;
        }

        // silently ignore frames whose size differs from the first frame
        if ((videoFrame.Width != width) || (videoFrame.Height != height))
          return;

        // convert current image to grayscale
        Grayscale.CommonAlgorithms.BT709.Apply(videoFrame, motionFrame);

        // pointers to background and current frames
        byte* backFrame;
        byte* currFrame;
        // per-pixel difference value
        int diff;

        // update background frame: it slowly adapts towards the current frame,
        // using either a frame counter or elapsed time as the update base
        if (millisecondsPerBackgroundUpdate == 0)
        {
          // update background frame using frame counter as a base
          if (++framesCounter == framesPerBackgroundUpdate)
          {
            framesCounter = 0;

            backFrame = (byte*)backgroundFrame.ImageData.ToPointer();
            currFrame = (byte*)motionFrame.ImageData.ToPointer();

            // move each background pixel one step towards the current frame
            for (int i = 0; i < frameSize; i++, backFrame++, currFrame++)
            {
              diff = *currFrame - *backFrame;
              if (diff > 0)
              {
                (*backFrame)++;
              }
              else if (diff < 0)
              {
                (*backFrame)--;
              }
            }
          }
        }
        else
        {
          // update background frame using timer as a base

          // get current time and calculate difference
          DateTime currentTime = DateTime.Now;
          TimeSpan timeDff = currentTime - lastTimeMeasurment;
          // save current time as the last measurement
          lastTimeMeasurment = currentTime;

          int millisonds = (int)timeDff.TotalMilliseconds + millisecondsLeftUnprocessed;

          // save remainder so it could be taken into account in the future
          millisecondsLeftUnprocessed = millisonds % millisecondsPerBackgroundUpdate;
          // get amount for background update 
          int updateAmount = (int)(millisonds / millisecondsPerBackgroundUpdate);

          backFrame = (byte*)backgroundFrame.ImageData.ToPointer();
          currFrame = (byte*)motionFrame.ImageData.ToPointer();

          // move each background pixel towards the current frame by at most
          // updateAmount; if the remaining difference is smaller, close it fully
          for (int i = 0; i < frameSize; i++, backFrame++, currFrame++)
          {
            diff = *currFrame - *backFrame;
            if (diff > 0)
            {
              (*backFrame) += (byte)((diff < updateAmount) ? diff : updateAmount);
            }
            else if (diff < 0)
            {
              (*backFrame) += (byte)((-diff < updateAmount) ? diff : -updateAmount);
            }
          }
        }

        backFrame = (byte*)backgroundFrame.ImageData.ToPointer();
        currFrame = (byte*)motionFrame.ImageData.ToPointer();

        // 1 - get difference between frames
        // 2 - threshold the difference (motion frame becomes a 0/255 mask)
        for (int i = 0; i < frameSize; i++, backFrame++, currFrame++)
        {
          // difference
          diff = (int)*currFrame - (int)*backFrame;
          // threshold: motion (255) if difference is large in either direction
          *currFrame = ((diff >= differenceThreshold) || (diff <= differenceThresholdNeg)) ? (byte)255 : (byte)0;
        }

        if (suppressNoise)
        {
          // suppress noise with erosion (tempFrame holds the source copy)
          UnmanagedMemoryHelper.CopyUnmanagedMemory(tempFrame.ImageData, motionFrame.ImageData, frameSize);
          erosionFilter.Apply(tempFrame, motionFrame);

          if (keepObjectEdges)
          {
            // restore object edges eroded away by the previous step
            UnmanagedMemoryHelper.CopyUnmanagedMemory(tempFrame.ImageData, motionFrame.ImageData, frameSize);
            dilatationFilter.Apply(tempFrame, motionFrame);
          }
        }

        // calculate amount of motion pixels; thresholded pixels are 0 or 255,
        // so testing the lowest bit counts the 255-valued (motion) pixels
        pixelsChanged = 0;
        byte* motion = (byte*)motionFrame.ImageData.ToPointer();

        for (int i = 0; i < frameSize; i++, motion++)
        {
          pixelsChanged += (*motion & 1);
        }
      }
    }
    /// <summary>
    /// Process video and motion frames doing further post processing after
    /// performed motion detection.
    /// </summary>
    /// 
    /// <param name="videoFrame">Original video frame.</param>
    /// <param name="motionFrame">Motion frame provided by motion detection
    /// algorithm (see <see cref="IMotionDetector"/>).</param>
    /// 
    /// <remarks><para>Processes provided motion frame and highlights borders of motion areas
    /// on the original video frame with <see cref="HighlightColor">specified color</see>.</para>
    /// </remarks>
    ///
    public unsafe void ProcessFrame(UnmanagedImage videoFrame, UnmanagedImage motionFrame)
    {
      int width = videoFrame.Width;
      int height = videoFrame.Height;

      // both frames must have the same dimensions
      if ((motionFrame.Width != width) || (motionFrame.Height != height))
        return;

      byte fillR = highlightColor.R;
      byte fillG = highlightColor.G;
      byte fillB = highlightColor.B;

      byte* src = (byte*)videoFrame.ImageData.ToPointer();
      byte* motion = (byte*)motionFrame.ImageData.ToPointer();

      // stride of the motion frame; may differ from its width because of
      // row alignment, so it must be used for vertical neighbor access
      int motionStride = motionFrame.Stride;

      int srcOffset = videoFrame.Stride - (width - 2) * 3;
      int motionOffset = motionStride - (width - 2);

      // skip the first row and the first column - the edge detector below
      // needs all 4 neighbors of each inspected pixel
      src += videoFrame.Stride + 3;
      motion += motionStride + 1;

      int widthM1 = width - 1;
      int heightM1 = height - 1;

      // use simple edge detector (4-neighbor Laplacian) - a pixel belongs to
      // a region border if it differs from at least one of its neighbors
      for (int y = 1; y < heightM1; y++)
      {
        for (int x = 1; x < widthM1; x++, motion++, src += 3)
        {
          // FIX: vertical neighbors are addressed with the frame's stride, not
          // its width - the two differ when rows are padded for alignment
          if (4 * *motion - motion[-motionStride] - motion[motionStride] - motion[1] - motion[-1] != 0)
          {
            src[RGB.R] = fillR;
            src[RGB.G] = fillG;
            src[RGB.B] = fillB;
          }
        }

        motion += motionOffset;
        src += srcOffset;
      }
    }
Example #12
0
    /// <summary>
    /// Create unmanaged image from the specified locked managed image.
    /// </summary>
    /// <param name="imageData">Image data of the locked managed image.</param>
    /// <returns>Returns new unmanaged image, which is a copy of source managed image.</returns>
    /// <remarks><para>The method creates an exact copy of the specified managed image, but allocated
    /// in unmanaged memory. This means that the managed image may be unlocked right after a call to this
    /// method.</para></remarks>
    /// <exception cref="UnsupportedImageFormatException">Unsupported pixel format of source image.</exception>
    public static UnmanagedImage FromManagedImage(BitmapData imageData)
    {
      PixelFormat pixelFormat = imageData.PixelFormat;

      // make sure the source image has one of the supported pixel formats
      switch (pixelFormat)
      {
        case PixelFormat.Format8bppIndexed:
        case PixelFormat.Format16bppGrayScale:
        case PixelFormat.Format24bppRgb:
        case PixelFormat.Format32bppRgb:
        case PixelFormat.Format32bppArgb:
        case PixelFormat.Format32bppPArgb:
        case PixelFormat.Format48bppRgb:
        case PixelFormat.Format64bppArgb:
        case PixelFormat.Format64bppPArgb:
          break;
        default:
          throw new UnsupportedImageFormatException("Unsupported pixel format of the source image.");
      }

      int bufferSize = imageData.Stride * imageData.Height;

      // allocate an unmanaged buffer and copy the source pixels into it
      IntPtr dstImageData = System.Runtime.InteropServices.Marshal.AllocHGlobal(bufferSize);
      UnmanagedMemoryHelper.CopyUnmanagedMemory(dstImageData, imageData.Scan0, bufferSize);

      // the returned image owns its buffer, so it must release it on dispose
      UnmanagedImage image = new UnmanagedImage(dstImageData, imageData.Width, imageData.Height, imageData.Stride, pixelFormat);
      image.mustBeDisposed = true;

      return image;
    }
Example #13
0
    /// <summary>
    /// Allocate new image in unmanaged memory.
    /// </summary>
    /// <param name="width">Image width.</param>
    /// <param name="height">Image height.</param>
    /// <param name="pixelFormat">Image pixel format.</param>
    /// <returns>Return image allocated in unmanaged memory.</returns>
    /// <remarks><para>Allocate new image with specified attributes in unmanaged memory.</para>
    /// <para><note>The method supports only Format8bppIndexed, Format16bppGrayScale, Format24bppRgb,
    /// Format32bppRgb, Format32bppArgb, Format32bppPArgb, Format48bppRgb, Format64bppArgb and
    /// Format64bppPArgb pixel formats. In the case if Format8bppIndexed format is specified, palette
    /// is not created for the image (it is supposed to be a 8 bpp grayscale image).
    /// </note></para>
    /// </remarks>
    /// <exception cref="UnsupportedImageFormatException">Unsupported pixel format was specified.</exception>
    /// <exception cref="InvalidImagePropertiesException">Invalid image size was specified.</exception>
    public static UnmanagedImage Create(int width, int height, PixelFormat pixelFormat)
    {
      int bytesPerPixel;

      // map the pixel format to pixel size in bytes; reject anything else
      if (pixelFormat == PixelFormat.Format8bppIndexed)
      {
        bytesPerPixel = 1;
      }
      else if (pixelFormat == PixelFormat.Format16bppGrayScale)
      {
        bytesPerPixel = 2;
      }
      else if (pixelFormat == PixelFormat.Format24bppRgb)
      {
        bytesPerPixel = 3;
      }
      else if ((pixelFormat == PixelFormat.Format32bppRgb) ||
               (pixelFormat == PixelFormat.Format32bppArgb) ||
               (pixelFormat == PixelFormat.Format32bppPArgb))
      {
        bytesPerPixel = 4;
      }
      else if (pixelFormat == PixelFormat.Format48bppRgb)
      {
        bytesPerPixel = 6;
      }
      else if ((pixelFormat == PixelFormat.Format64bppArgb) ||
               (pixelFormat == PixelFormat.Format64bppPArgb))
      {
        bytesPerPixel = 8;
      }
      else
      {
        throw new UnsupportedImageFormatException("Can not create image with specified pixel format.");
      }

      // both dimensions must be positive
      if ((width <= 0) || (height <= 0))
      {
        throw new InvalidImagePropertiesException("Invalid image size specified.");
      }

      // stride is the pixel row length rounded up to a multiple of 4 bytes
      int stride = ((width * bytesPerPixel + 3) / 4) * 4;

      // allocate a zero-initialized unmanaged buffer for the image
      IntPtr imageData = System.Runtime.InteropServices.Marshal.AllocHGlobal(stride * height);
      UnmanagedMemoryHelper.SetUnmanagedMemory(imageData, 0, stride * height);

      // the image owns its buffer, so it must release it on dispose
      UnmanagedImage image = new UnmanagedImage(imageData, width, height, stride, pixelFormat);
      image.mustBeDisposed = true;

      return image;
    }
Example #14
0
    /// <summary>
    /// Copy unmanaged image to another unmanaged image.
    /// </summary>
    /// <param name="destImage">Destination image to copy this image to.</param>
    /// <remarks><para>The method copies current unmanaged image to the specified image.
    /// Size and pixel format of the destination image must be exactly the same.</para></remarks>
    /// <exception cref="InvalidImagePropertiesException">Destination image has different size or pixel format.</exception>
    public void Copy(UnmanagedImage destImage)
    {
      // destination must match this image in size and pixel format
      bool sameProperties =
          (width == destImage.width) && (height == destImage.height) &&
          (pixelFormat == destImage.pixelFormat);

      if (!sameProperties)
      {
        throw new InvalidImagePropertiesException("Destination image has different size or pixel format.");
      }

      if (stride == destImage.stride)
      {
        // strides match - the whole buffer can be copied in one call
        UnmanagedMemoryHelper.CopyUnmanagedMemory(destImage.imageData, imageData, stride * height);
        return;
      }

      unsafe
      {
        // strides differ - copy row by row, transferring only the bytes
        // that fit into the narrower of the two strides
        int dstStride = destImage.stride;
        int lineLength = Math.Min(stride, dstStride);

        byte* srcRow = (byte*)imageData.ToPointer();
        byte* dstRow = (byte*)destImage.imageData.ToPointer();

        for (int row = 0; row < height; row++)
        {
          UnmanagedMemoryHelper.CopyUnmanagedMemory(dstRow, srcRow, lineLength);

          dstRow += dstStride;
          srcRow += stride;
        }
      }
    }
    /// <summary>
    /// Reset motion detector to initial state.
    /// </summary>
    /// 
    /// <remarks><para>Resets internal state and variables of motion detection algorithm.
    /// Usually this is required to be done before processing new video source, but
    /// may be also done at any time to restart motion detection algorithm.</para>
    /// </remarks>
    /// 
    public void Reset()
    {
      // release all internal frame buffers while holding the
      // synchronization lock
      lock (sync)
      {
        if (previousFrame != null) { previousFrame.Dispose(); previousFrame = null; }

        if (motionFrame != null) { motionFrame.Dispose(); motionFrame = null; }

        if (tempFrame != null) { tempFrame.Dispose(); tempFrame = null; }
      }
    }
Example #16
0
    /// <summary>
    /// Draw a line on the specified image.
    /// </summary>
    /// 
    /// <param name="image">Source image to draw on.</param>
    /// <param name="point1">The first point to connect.</param>
    /// <param name="point2">The second point to connect.</param>
    /// <param name="color">Line's color.</param>
    /// 
    /// <exception cref="UnsupportedImageFormatException">The source image has incorrect pixel format.</exception>
    /// 
    public static unsafe void Line(UnmanagedImage image, IntPoint point1, IntPoint point2, Color color)
    {
      // TODO: faster line drawing algorithm may be implemented with integer math

      CheckPixelFormat(image.PixelFormat);

      int pixelSize = System.Drawing.Image.GetPixelFormatSize(image.PixelFormat) / 8;

      // image dimension
      int imageWidth = image.Width;
      int imageHeight = image.Height;
      int stride = image.Stride;

      // check if there is something to draw
      if (
          ((point1.X < 0) && (point2.X < 0)) ||
          ((point1.Y < 0) && (point2.Y < 0)) ||
          ((point1.X >= imageWidth) && (point2.X >= imageWidth)) ||
          ((point1.Y >= imageHeight) && (point2.Y >= imageHeight)))
      {
        // nothing to draw
        return;
      }

      // clip both end points against the image boundary
      CheckEndPoint(imageWidth, imageHeight, point1, ref point2);
      CheckEndPoint(imageWidth, imageHeight, point2, ref point1);

      // check again if there is something to draw after clipping
      if (
          ((point1.X < 0) && (point2.X < 0)) ||
          ((point1.Y < 0) && (point2.Y < 0)) ||
          ((point1.X >= imageWidth) && (point2.X >= imageWidth)) ||
          ((point1.Y >= imageHeight) && (point2.Y >= imageHeight)))
      {
        // nothing to draw
        return;
      }

      int startX = point1.X;
      int startY = point1.Y;
      int stopX = point2.X;
      int stopY = point2.Y;

      // compute pixel for grayscale image (BT.709 luma weights)
      byte gray = 0;
      if (image.PixelFormat == PixelFormat.Format8bppIndexed)
      {
        gray = (byte)(0.2125 * color.R + 0.7154 * color.G + 0.0721 * color.B);
      }

      // base pointer of the image buffer - it does not change while the line
      // is drawn, so it is hoisted out of the per-pixel loops below
      byte* basePtr = (byte*)image.ImageData.ToPointer();

      // draw the line
      int dx = stopX - startX;
      int dy = stopY - startY;

      if (Math.Abs(dx) >= Math.Abs(dy))
      {
        // the line is more horizontal, we'll plot along the X axis
        float slope = (dx != 0) ? (float)dy / dx : 0;
        int step = (dx > 0) ? 1 : -1;

        // correct dx so last point is included as well
        dx += step;

        if (image.PixelFormat == PixelFormat.Format8bppIndexed)
        {
          // grayscale image
          for (int x = 0; x != dx; x += step)
          {
            int px = startX + x;
            int py = (int)((float)startY + (slope * (float)x));

            byte* ptr = basePtr + py * stride + px;
            *ptr = gray;
          }
        }
        else
        {
          // color image
          for (int x = 0; x != dx; x += step)
          {
            int px = startX + x;
            int py = (int)((float)startY + (slope * (float)x));

            byte* ptr = basePtr + py * stride + px * pixelSize;

            ptr[RGB.R] = color.R;
            ptr[RGB.G] = color.G;
            ptr[RGB.B] = color.B;
          }
        }
      }
      else
      {
        // the line is more vertical, we'll plot along the y axis.
        float slope = (dy != 0) ? (float)dx / dy : 0;
        int step = (dy > 0) ? 1 : -1;

        // correct dy so last point is included as well
        dy += step;

        if (image.PixelFormat == PixelFormat.Format8bppIndexed)
        {
          // grayscale image
          for (int y = 0; y != dy; y += step)
          {
            int px = (int)((float)startX + (slope * (float)y));
            int py = startY + y;

            byte* ptr = basePtr + py * stride + px;
            *ptr = gray;
          }
        }
        else
        {
          // color image
          for (int y = 0; y != dy; y += step)
          {
            int px = (int)((float)startX + (slope * (float)y));
            int py = startY + y;

            byte* ptr = basePtr + py * stride + px * pixelSize;

            ptr[RGB.R] = color.R;
            ptr[RGB.G] = color.G;
            ptr[RGB.B] = color.B;
          }
        }
      }
    }
    /// <summary>
    /// Process video and motion frames doing further post processing after
    /// performed motion detection.
    /// </summary>
    /// 
    /// <param name="videoFrame">Original video frame.</param>
    /// <param name="motionFrame">Motion frame provided by motion detection
    /// algorithm (see <see cref="IMotionDetector"/>).</param>
    /// 
    /// <remarks><para>Processes provided motion frame and calculates motion level
    /// for each grid's cell. In the case if <see cref="HighlightMotionGrid"/> property is
    /// set to <see langword="true"/>, the cell with motion level above threshold are
    /// highlighted.</para></remarks>
    ///
    public unsafe void ProcessFrame(UnmanagedImage videoFrame, UnmanagedImage motionFrame)
    {
      int width = videoFrame.Width;
      int height = videoFrame.Height;

      if ((motionFrame.Width != width) || (motionFrame.Height != height))
        return;

      int cellWidth = width / gridWidth;
      int cellHeight = height / gridHeight;
      // NOTE(review): cellWidth/cellHeight become 0 when the frame is smaller
      // than the grid, which causes division by zero below - presumably the
      // grid is always configured not to exceed the frame size; confirm

      // FIX: reset the grid's counters - without this, the normalized values
      // left from the previous frame would be accumulated into the pixel
      // counters of the current frame, skewing all cell motion levels
      Array.Clear(motionGrid, 0, motionGrid.Length);

      // temporary variables
      int xCell, yCell;

      // process motion frame calculating amount of changed pixels
      // in each grid's cell
      byte* motion = (byte*)motionFrame.ImageData.ToPointer();
      int motionOffset = motionFrame.Stride - width;

      for (int y = 0; y < height; y++)
      {
        // get current grid's row
        yCell = y / cellHeight;
        // correct row number if image was not divided by grid equally
        if (yCell >= gridHeight)
          yCell = gridHeight - 1;

        for (int x = 0; x < width; x++, motion++)
        {
          if (*motion != 0)
          {
            // get current grid's column
            xCell = x / cellWidth;
            // correct column number if image was not divided by grid equally
            if (xCell >= gridWidth)
              xCell = gridWidth - 1;

            motionGrid[yCell, xCell]++;
          }
        }
        motion += motionOffset;
      }

      // update motion grid converting absolute number of changed
      // pixels to relative for each cell; the last row/column absorb
      // the remainder when the image is not divided equally
      int gridHeightM1 = gridHeight - 1;
      int gridWidthM1 = gridWidth - 1;

      int lastRowHeight = height - cellHeight * gridHeightM1;
      int lastColumnWidth = width - cellWidth * gridWidthM1;

      for (int y = 0; y < gridHeight; y++)
      {
        int ch = (y != gridHeightM1) ? cellHeight : lastRowHeight;

        for (int x = 0; x < gridWidth; x++)
        {
          int cw = (x != gridWidthM1) ? cellWidth : lastColumnWidth;

          motionGrid[y, x] /= (cw * ch);
        }
      }

      if (highlightMotionGrid)
      {
        // highlight motion grid - cells, which have enough motion
        // (every second pixel in a checkerboard pattern)

        byte* src = (byte*)videoFrame.ImageData.ToPointer();
        int srcOffset = videoFrame.Stride - width * 3;

        byte fillR = highlightColor.R;
        byte fillG = highlightColor.G;
        byte fillB = highlightColor.B;

        for (int y = 0; y < height; y++)
        {
          yCell = y / cellHeight;
          if (yCell >= gridHeight)
            yCell = gridHeight - 1;

          for (int x = 0; x < width; x++, src += 3)
          {
            xCell = x / cellWidth;
            if (xCell >= gridWidth)
              xCell = gridWidth - 1;

            if ((motionGrid[yCell, xCell] > motionAmountToHighlight) && (((x + y) & 1) == 0))
            {
              src[RGB.R] = fillR;
              src[RGB.G] = fillG;
              src[RGB.B] = fillB;
            }
          }
          src += srcOffset;
        }
      }
    }
Example #18
0
 /// <summary>
 /// Draw a polyline on the specified image.
 /// </summary>
 /// 
 /// <param name="image">Source image to draw on.</param>
 /// <param name="points">Points of the polyline to draw.</param>
 /// <param name="color">polyline's color.</param>
 /// 
 /// <remarks><para>The method draws a polyline by connecting all points from the
 /// first one to the last one. Unlike <see cref="Polygon( UnmanagedImage, List{IntPoint}, Color )"/>
 /// method, this method does not connect the last point with the first one.
 /// </para></remarks>
 /// 
 public static void Polyline(UnmanagedImage image, List<IntPoint> points, Color color)
 {
   int pointCount = points.Count;

   // connect every point with its predecessor; nothing is drawn
   // when the polyline has less than two points
   for (int index = 1; index < pointCount; index++)
   {
     Line(image, points[index - 1], points[index], color);
   }
 }
    // Reset motion detector to initial state, releasing internal frame buffers
    private void Reset(bool force)
    {
      lock (sync)
      {
        // a manually set background frame survives a regular (non-forced) reset
        bool releaseBackground = (force == true) || (manuallySetBackgroundFrame == false);

        if ((backgroundFrame != null) && releaseBackground)
        {
          backgroundFrame.Dispose();
          backgroundFrame = null;
        }

        if (motionFrame != null) { motionFrame.Dispose(); motionFrame = null; }

        if (tempFrame != null) { tempFrame.Dispose(); tempFrame = null; }
      }
    }
Example #20
0
    /// <summary>
    /// Process new video frame.
    /// </summary>
    /// 
    /// <param name="videoFrame">Video frame to process (detect motion in).</param>
    /// 
    /// <returns>Returns amount of motion, which is provided <see cref="IMotionDetector.MotionLevel"/>
    /// property of the <see cref="MotionDetectionAlgorithm">motion detection algorithm in use</see>.</returns>
    /// 
    /// <remarks><para>The method first of all applies motion detection algorithm to the specified video
    /// frame to calculate <see cref="IMotionDetector.MotionLevel">motion level</see> and
    /// <see cref="IMotionDetector.MotionFrame">motion frame</see>. After this it applies motion processing algorithm
    /// (if it was set) to do further post processing, like highlighting motion areas, counting moving
    /// objects, etc.</para>
    /// 
    /// <para><note>In the case if <see cref="MotionZones"/> property is set, this method will perform
    /// motion filtering right after motion algorithm is done and before passing motion frame to motion
    /// processing algorithm. The method does filtering right on the motion frame, which is produced
    /// by motion detection algorithm. At the same time the method recalculates motion level and returns
    /// new value, which takes motion zones into account (but the new value is not set back to motion detection
    /// algorithm' <see cref="IMotionDetector.MotionLevel"/> property).
    /// </note></para>
    /// </remarks>
    /// 
    public float ProcessFrame(UnmanagedImage videoFrame)
    {
      lock (sync)
      {
        if (detector == null)
          return 0;

        // remember dimensions of the last processed frame
        videoWidth = videoFrame.Width;
        videoHeight = videoFrame.Height;

        // call motion detection
        detector.ProcessFrame(videoFrame);
        float motionLevel = detector.MotionLevel;

        // check if motion zones are specified
        if (motionZones != null)
        {
          if (zonesFrame == null)
          {
            CreateMotionZonesFrame();
          }

          // FIX: zonesFrame stays null when the zones array is empty or frame
          // dimensions were not known yet, and the detector's motion frame may
          // also be null (see the check before post processing below) - guard
          // both cases to avoid a NullReferenceException
          if ((zonesFrame != null) && (detector.MotionFrame != null) &&
              (videoWidth == zonesFrame.Width) && (videoHeight == zonesFrame.Height))
          {
            unsafe
            {
              // pointers to zones and motion frames
              byte* zonesPtr = (byte*)zonesFrame.ImageData.ToPointer();
              byte* motionPtr = (byte*)detector.MotionFrame.ImageData.ToPointer();

              motionLevel = 0;

              // keep only motion pixels covered by a zone and recalculate
              // the motion level from the masked motion frame
              for (int i = 0, frameSize = zonesFrame.Stride * videoHeight; i < frameSize; i++, zonesPtr++, motionPtr++)
              {
                *motionPtr &= *zonesPtr;
                motionLevel += (*motionPtr & 1);
              }
              motionLevel /= (videoWidth * videoHeight);
            }
          }
        }

        // call motion post processing
        if ((processor != null) && (detector.MotionFrame != null))
        {
          processor.ProcessFrame(videoFrame, detector.MotionFrame);
        }

        return motionLevel;
      }
    }
    /// <summary>
    /// Set background frame.
    /// </summary>
    /// 
    /// <param name="backgroundFrame">Background frame to set.</param>
    /// 
    /// <remarks><para>The method sets background frame, which will be used to calculate
    /// difference with.</para></remarks>
    /// 
    public void SetBackgroundFrame(UnmanagedImage backgroundFrame)
    {
      // drop all previously accumulated state (forced reset also releases
      // any background frame set earlier)
      Reset(true);

      lock (sync)
      {
        // remember dimensions of the supplied frame
        width = backgroundFrame.Width;
        height = backgroundFrame.Height;

        // build a grayscale copy of the supplied frame and keep it
        // as the new background
        UnmanagedImage grayBackground = UnmanagedImage.Create(width, height, PixelFormat.Format8bppIndexed);
        Grayscale.CommonAlgorithms.BT709.Apply(backgroundFrame, grayBackground);

        this.backgroundFrame = grayBackground;
        frameSize = grayBackground.Stride * height;

        // mark the background as user supplied so a regular (non-forced)
        // reset keeps it
        manuallySetBackgroundFrame = true;
      }
    }
Example #22
0
    // Create motion zones' image - a mask where pixels covered by any
    // motion zone are set to 255 and all other pixels are 0
    private unsafe void CreateMotionZonesFrame()
    {
      lock (sync)
      {
        // free previous motion zones frame
        if (zonesFrame != null)
        {
          zonesFrame.Dispose();
          zonesFrame = null;
        }

        // the mask can be built only when zones are set and at least one
        // frame was processed, so the frame dimensions are known
        if ((motionZones == null) || (motionZones.Length == 0) || (videoWidth == 0))
          return;

        zonesFrame = UnmanagedImage.Create(videoWidth, videoHeight, PixelFormat.Format8bppIndexed);

        Rectangle imageRect = new Rectangle(0, 0, videoWidth, videoHeight);
        int stride = zonesFrame.Stride;
        byte* frameBase = (byte*)zonesFrame.ImageData.ToPointer();

        // fill each zone's rectangle (clipped to the frame) with 255
        foreach (Rectangle zone in motionZones)
        {
          Rectangle clipped = Rectangle.Intersect(zone, imageRect);

          byte* row = frameBase + clipped.Y * stride + clipped.X;

          for (int y = 0; y < clipped.Height; y++)
          {
            UnmanagedMemoryHelper.SetUnmanagedMemory(row, 255, clipped.Width);
            row += stride;
          }
        }
      }
    }
Example #23
0
 /// <summary>
 /// Initializes a new instance of the <see cref="BlobCounter"/> class.
 /// </summary>
 /// 
 /// <param name="image">Unmanaged image to look for objects in.</param>
 /// 
 /// <remarks><para>Simply passes the provided image to the base class
 /// constructor; no additional initialization is performed here.</para></remarks>
 /// 
 public BlobCounter(UnmanagedImage image) : base(image) { }