Example No. 1
 // Get grayscale image out of the specified one
 public static void ConvertToGrayscale( UnmanagedImage source, UnmanagedImage destination )
 {
     if ( source.PixelFormat != PixelFormat.Format8bppIndexed )
     {
         Grayscale.CommonAlgorithms.BT709.Apply( source, destination );
     }
     else
     {
         source.Copy( destination );
     }
 }
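A minimal usage sketch (assuming the helper above is the Tools.ConvertToGrayscale method called from the motion-detection example below; the variable names are illustrative): allocate an 8 bpp destination of the same size and pass both images in.

    // hypothetical caller: 'source' is any supported UnmanagedImage
    UnmanagedImage gray = UnmanagedImage.Create( source.Width, source.Height, PixelFormat.Format8bppIndexed );
    Tools.ConvertToGrayscale( source, gray );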
Example No. 2
        // Gather statistics for the specified image
        private unsafe void ProcessImage( UnmanagedImage image, byte* mask, int maskLineSize )
        {
            // get image dimension
            int width  = image.Width;
            int height = image.Height;

            pixels = pixelsWithoutBlack = 0;

            red = green = blue = gray = null;
            redWithoutBlack = greenWithoutBlack = blueWithoutBlack = grayWithoutBlack = null;

            int maskOffset = maskLineSize - width;

            // check pixel format
            if ( image.PixelFormat == PixelFormat.Format8bppIndexed )
            {
                // alloc arrays
                int[] g   = new int[256];
                int[] gwb = new int[256];

                byte value;
                int  offset = image.Stride - width;

                // do the job
                byte * p = (byte*) image.ImageData.ToPointer( );

                if ( mask == null )
                {
                    // for each line
                    for ( int y = 0; y < height; y++ )
                    {
                        // for each pixel
                        for ( int x = 0; x < width; x++, p++ )
                        {
                            // get pixel value
                            value = *p;

                            g[value]++;
                            pixels++;

                            if ( value != 0 )
                            {
                                gwb[value]++;
                                pixelsWithoutBlack++;
                            }
                        }
                        p += offset;
                    }
                }
                else
                {
                    // for each line
                    for ( int y = 0; y < height; y++ )
                    {
                        // for each pixel
                        for ( int x = 0; x < width; x++, p++, mask++ )
                        {
                            if ( *mask == 0 )
                                continue;

                            // get pixel value
                            value = *p;

                            g[value]++;
                            pixels++;

                            if ( value != 0 )
                            {
                                gwb[value]++;
                                pixelsWithoutBlack++;
                            }
                        }
                        p += offset;
                        mask += maskOffset;
                    }
                }

                // create histogram for gray level
                gray = new Histogram( g );
                grayWithoutBlack = new Histogram( gwb );
            }
            else
            {
                // alloc arrays
                int[] r = new int[256];
                int[] g = new int[256];
                int[] b = new int[256];

                int[] rwb = new int[256];
                int[] gwb = new int[256];
                int[] bwb = new int[256];

                byte rValue, gValue, bValue;
                int  pixelSize = ( image.PixelFormat == PixelFormat.Format24bppRgb ) ? 3 : 4;
                int  offset = image.Stride - width * pixelSize;

                // do the job
                byte * p = (byte*) image.ImageData.ToPointer( );

                if ( mask == null )
                {
                    // for each line
                    for ( int y = 0; y < height; y++ )
                    {
                        // for each pixel
                        for ( int x = 0; x < width; x++, p += pixelSize )
                        {
                            // get pixel values
                            rValue = p[RGB.R];
                            gValue = p[RGB.G];
                            bValue = p[RGB.B];

                            r[rValue]++;
                            g[gValue]++;
                            b[bValue]++;
                            pixels++;

                            if ( ( rValue != 0 ) || ( gValue != 0 ) || ( bValue != 0 ) )
                            {
                                rwb[rValue]++;
                                gwb[gValue]++;
                                bwb[bValue]++;
                                pixelsWithoutBlack++;
                            }
                        }
                        p += offset;
                    }
                }
                else
                {
                    // for each line
                    for ( int y = 0; y < height; y++ )
                    {
                        // for each pixel
                        for ( int x = 0; x < width; x++, p += pixelSize, mask++ )
                        {
                            if ( *mask == 0 )
                                continue;

                            // get pixel values
                            rValue = p[RGB.R];
                            gValue = p[RGB.G];
                            bValue = p[RGB.B];

                            r[rValue]++;
                            g[gValue]++;
                            b[bValue]++;
                            pixels++;

                            if ( ( rValue != 0 ) || ( gValue != 0 ) || ( bValue != 0 ) )
                            {
                                rwb[rValue]++;
                                gwb[gValue]++;
                                bwb[bValue]++;
                                pixelsWithoutBlack++;
                            }
                        }
                        p += offset;
                        mask += maskOffset;
                    }
                }

                // create histograms
                red   = new Histogram( r );
                green = new Histogram( g );
                blue  = new Histogram( b );

                redWithoutBlack   = new Histogram( rwb );
                greenWithoutBlack = new Histogram( gwb );
                blueWithoutBlack  = new Histogram( bwb );
            }
        }
Example No. 3
        /// <summary>
        /// Process new video frame.
        /// </summary>
        /// 
        /// <param name="videoFrame">Video frame to process (detect motion in).</param>
        /// 
        /// <remarks><para>Processes a new frame from the video source and detects motion in it.</para>
        /// 
        /// <para>Check the <see cref="MotionLevel"/> property to get information about the amount of motion
        /// (changes) in the processed frame.</para>
        /// </remarks>
        /// 
        public unsafe void ProcessFrame( UnmanagedImage videoFrame )
        {
            lock ( sync )
            {
                // check previous frame
                if ( previousFrame == null )
                {
                    // save image dimension
                    width = videoFrame.Width;
                    height = videoFrame.Height;

                    // allocate memory for previous and current frames
                    previousFrame = UnmanagedImage.Create( width, height, PixelFormat.Format8bppIndexed );
                    motionFrame = UnmanagedImage.Create( width, height, PixelFormat.Format8bppIndexed );

                    frameSize = motionFrame.Stride * height;

                    // temporary buffer
                    if ( suppressNoise )
                    {
                        tempFrame = UnmanagedImage.Create( width, height, PixelFormat.Format8bppIndexed );
                    }

                    // convert source frame to grayscale
                    Tools.ConvertToGrayscale( videoFrame, previousFrame );

                    return;
                }

                // check image dimension
                if ( ( videoFrame.Width != width ) || ( videoFrame.Height != height ) )
                    return;

                // convert current image to grayscale
                Tools.ConvertToGrayscale( videoFrame, motionFrame );

                // pointers to previous and current frames
                byte* prevFrame = (byte*) previousFrame.ImageData.ToPointer( );
                byte* currFrame = (byte*) motionFrame.ImageData.ToPointer( );
                // difference value
                int diff;

                // 1 - get difference between frames
                // 2 - threshold the difference
                // 3 - copy current frame to previous frame
                for ( int i = 0; i < frameSize; i++, prevFrame++, currFrame++ )
                {
                    // difference
                    diff = (int) *currFrame - (int) *prevFrame;
                    // copy current frame to previous
                    *prevFrame = *currFrame;
                    // threshold
                    *currFrame = ( ( diff >= differenceThreshold ) || ( diff <= differenceThresholdNeg ) ) ? (byte) 255 : (byte) 0;
                }

                if ( suppressNoise )
                {
                    // suppress noise
                     BestCS.SystemTools.CopyUnmanagedMemory( tempFrame.ImageData, motionFrame.ImageData, frameSize );
                    erosionFilter.Apply( tempFrame, motionFrame );
                }

                // calculate amount of motion pixels
                pixelsChanged = 0;
                byte* motion = (byte*) motionFrame.ImageData.ToPointer( );

                for ( int i = 0; i < frameSize; i++, motion++ )
                {
                    pixelsChanged += ( *motion & 1 );
                }
            }
        }
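A hedged usage sketch for the method above (the surrounding class is a two-frames difference detector; the variable names and the 2% threshold are illustrative, not from the source):

    // hypothetical usage: feed consecutive frames, then read the motion level
    detector.ProcessFrame( firstFrame );    // first call only stores the frame
    detector.ProcessFrame( secondFrame );   // later calls compute the thresholded difference
    if ( detector.MotionLevel > 0.02f )
    {
        // more than 2% of the pixels changed between the two frames
    }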
Example No. 4
        /// <summary>
        /// Initializes a new instance of the <see cref="ImageStatisticsHSL"/> class.
        /// </summary>
        /// 
        /// <param name="image">Image to gather statistics about.</param>
        /// <param name="mask">Mask array which specifies areas to collect statistics for.</param>
        /// 
        /// <remarks><para>The mask array must be of the same size as the specified source image, where 0 values
        /// correspond to areas which should be excluded from processing. So statistics are calculated only for pixels
        /// which have a non-zero corresponding value in the mask.
        /// </para></remarks>
        /// 
        /// <exception cref="UnsupportedImageFormatException">Source pixel format is not supported.</exception>
        /// <exception cref="ArgumentException">Mask must have the same size as the source image to get statistics for.</exception>
        /// 
        public ImageStatisticsHSL( UnmanagedImage image, byte[,] mask )
        {
            CheckSourceFormat( image.PixelFormat );
            CheckMaskProperties( PixelFormat.Format8bppIndexed,
                new Size( mask.GetLength( 1 ), mask.GetLength( 0 ) ), new Size( image.Width, image.Height ) );

            unsafe
            {
                fixed ( byte* maskPtr = mask )
                {
                    ProcessImage( image, maskPtr, mask.GetLength( 1 ) );
                }
            }
        }
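A minimal usage sketch for the masked constructor. Note the mask is indexed [row, column], since mask.GetLength( 1 ) is treated as the width; the region coordinates below are illustrative only.

    // hypothetical usage: gather HSL statistics for a rectangular region only
    byte[,] mask = new byte[image.Height, image.Width];
    for ( int y = 10; y < 60; y++ )
    {
        for ( int x = 20; x < 120; x++ )
        {
            mask[y, x] = 1;   // non-zero value = include this pixel
        }
    }
    ImageStatisticsHSL stat = new ImageStatisticsHSL( image, mask );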
Example No. 5
 /// <summary>
 /// Initializes a new instance of the <see cref="ImageStatisticsHSL"/> class.
 /// </summary>
 /// 
 /// <param name="image">Unmanaged image to gather statistics about.</param>
 /// 
 /// <exception cref="UnsupportedImageFormatException">Source pixel format is not supported.</exception>
 /// 
 public ImageStatisticsHSL( UnmanagedImage image )
 {
     CheckSourceFormat( image.PixelFormat );
     unsafe
     {
         ProcessImage( image, null, 0 );
     }
 }
Example No. 6
        /// <summary>
        /// Reset motion detector to initial state.
        /// </summary>
        /// 
        /// <remarks><para>The method resets motion detection and motion processing algorithms by calling
        /// their <see cref="IMotionDetector.Reset"/> and <see cref="IMotionProcessing.Reset"/> methods.</para>
        /// </remarks>
        /// 
        public void Reset( )
        {
            lock ( sync )
            {
                if ( detector != null )
                {
                    detector.Reset( );
                }
                if ( processor != null )
                {
                    processor.Reset( );
                }

                videoWidth  = 0;
                videoHeight = 0;

                if ( zonesFrame != null )
                {
                    zonesFrame.Dispose( );
                    zonesFrame = null;
                }
            }
        }
Example No. 7
 /// <summary>
 /// Initializes a new instance of the <see cref="BlobCounter"/> class.
 /// </summary>
 /// 
 /// <param name="image">Unmanaged image to look for objects in.</param>
 /// 
 public BlobCounter( UnmanagedImage image ) : base( image ) { }
Example No. 8
        /// <summary>
        /// Allocate new image in unmanaged memory.
        /// </summary>
        /// 
        /// <param name="width">Image width.</param>
        /// <param name="height">Image height.</param>
        /// <param name="pixelFormat">Image pixel format.</param>
        /// 
        /// <returns>Return image allocated in unmanaged memory.</returns>
        /// 
        /// <remarks><para>Allocate new image with specified attributes in unmanaged memory.</para>
        /// 
        /// <para><note>The method supports only
        /// <see cref="System.Drawing.Imaging.PixelFormat">Format8bppIndexed</see>,
        /// <see cref="System.Drawing.Imaging.PixelFormat">Format16bppGrayScale</see>,
        /// <see cref="System.Drawing.Imaging.PixelFormat">Format24bppRgb</see>,
        /// <see cref="System.Drawing.Imaging.PixelFormat">Format32bppRgb</see>,
        /// <see cref="System.Drawing.Imaging.PixelFormat">Format32bppArgb</see>,
        /// <see cref="System.Drawing.Imaging.PixelFormat">Format32bppPArgb</see>,
        /// <see cref="System.Drawing.Imaging.PixelFormat">Format48bppRgb</see>,
        /// <see cref="System.Drawing.Imaging.PixelFormat">Format64bppArgb</see> and
        /// <see cref="System.Drawing.Imaging.PixelFormat">Format64bppPArgb</see> pixel formats.
        /// If the <see cref="System.Drawing.Imaging.PixelFormat">Format8bppIndexed</see>
        /// format is specified, a palette is not created for the image (it is assumed to be
        /// an 8 bpp grayscale image).
        /// </note></para>
        /// </remarks>
        /// 
        /// <exception cref="UnsupportedImageFormatException">Unsupported pixel format was specified.</exception>
        /// <exception cref="InvalidImagePropertiesException">Invalid image size was specified.</exception>
        /// 
        public static UnmanagedImage Create( int width, int height, PixelFormat pixelFormat )
        {
            int bytesPerPixel = 0;

            // calculate bytes per pixel
            switch ( pixelFormat )
            {
                case PixelFormat.Format8bppIndexed:
                    bytesPerPixel = 1;
                    break;
                case PixelFormat.Format16bppGrayScale:
                    bytesPerPixel = 2;
                    break;
                case PixelFormat.Format24bppRgb:
                    bytesPerPixel = 3;
                    break;
                case PixelFormat.Format32bppRgb:
                case PixelFormat.Format32bppArgb:
                case PixelFormat.Format32bppPArgb:
                    bytesPerPixel = 4;
                    break;
                case PixelFormat.Format48bppRgb:
                    bytesPerPixel = 6;
                    break;
                case PixelFormat.Format64bppArgb:
                case PixelFormat.Format64bppPArgb:
                    bytesPerPixel = 8;
                    break;
                default:
                    throw new UnsupportedImageFormatException( "Can not create image with specified pixel format." );
            }

            // check image size
            if ( ( width <= 0 ) || ( height <= 0 ) )
            {
                throw new InvalidImagePropertiesException( "Invalid image size specified." );
            }

            // calculate stride
            int stride = width * bytesPerPixel;

            if ( stride % 4 != 0 )
            {
                stride += ( 4 - ( stride % 4 ) );
            }

            // allocate memory for the image
            IntPtr imageData = System.Runtime.InteropServices.Marshal.AllocHGlobal( stride * height );
             BestCS.SystemTools.SetUnmanagedMemory( imageData, 0, stride * height );
            System.GC.AddMemoryPressure( stride * height );

            UnmanagedImage image = new UnmanagedImage( imageData, width, height, stride, pixelFormat );
            image.mustBeDisposed = true;

            return image;
        }
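A short usage sketch. Since Create( ) marks the image with mustBeDisposed = true, it is assumed the caller releases the buffer with Dispose( ), the same pattern the motion-zones example below uses for zonesFrame.

    // hypothetical usage: allocate a 24 bpp RGB image and free it when done
    UnmanagedImage image = UnmanagedImage.Create( 640, 480, PixelFormat.Format24bppRgb );
    try
    {
        // ... write pixel data through image.ImageData ...
    }
    finally
    {
        image.Dispose( );
    }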
Example No. 9
        /// <summary>
        /// Create unmanaged image from the specified managed image.
        /// </summary>
        /// 
        /// <param name="imageData">Source locked image data.</param>
        /// 
        /// <returns>Returns new unmanaged image, which is a copy of source managed image.</returns>
        /// 
        /// <remarks><para>The method creates an exact copy of the specified managed image, but allocated
        /// in unmanaged memory. This means that the managed image may be unlocked right after the call to this
        /// method.</para></remarks>
        /// 
        /// <exception cref="UnsupportedImageFormatException">Unsupported pixel format of source image.</exception>
        /// 
        public static UnmanagedImage FromManagedImage( BitmapData imageData )
        {
            PixelFormat pixelFormat = imageData.PixelFormat;

            // check source pixel format
            if (
                ( pixelFormat != PixelFormat.Format8bppIndexed ) &&
                ( pixelFormat != PixelFormat.Format16bppGrayScale ) &&
                ( pixelFormat != PixelFormat.Format24bppRgb ) &&
                ( pixelFormat != PixelFormat.Format32bppRgb ) &&
                ( pixelFormat != PixelFormat.Format32bppArgb ) &&
                ( pixelFormat != PixelFormat.Format32bppPArgb ) &&
                ( pixelFormat != PixelFormat.Format48bppRgb ) &&
                ( pixelFormat != PixelFormat.Format64bppArgb ) &&
                ( pixelFormat != PixelFormat.Format64bppPArgb ) )
            {
                throw new UnsupportedImageFormatException( "Unsupported pixel format of the source image." );
            }

            // allocate memory for the image
            IntPtr dstImageData = System.Runtime.InteropServices.Marshal.AllocHGlobal( imageData.Stride * imageData.Height );
            System.GC.AddMemoryPressure( imageData.Stride * imageData.Height );

            UnmanagedImage image = new UnmanagedImage( dstImageData, imageData.Width, imageData.Height, imageData.Stride, pixelFormat );
             BestCS.SystemTools.CopyUnmanagedMemory( dstImageData, imageData.Scan0, imageData.Stride * imageData.Height );
            image.mustBeDisposed = true;

            return image;
        }
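A usage sketch with the standard System.Drawing lock/unlock pattern. As the remarks note, the managed bitmap may be unlocked as soon as the copy has been made; the variable names are illustrative.

    // hypothetical usage: copy a managed Bitmap into unmanaged memory
    BitmapData data = bitmap.LockBits(
        new Rectangle( 0, 0, bitmap.Width, bitmap.Height ),
        ImageLockMode.ReadOnly, bitmap.PixelFormat );
    try
    {
        UnmanagedImage unmanaged = UnmanagedImage.FromManagedImage( data );
        // ... use 'unmanaged' (and dispose it later) ...
    }
    finally
    {
        bitmap.UnlockBits( data );
    }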
Example No. 10
        /// <summary>
        /// Clone the unmanaged image.
        /// </summary>
        /// 
        /// <returns>Returns clone of the unmanaged image.</returns>
        /// 
        /// <remarks><para>The method does complete cloning of the object.</para></remarks>
        /// 
        public UnmanagedImage Clone( )
        {
            // allocate memory for the image
            IntPtr newImageData = System.Runtime.InteropServices.Marshal.AllocHGlobal( stride * height );
            System.GC.AddMemoryPressure( stride * height );

            UnmanagedImage newImage = new UnmanagedImage( newImageData, width, height, stride, pixelFormat );
            newImage.mustBeDisposed = true;

             BestCS.SystemTools.CopyUnmanagedMemory( newImageData, imageData, stride * height );

            return newImage;
        }
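A brief usage sketch. The clone owns its own unmanaged buffer (mustBeDisposed is set), so it is assumed to need its own Dispose( ) call.

    // hypothetical usage: take an independent snapshot before modifying the image in place
    UnmanagedImage snapshot = image.Clone( );
    // ... process 'image' destructively ...
    snapshot.Dispose( );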
Example No. 11
        /// <summary>
        /// Copy unmanaged image.
        /// </summary>
        /// 
        /// <param name="destImage">Destination image to copy this image to.</param>
        /// 
        /// <remarks><para>The method copies the current unmanaged image to the specified image.
        /// The size and pixel format of the destination image must be exactly the same.</para></remarks>
        /// 
        /// <exception cref="InvalidImagePropertiesException">Destination image has different size or pixel format.</exception>
        /// 
        public void Copy( UnmanagedImage destImage )
        {
            if (
                ( width != destImage.width ) || ( height != destImage.height ) ||
                ( pixelFormat != destImage.pixelFormat ) )
            {
                throw new InvalidImagePropertiesException( "Destination image has different size or pixel format." );
            }

            if ( stride == destImage.stride )
            {
                // copy entire image
                 BestCS.SystemTools.CopyUnmanagedMemory( destImage.imageData, imageData, stride * height );
            }
            else
            {
                unsafe
                {
                    int dstStride = destImage.stride;
                    int copyLength = ( stride < dstStride ) ? stride : dstStride;

                    byte* src = (byte*) imageData.ToPointer( );
                    byte* dst = (byte*) destImage.imageData.ToPointer( );

                    // copy line by line
                    for ( int i = 0; i < height; i++ )
                    {
                         BestCS.SystemTools.CopyUnmanagedMemory( dst, src, copyLength );

                        dst += dstStride;
                        src += stride;
                    }
                }
            }
        }
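A minimal sketch of the expected call pattern: the destination must already be allocated with the same size and pixel format, for example via UnmanagedImage.Create( ).

    // hypothetical usage: back up the image into a pre-allocated destination
    UnmanagedImage backup = UnmanagedImage.Create( image.Width, image.Height, image.PixelFormat );
    image.Copy( backup );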
Example No. 12
        // Gather statistics for the specified image
        private unsafe void ProcessImage( UnmanagedImage image, byte* mask, int maskLineSize )
        {
            // get image dimension
            int width  = image.Width;
            int height = image.Height;

            pixels = pixelsWithoutBlack = 0;

            int[] yhisto  = new int[256];
            int[] cbhisto = new int[256];
            int[] crhisto = new int[256];

            int[] yhistoWB  = new int[256];
            int[] cbhistoWB = new int[256];
            int[] crhistoWB = new int[256];

            RGB   rgb   = new RGB( );
            YCbCr ycbcr = new YCbCr( );

            int pixelSize = ( image.PixelFormat == PixelFormat.Format24bppRgb ) ? 3 : 4;
            int offset = image.Stride - width * pixelSize;
            int maskOffset = maskLineSize - width;

            // do the job
            byte * p = (byte*) image.ImageData.ToPointer( );

            if ( mask == null )
            {
                // for each line
                for ( int y = 0; y < height; y++ )
                {
                    // for each pixel
                    for ( int x = 0; x < width; x++, p += pixelSize )
                    {
                        rgb.Red   = p[RGB.R];
                        rgb.Green = p[RGB.G];
                        rgb.Blue  = p[RGB.B];

                        // convert to YCbCr color space
                         BestCS.Imaging.YCbCr.FromRGB( rgb, ycbcr );

                        yhisto[(int) ( ycbcr.Y * 255 )]++;
                        cbhisto[(int) ( ( ycbcr.Cb + 0.5 ) * 255 )]++;
                        crhisto[(int) ( ( ycbcr.Cr + 0.5 ) * 255 )]++;

                        pixels++;

                        if ( ( ycbcr.Y != 0.0 ) || ( ycbcr.Cb != 0.0 ) || ( ycbcr.Cr != 0.0 ) )
                        {
                            yhistoWB[(int) ( ycbcr.Y * 255 )]++;
                            cbhistoWB[(int) ( ( ycbcr.Cb + 0.5 ) * 255 )]++;
                            crhistoWB[(int) ( ( ycbcr.Cr + 0.5 ) * 255 )]++;

                            pixelsWithoutBlack++;
                        }
                    }
                    p += offset;
                }
            }
            else
            {
                // for each line
                for ( int y = 0; y < height; y++ )
                {
                    // for each pixel
                    for ( int x = 0; x < width; x++, p += pixelSize, mask++ )
                    {
                        if ( *mask == 0 )
                            continue;

                        rgb.Red   = p[RGB.R];
                        rgb.Green = p[RGB.G];
                        rgb.Blue  = p[RGB.B];

                        // convert to YCbCr color space
                         BestCS.Imaging.YCbCr.FromRGB( rgb, ycbcr );

                        yhisto[(int) ( ycbcr.Y * 255 )]++;
                        cbhisto[(int) ( ( ycbcr.Cb + 0.5 ) * 255 )]++;
                        crhisto[(int) ( ( ycbcr.Cr + 0.5 ) * 255 )]++;

                        pixels++;

                        if ( ( ycbcr.Y != 0.0 ) || ( ycbcr.Cb != 0.0 ) || ( ycbcr.Cr != 0.0 ) )
                        {
                            yhistoWB[(int) ( ycbcr.Y * 255 )]++;
                            cbhistoWB[(int) ( ( ycbcr.Cb + 0.5 ) * 255 )]++;
                            crhistoWB[(int) ( ( ycbcr.Cr + 0.5 ) * 255 )]++;

                            pixelsWithoutBlack++;
                        }
                    }
                    p += offset;
                    mask += maskOffset;
                }
            }

            // create histograms
            yHistogram  = new ContinuousHistogram( yhisto,  new Range(  0.0f, 1.0f ) );
            cbHistogram = new ContinuousHistogram( cbhisto, new Range( -0.5f, 0.5f ) );
            crHistogram = new ContinuousHistogram( crhisto, new Range( -0.5f, 0.5f ) );

            yHistogramWithoutBlack  = new ContinuousHistogram( yhistoWB,  new Range(  0.0f, 1.0f ) );
            cbHistogramWithoutBlack = new ContinuousHistogram( cbhistoWB, new Range( -0.5f, 0.5f ) );
            crHistogramWithoutBlack = new ContinuousHistogram( crhistoWB, new Range( -0.5f, 0.5f ) );
        }
Example No. 13
        /// <summary>
        /// Process an image building Hough map.
        /// </summary>
        /// 
        /// <param name="image">Source unmanaged image to process.</param>
        /// <param name="rect">Image's rectangle to process.</param>
        /// 
        /// <exception cref="UnsupportedImageFormatException">Unsupported pixel format of the source image.</exception>
        /// 
        public void ProcessImage( UnmanagedImage image, Rectangle rect )
        {
            if ( image.PixelFormat != PixelFormat.Format8bppIndexed )
            {
                throw new UnsupportedImageFormatException( "Unsupported pixel format of the source image." );
            }

            // get source image size
            int width       = image.Width;
            int height      = image.Height;
            int halfWidth   = width / 2;
            int halfHeight  = height / 2;

            // make sure the specified rectangle resides within the source image
            rect.Intersect( new Rectangle( 0, 0, width, height ) );

            int startX = -halfWidth  + rect.Left;
            int startY = -halfHeight + rect.Top;
            int stopX  = width  - halfWidth  - ( width  - rect.Right );
            int stopY  = height - halfHeight - ( height - rect.Bottom );

            int offset = image.Stride - rect.Width;

            // calculate Hough map's width
            int halfHoughWidth = (int) Math.Sqrt( halfWidth * halfWidth + halfHeight * halfHeight );
            int houghWidth = halfHoughWidth * 2;

            houghMap = new short[houghHeight, houghWidth];

            // do the job
            unsafe
            {
                byte* src = (byte*) image.ImageData.ToPointer( ) +
                    rect.Top * image.Stride + rect.Left;

                // for each row
                for ( int y = startY; y < stopY; y++ )
                {
                    // for each pixel
                    for ( int x = startX; x < stopX; x++, src++ )
                    {
                        if ( *src != 0 )
                        {
                            // for each Theta value
                            for ( int theta = 0; theta < houghHeight; theta++ )
                            {
                                int radius = (int) Math.Round( cosMap[theta] * x - sinMap[theta] * y ) + halfHoughWidth;

                                if ( ( radius < 0 ) || ( radius >= houghWidth ) )
                                    continue;

                                houghMap[theta, radius]++;
                            }
                        }
                    }
                    src += offset;
                }
            }

            // find max value in Hough map
            maxMapIntensity = 0;
            for ( int i = 0; i < houghHeight; i++ )
            {
                for ( int j = 0; j < houghWidth; j++ )
                {
                    if ( houghMap[i, j] > maxMapIntensity )
                    {
                        maxMapIntensity = houghMap[i, j];
                    }
                }
            }

            CollectLines( );
        }
Example No. 14
 /// <summary>
 /// Process an image building Hough map.
 /// </summary>
 /// 
 /// <param name="image">Source unmanaged image to process.</param>
 /// 
 /// <exception cref="UnsupportedImageFormatException">Unsupported pixel format of the source image.</exception>
 /// 
 public void ProcessImage( UnmanagedImage image )
 {
     ProcessImage( image, new Rectangle( 0, 0, image.Width, image.Height ) );
 }
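A hedged usage sketch, assuming the surrounding class is a Hough line transformation operating on a binary 8 bpp edge image (non-zero pixels are treated as edge points, as the rectangle overload above shows); the variable names are illustrative.

    // hypothetical usage: run the transform over the whole edge image
    houghTransform.ProcessImage( edgeImage );
    // the accumulator (houghMap) and its peak intensity are now populated,
    // and the lines collected by the internal CollectLines( ) step can be queried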
Example No. 15
        /// <summary>
        /// Actual objects map building.
        /// </summary>
        /// 
        /// <param name="image">Unmanaged image to process.</param>
        /// 
        /// <remarks>The method supports 8 bpp indexed grayscale images and 24/32 bpp color images.</remarks>
        /// 
        /// <exception cref="UnsupportedImageFormatException">Unsupported pixel format of the source image.</exception>
        /// 
        protected override void BuildObjectsMap( UnmanagedImage image )
        {
            this.stride = image.Stride;

            // check pixel format
            if ( ( image.PixelFormat != PixelFormat.Format8bppIndexed ) &&
                 ( image.PixelFormat != PixelFormat.Format24bppRgb ) &&
                 ( image.PixelFormat != PixelFormat.Format32bppRgb ) &&
                 ( image.PixelFormat != PixelFormat.Format32bppArgb ) &&
                 ( image.PixelFormat != PixelFormat.Format32bppPArgb ) )
            {
                throw new UnsupportedImageFormatException( "Unsupported pixel format of the source image." );
            }

            // allocate temporary labels array
            tempLabels = new int[( imageWidth + 2 ) * ( imageHeight + 2 )];
            // fill boundaries with reserved value
            for ( int x = 0, mx = imageWidth + 2; x < mx; x++ )
            {
                tempLabels[x] = -1;
                tempLabels[x + ( imageHeight + 1 ) * ( imageWidth + 2 )] = -1;
            }
            for ( int y = 0, my = imageHeight + 2; y < my; y++ )
            {
                tempLabels[y * ( imageWidth + 2 )] = -1;
                tempLabels[y * ( imageWidth + 2 ) + imageWidth + 1] = -1;
            }

            // initial objects count
            objectsCount = 0;

            // do the job
            unsafe
            {
                byte* src = (byte*) image.ImageData.ToPointer( );
                int p = imageWidth + 2 + 1;

                if ( image.PixelFormat == PixelFormat.Format8bppIndexed )
                {
                    int offset = stride - imageWidth;

                    // for each line
                    for ( int y = 0; y < imageHeight; y++ )
                    {
                        // for each pixel
                        for ( int x = 0; x < imageWidth; x++, src++, p++ )
                        {
                            // check for non-labeled pixel
                            if ( ( *src > backgroundThresholdG ) && ( tempLabels[p] == 0 ) )
                            {
                                objectsCount++;
                                LabelPixel( src, p );
                            }
                        }
                        src += offset;
                        p += 2;
                    }
                }
                else
                {
                    pixelSize = Bitmap.GetPixelFormatSize( image.PixelFormat ) / 8;
                    int offset = stride - imageWidth * pixelSize;

                    // for each line
                    for ( int y = 0; y < imageHeight; y++ )
                    {
                        // for each pixel
                        for ( int x = 0; x < imageWidth; x++, src += pixelSize, p++ )
                        {
                            // check for non-labeled pixel
                            if ( (
                                    ( src[RGB.R] > backgroundThresholdR ) ||
                                    ( src[RGB.G] > backgroundThresholdG ) ||
                                    ( src[RGB.B] > backgroundThresholdB )
                                  ) && 
                                ( tempLabels[p] == 0 ) )
                            {
                                objectsCount++;
                                LabelColorPixel( src, p );
                            }
                        }
                        src += offset;
                        p += 2;
                    }
                }
            }

            // allocate labels array
            objectLabels = new int[imageWidth * imageHeight];

            for ( int y = 0; y < imageHeight; y++ )
            {
                Array.Copy( tempLabels, ( y + 1 ) * ( imageWidth + 2 ) + 1, objectLabels, y * imageWidth, imageWidth );
            }
        }
Example No. 16
        /// <summary>
        /// Process an image building Hough map.
        /// </summary>
        /// 
        /// <param name="image">Source unmanaged image to process.</param>
        /// 
        /// <exception cref="UnsupportedImageFormatException">Unsupported pixel format of the source image.</exception>
        /// 
        public void ProcessImage( UnmanagedImage image )
        {
            if ( image.PixelFormat != PixelFormat.Format8bppIndexed )
            {
                throw new UnsupportedImageFormatException( "Unsupported pixel format of the source image." );
            }

            // get source image size
            width  = image.Width;
            height = image.Height;

            int srcOffset = image.Stride - width;

            // allocate Hough map of the same size as the image
            houghMap = new short[height, width];

            // do the job
            unsafe
            {
                byte* src = (byte*) image.ImageData.ToPointer( );

                // for each row
                for ( int y = 0; y < height; y++ )
                {
                    // for each pixel
                    for ( int x = 0; x < width; x++, src++ )
                    {
                        if ( *src != 0 )
                        {
                            DrawHoughCircle( x, y );
                        }
                    }
                    src += srcOffset;
                }
            }

            // find max value in Hough map
            maxMapIntensity = 0;
            for ( int i = 0; i < height; i++ )
            {
                for ( int j = 0; j < width; j++ )
                {
                    if ( houghMap[i, j] > maxMapIntensity )
                    {
                        maxMapIntensity = houghMap[i, j];
                    }
                }
            }

            CollectCircles( );
        }
Example No. 17
        /// <summary>
        /// Process new video frame.
        /// </summary>
        /// 
        /// <param name="videoFrame">Video frame to process (detect motion in).</param>
        /// 
        /// <returns>Returns the amount of motion, which is provided by the <see cref="IMotionDetector.MotionLevel"/>
        /// property of the <see cref="MotionDetectionAlgorithm">motion detection algorithm in use</see>.</returns>
        /// 
        /// <remarks><para>The method first applies the motion detection algorithm to the specified video
        /// frame to calculate the <see cref="IMotionDetector.MotionLevel">motion level</see> and the
        /// <see cref="IMotionDetector.MotionFrame">motion frame</see>. After this it applies the motion processing algorithm
        /// (if one was set) to do further post-processing, like highlighting motion areas, counting moving
        /// objects, etc.</para>
        /// 
        /// <para><note>If the <see cref="MotionZones"/> property is set, this method will perform
        /// motion filtering right after the motion algorithm is done and before passing the motion frame to the motion
        /// processing algorithm. The method does the filtering directly on the motion frame produced
        /// by the motion detection algorithm. At the same time the method recalculates the motion level and returns
        /// the new value, which takes the motion zones into account (but the new value is not set back to the motion
        /// detection algorithm's <see cref="IMotionDetector.MotionLevel"/> property).
        /// </note></para>
        /// </remarks>
        /// 
        public float ProcessFrame( UnmanagedImage videoFrame )
        {
            lock ( sync )
            {
                if ( detector == null )
                    return 0;

                videoWidth  = videoFrame.Width;
                videoHeight = videoFrame.Height;

                float motionLevel = 0;
                // call motion detection
                detector.ProcessFrame( videoFrame );
                motionLevel = detector.MotionLevel;

                // check if motion zones are specified
                if ( motionZones != null )
                {
                    if ( zonesFrame == null )
                    {
                        CreateMotionZonesFrame( );
                    }

                    if ( ( videoWidth == zonesFrame.Width ) && ( videoHeight == zonesFrame.Height ) )
                    {
                        unsafe
                        {
                            // pointers to background and current frames
                            byte* zonesPtr  = (byte*) zonesFrame.ImageData.ToPointer( );
                            byte* motionPtr = (byte*) detector.MotionFrame.ImageData.ToPointer( );

                            motionLevel = 0;

                            for ( int i = 0, frameSize = zonesFrame.Stride * videoHeight; i < frameSize; i++, zonesPtr++, motionPtr++ )
                            {
                                *motionPtr &= *zonesPtr;
                                motionLevel += ( *motionPtr & 1 );
                            }
                            motionLevel /= ( videoWidth * videoHeight );
                        }
                    }
                }

                // call motion post processing
                if ( ( processor != null ) && ( detector.MotionFrame != null ) )
                {
                    processor.ProcessFrame( videoFrame, detector.MotionFrame );
                }

                return motionLevel;
            }
        }
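A usage sketch for the wrapper method above (variable names and the alarm threshold are illustrative, not from the source):

    // hypothetical usage: process a frame and react to the returned motion level
    float level = motionDetector.ProcessFrame( videoFrame );
    if ( level > 0.01f )
    {
        // at least ~1% of the (zone-filtered) frame changed, so raise an alarm here
    }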
Example No. 18
        /// <summary>
        /// Initializes a new instance of the <see cref="VerticalIntensityStatistics"/> class.
        /// </summary>
        /// 
        /// <param name="image">Source unmanaged image.</param>
        /// 
        /// <exception cref="UnsupportedImageFormatException">Unsupported pixel format of the source image.</exception>
        /// 
        public VerticalIntensityStatistics( UnmanagedImage image )
        {
            // check image format
            if (
                ( image.PixelFormat != PixelFormat.Format8bppIndexed ) &&
                ( image.PixelFormat != PixelFormat.Format16bppGrayScale ) &&
                ( image.PixelFormat != PixelFormat.Format24bppRgb ) &&
                ( image.PixelFormat != PixelFormat.Format32bppRgb ) &&
                ( image.PixelFormat != PixelFormat.Format32bppArgb ) &&
                ( image.PixelFormat != PixelFormat.Format48bppRgb ) &&
                ( image.PixelFormat != PixelFormat.Format64bppArgb )
                )
            {
                throw new UnsupportedImageFormatException( "Unsupported pixel format of the source image." );
            }

            // gather statistics
            ProcessImage( image );
        }
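A short usage sketch. It is assumed that the per-row histograms built by ProcessImage (see Example No. 20) are exposed through properties such as Gray or Red/Green/Blue; those property names do not appear in this snippet and are therefore assumptions.

    // hypothetical usage: per-row intensity statistics of a grayscale image
    VerticalIntensityStatistics vis = new VerticalIntensityStatistics( grayImage );
    // e.g. vis.Gray (assumed property name) would hold one histogram entry per image row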
Example No. 19
        // Create motion zones' image
        private unsafe void CreateMotionZonesFrame( )
        {
            lock ( sync )
            {
                // free previous motion zones frame
                if ( zonesFrame != null )
                {
                    zonesFrame.Dispose( );
                    zonesFrame = null;
                }

                // create motion zones frame only if the algorithm has processed at least one frame
                if ( ( motionZones != null ) && ( motionZones.Length != 0 ) && ( videoWidth != 0 ) )
                {
                    zonesFrame = UnmanagedImage.Create( videoWidth, videoHeight, PixelFormat.Format8bppIndexed );

                    Rectangle imageRect = new Rectangle( 0, 0, videoWidth, videoHeight );

                    // draw all motion zones on motion frame
                    foreach ( Rectangle rect in motionZones )
                    {
                        rect.Intersect( imageRect );

                        // rectangle's dimensions
                        int rectWidth  = rect.Width;
                        int rectHeight = rect.Height;

                        // start pointer
                        int stride = zonesFrame.Stride;
                        byte* ptr = (byte*) zonesFrame.ImageData.ToPointer( ) + rect.Y * stride + rect.X;

                        for ( int y = 0; y < rectHeight; y++ )
                        {
                             BestCS.SystemTools.SetUnmanagedMemory( ptr, 255, rectWidth );
                            ptr += stride;
                        }
                    }
                }
            }
        }
Example No. 20
        /// <summary>
        /// Gather vertical intensity statistics for specified image.
        /// </summary>
        /// 
        /// <param name="image">Source image.</param>
        /// 
        private void ProcessImage( UnmanagedImage image )
        {
            PixelFormat pixelFormat = image.PixelFormat;
            // get image dimension
            int width  = image.Width;
            int height = image.Height;

            red = green = blue = gray = null;

            // do the job
            unsafe
            {
                // check pixel format
                if ( pixelFormat == PixelFormat.Format8bppIndexed )
                {
                    // 8 bpp grayscale image
                    byte* p = (byte*) image.ImageData.ToPointer( );
                    int offset = image.Stride - width;

                    // histogram array
                    int[] g = new int[height];

                    // for each line
                    for ( int y = 0; y < height; y++ )
                    {
                        int lineSum = 0;

                        // for each pixel
                        for ( int x = 0; x < width; x++, p++ )
                        {
                            lineSum += *p;
                        }
                        g[y] = lineSum;

                        p += offset;
                    }

                    // create histogram for gray level
                    gray = new Histogram( g );
                }
                else if ( pixelFormat == PixelFormat.Format16bppGrayScale )
                {
                    // 16 bpp grayscale image
                    byte* basePtr = (byte*) image.ImageData.ToPointer( );
                    int stride = image.Stride;

                    // histogram array
                    int[] g = new int[height];

                    // for each line
                    for ( int y = 0; y < height; y++ )
                    {
                        ushort* p = (ushort*) ( basePtr + stride * y );
                        int lineSum = 0;

                        // for each pixel
                        for ( int x = 0; x < width; x++, p++ )
                        {
                            lineSum += *p;
                        }
                        g[y] = lineSum;
                    }

                    // create histogram for gray level
                    gray = new Histogram( g );
                }
                else if (
                    ( pixelFormat == PixelFormat.Format24bppRgb ) ||
                    ( pixelFormat == PixelFormat.Format32bppRgb ) ||
                    ( pixelFormat == PixelFormat.Format32bppArgb ) )
                {
                    // 24/32 bpp color image
                    byte* p = (byte*) image.ImageData.ToPointer( );
                    int pixelSize = ( pixelFormat == PixelFormat.Format24bppRgb ) ? 3 : 4;
                    int offset = image.Stride - width * pixelSize;

                    // histogram arrays
                    int[] r = new int[height];
                    int[] g = new int[height];
                    int[] b = new int[height];

                    // for each line
                    for ( int y = 0; y < height; y++ )
                    {
                        int lineRSum = 0;
                        int lineGSum = 0;
                        int lineBSum = 0;

                        // for each pixel
                        for ( int x = 0; x < width; x++, p += pixelSize )
                        {
                            lineRSum += p[RGB.R];
                            lineGSum += p[RGB.G];
                            lineBSum += p[RGB.B];
                        }
                        r[y] = lineRSum;
                        g[y] = lineGSum;
                        b[y] = lineBSum;

                        p += offset;
                    }

                    // create histograms
                    red   = new Histogram( r );
                    green = new Histogram( g );
                    blue  = new Histogram( b );
                }
                else if (
                    ( pixelFormat == PixelFormat.Format48bppRgb ) ||
                    ( pixelFormat == PixelFormat.Format64bppArgb ) )
                {
                    // 48/64 bpp color image
                    byte* basePtr = (byte*) image.ImageData.ToPointer( );
                    int stride = image.Stride;
                    int pixelSize = ( pixelFormat == PixelFormat.Format48bppRgb ) ? 3 : 4;

                    // histogram arrays
                    int[] r = new int[height];
                    int[] g = new int[height];
                    int[] b = new int[height];

                    // for each line
                    for ( int y = 0; y < height; y++ )
                    {
                        ushort* p = (ushort*) ( basePtr + stride * y );

                        int lineRSum = 0;
                        int lineGSum = 0;
                        int lineBSum = 0;

                        // for each pixel
                        for ( int x = 0; x < width; x++, p += pixelSize )
                        {
                            lineRSum += p[RGB.R];
                            lineGSum += p[RGB.G];
                            lineBSum += p[RGB.B];
                        }
                        r[y] = lineRSum;
                        g[y] = lineGSum;
                        b[y] = lineBSum;
                    }

                    // create histograms
                    red   = new Histogram( r );
                    green = new Histogram( g );
                    blue  = new Histogram( b );
                }
            }
        }
Example No. 21
        /// <summary>
        /// Actual objects map building.
        /// </summary>
        /// 
        /// <param name="image">Unmanaged image to process.</param>
        /// 
        /// <remarks>The method supports 8 bpp indexed grayscale images and 24/32 bpp color images.</remarks>
        /// 
        /// <exception cref="UnsupportedImageFormatException">Unsupported pixel format of the source image.</exception>
        /// <exception cref="InvalidImagePropertiesException">Cannot process images that are one pixel wide. Rotate the image
        /// or use <see cref="RecursiveBlobCounter"/>.</exception>
        /// 
        protected override void BuildObjectsMap( UnmanagedImage image )
        {
            int stride = image.Stride;

            // check pixel format
            if ( ( image.PixelFormat != PixelFormat.Format8bppIndexed ) &&
                 ( image.PixelFormat != PixelFormat.Format24bppRgb ) &&
                 ( image.PixelFormat != PixelFormat.Format32bppRgb ) &&
                 ( image.PixelFormat != PixelFormat.Format32bppArgb ) &&
                 ( image.PixelFormat != PixelFormat.Format32bppPArgb ) )
            {
                throw new UnsupportedImageFormatException( "Unsupported pixel format of the source image." );
            }

            // we don't want one-pixel-wide images
            if ( imageWidth == 1 )
            {
                throw new InvalidImagePropertiesException( "BlobCounter cannot process images that are one pixel wide. Rotate the image or use RecursiveBlobCounter." );
            }

            int imageWidthM1 = imageWidth - 1;

            // allocate labels array
            objectLabels = new int[imageWidth * imageHeight];
            // initial labels count
            int labelsCount = 0;

            // create map
            int maxObjects = ( ( imageWidth / 2 ) + 1 ) * ( ( imageHeight / 2 ) + 1 ) + 1;
            int[] map = new int[maxObjects];

            // initially map all labels to themselves
            for ( int i = 0; i < maxObjects; i++ )
            {
                map[i] = i;
            }

            // do the job
            unsafe
            {
                byte* src = (byte*) image.ImageData.ToPointer( );
                int p = 0;

                if ( image.PixelFormat == PixelFormat.Format8bppIndexed )
                {
                    int offset = stride - imageWidth;

                    // 1 - for pixels of the first row
                    if ( *src > backgroundThresholdG )
                    {
                        objectLabels[p] = ++labelsCount;
                    }
                    ++src;
                    ++p;

                    // process the rest of the first row
                    for ( int x = 1; x < imageWidth; x++, src++, p++ )
                    {
                        // check if we need to label current pixel
                        if ( *src > backgroundThresholdG )
                        {
                            // check if the previous pixel already was labeled
                            if ( src[-1] > backgroundThresholdG )
                            {
                                // label current pixel, as the previous
                                objectLabels[p] = objectLabels[p - 1];
                            }
                            else
                            {
                                // create new label
                                objectLabels[p] = ++labelsCount;
                            }
                        }
                    }
                    src += offset;

                    // 2 - for other rows
                    // for each row
                    for ( int y = 1; y < imageHeight; y++ )
                    {
                        // for the first pixel of the row, we need to check
                        // only upper and upper-right pixels
                        if ( *src > backgroundThresholdG )
                        {
                            // check surrounding pixels
                            if ( src[-stride] > backgroundThresholdG )
                            {
                                // label current pixel, as the above
                                objectLabels[p] = objectLabels[p - imageWidth];
                            }
                            else if ( src[1 - stride] > backgroundThresholdG )
                            {
                                // label current pixel, as the above right
                                objectLabels[p] = objectLabels[p + 1 - imageWidth];
                            }
                            else
                            {
                                // create new label
                                objectLabels[p] = ++labelsCount;
                            }
                        }
                        ++src;
                        ++p;

                        // check left pixel and three upper pixels for the rest of pixels
                        for ( int x = 1; x < imageWidthM1; x++, src++, p++ )
                        {
                            if ( *src > backgroundThresholdG )
                            {
                                // check surrounding pixels
                                if ( src[-1] > backgroundThresholdG )
                                {
                                    // label current pixel, as the left
                                    objectLabels[p] = objectLabels[p - 1];
                                }
                                else if ( src[-1 - stride] > backgroundThresholdG )
                                {
                                    // label current pixel, as the above left
                                    objectLabels[p] = objectLabels[p - 1 - imageWidth];
                                }
                                else if ( src[-stride] > backgroundThresholdG )
                                {
                                    // label current pixel, as the above
                                    objectLabels[p] = objectLabels[p - imageWidth];
                                }

                                if ( src[1 - stride] > backgroundThresholdG )
                                {
                                    if ( objectLabels[p] == 0 )
                                    {
                                        // label current pixel, as the above right
                                        objectLabels[p] = objectLabels[p + 1 - imageWidth];
                                    }
                                    else
                                    {
                                        int l1 = objectLabels[p];
                                        int l2 = objectLabels[p + 1 - imageWidth];

                                        if ( ( l1 != l2 ) && ( map[l1] != map[l2] ) )
                                        {
                                            // merge
                                            if ( map[l1] == l1 )
                                            {
                                                // map left value to the right
                                                map[l1] = map[l2];
                                            }
                                            else if ( map[l2] == l2 )
                                            {
                                                // map right value to the left
                                                map[l2] = map[l1];
                                            }
                                            else
                                            {
                                                // both values already mapped
                                                map[map[l1]] = map[l2];
                                                map[l1] = map[l2];
                                            }

                                            // reindex
                                            for ( int i = 1; i <= labelsCount; i++ )
                                            {
                                                if ( map[i] != i )
                                                {
                                                    // reindex
                                                    int j = map[i];
                                                    while ( j != map[j] )
                                                    {
                                                        j = map[j];
                                                    }
                                                    map[i] = j;
                                                }
                                            }
                                        }
                                    }
                                }

                                // label the object if it has not been labeled yet
                                if ( objectLabels[p] == 0 )
                                {
                                    // create new label
                                    objectLabels[p] = ++labelsCount;
                                }
                            }
                        }

                        // for the last pixel of the row, we need to check
                        // the left, upper-left and upper pixels
                        if ( *src > backgroundThresholdG )
                        {
                            // check surrounding pixels
                            if ( src[-1] > backgroundThresholdG )
                            {
                                // label current pixel, as the left
                                objectLabels[p] = objectLabels[p - 1];
                            }
                            else if ( src[-1 - stride] > backgroundThresholdG )
                            {
                                // label current pixel, as the above left
                                objectLabels[p] = objectLabels[p - 1 - imageWidth];
                            }
                            else if ( src[-stride] > backgroundThresholdG )
                            {
                                // label current pixel, as the above
                                objectLabels[p] = objectLabels[p - imageWidth];
                            }
                            else
                            {
                                // create new label
                                objectLabels[p] = ++labelsCount;
                            }
                        }
                        ++src;
                        ++p;

                        src += offset;
                    }
                }
                else
                {
                    // color images
                    int pixelSize = Bitmap.GetPixelFormatSize( image.PixelFormat ) / 8;
                    int offset = stride - imageWidth * pixelSize;

                    int strideM1 = stride - pixelSize;
                    int strideP1 = stride + pixelSize;

                    // 1 - for pixels of the first row
                    if ( ( src[RGB.R] > backgroundThresholdR ) ||
                         ( src[RGB.G] > backgroundThresholdG ) ||
                         ( src[RGB.B] > backgroundThresholdB ) )
                    {
                        objectLabels[p] = ++labelsCount;
                    }
                    src += pixelSize;
                    ++p;

                    // process the rest of the first row
                    for ( int x = 1; x < imageWidth; x++, src += pixelSize, p++ )
                    {
                        // check if we need to label current pixel
                        if ( ( src[RGB.R] > backgroundThresholdR ) ||
                             ( src[RGB.G] > backgroundThresholdG ) ||
                             ( src[RGB.B] > backgroundThresholdB ) )
                        {
                            // check if the previous pixel already was labeled
                            if ( ( src[RGB.R - pixelSize] > backgroundThresholdR ) ||
                                 ( src[RGB.G - pixelSize] > backgroundThresholdG ) ||
                                 ( src[RGB.B - pixelSize] > backgroundThresholdB ) )
                            {
                                // label current pixel, as the previous
                                objectLabels[p] = objectLabels[p - 1];
                            }
                            else
                            {
                                // create new label
                                objectLabels[p] = ++labelsCount;
                            }
                        }
                    }
                    src += offset;

                    // 2 - for other rows
                    // for each row
                    for ( int y = 1; y < imageHeight; y++ )
                    {
                        // for the first pixel of the row, we need to check
                        // only upper and upper-right pixels
                        if ( ( src[RGB.R] > backgroundThresholdR ) ||
                             ( src[RGB.G] > backgroundThresholdG ) ||
                             ( src[RGB.B] > backgroundThresholdB ) )
                        {
                            // check surrounding pixels
                            if ( ( src[RGB.R - stride] > backgroundThresholdR ) ||
                                 ( src[RGB.G - stride] > backgroundThresholdG ) ||
                                 ( src[RGB.B - stride] > backgroundThresholdB ) )
                            {
                                // label current pixel, as the above
                                objectLabels[p] = objectLabels[p - imageWidth];
                            }
                            else if ( ( src[RGB.R - strideM1] > backgroundThresholdR ) ||
                                      ( src[RGB.G - strideM1] > backgroundThresholdG ) ||
                                      ( src[RGB.B - strideM1] > backgroundThresholdB ) )
                            {
                                // label current pixel, as the above right
                                objectLabels[p] = objectLabels[p + 1 - imageWidth];
                            }
                            else
                            {
                                // create new label
                                objectLabels[p] = ++labelsCount;
                            }
                        }
                        src += pixelSize;
                        ++p;

                        // for the remaining pixels of the row, check the left pixel and the three upper pixels
                        for ( int x = 1; x < imageWidth - 1; x++, src += pixelSize, p++ )
                        {
                            if ( ( src[RGB.R] > backgroundThresholdR ) ||
                                 ( src[RGB.G] > backgroundThresholdG ) ||
                                 ( src[RGB.B] > backgroundThresholdB ) )
                            {
                                // check surrounding pixels
                                if ( ( src[RGB.R - pixelSize] > backgroundThresholdR ) ||
                                     ( src[RGB.G - pixelSize] > backgroundThresholdG ) ||
                                     ( src[RGB.B - pixelSize] > backgroundThresholdB ) )
                                {
                                    // label current pixel, as the left
                                    objectLabels[p] = objectLabels[p - 1];
                                }
                                else if ( ( src[RGB.R - strideP1] > backgroundThresholdR ) ||
                                          ( src[RGB.G - strideP1] > backgroundThresholdG ) ||
                                          ( src[RGB.B - strideP1] > backgroundThresholdB ) )
                                {
                                    // label current pixel, as the above left
                                    objectLabels[p] = objectLabels[p - 1 - imageWidth];
                                }
                                else if ( ( src[RGB.R - stride] > backgroundThresholdR ) ||
                                          ( src[RGB.G - stride] > backgroundThresholdG ) ||
                                          ( src[RGB.B - stride] > backgroundThresholdB ) )
                                {
                                    // label current pixel, as the above
                                    objectLabels[p] = objectLabels[p - imageWidth];
                                }

                                if ( ( src[RGB.R - strideM1] > backgroundThresholdR ) ||
                                     ( src[RGB.G - strideM1] > backgroundThresholdG ) ||
                                     ( src[RGB.B - strideM1] > backgroundThresholdB ) )
                                {
                                    if ( objectLabels[p] == 0 )
                                    {
                                        // label current pixel, as the above right
                                        objectLabels[p] = objectLabels[p + 1 - imageWidth];
                                    }
                                    else
                                    {
                                        int l1 = objectLabels[p];
                                        int l2 = objectLabels[p + 1 - imageWidth];

                                        if ( ( l1 != l2 ) && ( map[l1] != map[l2] ) )
                                        {
                                            // merge
                                            if ( map[l1] == l1 )
                                            {
                                                // map left value to the right
                                                map[l1] = map[l2];
                                            }
                                            else if ( map[l2] == l2 )
                                            {
                                                // map right value to the left
                                                map[l2] = map[l1];
                                            }
                                            else
                                            {
                                                // both values already mapped
                                                map[map[l1]] = map[l2];
                                                map[l1] = map[l2];
                                            }

                                            // reindex
                                            for ( int i = 1; i <= labelsCount; i++ )
                                            {
                                                if ( map[i] != i )
                                                {
                                                    // reindex
                                                    int j = map[i];
                                                    while ( j != map[j] )
                                                    {
                                                        j = map[j];
                                                    }
                                                    map[i] = j;
                                                }
                                            }
                                        }
                                    }
                                }

                                // label the object if it has not been labeled yet
                                if ( objectLabels[p] == 0 )
                                {
                                    // create new label
                                    objectLabels[p] = ++labelsCount;
                                }
                            }
                        }

                        // for the last pixel of the row, we need to check
                        // the left, upper-left and upper pixels
                        if ( ( src[RGB.R] > backgroundThresholdR ) ||
                             ( src[RGB.G] > backgroundThresholdG ) ||
                             ( src[RGB.B] > backgroundThresholdB ) )
                        {
                            // check surrounding pixels
                            if ( ( src[RGB.R - pixelSize] > backgroundThresholdR ) ||
                                 ( src[RGB.G - pixelSize] > backgroundThresholdG ) ||
                                 ( src[RGB.B - pixelSize] > backgroundThresholdB ) )
                            {
                                // label current pixel, as the left
                                objectLabels[p] = objectLabels[p - 1];
                            }
                            else if ( ( src[RGB.R - strideP1] > backgroundThresholdR ) ||
                                      ( src[RGB.G - strideP1] > backgroundThresholdG ) ||
                                      ( src[RGB.B - strideP1] > backgroundThresholdB ) )
                            {
                                // label current pixel, as the above left
                                objectLabels[p] = objectLabels[p - 1 - imageWidth];
                            }
                            else if ( ( src[RGB.R - stride] > backgroundThresholdR ) ||
                                      ( src[RGB.G - stride] > backgroundThresholdG ) ||
                                      ( src[RGB.B - stride] > backgroundThresholdB ) )
                            {
                                // label current pixel, as the above
                                objectLabels[p] = objectLabels[p - imageWidth];
                            }
                            else
                            {
                                // create new label
                                objectLabels[p] = ++labelsCount;
                            }
                        }
                        src += pixelSize;
                        ++p;

                        src += offset;
                    }
                }
            }

            // allocate remapping array
            int[] reMap = new int[map.Length];

            // count objects and prepare remapping array
            objectsCount = 0;
            for ( int i = 1; i <= labelsCount; i++ )
            {
                if ( map[i] == i )
                {
                    // increase objects count
                    reMap[i] = ++objectsCount;
                }
            }
            // second pass to complete remapping
            for ( int i = 1; i <= labelsCount; i++ )
            {
                if ( map[i] != i )
                {
                    reMap[i] = reMap[map[i]];
                }
            }

            // repair object labels
            for ( int i = 0, n = objectLabels.Length; i < n; i++ )
            {
                objectLabels[i] = reMap[objectLabels[i]];
            }
        }
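
For reference, a minimal standalone sketch (a hypothetical helper, not part of the library) of the label remapping performed by the final passes above: roots of the equivalence map receive dense object numbers, and every merged label inherits the number of its root.

        // Hypothetical helper shown only to illustrate the remapping above;
        // it assumes map[] has already been fully flattened by the reindex loops.
        static int[] ResolveLabels( int[] map, int labelsCount )
        {
            int[] reMap = new int[map.Length];
            int objectsCount = 0;

            // every root of the equivalence map becomes a final, densely numbered label
            for ( int i = 1; i <= labelsCount; i++ )
            {
                if ( map[i] == i )
                {
                    reMap[i] = ++objectsCount;
                }
            }

            // merged labels inherit the number assigned to their root
            for ( int i = 1; i <= labelsCount; i++ )
            {
                if ( map[i] != i )
                {
                    reMap[i] = reMap[map[i]];
                }
            }

            return reMap;
        }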
Exemplo n.º 22
0
        /// <summary>
        /// Process image looking for corners.
        /// </summary>
        /// 
        /// <param name="image">Unmanaged source image to process.</param>
        /// 
        /// <returns>Returns array of found corners (X-Y coordinates).</returns>
        ///
        /// <exception cref="UnsupportedImageFormatException">The source image has incorrect pixel format.</exception>
        /// 
        public List<IntPoint> ProcessImage( UnmanagedImage image )
        {
            // check image format
            if (
                ( image.PixelFormat != PixelFormat.Format8bppIndexed ) &&
                ( image.PixelFormat != PixelFormat.Format24bppRgb ) &&
                ( image.PixelFormat != PixelFormat.Format32bppRgb ) &&
                ( image.PixelFormat != PixelFormat.Format32bppArgb )
                )
            {
                throw new UnsupportedImageFormatException( "Unsupported pixel format of the source image." );
            }

            // get source image size
            int width  = image.Width;
            int height = image.Height;
            int stride = image.Stride;
            int pixelSize = Bitmap.GetPixelFormatSize( image.PixelFormat ) / 8;
            // window radius
            int windowRadius = windowSize / 2;

            // offset
            int offset = stride - windowSize * pixelSize;

            // create moravec cornerness map
            int[,] moravecMap = new int[height, width];

            // do the job
            unsafe
            {
                byte* ptr = (byte*) image.ImageData.ToPointer( );

                // for each row
                for ( int y = windowRadius, maxY = height - windowRadius; y < maxY; y++ )
                {
                    // for each pixel
                    for ( int x = windowRadius, maxX = width - windowRadius; x < maxX; x++ )
                    {
                        int minSum = int.MaxValue;

                        // go through 8 possible shifting directions
                        for ( int k = 0; k < 8; k++ )
                        {
                            // calculate center of shifted window
                            int sy = y + yDelta[k];
                            int sx = x + xDelta[k];

                            // check if shifted window is within the image
                            if (
                                ( sy < windowRadius ) || ( sy >= maxY ) ||
                                ( sx < windowRadius ) || ( sx >= maxX )
                            )
                            {
                                // skip this shifted window
                                continue;
                            }

                            int sum = 0;

                            byte* ptr1 = ptr + ( y - windowRadius )  * stride + ( x - windowRadius )  * pixelSize;
                            byte* ptr2 = ptr + ( sy - windowRadius ) * stride + ( sx - windowRadius ) * pixelSize;

                            // for each row of the window
                            for ( int i = 0; i < windowSize; i++ )
                            {
                                // for each pixel of the window row
                                for ( int j = 0, maxJ = windowSize * pixelSize; j < maxJ; j++, ptr1++, ptr2++ )
                                {
                                    int dif = *ptr1 - *ptr2;
                                    sum += dif * dif;
                                }
                                ptr1 += offset;
                                ptr2 += offset;
                            }

                            // check if the sum is minimal
                            if ( sum < minSum )
                            {
                                minSum = sum;
                            }
                        }

                        // threshold the minimum sum
                        if ( minSum < threshold )
                        {
                            minSum = 0;
                        }

                        moravecMap[y, x] = minSum;
                    }
                }
            }

            // collect interesting points - only those which are local maxima
            List<IntPoint> cornersList = new List<IntPoint>( );

            // for each row
            for ( int y = windowRadius, maxY = height - windowRadius; y < maxY; y++ )
            {
                // for each pixel
                for ( int x = windowRadius, maxX = width - windowRadius; x < maxX; x++ )
                {
                    int currentValue = moravecMap[y, x];

                    // for each row of the window
                    for ( int i = -windowRadius; ( currentValue != 0 ) && ( i <= windowRadius ); i++ )
                    {
                        // for each pixel of the window row
                        for ( int j = -windowRadius; j <= windowRadius; j++ )
                        {
                            if ( moravecMap[y + i, x + j] > currentValue )
                            {
                                currentValue = 0;
                                break;
                            }
                        }
                    }

                    // check if this point is really interesting
                    if ( currentValue != 0 )
                    {
                        cornersList.Add( new IntPoint( x, y ) );
                    }
                }
            }

            return cornersList;
        }
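
A possible usage sketch for the corner detector above (assuming the usual System and System.Drawing usings). The MoravecCornersDetector class name and the UnmanagedImage.FromManagedImage helper are assumptions modeled on the AForge-style API this library follows; only the ProcessImage overload is taken from the source above.

        // Usage sketch - class and helper names are assumed; parameter-free construction for brevity.
        Bitmap bitmap = (Bitmap) System.Drawing.Image.FromFile( "sample.png" );
        UnmanagedImage source = UnmanagedImage.FromManagedImage( bitmap );   // assumed helper

        MoravecCornersDetector detector = new MoravecCornersDetector( );     // assumed class name

        foreach ( IntPoint corner in detector.ProcessImage( source ) )       // overload shown above
        {
            Console.WriteLine( "corner at ({0}, {1})", corner.X, corner.Y );
        }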
Exemplo n.º 23
0
        /// <summary>
        /// Find corners of quadrilateral/triangular area in the specified image.
        /// </summary>
        /// 
        /// <param name="image">Source image to search quadrilateral for.</param>
        /// 
        /// <returns>Returns a list of points, which are corners of the quadrilateral/triangular area found
        /// in the specified image. The first point in the list is the point with lowest
        /// X coordinate (and with lowest Y if there are several points with the same X value).
        /// Points are in clockwise order (screen coordinates system).</returns>
        ///
        /// <exception cref="UnsupportedImageFormatException">Unsupported pixel format of the source image.</exception>
        /// 
        public List<IntPoint> ProcessImage( UnmanagedImage image )
        {
            CheckPixelFormat( image.PixelFormat );

            // get source image size
            int width  = image.Width;
            int height = image.Height;

            // collection of points
            List<IntPoint> points = new List<IntPoint>( );

            // collect edge points
            unsafe
            {
                byte* src = (byte*) image.ImageData.ToPointer( );
                int stride = image.Stride;

                bool lineIsEmpty;

                if ( image.PixelFormat == PixelFormat.Format8bppIndexed )
                {
                    // for each row
                    for ( int y = 0; y < height; y++ )
                    {
                        lineIsEmpty = true;

                        // scan from left to right
                        for ( int x = 0; x < width; x++ )
                        {
                            if ( src[x] != 0 )
                            {
                                points.Add( new IntPoint( x, y ) );
                                lineIsEmpty = false;
                                break;
                            }
                        }
                        if ( !lineIsEmpty )
                        {
                            // scan from right to left
                            for ( int x = width - 1; x >= 0; x-- )
                            {
                                if ( src[x] != 0 )
                                {
                                    points.Add( new IntPoint( x, y ) );
                                    break;
                                }
                            }
                        }
                        src += stride;
                    }
                }
                else
                {
                    // 24 or 32 bpp color image
                    int pixelSize = System.Drawing.Image.GetPixelFormatSize( image.PixelFormat ) / 8; 

                    byte* ptr = null;

                    // for each row
                    for ( int y = 0; y < height; y++ )
                    {
                        lineIsEmpty = true;
                        // scan from left to right
                        ptr = src;
                        for ( int x = 0; x < width; x++, ptr += pixelSize )
                        {
                            if ( ( ptr[RGB.R] != 0 ) || ( ptr[RGB.G] != 0 ) || ( ptr[RGB.B] != 0 ) )
                            {
                                points.Add( new IntPoint( x, y ) );
                                lineIsEmpty = false;
                                break;
                            }
                        }
                        if ( !lineIsEmpty )
                        {
                            // scan from right to left
                            ptr = src + width * pixelSize - pixelSize;
                            for ( int x = width - 1; x >= 0; x--, ptr -= pixelSize )
                            {
                                if ( ( ptr[RGB.R] != 0 ) || ( ptr[RGB.G] != 0 ) || ( ptr[RGB.B] != 0 ) )
                                {
                                    points.Add( new IntPoint( x, y ) );
                                    break;
                                }
                            }
                        }
                        src += stride;
                    }
                }
            }

            return PointsCloud.FindQuadrilateralCorners( points );
        }
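
A possible usage sketch for the quadrilateral corner finder above, e.g. for locating a bright document against a dark background. The QuadrilateralFinder class name and the FromManagedImage helper are assumptions; the ProcessImage overload and the corner ordering are taken from the source above.

        // Usage sketch - class and helper names are assumed; thresholdedBitmap is a
        // binarized image with the object brighter than the background.
        UnmanagedImage binary = UnmanagedImage.FromManagedImage( thresholdedBitmap );

        QuadrilateralFinder finder = new QuadrilateralFinder( );
        List<IntPoint> corners = finder.ProcessImage( binary );

        // corners[0] has the lowest X (and lowest Y on ties); points follow in clockwise order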
Exemplo n.º 24
0
 /// <summary>
 /// Get skew angle of the provided document image.
 /// </summary>
 /// 
 /// <param name="image">Document's unmanaged image to get skew angle of.</param>
 /// 
 /// <returns>Returns document's skew angle. If the returned angle equals -90,
 /// then document skew detection has failed.</returns>
 /// 
 /// <exception cref="UnsupportedImageFormatException">Unsupported pixel format of the source image.</exception>
 /// 
 public double GetSkewAngle( UnmanagedImage image )
 {
     return GetSkewAngle( image, new Rectangle( 0, 0, image.Width, image.Height ) );
 }
Exemplo n.º 25
0
        /// <summary>
        /// Initializes a new instance of the <see cref="ImageStatisticsHSL"/> class.
        /// </summary>
        /// 
        /// <param name="image">Image to gather statistics about.</param>
        /// <param name="mask">Mask image which specifies areas to collect statistics for.</param>
        /// 
        /// <remarks><para>The mask image must be a grayscale/binary (8bpp) image of the same size as the
        /// specified source image, where black pixels (value 0) correspond to areas which should be excluded
        /// from processing. Statistics are therefore calculated only for pixels which are non-black in the
        /// mask image.</para></remarks>
        /// 
        /// <exception cref="UnsupportedImageFormatException">Source pixel format is not supported.</exception>
        /// <exception cref="ArgumentException">Mask image must be 8 bpp grayscale image.</exception>
        /// <exception cref="ArgumentException">Mask must have the same size as the source image to get statistics for.</exception>
        /// 
        public ImageStatisticsHSL( UnmanagedImage image, UnmanagedImage mask )
        {
            CheckSourceFormat( image.PixelFormat );
            CheckMaskProperties( mask.PixelFormat, new Size( mask.Width, mask.Height ), new Size( image.Width, image.Height ) );

            unsafe
            {
                ProcessImage( image, (byte*) mask.ImageData.ToPointer( ), mask.Stride );
            }
        }
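
A possible usage sketch for the masked statistics constructor above. The constructor signature comes from the source; the Saturation/Luminance histogram properties and their Mean values are assumptions modeled on the AForge-style API.

        // Usage sketch - maskImage must be an 8 bpp image of the same size as sourceImage;
        // pixels where the mask is zero are excluded from the statistics.
        ImageStatisticsHSL stat = new ImageStatisticsHSL( sourceImage, maskImage );

        Console.WriteLine( "mean saturation: {0}", stat.Saturation.Mean );   // property names assumed
        Console.WriteLine( "mean luminance:  {0}", stat.Luminance.Mean );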
Exemplo n.º 26
0
        /// <summary>
        /// Get skew angle of the provided document image.
        /// </summary>
        /// 
        /// <param name="image">Document's unmanaged image to get skew angle of.</param>
        /// <param name="rect">Image's rectangle to process (used to exclude processing of
        /// regions, which are not relevant to skew detection).</param>
        /// 
        /// <returns>Returns document's skew angle. If the returned angle equals -90,
        /// then document skew detection has failed.</returns>
        /// 
        /// <exception cref="UnsupportedImageFormatException">Unsupported pixel format of the source image.</exception>
        /// 
        public double GetSkewAngle( UnmanagedImage image, Rectangle rect )
        {
            if ( image.PixelFormat != PixelFormat.Format8bppIndexed )
            {
                throw new UnsupportedImageFormatException( "Unsupported pixel format of the source image." );
            }

            // init hough transformation settings
            InitHoughMap( );

            // get source image size
            int width       = image.Width;
            int height      = image.Height;
            int halfWidth   = width / 2;
            int halfHeight  = height / 2;

            // make sure the specified rectangle resides within the source image
            rect.Intersect( new Rectangle( 0, 0, width, height ) );

            int startX = -halfWidth  + rect.Left;
            int startY = -halfHeight + rect.Top;
            int stopX  = width  - halfWidth  - ( width  - rect.Right );
            int stopY  = height - halfHeight - ( height - rect.Bottom ) - 1;

            int offset = image.Stride - rect.Width;

            // calculate Hough map's width
            int halfHoughWidth = (int) Math.Sqrt( halfWidth * halfWidth + halfHeight * halfHeight );
            int houghWidth = halfHoughWidth * 2;

            houghMap = new short[houghHeight, houghWidth];

            // do the job
            unsafe
            {
                byte* src = (byte*) image.ImageData.ToPointer( ) +
                    rect.Top * image.Stride + rect.Left;
                byte* srcBelow = src + image.Stride;

                // for each row
                for ( int y = startY; y < stopY; y++ )
                {
                    // for each pixel
                    for ( int x = startX; x < stopX; x++, src++, srcBelow++ )
                    {
                        // if the current pixel is dark
                        // and the pixel below is light
                        if ( ( *src < 128 ) && ( *srcBelow >= 128 ) )
                        {
                            // for each Theta value
                            for ( int theta = 0; theta < houghHeight; theta++ )
                            {
                                int radius = (int) ( cosMap[theta] * x - sinMap[theta] * y ) + halfHoughWidth;

                                if ( ( radius < 0 ) || ( radius >= houghWidth ) )
                                    continue;

                                houghMap[theta, radius]++;
                            }
                        }
                    }
                    src += offset;
                    srcBelow += offset;
                }
            }

            // find max value in Hough map
            maxMapIntensity = 0;
            for ( int i = 0; i < houghHeight; i++ )
            {
                for ( int j = 0; j < houghWidth; j++ )
                {
                    if ( houghMap[i, j] > maxMapIntensity )
                    {
                        maxMapIntensity = houghMap[i, j];
                    }
                }
            }

            CollectLines( (short) ( width / 10 ) );

            // get skew angle
            HoughLine[] hls = this.GetMostIntensiveLines( 5 );

            double skewAngle = 0;
            double sumIntensity = 0;

            foreach ( HoughLine hl in hls )
            {
                if ( hl.RelativeIntensity > 0.5 )
                {
                    skewAngle += ( hl.Theta * hl.RelativeIntensity );
                    sumIntensity += hl.RelativeIntensity;
                }
            }
            if ( hls.Length > 0 ) skewAngle = skewAngle / sumIntensity;

            return skewAngle - 90.0;
        }
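
A possible end-to-end usage sketch for the skew detection above: detect the document's skew angle and rotate the image back by the negative angle. The DocumentSkewChecker and RotateBilinear class names are assumptions modeled on the AForge-style API; only the GetSkewAngle signatures come from the source.

        // Usage sketch - class names are assumed; documentImage must be an 8 bpp grayscale image.
        DocumentSkewChecker skewChecker = new DocumentSkewChecker( );
        double angle = skewChecker.GetSkewAngle( documentImage );

        if ( angle != -90.0 )   // -90 signals that skew detection failed
        {
            RotateBilinear rotation = new RotateBilinear( -angle, true );   // assumed filter, keep size
            UnmanagedImage deskewed = rotation.Apply( documentImage );
        }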
Exemplo n.º 27
0
        // Gather statistics for the specified image
        private unsafe void ProcessImage( UnmanagedImage image, byte* mask, int maskLineSize )
        {
            // get image dimension
            int width  = image.Width;
            int height = image.Height;

            pixels = pixelsWithoutBlack = 0;

            int[] s   = new int[256];
            int[] l   = new int[256];
            int[] swb = new int[256];
            int[] lwb = new int[256];
            RGB   rgb = new RGB( );
            HSL   hsl = new HSL( );

            int pixelSize = ( image.PixelFormat == PixelFormat.Format24bppRgb ) ? 3 : 4;
            int offset = image.Stride - width * pixelSize;
            int maskOffset = maskLineSize - width;

            // do the job
            byte * p = (byte*) image.ImageData.ToPointer( );

            if ( mask == null )
            {
                // for each line
                for ( int y = 0; y < height; y++ )
                {
                    // for each pixel
                    for ( int x = 0; x < width; x++, p += pixelSize )
                    {
                        rgb.Red   = p[RGB.R];
                        rgb.Green = p[RGB.G];
                        rgb.Blue  = p[RGB.B];

                        // convert to HSL color space
                        BestCS.Imaging.HSL.FromRGB( rgb, hsl );

                        s[(int) ( hsl.Saturation * 255 )]++;
                        l[(int) ( hsl.Luminance  * 255 )]++;
                        pixels++;

                        if ( hsl.Luminance != 0.0 )
                        {
                            swb[(int) ( hsl.Saturation * 255 )]++;
                            lwb[(int) ( hsl.Luminance  * 255 )]++;
                            pixelsWithoutBlack++;
                        }
                    }
                    p += offset;
                }
            }
            else
            {
                // for each line
                for ( int y = 0; y < height; y++ )
                {
                    // for each pixel
                    for ( int x = 0; x < width; x++, p += pixelSize, mask++ )
                    {
                        if ( *mask == 0 )
                            continue;

                        rgb.Red   = p[RGB.R];
                        rgb.Green = p[RGB.G];
                        rgb.Blue  = p[RGB.B];

                        // convert to HSL color space
                        BestCS.Imaging.HSL.FromRGB( rgb, hsl );

                        s[(int) ( hsl.Saturation * 255 )]++;
                        l[(int) ( hsl.Luminance  * 255 )]++;
                        pixels++;

                        if ( hsl.Luminance != 0.0 )
                        {
                            swb[(int) ( hsl.Saturation * 255 )]++;
                            lwb[(int) ( hsl.Luminance  * 255 )]++;
                            pixelsWithoutBlack++;
                        }
                    }
                    p += offset;
                    mask += maskOffset;
                }
            }

            // create histograms
            saturation = new ContinuousHistogram( s, new Range( 0, 1 ) );
            luminance  = new ContinuousHistogram( l, new Range( 0, 1 ) );

            saturationWithoutBlack = new ContinuousHistogram( swb, new Range( 0, 1 ) );
            luminanceWithoutBlack  = new ContinuousHistogram( lwb, new Range( 0, 1 ) );
        }
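
For clarity, a minimal sketch (not the library's own converter) of the RGB to saturation/luminance mapping that the loop above relies on; it only illustrates why both values lie in the 0..1 range that the histogram code scales by 255.

        // Illustrative converter only; BestCS.Imaging.HSL.FromRGB may differ in details.
        static void RgbToSaturationLuminance( byte r, byte g, byte b,
                                              out double saturation, out double luminance )
        {
            double rf = r / 255.0, gf = g / 255.0, bf = b / 255.0;

            double max = Math.Max( rf, Math.Max( gf, bf ) );
            double min = Math.Min( rf, Math.Min( gf, bf ) );

            // luminance is the average of the extreme channels
            luminance = ( max + min ) / 2.0;

            if ( max == min )
            {
                saturation = 0;   // achromatic pixel
            }
            else
            {
                saturation = ( luminance <= 0.5 ) ?
                    ( max - min ) / ( max + min ) :
                    ( max - min ) / ( 2.0 - max - min );
            }
        }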
Exemplo n.º 28
0
 /// <summary>
 /// Initializes a new instance of the <see cref="RecursiveBlobCounter"/> class.
 /// </summary>
 /// 
 /// <param name="image">Unmanaged image to look for objects in.</param>
 /// 
 public RecursiveBlobCounter( UnmanagedImage image ) : base( image ) { }
Exemplo n.º 29
0
        /// <summary>
        /// Reset motion detector to initial state.
        /// </summary>
        /// 
        /// <remarks><para>Resets internal state and variables of the motion detection algorithm.
        /// Usually this is required before processing a new video source, but it may also be done
        /// at any time to restart the motion detection algorithm.</para>
        /// </remarks>
        /// 
        public void Reset( )
        {
            lock ( sync )
            {
                if ( previousFrame != null )
                {
                    previousFrame.Dispose( );
                    previousFrame = null;
                }

                if ( motionFrame != null )
                {
                    motionFrame.Dispose( );
                    motionFrame = null;
                }

                if ( tempFrame != null )
                {
                    tempFrame.Dispose( );
                    tempFrame = null;
                }
            }
        }
Exemplo n.º 30
0
        /// <summary>
        /// Process video and motion frames doing further post processing after
        /// performed motion detection.
        /// </summary>
        /// 
        /// <param name="videoFrame">Original video frame.</param>
        /// <param name="motionFrame">Motion frame provided by motion detection
        /// algorithm (see <see cref="IMotionDetector"/>).</param>
        /// 
        /// <remarks><para>Processes provided motion frame and highlights motion areas
        /// on the original video frame with <see cref="HighlightColor">specified color</see>.</para>
        /// </remarks>
        /// 
        /// <exception cref="InvalidImagePropertiesException">Motion frame is not 8 bpp image, but it must be so.</exception>
        /// <exception cref="UnsupportedImageFormatException">Video frame must be 8 bpp grayscale image or 24/32 bpp color image.</exception>
        ///
        public unsafe void ProcessFrame( UnmanagedImage videoFrame, UnmanagedImage motionFrame )
        {
            if ( motionFrame.PixelFormat != PixelFormat.Format8bppIndexed )
            {
                throw new InvalidImagePropertiesException( "Motion frame must be 8 bpp image." );
            }

            if ( ( videoFrame.PixelFormat != PixelFormat.Format8bppIndexed ) &&
                 ( videoFrame.PixelFormat != PixelFormat.Format24bppRgb ) &&
                 ( videoFrame.PixelFormat != PixelFormat.Format32bppRgb ) &&
                 ( videoFrame.PixelFormat != PixelFormat.Format32bppArgb ) )
            {
                throw new UnsupportedImageFormatException( "Video frame must be 8 bpp grayscale image or 24/32 bpp color image." );
            }

            int width  = videoFrame.Width;
            int height = videoFrame.Height;
            int pixelSize = Bitmap.GetPixelFormatSize( videoFrame.PixelFormat ) / 8; 

            if ( ( motionFrame.Width != width ) || ( motionFrame.Height != height ) )
                return;

            byte* src = (byte*) videoFrame.ImageData.ToPointer( );
            byte* motion = (byte*) motionFrame.ImageData.ToPointer( );

            int srcOffset = videoFrame.Stride - width * pixelSize;
            int motionOffset = motionFrame.Stride - width;

            if ( pixelSize == 1 )
            {
                // grayscale case - convert the highlight color to gray using BT.709 luminance weights
                byte fillG = (byte) ( 0.2125 * highlightColor.R +
                                      0.7154 * highlightColor.G +
                                      0.0721 * highlightColor.B );

                for ( int y = 0; y < height; y++ )
                {
                    for ( int x = 0; x < width; x++, motion++, src++ )
                    {
                        if ( ( *motion != 0 ) && ( ( ( x + y ) & 1 ) == 0 ) )
                        {
                            *src = fillG;
                        }
                    }
                    src += srcOffset;
                    motion += motionOffset;
                }
            }
            else
            {
                // color case
                byte fillR = highlightColor.R;
                byte fillG = highlightColor.G;
                byte fillB = highlightColor.B;

                for ( int y = 0; y < height; y++ )
                {
                    for ( int x = 0; x < width; x++, motion++, src += pixelSize )
                    {
                        if ( ( *motion != 0 ) && ( ( ( x + y ) & 1 ) == 0 ) )
                        {
                            src[RGB.R] = fillR;
                            src[RGB.G] = fillG;
                            src[RGB.B] = fillB;
                        }
                    }
                    src += srcOffset;
                    motion += motionOffset;
                }
            }
        }
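
A possible usage sketch showing how a motion post-processor with this ProcessFrame contract is typically wired into a detection pipeline. The MotionDetector, TwoFramesDifferenceDetector and MotionAreaHighlighting class names are assumptions modeled on the AForge-style motion API; only ProcessFrame's behavior (painting motion areas onto the video frame) comes from the source above.

        // Usage sketch - class names are assumed.
        MotionDetector detector = new MotionDetector(
            new TwoFramesDifferenceDetector( ),    // assumed detection algorithm
            new MotionAreaHighlighting( ) );       // post-processor of the kind shown above

        // for each incoming video frame: detect motion and highlight moving areas in place
        double motionLevel = detector.ProcessFrame( videoFrame );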