コード例 #1
0
ファイル: QuickDetect.cs プロジェクト: Micky-G/VideoTrace
        /// <summary>
        /// Extracts the feature codes of a single labeled sample image.
        /// </summary>
        /// <param name="bmpSource">Source image; must be a 24-bit or 32-bit true-color bitmap.</param>
        /// <param name="template">Base classifier template.</param>
        /// <param name="sampletype">Sample type: 1 for a positive sample, -1 for a negative sample.</param>
        /// <returns>The sample's feature, or null when the input is null or not a supported pixel format.</returns>
        public static SampleFeature GetSampleFeature(Bitmap bmpSource, BaseClassifierTemplate template, int sampletype)
        {
            SampleFeature sf  = null;
            Bitmap        bmp = bmpSource;

            if (bmp != null && (bmp.PixelFormat == PixelFormat.Format24bppRgb || bmp.PixelFormat ==
                                PixelFormat.Format32bppRgb || bmp.PixelFormat == PixelFormat.Format32bppArgb))
            {
                // Scale the image to the template's size.
                if (bmp.Width != template.BmpWidth || bmp.Height != template.BmpHeight)
                {
                    bmp = ImgOper.ResizeImage(bmpSource, template.BmpWidth, template.BmpHeight);
                }

                // BUG FIX: grayscale the (possibly resized) image. The original code
                // passed bmpSource here, silently discarding the resize done above.
                bmp = ImgOper.Grayscale(bmp);

                // Standard integral image (type 1) for fast feature evaluation.
                int[,] igram = ImgOper.Integrogram(bmp, 1);

                sf              = new SampleFeature();
                sf.GroupNum     = template.GroupNum;
                sf.FeatureNum   = template.FeatureNum;
                sf.SampleType   = sampletype;
                sf.FeatureValue = BaseClassifierTemplate.GetFeatureCodeGroup(igram, bmp.Width, bmp.Height, template, 0, 0);
            }
            return(sf);
        }
コード例 #2
0
ファイル: QuickDetect.cs プロジェクト: Micky-G/VideoTrace
        /// <summary>
        /// Scans the image with the template at multiple scales and collects every
        /// window the fern classifier judges to contain the object. The template is
        /// enlarged by a factor of 1.2 after each pass until it no longer fits.
        /// </summary>
        /// <param name="bmpSource">Source image; must be a 24-bit or 32-bit true-color bitmap.</param>
        /// <param name="template">Base classifier template; NOTE: it is resized in place by this method.</param>
        /// <param name="fern">Fern holding the learned posterior probabilities.</param>
        /// <returns>Collection of accepted windows; null when the image is smaller than the template.</returns>
        public static RectangleCollection DetectObject(Bitmap bmpSource, BaseClassifierTemplate template, Fern fern)
        {
            RectangleCollection rc = new RectangleCollection();
            Bitmap bmp             = bmpSource;

            if (bmpSource.Width < template.BmpWidth || bmpSource.Height < template.BmpHeight)
            {
                return(null);
            }

            if (bmp != null && (bmp.PixelFormat == PixelFormat.Format24bppRgb || bmp.PixelFormat ==
                                PixelFormat.Format32bppRgb || bmp.PixelFormat == PixelFormat.Format32bppArgb))
            {
                UInt32[] featurecode = null;
                bmp          = ImgOper.Grayscale(bmpSource);
                int[,] igram = ImgOper.Integrogram(bmp, 1);

                while (template.BmpWidth < bmp.Width && template.BmpHeight < bmp.Height)
                {
                    // Scan with a stride of one tenth of the template size.
                    // BUG FIX: for templates smaller than 10 px the original integer
                    // division produced a stride of 0, so the loops never advanced
                    // (infinite loop); clamp the stride to at least 1 pixel.
                    int ystep = Math.Max(1, template.BmpHeight / 10);
                    int xstep = Math.Max(1, template.BmpWidth / 10);
                    for (int y = 0; y < bmp.Height - template.BmpHeight + 1; y += ystep)
                    {
                        for (int x = 0; x < bmp.Width - template.BmpWidth + 1; x += xstep)
                        {
                            featurecode = BaseClassifierTemplate.GetFeatureCodeGroup(igram, bmp.Width, bmp.Height, template, x, y);

                            // Average the posterior probability over all fern groups;
                            // accept the window when the mean exceeds 0.5.
                            double prob = 0;
                            for (int i = 0; i < template.GroupNum; i++)
                            {
                                prob += fern.Probability[featurecode[i]];
                            }
                            prob = prob / template.GroupNum;
                            if (prob > 0.5)
                            {
                                Rectangle rect = new Rectangle(x, y, template.BmpWidth, template.BmpHeight);
                                rc.Add(rect);
                            }
                        }
                    }
                    // Grow the template for the next scale pass (mutates the caller's template).
                    template.ResizeTemplate(1.2);
                }
            }
            return(rc);
        }
コード例 #3
0
ファイル: tld.cs プロジェクト: Micky-G/VideoTrace
        /// <summary>
        /// Negative-sample expert: finds negative samples that were misclassified
        /// as positive and grows the negative training set with them.
        /// </summary>
        /// <param name="detectCollection">Regions produced by the detection module.</param>
        /// <param name="trackerRect">Region produced by the tracking module.</param>
        /// <param name="bmp">Bitmap being analyzed.</param>
        /// <returns>The most confident object region.</returns>
        public Rectangle NegativeExpert(RectangleCollection detectCollection, RectangleF trackerRect, Bitmap bmp)
        {
            // Work on a copy of the rectangle collection so the NExpert stays
            // independent of the PExpert.
            RectangleCollection newRectCollection = new RectangleCollection();

            if (detectCollection != null)
            {
                foreach (Rectangle detectrect in detectCollection)
                {
                    newRectCollection.Add(detectrect);
                }
            }
            if (trackerRect != Rectangle.Empty)
            {
                // The tracked target also joins the set of candidates to evaluate.
                newRectCollection.Add(new Rectangle((int)trackerRect.X, (int)trackerRect.Y, (int)trackerRect.Width, (int)trackerRect.Height));
            }

            // Most trusted candidate; excluded from the negative mining below.
            // (Dead DateTime/elapse timing locals from the original were removed —
            // they were assigned but never read.)
            Rectangle confidentRect = MinDistanceObject(newRectCollection, bmp);

            if (confidentRect != Rectangle.Empty)
            {
                newRectCollection.Remove(confidentRect);
            }

            foreach (Rectangle rect in newRectCollection)
            {
                // First gate for joining the negative queue: the region must look
                // like a negative sample (normalized positive distance > 0.5).
                if (MinDistance(rect, bmp) > Parameter.MEDIAN_COEF)
                {
                    double areainsect = AreaProportion(confidentRect, rect);
                    // Regions whose overlap with the most confident object is below
                    // AREA_INTERSECT_PROPORTION are treated as negatives.
                    if (areainsect < Parameter.AREA_INTERSECT_PROPORTION)
                    {
                        Bitmap patch = ImgOper.CutImage(bmp, rect.X, rect.Y, rect.Width, rect.Height);
                        TrainNegative(patch);
                    }
                }
            }

            return(confidentRect);
        }
コード例 #4
0
ファイル: tld.cs プロジェクト: Micky-G/VideoTrace
        /// <summary>
        /// Positive-sample expert: recovers positive samples that the detector
        /// misclassified as negative. When no detected region overlaps the
        /// tracked region, shifted copies of the tracked region are fed to the
        /// positive training queue.
        /// </summary>
        /// <param name="detectCollection">Regions produced by the detection module.</param>
        /// <param name="trackerRect">Region produced by the tracking module.</param>
        /// <param name="bmp">Bitmap being analyzed.</param>
        public void PositiveExpert(RectangleCollection detectCollection, RectangleF trackerRect, Bitmap bmp)
        {
            if (trackerRect == Rectangle.Empty)
            {
                return;
            }

            // Does any detector region overlap the tracker region enough?
            bool overlapFound = false;
            foreach (Rectangle candidate in detectCollection)
            {
                if (AreaProportion(trackerRect, candidate) > Parameter.AREA_INTERSECT_PROPORTION)
                {
                    overlapFound = true;
                    break;
                }
            }
            if (overlapFound)
            {
                return;
            }

            // No overlap: the detector likely missed a true positive.
            // First gate for joining the positive queue: the tracked region must
            // look like a positive sample (normalized positive distance < 0.5).
            if (!(MinDistance(trackerRect, bmp) < Parameter.MEDIAN_COEF))
            {
                return;
            }

            double border = Parameter.SHIFT_BORDER;
            double step   = Parameter.SHIFT_INTERVAL;

            // Train on every in-bounds shifted copy of the tracked region,
            // shifting within ±border in increments of step along both axes.
            for (double dx = (-1) * border; dx < border + step; dx += step)
            {
                for (double dy = (-1) * border; dy < border + step; dy += step)
                {
                    bool insideX = trackerRect.X + dx >= 0 && trackerRect.X + trackerRect.Width - 1 + dx < bmp.Width - 1;
                    bool insideY = trackerRect.Y + dy >= 0 && trackerRect.Y + trackerRect.Height - 1 + dy < bmp.Height - 1;
                    if (insideX && insideY)
                    {
                        Bitmap patch = ImgOper.CutImage(bmp, (int)(trackerRect.X + dx), (int)(trackerRect.Y + dy),
                                                        (int)trackerRect.Width, (int)trackerRect.Height);
                        TrainPositive(patch);
                    }
                }
            }
        }
コード例 #5
0
ファイル: tld.cs プロジェクト: Micky-G/VideoTrace
        /// <summary>
        /// Computes the best correlation coefficient against the positive sample
        /// pool, normalized by the best negative match. The larger the value,
        /// the closer the candidate region is to the target.
        /// </summary>
        /// <param name="rect">Candidate target region.</param>
        /// <param name="bmp">Bitmap containing the region.</param>
        /// <returns>Normalized best positive correlation; 0 when it cannot be computed.</returns>
        public double MostAssociate(Rectangle rect, Bitmap bmp)
        {
            if (rect == Rectangle.Empty)
            {
                return(0);
            }

            // Cut out the candidate, normalize its size, and convert to grayscale.
            Bitmap window = ImgOper.CutImage(bmp, rect.X, rect.Y, rect.Width, rect.Height);
            window = ImgOper.ResizeImage(window, Parameter.DETECT_WINDOW_SIZE.Width, Parameter.DETECT_WINDOW_SIZE.Height);
            window = ImgOper.Grayscale(window);

            // Best correlation against the positive sample pool.
            double bestPos = 0;
            foreach (ValuedBitmap sample in PosMapCollection)
            {
                double c = ImgStatCompute.ComputeAssociationCoef(window, sample.VBitmap);
                if (c > bestPos)
                {
                    bestPos = c;
                }
            }

            // Best correlation against the negative sample pool.
            double bestNeg = 0;
            foreach (ValuedBitmap sample in NegMapCollection)
            {
                double c = ImgStatCompute.ComputeAssociationCoef(window, sample.VBitmap);
                if (c > bestNeg)
                {
                    bestNeg = c;
                }
            }

            // Normalized ratio keeps the result in [0, 1] and cannot overflow.
            if (bestPos != 0 || bestNeg != 0)
            {
                return(bestPos / (bestPos + bestNeg));
            }
            return(0);
        }
コード例 #6
0
 /// <summary>
 /// Computes the correlation coefficient of two grayscale images directly.
 /// </summary>
 /// <param name="bmpX">First image; must be 8bpp indexed (grayscale).</param>
 /// <param name="bmpY">Second image; must be 8bpp indexed and the same size as <paramref name="bmpX"/>.</param>
 /// <returns>The correlation coefficient, or 0 when the inputs are invalid.</returns>
 public static double ComputeAssociationCoef(Bitmap bmpX, Bitmap bmpY)
 {
     int[,] igramX1  = null; // standard integral image of the first image
     int[,] igramX2  = null; // squared integral image of the first image
     int[,] igramY1  = null; // standard integral image of the second image
     int[,] igramY2  = null; // squared integral image of the second image
     int[,] igramXY1 = null; // integral image of the pixelwise product
     if (bmpX != null && bmpX.PixelFormat == PixelFormat.Format8bppIndexed &&
         bmpY != null && bmpY.PixelFormat == PixelFormat.Format8bppIndexed &&
         bmpX.Width == bmpY.Width && bmpX.Height == bmpY.Height)
     {
         // FIX: take both dimensions from the same bitmap. The original used
         // bmpX.Width with bmpY.Height — harmless only because equality is
         // verified above, but a latent trap for future edits.
         Rectangle rect = new Rectangle(0, 0, bmpX.Width, bmpX.Height);
         igramX1  = ImgOper.Integrogram(bmpX, 1);
         igramX2  = ImgOper.Integrogram(bmpX, 2);
         igramY1  = ImgOper.Integrogram(bmpY, 1);
         igramY2  = ImgOper.Integrogram(bmpY, 2);
         igramXY1 = ImgOper.Integrogram(bmpX, bmpY, 1);
         return(ComputeAssociationCoef(igramX1, igramX2, igramY1, igramY2, igramXY1, rect));
     }
     return(0);
 }
コード例 #7
0
ファイル: FetchSample.cs プロジェクト: Micky-G/VideoTrace
        /// <summary>
        /// "OK" button handler: seeds the TLD model from the user-chosen rectangle,
        /// then resumes playback and closes the dialog. Shifted copies of the chosen
        /// region become positive samples; non-overlapping windows tiled across the
        /// frame become negative samples.
        /// </summary>
        private void btnOk_Click(object sender, EventArgs e)
        {
            Bitmap patch = null;

            if (pri_tld != null && pri_bmp != null && pri_choose_rect != Rectangle.Empty)
            {
                // Positive samples: the chosen rectangle shifted within
                // ±SHIFT_BORDER in steps of SHIFT_INTERVAL, restricted to
                // shifts that keep the patch fully inside the frame.
                for (double lrshift = (-1) * Parameter.SHIFT_BORDER; lrshift < Parameter.SHIFT_BORDER + Parameter.SHIFT_INTERVAL; lrshift += Parameter.SHIFT_INTERVAL)
                {
                    for (double tbshift = (-1) * Parameter.SHIFT_BORDER; tbshift < Parameter.SHIFT_BORDER + Parameter.SHIFT_INTERVAL; tbshift += Parameter.SHIFT_INTERVAL)
                    {
                        if (pri_choose_rect.X + lrshift >= 0 && pri_choose_rect.X + pri_choose_rect.Width - 1 + lrshift < pri_bmp.Width - 1 &&
                            pri_choose_rect.Y + tbshift >= 0 && pri_choose_rect.Y + pri_choose_rect.Height - 1 + tbshift < pri_bmp.Height - 1)
                        {
                            patch = ImgOper.CutImage(pri_bmp, (int)(pri_choose_rect.X + lrshift), (int)(pri_choose_rect.Y + tbshift),
                                                     (int)pri_choose_rect.Width, (int)pri_choose_rect.Height);
                            pri_tld.TrainPositive(patch);
                            //patch.Save("Image\\VideoSave\\" + DateTime.Now.ToString("yyyyMMddhhmmss") + ".jpg");
                        }
                    }
                }

                // Negative samples: tile the frame in DETECT_WINDOW_SIZE strides;
                // windows that barely overlap the chosen rectangle are negatives.
                // NOTE(review): each tile rectangle uses pri_choose_rect's size while
                // the loop strides by DETECT_WINDOW_SIZE — confirm this mismatch is intended.
                for (int row = 0; row < pri_bmp.Height - Parameter.DETECT_WINDOW_SIZE.Height + 1; row += Parameter.DETECT_WINDOW_SIZE.Height)
                {
                    for (int col = 0; col < pri_bmp.Width - Parameter.DETECT_WINDOW_SIZE.Width + 1; col += Parameter.DETECT_WINDOW_SIZE.Width)
                    {
                        Rectangle rect        = new Rectangle(col, row, pri_choose_rect.Width, pri_choose_rect.Height);
                        double    areaportion = pri_tld.AreaProportion(rect, pri_choose_rect);
                        if (areaportion < Parameter.AREA_INTERSECT_PROPORTION)
                        {
                            patch = ImgOper.CutImage(pri_bmp, rect.X, rect.Y, rect.Width, rect.Height);
                            pri_tld.TrainNegative(patch);
                        }
                    }
                }
            }
            pri_player.Start();
            this.Close();
        }
コード例 #8
0
ファイル: QuickDetect.cs プロジェクト: Micky-G/VideoTrace
        /// <summary>
        /// Classifies a labeled sample image with the trained model.
        /// </summary>
        /// <param name="sample">Labeled sample image.</param>
        /// <param name="template">Base classifier template.</param>
        /// <param name="fern">Fern holding the learned posterior probabilities.</param>
        /// <returns>1 for positive, -1 for negative, 0 when the pixel format is unsupported.</returns>
        public static int DetectSample(Bitmap sample, BaseClassifierTemplate template, Fern fern)
        {
            // Bring the sample to the template's size first.
            Bitmap bmp = (sample.Width != template.BmpWidth || sample.Height != template.BmpHeight)
                ? ImgOper.ResizeImage(sample, template.BmpWidth, template.BmpHeight)
                : sample;

            bool trueColor = bmp != null &&
                             (bmp.PixelFormat == PixelFormat.Format24bppRgb ||
                              bmp.PixelFormat == PixelFormat.Format32bppRgb ||
                              bmp.PixelFormat == PixelFormat.Format32bppArgb);
            if (!trueColor)
            {
                return(0);
            }

            bmp = ImgOper.Grayscale(bmp);
            int[,] igram = ImgOper.Integrogram(bmp, 1);
            UInt32[] codes = BaseClassifierTemplate.GetFeatureCodeGroup(igram, bmp.Width, bmp.Height, template, 0, 0);

            // Average the posterior probability over all fern groups.
            double total = 0;
            for (int g = 0; g < template.GroupNum; g++)
            {
                total += fern.Probability[codes[g]];
            }

            // Mean probability above 0.5 means positive, otherwise negative.
            return(total / template.GroupNum > 0.5 ? 1 : -1);
        }
コード例 #9
0
ファイル: ImgOper.cs プロジェクト: Micky-G/VideoTrace
        /// <summary>
        /// Gaussian convolution (blur). True-color input is first converted to
        /// grayscale; the blur is applied separably (a horizontal pass followed
        /// by a vertical pass) and the result is rescaled back into the source
        /// image's intensity range, written in place.
        /// NOTE(review): the coefficient expression divides by Math.Sqrt(2*PI)
        /// and then multiplies by sigma; a normalized Gaussian PDF divides by
        /// (sigma * Math.Sqrt(2*PI)). The final range remap cancels any constant
        /// factor, so the output should be unaffected — confirm before reusing
        /// this kernel elsewhere.
        /// NOTE(review): the final remap scales bmpdata2 without subtracting
        /// minvalue_double or adding minvalue back, so the byte cast may wrap
        /// for some inputs — verify against representative images.
        /// </summary>
        /// <param name="bmpSource">Source bitmap (24/32-bit color or 8-bit grayscale).</param>
        /// <param name="sigma">Gaussian sigma.</param>
        /// <param name="length">Convolution kernel side length in pixels.</param>
        /// <returns>The blurred (grayscale) bitmap, or null on unsupported input.</returns>
        public static Bitmap GaussianConvolution(Bitmap bmpSource, double sigma, int length
                                                 )
        {
            Bitmap bmp = null;

            // One-dimensional Gaussian kernel, applied along each axis in turn.
            double[] gaussian_coef = new double[length];

            for (int i = 0; i < length; i++)
            {
                gaussian_coef[i] = Math.Pow(Math.E, -((i - length / 2) * (i - length / 2)) / (2 * sigma * sigma)) / Math.Sqrt(2 * Math.PI) * sigma;
            }

            if (bmpSource == null)
            {
                return(bmp);
            }

            // True-color input is reduced to grayscale before filtering.
            if (bmpSource.PixelFormat == PixelFormat.Format24bppRgb || bmpSource.PixelFormat ==
                PixelFormat.Format32bppRgb || bmpSource.PixelFormat == PixelFormat.Format32bppArgb)
            {
                bmpSource = ImgOper.Grayscale(bmpSource);
            }

            if (bmpSource.PixelFormat != PixelFormat.Format8bppIndexed)
            {
                return(bmp);
            }

            int        width      = bmpSource.Width;
            int        height     = bmpSource.Height;
            Rectangle  rect       = new Rectangle(0, 0, width, height);
            BitmapData dataSource = bmpSource.LockBits(rect, ImageLockMode.ReadOnly, bmpSource.PixelFormat);
            // Stride is the 4-byte-aligned width, in bytes, of one bitmap row.
            int strideSource = dataSource.Stride;

            double[,] bmpdata1 = new double[height, width]; // after the horizontal pass
            double[,] bmpdata2 = new double[height, width]; // after the vertical pass
            unsafe
            {
                byte * ptrSource = (byte *)dataSource.Scan0.ToPointer();
                byte * ptr1;
                double d               = 0; // running convolution sum for one pixel
                byte   minvalue        = 255; // min source intensity (for the final remap)
                byte   maxvalue        = 0;   // max source intensity (for the final remap)
                double minvalue_double = double.MaxValue; // min blurred value
                double maxvalue_double = 0;               // max blurred value

                // Horizontal pass; also tracks the source intensity range.
                for (int row = 0; row < height; row++)
                {
                    for (int col = 0; col < width; col++)
                    {
                        // Bitmaps are stored in memory left-to-right, bottom-up, but
                        // BitmapData appears to present rows matching the coordinate
                        // system: left-to-right, top-to-bottom.
                        ptr1 = ptrSource + row * strideSource + col;

                        d = 0;
                        for (int i = 0; i < length; i++)
                        {
                            // Skip kernel taps that fall outside the image border.
                            if (col + i - length / 2 >= 0 && col + i - length / 2 < width)
                            {
                                d += *(ptr1 + i - length / 2) * gaussian_coef[i];
                            }
                        }
                        bmpdata1[row, col] = d;

                        if (*ptr1 > maxvalue)
                        {
                            maxvalue = *ptr1;
                        }
                        if (*ptr1 < minvalue)
                        {
                            minvalue = *ptr1;
                        }
                    }
                }

                // Vertical pass over the horizontally-blurred data; tracks the
                // output value range for the remap below.
                for (int col = 0; col < width; col++)
                {
                    for (int row = 0; row < height; row++)
                    {
                        d = 0;
                        for (int i = 0; i < length; i++)
                        {
                            if (row + i - length / 2 >= 0 && row + i - length / 2 < height)
                            {
                                d += bmpdata1[row + i - length / 2, col] * gaussian_coef[i];
                            }
                        }
                        bmpdata2[row, col] = d;
                        if (d > maxvalue_double)
                        {
                            maxvalue_double = d;
                        }
                        if (d < minvalue_double)
                        {
                            minvalue_double = d;
                        }
                    }
                }

                // Write the result back, rescaled into the source intensity range.
                for (int row = 0; row < height; row++)
                {
                    for (int col = 0; col < width; col++)
                    {
                        ptr1 = ptrSource + row * strideSource + col;
                        *ptr1 = (byte)(bmpdata2[row, col] * (maxvalue - minvalue) / (maxvalue_double - minvalue_double));
                    }
                }
                bmpSource.UnlockBits(dataSource);

                bmp = bmpSource;
            }
            return(bmp);
        }
コード例 #10
0
ファイル: Form1.cs プロジェクト: Micky-G/VideoTrace
        /// <summary>
        /// Per-frame callback of the video player: runs the TLD pipeline on the
        /// new frame (HOG detection, optical-flow tracking, then the P/N experts)
        /// and draws the current tracking rectangle plus diagnostics onto the frame.
        /// </summary>
        private void videoSourcePlayer1_NewFrame(object sender, ref Bitmap image)
        {
            Bitmap nowImg = AForge.Imaging.Image.Clone(image);

            nowImg = ImgOper.Grayscale(nowImg);

            // Gaussian blur; both sampling and detection work on the blurred frame.
            nowImg = ImgOper.GaussianConvolution(nowImg, Parameter.GAUSSIAN_SIGMA, Parameter.GAUSSIAN_SIZE);

            // Hand a copy of the frame to the sampling window.
            pri_bmp = AForge.Imaging.Image.Clone(nowImg);

            // NOTE(review): elapse is written repeatedly for debugger inspection
            // and never read.
            double   elapse = 0;
            DateTime dt     = DateTime.Now;

            pri_obj_regions = pri_tld.HogDetect(nowImg);
            elapse          = DateTime.Now.Subtract(dt).TotalMilliseconds;

            float[] vect = new float[3];   // region displacement (x, y) and scale factor
            //nowImg.Save("Image\\VideoSave\\" + DateTime.Now.ToString("yyyyMMddhhmmss") + ".jpg");
            ArrayList points = null;

            if (pri_optical.IPoints == null || pri_tracker_rect == Rectangle.Empty)
            {
                pri_optical.I = pri_optical.TransformBmpToLayerImg(nowImg);

                // No tracked object established yet, so PositiveExpert is not needed.
                pri_tracker_rect = pri_tld.NegativeExpert(pri_obj_regions, pri_tracker_rect, nowImg);

                pri_optical.IPoints = new ArrayList();
                points = pri_optical.ChooseRectRandomPoints(pri_tracker_rect, Parameter.INITIAL_POINTS_NUMBER);
                foreach (PointF pt in points)
                {
                    pri_optical.IPoints.Add(new PointF(pt.X, pt.Y));
                }
            }
            else
            {
                // Shift the previous "next" frame/points into the "current" slot.
                if (pri_optical.J != null)
                {
                    pri_optical.I       = pri_optical.J;
                    pri_optical.IPoints = pri_optical.JPoints;
                }
                pri_optical.J = pri_optical.TransformBmpToLayerImg(nowImg);

                //pri_optical.JPoints = new ArrayList();
                // While tracking, wx and wy are set to 3 (or larger).
                dt   = DateTime.Now;
                vect = pri_optical.ComputerDisplacement(pri_optical.I, pri_optical.J, pri_optical.IPoints,
                                                        1, 3, 3, 20, 0.2f, ref pri_optical.IPoints, ref pri_optical.JPoints);
                elapse = DateTime.Now.Subtract(dt).TotalMilliseconds;

                if (vect != null)
                {
                    // Apply the displacement and scale, then clamp to the frame.
                    pri_tracker_rect.X      = pri_tracker_rect.X + vect[0];
                    pri_tracker_rect.Y      = pri_tracker_rect.Y + vect[1];
                    pri_tracker_rect.Width  = pri_tracker_rect.Width * vect[2];
                    pri_tracker_rect.Height = pri_tracker_rect.Height * vect[2];

                    if (pri_tracker_rect.X < 0)
                    {
                        pri_tracker_rect.Width = pri_tracker_rect.Width + pri_tracker_rect.X;
                        pri_tracker_rect.X     = 0;
                    }
                    else if (pri_tracker_rect.X + pri_tracker_rect.Width - 1 > nowImg.Width - 1)
                    {
                        pri_tracker_rect.Width = nowImg.Width - pri_tracker_rect.X;
                    }

                    if (pri_tracker_rect.Y < 0)
                    {
                        pri_tracker_rect.Height = pri_tracker_rect.Height + pri_tracker_rect.Y;
                        pri_tracker_rect.Y      = 0;
                    }
                    else if (pri_tracker_rect.Y + pri_tracker_rect.Height - 1 > nowImg.Height - 1)
                    {
                        pri_tracker_rect.Height = nowImg.Height - pri_tracker_rect.Y;
                    }
                }
                else
                {
                    // Optical flow lost the target.
                    pri_tracker_rect = Rectangle.Empty;
                }

                // Save the raw tracker state before the NExpert replaces the box
                // with its most confident object.
                if (pri_tracker_rect != Rectangle.Empty)
                {
                    pri_obj_rect = new RectangleF(pri_tracker_rect.X, pri_tracker_rect.Y, pri_tracker_rect.Width, pri_tracker_rect.Height);
                }

                dt = DateTime.Now;
                // Once there is a tracked object, both PositiveExpert and
                // NegativeExpert are active.
                pri_tld.PositiveExpert(pri_obj_regions, pri_tracker_rect, nowImg);
                elapse = DateTime.Now.Subtract(dt).TotalMilliseconds;

                dt = DateTime.Now;
                pri_tracker_rect = pri_tld.NegativeExpert(pri_obj_regions, pri_tracker_rect, nowImg);
                elapse           = DateTime.Now.Subtract(dt).TotalMilliseconds;

                pri_optical.JPoints = new ArrayList();
                points = pri_optical.ChooseRectRandomPoints(pri_tracker_rect, Parameter.INITIAL_POINTS_NUMBER);
                // If pri_tracker_rect is empty, pri_optical.JPoints stays empty, so the
                // next frame's pri_optical.IPoints is also empty and tracking cannot continue.
                foreach (PointF pt in points)
                {
                    pri_optical.JPoints.Add(new PointF(pt.X, pt.Y));
                }
            }

            // Draw the tracking rectangle and diagnostic text onto the output frame.
            Graphics g   = Graphics.FromImage(image);
            Pen      pen = new Pen(Color.Red);

            if (pri_tracker_rect != Rectangle.Empty)
            {
                g.DrawRectangle(pen, pri_tracker_rect.X, pri_tracker_rect.Y, pri_tracker_rect.Width, pri_tracker_rect.Height);

                FontFamily f       = new FontFamily("宋体");
                Font       font    = new System.Drawing.Font(f, 12);
                SolidBrush myBrush = new SolidBrush(Color.Blue);

                StringBuilder str = new StringBuilder();
                if (vect != null)
                {
                    str.AppendFormat("跟踪框相对位移:({0}, {1}),缩放:{2} \r\n" +
                                     "跟踪产生的Rectangle:({3}, {4}, {5}, {6}) \r\n" +
                                     "最可信的Rectangle:({7}, {8}, {9}, {10})", vect[0], vect[1], vect[2],
                                     pri_obj_rect.X, pri_obj_rect.Y, pri_obj_rect.Width, pri_obj_rect.Height,
                                     pri_tracker_rect.X, pri_tracker_rect.Y, pri_tracker_rect.Width, pri_tracker_rect.Height);
                }
                else
                {
                    str.AppendFormat("最可信的Rectangle:({0}, {1}, {2}, {3})", pri_tracker_rect.X, pri_tracker_rect.Y, pri_tracker_rect.Width, pri_tracker_rect.Height);
                }

                g.DrawString(str.ToString(), font, myBrush, 0, 0);
            }

            //if (pri_obj_regions != null && pri_obj_regions.Count > 0)
            //{
            //    foreach (Rectangle rect in pri_obj_regions)
            //    {
            //        g.DrawRectangle(pen, rect.X, rect.Y, rect.Width, rect.Height);
            //    }
            //}
        }
コード例 #11
0
ファイル: tld.cs プロジェクト: Micky-G/VideoTrace
        /// <summary>
        /// Computes the nearest-positive-sample distance coefficient. Distances
        /// are used instead of correlation coefficients for speed; the smaller
        /// the value, the closer the candidate region is to the target.
        /// </summary>
        /// <param name="rect">Candidate target region.</param>
        /// <param name="bmp">Whole bitmap.</param>
        /// <returns>Normalized nearest-distance coefficient; double.MaxValue means it was not (or could not be) computed.</returns>
        public double MinDistance(RectangleF rect, Bitmap bmp)
        {
            double mindistance = double.MaxValue;   // return value
            double dist        = double.MaxValue;
            double minposdist  = double.MaxValue;
            double minnegdist  = double.MaxValue;

            if (rect == Rectangle.Empty)
            {
                return(mindistance);
            }

            // Cut out the candidate, normalize its size, and convert to grayscale.
            // (Dead DateTime/elapse timing locals from the original were removed —
            // they were assigned but never read.)
            Bitmap patch = ImgOper.CutImage(bmp, (int)rect.X, (int)rect.Y, (int)rect.Width, (int)rect.Height);

            patch = ImgOper.ResizeImage(patch, Parameter.DETECT_WINDOW_SIZE.Width, Parameter.DETECT_WINDOW_SIZE.Height);
            patch = ImgOper.Grayscale(patch);

            // Mean-center the patch pixels: the integral image's bottom-right
            // entry is the pixel sum, so the mean is that sum over the area.
            int[,] patchgram = ImgOper.Integrogram(patch, 1);

            byte[] patchdata = ImgOper.GetGraybmpData(patch);
            double patchmean = (double)patchgram[patch.Height - 1, patch.Width - 1] / (double)(patch.Width * patch.Height);

            double[] patchdatad = new double[patchdata.Length];
            for (int i = 0; i < patchdata.Length; i++)
            {
                patchdatad[i] = patchdata[i] - patchmean;
            }

            // Nearest distance to any positive sample (each sample is
            // mean-centered the same way as the patch).
            foreach (ValuedBitmap posbmp in PosMapCollection)
            {
                int[,] posgram = ImgOper.Integrogram(posbmp.VBitmap, 1);
                byte[]   posdata  = ImgOper.GetGraybmpData(posbmp.VBitmap);
                double   posmean  = (double)posgram[posbmp.VBitmap.Height - 1, posbmp.VBitmap.Width - 1] / (double)(posbmp.VBitmap.Width * posbmp.VBitmap.Height);
                double[] posdatad = new double[posdata.Length];
                for (int i = 0; i < posdata.Length; i++)
                {
                    posdatad[i] = posdata[i] - posmean;
                }
                dist = ImgStatCompute.ComputeDistance(patchdatad, posdatad);
                if (dist < minposdist)
                {
                    minposdist = dist;
                }
            }

            // Nearest distance to any negative sample.
            foreach (ValuedBitmap negbmp in NegMapCollection)
            {
                int[,] neggram = ImgOper.Integrogram(negbmp.VBitmap, 1);
                byte[]   negdata  = ImgOper.GetGraybmpData(negbmp.VBitmap);
                double   negmean  = (double)neggram[negbmp.VBitmap.Height - 1, negbmp.VBitmap.Width - 1] / (double)(negbmp.VBitmap.Width * negbmp.VBitmap.Height);
                double[] negdatad = new double[negdata.Length];
                for (int i = 0; i < negdata.Length; i++)
                {
                    negdatad[i] = negdata[i] - negmean;
                }
                dist = ImgStatCompute.ComputeDistance(patchdatad, negdatad);
                if (dist < minnegdist)
                {
                    minnegdist = dist;
                }
            }

            if (minnegdist != 0 || minposdist != 0)
            {
                // Normalized coefficient: raw minposdist / minnegdist could overflow.
                mindistance = minposdist / (minposdist + minnegdist);
            }

            return(mindistance);
        }
コード例 #12
0
ファイル: tld.cs プロジェクト: Micky-G/VideoTrace
        /// <summary>
        /// Trains with one positive sample. The bitmap is normalized to the detection
        /// window, then perturbed over a small range of rotations and scales; each
        /// variant is HOG-encoded, scored against the current positive/negative sample
        /// centers, and inserted into the bounded positive queue when it qualifies,
        /// after which the positive center is re-estimated.
        /// </summary>
        /// <param name="bmp">Positive-sample bitmap.</param>
        public void TrainPositive(Bitmap bmp)
        {
            Bitmap samplebmp   = null;
            bool   hasinserted = false; // whether the current variant was inserted into the queue

            samplebmp = ImgOper.ResizeImage(bmp, Parameter.DETECT_WINDOW_SIZE.Width, Parameter.DETECT_WINDOW_SIZE.Height);
            samplebmp = ImgOper.Grayscale(samplebmp);

            for (double angle = (-1) * Parameter.ANGLE_BORDER; angle < Parameter.ANGLE_BORDER; angle += Parameter.ANGLE_INTERVAL)
            {
                Bitmap bmpclone = ImgOper.RotateImage(samplebmp, angle);
                bmpclone = ImgOper.ResizeImage(bmpclone, Parameter.DETECT_WINDOW_SIZE.Width, Parameter.DETECT_WINDOW_SIZE.Height);

                for (double scale = (-1) * Parameter.SCALE_BORDER; scale < Parameter.SCALE_BORDER; scale += Parameter.SCALE_INTERVAL)
                {
                    // BUGFIX: the distance accumulators must restart at zero for every
                    // rotated/scaled variant. They were previously declared once outside
                    // both loops and kept accumulating across iterations, corrupting the
                    // score of every variant after the first.
                    double neg_distance = 0;
                    double pos_distance = 0;

                    // The crop moves in from both sides, hence the subtractions.
                    IntPoint lt       = new IntPoint((int)(bmpclone.Width * scale / 2), (int)(bmpclone.Height * scale / 2));
                    IntPoint rt       = new IntPoint(bmpclone.Width - 1 - (int)(bmpclone.Width * scale / 2), (int)(bmpclone.Height * scale / 2));
                    IntPoint rb       = new IntPoint(bmpclone.Width - 1 - (int)(bmpclone.Width * scale / 2), bmpclone.Height - 1 - (int)(bmpclone.Height * scale / 2));
                    IntPoint lb       = new IntPoint((int)(bmpclone.Width * scale / 2), bmpclone.Height - 1 - (int)(bmpclone.Height * scale / 2));
                    Bitmap   scalebmp = ImgOper.QuadrilateralTransform(bmpclone, lt, rt, rb, lb);

                    HogGram             hogGram   = HogGram.GetHogFromBitmap(scalebmp, Parameter.CELL_SIZE.Width, Parameter.CELL_SIZE.Height, Parameter.PART_NUMBER);
                    NormBlockVectorGram blockGram = new NormBlockVectorGram(hogGram, Parameter.BLOCK_SIZE.Width, Parameter.BLOCK_SIZE.Height);

                    Rectangle rect = new Rectangle(0, 0, hogGram.HogSize.Width, hogGram.HogSize.Height);
                    double[]  vect = blockGram.GetHogWindowVec(rect);

                    if (Dimension != 0 && vect.Length != Dimension)
                    {
                        throw new Exception("输入正样本的尺寸与其他样本尺寸不一致!");
                    }

                    ValuedBitmap vbmp = null;
                    if (NegCenter != null && PosCenter != null)
                    {
                        // L1 distances to the positive and negative centers.
                        for (int i = 0; i < vect.Length; i++)
                        {
                            neg_distance += Math.Abs(vect[i] - NegCenter[i]);
                            pos_distance += Math.Abs(vect[i] - PosCenter[i]);
                        }

                        // Coinciding with the negative center means this is a negative
                        // sample; it must not enter the positive queue.
                        if (neg_distance == 0)
                        {
                            return;
                        }

                        // Second gate: a variant that is not close enough to the positive
                        // center is rejected. Mirroring the HOG-detection criterion, the
                        // positive distance is weighted by Parameter.POS_DIST_COEF to stay
                        // clear of the decision boundary.
                        if (neg_distance < pos_distance * Parameter.POS_DIST_COEF)
                        {
                            return;
                        }

                        // Normalized rating: pos_distance / neg_distance alone could
                        // overflow; pos_distance / (pos_distance + neg_distance) is used,
                        // smaller = closer to the positive class.
                        vbmp = new ValuedBitmap(scalebmp, pos_distance / (pos_distance + neg_distance));
                    }
                    else
                    {
                        // Sample bases not built yet: rate the sample 1 for now.
                        vbmp = new ValuedBitmap(scalebmp, 1);
                    }

                    // Third gate: the ordered, bounded queue decides whether the rating
                    // is good enough to keep the sample.
                    hasinserted = InsertValuedBitmap(ref PosMapCollection, vbmp, Parameter.POS_LIMITED_NUMBER);
                    PosLength   = PosMapCollection.Count;

                    // An inserted sample is trustworthy; fold it into the running center.
                    // NOTE(review): PosLength already includes the new sample here, so the
                    // (PosLength, PosLength + 1) averaging weights look off by one —
                    // preserved as-is; confirm intent.
                    if (hasinserted)
                    {
                        if (PosCenter == null)
                        {
                            Dimension = vect.Length;
                            PosCenter = new double[Dimension];
                        }

                        for (int i = 0; i < Dimension; i++)
                        {
                            PosCenter[i] = (PosCenter[i] * PosLength + vect[i]) / (PosLength + 1);
                        }
                    }
                }
            }
        }
コード例 #13
0
ファイル: tld.cs プロジェクト: Micky-G/VideoTrace
        /// <summary>
        /// Computes the nearest-positive-sample distance coefficient. Plain distances are
        /// used instead of correlation for efficiency; the smaller the coefficient, the
        /// closer the detected patch is to the target.
        /// </summary>
        /// <param name="rect">Detection region inside <paramref name="bmp"/>.</param>
        /// <param name="bmp">Source bitmap.</param>
        /// <returns>Nearest-distance coefficient; double.MaxValue means not computed or abnormal.</returns>
        private double NearestNeighbour(Rectangle rect, Bitmap bmp)
        {
            double minposdist  = double.MaxValue;
            double minnegdist  = double.MaxValue;
            double nearestdist = double.MaxValue;

            // Scoring needs at least one stored positive and one stored negative sample.
            if (PosLength == 0 || NegLength == 0)
            {
                return(nearestdist);
            }

            // HOG vectors of all stored positive samples.
            ArrayList posvects = new ArrayList();
            for (int i = 0; i < PosLength; i++)
            {
                posvects.Add(ComputeWindowHogVector(PosMapCollection[i].VBitmap));
            }

            // HOG vectors of all stored negative samples.
            ArrayList negvects = new ArrayList();
            for (int i = 0; i < NegLength; i++)
            {
                negvects.Add(ComputeWindowHogVector(NegMapCollection[i].VBitmap));
            }

            // HOG vector of the detected patch.
            Bitmap detect = ImgOper.CutImage(bmp, rect.X, rect.Y, rect.Width, rect.Height);
            double[] detectvect = ComputeWindowHogVector(detect);

            // Minimum distance to any positive sample.
            foreach (double[] svect in posvects)
            {
                double dist = ImgStatCompute.ComputeDistance(detectvect, svect);
                if (dist < minposdist)
                {
                    minposdist = dist;
                }
            }

            // Minimum distance to any negative sample.
            foreach (double[] svect in negvects)
            {
                double dist = ImgStatCompute.ComputeDistance(detectvect, svect);
                if (dist < minnegdist)
                {
                    minnegdist = dist;
                }
            }

            // Normalized coefficient: minposdist / minnegdist alone could overflow.
            if (minnegdist != 0 || minposdist != 0)
            {
                nearestdist = minposdist / (minposdist + minnegdist);
            }

            return(nearestdist);
        }

        /// <summary>
        /// Resizes a bitmap to the detection window, grayscales it and returns the
        /// normalized-block HOG vector of the whole window. Shared pipeline for sample
        /// loading and patch scoring in <see cref="NearestNeighbour"/> (the three copies
        /// of this code were previously inlined).
        /// </summary>
        /// <param name="source">Bitmap to encode.</param>
        /// <returns>HOG feature vector of the full detection window.</returns>
        private static double[] ComputeWindowHogVector(Bitmap source)
        {
            Bitmap prepared = ImgOper.ResizeImage(source, Parameter.DETECT_WINDOW_SIZE.Width, Parameter.DETECT_WINDOW_SIZE.Height);
            prepared = ImgOper.Grayscale(prepared);

            HogGram             hgram     = HogGram.GetHogFromBitmap(prepared, Parameter.CELL_SIZE.Width, Parameter.CELL_SIZE.Height, Parameter.PART_NUMBER);
            NormBlockVectorGram blockgram = new NormBlockVectorGram(hgram, Parameter.BLOCK_SIZE.Width, Parameter.BLOCK_SIZE.Height);

            Rectangle gramRect = new Rectangle(0, 0, hgram.HogSize.Width, hgram.HogSize.Height);
            return blockgram.GetHogWindowVec(gramRect);
        }
コード例 #14
0
ファイル: tld.cs プロジェクト: Micky-G/VideoTrace
        /// <summary>
        /// HOG detection. The image is shrunk into the BMPLIMITSIZE bounds when
        /// necessary, an image pyramid is scanned with the HOG detection window, and
        /// every hit rectangle is mapped back to original-image coordinates through the
        /// combined scale factors.
        /// </summary>
        /// <param name="bmp">Source bitmap.</param>
        /// <returns>Candidate object rectangles, or null when detection cannot run.</returns>
        public RectangleCollection HogDetect(Bitmap bmp)
        {
            if (bmp == null)
            {
                return(null);
            }

            // NOTE(review): this only bails out when BOTH centers are missing; if exactly
            // one of NegCenter/PosCenter is null it still reaches DetectImgByHogWindow
            // below — confirm the callee tolerates a single null center.
            if (NegCenter == null && PosCenter == null)
            {
                return(null);
            }

            // Scale factor applied to the original image so it fits within BMPLIMITSIZE;
            // hit rectangles are multiplied back by it at the end.
            double se = 1;
            if (bmp.Width > Parameter.BMPLIMITSIZE.Width || bmp.Height > Parameter.BMPLIMITSIZE.Height)
            {
                se = bmp.Width / (double)Parameter.BMPLIMITSIZE.Width > bmp.Height / (double)Parameter.BMPLIMITSIZE.Height ?
                     bmp.Width / (double)Parameter.BMPLIMITSIZE.Width : bmp.Height / (double)Parameter.BMPLIMITSIZE.Height;
                bmp = ImgOper.ResizeImage(bmp, (int)(bmp.Width / se), (int)(bmp.Height / se));
            }
            bmp = ImgOper.Grayscale(bmp);

            //bmp = ImgOper.GaussianConvolution(bmp, GAUSSIAN_SIGMA, GAUSSIAN_SIZE);   // Gaussian smoothing (disabled)

            // Detection results of every pyramid layer.
            ArrayList resultlayers = new ArrayList();
            // Per-layer shrink factor, starting at the (already size-limited) original.
            double scalecoef = 1.0;
            int    newwidth  = (int)(bmp.Width / scalecoef);
            int    newheight = (int)(bmp.Height / scalecoef);

            do
            {
                Bitmap scalebmp = ImgOper.ResizeImage(bmp, newwidth, newheight);
                HogGram             hogGram   = HogGram.GetHogFromBitmap(scalebmp, Parameter.CELL_SIZE.Width, Parameter.CELL_SIZE.Height, Parameter.PART_NUMBER);
                NormBlockVectorGram blockGram = new NormBlockVectorGram(hogGram, Parameter.BLOCK_SIZE.Width, Parameter.BLOCK_SIZE.Height);

                DetectResultLayer detectlayer = new DetectResultLayer();
                // The detection window's pixel size must be evenly divisible by the cell
                // size; pixel size divided by cell size is the window size in HOG cells.
                detectlayer.DetectResult = blockGram.DetectImgByHogWindow(
                    new Size(Parameter.DETECT_WINDOW_SIZE.Width / Parameter.CELL_SIZE.Width, Parameter.DETECT_WINDOW_SIZE.Height / Parameter.CELL_SIZE.Height),
                    NegCenter, PosCenter, Parameter.POS_DIST_COEF);
                if (detectlayer.DetectResult == null)
                {
                    return(null);
                }

                detectlayer.ScaleCoef = scalecoef;
                resultlayers.Add(detectlayer);        // queue this layer's result

                scalecoef *= Parameter.SCALE_COEF;    // shrink the image step by step
                newwidth   = (int)(bmp.Width / scalecoef);
                newheight  = (int)(bmp.Height / scalecoef);
            } while (newwidth > 2 * Parameter.DETECT_WINDOW_SIZE.Width && newheight > 2 * Parameter.DETECT_WINDOW_SIZE.Height);

            // Collect every positive window, mapping it back through the layer scale and
            // the initial limit scale. (Unused timing, iteration-count and
            // minimum-distance bookkeeping plus commented-out code were removed — they
            // fed nothing in the returned result.)
            RectangleCollection resultCollection = new RectangleCollection();
            foreach (DetectResultLayer layer in resultlayers)
            {
                WindowResult[] wr = layer.DetectResult;
                for (int i = 0; i < wr.Length; i++)
                {
                    if (wr[i].label == 1)
                    {
                        Rectangle rect = new Rectangle((int)(wr[i].ImageRegion.X * layer.ScaleCoef * se),
                                                       (int)(wr[i].ImageRegion.Y * layer.ScaleCoef * se),
                                                       (int)(wr[i].ImageRegion.Width * layer.ScaleCoef * se),
                                                       (int)(wr[i].ImageRegion.Height * layer.ScaleCoef * se));
                        resultCollection.Add(rect);
                    }
                }
            }

            return(resultCollection);
        }
コード例 #15
0
ファイル: tld.cs プロジェクト: Micky-G/VideoTrace
        /// <summary>
        /// Trains with one negative sample. The bitmap is scaled so the detection window
        /// fits inside it, cropped to the window, HOG-encoded, scored against the current
        /// centers, and inserted into the bounded negative queue when it qualifies, after
        /// which the negative center is re-estimated.
        /// </summary>
        /// <param name="bmp">Negative-sample bitmap.</param>
        public void TrainNegative(Bitmap bmp)
        {
            Bitmap samplebmp    = null;
            double neg_distance = 0;
            double pos_distance = 0;
            bool   hasinserted  = false; // whether the sample was inserted into the queue

            // BUGFIX: the aspect comparison used integer division, which truncates both
            // ratios; a truncation tie could pick the wrong branch and produce a resized
            // image smaller than the detection window, making the CutImage below run out
            // of range. Compare the ratios in floating point instead.
            if ((double)bmp.Width / Parameter.DETECT_WINDOW_SIZE.Width > (double)bmp.Height / Parameter.DETECT_WINDOW_SIZE.Height)
            {
                samplebmp = ImgOper.ResizeImage(bmp,
                                                (int)((double)bmp.Width * Parameter.DETECT_WINDOW_SIZE.Height / bmp.Height), Parameter.DETECT_WINDOW_SIZE.Height);
            }
            else
            {
                samplebmp = ImgOper.ResizeImage(bmp, Parameter.DETECT_WINDOW_SIZE.Width,
                                                (int)((double)bmp.Height * Parameter.DETECT_WINDOW_SIZE.Width / bmp.Width));
            }
            samplebmp = ImgOper.CutImage(samplebmp, 0, 0, Parameter.DETECT_WINDOW_SIZE.Width, Parameter.DETECT_WINDOW_SIZE.Height);
            samplebmp = ImgOper.Grayscale(samplebmp);

            HogGram             hogGram   = HogGram.GetHogFromBitmap(samplebmp, Parameter.CELL_SIZE.Width, Parameter.CELL_SIZE.Height, Parameter.PART_NUMBER);
            NormBlockVectorGram blockGram = new NormBlockVectorGram(hogGram, Parameter.BLOCK_SIZE.Width, Parameter.BLOCK_SIZE.Height);

            Rectangle rect = new Rectangle(0, 0, hogGram.HogSize.Width, hogGram.HogSize.Height);

            double[] vect = blockGram.GetHogWindowVec(rect);

            if (Dimension != 0 && vect.Length != Dimension)
            {
                throw new Exception("输入负样本的尺寸与其他样本尺寸不一致!");
            }

            ValuedBitmap vbmp = null;

            if (PosCenter != null && NegCenter != null)
            {
                // L1 distances to the positive and negative centers.
                for (int i = 0; i < vect.Length; i++)
                {
                    neg_distance += Math.Abs(vect[i] - NegCenter[i]);
                    pos_distance += Math.Abs(vect[i] - PosCenter[i]);
                }

                // Coinciding with the positive center means this is a positive sample;
                // it must not enter the negative queue.
                if (pos_distance == 0)
                {
                    return;
                }

                // Second gate: a sample that is not close enough to the negative center
                // is rejected. Mirroring the HOG-detection criterion, the positive
                // distance is weighted by Parameter.POS_DIST_COEF to stay clear of the
                // decision boundary.
                if (pos_distance * Parameter.POS_DIST_COEF < neg_distance)
                {
                    return;
                }

                // Normalized rating: neg_distance / pos_distance alone could overflow;
                // neg_distance / (pos_distance + neg_distance) is used, smaller = closer
                // to the negative class.
                vbmp = new ValuedBitmap(samplebmp, neg_distance / (pos_distance + neg_distance));
            }
            else
            {
                // Sample bases not built yet: rate the sample 1 for now.
                vbmp = new ValuedBitmap(samplebmp, 1);
            }

            // Third gate: the ordered, bounded queue decides whether the rating is good
            // enough to keep the sample.
            hasinserted = InsertValuedBitmap(ref NegMapCollection, vbmp, Parameter.NEG_LIMITED_NUMBER);
            NegLength   = NegMapCollection.Count;

            // An inserted sample is trustworthy; fold it into the running center.
            // NOTE(review): NegLength already includes the new sample here, so the
            // (NegLength, NegLength + 1) averaging weights look off by one — preserved
            // as-is; confirm intent.
            if (hasinserted)
            {
                if (NegCenter == null)
                {
                    Dimension = vect.Length;
                    NegCenter = new double[Dimension];
                }

                for (int i = 0; i < Dimension; i++)
                {
                    NegCenter[i] = (NegCenter[i] * NegLength + vect[i]) / (NegLength + 1);
                }
            }
        }