// Ejemplo n.º 1 (sample marker from source listing)
// 0
        // Renders the result of a Farneback dense optical-flow pass as a field of
        // motion vectors drawn on a black canvas, then upscales the canvas back to
        // full size on the GPU.
        //
        // img_curr : current (already size-reduced) frame; retained for interface
        //            compatibility — this implementation does not read it.
        // flow_x   : per-pixel horizontal displacement produced by the Farneback algorithm.
        // flow_y   : per-pixel vertical displacement produced by the Farneback algorithm.
        // optiVars : stepRate = sampling distance between depicted pixels;
        //            shiftThatCounts = minimum vector length for a vector to be drawn.
        //
        // NOTE: flow_x/flow_y are indexed by a pixel's position BEFORE the flow
        // operation; the value stored at that index is the displacement of that
        // pixel (zero when the pixel did not move).
        //
        // Review fixes relative to the previous version:
        //  - the temporary System.Drawing.Bitmap used to build the canvas was never
        //    disposed (GDI handle leak); the canvas is now constructed directly.
        //  - blackFrame and blackDst are now disposed deterministically.
        //  - forced GC.Collect calls removed (per-frame forced collections stall the
        //    pipeline and are an anti-pattern in production code).
        //  - dead code removed: the left/right counts and summed vectors
        //    (count_X_left, vector_right_end_window, window_centre, mid_point_*)
        //    were locals whose results were never read; mid_point_horz also
        //    multiplied by frameReduction although the frame had been divided by it.
        private Image <Bgr, Byte> Draw_Farneback_flow_map(Image <Bgr, Byte> img_curr, Image <Gray, float> flow_x, Image <Gray, float> flow_y, OpticalFlowVariable optiVars)
        {
            // black canvas at the reduced working size, explicitly filled with black
            using (Image <Bgr, Byte> blackFrame = new Image <Bgr, Byte>(1280 / frameReduction, 720 / frameReduction, new Bgr(0, 0, 0)))
            {
                System.Drawing.Point from_dot_xy = new System.Drawing.Point(); // pixel position before the flow
                System.Drawing.Point to_dot_xy   = new System.Drawing.Point(); // pixel position after the flow

                MCvScalar col;                                                 // color of the lines representing flow vectors

                col.V0 = 100;
                col.V1 = 255;
                col.V2 = 0;
                col.V3 = 100;

                // sample the flow field every stepRate pixels and draw a line from the
                // old pixel position to the new one whenever the shift is large enough
                for (int i = 0; i < flow_x.Rows; i += optiVars.stepRate)
                {
                    for (int j = 0; j < flow_x.Cols; j += optiVars.stepRate)
                    {
                        from_dot_xy.X = j; // column index is the x-coordinate
                        from_dot_xy.Y = i; // row index is the y-coordinate

                        // displacement stored in the flow images, added to the origin
                        to_dot_xy.X = from_dot_xy.X + (int)flow_x.Data[i, j, 0];
                        to_dot_xy.Y = from_dot_xy.Y + (int)flow_y.Data[i, j, 0];

                        if (GetDistance(from_dot_xy.X, from_dot_xy.Y, to_dot_xy.X, to_dot_xy.Y) > optiVars.shiftThatCounts)
                        {
                            CvInvoke.Line(blackFrame, from_dot_xy, to_dot_xy, col, 1);
                        }
                    }
                }

                // upscale the canvas back to full size on the GPU
                using (Mat blackDst = new Mat())
                {
                    using (GpuMat gMatSrc = new GpuMat())
                        using (GpuMat gMatDst = new GpuMat()) {
                            gMatSrc.Upload(blackFrame.Mat);
                            Emgu.CV.Cuda.CudaInvoke.Resize(gMatSrc, gMatDst, new Size(0, 0), frameReduction, frameReduction, Inter.Area);
                            gMatDst.Download(blackDst);
                        }

                    // ToImage copies the pixel data, so blackDst can be disposed safely
                    return(blackDst.ToImage <Bgr, Byte>());
                }
            }
        }
// Ejemplo n.º 2 (sample marker from source listing)
// 0
        /// <summary>
        /// Runs Farneback dense optical flow between the previous frame and the
        /// current one, and returns the current frame with the flow vectors
        /// alpha-blended on top. Returns the input bitmap unchanged on the very
        /// first call (no previous frame yet) and whenever the stored previous
        /// frame's height no longer matches the freshly reduced frame (e.g. the
        /// reduction factor changed between calls).
        /// </summary>
        /// <param name="bmp">Current camera frame.</param>
        /// <param name="optiVariables">Flow parameters (frame reduction factor, step rate, shift threshold).</param>
        /// <param name="cam">Not read by this method body; kept for caller compatibility.</param>
        /// <returns>Composited bitmap, or <paramref name="bmp"/> unchanged when flow cannot be computed.</returns>
        public Bitmap Dense_Optical_Flow(Bitmap bmp, OpticalFlowVariable optiVariables, Camera cam)
        {
            // clamp the reduction factor to at least 1 (guards the divisions below)
            frameReduction = optiVariables.frameReduction < 1 ? 1 : optiVariables.frameReduction;
            // frame becomes previous frame (i.e., prev_frame stores information about current frame)
            prev_frame = matframe;

            Image <Bgr, Byte> imageCV = new Image <Bgr, byte>(bmp); //Image Class from Emgu.CV

            matframe = imageCV.Mat;                                 //This is your Image converted to Mat

            // very first frame: nothing to compare against yet
            if (prev_frame == null)
            {
                return(bmp);
            }

            // frame_nr increment by number of steps given in textfield on user interface
            frame_nr += 1;


            // initialize this Image Matrix before resizing (see below), so it remains at original size
            img_average_vectors = new Image <Bgr, byte>(matframe.Width, matframe.Height);

            orig_height = matframe.Height;

            // NOTE(review): n_size is computed but never used — the GPU resize below
            // works with scale factors instead. Candidate for removal.
            Size n_size = new Size(matframe.Width / frameReduction,
                                   matframe.Height / frameReduction);

            // Resize frame and previous frame (smaller to reduce processing load)
            //Source

            // NOTE(review): matFramDst, imageCV, alphaImgShape, alphaimg and
            // farnebackImg are never disposed in this method — potential native
            // memory leak per frame; verify ownership before adding Dispose calls.
            Mat matFramDst = new Mat();

            using (GpuMat gMatSrc = new GpuMat())
                using (GpuMat gMatDst = new GpuMat()) {
                    gMatSrc.Upload(matframe);
                    Emgu.CV.Cuda.CudaInvoke.Resize(gMatSrc, gMatDst, new Size(0, 0), (double)1 / frameReduction, (double)1 / frameReduction);
                    gMatDst.Download(matFramDst);
                }

            matframe = matFramDst;

            // the stored previous frame was reduced with the previous factor; if the
            // heights no longer match (factor changed), skip flow for this frame
            if (prev_frame.Height != matframe.Height)
            {
                return(bmp);
            }



            // images that are compared during the flow operations (see below)
            // these need to be greyscale images
            Image <Gray, Byte> prev_grey_img, curr_grey_img;

            prev_grey_img = new Image <Gray, byte>(prev_frame.Width, prev_frame.Height);
            curr_grey_img = new Image <Gray, byte>(matframe.Width, matframe.Height);

            // Image arrays to store information of flow vectors (one image array for each direction, which is x and y)
            Image <Gray, float> flow_x;
            Image <Gray, float> flow_y;

            flow_x = new Image <Gray, float>(matframe.Width, matframe.Height);
            flow_y = new Image <Gray, float>(matframe.Width, matframe.Height);

            // assign information stored in frame and previous frame in greyscale images (works without convert function)
            CvInvoke.CvtColor(matframe, curr_grey_img, ColorConversion.Bgr2Gray);
            CvInvoke.CvtColor(prev_frame, prev_grey_img, ColorConversion.Bgr2Gray);


            // Apply Farneback dense optical flow
            // parameters are the two greyscale images (these are compared)
            // and two image arrays storing the flow information
            // the results of the procedure are stored
            // the rest of the parameters are:
            // pryScale: specifies image scale to build pyramids: 0.5 means that each next layer is twice smaller than the former
            // levels: number of pyramid levels: 1 means no extra layers
            // winSize: the average window size; larger values = more robust to noise but more blur
            // iterations: number of iterations at each pyramid level
            // polyN: size of pixel neighbourhood: higher = more precision but more blur
            // polySigma
            // flags


            CvInvoke.CalcOpticalFlowFarneback(prev_grey_img, curr_grey_img, flow_x, flow_y, 0.5, 3, 10, 3, 6, 1.3, 0);


            // call function that shows results of Farneback algorithm
            Image <Bgr, Byte> farnebackImg = Draw_Farneback_flow_map(matframe.ToImage <Bgr, Byte>(), flow_x, flow_y, optiVariables);// given in global variables section

            // Release memory
            prev_grey_img.Dispose();
            curr_grey_img.Dispose();
            flow_x.Dispose();
            flow_y.Dispose();

            //return farnebackImg.ToBitmap();

            // semi-transparent canvas at the ORIGINAL frame size used to blend the
            // flow overlay with the camera image
            Image <Bgra, Byte> alphaImgShape = new Image <Bgra, byte>(imageCV.Size.Width, imageCV.Size.Height, new Bgra(0, 0, 0, .5));

            CvInvoke.AddWeighted(alphaImgShape, .5, BlackTransparent(farnebackImg), .5, 0, alphaImgShape);

            Mat alphaimg = new Mat();

            // the camera frame needs an alpha channel before GPU alpha compositing
            CvInvoke.CvtColor(imageCV, alphaimg, ColorConversion.Bgr2Bgra);

            // GPU path when CUDA is available, CPU fallback (Overlay helper) otherwise
            if (CudaInvoke.HasCuda)
            {
                using (GpuMat gMatSrc = new GpuMat())
                    using (GpuMat gMatSrc2 = new GpuMat())
                        using (GpuMat gMatDst = new GpuMat()) {
                            gMatSrc.Upload(alphaimg);
                            gMatSrc2.Upload(alphaImgShape);
                            CudaInvoke.AlphaComp(gMatSrc, gMatSrc2, gMatDst, AlphaCompTypes.Plus);
                            gMatDst.Download(alphaimg);
                        }
                return(alphaimg.Bitmap);
            }
            else
            {
                return(Overlay(imageCV, alphaImgShape).ToBitmap());
            }
        }