Example #1
 /// <summary>
 /// Creates a new tail tracker
 /// </summary>
 /// <param name="regionToTrack">The ROI in which we should track the tail</param>
 /// <param name="tailStart">The designated starting point of the tail</param>
 /// <param name="tailEnd">The designated end point of the tail</param>
 /// <param name="nsegments">The number of tail segments to track btw. start and end</param>
 public TailTracker(IppiSize imageSize, IppiPoint tailStart, IppiPoint tailEnd, int nsegments)
 {
     _threshold = 20;
     _morphSize = 8;
     _frameRate = 200;
     _strel     = BWImageProcessor.GenerateDiskMask(_morphSize);
     _nSegments = 5; //default segment count; NSegments is assigned from nsegments below
     _imageSize = imageSize;
     _tailStart = tailStart;
     _tailEnd   = tailEnd;
     //set up our track regions based on the tail positions
     DefineTrackRegions();
     NSegments = nsegments;
     InitializeImageBuffers(); //set up image buffers
     InitializeScanPoints();   //create scan points appropriate for the tail parameters
     //Initialize our angle store for tracking (size never changes)
     _angleStore = (int *)System.Runtime.InteropServices.Marshal.AllocHGlobal(900 * 4);
 }
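
A minimal construction sketch for the example above, with hypothetical image dimensions and tail coordinates; the IppiSize (width, height) constructor is an assumption, since the snippets here only show its width/height fields:

    // Hypothetical setup: a 1280x1024 frame with a downward-facing tail
    // running from (640, 200) to (640, 600), tracked as 5 segments.
    var imageSize = new IppiSize(1280, 1024);   // assumed (width, height) constructor
    var tailStart = new IppiPoint(640, 200);    // designated tail start
    var tailEnd   = new IppiPoint(640, 600);    // designated tail end
    var tracker   = new TailTracker(imageSize, tailStart, tailEnd, 5);
    // The constructor allocates unmanaged memory for its angle store, so the tracker
    // should be cleaned up when tracking is finished (the exact cleanup API is not shown here).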
Example #2
        /// <summary>
        /// Differences the current image with the previous one and computes the number of pixels
        /// above the set difference threshold in the selected ROI
        /// </summary>
        /// <param name="imCurrent">The current frame to difference with the previous</param>
        /// <param name="roi">The ROI in which to perform the compuation</param>
        /// <returns>Number of pixels above threshold in the delta frame</returns>
        public int ComputeNumDeltaPixels(Image8 imCurrent, IppiROI roi)
        {
            double nPixels = 0;

            if (!_isFirst)
            {
                //form difference image
                cv.ippiAbsDiff_8u_C1R(imCurrent[roi.TopLeft], imCurrent.Stride, _imPrevious[roi.TopLeft], _imPrevious.Stride, _imDelta[roi.TopLeft], _imDelta.Stride, roi.Size);
                //threshold image
                BWImageProcessor.Im2Bw(_imDelta, _imThresh, roi, _threshold);
                //count pixels
                ip.ippiSum_8u_C1R(_imThresh[roi.TopLeft], _imThresh.Stride, roi.Size, &nPixels);
            }
            else
            {
                _isFirst = false;
            }
            //copy current image to previous image buffer
            ip.ippiCopy_8u_C1R(imCurrent[roi.TopLeft], imCurrent.Stride, _imPrevious[roi.TopLeft], _imPrevious.Stride, roi.Size);
            //return pixel count - divide sum by 255 since threshold sets all values to 255
            return((int)nPixels / 255);
        }
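
A hedged usage sketch for the method above: the hosting object (here called detector), the frame source AcquireFrames() and the IppiROI (x, y, width, height) constructor are placeholders for illustration only. Note that the very first call only primes the previous-frame buffer and returns 0:

    var roi = new IppiROI(0, 0, 1280, 1024);              // assumed (x, y, width, height) constructor
    foreach (Image8 frame in AcquireFrames())             // placeholder frame source
    {
        int nChanged = detector.ComputeNumDeltaPixels(frame, roi);
        if (nChanged > 50)                                // arbitrary sensitivity threshold
            Console.WriteLine("Motion: " + nChanged + " pixels above threshold");
    }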
Example #3
        /// <summary>
        /// Tracks the tail segments on the supplied image and returns
        /// the angle and position of each segment relative to the tail start
        /// </summary>
        /// <param name="image">The image on which to identify the tail</param>
        /// <returns>An array of NSegments TailSegment entries</returns>
        public TailSegment[] TrackTail(Image8 image)
        {
            lock (_regionLock)
            {
                //CURRENTLY ONLY DOWNWARD FACING TAILS ARE PROPERLY VERIFIED!!!
                //Generate background via a morphology operation - 10 times a second or whenever our coordinates changed.
                //In the default case, where the tail is darker than the background, a closing operation is used, otherwise an opening.
                if (_frameNumber % (_frameRate / 10) == 0 || !_bgValid)
                {
                    if (!_bgValid)
                    {
                        //if our regions changed, reblank our images!
                        ip.ippiSet_8u_C1R(0, _background.Image, _background.Stride, _background.Size);
                        ip.ippiSet_8u_C1R(0, _foreground.Image, _foreground.Stride, _foreground.Size);
                        ip.ippiSet_8u_C1R(0, _thresholded.Image, _thresholded.Stride, _thresholded.Size);
                    }
                    if (_lightOnDark)
                    {
                        BWImageProcessor.Open(image, _background, _calc1, _strel, _trackRegionOuter);
                    }
                    else
                    {
                        BWImageProcessor.Close(image, _background, _calc1, _strel, _trackRegionOuter);
                    }
                    _bgValid = true;
                }
                //Compute foreground
                IppHelper.IppCheckCall(cv.ippiAbsDiff_8u_C1R(_background[_trackRegionInner.TopLeft], _background.Stride, image[_trackRegionInner.TopLeft], image.Stride, _foreground[_trackRegionInner.TopLeft], _foreground.Stride, _trackRegionInner.Size));
                //Threshold
                BWImageProcessor.Im2Bw(_foreground, _thresholded, _trackRegionInner, _threshold);
                //Fill small holes
                BWImageProcessor.Close3x3(_thresholded, _thresholded, _calc1, _trackRegionOuter);
            }
            //Tracking concept: We track the angle of each segment end (TailStart+SegmentLength:TailEnd)
            //as the angular displacement from the previous segment end
            //To do this we define one full-circle with radius SegmentLength and an angle step corresponding
            //to ~1 Pixel. Then for each angle we pre-compute in InitializeScanPoints the corresponding x
            //and y offsets. From the full circle set we track -90 to +90 degrees around the angle of the
            //previous segment - for the initial segment that angle will be 0. For each segment we will return
            //the segment angle and its associated end-point coordinate
            var retval = new TailSegment[_nSegments];

            lock (_scanPointLock)
            {
                //The index of the absolute angle of the previous segment in our angle sweep array
                //determines which angles we sweep over for the next segment
                int prevAngleIndex   = _scanAngles.Length / 2;
                int nAnglesPerHalfPi = prevAngleIndex / 2;//this is the number of entries in the array that we have to walk to cover 90 degrees
                //we scan beginning with one segment length away from tail start and then walk
                //down the tail rather than using circles fixed around the tail start
                IppiPoint prevSegmentEnd = TailStart;

                //loop over tail segments
                for (int i = 0; i < _nSegments; i++)
                {
                    //loop over scan-points from -pi/2 to +pi/2 (i.e. nAnglesPerHalfPi) around previous segment angle
                    //interpreting the scan points as offsets around the previous tail segment
                    //end point
                    //then find tail angle of this segment
                    int pointsFound = 0;//the number of non-zero pixels identified
                    for (int j = -1 * nAnglesPerHalfPi; j < nAnglesPerHalfPi + 1; j++)
                    {
                        //Determine the index to scan - usually this will simply be "prevAngleIndex+j" however we
                        //have to wrap around our angle array properly in case we scan over more extreme angles
                        int currIndex = prevAngleIndex + j;
                        if (currIndex >= _scanAngles.Length)
                        {
                            currIndex = currIndex % _scanAngles.Length;
                        }
                        else if (currIndex < 0)
                        {
                            currIndex = currIndex + _scanAngles.Length;
                        }

                        //If current point is outside of the image, ignore it
                        IppiPoint pt = new IppiPoint(_coordinateOffsets[currIndex].x + prevSegmentEnd.x, _coordinateOffsets[currIndex].y + prevSegmentEnd.y);
                        if (pt.x < 0 || pt.y < 0 || pt.x >= _imageSize.width || pt.y >= _imageSize.height)
                        {
                            continue;
                        }
                        //if the value at the current point >0 we mark that angle as valid
                        //by storing the index in our anglestore
                        if (*_thresholded[pt] > 0)
                        {
                            _angleStore[pointsFound] = currIndex;
                            pointsFound++;
                        }
                    }
                    //find the median point in our angle store if we have more than 2 points stored
                    //the value in our angle store will directly give us the absolute screen angle of the segment
                    //as well as the coordinate offset which we can use together with the previous segment endpoint
                    //to compute the tail segment end coordinate - to get the delta angle we need to get the difference
                    //between the stored index and the previously stored index, prevAngleIndex
                    //after computing the appropriate return values we update prevAngleIndex with the index from the angle store
                    //and prevSegmentEnd with the returned coordinate of the current tail segment
                    double    deltaTailAngle;
                    IppiPoint coordinate;
                    int       pos;        //the index in the angle store that we determine to be the tail-center

                    if (pointsFound == 0) //we have lost the tail - no reason to keep tracking - fill remaining positions in array with NaNs for their angle
                    {
                        for (int k = i; k < _nSegments; k++)
                        {
                            deltaTailAngle = double.NaN;
                            coordinate     = new IppiPoint();
                            retval[k]      = new TailSegment(deltaTailAngle, coordinate);
                        }
                        break;
                    }
                    else if (pointsFound < 3)
                    {
                        pos = _angleStore[0];
                    }
                    else
                    {
                        //we want the angle at the median position of valid points
                        //we don't compute intermediate positions, however (we can afford this since our angle step is so small), so in case
                        //of an even number of points the top of the lower half currently
                        //wins - since array indexing is zero-based, we have to subtract 1
                        //from the computed median position
                        pos = _angleStore[(int)(Math.Ceiling(pointsFound / 2.0) - 1)];
                    }
                    coordinate     = new IppiPoint(prevSegmentEnd.x + _coordinateOffsets[pos].x, prevSegmentEnd.y + _coordinateOffsets[pos].y);
                    deltaTailAngle = (pos - prevAngleIndex) * _angleStep;
                    //the condition above of wrapping around the "angle circle":if (currIndex >= _scanAngles.Length)....
                    //results in one very large (close to +360 or -360) angle of opposite sign at the switch point
                    //however, since we usually only scan over offsets from +90 to -90 this case is easy to spot
                    //and remedy:
                    if (deltaTailAngle > 90)
                    {
                        deltaTailAngle = deltaTailAngle - 360;//this should create the appropriate tail angle between 0 and -90
                    }
                    else if (deltaTailAngle < -90)
                    {
                        deltaTailAngle = 360 + deltaTailAngle;//this should create the appropriate tail angle between 0 and +90
                    }
                    prevAngleIndex = pos;
                    prevSegmentEnd = coordinate;
                    retval[i]      = new TailSegment(deltaTailAngle, coordinate);
                }//loop over tail segments
            }
            _frameNumber++;
            return(retval);
        }
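
Since TrackTail returns per-segment delta angles (each relative to the previous segment), a consumer typically accumulates them into cumulative angles relative to the initial reference direction. A sketch under the assumption that TailSegment exposes the delta angle through an Angle property (the actual property names are not shown in these examples):

    TailSegment[] segments = tracker.TrackTail(frame);   // frame: an Image8 of the configured size
    double cumulativeAngle = 0;
    for (int i = 0; i < segments.Length; i++)
    {
        if (double.IsNaN(segments[i].Angle))              // assumed property holding the delta angle
            break;                                        // NaN marks a lost tail from this segment onward
        cumulativeAngle += segments[i].Angle;             // accumulate deltas into an angle in degrees
        Console.WriteLine("Segment " + i + ": " + cumulativeAngle + " deg");
    }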
Example #4
        /// <summary>
        /// Extracts a fish (candidate) from an image by performing background subtraction, noise filtering, thresholding and closing to obtain a foreground
        /// followed by marker extraction.
        /// </summary>
        /// <param name="im">The image to extract the fish from</param>
        /// <param name="region">The ROI to search</param>
        /// <returns>The most likely fish blob or null if no suitable candidate was found</returns>
        BlobWithMoments ExtractFish(Image8 im, IppiROI region)
        {
            int nMarkers = 0;


            //Perform background subtraction - CACHE BACKGROUND IMAGE POINTER - otherwise we actually do the whole costly
            //32f conversion twice - once for accessing the actual image, and once for accessing the stride...
            var bg = _bgModel.Background;

            IppHelper.IppCheckCall(cv.ippiAbsDiff_8u_C1R(im[region.TopLeft], im.Stride, bg[region.TopLeft], bg.Stride, _calc[region.TopLeft], _calc.Stride, region.Size));
            //remove noise via median filtering
            _mFiltSize.width  = region.Width - 2;
            _mFiltSize.height = region.Height - 2;
            IppHelper.IppCheckCall(ip.ippiFilterMedianWeightedCenter3x3_8u_C1R(_calc[region.X + 1, region.Y + 1], _calc.Stride,
                                                                               _bgSubtracted[region.X + 1, region.Y + 1], _bgSubtracted.Stride, _mFiltSize, 1));
            //Threshold and close
            Im2Bw(_bgSubtracted, _foreground, region);
            BWImageProcessor.Close3x3(_foreground, _foreground, _calc, region);
            //Label connected components
            IppHelper.IppCheckCall(cv.ippiLabelMarkers_8u_C1IR(_foreground[region.TopLeft], _foreground.Stride, region.Size, 1, 254, IppiNorm.ippiNormInf, &nMarkers, _markerBuffer));
            //loop over returned markers and use ipp to extract blobs
            if (nMarkers > 0)
            {
                if (nMarkers > 254)
                {
                    nMarkers = 254;
                }
                //create or update our intermediate blob storage to store the required number of marker representations
                if (_blobsDetected == null || _blobsDetected.Length < nMarkers)
                {
                    _blobsDetected = new BlobWithMoments[nMarkers];
                }

                for (int i = 1; i <= nMarkers; i++)
                {
                    //label all pixels with the current marker as 255 and others as 0
                    IppHelper.IppCheckCall(ip.ippiCompareC_8u_C1R(_foreground[region.TopLeft], _foreground.Stride, (byte)i, _calc[region.TopLeft], _calc.Stride, region.Size, IppCmpOp.ippCmpEq));
                    //calculate image moments
                    IppHelper.IppCheckCall(ip.ippiMoments64s_8u_C1R(_calc[region.TopLeft], _calc.Stride, region.Size, _momentState));
                    //retrieve moments
                    long m00 = 0;
                    long m10 = 0;
                    long m01 = 0;
                    long m20 = 0;
                    long m02 = 0;
                    long m11 = 0;
                    long m30 = 0;
                    long m03 = 0;
                    long m21 = 0;
                    long m12 = 0;
                    ip.ippiGetSpatialMoment_64s(_momentState, 0, 0, 0, region.TopLeft, &m00, 0);
                    //since our input image is not 0s and 1s but 0s and 255s we have to divide by 255 in order to re-normalize our moments
                    System.Diagnostics.Debug.Assert(m00 % 255 == 0, "M00 was not a multiple of 255");
                    m00 /= 255;
                    //only retrieve other moments if this is a "fish candidate"
                    if (m00 >= MinArea)
                    {
                        ip.ippiGetSpatialMoment_64s(_momentState, 1, 0, 0, region.TopLeft, &m10, 0);
                        m10 /= 255;
                        ip.ippiGetSpatialMoment_64s(_momentState, 0, 1, 0, region.TopLeft, &m01, 0);
                        m01 /= 255;
                        ip.ippiGetSpatialMoment_64s(_momentState, 2, 0, 0, region.TopLeft, &m20, 0);
                        m20 /= 255;
                        ip.ippiGetSpatialMoment_64s(_momentState, 0, 2, 0, region.TopLeft, &m02, 0);
                        m02 /= 255;
                        ip.ippiGetSpatialMoment_64s(_momentState, 1, 1, 0, region.TopLeft, &m11, 0);
                        m11 /= 255;
                        ip.ippiGetSpatialMoment_64s(_momentState, 3, 0, 0, region.TopLeft, &m30, 0);
                        m30 /= 255;
                        ip.ippiGetSpatialMoment_64s(_momentState, 0, 3, 0, region.TopLeft, &m03, 0);
                        m03 /= 255;
                        ip.ippiGetSpatialMoment_64s(_momentState, 2, 1, 0, region.TopLeft, &m21, 0);
                        m21 /= 255;
                        ip.ippiGetSpatialMoment_64s(_momentState, 1, 2, 0, region.TopLeft, &m12, 0);
                        m12 /= 255;
                        if (_blobsDetected[i - 1] == null)
                        {
                            _blobsDetected[i - 1] = new BlobWithMoments(m00, m10, m01, m20, m11, m02, m30, m03, m21, m12);
                        }
                        else
                        {
                            _blobsDetected[i - 1].UpdateBlob(m00, m10, m01, m20, m11, m02, m30, m03, m21, m12);
                        }
                        //Determine bounding box of the blob. The following seems rather wasteful as Ipp must already
                        //have obtained that information before, so maybe there is some way to actually retrieve it??
                        //Do linescans using ipp's sum function starting from the blobs centroid until we hit a line
                        //the sum of which is 0


                        int       xStart, xEnd, yStart, yEnd;
                        double    sum      = 1;
                        IppiPoint centroid = _blobsDetected[i - 1].Centroid;
                        xStart = centroid.x - 1;
                        xEnd   = centroid.x + 1;
                        yStart = centroid.y - 1;
                        yEnd   = centroid.y + 1;
                        //in the following loops we PRE-increment/decrement, hence we stop the loop if we are one step short of the region borders
                        //find xStart
                        _bboxScan.width  = 1;
                        _bboxScan.height = region.Height;
                        while (sum > 0 && xStart > (region.X + 4))
                        {
                            xStart -= 5;
                            IppHelper.IppCheckCall(ip.ippiSum_8u_C1R(_calc[xStart, region.Y], _calc.Stride, _bboxScan, &sum));
                        }
                        xStart += 1;//we have a sum of 0, so go back one line towards the centroid
                        //find xEnd
                        sum = 1;
                        while (sum > 0 && xEnd < region.X + region.Width - 5)
                        {
                            xEnd += 5;
                            IppHelper.IppCheckCall(ip.ippiSum_8u_C1R(_calc[xEnd, region.Y], _calc.Stride, _bboxScan, &sum));
                        }
                        xEnd -= 1;//we have sum of 0, so go back one line towards the centroid
                        //find yStart - we can limit our x-search-space as we already have those boundaries
                        _bboxScan.width  = xEnd - xStart + 1;
                        _bboxScan.height = 1;
                        sum = 1;
                        while (sum > 0 && yStart > (region.Y + 4))
                        {
                            yStart -= 5;
                            IppHelper.IppCheckCall(ip.ippiSum_8u_C1R(_calc[xStart, yStart], _calc.Stride, _bboxScan, &sum));
                        }
                        yStart += 1;
                        //find yEnd - again limit summation to x-search-space
                        sum = 1;
                        while (sum > 0 && yEnd < region.Y + region.Height - 5)
                        {
                            yEnd += 5;
                            IppHelper.IppCheckCall(ip.ippiSum_8u_C1R(_calc[xStart, yEnd], _calc.Stride, _bboxScan, &sum));
                        }
                        yEnd -= 1;
                        _blobsDetected[i - 1].UpdateBoundingBox(xStart, yStart, xEnd - xStart + 1, yEnd - yStart + 1);
                    }
                    else
                    {
                        if (_blobsDetected[i - 1] == null)
                        {
                            _blobsDetected[i - 1] = new BlobWithMoments();
                        }
                        else
                        {
                            _blobsDetected[i - 1].ResetBlob();
                        }
                    }
                }
            }
            else
            {
                return(null);
            }
            //decide which of the detected objects is the fish
            //usually we pick the larger blob - however to avoid
            //tracking reflections on the wall, if the largest blob
            //and second largest blob are of comparable size
            //we pick the one which is closer to the center (as reflections
            //are always more eccentric)
            long maxArea        = 0;
            long secondMaxArea  = 0;
            int  maxIndex       = -1;
            int  secondMaxIndex = -1;

            for (int i = 0; i < nMarkers; i++)
            {
                if (_blobsDetected[i] == null)
                {
                    break;
                }
                //Note down the largest and second-largest blob - but only if those blobs aren't larger than the maxArea and if their eccentricity is at least MinEccentricity
                //this comparison allows us, if we find two exactly same-sized blobs, to consider both but not any further blobs of this size (which we hopefully never have anyways)
                //Eccentricity and MaxAllowedArea checks removed at this point as they were mainly conceived to not track the laser.
                if (_blobsDetected[i].Area >= maxArea && _blobsDetected[i].Area > secondMaxArea /*&& blobsDetected[i].Area<=MaxAllowedArea && blobsDetected[i].Eccentricity>=MinEccentricity*/)
                {
                    secondMaxArea  = maxArea;
                    maxArea        = _blobsDetected[i].Area;
                    secondMaxIndex = maxIndex;
                    maxIndex       = i;
                }
            }

            if (maxArea < MinArea)
            {
                return(null);
            }
            else
            {
                //if our second-largest blob is at least two-thirds the size
                //of the largest blob we also consider distance and swap accordingly
                if ((float)secondMaxArea * 1.5 >= (float)maxArea)
                {
                    double distMax, distSecondMax;
                    distMax       = Distance.Euclidian(_blobsDetected[maxIndex].Centroid, DishCenter);
                    distSecondMax = Distance.Euclidian(_blobsDetected[secondMaxIndex].Centroid, DishCenter);
                    if (distMax > distSecondMax)
                    {
                        maxIndex = secondMaxIndex;
                    }
                }
                return(_blobsDetected[maxIndex]);
            }
        }
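
For reference, the repeated division by 255 above re-normalizes the raw moments because the compare step writes foreground pixels as 255 rather than 1. The centroid that BlobWithMoments presumably derives from these normalized moments follows the standard definition; a small stand-alone helper illustrating that relationship (not part of the tracker code itself):

    // Centroid from normalized raw moments: m00 is the blob area in pixels,
    // m10 and m01 are the first-order moments along x and y.
    static (double x, double y) CentroidFromMoments(long m00, long m10, long m01)
    {
        if (m00 == 0)
            throw new ArgumentException("Empty blob has no centroid", nameof(m00));
        return ((double)m10 / m00, (double)m01 / m00);
    }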
Example #5
        /// <summary>
        /// Given an image identifies a fish in it and returns
        /// its properties such as position and heading
        /// </summary>
        /// <param name="image">The image to be tracked.</param>
        /// <returns>A blob representation of the fish</returns>
        public BlobWithMoments Track(Image8 image)
        {
            if (IsDisposed)
            {
                throw new ObjectDisposedException("Tracker90mmDish", "Can't track after disposal!");
            }

            BlobWithMoments currentFish = null;

            if (RemoveCMOSISBrightLineArtefact)
            {
                BWImageProcessor.Open3x3(image, image, _calc, _imageROI);
            }

            if (_frame > FramesInitialBackground)
            {
                //if _previousFish is present we only check an area around it
                if (_previousFish != null)
                {
                    //compute search region
                    //top-left
                    tl_x = _previousFish.Centroid.x - _searchRegionSize;
                    tl_y = _previousFish.Centroid.y - _searchRegionSize;
                    tl_x = tl_x < 0 ? 0 : tl_x;
                    tl_y = tl_y < 0 ? 0 : tl_y;
                    //bottom-right
                    br_x = _previousFish.Centroid.x + _searchRegionSize;
                    br_y = _previousFish.Centroid.y + _searchRegionSize;
                    br_x = br_x >= image.Width ? image.Width - 1 : br_x;
                    br_y = br_y >= image.Height ? image.Height - 1 : br_y;
                    //update search region
                    _searchRegion.X      = tl_x;
                    _searchRegion.Y      = tl_y;
                    _searchRegion.Width  = br_x - tl_x + 1;
                    _searchRegion.Height = br_y - tl_y + 1;
                    //extract fish within region
                    currentFish = ExtractFish(image, _searchRegion);
                }
                else
                {
                    currentFish = ExtractFish(image, _imageROI);
                }
            }//If We are past initial background frames

            //create/update background and advance frame counter
            if (_frame == 0)
            {
                //Blank images - necessary specifically because of the median filter step
                //which otherwise will leave pixels in _bgSubtracted un-initialized
                ip.ippiSet_8u_C1R(0, Foreground.Image, Foreground.Stride, Foreground.Size);
                ip.ippiSet_8u_C1R(0, _calc.Image, _calc.Stride, _calc.Size);
                ip.ippiSet_8u_C1R(0, _bgSubtracted.Image, _bgSubtracted.Stride, _bgSubtracted.Size);
                _bgModel = new SelectiveUpdateBGModel(image, 1.0f / FramesInBackground);
            }
            else
            {
                //update background every nth frame only
                if (_frame % BGUpdateEvery == 0)
                {
                    if (currentFish == null)
                    {
                        _bgModel.UpdateBackground(image);
                    }
                    else
                    {
                        _bgModel.UpdateBackground(image, currentFish);
                    }
                }
            }
            //Update knowledge about previous fish
            //if current fish is trustworthy
            if (currentFish != null && currentFish.Area > FullTrustMinArea)
            {
                _previousFish = currentFish;
            }
            else
            {
                _previousFish = null;
            }

            _frame++;
            return(currentFish);
        }
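
A hypothetical acquisition loop around Track(): during the first FramesInitialBackground frames the method only feeds the background model and returns null, after which each frame yields either a fish blob or null. The Tracker90mmDish constructor arguments and the frame source are placeholders:

    var tracker = new Tracker90mmDish(/* constructor arguments not shown in these examples */);
    foreach (Image8 frame in AcquireFrames())             // placeholder frame source
    {
        BlobWithMoments fish = tracker.Track(frame);
        if (fish != null)
            Console.WriteLine("Fish at (" + fish.Centroid.x + ", " + fish.Centroid.y + ")");
    }
    tracker.Dispose();                                     // the IsDisposed check above suggests the class implements IDisposable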