private CvLineSegmentPoint[] detectLinesFromCanny(CvMat roi)
{
    CvMat edgesMat = MatOps.CopySize(roi, MatrixType.U8C1);
    roi.Canny(edgesMat, 10, 200, ApertureSize.Size3);    // Size5 also works well; 7 does not; the rest crash!

    // these values work fine with "box7.png"
    double rho = 1;                     // 1
    double theta = 1 * Cv.PI / 180;     // 1*Cv.PI/180
    int threshold = 75;                 // 75 (quality)
    double minLength = 1;               // 1
    double maxGap = 10000;              // 1000000, but not Infinity, for some dumb reason

    CvLineSegmentPoint[] lines = edgesMat.HoughLinesProbabilistic(rho, theta, threshold, minLength, maxGap);

    CvMat linesMat = MatOps.CopySize(edgesMat, MatrixType.U8C3, 0);
    for (int i = 0; i < lines.Length; ++i)
    {
        linesMat.Line(lines[i].P1, lines[i].P2, Const.ScalarRandom(), 3, LineType.AntiAlias);
    }
    //MatOps.NewWindowShow( edgesMat, "edgesMat Canny-Hough" );
    MatOps.NewWindowShow(linesMat, "linesMat");

    Console.WriteLine("====================");
    Console.WriteLine("detectLinesFromCanny");
    Console.WriteLine("lines=" + lines.Length);
    Console.WriteLine("====================");

    return(lines);
}
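// A minimal sketch, not part of the original code: the Hough settings above are duplicated
// verbatim in detectLinesFromFeatures, so they could live in shared, named fields instead of
// local magic numbers. The field and method names below are made up for illustration.
private static readonly double HoughRho = 1;                   // distance resolution, in pixels
private static readonly double HoughTheta = 1 * Cv.PI / 180;   // angle resolution, in radians (1 degree)
private const int HoughThreshold = 75;                         // accumulator threshold ("quality")
private static readonly double HoughMinLength = 1;             // minimum accepted segment length
private static readonly double HoughMaxGap = 10000;            // maximum gap between collinear points

private static CvLineSegmentPoint[] houghSegments(CvMat binary)
{
    // 'binary' is expected to be an 8-bit single-channel edge/feature image
    return binary.HoughLinesProbabilistic(HoughRho, HoughTheta, HoughThreshold, HoughMinLength, HoughMaxGap);
}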
private void resetBoxEstimation(CvMat input, ref CvMat hue, ref CvMat normalize)
{
    boxEstimationType = BoxEstimationType.NONE;

    // this logic is here (and not somewhere else) so that we don't have to calculate hue/normalize twice in a single frame
    // that's because hue/normalize are also needed at a later stage of frame processing

    // this check right here sounds stupid, but I want to easily change priority between hue and normalize
    // in the end, one of the 2 will stay in 1st place and the check will be removed...
    if (boxEstimationType == BoxEstimationType.NONE)
    {
        hue = MatOps.BGRtoHue(input);
        //MatOps.NewWindowShow( hue, "HUE-processed" );
        if (estimateBoxHint(hue, ref floodHueTolerance))
        {
            boxEstimationType = BoxEstimationType.HUE;
        }
    }
    if (boxEstimationType == BoxEstimationType.NONE)
    {
        normalize = MatOps.MyNormalize(input);
        MatOps.NewWindowShow(normalize, "NORMALIZE-processed");
        if (estimateBoxHint(normalize, ref floodNormTolerance))
        {
            boxEstimationType = BoxEstimationType.NORMALIZE;
        }
    }
}
private CvLineSegmentPoint[] detectLinesFromFeatures(CvMat hue, CvMat roi)
{
    // IDEA 3 :
    // Extract features (actual box corners?!) from ROI with corner detection
    CvPoint2D32f[] corners;       // extracted features
    int cornerCount;              // not exactly "count", but rather "maximum number of corners to return"
    double qualityLevel = 0.05;   // this changes to 0.1 if NOT using ROI as mask!
    double minimumDistance = 25;  // maybe this has to be a percentage of the input-size, rather than an absolute value?
    bool useHarris = false;
    int blockSize = 3;

    // NOTE : roi is not as good to check for features as the hue itself!!!
#if false
    cornerCount = 100;
    Cv.GoodFeaturesToTrack(
        roi,
        MatOps.CopySize(roi, MatrixType.F32C1, Const.ScalarBlack),
        MatOps.CopySize(roi, MatrixType.F32C1, Const.ScalarBlack),
        out corners, ref cornerCount, qualityLevel, minimumDistance, null, blockSize, useHarris);

    CvMat roiClone = roi.Clone();
    roiClone.SaveImage("roiClone.png");
    for (int i = 0; i < cornerCount; ++i)
    {
        // remove "isolated" features : gave back some good results, but it still wasn't as good as actual HUE feature discovery
        CvPoint2D32f feature = corners[i];
        if (checkFeatureArea(roiClone, feature))
        {
            roiClone.Circle(feature, 10, 127);
        }
    }
    MatOps.NewWindowShow(roiClone, "ROI!");
    Console.WriteLine("corners=" + cornerCount);
#endif

    // TODO : determine if it's a good idea to use ROI as a mask.
    // NOTE : Abandoning this idea for now. Good features are truly found, but they give worse lines than [IDEA 4]!
    cornerCount = 100;
    Cv.GoodFeaturesToTrack(
        hue,
        MatOps.CopySize(roi, MatrixType.F32C1, Const.ScalarBlack),
        MatOps.CopySize(roi, MatrixType.F32C1, Const.ScalarBlack),
        out corners, ref cornerCount, qualityLevel, minimumDistance, roi, blockSize, useHarris);

    //CvMat hueClone = hue.Clone();
    CvMat hueClone = MatOps.CopySize(hue, MatrixType.U8C1, 0);
    for (int i = 0; i < cornerCount; ++i)
    {
        hueClone.Circle(corners[i], 10, 127, -1);
    }

    CvLineSegmentPoint[] lines2 = hueClone.HoughLinesProbabilistic(1, 1 * Cv.PI / 180, 75, 1, 10000);
    for (int i = 0; i < lines2.Length; ++i)
    {
        hueClone.Line(lines2[i].P1, lines2[i].P2, Const.ScalarRandom(), 3, LineType.AntiAlias);
    }
    MatOps.NewWindowShow(hueClone, "Lines from Features");

    Console.WriteLine("=======================");
    Console.WriteLine("detectLinesFromFeatures");
    Console.WriteLine("corners=" + cornerCount);
    Console.WriteLine("lines=" + lines2.Length);
    Console.WriteLine("=======================");

    return(lines2);
}
static public CvMat BGRtoHueCV(CvMat input)
{
    // BgrToHsv_Full maps hue to the full 0-255 range, so it fits a single 8-bit channel
    CvMat hsl = MatOps.ConvertChannels(input, MatrixType.U8C3, ColorConversion.BgrToHsv_Full);
    CvMat hue = MatOps.CopySize(input, MatrixType.U8C1);
    //CvMat lum = hue.EmptyClone();
    //hsl.Split( hue, null, lum, null );
    hsl.Split(hue, null, null, null);
    return(hue);
}
// => accepts everything OpenCv.FloodFill accepts
// <= returns true if succeeded
private bool estimateBoxHint(CvMat input, ref double tolerance)
{
    // IDEA :
    // Get area around hinted point and find the range of box colors (due to lighting etc it can't be a single color)
    CvConnectedComp filledAreaData;
    double tol = tolerance;    // don't affect the caller's value unless we are successful
    int hueArea = input.Rows * input.Cols;
    double floodMaxArea = floodMaxAreaPercent * hueArea;
    double floodMinArea = floodMinAreaPercent * hueArea;
    int retries = 0;
    const int MaxRetries = 4;
    do
    {
        Console.Out.WriteLine("FLOOD FILLING AT " + hintPos);
        CvScalar scalarTol = new CvScalar(tol, tol, tol, tol);    // because CvScalar doesn't know how to properly multiply itself with a number!!!
        CvMat filledArea = null;
        filledAreaData = MatOps.GetAreaOfSimilarPixels(input, hintPos, scalarTol, scalarTol, ref filledArea);
        //MatOps.NewWindowShow( filledArea, hue.ElemType+" try:" + retries );
        if (filledAreaData.Area >= floodMinArea)
        {
            if (filledAreaData.Area <= floodMaxArea)
            {
                // keep the new value in order to adapt faster next time this function is called!!!
                tolerance = tol;
                break;    // we're good to go!!!
            }
            else
            {
                tol /= floodNarrowFactor;    // too big; try again with a NARROWER search range
            }
        }
        else
        {
            tol *= floodBroadenFactor;    // too small; try again with a BROADER search range
        }
        Console.WriteLine("On next retry tolerance will be:" + tol);
        Console.Out.WriteLine(retries + ") must retry COL={0} area={1} rect={2}", filledAreaData.Value, filledAreaData.Area, filledAreaData.Rect);
        if (++retries > MaxRetries)
        {
            // can't search forever, maybe the hint wasn't good enough?
            return(false);
        }
    } while (true);

    boxEstimatedValue = filledAreaData.Value;
    // TODO : Also get the minimum and maximum values in the area returned!
    // It's a shame this isn't returned upfront by FloodFill!
    Console.Out.WriteLine(retries + ")YEAH! COL={0} area={1} rect={2}", boxEstimatedValue, filledAreaData.Area, filledAreaData.Rect);
    return(true);
}
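// A minimal sketch of the retry logic used above, pulled out of the OpenCV specifics:
// broaden the flood-fill tolerance while the filled area is too small, narrow it while it
// is too big, and give up after a few retries. The delegate and all names here are
// illustrative assumptions, not part of the original code.
private static bool AdaptTolerance(Func<double, double> areaForTolerance,
                                   double minArea, double maxArea,
                                   double broadenFactor, double narrowFactor,
                                   ref double tolerance, int maxRetries = 4)
{
    double tol = tolerance;
    for (int retry = 0; retry <= maxRetries; ++retry)
    {
        double area = areaForTolerance(tol);
        if (area >= minArea && area <= maxArea)
        {
            tolerance = tol;    // keep the value that worked, so the next call converges faster
            return true;
        }
        tol = (area < minArea) ? tol * broadenFactor    // too small; broaden the search range
                               : tol / narrowFactor;    // too big; narrow the search range
    }
    return false;    // the hint was probably not good enough
}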
// => hue and normalize, if null, may be created and assigned
// <= ROI
private CvMat detectROI(CvMat input, ref CvMat hue, ref CvMat normalize)
{
    // TODO : Like I said above, if I get the minimum/maximum values, I have an accurate lowerBound/upperBound pair to work with!!!
    CvMat roi;
    CvScalar lowerBound;
    CvScalar upperBound;

    // IDEA 3:
    // Determine if I should check for "features" in the "thresholded" image, or in a cropped grayscale version of the original one!!
    // For now, lets search the thresholded one...
    if (boxEstimationType == BoxEstimationType.HUE)
    {
        roi = MatOps.CopySize(input, MatrixType.U8C1);
        lowerBound = boxEstimatedValue - floodHueTolerance / 1;    // TODO : this should be +-(MAX VALUE)
        upperBound = boxEstimatedValue + floodHueTolerance / 1;
        if (hue == null)
        {
            hue = MatOps.BGRtoHue(input);
        }
        hue.InRangeS(lowerBound, upperBound, roi);
    }
    else if (boxEstimationType == BoxEstimationType.NORMALIZE)
    {
        // TODO : must investigate, range doesn't return anything
        roi = MatOps.CopySize(input, MatrixType.U8C1);
        lowerBound = boxEstimatedValue - floodNormTolerance;
        upperBound = boxEstimatedValue + floodNormTolerance;
        if (normalize == null)
        {
            normalize = MatOps.MyNormalize(input);
        }
        normalize.InRangeS(lowerBound, upperBound, roi);
    }
    else
    {
        // Couldn't estimate either way? We are off to a bad start, but lets try to see if features can be extracted anyway.
        roi = MatOps.ConvertChannels(input);    // we are already losing valuable info here!!
    }
    return(roi);
}
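// A sketch of the TODOs above (not in the original code): if MatOps.GetAreaOfSimilarPixels
// hands back the flood-filled region as an 8-bit mask of the same size as the image, the
// masked overload of Cv.MinMaxLoc (wrapping cvMinMaxLoc) would give the actual minimum and
// maximum values inside that region, i.e. an exact lowerBound/upperBound pair for InRangeS.
// If the mask is the 2-pixel-padded FloodFill mask, it would have to be cropped first.
// All names below are assumptions made for illustration.
private static void boundsFromFilledArea(CvMat singleChannel, CvMat filledAreaMask,
                                         out CvScalar lowerBound, out CvScalar upperBound)
{
    double minVal, maxVal;
    CvPoint minLoc, maxLoc;
    Cv.MinMaxLoc(singleChannel, out minVal, out maxVal, out minLoc, out maxLoc, filledAreaMask);
    lowerBound = new CvScalar(minVal);
    upperBound = new CvScalar(maxVal);
}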
// NOTE : Also seems not well written and could use optimization in places. P.A.N.A.R.G.O.
// => frame = 8 bit greyscale CvMat
static public void ContrastEnhancement(CvMat frame)
{
    //CvMat originalFrame = frame; // return this if cannot enhance
    //if (frame.ElemType != MatrixType.U8C1)
    //    frame = MatOps.Convert(frame, MatrixType.U8C1, 1 / 255.0 );

    ///// original histogram
    const int HistBinSize = 256;
    int[] histSizes = new int[1];
    histSizes[0] = HistBinSize;
    CvHistogram hist = new CvHistogram(histSizes, HistogramFormat.Array);
    Cv.CalcArrHist(frame, hist, false);    // size = 256 implied

    CvHistogram newHist = MatOps.CopyHistogram(hist);
    CvArr newHistBin = newHist.Bins;

    //double[] origVals = new double[hist.Bins.GetDims( 0 )];
    List<double> origVals = new List<double>(HistBinSize);
    for (int i = 0; i < HistBinSize; i++)
    {
        double elem = newHistBin.GetReal1D(i);
        if (elem != 0)
        {
            origVals.Add(elem);
        }
    }

    // FIX : See no need for histL, since we have origVals
    ////// histogram with only nonzero bins
    //CvMat histL = new CvMat( imageRows, imageCols, MatrixType.F32C1, new CvScalar( 0 ) );
    //for (i = 0; i < origVals.size(); i++)
    //    histL.at<float>( i, 0 ) = origVals.at( i );

    List<double> peakValues = new List<double>(HistBinSize);    //std::vector<int> peakValues;
    ////// 3 bin search window
    for (int i = 1; i < origVals.Count - 2; ++i)
    {
        double elem = origVals[i];
        if (elem > origVals[i - 1] && elem > origVals[i + 1])
        {
            peakValues.Add(elem);
        }
    }
    if (peakValues.Count == 0)
    {
        //Console.Out.WriteLine( "Cannot enhance" );
        return;    // cannot enhance?
    }

    ////// Upper threshold
    double threshUP = 0;
    for (int i = 0; i < peakValues.Count; ++i)
    {
        threshUP += peakValues[i];
    }
    threshUP /= peakValues.Count;

    ////// Lower threshold
    double threshDOWN = Math.Min((frame.Cols * frame.Rows), threshUP * origVals.Count) / 256.0;
    //Console.Out.WriteLine( "Enhance thresholds " + threshUP + "/" + threshDOWN );

    ////// histogram reconstruction
    CvArr histBins = hist.Bins;
    for (int i = 0; i < HistBinSize; ++i)
    {
        double histElem = histBins.GetReal1D(i);
        if (histElem > threshUP)
        {
            histBins.SetReal1D(i, threshUP);
        }
        else if (histElem <= threshUP && histElem >= threshDOWN)
        {
            continue;
        }
        else if (histElem < threshDOWN && histElem > 0)
        {
            histBins.SetReal1D(i, threshDOWN);
        }
        else if (histElem == 0)
        {
            continue;
        }
    }

    // accumulated values(?)
    double[] accVals = new double[HistBinSize];    //std::vector<int> accVals;
    accVals[0] = histBins.GetReal1D(0);
    for (int i = 1; i < HistBinSize; ++i)
    {
        accVals[i] = accVals[i - 1] + histBins.GetReal1D(i);
    }

    byte[] lookUpTable = new byte[HistBinSize];    //cv::Mat lookUpTable = cv::Mat::zeros( hist.size(), CV_8UC1 );
    for (int i = 0; i < HistBinSize; ++i)
    {
        lookUpTable[i] = (byte)(255.0 * accVals[i] / accVals[255]);
    }

    // assign computed values to input frame
    //Console.Out.Write( "Enhance-->" );
    for (int i = 0; i < frame.Cols; ++i)
    {
        for (int j = 0; j < frame.Rows; ++j)
        {
            // there is NO mask, thus no need to check for one; was: "if (mask.data)..."
            byte oldValue = (byte)frame.GetReal2D(j, i);
            byte newValue = lookUpTable[oldValue];
            //if ((newValue <1 || newValue > 254) && (newValue != oldValue)) Console.Out.Write( oldValue + " " + newValue + "|");
            frame.SetReal2D(j, i, newValue);
            //frame.SetReal2D( j, i, lookUpTable[ (int)(255.0 * frame.GetReal2D( j, i )) ] / 255.0);
        }
    }
    //Console.Out.WriteLine();
    //frame = MatOps.Convert( frame, MatrixType.U8C1, 255.0 );
}
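// A compact sketch (plain arrays, no OpenCV, names made up) of what ContrastEnhancement
// computes above: clip the non-empty histogram bins to [threshDOWN, threshUP], accumulate
// them, and normalise the running sum to 0..255 to obtain the intensity lookup table.
private static byte[] BuildLookUpTable(double[] histogram, double threshDown, double threshUp)
{
    int bins = histogram.Length;    // 256 for an 8-bit image
    double[] clipped = new double[bins];
    for (int i = 0; i < bins; ++i)
    {
        double h = histogram[i];
        clipped[i] = (h == 0) ? 0 : Math.Min(Math.Max(h, threshDown), threshUp);    // empty bins stay empty
    }
    double[] acc = new double[bins];    // cumulative (accumulated) histogram
    acc[0] = clipped[0];
    for (int i = 1; i < bins; ++i)
    {
        acc[i] = acc[i - 1] + clipped[i];
    }
    byte[] lut = new byte[bins];
    for (int i = 0; i < bins; ++i)
    {
        lut[i] = (byte)(255.0 * acc[i] / acc[bins - 1]);
    }
    return lut;
}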
// => inputMat MUST be 24/32 bit
private CvMat processFrame(CvMat inputMat)
{
    // return "inputMat" after lots. LOTS. Of processing

    width = inputMat.Cols;
    height = inputMat.Rows;

    // taking out 4% of the input's edges: sounds wrong
#if false
    // I have no idea what on earth is the purpose of this:
    //CvMat temp2 = inputMat( new CvRect( inputMat.Cols / 25, inputMat.Cols / 25, inputMat.Cols - 2 * (inputMat.Cols / 25), inputMat.Rows - 2 * (inputMat.Rows / 25) ) );
    //resize( temp2, temp2, inputMat.size() );
    //temp2.copyTo( inputMat );
    int borderX = inputMat.Cols / 25;    // 4% of original
    int borderY = inputMat.Rows / 25;
    CvRect roi = new CvRect(borderX, borderY, inputMat.Cols - 2 * borderX, inputMat.Rows - 2 * borderY);
    CvMat temp2 = inputMat.GetSubRect(out temp2, roi);    // stupid to pass "out temp2"?
    inputMat = temp2;    // =TODO : What? temp2.Copy( inputMat );
    // is it really required to remove 4% of the input image's edges?
#endif

    CvMat inputMat_grey;
    {
        // TODO : looks like a waste to make two conversions from inputMat to _grey, instead of 1
        // since OpenCV doesn't support it, it could be made manually
        CvMat inputMat_grey8 = MatOps.ConvertChannels(inputMat);
        inputMat_grey = MatOps.ConvertElements(inputMat_grey8, MatrixType.F32C1, 1.0 / 255.0);
    }

    // NOTE : IBO seems to give good contrast with certain images, but with bbox7, it is just disastrous.
    //MatOps.NewWindowShow( inputMat_grey );
    //inputMat_grey = Filters.IBO( inputMat_grey );    // inputMat_grey = 32f
    //MatOps.NewWindowShow( inputMat_grey );
    inputMat_grey = MatOps.ConvertElements(inputMat_grey, MatrixType.U8C1, 255);    // inputMat_grey = 8u
    // was: SLOW : Filters.ContrastEnhancement( inputMat_grey );    // NOTE : not needed AFTER IBO
    // NOTE : Contrast Enhancement2 may NOT be needed AT ALL, at this point at least, ANYWAY!!!
    Filters.ContrastEnhancement2(inputMat_grey);    // NOTE : certainly NOT needed AFTER IBO
    MatOps.NewWindowShow(inputMat_grey);

    // mask passed originally in the method below was all white, so I optimized it out. Passing the number of pixels was also dumb-o.
    double thresh = Filters.NeighborhoodValleyEmphasis(inputMat_grey);
    Cv.Threshold(inputMat_grey, inputMat_grey, thresh, 255, ThresholdType.BinaryInv);

    IplConvKernel element = new IplConvKernel(3, 3, 1, 1, ElementShape.Cross);
    Cv.Erode(inputMat_grey, inputMat_grey, element);
    Cv.Dilate(inputMat_grey, inputMat_grey, element);
    MatOps.NewWindowShow(inputMat_grey);

    // TODO : check if this check is required
    if (inputMat_grey.ElemType != MatrixType.U8C1)
    {
        inputMat_grey = MatOps.ConvertElements(inputMat_grey, MatrixType.U8C1, 255.0);
    }

    // =======
    // is this just a test?
    CvPoint[] newPtV = Filters.DistillContours(inputMat_grey, 5, Const.PointZero);
    CvMat imageDest;
    using (CvMemStorage storage = new CvMemStorage())
    {
        CvSeq<CvPoint> updateContours = CvSeq<CvPoint>.FromArray(newPtV, SeqType.Contour, storage);
        imageDest = new CvMat(inputMat.Rows, inputMat.Cols, MatrixType.U8C1);
        Cv.DrawContours(imageDest, updateContours, Const.ScalarWhite, 0, 100, 16);
    }
    // =======

    kawane(newPtV);    // updates thresholdDist, minMaskY, final4P

    //*******************************************set a greater contour for estimation of the missing points*******************************//
    // =======
    newPtV = Filters.DistillContours(inputMat_grey, 100, Const.PointZero);
    using (CvMemStorage storage = new CvMemStorage())
    {
        CvSeq<CvPoint> updateContours = CvSeq<CvPoint>.FromArray(newPtV, SeqType.Contour, storage);
        imageDest = new CvMat(inputMat.Rows, inputMat.Cols, MatrixType.U8C1);
        Cv.DrawContours(imageDest, updateContours, Const.ScalarWhite, 0, 100, 1, LineType.AntiAlias);
    }
    // =======

    CvMat mask1 = new CvMat(inputMat.Rows, inputMat.Cols, MatrixType.U8C1, 0);
    Cv.FillConvexPoly(mask1, newPtV, Const.ScalarWhite, 0, 0);

    temp = MatOps.ConvertChannels(inputMat);
    temp.Copy(imageDest, mask1);
    Cv.Canny(imageDest, imageDest, 150, 300, ApertureSize.Size3);

    IplConvKernel element2 = new IplConvKernel(3, 3, 1, 1, ElementShape.Rect);
    Cv.Dilate(imageDest, imageDest, element2);
    Cv.Erode(imageDest, imageDest, element2);

    CvLineSegmentPoint[] lines = Cv2.HoughLinesP(new Mat(imageDest), 1, Cv.PI / 180 /*NOTE : 1 degree angle*/, 50, 50, 50);    // TODO : those 50s..?
    extendLines(lines, 350);    // TODO : This idea sounds arbitrary? And why 350? At least some percentage?

    // draw extended lines
    for (int i = 0; i < lines.Length; ++i)
    {
        CvLineSegmentPoint l = lines[i];
        Cv.Line(imageDest, l.P1, l.P2, Const.ScalarWhite, 1, LineType.AntiAlias);
    }

    Cv.Dilate(imageDest, imageDest, element2);    // TODO : FIX : Dilate again?!

    // another huge function here...
    fourPoints(lines);    ////////////

    //********************************************************************* replace estimate points with mask corners ********//
    if (oldPt.Count != 0)
    {
        //**
        // BEWARE : great use of the English language following right below:
        // test for each and every one of the last slice delete each one of all the revisited of the above and estimate for only the best the off topic adapt
        //**
        List<int> positions = new List<int>(final4P.Count);
        for (int i = 0; i < final4P.Count; ++i)
        {
            positions.Add(-1);    // "initialize" positions[i]
            double distmin = 10000;
            for (int j = 0; j < oldPt.Count; ++j)
            {
                double distAB = PointOps.Norm(oldPt[j] - final4P[i]);
                if (distAB < distmin)
                {
                    distmin = distAB;
                    positions[i] = j;
                }
            }
        }
        int flagFrCounter = 0;
        for (int i = 0; i < final4P.Count; ++i)
        {
            double distA = PointOps.Norm(oldPt[positions[i]] - final4P[i]);
            //********************* threshold that defines the maximum area to search in; otherwise keep the previous point *******//
            if (distA < thresholdDist)    //if(distA<80)
            {
                oldPt[positions[i]] = final4P[i];
                --flagFrCounter;
            }
            ++flagFrCounter;
        }
        if (reset)
        {
            numFrames = 0;
            oldPt.Clear();
            final4P.Clear();
        }
    }
    //pointsb[0]=thresholdDist;
    //****************************************************************************//

    for (int i = 0; i < oldPt.Count; ++i)
    {
        Cv.Circle(temp, oldPt[i], 2, Const.ScalarRed, 3);
    }
    MatOps.Convert8To24(temp).Copy(inputMat);
    //MatOps.ConvertChannels( temp, ColorConversion.GrayToBgr ).Copy( inputMat );
    //temp.Copy( inputMat );

    //******************************************************OVERLAY IMAGE***********************************************//////
    if (oldPt.Count == 0)
    {
        return(inputMat);    // end of line
    }

    CvMat black2;    // the overlay, resized to the frame size
    if (overlay != null)
    {
        //=imread("cubes.jpg");
        black2 = new CvMat(inputMat.Rows, inputMat.Cols, overlay.ElemType);
        Cv.Resize(overlay, black2, Interpolation.NearestNeighbor);    // TODO : check if interpolation type is appropriate
    }
    else
    {
        black2 = new CvMat(inputMat.Rows, inputMat.Cols, MatrixType.U8C3);
    }

    List<CvPoint> tempPoint = new List<CvPoint>(4);    //vector<Point> tempPoint;
    int pp = 0;

    // BEWARE : the guy is copy/pasting needlessly?
    // (a compacted version of the ordering below is sketched in orderCorners, after this method)
    int mini = 1000000;
    for (int i = 0; i < oldPt.Count; ++i)
    {
        if (oldPt[i].Y < mini)
        {
            mini = oldPt[i].Y;
            pp = i;
        }
    }
    tempPoint.Add(oldPt[pp]);

    mini = 1000000;
    for (int i = 0; i < oldPt.Count; ++i)
    {
        if (oldPt[i].Y < mini && oldPt[i] != tempPoint[0])
        {
            mini = oldPt[i].Y;
            pp = i;
        }
    }
    tempPoint.Add(oldPt[pp]);

    mini = 1000000;
    for (int i = 0; i < oldPt.Count; ++i)
    {
        int tempmini = Math.Abs(oldPt[i].X - tempPoint[1].X);
        if (tempmini < mini && oldPt[i] != tempPoint[0] && oldPt[i] != tempPoint[1])
        {
            mini = tempmini;
            pp = i;
        }
    }
    tempPoint.Add(oldPt[pp]);

    for (int i = 0; i < oldPt.Count; ++i)
    {
        CvPoint pt = oldPt[i];
        bool found = false;
        for (int j = 0; j < tempPoint.Count; ++j)
        {
            if (tempPoint[j] == pt)
            {
                found = true;
                break;
            }
        }
        if (!found)
        {
            tempPoint.Add(pt);
        }
    }

    // only keep up to 4 points
    List<CvPoint> co_ordinates = new List<CvPoint>(4);
    {
        int maxIndex = Math.Min(4, tempPoint.Count);
        for (int i = 0; i < maxIndex; ++i)
        {
            co_ordinates.Add(tempPoint[i]);
        }
    }
    // lost me...
    if (outputQuad[0] == outputQuad[2])
    {
        int maxIndex = Math.Min(4, tempPoint.Count);
        for (int i = 0; i < maxIndex; ++i)
        {
            outputQuad[i] = tempPoint[i];
        }
    }
    else
    {
        CvPoint2D32f rr;
        for (int i = 0; i < 4; ++i)
        {
            List<double> dist = new List<double>(tempPoint.Count);
            for (int j = 0; j < tempPoint.Count; ++j)
            {
                rr = tempPoint[j];
                dist.Add(PointOps.Norm(outputQuad[i] - rr));
            }
            double minimumDist = dist.Min();
            int min_pos = Utils.FindIndex(dist, minimumDist);
            if (tempPoint.Count > 0)
            {
                outputQuad[i] = tempPoint[min_pos];
                tempPoint.RemoveAt(min_pos);
            }
        }
    }

    // The 4 points where the mapping is to be done, from top-left in clockwise order
    inputQuad[0] = new CvPoint2D32f(0, 0);
    inputQuad[1] = new CvPoint2D32f(inputMat.Cols - 1, 0);
    inputQuad[2] = new CvPoint2D32f(inputMat.Cols - 1, inputMat.Rows - 1);
    inputQuad[3] = new CvPoint2D32f(0, inputMat.Rows - 1);
    //Input and Output Image;

    // Get the Perspective Transform Matrix i.e. lambda (2D warp transform)
    // Lambda Matrix
    CvMat lambda = Cv.GetPerspectiveTransform(inputQuad, outputQuad);
    // Apply this Perspective Transform to the src image
    // - get a "top-down" view of the supposedly box-y area
    Cv.WarpPerspective(black2, black2, lambda, Interpolation.Cubic, Const.ScalarBlack);
    // see nice explanation : http://www.pyimagesearch.com/2014/08/25/4-point-opencv-getperspective-transform-example/

    CvMat maskOV = new CvMat(inputMat.Rows, inputMat.Cols, MatrixType.U8C1, Const.ScalarBlack);
    using (CvMemStorage storage = new CvMemStorage())
    {
        CvSeq<CvPoint> updateContours = CvSeq<CvPoint>.FromArray(co_ordinates, SeqType.Contour, storage);
        imageDest = new CvMat(inputMat.Rows, inputMat.Cols, MatrixType.U8C1);
        Cv.DrawContours(maskOV, updateContours, Const.ScalarWhite, 0, 100, 16);
        //drawContours( maskOV, co_ordinates, 0, Scalar( 255 ), CV_FILLED, 8 );
    }

    double alpha = 0.8;
    double beta = (1.0 - alpha);
    Cv.AddWeighted(black2, alpha, inputMat, beta, 0.0, black2);
    black2.Copy(inputMat, maskOV);
    return(inputMat);
}
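// A sketch (not part of the original code) of the corner ordering done inside processFrame:
// the three copy/pasted "find the minimum" loops pick the two topmost points, then the
// remaining point closest in X to the second one, and keep whatever is left. The helper
// below captures the same intent in one place; the name orderCorners and the use of
// System.Linq are assumptions made for illustration.
private static List<CvPoint> orderCorners(List<CvPoint> points)
{
    List<CvPoint> byY = points.OrderBy(p => p.Y).ToList();    // smallest Y (topmost) first
    List<CvPoint> ordered = new List<CvPoint>(byY.Count);
    ordered.AddRange(byY.Take(Math.Min(2, byY.Count)));
    List<CvPoint> rest = byY.Skip(ordered.Count).ToList();
    if (ordered.Count == 2 && rest.Count > 0)
    {
        // third point: the remaining one closest in X to the second topmost point
        int thirdIdx = 0;
        for (int i = 1; i < rest.Count; ++i)
        {
            if (Math.Abs(rest[i].X - ordered[1].X) < Math.Abs(rest[thirdIdx].X - ordered[1].X))
            {
                thirdIdx = i;
            }
        }
        ordered.Add(rest[thirdIdx]);
        rest.RemoveAt(thirdIdx);
    }
    ordered.AddRange(rest);    // remaining points keep their Y-sorted order
    return ordered;
}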