// => 24bit rgb
// <= 24bit rgb, normalized of some sort
// NOTE : MUST be converted into multi-core loop (Convert to ParallelFor loop)!
static public CvMat MyNormalize(CvMat input)
{
    CvMat output = input.Clone();
    unsafe
    {
        int total = input.Rows * input.Cols * 3;
        byte *src = input.DataByte;
        byte *dst = output.DataByte;
        for (int idx = 0; idx < total; idx += 3)
        {
            int red = src[idx];
            int green = src[idx + 1];
            int blue = src[idx + 2];
            // because: channel / length => [0...1]; scale back into byte range.
            double scale = 255.0 / Math.Sqrt(red * red + green * green + blue * blue);
            // A black pixel (0,0,0) produces an infinite scale; keep the cloned value untouched.
            if (!Double.IsInfinity(scale))
            {
                dst[idx] = (byte)(red * scale);
                dst[idx + 1] = (byte)(green * scale);
                dst[idx + 2] = (byte)(blue * scale);
            }
        }
    }
    return(output);
}
public Solve()
{
    // Solve the linear system:
    //   x +  y +  z = 6
    //  2x - 3y + 4z = 8
    //  4x + 4y - 4z = 0
    double[] coefficients = new double[]{ 1, 1, 1, 2, -3, 4, 4, 4, -4 };
    double[] constants = new double[]{ 6, 8, 0 };
    CvMat coeffMat = new CvMat(3, 3, MatrixType.F64C1, coefficients);
    CvMat constMat = new CvMat(3, 1, MatrixType.F64C1, constants);
    // X = inv(A) * B
    CvMat inverse = coeffMat.Clone();
    coeffMat.Inv(inverse);
    CvMat solution = inverse * constMat;
    Console.WriteLine("X = {0}", solution[0].Val0);
    Console.WriteLine("Y = {0}", solution[1].Val0);
    Console.WriteLine("Z = {0}", solution[2].Val0);
    Console.Read();
}
public Solve()
{
    // Demonstrates solving a 3x3 linear system A * X = B:
    //   x + y + z = 6 ; 2x - 3y + 4z = 8 ; 4x + 4y - 4z = 0
    double[] lhs = new double[] { 1, 1, 1, 2, -3, 4, 4, 4, -4 };
    double[] rhs = new double[] { 6, 8, 0 };
    CvMat matLhs = new CvMat(3, 3, MatrixType.F64C1, lhs);
    CvMat matRhs = new CvMat(3, 1, MatrixType.F64C1, rhs);
    // Solution vector: X = inv(A) * B
    CvMat matInverse = matLhs.Clone();
    matLhs.Inv(matInverse);
    CvMat matSolution = matInverse * matRhs;
    // Print each solved unknown.
    Console.WriteLine("X = {0}", matSolution[0].Val0);
    Console.WriteLine("Y = {0}", matSolution[1].Val0);
    Console.WriteLine("Z = {0}", matSolution[2].Val0);
    // Wait for a key press before exiting.
    Console.Read();
}
// Detects line segments by first extracting corner features from the hue
// channel (masked by the ROI), painting those corners as filled circles onto a
// blank canvas, and then running a probabilistic Hough transform over it.
// => hue : single-channel hue image to extract features from
// => roi : mask restricting where features may be found
// <= line segments fitted through the detected feature points
private CvLineSegmentPoint[] detectLinesFromFeatures(CvMat hue, CvMat roi)
{
    // IDEA 3 :
    // Extract features (actual box corners?!) from ROI with corner detection
    CvPoint2D32f[] corners; // extracted features
    int cornerCount; // not exactly "count", but rather "maximum number of corners to return"
    double qualityLevel = 0.05; // this changes to 0.1 if NOT using ROI as mask!
    double minimumDistance = 25; // maybe this has to be a percentage of the input-size, rather than an absolute value?!?!?
    bool useHarris = false;
    int blockSize = 3;
    // NOTE : roi is not as good to check for features as the hue itself!!!
#if false
    // Disabled experiment: feature extraction on the ROI image itself, with
    // isolated-feature filtering. Kept for reference.
    cornerCount = 100;
    Cv.GoodFeaturesToTrack(
        roi,
        MatOps.CopySize(roi, MatrixType.F32C1, Const.ScalarBlack),
        MatOps.CopySize(roi, MatrixType.F32C1, Const.ScalarBlack),
        out corners,
        ref cornerCount,
        qualityLevel,
        minimumDistance,
        null,
        blockSize,
        useHarris);
    CvMat roiClone = roi.Clone();
    roiClone.SaveImage("roiClone.png");
    for (int i = 0; i < cornerCount; ++i)
    {
        // remove "isolated" features : gave back some good results, but it still wasn't as good as actual HUE feature discovery
        CvPoint2D32f feature = corners[i];
        if (checkFeatureArea(roiClone, feature))
        {
            roiClone.Circle(feature, 10, 127);
        }
    }
    MatOps.NewWindowShow(roiClone, "ROI!");
    Console.WriteLine("corners=" + cornerCount);
#endif
    // TODO : determine if it's a good idea to use ROI as a mask.
    // NOTE : Abandoning this idea for now. Good features are truly found, but they give worse lines than [IDEA 4]!
    cornerCount = 100;
    // Find up to 100 strong corners in the hue image; "roi" acts as the search mask here.
    Cv.GoodFeaturesToTrack(
        hue,
        MatOps.CopySize(roi, MatrixType.F32C1, Const.ScalarBlack),
        MatOps.CopySize(roi, MatrixType.F32C1, Const.ScalarBlack),
        out corners,
        ref cornerCount,
        qualityLevel,
        minimumDistance,
        roi,
        blockSize,
        useHarris);
    //CvMat hueClone = hue.Clone();
    // Draw every detected corner as a filled circle on a blank canvas, so the
    // Hough transform below only "sees" feature locations.
    CvMat hueClone = MatOps.CopySize(hue, MatrixType.U8C1, 0);
    for (int i = 0; i < cornerCount; ++i)
    {
        hueClone.Circle(corners[i], 10, 127, -1);
    }
    // Probabilistic Hough: 1px rho step, 1-degree theta step, threshold 75.
    CvLineSegmentPoint[] lines2 = hueClone.HoughLinesProbabilistic(1, 1 * Cv.PI / 180, 75, 1, 10000);
    // Visualize the found lines on the same canvas.
    for (int i = 0; i < lines2.Length; ++i)
    {
        hueClone.Line(lines2[i].P1, lines2[i].P2, Const.ScalarRandom(), 3, LineType.AntiAlias);
    }
    MatOps.NewWindowShow(hueClone, "Lines from Features");
    Console.WriteLine("=======================");
    Console.WriteLine("detectLinesFromFeatures");
    Console.WriteLine("corners=" + cornerCount);
    Console.WriteLine("lines=" + lines2.Length);
    Console.WriteLine("=======================");
    return(lines2);
}
// => inputMat MUST be 24/32 bit
// Main per-frame pipeline: grey conversion, valley-emphasis thresholding,
// contour distillation, Hough line extraction, corner-point tracking (oldPt /
// final4P fields), and finally warping + blending an overlay onto the frame.
// <= inputMat, heavily mutated in place (also returned)
private CvMat processFrame(CvMat inputMat)
{
    // return "inputMat" after lots. LOTS. Of processing
    width = inputMat.Cols;
    height = inputMat.Rows;
    // taking out 4% of the input's edges: sounds wrong
#if false
    // I have no idea what on earth is the purpose of this:
    //CvMat temp2 = inputMat( new CvRect( inputMat.Cols / 25, inputMat.Cols / 25, inputMat.Cols - 2 * (inputMat.Cols / 25), inputMat.Rows - 2 * (inputMat.Rows / 25) ) );
    //resize( temp2, temp2, inputMat.size() );
    //temp2.copyTo( inputMat );
    int borderX = inputMat.Cols / 25; // 4% of original
    int borderY = inputMat.Rows / 25;
    CvRect roi = new CvRect(borderX, borderY, inputMat.Cols - 2 * borderX, inputMat.Rows - 2 * borderY);
    CvMat temp2 = inputMat.GetSubRect(out temp2, roi); // stupid to pass "out temp2"?
    inputMat = temp2; // =TODO : What? temp2.Copy( inputMat );
    // is it really required to remove 4% of the input image's edges?
#endif
    CvMat inputMat_grey;
    {
        // TODO : looks like a waste to make two conversions from inputMat to _grey, instead of 1
        // since OpenCV doesn't support it, it could be made manually
        CvMat inputMat_grey8 = MatOps.ConvertChannels(inputMat);
        inputMat_grey = MatOps.ConvertElements(inputMat_grey8, MatrixType.F32C1, 1.0 / 255.0);
    }
    // NOTE : IBO seems to give good contrast with certain images, but with bbox7, it is just disastrous.
    //MatOps.NewWindowShow( inputMat_grey );
    //inputMat_grey = Filters.IBO( inputMat_grey ); // inputMat_grey = 32f
    //MatOps.NewWindowShow( inputMat_grey );
    inputMat_grey = MatOps.ConvertElements(inputMat_grey, MatrixType.U8C1, 255); // inputMat_grey = 8u
    // was: SLOW : Filters.ContrastEnhancement( inputMat_grey ); // NOTE : not needed AFTER IBO
    // NOTE : Contrast Enhancement2 may NOT be needed AT ALL, at this point at least, ANYWAY!!!
    Filters.ContrastEnhancement2(inputMat_grey); // NOTE : certainly NOT needed AFTER IBO
    MatOps.NewWindowShow(inputMat_grey);
    // mask passed originally in method below was all white, so I optimized it out. Passing the number of pixels was also dumb-o.
    // Binarize: pixels darker than the valley-emphasis threshold become white.
    double thresh = Filters.NeighborhoodValleyEmphasis(inputMat_grey);
    Cv.Threshold(inputMat_grey, inputMat_grey, thresh, 255, ThresholdType.BinaryInv);
    // Morphological open (erode then dilate) with a 3x3 cross to remove speckle.
    IplConvKernel element = new IplConvKernel(3, 3, 1, 1, ElementShape.Cross);
    Cv.Erode(inputMat_grey, inputMat_grey, element);
    Cv.Dilate(inputMat_grey, inputMat_grey, element);
    MatOps.NewWindowShow(inputMat_grey);
    // TODO : check if check is required
    if (inputMat_grey.ElemType != MatrixType.U8C1)
    {
        inputMat_grey = MatOps.ConvertElements(inputMat_grey, MatrixType.U8C1, 255.0);
    }
    // =======
    // is this just a test?
    CvPoint[] newPtV = Filters.DistillContours(inputMat_grey, 5, Const.PointZero);
    CvMat imageDest;
    using (CvMemStorage storage = new CvMemStorage())
    {
        CvSeq <CvPoint> updateContours = CvSeq <CvPoint> .FromArray(newPtV, SeqType.Contour, storage);
        imageDest = new CvMat(inputMat.Rows, inputMat.Cols, MatrixType.U8C1);
        Cv.DrawContours(imageDest, updateContours, Const.ScalarWhite, 0, 100, 16);
    }
    // =======
    kawane(newPtV); // updates thresholdDist, minMaskY, final4P
    //*******************************************set a greater contour for estimation of the missing points*******************************//
    // =======
    newPtV = Filters.DistillContours(inputMat_grey, 100, Const.PointZero);
    using (CvMemStorage storage = new CvMemStorage())
    {
        CvSeq <CvPoint> updateContours = CvSeq <CvPoint> .FromArray(newPtV, SeqType.Contour, storage);
        imageDest = new CvMat(inputMat.Rows, inputMat.Cols, MatrixType.U8C1);
        Cv.DrawContours(imageDest, updateContours, Const.ScalarWhite, 0, 100, 1, LineType.AntiAlias);
    }
    // =======
    // Mask out everything outside the (convex) contour, then edge-detect inside it.
    CvMat mask1 = new CvMat(inputMat.Rows, inputMat.Cols, MatrixType.U8C1, 0);
    Cv.FillConvexPoly(mask1, newPtV, Const.ScalarWhite, 0, 0);
    temp = MatOps.ConvertChannels(inputMat);
    temp.Copy(imageDest, mask1);
    Cv.Canny(imageDest, imageDest, 150, 300, ApertureSize.Size3);
    // Morphological close (dilate then erode) to join broken Canny edges.
    IplConvKernel element2 = new IplConvKernel(3, 3, 1, 1, ElementShape.Rect);
    Cv.Dilate(imageDest, imageDest, element2);
    Cv.Erode(imageDest, imageDest, element2);
    CvLineSegmentPoint[] lines = Cv2.HoughLinesP(new Mat(imageDest), 1, Cv.PI / 180 /*NOTE : 1 degree angle*/, 50, 50, 50); // TODO : those 50s..?
    extendLines(lines, 350); // TODO : This idea sounds arbitary? And why 350? At least some percentage?
    // draw extended lines
    for (int i = 0; i < lines.Length; ++i)
    {
        CvLineSegmentPoint l = lines[i];
        Cv.Line(imageDest, l.P1, l.P2, Const.ScalarWhite, 1, LineType.AntiAlias);
    }
    Cv.Dilate(imageDest, imageDest, element2); // TODO : FIX : Dilate again?!
    // another huge function here...
    fourPoints(lines);
    ////////////
    //********************************************************************* replace estimate points with mask corners ********//
    if (oldPt.Count != 0)
    {
        //**
        // BEWARE : great use of the English language following right below:
        // test for each and every one of the last slice delete each one of all the revisited of the above and estimate for only the best the off topic adapt
        //**
        // For every estimated corner (final4P), find the index of its nearest
        // previously-tracked point (oldPt).
        List <int> positions = new List <int>(final4P.Count);
        for (int i = 0; i < final4P.Count; ++i)
        {
            positions.Add(-1); // "initialize" positions[i]
            double distmin = 10000;
            for (int j = 0; j < oldPt.Count; ++j)
            {
                double distAB = PointOps.Norm(oldPt[j] - final4P[i]);
                if (distAB < distmin)
                {
                    distmin = distAB;
                    positions[i] = j;
                }
            }
        }
        int flagFrCounter = 0;
        for (int i = 0; i < final4P.Count; ++i)
        {
            double distA = PointOps.Norm(oldPt[positions[i]] - final4P[i]);
            // threshold that defines the maximum search region; otherwise keep the previous point
            // (original comment was in transliterated Greek)
            if (distA < thresholdDist) //if(distA<80)
            {
                oldPt[positions[i]] = final4P[i];
                --flagFrCounter;
            }
            ++flagFrCounter;
        }
        if (reset)
        {
            numFrames = 0;
            oldPt.Clear();
            final4P.Clear();
        }
    }
    //pointsb[0]=thresholdDist;
    //****************************************************************************//
    // Draw the tracked points onto the working grey image.
    for (int i = 0; i < oldPt.Count; ++i)
    {
        Cv.Circle(temp, oldPt[i], 2, Const.ScalarRed, 3);
    }
    MatOps.Convert8To24(temp).Copy(inputMat);
    //MatOps.ConvertChannels( temp, ColorConversion.GrayToBgr ).Copy( inputMat );
    //temp.Copy( inputMat );
    //******************************************************OVERLAY IMAGE***********************************************//////
    if (oldPt.Count == 0)
    {
        return(inputMat); // end of line
    }
    CvMat black2;
    if (overlay != null)
    {
        black2 = overlay.Clone(); //=imread("cubes.jpg");
        // NOTE(review): the destination here is inputMat, not black2 — the
        // argument order looks reversed for "resize overlay to frame size"; confirm intent.
        Cv.Resize(black2, inputMat, Interpolation.NearestNeighbor); // TODO : check if interpolation type is appropriate
    }
    else
    {
        black2 = new CvMat(inputMat.Rows, inputMat.Cols, MatrixType.U8C3);
    }
    List <CvPoint> tempPoint = new List <CvPoint>(4); //vector<Point> tempPoint;
    int pp = 0; // BEWARE : the guy is copy/pasting needlessly?
    // 1st pick: topmost tracked point (smallest Y).
    int mini = 1000000;
    for (int i = 0; i < oldPt.Count; ++i)
    {
        if (oldPt[i].Y < mini)
        {
            mini = oldPt[i].Y;
            pp = i;
        }
    }
    tempPoint.Add(oldPt[pp]);
    // 2nd pick: next topmost point.
    mini = 1000000;
    for (int i = 0; i < oldPt.Count; ++i)
    {
        if (oldPt[i].Y < mini && oldPt[i] != tempPoint[0])
        {
            mini = oldPt[i].Y;
            pp = i;
        }
    }
    tempPoint.Add(oldPt[pp]);
    // 3rd pick: point horizontally closest to the 2nd pick.
    mini = 1000000;
    for (int i = 0; i < oldPt.Count; ++i)
    {
        int tempmini = Math.Abs(oldPt[i].X - tempPoint[1].X);
        if (tempmini < mini && oldPt[i] != tempPoint[0] && oldPt[i] != tempPoint[1])
        {
            mini = tempmini;
            pp = i;
        }
    }
    tempPoint.Add(oldPt[pp]);
    // Append any remaining points that were not already chosen.
    for (int i = 0; i < oldPt.Count; ++i)
    {
        CvPoint pt = oldPt[i];
        bool found = false;
        for (int j = 0; j < tempPoint.Count; ++j)
        {
            if (tempPoint[j] == pt)
            {
                found = true;
                break;
            }
        }
        if (!found)
        {
            tempPoint.Add(pt);
        }
    }
    // only keep up to 4 points
    List <CvPoint> co_ordinates = new List <CvPoint>(4);
    {
        int maxIndex = Math.Min(4, tempPoint.Count);
        for (int i = 0; i < maxIndex; ++i)
        {
            co_ordinates.Add(tempPoint[i]);
        }
    }
    // lost me...
    if (outputQuad[0] == outputQuad[2])
    {
        // Degenerate quad (presumably "not initialized yet" — confirm): seed it directly.
        {
            int maxIndex = Math.Min(4, tempPoint.Count);
            for (int i = 0; i < maxIndex; ++i)
            {
                outputQuad[i] = tempPoint[i];
            }
        }
    }
    else
    {
        // Match each existing quad corner to its nearest new point, consuming
        // each candidate point at most once.
        CvPoint2D32f rr;
        for (int i = 0; i < 4; ++i)
        {
            List <double> dist = new List <double>(tempPoint.Count);
            for (int j = 0; j < tempPoint.Count; ++j)
            {
                rr = tempPoint[j];
                dist.Add(PointOps.Norm(outputQuad[i] - rr));
            }
            double minimumDist = dist.Min();
            int min_pos = Utils.FindIndex(dist, minimumDist);
            if (tempPoint.Count > 0)
            {
                outputQuad[i] = tempPoint[min_pos];
                tempPoint.RemoveAt(min_pos);
            }
        }
    }
    // The 4 points where the mapping is to be done , from top-left in clockwise order
    inputQuad[0] = new CvPoint2D32f(0, 0);
    inputQuad[1] = new CvPoint2D32f(inputMat.Cols - 1, 0);
    inputQuad[2] = new CvPoint2D32f(inputMat.Cols - 1, inputMat.Rows - 1);
    inputQuad[3] = new CvPoint2D32f(0, inputMat.Rows - 1);
    //Input and Output Image;
    // Get the Perspective Transform Matrix i.e. lambda (2D warp transform)
    // Lambda Matrix
    CvMat lambda = Cv.GetPerspectiveTransform(inputQuad, outputQuad);
    // Apply this Perspective Transform to the src image
    // - get a "top-down" view of the supposedly box-y area
    Cv.WarpPerspective(black2, black2, lambda, Interpolation.Cubic, Const.ScalarBlack);
    // see nice explanation : http://www.pyimagesearch.com/2014/08/25/4-point-opencv-getperspective-transform-example/
    CvMat maskOV = new CvMat(inputMat.Rows, inputMat.Cols, MatrixType.U8C1, Const.ScalarBlack);
    using (CvMemStorage storage = new CvMemStorage())
    {
        CvSeq <CvPoint> updateContours = CvSeq <CvPoint> .FromArray(co_ordinates, SeqType.Contour, storage);
        imageDest = new CvMat(inputMat.Rows, inputMat.Cols, MatrixType.U8C1);
        Cv.DrawContours(maskOV, updateContours, Const.ScalarWhite, 0, 100, 16);
        //drawContours( maskOV, co_ordinates, 0, Scalar( 255 ), CV_FILLED, 8 );
    }
    // Blend the warped overlay onto the frame, restricted to the quad mask.
    double alpha = 0.8;
    double beta = (1.0 - alpha);
    Cv.AddWeighted(black2, alpha, inputMat, beta, 0.0, black2);
    black2.Copy(inputMat, maskOV);
    return(inputMat);
}
// Estimates the rigid transform that aligns model `targetModelIndex` against
// the other FLANN models: each (optionally subsampled) model point is matched
// to its nearest neighbour in every other model, and the weighted point pairs
// are fed into a CoordRotTransConversion solver.
// => targetModelIndex             : index into _flannModels / _modelTransforms
// => updateInternalModelTransform : when true, the result is stored back into _modelTransforms
// => randomSamplingRatio          : < 1 keeps each point with that probability; >= 1 uses all points
// <= the solved transform, already composed with the model's current transform
public CvMat CalculateTransform(int targetModelIndex, bool updateInternalModelTransform, double randomSamplingRatio)
{
    if (targetModelIndex < 0 || targetModelIndex >= _flannModels.Count)
    {
        throw new ArgumentOutOfRangeException("targetModelIndex");
    }
    CoordRotTransConversion coordConverter = new CoordRotTransConversion();
    //CoordConvertSpring coordConverter = new CoordConvertSpring(_modelTransforms[targetModelIndex]);
    //foreach (var point in dataPointListInWorldCoordinate) {
    List <Tuple <CvPoint3D64f, CvColor> > tuples = _flannModels[targetModelIndex].ModelPoints;
    if (randomSamplingRatio < 1)
    {
        // Randomly subsample the model points to speed the matching up.
        Random rand = new Random();
        tuples = tuples.Where(x => rand.NextDouble() < randomSamplingRatio).ToList();
    }
    CvMat targetTransform = _modelTransforms[targetModelIndex];
    // Pre-compute the inverse of every model transform so a world point can be
    // mapped back into each model's local coordinate frame.
    List <CvMat> inverseTransforms = new List <CvMat>();
    foreach (CvMat transform in _modelTransforms)
    {
        CvMat inv = CvEx.InitCvMat(transform);
        transform.Invert(inv);
        inverseTransforms.Add(inv);
    }
    // Compare squared distances to avoid a sqrt per candidate.
    float searchDistanceSq = this.SearchDistance * this.SearchDistance;
    Parallel.ForEach(tuples, tuple =>
    {
        CvPoint3D64f point = tuple.Item1;
        CvColor color = tuple.Item2;
        //foreach (var point in points) {
        CvPoint3D64f worldPoint = CvEx.ConvertPoint3D(point, targetTransform);
        // Find the closest matching point across all OTHER models.
        int minModelIndex = -1;
        int minPointIndex = -1;
        float minDistanceSq = float.MaxValue;
        for (int modelIndex = 0; modelIndex < _flannModels.Count; modelIndex++)
        {
            if (modelIndex == targetModelIndex)
            {
                continue;
            }
            CvPoint3D64f inversePoint = CvEx.ConvertPoint3D(worldPoint, inverseTransforms[modelIndex]);
            int[] indices;
            float[] distances;
            // 1-nearest-neighbour lookup in the candidate model's local frame.
            _flannModels[modelIndex].KnnSearch(inversePoint, color, out indices, out distances, 1);
            if (indices.Length >= 1)
            {
                float distanceSq = distances[0];
                if (distanceSq <= searchDistanceSq)
                {
                    if (distanceSq < minDistanceSq)
                    {
                        minModelIndex = modelIndex;
                        minPointIndex = indices[0];
                        minDistanceSq = distanceSq;
                    }
                }
            }
        }
        if (minModelIndex != -1)
        {
            Tuple <CvPoint3D64f, CvColor> bestModelPoint = _flannModels[minModelIndex].ModelPoints[minPointIndex];
            // Depth-based weights (1500/1000f = 1.5, 5000/1000f = 5.0); presumably
            // favouring points near ~1.5 units depth on both sides — TODO confirm.
            double weightTo = 1.0 / (Math.Abs(bestModelPoint.Item1.Z - 1500 / 1000f) + 5000 / 1000f);
            double weightFrom = 1.0 / (Math.Abs(point.Z - 1500 / 1000f) + 5000 / 1000f);
            //weightFrom = weightTo = 1;
            double weight = _weightFromDistanceSq(minDistanceSq) * weightFrom * weightTo;
            CvPoint3D64f from = CvEx.ConvertPoint3D(point, targetTransform);
            CvPoint3D64f to = CvEx.ConvertPoint3D(bestModelPoint.Item1, _modelTransforms[minModelIndex]);
            // NOTE(review): PutPoint is invoked from parallel workers — assumed
            // thread-safe; verify against CoordRotTransConversion's implementation.
            coordConverter.PutPoint(from, to, weight);
        }
    });
    CvMat ret = coordConverter.Solve() * targetTransform;
    if (updateInternalModelTransform)
    {
        _modelTransforms[targetModelIndex] = ret.Clone();
    }
    return(ret);
}
// Triangulates the 3D position of a contour point from two corresponding image
// points and the two cameras' 3x4 projection matrices.
// (Based on a paper on a new 3D reconstruction method for spatial points and
// its uncertainty analysis — original comments were in Chinese.)
// => Intersection        : pixel of the point in the first image (indices 1 = x, 2 = y are used)
// => first_projcet       : 3x4 projection matrix of the first (reference) camera
// => second_projcet      : 3x4 projection matrix of the second camera
// => Correspond_point    : corresponding pixel in the second image
// => contour_point_index : index of this point inside its contour
// => red, green, blue    : colour carried along with the reconstructed point
// => left_image_number, right_image_number : identifiers of the image pair
// => contour_index_model_operation_number  : contour/model bookkeeping id
// <= 11-element record { index, X, Y, Z, 1, r, g, b, leftImg, rightImg, contourId }
private double[] Cross_intersection_3D(double[] Intersection, double[,] first_projcet, double[,] second_projcet, CvPoint Correspond_point, int contour_point_index, double red, double green, double blue, int left_image_number, int right_image_number, int contour_index_model_operation_number)
{
    //IplImage first_image = new IplImage(image2, LoadMode.Color);
    // Coefficient rows derived from the two projection rays (two equations per camera).
    double[,] A = new double[4, 3]
    {
        { Intersection[1] * first_projcet[2, 0] - first_projcet[0, 0], Intersection[1] * first_projcet[2, 1] - first_projcet[0, 1], Intersection[1] * first_projcet[2, 2] - first_projcet[0, 2] },
        { Intersection[2] * first_projcet[2, 0] - first_projcet[1, 0], Intersection[2] * first_projcet[2, 1] - first_projcet[1, 1], Intersection[2] * first_projcet[2, 2] - first_projcet[1, 2] },
        { Correspond_point.X * second_projcet[2, 0] - second_projcet[0, 0], Correspond_point.X * second_projcet[2, 1] - second_projcet[0, 1], Correspond_point.X * second_projcet[2, 2] - second_projcet[0, 2] },
        { Correspond_point.Y * second_projcet[2, 0] - second_projcet[1, 0], Correspond_point.Y * second_projcet[2, 1] - second_projcet[1, 1], Correspond_point.Y * second_projcet[2, 2] - second_projcet[1, 2] }
    };
    // Right-hand side; the two zero rows pair with the cross-product constraints below.
    double[,] y = new double[6, 1]
    {
        { first_projcet[0, 3] - Intersection[1] * first_projcet[2, 3] },
        { first_projcet[1, 3] - Intersection[2] * first_projcet[2, 3] },
        { second_projcet[0, 3] - (Correspond_point.X * second_projcet[2, 3]) },
        { second_projcet[1, 3] - (Correspond_point.Y * second_projcet[2, 3]) },
        { 0 },
        { 0 }
    };
    // s1, s2: cross products of the two plane normals of each camera, i.e. the
    // direction vectors of the two viewing rays.
    double[,] s1 = new double[1, 3] { { A[0, 1] * A[1, 2] - A[0, 2] * A[1, 1], A[1, 0] * A[0, 2] - A[0, 0] * A[1, 2], A[0, 0] * A[1, 1] - A[1, 0] * A[0, 1] } };
    double[,] s2 = new double[1, 3] { { A[2, 1] * A[3, 2] - A[2, 2] * A[3, 1], A[3, 0] * A[2, 2] - A[2, 0] * A[3, 2], A[2, 0] * A[3, 1] - A[3, 0] * A[2, 1] } };
    // Block system for the two nearest points (Xb,Yb,Zb) and (Xc,Yc,Zc) on the rays.
    double[,] D = new double[6, 6]
    {
        { A[0, 0], A[0, 1], A[0, 2], 0, 0, 0 },
        { A[1, 0], A[1, 1], A[1, 2], 0, 0, 0 },
        { 0, 0, 0, A[2, 0], A[2, 1], A[2, 2] },
        { 0, 0, 0, A[3, 0], A[3, 1], A[3, 2] },
        { s1[0, 0], s1[0, 1], s1[0, 2], -s1[0, 0], -s1[0, 1], -s1[0, 2] },
        { s2[0, 0], s2[0, 1], s2[0, 2], -s2[0, 0], -s2[0, 1], -s2[0, 2] }
    };
    CvMat D_mat = new CvMat(6, 6, MatrixType.F64C1, D);
    CvMat D1_mat = new CvMat(6, 1, MatrixType.F64C1, y);
    // FIX: the original pre-allocated CvMats for matAInv1 and result and then
    // immediately overwrote the references, leaking the unmanaged buffers.
    // Clone() and the matrix multiply already allocate their own storage.
    CvMat matAInv1 = D_mat.Clone();
    D_mat.Inv(matAInv1);
    // Solve D * [Xb Yb Zb Xc Yc Zc]^T = y via the inverse.
    CvMat result = matAInv1 * D1_mat;
    // Active output uses the point on the corresponding ray (Xc, Yc, Zc);
    // alternatives (midpoint of both rays, or the reference-ray point) existed
    // as commented-out variants in the original.
    double Xc = result[3].Val0;
    double Yc = result[4].Val0;
    double Zc = result[5].Val0;
    // (Mapping pixel colours via Cv.Get2D here was tried and proved very slow.)
    double[] point_3D_location = new double[11] { contour_point_index, Xc, Yc, Zc, 1, red, green, blue, left_image_number, right_image_number, contour_index_model_operation_number };
    // Release the unmanaged matrices explicitly.
    Cv.ReleaseMat(D_mat);
    Cv.ReleaseMat(D1_mat);
    Cv.ReleaseMat(matAInv1);
    Cv.ReleaseMat(result);
    //GC.Collect();
    return(point_3D_location);
}
// Computes the epipolar lines in the first (reference) image for a set of
// contour points from the second image. Valid under a narrow-baseline
// assumption. For each point it returns TWO points lying on its epiline
// (the intersection code downstream consumes points, not A/B/C coefficients).
// => first_projcet       : 3x4 projection matrix of the first camera
// => second_projcet      : 3x4 projection matrix of the second camera
// => right_contour_point : 2 x N array of contour-point pixel coordinates
// <= 4 x N matrix: rows are (x1, y1, x2, y2) — the epiline's crossings of x = 0 and y = 0
private CvMat Computecorrespondepilines(double[,] first_projcet, double[,] second_projcet, double[,] right_contour_point)
{
    CvMat Epiline_point = new CvMat(4, right_contour_point.GetLength(1), MatrixType.F64C1);
    // Fundamental matrix from the two projection matrices:
    //   F = [m]x * M2 * inv(M1)   with   m = m2 - M2 * inv(M1) * m1
    // (method from the "fundamental matrix from projection matrices" reference).
    double[,] M11 = new double[3, 3] { { first_projcet[0, 0], first_projcet[0, 1], first_projcet[0, 2] }, { first_projcet[1, 0], first_projcet[1, 1], first_projcet[1, 2] }, { first_projcet[2, 0], first_projcet[2, 1], first_projcet[2, 2] } };
    double[,] M21 = new double[3, 3] { { second_projcet[0, 0], second_projcet[0, 1], second_projcet[0, 2] }, { second_projcet[1, 0], second_projcet[1, 1], second_projcet[1, 2] }, { second_projcet[2, 0], second_projcet[2, 1], second_projcet[2, 2] } };
    double[,] m1 = new double[3, 1] { { first_projcet[0, 3] }, { first_projcet[1, 3] }, { first_projcet[2, 3] } };
    double[,] m2 = new double[3, 1] { { second_projcet[0, 3] }, { second_projcet[1, 3] }, { second_projcet[2, 3] } };
    CvMat M11_mat = new CvMat(3, 3, MatrixType.F64C1, M11);
    CvMat M21_mat = new CvMat(3, 3, MatrixType.F64C1, M21);
    CvMat m1_mat = new CvMat(3, 1, MatrixType.F64C1, m1);
    CvMat m2_mat = new CvMat(3, 1, MatrixType.F64C1, m2);
    CvMat M11_matInv = M11_mat.Clone();
    M11_mat.Inv(M11_matInv);
    CvMat temp3 = M21_mat * M11_matInv * m1_mat;
    double[,] temp3_arry = new double[3, 1] { { temp3[0, 0] }, { temp3[1, 0] }, { temp3[2, 0] } };
    // FIX: m_arry was allocated and then immediately replaced by the
    // MatrixSubtration result; the dead allocation is removed.
    double[,] m_arry = MatrixSubtration(m2, temp3_arry);
    CvMat m_mat = new CvMat(3, 1, MatrixType.F64C1, m_arry);
    // [m]x — the skew-symmetric cross-product matrix of m.
    double[,] mx_mat_arry = new double[3, 3] { { 0, -m_mat[2, 0], m_mat[1, 0] }, { m_mat[2, 0], 0, -m_mat[0, 0] }, { -m_mat[1, 0], m_mat[0, 0], 0 } };
    CvMat mx_mat = new CvMat(3, 3, MatrixType.F64C1, mx_mat_arry);
    //MessageBox.Show(m_mat.ToString());
    //MessageBox.Show(mx_mat.ToString());
    // FIX: FundamentalMat used to be pre-allocated and then overwritten by this
    // product, leaking the first unmanaged buffer.
    CvMat FundamentalMat = mx_mat * M21_mat * M11_matInv;
    // Wrap the point array as a matrix; columns are the individual points.
    CvMat matA = new CvMat(2, right_contour_point.GetLength(1), MatrixType.F64C1, right_contour_point);
    // whichImage = 2: the points belong to the second image, so the epilines
    // come out in the reference (first) image.
    // FIX: correspondent_lines is an out parameter — the original pre-allocation
    // was overwritten and leaked.
    CvMat correspondent_lines;
    Cv.ComputeCorrespondEpilines(matA, 2, FundamentalMat, out correspondent_lines);
    double A = 0, B = 0, C = 0; // epiline coefficients: A*x + B*y + C = 0
    for (int i = 0; i < right_contour_point.GetLength(1); i++)
    {
        // One epiline per contour point (some contour points have no epiline in
        // the other image). Store two points on each line — its intersections
        // with the axes x = 0 and y = 0 — because the intersection code needs points.
        A = correspondent_lines[0, i];
        B = correspondent_lines[1, i];
        C = correspondent_lines[2, i];
        Epiline_point[0, i] = 0;
        Epiline_point[1, i] = ((-C) / B);
        Epiline_point[2, i] = ((-C) / A);
        Epiline_point[3, i] = 0;
    }
    //MessageBox.Show(correspondent_lines.ToString());
    // Release all unmanaged intermediates; only Epiline_point is returned.
    Cv.ReleaseMat(correspondent_lines);
    Cv.ReleaseMat(FundamentalMat);
    Cv.ReleaseMat(M11_mat);
    Cv.ReleaseMat(M21_mat);
    Cv.ReleaseMat(m1_mat);
    Cv.ReleaseMat(m2_mat);
    Cv.ReleaseMat(M11_matInv);
    Cv.ReleaseMat(temp3);
    Cv.ReleaseMat(m_mat);
    Cv.ReleaseMat(mx_mat);
    Cv.ReleaseMat(matA);
    return(Epiline_point);
}
// Scans a binarized image column-by-column and records the bounding rectangle
// of every character-like run of bright columns into `positions`.
// (Original comments were in Japanese; translated to English.)
// NOTE(review): a character touching the right image edge (no trailing dark
// column) is never emitted — confirm whether that is intentional.
// => _image : binary image; bright pixels are foreground
public CharactersInfo(CvMat _image)
{
    positions = new List<CvRect>();
    // Copy the input image
    image = _image.Clone();
    // Character-string recognition
    // Search-mode flag: true = looking for a bright column, false = looking for a dark column
    bool searchingBright = true;
    // Start column of the current character
    int left = 0;
    // End column of the current character
    int right;
    for (int col = 0; col < image.Cols; col++)
    {
        // Number of bright pixels in this column
        int nonzero = image.GetCol(col).CountNonZero();
        // Branch on the current search mode
        if ( true == searchingBright)
        {
            // Searching for a bright column
            if (0 < nonzero)
            {
                // At least one bright pixel found: this starts a character
                left = col;
                // Switch search mode
                searchingBright = false;
            }
        }
        else
        {
            // Searching for a dark column
            if (0 == nonzero)
            {
                // This column is entirely dark: the character ends here
                right = col;
                // Does the segment satisfy the LowestWidth requirement?
                if (CharactersInfo.LowestWidth > right - left)
                {
                    // Too narrow — restart the search
                    searchingBright = true;
                    continue;
                }
                // Accept the segment as a character
                CvMat character = image.GetCols( left, right );
                // Find the topmost bright row
                int top = 0;
                for (int row = 0; row < character.Rows; row++)
                {
                    if (0 < character.GetRow(row).CountNonZero())
                    {
                        // Bright pixel found
                        top = row;
                        break;
                    }
                }
                // Find the bottommost bright row
                int bottom = character.Rows - 1;
                for (int row = bottom; row > top; row--)
                {
                    if (0 < character.GetRow(row).CountNonZero())
                    {
                        // Bright pixel found (+1 makes `bottom` exclusive)
                        bottom = row + 1;
                        break;
                    }
                }
                // Character region determined
                positions.Add(new CvRect(left, top, right - left, bottom - top));
                // Switch back to bright-column search
                searchingBright = true;
            }
        }
    }
}