public IplImage PerspectiveImage(IplImage src)
{
    perspective = new IplImage(src.Size, BitDepth.U8, 3);

    CvPoint2D32f[] srcPoint = new CvPoint2D32f[4];
    CvPoint2D32f[] dstPoint = new CvPoint2D32f[4];

    srcPoint[0] = new CvPoint2D32f(600.0, 600.0);
    srcPoint[1] = new CvPoint2D32f(300.0, 900.0);
    srcPoint[2] = new CvPoint2D32f(1300.0, 600.0);
    srcPoint[3] = new CvPoint2D32f(1600.0, 900.0);

    float width = src.Size.Width;
    float height = src.Size.Height;

    dstPoint[0] = new CvPoint2D32f(0.0, 0.0);
    dstPoint[1] = new CvPoint2D32f(0.0, height);
    dstPoint[2] = new CvPoint2D32f(width, 0.0);
    dstPoint[3] = new CvPoint2D32f(width, height); // was dstPoint[4]: index out of range

    CvMat matrix = Cv.GetPerspectiveTransform(srcPoint, dstPoint);
    Console.WriteLine(matrix);

    Cv.WarpPerspective(src, perspective, matrix, Interpolation.Linear, CvScalar.ScalarAll(0));
    return perspective;
}
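// --- Usage sketch (not part of the original example) ---
// A minimal, hypothetical driver for PerspectiveImage above. "road.jpg" is a
// placeholder path; the input should be at least ~1600x900 px so that the
// hard-coded source quadrilateral lies inside the image.
using (IplImage src = new IplImage("road.jpg", LoadMode.Color))
{
    IplImage warped = PerspectiveImage(src);
    using (new CvWindow("src", src))
    using (new CvWindow("perspective", warped))
    {
        Cv.WaitKey(0);
    }
}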
public IplImage RotateImage(IplImage src, int angle)
{
    rotate = new IplImage(src.Size, BitDepth.U8, 3);
    CvMat matrix = Cv.GetRotationMatrix2D(new CvPoint2D32f(src.Width / 2, src.Height / 2), angle, 1);
    Cv.WarpAffine(src, rotate, matrix, Interpolation.Linear, CvScalar.ScalarAll(0));
    return rotate;
}
/// <summary>
/// Rotation
/// </summary>
public IplImage RotateTransform(IplImage src, int angle)
{
    iplGeometry = new IplImage(src.Size, BitDepth.U8, 3);
    CvMat matrix = Cv.GetRotationMatrix2D(Cv.Point2D32f(src.Width / 2, src.Height / 2), angle, 1);
    Cv.WarpAffine(src, iplGeometry, matrix, Interpolation.Linear, CvScalar.ScalarAll(0));
    return iplGeometry;
}
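// --- Stand-alone sketch (not part of the original examples) ---
// The two helpers above essentially differ only in the field they write to;
// both build a 2x3 rotation matrix about the image centre and warp with it.
// The same thing written inline, assuming a placeholder path "input.png":
using (IplImage src = new IplImage("input.png", LoadMode.Color))
using (IplImage dst = src.Clone())
{
    CvMat m = Cv.GetRotationMatrix2D(new CvPoint2D32f(src.Width / 2f, src.Height / 2f), -30, 1);
    Cv.WarpAffine(src, dst, m, Interpolation.Linear | Interpolation.FillOutliers, CvScalar.ScalarAll(128));
    using (new CvWindow("rotated -30 deg", dst)) { Cv.WaitKey(0); }
}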
/// <summary>
/// Creates and trains a decision tree on the mushroom data.
/// </summary>
/// <param name="data">Feature matrix (one sample per row)</param>
/// <param name="missing">Missing-value mask</param>
/// <param name="responses">Response vector ('p' = poisonous)</param>
/// <param name="pWeight">Prior weight of the poisonous class</param>
/// <returns>The trained tree</returns>
private CvDTree MushroomCreateDTree(CvMat data, CvMat missing, CvMat responses, float pWeight)
{
    float[] priors = { 1, pWeight };

    CvMat varType = new CvMat(data.Cols + 1, 1, MatrixType.U8C1);
    Cv.Set(varType, CvScalar.ScalarAll(CvStatModel.CV_VAR_CATEGORICAL)); // all the variables are categorical

    CvDTree dtree = new CvDTree();
    CvDTreeParams p = new CvDTreeParams(
        8,      // max depth
        10,     // min sample count
        0,      // regression accuracy: N/A here
        true,   // compute surrogate split, as we have missing data
        15,     // max number of categories (use sub-optimal algorithm for larger numbers)
        10,     // the number of cross-validation folds
        true,   // use 1SE rule => smaller tree
        true,   // throw away the pruned tree branches
        priors  // the array of priors; the bigger pWeight, the more attention
                // to the poisonous mushrooms
                // (a mushroom will be judged to be poisonous with bigger chance)
    );

    dtree.Train(data, DTreeDataLayout.RowSample, responses, null, null, varType, missing, p);

    // compute hit-rate on the training database, demonstrates predict usage.
    int hr1 = 0, hr2 = 0, pTotal = 0;
    for (int i = 0; i < data.Rows; i++)
    {
        CvMat sample, mask;
        Cv.GetRow(data, out sample, i);
        Cv.GetRow(missing, out mask, i);
        double r = dtree.Predict(sample, mask).Value;
        bool d = Math.Abs(r - responses.DataArraySingle[i]) >= float.Epsilon;
        if (d)
        {
            if (r != 'p') { hr1++; }
            else { hr2++; }
        }
        //Console.WriteLine(responses.DataArraySingle[i]);
        pTotal += (responses.DataArraySingle[i] == (float)'p') ? 1 : 0;
    }

    Console.WriteLine("Results on the training database");
    Console.WriteLine("\tPoisonous mushrooms mis-predicted: {0} ({1}%)", hr1, (double)hr1 * 100 / pTotal);
    Console.WriteLine("\tFalse-alarms: {0} ({1}%)", hr2, (double)hr2 * 100 / (data.Rows - pTotal));

    varType.Dispose();
    return dtree;
}
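// --- Usage sketch (not part of the original example) ---
// Hypothetical follow-up showing how the tree returned by MushroomCreateDTree
// could classify a single row; data/missing/responses are assumed to be the
// same matrices passed to the method above, and pWeight = 10 is arbitrary.
CvDTree tree = MushroomCreateDTree(data, missing, responses, 10.0f);
CvMat sample, mask;
Cv.GetRow(data, out sample, 0);
Cv.GetRow(missing, out mask, 0);
double predicted = tree.Predict(sample, mask).Value;
Console.WriteLine("sample 0 predicted as '{0}'", (char)predicted);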
public Watershed()
{
    // cvWatershed
    // Specify the centre of each circular marker (seed region) with the mouse; several markers can be placed.
    // Each marker is then grown along the image gradient, and the image is segmented along the
    // boundaries that form where the gradient is high.
    // The image is split into as many regions as markers were placed.

    // (2) Load the image, initialize the marker image, and allocate images for displaying the result
    using (IplImage srcImg = new IplImage(Const.ImageGoryokaku, LoadMode.AnyDepth | LoadMode.AnyColor))
    using (IplImage dstImg = srcImg.Clone())
    using (IplImage dspImg = srcImg.Clone())
    using (IplImage markers = new IplImage(srcImg.Size, BitDepth.S32, 1))
    {
        markers.Zero();
        // (3) Show the input image and register a mouse callback for specifying seed components
        using (CvWindow wImage = new CvWindow("image", WindowMode.AutoSize))
        {
            wImage.Image = srcImg;
            // Each click specifies a centre and places a circular seed region
            int seedNum = 0;
            wImage.OnMouseCallback += delegate(MouseEvent ev, int x, int y, MouseEvent flags)
            {
                if (ev == MouseEvent.LButtonDown)
                {
                    seedNum++;
                    CvPoint pt = new CvPoint(x, y);
                    markers.Circle(pt, 20, CvScalar.ScalarAll(seedNum), Cv.FILLED, LineType.Link8, 0);
                    dspImg.Circle(pt, 20, CvColor.White, 3, LineType.Link8, 0);
                    wImage.Image = dspImg;
                }
            };
            CvWindow.WaitKey();
        }

        // (4) Run the watershed segmentation
        Cv.Watershed(srcImg, markers);

        // (5) Draw the watershed boundaries (pixel value = -1) onto the display image
        for (int i = 0; i < markers.Height; i++)
        {
            for (int j = 0; j < markers.Width; j++)
            {
                int idx = (int)(markers.Get2D(i, j).Val0);
                if (idx == -1)
                {
                    dstImg.Set2D(i, j, CvColor.Red);
                }
            }
        }

        using (CvWindow wDst = new CvWindow("watershed transform", WindowMode.AutoSize))
        {
            wDst.Image = dstImg;
            CvWindow.WaitKey();
        }
    }
}
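// --- Post-processing sketch (not part of the original sample) ---
// After Cv.Watershed returns, each pixel of `markers` holds the index of the
// seed region it was assigned to, or -1 on a boundary. A hypothetical extra
// pass that tints every region by its marker index, reusing markers, dstImg
// and seedNum from the sample above with the same Get2D/Set2D pattern:
for (int i = 0; i < markers.Height; i++)
{
    for (int j = 0; j < markers.Width; j++)
    {
        int idx = (int)markers.Get2D(i, j).Val0;
        if (idx > 0)
        {
            double v = 255.0 * idx / seedNum; // one colour per region index
            dstImg.Set2D(i, j, new CvScalar(v, 255 - v, 128));
        }
    }
}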
private void RunDFT(IplImage srcImg)
{
    // cvDFT
    // Use the discrete Fourier transform to build a magnitude (spectrum) image.
    //using (IplImage srcImg = Cv.LoadImage(Const.ImageGoryokaku, LoadMode.GrayScale))
    using (IplImage realInput = Cv.CreateImage(srcImg.Size, BitDepth.F64, 1))
    using (IplImage imaginaryInput = Cv.CreateImage(srcImg.Size, BitDepth.F64, 1))
    using (IplImage complexInput = Cv.CreateImage(srcImg.Size, BitDepth.F64, 2))
    {
        // (1) Copy the input image into the real plane and merge it with a zero imaginary plane
        Cv.Scale(srcImg, realInput, 1.0, 0.0);
        Cv.Zero(imaginaryInput);
        Cv.Merge(realInput, imaginaryInput, null, null, complexInput);

        // (2) Compute the optimal DFT size and allocate a matrix of that size
        int dftM = Cv.GetOptimalDFTSize(srcImg.Height - 1);
        int dftN = Cv.GetOptimalDFTSize(srcImg.Width - 1);

        using (CvMat dft_A = Cv.CreateMat(dftM, dftN, MatrixType.F64C2))
        using (IplImage imageRe = new IplImage(new CvSize(dftN, dftM), BitDepth.F64, 1))
        using (IplImage imageIm = new IplImage(new CvSize(dftN, dftM), BitDepth.F64, 1))
        {
            // (3) Copy the complex plane into dft_A and zero-fill the remaining right part of the matrix
            CvMat tmp;
            Cv.GetSubRect(dft_A, out tmp, new CvRect(0, 0, srcImg.Width, srcImg.Height));
            Cv.Copy(complexInput, tmp, null);
            if (dft_A.Cols > srcImg.Width)
            {
                Cv.GetSubRect(dft_A, out tmp, new CvRect(srcImg.Width, 0, dft_A.Cols - srcImg.Width, srcImg.Height));
                Cv.Zero(tmp);
            }

            // (4) Perform the DFT and split the result into real and imaginary parts
            Cv.DFT(dft_A, dft_A, DFTFlag.Forward, complexInput.Height);
            Cv.Split(dft_A, imageRe, imageIm, null, null);

            // (5) Compute the spectrum magnitude: Mag = sqrt(Re^2 + Im^2)
            Cv.Pow(imageRe, imageRe, 2.0);
            Cv.Pow(imageIm, imageIm, 2.0);
            Cv.Add(imageRe, imageIm, imageRe, null);
            Cv.Pow(imageRe, imageRe, 0.5);

            // (6) Take the logarithm of the magnitude: log(1 + Mag)
            Cv.AddS(imageRe, CvScalar.ScalarAll(1.0), imageRe, null);
            Cv.Log(imageRe, imageRe);

            // (7) Swap the image quadrants so that the origin (DC component) is at the centre
            ShiftDFT(imageRe, imageRe);

            // (8) Rescale the magnitude image so that its pixel values lie in 0.0-1.0
            double m, M;
            Cv.MinMaxLoc(imageRe, out m, out M);
            Cv.Scale(imageRe, imageRe, 1.0 / (M - m), 1.0 * (-m) / (M - m));

            using (new CvWindow("Image", WindowMode.AutoSize, srcImg))
            using (new CvWindow("Magnitude", WindowMode.AutoSize, imageRe))
            {
                Cv.WaitKey(0);
            }
        }
    }
}
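// --- Helper sketch (not part of the original example) ---
// ShiftDFT is called in step (7) but is not shown in this listing. A minimal
// sketch of what it is expected to do (the conventional quadrant swap from the
// OpenCV DFT sample), assuming single-channel F64 images with even width and
// height and that Cv.GetSubRect accepts an IplImage argument:
private static void ShiftDFT(IplImage src, IplImage dst)
{
    int cx = src.Width / 2;
    int cy = src.Height / 2;
    using (CvMat tmp = Cv.CreateMat(cy, cx, MatrixType.F64C1))
    {
        CvMat q1, q2, q3, q4, d1, d2, d3, d4;
        Cv.GetSubRect(src, out q1, new CvRect(0, 0, cx, cy));    // top-left
        Cv.GetSubRect(src, out q2, new CvRect(cx, 0, cx, cy));   // top-right
        Cv.GetSubRect(src, out q3, new CvRect(0, cy, cx, cy));   // bottom-left
        Cv.GetSubRect(src, out q4, new CvRect(cx, cy, cx, cy));  // bottom-right
        Cv.GetSubRect(dst, out d1, new CvRect(0, 0, cx, cy));
        Cv.GetSubRect(dst, out d2, new CvRect(cx, 0, cx, cy));
        Cv.GetSubRect(dst, out d3, new CvRect(0, cy, cx, cy));
        Cv.GetSubRect(dst, out d4, new CvRect(cx, cy, cx, cy));

        // swap top-left with bottom-right (tmp makes this safe in place)
        Cv.Copy(q1, tmp, null);
        Cv.Copy(q4, d1, null);
        Cv.Copy(tmp, d4, null);
        // swap top-right with bottom-left
        Cv.Copy(q2, tmp, null);
        Cv.Copy(q3, d2, null);
        Cv.Copy(tmp, d3, null);
    }
}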
public Watershed()
{
    using (var srcImg = new IplImage(FilePath.Image.Goryokaku, LoadMode.AnyDepth | LoadMode.AnyColor))
    using (var dstImg = srcImg.Clone())
    using (var dspImg = srcImg.Clone())
    using (var markers = new IplImage(srcImg.Size, BitDepth.S32, 1))
    {
        markers.Zero();
        using (var window = new CvWindow("image", WindowMode.AutoSize))
        {
            window.Image = srcImg;
            // Mouse event
            int seedNum = 0;
            window.OnMouseCallback += delegate(MouseEvent ev, int x, int y, MouseEvent flags)
            {
                if (ev == MouseEvent.LButtonDown)
                {
                    seedNum++;
                    CvPoint pt = new CvPoint(x, y);
                    markers.Circle(pt, 20, CvScalar.ScalarAll(seedNum), Cv.FILLED, LineType.Link8, 0);
                    dspImg.Circle(pt, 20, CvColor.White, 3, LineType.Link8, 0);
                    window.Image = dspImg;
                }
            };
            CvWindow.WaitKey();
        }

        Cv.Watershed(srcImg, markers);

        // draws watershed
        for (int i = 0; i < markers.Height; i++)
        {
            for (int j = 0; j < markers.Width; j++)
            {
                int idx = (int)(markers.Get2D(i, j).Val0);
                if (idx == -1)
                {
                    dstImg.Set2D(i, j, CvColor.Red);
                }
            }
        }

        using (CvWindow wDst = new CvWindow("watershed transform", WindowMode.AutoSize))
        {
            wDst.Image = dstImg;
            CvWindow.WaitKey();
        }
    }
}
public IplImage AffineImage(IplImage src)
{
    affine = new IplImage(src.Size, BitDepth.U8, 3);

    CvPoint2D32f[] srcPoint = new CvPoint2D32f[3];
    CvPoint2D32f[] dstPoint = new CvPoint2D32f[3];

    srcPoint[0] = new CvPoint2D32f(100.0, 100.0);
    srcPoint[1] = new CvPoint2D32f(src.Width - 100.0, 100.0);
    srcPoint[2] = new CvPoint2D32f(100.0, src.Height - 100.0);

    dstPoint[0] = new CvPoint2D32f(300.0, 100.0);
    dstPoint[1] = new CvPoint2D32f(src.Width - 100.0, 100.0);
    dstPoint[2] = new CvPoint2D32f(100.0, src.Height - 100.0);

    CvMat matrix = Cv.GetAffineTransform(srcPoint, dstPoint);
    Console.WriteLine(matrix);

    Cv.WarpAffine(src, affine, matrix, Interpolation.Linear, CvScalar.ScalarAll(0));
    return affine;
}
#if LANG_JP
/// <summary>
/// Sauvolaの手法による二値化処理を行う(高速だが、メモリを多く消費するバージョン)。
/// </summary>
/// <param name="src">入力画像</param>
/// <param name="dst">出力画像</param>
/// <param name="kernelSize">局所領域のサイズ</param>
/// <param name="k">係数</param>
/// <param name="r">係数</param>
#else
/// <summary>
/// Binarizes by Sauvola's method (this is faster but memory-hogging)
/// </summary>
/// <param name="src">Input image</param>
/// <param name="dst">Output image</param>
/// <param name="kernelSize">Window size</param>
/// <param name="k">Adequate coefficient</param>
/// <param name="r">Adequate coefficient</param>
#endif
public static void SauvolaFast(IplImage src, IplImage dst, int kernelSize, double k, double r)
{
    if (src == null) { throw new ArgumentNullException("src"); }
    if (dst == null) { throw new ArgumentNullException("dst"); }
    // grayscale images only
    if (src.NChannels != 1) { throw new ArgumentException("src must be gray scale image"); }
    if (dst.NChannels != 1) { throw new ArgumentException("dst must be gray scale image"); }
    // check the window size and coefficient
    if (kernelSize < 3) { throw new ArgumentOutOfRangeException("kernelSize", "size must be 3 and above"); }
    if (kernelSize % 2 == 0) { throw new ArgumentOutOfRangeException("kernelSize", "size must be odd number"); }
    if (r == 0) { throw new ArgumentOutOfRangeException("r", "r == 0"); }

    int borderSize = kernelSize / 2;
    CvRect roi = src.ROI;
    int width = roi.Width;
    int height = roi.Height;
    if (width != dst.Width || height != dst.Height) { throw new ArgumentException("src.Size == dst.Size"); }

    using (IplImage imgTemp = new IplImage(width + (borderSize * 2), height + (borderSize * 2), src.Depth, src.NChannels))
    using (IplImage imgSum = new IplImage(imgTemp.Width + 1, imgTemp.Height + 1, BitDepth.F64, 1))
    using (IplImage imgSqSum = new IplImage(imgTemp.Width + 1, imgTemp.Height + 1, BitDepth.F64, 1))
    {
        Cv.CopyMakeBorder(src, imgTemp, new CvPoint(borderSize, borderSize), BorderType.Replicate, CvScalar.ScalarAll(0));
        Cv.Integral(imgTemp, imgSum, imgSqSum);

        unsafe
        {
            byte* pSrc = src.ImageDataPtr;
            byte* pDst = dst.ImageDataPtr;
            //byte* pTemp = imgTemp.ImageDataPtr;
            double* pSum = (double*)imgSum.ImageDataPtr;
            double* pSqSum = (double*)imgSqSum.ImageDataPtr;
            int stepSrc = src.WidthStep;
            int stepDst = dst.WidthStep;
            int stepSum = imgSum.WidthStep / sizeof(double);
            int ylim = height + borderSize;
            int xlim = width + borderSize;
            int kernelPixels = kernelSize * kernelSize;

            for (int y = borderSize; y < ylim; y++)
            {
                for (int x = borderSize; x < xlim; x++)
                {
                    int x1 = x - borderSize;
                    int y1 = y - borderSize;
                    int x2 = x + borderSize + 1;
                    int y2 = y + borderSize + 1;
                    double sum = pSum[stepSum * y2 + x2] - pSum[stepSum * y2 + x1] - pSum[stepSum * y1 + x2] + pSum[stepSum * y1 + x1];
                    double sqsum = pSqSum[stepSum * y2 + x2] - pSqSum[stepSum * y2 + x1] - pSqSum[stepSum * y1 + x2] + pSqSum[stepSum * y1 + x1];
                    double mean = sum / kernelPixels;
                    double var = (sqsum / kernelPixels) - (mean * mean);
                    if (var < 0.0) { var = 0.0; }
                    double stddev = Math.Sqrt(var);
                    double threshold = mean * (1 + k * (stddev / r - 1));

                    int offsetSrc = stepSrc * (y + roi.Y - borderSize) + (x + roi.X - borderSize);
                    int offsetDst = stepDst * (y - borderSize) + (x - borderSize);
                    if (pSrc[offsetSrc] < threshold) { pDst[offsetDst] = 0; }
                    else { pDst[offsetDst] = 255; }
                }
            }
        }
    }
}
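// --- Reference sketch (not part of the original implementation) ---
// SauvolaFast above uses integral images so the window mean and standard
// deviation come out in O(1) per pixel before applying Sauvola's rule
// t = mean * (1 + k * (stddev / r - 1)). For comparison, a deliberately naive
// per-pixel version of the same rule (no integral images, window simply
// clipped at the borders); illustrative only, assuming 8-bit single-channel
// src/dst of equal size.
public static void SauvolaSlow(IplImage src, IplImage dst, int kernelSize, double k, double r)
{
    int half = kernelSize / 2;
    for (int y = 0; y < src.Height; y++)
    {
        for (int x = 0; x < src.Width; x++)
        {
            double sum = 0, sqSum = 0;
            int count = 0;
            for (int dy = -half; dy <= half; dy++)
            {
                for (int dx = -half; dx <= half; dx++)
                {
                    int yy = y + dy, xx = x + dx;
                    if (yy < 0 || yy >= src.Height || xx < 0 || xx >= src.Width)
                        continue; // skip window pixels that fall outside the image
                    double v = src.Get2D(yy, xx).Val0;
                    sum += v;
                    sqSum += v * v;
                    count++;
                }
            }
            double mean = sum / count;
            double variance = sqSum / count - mean * mean;
            double stddev = Math.Sqrt(Math.Max(variance, 0.0));
            double threshold = mean * (1 + k * (stddev / r - 1)); // Sauvola's rule
            double value = (src.Get2D(y, x).Val0 < threshold) ? 0 : 255;
            dst.Set2D(y, x, CvScalar.ScalarAll(value));
        }
    }
}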
public Perspective()
{
    using (var srcImg = new IplImage(FilePath.Image.Lenna, LoadMode.AnyDepth | LoadMode.AnyColor))
    using (var dstImg = srcImg.Clone())
    {
        CvPoint2D32f[] srcPnt = new CvPoint2D32f[4];
        CvPoint2D32f[] dstPnt = new CvPoint2D32f[4];
        srcPnt[0] = new CvPoint2D32f(150.0f, 150.0f);
        srcPnt[1] = new CvPoint2D32f(150.0f, 300.0f);
        srcPnt[2] = new CvPoint2D32f(350.0f, 300.0f);
        srcPnt[3] = new CvPoint2D32f(350.0f, 150.0f);
        dstPnt[0] = new CvPoint2D32f(200.0f, 200.0f);
        dstPnt[1] = new CvPoint2D32f(150.0f, 300.0f);
        dstPnt[2] = new CvPoint2D32f(350.0f, 300.0f);
        dstPnt[3] = new CvPoint2D32f(300.0f, 200.0f);
        using (CvMat mapMatrix = Cv.GetPerspectiveTransform(srcPnt, dstPnt))
        {
            Cv.WarpPerspective(srcImg, dstImg, mapMatrix, Interpolation.Linear | Interpolation.FillOutliers, CvScalar.ScalarAll(100));
            using (new CvWindow("src", srcImg))
            using (new CvWindow("dst", dstImg))
            {
                Cv.WaitKey(0);
            }
        }
    }
}
/// <summary>
/// Trains and evaluates a boosted trees classifier (26-class letter data, unrolled to a 2-class problem).
/// </summary>
/// <param name="dataFilename">CSV database to read</param>
/// <param name="filenameToSave">File to save the trained classifier to (may be null)</param>
/// <param name="filenameToLoad">File to load a previously trained classifier from (may be null)</param>
private void BuildBoostClassifier(string dataFilename, string filenameToSave, string filenameToLoad)
{
    const int ClassCount = 26;
    CvMat data = null;
    CvMat responses = null;
    CvMat varType = null;
    CvMat tempSample = null;
    CvMat weakResponses = null;
    int nsamplesAll = 0, ntrainSamples = 0;
    int varCount;
    double trainHr = 0, testHr = 0;
    CvBoost boost = new CvBoost();

    try
    {
        ReadNumClassData(dataFilename, 16, out data, out responses);
    }
    catch
    {
        Console.WriteLine("Could not read the database {0}", dataFilename);
        return;
    }
    Console.WriteLine("The database {0} is loaded.", dataFilename);

    nsamplesAll = data.Rows;
    ntrainSamples = (int)(nsamplesAll * 0.5);
    varCount = data.Cols;

    // Create or load Boosted Tree classifier
    if (filenameToLoad != null)
    {
        // load classifier from the specified file
        boost.Load(filenameToLoad);
        ntrainSamples = 0;
        if (boost.GetWeakPredictors() == null)
        {
            Console.WriteLine("Could not read the classifier {0}", filenameToLoad);
            return;
        }
        Console.WriteLine("The classifier {0} is loaded.", filenameToLoad);
    }
    else
    {
        // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
        //
        // As currently boosted tree classifier in MLL can only be trained
        // for 2-class problems, we transform the training database by
        // "unrolling" each training sample as many times as the number of
        // classes (26) that we have.
        //
        // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
        using (CvMat newData = new CvMat(ntrainSamples * ClassCount, varCount + 1, MatrixType.F32C1))
        using (CvMat newResponses = new CvMat(ntrainSamples * ClassCount, 1, MatrixType.S32C1))
        {
            // 1. unroll the database type mask
            Console.WriteLine("Unrolling the database...");
            for (int i = 0; i < ntrainSamples; i++)
            {
                unsafe
                {
                    float* dataRow = (float*)(data.DataByte + data.Step * i);
                    for (int j = 0; j < ClassCount; j++)
                    {
                        float* newDataRow = (float*)(newData.DataByte + newData.Step * (i * ClassCount + j));
                        for (int k = 0; k < varCount; k++)
                        {
                            newDataRow[k] = dataRow[k];
                        }
                        newDataRow[varCount] = (float)j;
                        newResponses.DataInt32[i * ClassCount + j] = (responses.DataSingle[i] == j + 'A') ? 1 : 0;
                    }
                }
            }

            // 2. create type mask
            varType = new CvMat(varCount + 2, 1, MatrixType.U8C1);
            varType.Set(CvScalar.ScalarAll(CvStatModel.CV_VAR_ORDERED));
            // the last indicator variable, as well as the new (binary) response, are categorical
            varType.SetReal1D(varCount, CvStatModel.CV_VAR_CATEGORICAL);
            varType.SetReal1D(varCount + 1, CvStatModel.CV_VAR_CATEGORICAL);

            // 3. train classifier
            Console.Write("Training the classifier (may take a few minutes)...");
            boost.Train(
                newData, DTreeDataLayout.RowSample, newResponses, null, null, varType, null,
                new CvBoostParams(CvBoost.REAL, 100, 0.95, 5, false, null)
            );
        }
        Console.WriteLine();
    }

    tempSample = new CvMat(1, varCount + 1, MatrixType.F32C1);
    weakResponses = new CvMat(1, boost.GetWeakPredictors().Total, MatrixType.F32C1);

    // compute prediction error on train and test data
    for (int i = 0; i < nsamplesAll; i++)
    {
        int bestClass = 0;
        double maxSum = double.MinValue;
        double r;
        CvMat sample;
        Cv.GetRow(data, out sample, i);
        for (int k = 0; k < varCount; k++)
        {
            tempSample.DataArraySingle[k] = sample.DataArraySingle[k];
        }
        for (int j = 0; j < ClassCount; j++)
        {
            tempSample.DataArraySingle[varCount] = (float)j;
            boost.Predict(tempSample, null, weakResponses);
            double sum = weakResponses.Sum().Val0;
            if (maxSum < sum)
            {
                maxSum = sum;
                bestClass = j + 'A';
            }
        }
        r = (Math.Abs(bestClass - responses.DataArraySingle[i]) < float.Epsilon) ? 1 : 0;
        if (i < ntrainSamples) { trainHr += r; }
        else { testHr += r; }
    }

    testHr /= (double)(nsamplesAll - ntrainSamples);
    trainHr /= (double)ntrainSamples;
    Console.WriteLine("Recognition rate: train = {0:F1}%, test = {1:F1}%", trainHr * 100.0, testHr * 100.0);
    Console.WriteLine("Number of trees: {0}", boost.GetWeakPredictors().Total);

    // Save classifier to file if needed
    if (filenameToSave != null)
    {
        boost.Save(filenameToSave);
    }

    Console.Read();

    tempSample.Dispose();
    weakResponses.Dispose();
    if (varType != null) { varType.Dispose(); }
    data.Dispose();
    responses.Dispose();
    boost.Dispose();
}
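// --- Usage sketch (not part of the original example) ---
// Hypothetical invocations of BuildBoostClassifier; the paths are placeholders
// (the 16-feature, 26-class layout matches the UCI letter-recognition data
// used by the original OpenCV letter_recog sample).
BuildBoostClassifier("letter-recognition.data", "boost_letters.xml", null);   // train and save
BuildBoostClassifier("letter-recognition.data", null, "boost_letters.xml");   // load and evaluate only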
//* MODE : STAMP *//
private IplImage mode_Stamp(IplImage srcImg)
{
    // 1. Extract the object (bounding box of the content)
    Took3D.checkSize(srcImg);
    int minX = Took3D.minX, minY = Took3D.minY;
    int maxX = Took3D.maxX - minX, maxY = Took3D.maxY - minY;
    srcImg.SetROI(new CvRect(minX, minY, maxX + 1, maxY + 1));
    IplImage src = new IplImage(maxX + 1, maxY + 1, srcImg.Depth, srcImg.NChannels);
    srcImg.Copy(src);

    // 2. Create the stamp image (white background)
    IplImage stampImg = new IplImage(200, 200, src.Depth, src.NChannels);
    stampImg.Set(CvScalar.ScalarAll(255));

    // 3. Resize the object to fit the stamp area, preserving the aspect ratio
    int roi_width = 175;
    int roi_height = 175;
    IplImage gr_hole;
    int setHeight = 0, setWidth = 0;
    if (src.Width > src.Height)
    {
        setWidth = roi_width;
        setHeight = (roi_width * src.Height) / src.Width;
        if (setHeight > roi_height)
        {
            setHeight = roi_height;
            setWidth = (roi_height * setWidth) / setHeight;
        }
        gr_hole = new IplImage(setWidth, setHeight, src.Depth, src.NChannels);
    }
    else if (src.Width < src.Height)
    {
        setHeight = roi_height;
        setWidth = (roi_height * src.Width) / src.Height;
        if (setWidth > roi_width)
        {
            setWidth = roi_width;
            setHeight = (roi_width * setHeight) / setWidth;
        }
        gr_hole = new IplImage(setWidth, setHeight, src.Depth, src.NChannels);
    }
    else
    {
        setHeight = roi_height;
        setWidth = (roi_height * src.Width) / src.Height;
        gr_hole = new IplImage(setWidth, setHeight, src.Depth, src.NChannels);
    }
    src.Resize(gr_hole, Interpolation.Cubic);

    // 4. Centre the resized object inside the 200x200 stamp image
    int mid_X = (200 / 2) - (gr_hole.Width / 2);
    int mid_Y = (200 / 2) - (gr_hole.Height / 2);
    stampImg.SetROI(mid_X, mid_Y, gr_hole.Width, gr_hole.Height);

    // 5. Paste it in
    gr_hole.Copy(stampImg);

    // 6. Clean up
    srcImg.ResetROI();
    stampImg.ResetROI();
    gr_hole.Dispose();
    src.Dispose();

    return stampImg;
}
//* BUTTON_CONVERT *//
private void button_Convert_Click(object sender, EventArgs e)
{
    IplImage tempImgBox;
    Took3D.SET_Z = Int32.Parse(textBox_SetZ.Text);

    if (isImgOpen == true)
    {
        switch (currentMode)
        {
            // <<DEFAULT>>
            case Mode.DEFAULT:
                // Add a white border.
                tempImgBox = new IplImage(imgBox.Width + 10, imgBox.Height + 10, BitDepth.U8, 1);
                imgBox.CopyMakeBorder(tempImgBox, new CvPoint(5, 5), BorderType.Constant, CvScalar.ScalarAll(0xFF));
                // Modeling
                Took3D.START(tempImgBox);
                if (radioButton_Polygon.Checked == true)
                {
                    bool checkBottomMode = Took3D.bottomPolygon();
                    if (checkBottomMode == false)
                    {
                        radioButton_Background.Checked = true;
                    }
                    Took3D.binarySTL_Bottom();
                }
                else if (radioButton_Background.Checked == true)
                {
                    Took3D.bottomBackground();
                    Took3D.binarySTL_BackBottom();
                }
                else
                {
                    Took3D.binarySTL();
                }
                break;

            // <<STAMP>>
            case Mode.STAMP:
                tempImgBox = mode_Stamp(imgBox);
                Took3D.START(tempImgBox);
                Took3D.stampModeling();
                Took3D.binarySTL_BackBottom();
                break;

            // <<DOT>>
            case Mode.DOT:
                // 1. Resize
                tempImgBox = Dot_Resize(imgBox);
                // 2. Enlarge the image (to keep the dots circular)
                IplImage dot_tmp = new IplImage(tempImgBox.Width * 10, tempImgBox.Height * 10, tempImgBox.Depth, tempImgBox.NChannels);
                tempImgBox.Resize(dot_tmp, Interpolation.Cubic);
                tempImgBox.Dispose();
                // 3. Create the dot image
                tempImgBox = mode_Dot(dot_tmp);
                dot_tmp.Dispose();
                // 4. Add a white border
                dot_tmp = new IplImage(tempImgBox.Width + 4, tempImgBox.Height + 4, tempImgBox.Depth, tempImgBox.NChannels);
                tempImgBox.CopyMakeBorder(dot_tmp, new CvPoint(2, 2), BorderType.Constant, CvScalar.ScalarAll(0xFF));
                tempImgBox = dot_tmp;
                // 5. Start binarization
                Took3D.START(tempImgBox);
                Took3D.binarySTL();
                break;

            // <<RING>>
            case Mode.RING:
                tempImgBox = mode_Ring(imgBox);
                Took3D.START(tempImgBox);
                Took3D.binarySTL();
                break;

            default:
                break;
        }

        pictureBox.ImageIpl = Took3D.resultImage;
        //Console.WriteLine("minX : {0} minY : {1} maxX : {2} maxY : {3}", Took3D.minX, Took3D.minY, Took3D.maxX, Took3D.maxY);

        // Save the generated STL file
        saveFileDialog1.Filter = "STL File(*.stl)|*.stl";
        if (saveFileDialog1.ShowDialog() == DialogResult.OK)
        {
            FileInfo file = new FileInfo(@"C:\TookTemp\output");
            if (file.Exists)
            {
                File.Copy(@"C:\TookTemp\output", saveFileDialog1.FileName, true);
                MessageBox.Show("완료");
            }
        }
        //tempImgBox.Dispose();
    }
    else
    {
        MessageBox.Show("이미지를 선택해주세요!");
    }
}
public Affine()
{
    // cvGetAffineTransform + cvWarpAffine
    using (IplImage srcImg = new IplImage(FilePath.Image.Goryokaku, LoadMode.AnyDepth | LoadMode.AnyColor))
    using (IplImage dstImg = srcImg.Clone())
    {
        CvPoint2D32f[] srcPnt = new CvPoint2D32f[3];
        CvPoint2D32f[] dstPnt = new CvPoint2D32f[3];
        srcPnt[0] = new CvPoint2D32f(200.0f, 200.0f);
        srcPnt[1] = new CvPoint2D32f(250.0f, 200.0f);
        srcPnt[2] = new CvPoint2D32f(200.0f, 100.0f);
        dstPnt[0] = new CvPoint2D32f(300.0f, 100.0f);
        dstPnt[1] = new CvPoint2D32f(300.0f, 50.0f);
        dstPnt[2] = new CvPoint2D32f(200.0f, 100.0f);
        using (CvMat mapMatrix = Cv.GetAffineTransform(srcPnt, dstPnt))
        {
            Cv.WarpAffine(srcImg, dstImg, mapMatrix, Interpolation.Linear | Interpolation.FillOutliers, CvScalar.ScalarAll(0));
            using (new CvWindow("src", srcImg))
            using (new CvWindow("dst", dstImg))
            {
                Cv.WaitKey(0);
            }
        }
    }
}
public Perspective()
{
    // cvGetPerspectiveTransform + cvWarpPerspective
    // Compute a perspective transform matrix from four point correspondences
    // and warp the whole image with it.

    // (1) Load the image and allocate the output image
    using (IplImage srcImg = new IplImage(Const.ImageLenna, LoadMode.AnyDepth | LoadMode.AnyColor))
    using (IplImage dstImg = srcImg.Clone())
    {
        // (2) Set the corresponding vertices of the quadrilateral before and after the transform
        //     and compute the perspective transform matrix with cvGetPerspectiveTransform
        CvPoint2D32f[] srcPnt = new CvPoint2D32f[4];
        CvPoint2D32f[] dstPnt = new CvPoint2D32f[4];
        srcPnt[0] = new CvPoint2D32f(150.0f, 150.0f);
        srcPnt[1] = new CvPoint2D32f(150.0f, 300.0f);
        srcPnt[2] = new CvPoint2D32f(350.0f, 300.0f);
        srcPnt[3] = new CvPoint2D32f(350.0f, 150.0f);
        dstPnt[0] = new CvPoint2D32f(200.0f, 200.0f);
        dstPnt[1] = new CvPoint2D32f(150.0f, 300.0f);
        dstPnt[2] = new CvPoint2D32f(350.0f, 300.0f);
        dstPnt[3] = new CvPoint2D32f(300.0f, 200.0f);
        using (CvMat mapMatrix = Cv.GetPerspectiveTransform(srcPnt, dstPnt))
        {
            // (3) Warp the image with cvWarpPerspective using the computed matrix
            Cv.WarpPerspective(srcImg, dstImg, mapMatrix, Interpolation.Linear | Interpolation.FillOutliers, CvScalar.ScalarAll(100));
            // (4) Show the results
            using (new CvWindow("src", srcImg))
            using (new CvWindow("dst", dstImg))
            {
                Cv.WaitKey(0);
            }
        }
    }
}
/// <summary>
/// RTrees
/// </summary>
/// <param name="dataFilename">CSV database to read</param>
/// <param name="filenameToSave">File to save the trained classifier to (may be null)</param>
/// <param name="filenameToLoad">File to load a previously trained classifier from (may be null)</param>
private void BuildRtreesClassifier(string dataFilename, string filenameToSave, string filenameToLoad)
{
    CvMat data = null;
    CvMat responses = null;
    CvMat varType = null;
    CvMat sampleIdx = null;
    int nsamplesAll = 0, ntrainSamples = 0;
    double trainHr = 0, testHr = 0;
    CvRTrees forest = new CvRTrees();

    try
    {
        ReadNumClassData(dataFilename, 16, out data, out responses);
    }
    catch
    {
        Console.WriteLine("Could not read the database {0}", dataFilename);
        return;
    }
    Console.WriteLine("The database {0} is loaded.", dataFilename);

    nsamplesAll = data.Rows;
    ntrainSamples = (int)(nsamplesAll * 0.8);

    // Create or load Random Trees classifier
    if (filenameToLoad != null)
    {
        // load classifier from the specified file
        forest.Load(filenameToLoad);
        ntrainSamples = 0;
        if (forest.GetTreeCount() == 0)
        {
            Console.WriteLine("Could not read the classifier {0}", filenameToLoad);
            return;
        }
        Console.WriteLine("The classifier {0} is loaded.", filenameToLoad);
    }
    else
    {
        // create classifier by using <data> and <responses>
        Console.Write("Training the classifier ...");

        // 1. create type mask
        varType = new CvMat(data.Cols + 1, 1, MatrixType.U8C1);
        varType.Set(CvScalar.ScalarAll(CvStatModel.CV_VAR_ORDERED));
        varType.SetReal1D(data.Cols, CvStatModel.CV_VAR_CATEGORICAL);

        // 2. create sample_idx
        sampleIdx = new CvMat(1, nsamplesAll, MatrixType.U8C1);
        {
            CvMat mat;
            Cv.GetCols(sampleIdx, out mat, 0, ntrainSamples);
            mat.Set(CvScalar.RealScalar(1));
            Cv.GetCols(sampleIdx, out mat, ntrainSamples, nsamplesAll);
            mat.SetZero();
        }

        // 3. train classifier
        forest.Train(
            data, DTreeDataLayout.RowSample, responses, null, sampleIdx, varType, null,
            new CvRTParams(10, 10, 0, false, 15, null, true, 4, new CvTermCriteria(100, 0.01f))
        );
        Console.WriteLine();
    }

    // compute prediction error on train and test data
    for (int i = 0; i < nsamplesAll; i++)
    {
        double r;
        CvMat sample;
        Cv.GetRow(data, out sample, i);

        r = forest.Predict(sample);
        r = Math.Abs((double)r - responses.DataArraySingle[i]) <= float.Epsilon ? 1 : 0;

        if (i < ntrainSamples) { trainHr += r; }
        else { testHr += r; }
    }

    testHr /= (double)(nsamplesAll - ntrainSamples);
    trainHr /= (double)ntrainSamples;
    Console.WriteLine("Recognition rate: train = {0:F1}%, test = {1:F1}%", trainHr * 100.0, testHr * 100.0);
    Console.WriteLine("Number of trees: {0}", forest.GetTreeCount());

    // Print variable importance
    Mat varImportance0 = forest.GetVarImportance();
    CvMat varImportance = varImportance0.ToCvMat();
    if (varImportance != null)
    {
        double rtImpSum = Cv.Sum(varImportance).Val0;
        Console.WriteLine("var#\timportance (in %):");
        for (int i = 0; i < varImportance.Cols; i++)
        {
            Console.WriteLine("{0}\t{1:F1}", i, 100.0f * varImportance.DataArraySingle[i] / rtImpSum);
        }
    }

    // Print some proximities
    Console.WriteLine("Proximities between some samples corresponding to the letter 'T':");
    {
        CvMat sample1, sample2;
        int[,] pairs = new int[,] { { 0, 103 }, { 0, 106 }, { 106, 103 }, { -1, -1 } };
        for (int i = 0; pairs[i, 0] >= 0; i++)
        {
            Cv.GetRow(data, out sample1, pairs[i, 0]);
            Cv.GetRow(data, out sample2, pairs[i, 1]);
            Console.WriteLine("proximity({0},{1}) = {2:F1}%",
                pairs[i, 0], pairs[i, 1], forest.GetProximity(sample1, sample2) * 100.0);
        }
    }

    // Save Random Trees classifier to file if needed
    if (filenameToSave != null)
    {
        forest.Save(filenameToSave);
    }

    Console.Read();

    if (sampleIdx != null) { sampleIdx.Dispose(); }
    if (varType != null) { varType.Dispose(); }
    data.Dispose();
    responses.Dispose();
    forest.Dispose();
}
private IplImage Dot_Resize(IplImage srcImg)
{
    // 1. Extract the object (bounding box of the content)
    Took3D.checkSize(srcImg);
    int minX = Took3D.minX, minY = Took3D.minY;
    int maxX = Took3D.maxX - minX, maxY = Took3D.maxY - minY;
    srcImg.SetROI(new CvRect(minX, minY, maxX, maxY));
    IplImage src = new IplImage(maxX, maxY, srcImg.Depth, srcImg.NChannels);
    srcImg.Copy(src);

    // 2. Create the dot base image (white background)
    IplImage dotBackImg = new IplImage(500, 500, src.Depth, src.NChannels);
    dotBackImg.Set(CvScalar.ScalarAll(255));

    // 3. Resize the object to fit the dot image, preserving the aspect ratio
    int roi_width = 470;
    int roi_height = 470;
    IplImage temp;
    int setHeight = 0, setWidth = 0;
    if (src.Width > src.Height)
    {
        setWidth = roi_width;
        setHeight = (roi_width * src.Height) / src.Width;
        if (setHeight > roi_height)
        {
            setHeight = roi_height;
            setWidth = (roi_height * setWidth) / setHeight;
        }
        temp = new IplImage(setWidth, setHeight, src.Depth, src.NChannels);
    }
    else if (src.Width < src.Height)
    {
        setHeight = roi_height;
        setWidth = (roi_height * src.Width) / src.Height;
        if (setWidth > roi_width)
        {
            setWidth = roi_width;
            setHeight = (roi_width * setHeight) / setWidth;
        }
        temp = new IplImage(setWidth, setHeight, src.Depth, src.NChannels);
    }
    else
    {
        setHeight = roi_height;
        setWidth = (roi_height * src.Width) / src.Height;
        temp = new IplImage(setWidth, setHeight, src.Depth, src.NChannels);
    }
    src.Resize(temp, Interpolation.Cubic);

    // 4. Centre the resized object inside the 500x500 image
    int mid_X = (500 / 2) - (temp.Width / 2);
    int mid_Y = (500 / 2) - (temp.Height / 2);
    dotBackImg.SetROI(mid_X, mid_Y, temp.Width, temp.Height);

    // 5. Paste it in
    temp.Copy(dotBackImg);

    // 6. Clean up
    srcImg.ResetROI();
    dotBackImg.ResetROI();
    temp.Dispose();
    src.Dispose();

    return dotBackImg;
}
public void rotate(double inputangle)
{
    dst = Cv.CreateImage(src.Size, BitDepth.U8, 3);
    double angle = inputangle; // positive = counter-clockwise
    double scale = 1.0;
    CvPoint2D32f centralPoint = new CvPoint2D32f(src.Width / 2, src.Height / 2); // rotation centre
    CvMat rotationMatrix = Cv.CreateMat(2, 3, MatrixType.F32C1);
    Cv._2DRotationMatrix(centralPoint, angle, scale, out rotationMatrix); // build the rotation matrix
    Cv.WarpAffine(src, dst, rotationMatrix, Interpolation.Linear | Interpolation.FillOutliers, CvScalar.ScalarAll(0));
    pictureBoxIpl2.ImageIpl = dst;
    Cv.ReleaseMat(rotationMatrix);
}
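// --- Variant sketch (not part of the original example) ---
// The method above allocates a 2x3 matrix and fills it via Cv._2DRotationMatrix;
// the earlier RotateImage/RotateTransform helpers show the shorter route through
// Cv.GetRotationMatrix2D. A hypothetical rewrite of rotate() that way, assuming
// the same src, dst and pictureBoxIpl2 fields as above:
public void RotateWithGetRotationMatrix2D(double inputAngle)
{
    dst = Cv.CreateImage(src.Size, BitDepth.U8, 3);
    CvPoint2D32f center = new CvPoint2D32f(src.Width / 2, src.Height / 2);
    CvMat rotationMatrix = Cv.GetRotationMatrix2D(center, inputAngle, 1.0); // angle in degrees, counter-clockwise
    Cv.WarpAffine(src, dst, rotationMatrix, Interpolation.Linear | Interpolation.FillOutliers, CvScalar.ScalarAll(0));
    pictureBoxIpl2.ImageIpl = dst;
}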
public Affine()
{
    // cvGetAffineTransform + cvWarpAffine
    // Compute an affine transform matrix from three point correspondences
    // and warp the whole image with it.

    // (1) Load the image and allocate the output image
    using (IplImage srcImg = new IplImage(Const.ImageGoryokaku, LoadMode.AnyDepth | LoadMode.AnyColor))
    using (IplImage dstImg = srcImg.Clone())
    {
        // (2) Set the corresponding vertices of the triangle before and after the transform
        //     and compute the affine matrix with cvGetAffineTransform
        CvPoint2D32f[] srcPnt = new CvPoint2D32f[3];
        CvPoint2D32f[] dstPnt = new CvPoint2D32f[3];
        srcPnt[0] = new CvPoint2D32f(200.0f, 200.0f);
        srcPnt[1] = new CvPoint2D32f(250.0f, 200.0f);
        srcPnt[2] = new CvPoint2D32f(200.0f, 100.0f);
        dstPnt[0] = new CvPoint2D32f(300.0f, 100.0f);
        dstPnt[1] = new CvPoint2D32f(300.0f, 50.0f);
        dstPnt[2] = new CvPoint2D32f(200.0f, 100.0f);
        using (CvMat mapMatrix = Cv.GetAffineTransform(srcPnt, dstPnt))
        {
            // (3) Warp the image with cvWarpAffine using the computed affine matrix
            Cv.WarpAffine(srcImg, dstImg, mapMatrix, Interpolation.Linear | Interpolation.FillOutliers, CvScalar.ScalarAll(0));
            // (4) Show the results
            using (new CvWindow("src", srcImg))
            using (new CvWindow("dst", dstImg))
            {
                Cv.WaitKey(0);
            }
        }
    }
}