Code example #1
File: 描画画面.cs Project: c13proto/tobii_camera
        public 描画画面()
        {
            InitializeComponent();

            dis_height = System.Windows.Forms.Screen.PrimaryScreen.Bounds.Height;
            dis_width = System.Windows.Forms.Screen.PrimaryScreen.Bounds.Width;
            pos_max = Tobii.pos_max;
            while (Tobii.眼球位置_L[0] == 0 || Tobii.眼球位置_R[0] == 100) { } // busy-wait here until both eyes are detected (blocks this thread)
            diff_in = Tobii.眼球位置_R[0] - Tobii.眼球位置_L[0];
            posY_in = (Tobii.眼球位置_L[1] + Tobii.眼球位置_R[1]) / 2;

            pictureBoxIpl1.Width = dis_width;
            pictureBoxIpl1.Height = dis_height;
            frame = Cv.CreateImage(new CvSize(dis_width, dis_height), BitDepth.U8, 3);
            background = メイン画面.background; // reuse the shared background (a fresh allocation here would be overwritten immediately)
            pictureBoxIpl1.ImageIpl = background;
            window_size = new CvSize(メイン画面.window[0], メイン画面.window[1]);
            point_old = new CvPoint(window_size.Width / 2, window_size.Height / 2);
            許容半径 = メイン画面.radius;

            PC = new System.Diagnostics.PerformanceCounter[3];

            タイマー開始();
        }
Code example #2
File: Resize.cs Project: 0sv/opencvsharp
        public Resize()
        {
            using (var src = new IplImage(FilePath.Image.Square5, LoadMode.AnyColor | LoadMode.AnyDepth))
            {
                CvSize size = new CvSize(src.Width * 2, src.Height * 2);
                using (IplImage dstNN = new IplImage(size, src.Depth, src.NChannels))
                using (IplImage dstCubic = new IplImage(size, src.Depth, src.NChannels))
                using (IplImage dstLinear = new IplImage(size, src.Depth, src.NChannels))
                using (IplImage dstLanczos = new IplImage(size, src.Depth, src.NChannels))                
                {
                    Cv.Resize(src, dstNN, Interpolation.NearestNeighbor);
                    Cv.Resize(src, dstCubic, Interpolation.Cubic);                    
                    Cv.Resize(src, dstLinear, Interpolation.Linear);
                    Cv.Resize(src, dstLanczos, Interpolation.Lanczos4);

                    using (new CvWindow("src", src))
                    using (new CvWindow("dst NearestNeighbor", dstNN))
                    using (new CvWindow("dst Cubic", dstCubic))
                    using (new CvWindow("dst Linear", dstLinear))
                    using (new CvWindow("dst Lanczos4", dstLanczos))                    
                    {
                        Cv.WaitKey();
                    }
                }
            }
        }
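Note: the Resize examples above only enlarge the image. When shrinking, area-based resampling generally gives the best quality; a minimal sketch under that assumption ("input.png" is a placeholder path, and Interpolation.Area is OpenCvSharp's name for CV_INTER_AREA):

        // Hypothetical downscaling sketch; the input path is illustrative.
        using (var src = new IplImage("input.png", LoadMode.AnyColor | LoadMode.AnyDepth))
        using (var dstHalf = new IplImage(new CvSize(src.Width / 2, src.Height / 2), src.Depth, src.NChannels))
        {
            Cv.Resize(src, dstHalf, Interpolation.Area); // area resampling reduces aliasing when shrinking
        }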
Code example #3
File: Resize.cs Project: qxp1011/opencvsharp
        public Resize()
        {
            // cvResize
            // Resizes the input image to fit the specified output image size.

            // (1) Load the image
            using (IplImage src = new IplImage(Const.ImageSquare5, LoadMode.AnyColor | LoadMode.AnyDepth))
            {
                // (2) Allocate the output image buffers
                CvSize size = new CvSize(src.Width * 2, src.Height * 2);
                using (IplImage dstNN = new IplImage(size, src.Depth, src.NChannels))
                using (IplImage dstCubic = new IplImage(size, src.Depth, src.NChannels))
                using (IplImage dstLinear = new IplImage(size, src.Depth, src.NChannels))
                using (IplImage dstLanczos = new IplImage(size, src.Depth, src.NChannels))                
                {
                    // (3) Resize the image
                    Cv.Resize(src, dstNN, Interpolation.NearestNeighbor);
                    Cv.Resize(src, dstCubic, Interpolation.Cubic);                    
                    Cv.Resize(src, dstLinear, Interpolation.Linear);
                    Cv.Resize(src, dstLanczos, Interpolation.Lanczos4);

                    // (4) Show the results
                    using (new CvWindow("src", src))
                    using (new CvWindow("dst NearestNeighbor", dstNN))
                    using (new CvWindow("dst Cubic", dstCubic))
                    using (new CvWindow("dst Linear", dstLinear))
                    using (new CvWindow("dst Lanczos4", dstLanczos))                    
                    {
                        Cv.WaitKey();
                    }
                }
            }
        }
Code example #4
File: Resize.cs Project: shimat/opencvsharp_2410
        public Resize()
        {
            using (var src = new IplImage(FilePath.Image.Square5, LoadMode.AnyColor | LoadMode.AnyDepth))
            {
                CvSize size = new CvSize(src.Width * 2, src.Height * 2);
                using (IplImage dstNN = new IplImage(size, src.Depth, src.NChannels))
                using (IplImage dstCubic = new IplImage(size, src.Depth, src.NChannels))
                using (IplImage dstLinear = new IplImage(size, src.Depth, src.NChannels))
                using (IplImage dstLanczos = new IplImage(size, src.Depth, src.NChannels))
                {
                    Cv.Resize(src, dstNN, Interpolation.NearestNeighbor);
                    Cv.Resize(src, dstCubic, Interpolation.Cubic);
                    Cv.Resize(src, dstLinear, Interpolation.Linear);
                    Cv.Resize(src, dstLanczos, Interpolation.Lanczos4);

                    using (new CvWindow("src", src))
                    using (new CvWindow("dst NearestNeighbor", dstNN))
                    using (new CvWindow("dst Cubic", dstCubic))
                    using (new CvWindow("dst Linear", dstLinear))
                    using (new CvWindow("dst Lanczos4", dstLanczos))
                    {
                        Cv.WaitKey();
                    }
                }
            }
        }
Code example #5
 public static extern void cvInitIntrinsicParams2D(
     IntPtr object_points,
     IntPtr image_points,
     IntPtr npoints,
     CvSize image_size,
     IntPtr camera_matrix,
     double aspect_ratio);
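This P/Invoke takes raw CvMat pointers. A minimal calling sketch, assuming matrices shaped as the cvInitIntrinsicParams2D documentation describes (the point count and image size are illustrative):

     // Hypothetical usage; n is the total number of calibration points.
     int n = 54;
     CvMat objectPoints = new CvMat(1, n, MatrixType.F32C3); // 3D model points
     CvMat imagePoints  = new CvMat(1, n, MatrixType.F32C2); // 2D projections
     CvMat npoints      = new CvMat(1, 1, MatrixType.S32C1); // points per view
     CvMat cameraMatrix = new CvMat(3, 3, MatrixType.F64C1); // output 3x3 intrinsics
     // ... fill objectPoints, imagePoints and npoints here ...
     cvInitIntrinsicParams2D(objectPoints.CvPtr, imagePoints.CvPtr, npoints.CvPtr,
                             new CvSize(640, 480), cameraMatrix.CvPtr, 1.0);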
Code example #6
File: FitLine.cs Project: 0sv/opencvsharp
        public FitLine()
        {
            CvSize imageSize = new CvSize(500, 500);

            // cvFitLine: fit a 2D line to points scattered around y = a*x + noise
            CvPoint2D32f[] points = GetRandomPoints(20, imageSize);
            CvLine2D line = Cv.FitLine2D(points, DistanceType.L2, 0, 0.01, 0.01);

            using (IplImage img = new IplImage(imageSize, BitDepth.U8, 3))
            {
                img.Zero();

                // draw line
                {
                    CvPoint pt1, pt2;
                    line.FitSize(img.Width, img.Height, out pt1, out pt2);
                    img.Line(pt1, pt2, CvColor.Green, 1, LineType.Link8);
                }

                // draw points and distances
                using (CvFont font = new CvFont(FontFace.HersheySimplex, 0.33, 0.33))
                {
                    foreach (CvPoint2D32f p in points)
                    {
                        double d = line.Distance(p);

                        img.Circle(p, 2, CvColor.White, -1, LineType.AntiAlias);
                        img.PutText(string.Format("{0:F1}", d), new CvPoint((int) (p.X + 3), (int) (p.Y + 3)), font, CvColor.Green);
                    }
                }

                CvWindow.ShowImages(img);
            }
        }
Code example #7
File: MainForm.cs Project: mind0n/hive
        private void CaptureCameraCallback()
        {
            const double ScaleFactor  = 2.5;
            const int    MinNeighbors = 1;
            CvSize       MinSize      = new CvSize(30, 30);

            CvCapture cap = CvCapture.FromCamera(1);
            CvHaarClassifierCascade cascade = CvHaarClassifierCascade.FromFile("haarcascade_eye.xml");

            while (true)
            {
                IplImage img = cap.QueryFrame();
                //IplImage.FromBitmap()
                //CvSeq<CvAvgComp> eyes = Cv.HaarDetectObjects(img, cascade, Cv.CreateMemStorage(), ScaleFactor, MinNeighbors, HaarDetectionType.DoCannyPruning, MinSize);

                //foreach (CvAvgComp eye in eyes.AsParallel())
                //{
                //    img.DrawRect(eye.Rect, CvColor.Red);

                //    if (eye.Rect.Left > pctCvWindow.Width / 2)
                //    {
                //        try
                //        {
                //            IplImage rightEyeImg1 = img.Clone();
                //            Cv.SetImageROI(rightEyeImg1, eye.Rect);
                //            IplImage rightEyeImg2 = Cv.CreateImage(eye.Rect.Size, rightEyeImg1.Depth, rightEyeImg1.NChannels);
                //            Cv.Copy(rightEyeImg1, rightEyeImg2, null);
                //            Cv.ResetImageROI(rightEyeImg1);


                //            Bitmap rightEyeBm = BitmapConverter.ToBitmap(rightEyeImg2);
                //            pctRightEye.Image = rightEyeBm;
                //        }
                //        catch { }
                //    }
                //    else
                //    {
                //        try
                //        {
                //            IplImage leftEyeImg1 = img.Clone();
                //            Cv.SetImageROI(leftEyeImg1, eye.Rect);
                //            IplImage leftEyeImg2 = Cv.CreateImage(eye.Rect.Size, leftEyeImg1.Depth, leftEyeImg1.NChannels);
                //            Cv.Copy(leftEyeImg1, leftEyeImg2, null);
                //            Cv.ResetImageROI(leftEyeImg1);

                //            Bitmap leftEyeBm = BitmapConverter.ToBitmap(leftEyeImg2);
                //            pctLeftEye.Image = leftEyeBm;
                //        }catch{}
                //    }
                //}

                Bitmap bm = BitmapConverter.ToBitmap(img);
                bm.SetResolution(pctCvWindow.Width, pctCvWindow.Height); // note: SetResolution sets DPI, not pixel dimensions
                //pctCvWindow.Image = bm;
                pb.Image = bm;
                img      = null;
                bm       = null;
                Thread.Sleep(100);
            }
        }
Code example #8
        private void Done(object sender, RoutedEventArgs e)
        {
            string imgPath = AppDomain.CurrentDomain.BaseDirectory + "instTest";
            //string imgPath = @"C:\Users\Won\Documents\instTest";

            int count = 1;

            foreach (ImagePreperationItem ipi in IpiList)
            {
                // identify chosen signal
                int    index      = ipi.SignalIndex;
                string signalSt   = SignalList[index];
                string signalName = SignalToNameDictionary[signalSt];
                ipi.SignalName = signalName;

                // Resize the image and save it into the MATLAB directory.
                IplImage img     = new IplImage(ipi.ImageName);
                CvSize   size    = new CvSize(ImgWidth, ImgHeight);
                IplImage resized = new IplImage(size, img.Depth, img.NChannels);
                Cv.Resize(img, resized);
                WriteableBitmap   rawImage = WriteableBitmapConverter.ToWriteableBitmap(resized);
                FileStream        fs       = new System.IO.FileStream(imgPath + @"\s1\" + count + ".jpg", System.IO.FileMode.Create);
                JpegBitmapEncoder pbe      = new JpegBitmapEncoder();
                pbe.Frames.Add(BitmapFrame.Create(rawImage));
                pbe.Save(fs);
                fs.Dispose();

                count++;
            }


            /*
             * MATLAB starts learning from the images
             */
            MLApp.MLApp matlab = new MLApp.MLApp();
            Console.WriteLine(@"cd '" + imgPath + "'");
            matlab.Execute(@"cd '" + imgPath + "'");

            // Define the output
            object result = null;

            int imgCount = count - 1;

            // Call the MATLAB function "learning" (5 output arguments expected)
            matlab.Feval("learning", 5, out result, imgCount);

            // Unpack the returned parameters
            parameters = result as object[];

            ImageParameters[0] = imgCount;
            ImageParameters[1] = ImgWidth;
            ImageParameters[2] = ImgHeight;

            string savePath = AppDomain.CurrentDomain.BaseDirectory + "params";

            // Save parameters and IPI list into file.
            WriteoutIpiList();
            WriteoutMatlabPrams();
        }
Code example #9
File: MainForm.cs Project: mind0n/hive
        private void CaptureCameraCallback()
        {
            const double ScaleFactor = 2.5;
            const int MinNeighbors = 1;
            CvSize MinSize = new CvSize(30, 30);

            CvCapture cap = CvCapture.FromCamera(1);
            CvHaarClassifierCascade cascade = CvHaarClassifierCascade.FromFile("haarcascade_eye.xml");
            while (true)
            {
                IplImage img = cap.QueryFrame();
                //IplImage.FromBitmap()
                //CvSeq<CvAvgComp> eyes = Cv.HaarDetectObjects(img, cascade, Cv.CreateMemStorage(), ScaleFactor, MinNeighbors, HaarDetectionType.DoCannyPruning, MinSize);
                
                //foreach (CvAvgComp eye in eyes.AsParallel())
                //{
                //    img.DrawRect(eye.Rect, CvColor.Red);

                //    if (eye.Rect.Left > pctCvWindow.Width / 2)
                //    {
                //        try
                //        {
                //            IplImage rightEyeImg1 = img.Clone();
                //            Cv.SetImageROI(rightEyeImg1, eye.Rect);
                //            IplImage rightEyeImg2 = Cv.CreateImage(eye.Rect.Size, rightEyeImg1.Depth, rightEyeImg1.NChannels);
                //            Cv.Copy(rightEyeImg1, rightEyeImg2, null);
                //            Cv.ResetImageROI(rightEyeImg1);

                            
                //            Bitmap rightEyeBm = BitmapConverter.ToBitmap(rightEyeImg2);
                //            pctRightEye.Image = rightEyeBm;
                //        }
                //        catch { }
                //    }
                //    else
                //    {
                //        try
                //        {
                //            IplImage leftEyeImg1 = img.Clone();
                //            Cv.SetImageROI(leftEyeImg1, eye.Rect);
                //            IplImage leftEyeImg2 = Cv.CreateImage(eye.Rect.Size, leftEyeImg1.Depth, leftEyeImg1.NChannels);
                //            Cv.Copy(leftEyeImg1, leftEyeImg2, null);
                //            Cv.ResetImageROI(leftEyeImg1);

                //            Bitmap leftEyeBm = BitmapConverter.ToBitmap(leftEyeImg2);
                //            pctLeftEye.Image = leftEyeBm;
                //        }catch{}
                //    }
                //}

                Bitmap bm = BitmapConverter.ToBitmap(img);
                bm.SetResolution(pctCvWindow.Width, pctCvWindow.Height); // note: SetResolution sets DPI, not pixel dimensions
                //pctCvWindow.Image = bm;
                pb.Image = bm;
                img = null;
                bm = null;
                Thread.Sleep(100);
            }
        }
Code example #10
File: DFT.cs Project: inohiroki/opencvsharp
        /// <summary>
        /// Swaps the quadrants of the image so that the origin (the DC component) ends up at the image center.
        /// srcArr and dstArr must be arrays of the same size and type.
        /// </summary>
        /// <param name="srcArr"></param>
        /// <param name="dstArr"></param>
        private static void ShiftDFT(CvArr srcArr, CvArr dstArr)
        {
            CvSize size    = Cv.GetSize(srcArr);
            CvSize dstSize = Cv.GetSize(dstArr);

            if (dstSize.Width != size.Width || dstSize.Height != size.Height)
            {
                throw new ApplicationException("Source and Destination arrays must have equal sizes");
            }
            // (9) Temporary buffer for in-place mode
            CvMat tmp = null;

            if (srcArr == dstArr)
            {
                tmp = Cv.CreateMat(size.Height / 2, size.Width / 2, Cv.GetElemType(srcArr));
            }
            int cx = size.Width / 2;   /* image center */
            int cy = size.Height / 2;

            // (10) Matrices for quadrants 1-4 of the source, and their copy destinations
            CvMat q1stub, q2stub;
            CvMat q3stub, q4stub;
            CvMat d1stub, d2stub;
            CvMat d3stub, d4stub;
            CvMat q1 = Cv.GetSubRect(srcArr, out q1stub, new CvRect(0, 0, cx, cy));
            CvMat q2 = Cv.GetSubRect(srcArr, out q2stub, new CvRect(cx, 0, cx, cy));
            CvMat q3 = Cv.GetSubRect(srcArr, out q3stub, new CvRect(cx, cy, cx, cy));
            CvMat q4 = Cv.GetSubRect(srcArr, out q4stub, new CvRect(0, cy, cx, cy));
            // Destination quadrants must come from dstArr (taking them from srcArr would leave dstArr untouched)
            CvMat d1 = Cv.GetSubRect(dstArr, out d1stub, new CvRect(0, 0, cx, cy));
            CvMat d2 = Cv.GetSubRect(dstArr, out d2stub, new CvRect(cx, 0, cx, cy));
            CvMat d3 = Cv.GetSubRect(dstArr, out d3stub, new CvRect(cx, cy, cx, cy));
            CvMat d4 = Cv.GetSubRect(dstArr, out d4stub, new CvRect(0, cy, cx, cy));

            // (11) Actually swap the quadrants
            if (srcArr != dstArr)
            {
                if (!Cv.ARE_TYPES_EQ(q1, d1))
                {
                    throw new ApplicationException("Source and Destination arrays must have the same format");
                }
                Cv.Copy(q3, d1, null);
                Cv.Copy(q4, d2, null);
                Cv.Copy(q1, d3, null);
                Cv.Copy(q2, d4, null);
            }
            else
            {      /* in-place mode */
                Cv.Copy(q3, tmp, null);
                Cv.Copy(q1, q3, null);
                Cv.Copy(tmp, q1, null);
                Cv.Copy(q4, tmp, null);
                Cv.Copy(q2, q4, null);
                Cv.Copy(tmp, q2, null);
            }
            if (tmp != null)
            {
                tmp.Dispose();
            }
        }
Code example #11
 public static extern double calib3d_calibrateCamera_vector(
     IntPtr[] objectPoints, int opSize1, int[] opSize2,
     IntPtr[] imagePoints, int ipSize1, int[] ipSize2,
     CvSize imageSize,
     [In, Out] double[,] cameraMatrix,
     [In, Out] double[] distCoeffs, int distCoeffsSize,
     IntPtr rvecs, IntPtr tvecs,
     int flags, CvTermCriteria criteria);
Code example #12
File: GpuMat.cs Project: inohiroki/opencvsharp
 /// <summary>
 ///
 /// </summary>
 /// <param name="size"></param>
 /// <param name="type"></param>
 /// <param name="s"></param>
 public GpuMat(CvSize size, MatrixType type, CvScalar s)
 {
     ptr = GpuInvoke.GpuMat_new11(size, type, s);
     if (ptr == IntPtr.Zero)
     {
         throw new OpenCvSharpException();
     }
 }
Code example #13
File: GpuMat.cs Project: inohiroki/opencvsharp
 /// <summary>
 /// allocates new matrix data unless the matrix already has specified size and type.
 /// previous data is unreferenced if needed.
 /// </summary>
 /// <param name="size"></param>
 /// <param name="type"></param>
 public void Create(CvSize size, MatrixType type)
 {
     if (disposed)
     {
         throw new ObjectDisposedException("GpuMat");
     }
     GpuInvoke.GpuMat_create2(ptr, size, type);
 }
Code example #14
 /// <summary>
 ///
 /// </summary>
 /// <param name="size"></param>
 /// <param name="type"></param>
 public GpuMat(CvSize size, MatrixType type)
 {
     ptr = NativeMethods.GpuMat_new6(size, type);
     if (ptr == IntPtr.Zero)
     {
         throw new OpenCvSharpException();
     }
 }
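A minimal usage sketch for the GpuMat constructors and Create shown in these examples (the sizes and the U8C1 type are illustrative):

     // Hypothetical usage: allocate an 8-bit, single-channel 640x480 matrix on the GPU,
     // then ask for a new geometry; Create only reallocates if size or type differ.
     GpuMat m = new GpuMat(new CvSize(640, 480), MatrixType.U8C1);
     m.Create(new CvSize(1024, 768), MatrixType.U8C1);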
Code example #15
 /// <summary>
 /// 
 /// </summary>
 /// <param name="_modelPoints"></param>
 /// <param name="_modelSize"></param>
 /// <param name="_maxBasicSolutions"></param>
 public CvModelEstimator2(int _modelPoints, CvSize _modelSize, int _maxBasicSolutions)
 {
     modelPoints = _modelPoints;
     modelSize = _modelSize;
     maxBasicSolutions = _maxBasicSolutions;
     checkPartialSubsets = true;
     rng = new CvRNG(-1);
 }
Code example #16
 /// <summary>
 /// 
 /// </summary>
 /// <param name="modelPoints"></param>
 /// <param name="modelSize"></param>
 /// <param name="maxBasicSolutions"></param>
 protected CvModelEstimator2(int modelPoints, CvSize modelSize, int maxBasicSolutions)
 {
     this.modelPoints = modelPoints;
     this.modelSize = modelSize;
     this.maxBasicSolutions = maxBasicSolutions;
     this.checkPartialSubsets = true;
     this.rng = new CvRNG(-1);
 }
Code example #17
File: GpuMat.cs Project: inohiroki/opencvsharp
 /// <summary>
 /// locates matrix header within a parent matrix.
 /// </summary>
 /// <param name="wholeSize"></param>
 /// <param name="ofs"></param>
 public void LocateROI(out CvSize wholeSize, out CvPoint ofs)
 {
     if (disposed)
     {
         throw new ObjectDisposedException("GpuMat");
     }
     GpuInvoke.GpuMat_locateROI(ptr, out wholeSize, out ofs);
 }
Code example #18
File: GpuMat.cs Project: inohiroki/opencvsharp
 /// <summary>
 ///
 /// </summary>
 /// <param name="size"></param>
 /// <param name="type"></param>
 /// <param name="data"></param>
 /// <param name="step"></param>
 public GpuMat(CvSize size, MatrixType type, IntPtr data, uint step)
 {
     ptr = GpuInvoke.GpuMat_new7(size, type, data, step);
     if (ptr == IntPtr.Zero)
     {
         throw new OpenCvSharpException();
     }
 }
Code example #19
 /// <summary>
 /// Constructor
 /// </summary>
 /// <param name="size">Size of the image to be processed</param>
 public ContoursFinder(CvSize size)
 {
     this.size = size;
     hsvImg    = new IplImage(size, BitDepth.U8, 3);
     hImg      = new IplImage(size, BitDepth.U8, 1);
     sImg      = new IplImage(size, BitDepth.U8, 1);
     vImg      = new IplImage(size, BitDepth.U8, 1);
     tmpImg    = new IplImage(size, BitDepth.U8, 1);
 }
Code example #20
File: ContoursFinder.cs Project: Goshik92/RingMeter
 /// <summary>
 /// Constructor
 /// </summary>
 /// <param name="size">Size of the image to be processed</param>
 public ContoursFinder(CvSize size)
 {
     this.size = size;
     hsvImg = new IplImage(size, BitDepth.U8, 3);
     hImg = new IplImage(size, BitDepth.U8, 1);
     sImg = new IplImage(size, BitDepth.U8, 1);
     vImg = new IplImage(size, BitDepth.U8, 1);
     tmpImg = new IplImage(size, BitDepth.U8, 1);
 }
Code example #21
 private static extern void cvCalcOpticalFlowPyrLK(
     IntPtr old, IntPtr curr, IntPtr oldPyr, IntPtr currPyr,
     [In, Out] CvPoint2D32f[] oldFeatures,
     [In, Out] CvPoint2D32f[] currFeatures,
     int numFeatures, CvSize winSize, int level,
     [In, Out] byte[] status,
     [In, Out] float[] errors,
     CvTermCriteria term,
     int flags);
Code example #22
 public static extern void calib3d_stereoRectify_InputArray(
     IntPtr cameraMatrix1, IntPtr distCoeffs1,
     IntPtr cameraMatrix2, IntPtr distCoeffs2,
     CvSize imageSize, IntPtr R, IntPtr T,
     IntPtr R1, IntPtr R2,
     IntPtr P1, IntPtr P2,
     IntPtr Q, int flags,
     double alpha, Size newImageSize,
     out Rect validPixROI1, out Rect validPixROI2);
Code example #23
        /// <summary>
        /// Updates the display.
        /// TODO: switch to using CoordinateMapper (March)
        /// </summary>
        /// <param name="frame"></param>
        private void UpdateDisplay(Frame frame)
        {
            Label[]        timeLabels = { Box1Timer, Box2Timer, Box3Timer, Box4Timer, Box5Timer };
            Image[]        images     = { Image1, Image2, Image3, Image4, Image5 };
            DrawingGroup[] drawings   = { drawingGroup1, drawingGroup2, drawingGroup3, drawingGroup4, drawingGroup5 };
            // Variables reused across iterations
            List <Dictionary <JointType, Point> > pointsList;
            List <Dictionary <JointType, Joint> > jointsList;
            List <Tuple <ulong, Point> >          idPointList;
            FormattedText fmt;

            for (int recordNo = 0; recordNo < frameSequence.recordNum; recordNo++)
            {
                images[recordNo].Source      = new BitmapImage(new Uri(frame.ColorImagePathList[recordNo]));
                timeLabels[recordNo].Content = frame.GetMotionData(recordNo).TimeStamp.ToString(@"ss\:fff");

                // Drawing
                using (DrawingContext dc = drawings[recordNo].Open())
                {
                    CvSize colorSize = frame.ColorSize[recordNo];
                    dc.DrawRectangle(Brushes.Transparent, null, new Rect(0.0, 0.0, colorSize.Width, colorSize.Height));
                    pointsList  = frame.GetBodyColorSpaceJoints(recordNo);
                    jointsList  = frame.GetBodyJoints(recordNo);
                    idPointList = frame.GetIdAndPosition(recordNo);

                    for (int user = 0; user < pointsList.Count(); user++)
                    {
                        // Draw the body
                        if (pointsList[user] != null && jointsList[user] != null)
                        {
                            this.DrawBody(pointsList[user], jointsList[user], dc);
                        }
                        // Draw the user ID
                        if (idPointList[user] != null)
                        {
                            ulong  userId = idPointList[user].Item1;
                            string text   = userId.ToString();
                            if (frameSequence.UserMapping.ContainsKey(userId))
                            {
                                text = frameSequence.UserMapping[userId].ToString();
                            }
                            // TODO: outlined text, see http://gushwell.ldblog.jp/archives/52312432.html
                            fmt = new FormattedText(text,
                                                    System.Globalization.CultureInfo.CurrentCulture,
                                                    System.Windows.FlowDirection.LeftToRight,
                                                    new Typeface("Arial"), 50.0, Brushes.Cyan
                                                    );
                            dc.DrawText(fmt, idPointList[user].Item2);
                        }
                    }
                    drawings[recordNo].ClipGeometry = new RectangleGeometry(new Rect(0.0, 0.0, colorSize.Width, colorSize.Height));
                }
            }
            // Slider and elapsed-time display
            this.PlaySlider.Value  = this.playingIndex;
            this.TimeLabel.Content = frame.Time.ToString(@"mm\:ss\:fff");
        }
Code example #24
        void drawUndistortedCornerFrame(CvMat displayMat, CvPoint2D32f[] corners, CvSize boardSize)
        {
            CvMat cornerMat = new CvMat(1, corners.Length, MatrixType.F32C2);

            CvEx.FillCvMat(cornerMat, corners.Select(x => new CvScalar(x.X, x.Y)).ToList());
            CvMat undistMat = CvEx.InitCvMat(cornerMat);

            Cv.UndistortPoints(cornerMat, undistMat, this.UndistortionData.CameraStruct.CreateCvMat(), this.UndistortionData.DistortStruct.CreateCvMat(true), null, this.UndistortionData.CameraStruct.CreateCvMat());
            CvEx.DrawChessboardCornerFrame(displayMat, boardSize, undistMat.Select(x => new CvPoint2D32f(x.Val0, x.Val1)).ToArray(), new CvScalar(216, 216, 216));
        }
Code example #25
 public static extern void calib3d_stereoRectify_array(
     double[,] cameraMatrix1,
     double[] distCoeffs1, int dc1Size,
     double[,] cameraMatrix2,
     double[] distCoeffs2, int dc2Size,
     CvSize imageSize,
     double[,] R, double[] T,
     double[,] R1, double[,] R2, double[,] P1, double[,] P2,
     double[,] Q, int flags, double alpha, Size newImageSize,
     out Rect validPixROI1, out Rect validPixROI2);
Code example #26
File: prototype.cs Project: khksjkhj1/photosh
        public void resize(double inputsize)
        {
            CvSize size = new CvSize((int)(src.Width * inputsize), (int)(src.Height * inputsize)); // scale both dimensions by inputsize

            dst = new IplImage(size, src.Depth, src.NChannels);

            Cv.Resize(src, dst, Interpolation.Cubic);

            pictureBoxIpl2.ImageIpl = dst;
        }
Code example #27
 public static extern double cvCalibrateCamera2(
     IntPtr object_points,
     IntPtr image_points,
     IntPtr point_counts,
     CvSize image_size,
     IntPtr camera_matrix,
     IntPtr distortion_coeffs,
     IntPtr rotation_vectors,
     IntPtr translation_vectors,
     int flags);        //,
Code example #28
        /// <summary>
        /// Constructor
        /// </summary>
        /// <param name="deviceId">ID of the camera from which frames will be captured</param>
        /// <param name="frameSize">Desired frame resolution</param>
        public WebCam(int deviceId, CvSize frameSize)
        {
            this.deviceId = deviceId;
            vi            = new VideoInput();
            vi.SetupDevice(deviceId, frameSize.Width, frameSize.Height);

            this.frameSize = new CvSize(vi.GetWidth(deviceId), vi.GetHeight(deviceId)); // read back the resolution actually set by the driver
            sum            = new IplImage(this.frameSize, BitDepth.F32, 3);
            tmp            = new IplImage(this.frameSize, BitDepth.U8, 3);
        }
Code example #29
        /// <summary>
        /// Constructor
        /// </summary>
        /// <param name="deviceId">ID of the camera used to acquire the image</param>
        /// <param name="frameSize">Size of the image to be processed</param>
        public ImageProcessingRoutine(int deviceId, CvSize frameSize)
        {
            Camera = new WebCam(deviceId, frameSize);
            Calibrator = new CameraCalibrator(Camera.FrameSize);
            Finder = new ContoursFinder(Camera.FrameSize);
            Transformer = new CoordinatesTransformer();

            routineThread = new Thread(routine);
            routineThread.IsBackground = true;
            routineThread.Start();
        }
Code example #30
        private void button1_Click_2(object sender, RoutedEventArgs e)
        {
            // Fixes below
            MotionDataHandler handler;
            string            path;

            if (openMotionData(out handler, out path))
            {
                CvMat resultMat = null;
                int   length    = handler.FrameCount;

                IEnumerable <CvMat> depthImages;
                Utility.LoadImages(handler.GetDepthImagePaths(), out depthImages);

                foreach (CvMat depthMat in depthImages)
                {
                    CvSize depthUserSize = new CvSize(depthMat.Cols, depthMat.Rows);
                    CvEx.InitCvMat(ref resultMat, depthMat, MatrixType.U8C3);
                    resultMat.Zero();
                    double          avgDepth = depthMat.Select(v => v.Val0).Where(v => v != 0).Average();
                    double          pDepth   = CvEx.Get2DSubPixel(depthMat, new CvPoint2D32f(_undistortion.CameraStruct.PrincipalX, _undistortion.CameraStruct.PrincipalY), 0) ?? 0;
                    List <double>[] diffs    = Enumerable.Range(0, depthUserSize.Width).Select(x => new List <double>()).ToArray();
                    unsafe
                    {
                        short *depthArr = depthMat.DataInt16;
                        for (int y = 0; y < depthUserSize.Height; y++)
                        {
                            int offset = y * depthUserSize.Width;
                            for (int x = 0; x < depthUserSize.Width - 1; x++)
                            {
                                short l = depthArr[offset + x];
                                short r = depthArr[offset + x + 1];
                                if (l != 0 && r != 0)
                                {
                                    double ll = Math.Log(l);
                                    double rl = Math.Log(r);
                                    diffs[x].Add(ll - rl);
                                }
                            }
                        }
                    }
                    double[] median = diffs.Select(x => x.Count > 0 ? CalcEx.GetMedian(x) : 0).ToArray();
                    double   max    = median.Select(x => Math.Abs(x)).Max();
                    for (int x = 0; x < depthUserSize.Width; x++)
                    {
                        resultMat.DrawLine(new CvPoint(x, 0), new CvPoint(x, resultMat.Rows), new CvScalar(Math.Max(median[x] / max * 255, 0), Math.Max(-median[x] / max * 255, 0), 0));
                    }
                    resultMat.PutText(avgDepth.ToString("0.00000"), new CvPoint(0, 20), new CvFont(FontFace.HersheyPlain, 1, 1), new CvScalar(255, 255, 255));
                    resultMat.PutText(pDepth.ToString("0.00000"), new CvPoint(0, 40), new CvFont(FontFace.HersheyPlain, 1, 1), new CvScalar(255, 255, 255));

                    putImage(resultMat, PixelFormats.Rgb24);
                }
            }
        }
Code example #31
        /// <summary>
        /// Constructor
        /// </summary>
        /// <param name="deviceId">ID of the camera used to acquire the image</param>
        /// <param name="frameSize">Size of the image to be processed</param>
        public ImageProcessingRoutine(int deviceId, CvSize frameSize)
        {
            Camera      = new WebCam(deviceId, frameSize);
            Calibrator  = new CameraCalibrator(Camera.FrameSize);
            Finder      = new ContoursFinder(Camera.FrameSize);
            Transformer = new CoordinatesTransformer();

            routineThread = new Thread(routine);
            routineThread.IsBackground = true;
            routineThread.Start();
        }
Code example #32
        public bool Undistortion(IplImage src, out IplImage dst, int panoramaImageWidth)
        {
            dst = null;

            if (src == null)
            {
                return(false);
            }

            IplImage dstTemp  = new IplImage(panoramaImageWidth, (int)Math.Max((panoramaImageWidth / 4 * (_r2 - _r1) / _r2), 1), BitDepth.U8, 3);
            bool     isUpdate = false;

            if (isUpdate = (_inputSize != src.Size || _outputSize != dstTemp.Size))
            {
                _inputSize  = src.Size;
                _outputSize = dstTemp.Size;
                _pointDictionary.Clear();
            }

            object          lockobj = new object();
            ParallelOptions opt     = new ParallelOptions()
            {
                MaxDegreeOfParallelism = Environment.ProcessorCount
            };

            Parallel.For(0, dstTemp.Height, opt, i =>
            {
                IplImage patch = new IplImage(1, 1, BitDepth.U8, 3);

                for (int x = 0; x < dstTemp.Width; ++x)
                {
                    if (isUpdate || _isUpdate)
                    {
                        lock (lockobj)
                        {
                            _pointDictionary[i * dstTemp.Width + x] = ConvertPolar(x, i);
                        }
                    }

                    Cv.GetRectSubPix(src, patch, _pointDictionary[i * dstTemp.Width + x]);
                    dstTemp.Set2D(dstTemp.Height - i - 1, dstTemp.Width - x - 1, patch.Get2D(0, 0));
                }

                patch.Dispose();
            });

            _isUpdate = false;

            dst = dstTemp.Clone();
            dstTemp.Dispose();

            return(true);
        }
Code example #33
 public static extern double calib3d_stereoCalibrate_InputArray(
     IntPtr[] objectPoints, int opSize,
     IntPtr[] imagePoints1, int ip1Size,
     IntPtr[] imagePoints2, int ip2Size,
     IntPtr cameraMatrix1,
     IntPtr distCoeffs1,
     IntPtr cameraMatrix2,
     IntPtr distCoeffs2,
     CvSize imageSize,
     IntPtr R, IntPtr T,
     IntPtr E, IntPtr F,
     CvTermCriteria criteria, int flags);
Code example #34
        private void OnClick_csv出力(object sender, EventArgs e)
        {
            if (合成画像 != null)
            {
                string 結果 = "";
                int x, y;
                int roi_w = 9;
                int roi_h = 9;

                CvSize roiSize = new CvSize(roi_w, roi_h);
                CvPoint roiPoint;
                for (x = 0; x < 合成画像.Width - roi_w; x++)
                {
                    System.Diagnostics.Debug.WriteLine(x + "\n" + 結果);

                    for (y = 0; y < 合成画像.Height - roi_h; y++)
                    {
                        string buff = "";
                        string type = 検査領域か判断(x,y,roi_w,roi_h);

                        if (type != "")//ちょっと高速化
                        {
                            roiPoint = new CvPoint(x, y);
                            Cv.SetImageROI(検査対象, new CvRect(roiPoint, roiSize));
                            if (type == "1") buff = csvフォーマットを取得(検査対象, roiSize, "1");
                            else if (type == "0") buff = csvフォーマットを取得(検査対象, roiSize, "0");
                        }
                        //if (checkBox_all.Checked)
                        //{
                        //    roiPoint = new CvPoint(x, y);
                        //    Cv.SetImageROI(検査対象, new CvRect(roiPoint, roiSize));
                        //    Cv.SetImageROI(マスク画像, new CvRect(roiPoint, roiSize));
                        //    if (マスク画像.Avg().Val0 == 0) buff = csvフォーマットを取得(検査対象, roiSize, "0");
                        //    else if (マスク画像.Avg().Val0 == 255) buff = csvフォーマットを取得(検査対象, roiSize, "1");
                        //}
                        //else if (checkBox_black.Checked)
                        //{
                        //    if (マスク画像.Avg().Val0 == 0) buff = csvフォーマットを取得(検査対象, roiSize, "0");
                        //}
                        //else
                        //{
                        //    if (マスク画像.Avg().Val0 == 255) buff = csvフォーマットを取得(検査対象, roiSize, "1");
                        //}

                        if(buff!="")結果 += buff + "\n";

                        Cv.ResetImageROI(マスク画像);
                        Cv.ResetImageROI(検査対象);
                    }
                }
                stringをcsv出力(結果,DateTime.Now.ToString("yy-MM-dd_")+this.Text);
            }
        }
Code example #35
        public ImagePreperation(CANdb candb)
        {
            ImgWidth  = 1280;
            ImgHeight = 720;

            IpiList         = new List <ImagePreperationItem>();
            ImageParameters = new int[3];

            // Get all signals from the candb instance; this list is bound to the dropdown in the ListView
            List <Signal> signals = candb.GetAllSignal();

            SignalList = new List <string>();
            // This dictionary converts a dropdown label to the actual signal name
            foreach (Signal s in signals)
            {
                SignalList.Add(s.ToString());
                SignalToNameDictionary.Add(s.ToString(), s.Name);
            }

            ImageDictionary  = new Dictionary <string, WriteableBitmap>();
            SignalDictionary = new Dictionary <string, string>();

            //Get current location of app
            string initPath = AppDomain.CurrentDomain.BaseDirectory;

            //Ask user to specify the folder of images
            FolderBrowserDialog fbd = new FolderBrowserDialog();

            fbd.SelectedPath = initPath;
            System.Windows.Forms.DialogResult result = fbd.ShowDialog();

            string dirPath = fbd.SelectedPath;

            string[] paths = Directory.GetFiles(dirPath);
            NameList = new List <string>(paths);

            // Create the IPI list with image and signal names.
            // The IPI list is bound to the ListView, so any change in the form directly updates values in the IPI list.
            foreach (string name in NameList)
            {
                // Resize the image so that it fits the window
                IplImage img     = new IplImage(name);
                CvSize   size    = new CvSize(427, 240);
                IplImage resized = new IplImage(size, img.Depth, img.NChannels);
                Cv.Resize(img, resized);
                // WriteableBitmap is compatible with the WPF Image control
                WriteableBitmap wb = WriteableBitmapConverter.ToWriteableBitmap(resized);
                ImageDictionary.Add(name, wb);
                IpiList.Add(new ImagePreperationItem(wb, name));
            }

            InitializeComponent();
        }
Code example #36
 public static extern double calib3d_stereoCalibrate_array(
     IntPtr[] objectPoints, int opSize1, int[] opSizes2,
     IntPtr[] imagePoints1, int ip1Size1, int[] ip1Sizes2,
     IntPtr[] imagePoints2, int ip2Size1, int[] ip2Sizes2,
     [In, Out] double[,] cameraMatrix1,
     [In, Out] double[] distCoeffs1, int dc1Size,
     [In, Out] double[,] cameraMatrix2,
     [In, Out] double[] distCoeffs2, int dc2Size,
     CvSize imageSize,
     IntPtr R, IntPtr T,
     IntPtr E, IntPtr F,
     CvTermCriteria criteria, int flags);
Code example #37
 public static extern float calib3d_rectify3Collinear_InputArray(
     IntPtr cameraMatrix1, IntPtr distCoeffs1,
     IntPtr cameraMatrix2, IntPtr distCoeffs2,
     IntPtr cameraMatrix3, IntPtr distCoeffs3,
     IntPtr[] imgpt1, int imgpt1Size,
     IntPtr[] imgpt3, int imgpt3Size,
     CvSize imageSize, IntPtr R12, IntPtr T12,
     IntPtr R13, IntPtr T13,
     IntPtr R1, IntPtr R2, IntPtr R3,
     IntPtr P1, IntPtr P2, IntPtr P3,
     IntPtr Q, double alpha, CvSize newImgSize,
     out CvRect roi1, out CvRect roi2, int flags);
Code example #38
File: FitLine.cs Project: 0sv/opencvsharp
 private CvPoint2D32f[] GetRandomPoints(int count, CvSize imageSize)
 {
     Random rand = new Random();
     CvPoint2D32f[] points = new CvPoint2D32f[count];
     double a = rand.NextDouble() + 0.5;
     for (int i = 0; i < points.Length; i++)
     {
         double x = rand.Next(imageSize.Width);
         double y = (x * a) + (rand.Next(100) - 50);
         points[i] = new CvPoint2D32f(x, y);
     }
     return points;
 }
Code example #39
        private IplImage ExtractSubImage(IplImage imgOrig, out CvRect pSubImageRect)
        {
            IplImage mainSubImage = null;

            // Middle band of the image: 7/9 of the width, 4/9 of the height, starting 3/9 from the top
            CvSize  subImageSize = new CvSize((Int32)((imgOrig.Size.Width / 9) * 7), (Int32)((imgOrig.Size.Height / 9) * 4));
            CvPoint point        = new CvPoint(0, (Int32)((imgOrig.Size.Height / 9) * 3));
            CvRect  subImageRect = new CvRect(point, subImageSize);

            pSubImageRect = subImageRect;
            mainSubImage  = GetSubImage(imgOrig, subImageRect);

            return(mainSubImage);
        }
Code example #40
File: Moments.cs Project: inohiroki/opencvsharp
        public Moments()
        {
            // (1) Load the image. For a 3-channel image, the COI must be set.
            using (IplImage srcImg = new IplImage(Const.ImageLenna, LoadMode.AnyColor | LoadMode.AnyDepth))
            {
                if (srcImg.NChannels == 3 && srcImg.COI == 0)
                {
                    srcImg.COI = 1;
                }
                // (2) Compute the image moments of the input image, up to the 3rd order
                CvMoments moments = new CvMoments(srcImg, false);
                srcImg.COI = 0;

                // (3) Compute moments and Hu moment invariants from the obtained CvMoments structure.
                double      spatialMoment = moments.GetSpatialMoment(0, 0);
                double      centralMoment = moments.GetCentralMoment(0, 0);
                double      normCMoment   = moments.GetNormalizedCentralMoment(0, 0);
                CvHuMoments huMoments     = new CvHuMoments(moments);

                // (4) Draw the obtained moments and Hu invariants onto the image as text
                using (CvFont font = new CvFont(FontFace.HersheySimplex, 1.0, 1.0, 0, 2, LineType.Link8))
                {
                    string[] text = new string[10];
                    text[0] = string.Format("spatial={0:F3}", spatialMoment);
                    text[1] = string.Format("central={0:F3}", centralMoment);
                    text[2] = string.Format("norm={0:F3}", normCMoment);
                    text[3] = string.Format("hu1={0:F10}", huMoments.Hu1);
                    text[4] = string.Format("hu2={0:F10}", huMoments.Hu2);
                    text[5] = string.Format("hu3={0:F10}", huMoments.Hu3);
                    text[6] = string.Format("hu4={0:F10}", huMoments.Hu4);
                    text[7] = string.Format("hu5={0:F10}", huMoments.Hu5);
                    text[8] = string.Format("hu6={0:F10}", huMoments.Hu6);
                    text[9] = string.Format("hu7={0:F10}", huMoments.Hu7);

                    CvSize textSize = font.GetTextSize(text[0]);
                    for (int i = 0; i < 10; i++)
                    {
                        srcImg.PutText(text[i], new CvPoint(10, (textSize.Height + 3) * (i + 1)), font, CvColor.Black);
                    }
                }

                // (5) Show the input image with the computed moments; exit when a key is pressed
                using (CvWindow window = new CvWindow("Image", WindowMode.AutoSize))
                {
                    window.ShowImage(srcImg);
                    Cv.WaitKey(0);
                }
            }
        }
Code example #41
        private CvPoint2D32f[] GetRandomPoints(int count, CvSize imageSize)
        {
            Random rand = new Random();

            CvPoint2D32f[] points = new CvPoint2D32f[count];
            double         a      = rand.NextDouble() + 0.5;

            for (int i = 0; i < points.Length; i++)
            {
                double x = rand.Next(imageSize.Width);
                double y = (x * a) + (rand.Next(100) - 50);
                points[i] = new CvPoint2D32f(x, y);
            }
            return(points);
        }
Code example #42
        private Point GetOverlapLocation(Bitmap screan)
        {
            IplImage ipltemplate = BitmapConverter.ToIplImage(template);
            IplImage iplScrean = BitmapConverter.ToIplImage(screan);

            // The result map holds one match score per template placement:
            // (W - w + 1) x (H - h + 1) for a WxH image and a wxh template.
            CvSize resSize = new CvSize(iplScrean.Width - ipltemplate.Width + 1,
                                        iplScrean.Height - ipltemplate.Height + 1);
            IplImage resImg = Cv.CreateImage(resSize, BitDepth.F32, 1);

            Cv.MatchTemplate(iplScrean, ipltemplate, resImg, MatchTemplateMethod.CCorrNormed);

            double minVal;
            double maxVal;
            CvPoint minLoc;
            CvPoint maxLoc;
            Cv.MinMaxLoc(resImg, out minVal, out maxVal, out minLoc, out maxLoc);
            return maxVal >= 0.99 ? new Point(maxLoc.X, maxLoc.Y) : new Point(0, 0);
        }
Code example #43
File: Sub.cs Project: DECOPON0220/HandRecog3D
    /* ------------------ */

    // Use this for initialization
    void Start()
    {
        // Select and configure the camera device
        cam.setDevice(index);

        // Initialize the HSV image
        CvSize WINDOW_SIZE = new CvSize(GlobalVar.CAMERA_WIDTH, GlobalVar.CAMERA_HEIGHT);
        h_img = Cv.CreateImage(WINDOW_SIZE, BitDepth.U8, 3);

        // Initialize the data storage array
        ps_arr = new int[GlobalVar.CAMERA_HEIGHT / GlobalVar.POINT_INTERVAL, GlobalVar.CAMERA_WIDTH / GlobalVar.POINT_INTERVAL];

        /*     For debugging     */
        CvSize D_WINDOW_SIZE = new CvSize(GlobalVar.CAMERA_WIDTH, GlobalVar.CAMERA_HEIGHT);
        d_img = Cv.CreateImage(D_WINDOW_SIZE, BitDepth.U8, 3);
        texture = new Texture2D(GlobalVar.CAMERA_WIDTH, GlobalVar.CAMERA_HEIGHT, TextureFormat.RGB24, false);
        GetComponent<Renderer>().material.mainTexture = texture;
        /* ------------------ */
    }
Code example #44
File: Main.cs Project: DECOPON0220/HandRecog3D
    /* ------------------------- */



    // Use this for initialization
    void Start()
    {
        // Variable declarations
        int x_window = GlobalVar.CAMERA_WIDTH;
        int y_window = GlobalVar.CAMERA_HEIGHT;

        // Select and configure the camera devices
        cam1.setDevice(0);  // horizontal
        cam2.setDevice(1);  // vertical

        // Initialize the HSV images
        CvSize WINDOW_SIZE = new CvSize(x_window, y_window);
        h_img1 = Cv.CreateImage(WINDOW_SIZE, BitDepth.U8, 3);
        h_img2 = Cv.CreateImage(WINDOW_SIZE, BitDepth.U8, 3);

        // Initialize the data storage arrays
        int x = x_window / GlobalVar.POINT_INTERVAL;
        int y = y_window / GlobalVar.POINT_INTERVAL;
        int z = y_window / GlobalVar.POINT_INTERVAL;
        hps_arr  = new int[y, x];
        vps_arr  = new int[z, x];
        ps_arr3D = new int[x, y, z];
        pl_arrXZ = new double[GlobalVar.VERTICE_NUM / 2, 2];
        pl_arrY  = new double[2];
        io_flag  = new int[x, y, z];    // outside (far): 0, outside (near): 1, inside: 2

        // Specify the 3D polygon
        refObj = GameObject.Find("Object");
        polygon = refObj.GetComponent<CreatePolygonMesh>();
        polygon.Init();

        // Initialize the observation point data
        init3DArr(ps_arr3D);
        initMFlag(io_flag);

        // Determine whether each observation point is inside or outside the shape
        polygon.getIODMonitoringPoint(io_flag);
        
        /*     For debugging (FPS)     */
        frameCount = 0;
        prevTime = 0.0f;
        /* ------------------------- */
    }
Code example #45
        string csvフォーマットを取得(IplImage src, CvSize size, string type)
        {
            string info = "";
            double 平均, 分散;

            平均 = src.Avg().Val0;
            double sum = 0, max = 0, min = 255;
            for (int x = 0; x < size.Width; x++)
                for (int y = 0; y < size.Height; y++)
                {
                    //CvScalar cs = Cv.Get2D(src, y, x);
                    double val = Cv.Get2D(src, y, x).Val0;
                    sum += Math.Pow(val - 平均, 2);
                    if (val > max) max = val;
                    if (val < min) min = val;
                }
            分散 = sum / (size.Width * size.Height);
            info = 平均.ToString("f")+","+分散.ToString("f")+","+max.ToString("f")+","+min.ToString("f")+","+type;

            return info;
        }
Code example #46
        void Awake()
        {
            _cap = new CvCapture(0);

            _capImage = _cap.QueryFrame();
            _capRgbImage = new IplImage(_capImage.Width, _capImage.Height, BitDepth.U8, 3);
            Debug.Log(string.Format("Capture info : size{0}", _capImage.Size));
            _capGrayImage0 = new IplImage(_capImage.Size, BitDepth.U8, 1);
            _capGrayImage1 = new IplImage(_capImage.Size, BitDepth.U8, 1);
            // Pyramid buffers sized as in the classic OpenCV LK sample: (width + 8) x (height / 3)
            _pyramidImage0 = new IplImage(new CvSize(_capImage.Width + 8, _capImage.Height / 3), BitDepth.U8, 1);
            _pyramidImage1 = new IplImage(new CvSize(_capImage.Width + 8, _capImage.Height / 3), BitDepth.U8, 1);
            _eigImage = new IplImage(_capImage.Size, BitDepth.F32, 1);
            _tmpImage = new IplImage(_capImage.Size, BitDepth.F32, 1);
            Cv.ConvertImage(_capImage, _capGrayImage0, 0);
            width = _capImage.Width;
            height = _capImage.Height;

            _opticalFlowWinSize = new CvSize(opticalFlowWinSize, opticalFlowWinSize);
            _opticalFlowCrit = new CvTermCriteria(CriteriaType.Iteration | CriteriaType.Epsilon, ofCritIterations, ofCritError);

            _prevTime = _currTime = UnityEngine.Time.time;
        }
Code example #47
File: Cv_E.cs Project: sanglin307/UnityOpenCV
#if LANG_JP
        /// <summary>
        /// 楕円弧をポリラインで近似する
        /// </summary>
        /// <param name="center">弧の中心</param>
        /// <param name="axes">楕円の軸の長さ</param>
        /// <param name="angle">楕円の回転角度</param>
        /// <param name="arc_start">楕円弧の開始角度</param>
        /// <param name="arc_end">楕円弧の終了角度</param>
        /// <param name="pts">この関数で塗りつぶされる点の配列</param>
        /// <param name="delta">ポリラインの連続した頂点間の角度,近似精度.出力される点の総数は最大で ceil((end_angle - start_angle)/delta) + 1.</param>
        /// <returns></returns>
#else
        /// <summary>
        /// Approximates elliptic arc with polyline
        /// </summary>
        /// <param name="center">Center of the arc. </param>
        /// <param name="axes">Half-sizes of the arc. See cvEllipse. </param>
        /// <param name="angle">Rotation angle of the ellipse in degrees. See cvEllipse. </param>
        /// <param name="arc_start">Starting angle of the elliptic arc. </param>
        /// <param name="arc_end">Ending angle of the elliptic arc. </param>
        /// <param name="pts">The array of points, filled by the function. </param>
        /// <param name="delta">Angle between the subsequent polyline vertices, approximation accuracy. So, the total number of output points will ceil((end_angle - start_angle)/delta) + 1 at max. </param>
        /// <returns>The function cvEllipse2Poly computes vertices of the polyline that approximates the specified elliptic arc. It is used by cvEllipse. It returns the numbers of output points.</returns>
#endif
        public static int Ellipse2Poly(CvPoint center, CvSize axes, int angle, int arc_start, int arc_end, out CvPoint[] pts, int delta)
        {
            int nb_pts = (int)Math.Ceiling(((arc_end - arc_start) / (float)delta) + 1);
            pts = new CvPoint[nb_pts];
            nb_pts = CvInvoke.cvEllipse2Poly(center, axes, angle, arc_start, arc_end, pts, delta);
            //pts = new CvPoint[nb_pts];
            //Array.ConstrainedCopy(pts2, 0, pts, 0, nb_pts);       
            return nb_pts;
        }
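A short usage sketch for Ellipse2Poly above (all values are illustrative):

        // Approximate a full, unrotated ellipse with axes 60x40 by a polyline
        // with one vertex every 10 degrees; count is the number of points produced.
        CvPoint[] pts;
        int count = Cv.Ellipse2Poly(new CvPoint(100, 100), new CvSize(60, 40), 0, 0, 360, out pts, 10);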
Code example #48
File: Cv_E.cs Project: sanglin307/UnityOpenCV
#if LANG_JP
        /// <summary>
        /// 枠だけの楕円,もしくは塗りつぶされた楕円を描画する
        /// </summary>
        /// <param name="img">楕円が描かれる画像.</param>
        /// <param name="box">描画したい楕円を囲む矩形領域.</param>
        /// <param name="color">楕円の色.</param>
        /// <param name="thickness">楕円境界線の幅.</param>
        /// <param name="line_type">楕円境界線の種類.</param>
        /// <param name="shift">矩形領域の頂点座標の小数点以下の桁を表すビット数.</param>
#else
        /// <summary>
        /// Draws simple or thick elliptic arc or fills ellipse sector
        /// </summary>
        /// <param name="img">Image. </param>
        /// <param name="box">The enclosing box of the ellipse drawn </param>
        /// <param name="color">Ellipse color. </param>
        /// <param name="thickness">Thickness of the ellipse boundary. </param>
        /// <param name="line_type">Type of the ellipse boundary</param>
        /// <param name="shift">Number of fractional bits in the box vertex coordinates. </param>
#endif
        public static void EllipseBox(CvArr img, CvBox2D box, CvScalar color, int thickness, LineType line_type, int shift)
        {
            if (img == null)
            {
                throw new ArgumentNullException("img");
            }

            CvSize axes = new CvSize
            {
                Width = (int)Math.Round(box.Size.Height * 0.5),
                Height = (int)Math.Round(box.Size.Width * 0.5)
            };
            Ellipse(img, box.Center, axes, box.Angle, 0, 360, color, thickness, line_type, shift);
        }
Code example #49
File: Cv_E.cs Project: sanglin307/UnityOpenCV
#if LANG_JP
        /// <summary>
        /// 枠だけの楕円,楕円弧,もしくは塗りつぶされた扇形の楕円を描画する
        /// </summary>
        /// <param name="img">楕円が描画される画像</param>
        /// <param name="center">楕円の中心</param>
        /// <param name="axes">楕円の軸の長さ</param>
        /// <param name="angle">回転角度</param>
        /// <param name="start_angle">楕円弧の開始角度</param>
        /// <param name="end_angle">楕円弧の終了角度</param>
        /// <param name="color">楕円の色</param>
        /// <param name="thickness">楕円弧の線の幅</param>
        /// <param name="line_type">楕円弧の線の種類</param>
        /// <param name="shift">中心座標と軸の長さの小数点以下の桁を表すビット数</param>
#else
        /// <summary>
        /// Draws simple or thick elliptic arc or fills ellipse sector
        /// </summary>
        /// <param name="img">Image. </param>
        /// <param name="center">Center of the ellipse. </param>
        /// <param name="axes">Length of the ellipse axes. </param>
        /// <param name="angle">Rotation angle. </param>
        /// <param name="start_angle">Starting angle of the elliptic arc. </param>
        /// <param name="end_angle">Ending angle of the elliptic arc. </param>
        /// <param name="color">Ellipse color. </param>
        /// <param name="thickness">Thickness of the ellipse arc. </param>
        /// <param name="line_type">Type of the ellipse boundary.</param>
        /// <param name="shift">Number of fractional bits in the center coordinates and axes' values. </param>
#endif
        public static void DrawEllipse(CvArr img, CvPoint center, CvSize axes, double angle, double start_angle, double end_angle, CvScalar color, int thickness, LineType line_type, int shift)
        {
            Ellipse(img, center, axes, angle, start_angle, end_angle, color, thickness, line_type, shift);
        }
Code example #50
File: Cv_E.cs Project: sanglin307/UnityOpenCV
#if LANG_JP
        /// <summary>
        /// 枠だけの楕円,楕円弧,もしくは塗りつぶされた扇形の楕円を描画する
        /// </summary>
        /// <param name="img">楕円が描画される画像</param>
        /// <param name="center">楕円の中心</param>
        /// <param name="axes">楕円の軸の長さ</param>
        /// <param name="angle">回転角度</param>
        /// <param name="start_angle">楕円弧の開始角度</param>
        /// <param name="end_angle">楕円弧の終了角度</param>
        /// <param name="color">楕円の色</param>
#else
        /// <summary>
        /// Draws simple or thick elliptic arc or fills ellipse sector
        /// </summary>
        /// <param name="img">Image. </param>
        /// <param name="center">Center of the ellipse. </param>
        /// <param name="axes">Length of the ellipse axes. </param>
        /// <param name="angle">Rotation angle. </param>
        /// <param name="start_angle">Starting angle of the elliptic arc. </param>
        /// <param name="end_angle">Ending angle of the elliptic arc. </param>
        /// <param name="color">Ellipse color. </param>
#endif
        public static void DrawEllipse(CvArr img, CvPoint center, CvSize axes, double angle, double start_angle, double end_angle, CvScalar color)
        {
            Ellipse(img, center, axes, angle, start_angle, end_angle, color, 1, LineType.Link8, 0);
        }
Code example #51
File: Cv_E.cs Project: sanglin307/UnityOpenCV
#if LANG_JP
        /// <summary>
        /// 枠だけの楕円,楕円弧,もしくは塗りつぶされた扇形の楕円を描画する
        /// </summary>
        /// <param name="img">楕円が描画される画像</param>
        /// <param name="center">楕円の中心</param>
        /// <param name="axes">楕円の軸の長さ</param>
        /// <param name="angle">回転角度</param>
        /// <param name="start_angle">楕円弧の開始角度</param>
        /// <param name="end_angle">楕円弧の終了角度</param>
        /// <param name="color">楕円の色</param>
        /// <param name="thickness">楕円弧の線の幅</param>
        /// <param name="line_type">楕円弧の線の種類</param>
        /// <param name="shift">中心座標と軸の長さの小数点以下の桁を表すビット数</param>
#else
        /// <summary>
        /// Draws simple or thick elliptic arc or fills ellipse sector
        /// </summary>
        /// <param name="img">Image. </param>
        /// <param name="center">Center of the ellipse. </param>
        /// <param name="axes">Length of the ellipse axes. </param>
        /// <param name="angle">Rotation angle. </param>
        /// <param name="start_angle">Starting angle of the elliptic arc. </param>
        /// <param name="end_angle">Ending angle of the elliptic arc. </param>
        /// <param name="color">Ellipse color. </param>
        /// <param name="thickness">Thickness of the ellipse arc. </param>
        /// <param name="line_type">Type of the ellipse boundary.</param>
        /// <param name="shift">Number of fractional bits in the center coordinates and axes' values. </param>
#endif
        public static void Ellipse(CvArr img, CvPoint center, CvSize axes, double angle, double start_angle, double end_angle, CvScalar color, int thickness, LineType line_type, int shift)
        {
            if (img == null)
            {
                throw new ArgumentNullException("img");
            }
            CvInvoke.cvEllipse(img.CvPtr, center, axes, angle, start_angle, end_angle, color, thickness, line_type, shift);
        }
Code example #52
 public static extern void HOGDescriptor_cell_size_set(IntPtr obj, CvSize value);
Code example #53
 public static extern void HOGDescriptor_block_stride_set(IntPtr obj, CvSize value);
Code example #54
 public static extern void HOGDescriptor_getDescriptors(IntPtr obj, IntPtr img, CvSize win_stride, IntPtr descriptors, [MarshalAs(UnmanagedType.I4)] Gpu.DescriptorFormat descr_format);
Code example #55
        public static extern void HOGDescriptor_detectMultiScale(IntPtr obj, IntPtr img, IntPtr found_locations,
            double hit_threshold, CvSize win_stride, CvSize padding, double scale, int group_threshold);
Code example #56
 public static extern void HOGDescriptor_detect(IntPtr obj, IntPtr img, IntPtr found_locations, double hit_threshold, CvSize win_stride, CvSize padding);
Code example #57
        public static extern IntPtr HOGDescriptor_new(CvSize win_size, CvSize block_size, CvSize block_stride, CvSize cell_size,
            int nbins, double winSigma, double threshold_L2Hys, bool gamma_correction, int nlevels);
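For reference, these CvSize parameters conventionally take the Dalal-Triggs HOG defaults; a hedged sketch (the non-geometry values mirror OpenCV's usual defaults and are assumptions here):

        // Classic HOG geometry: 64x128 detection window, 16x16 blocks,
        // 8x8 block stride, 8x8 cells, 9 orientation bins. Assumed defaults:
        // winSigma = -1 (auto), L2-Hys threshold = 0.2, gamma correction on, 64 levels.
        IntPtr hog = HOGDescriptor_new(
            new CvSize(64, 128), new CvSize(16, 16), new CvSize(8, 8),
            new CvSize(8, 8), 9, -1.0, 0.2, true, 64);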
Code example #58
#if LANG_JP
        /// <summary>
        /// 与えられた画像からオブジェクトを含む様な矩形領域を検出し,それらの領域を矩形の列として返す.
        /// </summary>
        /// <param name="image">この画像の中からオブジェクトを検出する</param>
        /// <param name="storage">オブジェクト候補の矩形が得られた場合に,その矩形列を保存するメモリストレージ</param>
        /// <param name="scaleFactor">スキャン毎に探索ウィンドウがスケーリングされる際のスケールファクタ. 例えばこの値が 1.1 ならば,ウィンドウが 10% 大きくなる</param>
        /// <param name="minNeighbors">(これから 1 を引いた値が)オブジェクトを構成する近傍矩形の最小数となる. min_neighbors-1 よりも少ない矩形しか含まないようなグループは全て棄却される. もし min_neighbors が 0 である場合,この関数はグループを一つも生成せず,候補となる矩形を全て返す.これはユーザがカスタマイズしたグループ化処理を適用したい場合に有用である. </param>
        /// <param name="flags">処理モード</param>
        /// <param name="minSize">最小ウィンドウサイズ.デフォルトでは分類器の学習に用いられたサンプルのサイズが設定される(顔検出の場合は,~20×20).</param>
        /// <returns>CvAvgCompを要素とするCvSeq</returns>
#else
        /// <summary>
        /// Finds rectangular regions in the given image that are likely to contain objects the cascade has been trained for and returns those regions as a sequence of rectangles.
        /// </summary>
        /// <param name="image">Image to detect objects in. </param>
        /// <param name="storage">Memory storage to store the resultant sequence of the object candidate rectangles. </param>
        /// <param name="scaleFactor">The factor by which the search window is scaled between the subsequent scans, for example, 1.1 means increasing window by 10%. </param>
        /// <param name="minNeighbors">Minimum number (minus 1) of neighbor rectangles that makes up an object. All the groups of a smaller number of rectangles than min_neighbors-1 are rejected. If min_neighbors is 0, the function does not any grouping at all and returns all the detected candidate rectangles, which may be useful if the user wants to apply a customized grouping procedure. </param>
        /// <param name="flags">Mode of operation. Currently the only flag that may be specified is CV_HAAR_DO_CANNY_PRUNING. If it is set, the function uses Canny edge detector to reject some image regions that contain too few or too much edges and thus can not contain the searched object. The particular threshold values are tuned for face detection and in this case the pruning speeds up the processing. </param>
        /// <param name="minSize">Minimum window size. By default, it is set to the size of samples the classifier has been trained on (~20×20 for face detection). </param>
        /// <returns></returns>
#endif
        public CvSeq HaarDetectObjects(CvArr image, CvMemStorage storage, double scaleFactor, int minNeighbors, HaarDetectionType flags, CvSize minSize)
        {
            return Cv.HaarDetectObjects(image, this, storage, scaleFactor, minNeighbors, flags, minSize);
        }
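A minimal usage sketch for the wrapper above (the cascade and image paths are placeholders):

        // Hypothetical usage; both file paths are illustrative.
        CvHaarClassifierCascade cascade = CvHaarClassifierCascade.FromFile("haarcascade_frontalface_alt.xml");
        using (IplImage image = new IplImage("people.png", LoadMode.GrayScale))
        using (CvMemStorage storage = new CvMemStorage())
        {
            CvSeq faces = cascade.HaarDetectObjects(image, storage, 1.1, 3,
                HaarDetectionType.DoCannyPruning, new CvSize(20, 20));
            Console.WriteLine("detected: {0}", faces.Total);
        }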
Code example #59
 public static extern IntPtr cvCreateVideoWriter([MarshalAs(UnmanagedType.LPStr)] string filename, int fourcc, double fps, CvSize frame_size, int is_color);
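A hedged sketch of calling this P/Invoke directly (the output path is a placeholder; in practice the managed CvVideoWriter wrapper is used instead, and frames are then written with the matching cvWriteFrame):

     // Hypothetical direct call; fourcc 0 requests a default/uncompressed codec.
     // Every frame written later must match the CvSize given here.
     IntPtr writer = cvCreateVideoWriter("out.avi", 0, 30.0, new CvSize(640, 480), 1);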
Code example #60
File: WebCam.cs Project: Goshik92/RingMeter
        /// <summary>
        /// Constructor
        /// </summary>
        /// <param name="deviceId">ID of the camera from which frames will be captured</param>
        /// <param name="frameSize">Desired frame resolution</param>
        public WebCam(int deviceId, CvSize frameSize)
        {
            this.deviceId = deviceId;
            vi = new VideoInput();
            vi.SetupDevice(deviceId, frameSize.Width, frameSize.Height);

            this.frameSize = new CvSize(vi.GetWidth(deviceId), vi.GetHeight(deviceId));
            sum = new IplImage(this.frameSize, BitDepth.F32, 3);
            tmp = new IplImage(this.frameSize, BitDepth.U8, 3);
        }