Example #1
      /*
      /// <summary>
      /// Create a LevMarqSparse solver
      /// </summary>
      public LevMarqSparse()
      {
         _ptr = CvInvoke.CvCreateLevMarqSparse();
      }*/

      /// <summary>
      /// Useful function to do simple bundle adjustment tasks
      /// </summary>
      /// <param name="points">Positions of points in global coordinate system (input and output), values will be modified by bundle adjustment</param>
      /// <param name="imagePoints">Projections of 3d points for every camera</param>
      /// <param name="visibility">Visibility of 3d points for every camera</param>
      /// <param name="cameraMatrix">Intrinsic matrices of all cameras (input and output), values will be modified by bundle adjustment</param>
      /// <param name="R">rotation matrices of all cameras (input and output), values will be modified by bundle adjustment</param>
      /// <param name="T">translation vector of all cameras (input and output), values will be modified by bundle adjustment</param>
      /// <param name="distCoeffcients">distortion coefficients of all cameras (input and output), values will be modified by bundle adjustment</param>
      /// <param name="termCrit">Termination criteria, a reasonable value will be (30, 1.0e-12) </param>
      public static void BundleAdjust(
         MCvPoint3D64f[] points, MCvPoint2D64f[][] imagePoints, int[][] visibility,
         Matrix<double>[] cameraMatrix, Matrix<double>[] R, Matrix<double>[] T, Matrix<double>[] distCoefficients, MCvTermCriteria termCrit)
      {
         using (Matrix<double> imagePointsMat = CvToolbox.GetMatrixFromPoints(imagePoints))
         using (Matrix<int> visibilityMat = CvToolbox.GetMatrixFromArrays(visibility))
         using (VectorOfMat cameraMatVec = new VectorOfMat())
         using (VectorOfMat rMatVec = new VectorOfMat())
         using (VectorOfMat tMatVec = new VectorOfMat())
         using (VectorOfMat distorMatVec = new VectorOfMat())
         {
            cameraMatVec.Push(cameraMatrix);
            rMatVec.Push(R);
            tMatVec.Push(T);
            distorMatVec.Push(distCoefficients);

            GCHandle handlePoints = GCHandle.Alloc(points, GCHandleType.Pinned);

            CvInvoke.CvLevMarqSparseAdjustBundle(
               cameraMatrix.Length,
               points.Length, handlePoints.AddrOfPinnedObject(),
               imagePointsMat, visibilityMat, cameraMatVec, rMatVec, tMatVec, distorMatVec, ref termCrit);

            handlePoints.Free();

         }
      }
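
A minimal, hypothetical call sketch for the method above, with one camera observing four points (outer array index = camera, matching the allocation pattern in DoBundleAdjust further below; all values are placeholders, and real use needs meaningful initial estimates for the native solver to converge):

      MCvPoint3D64f[] points = new MCvPoint3D64f[4];                    // initial 3D point estimates
      MCvPoint2D64f[][] imagePoints = { new MCvPoint2D64f[4] };         // projections seen by camera 0
      int[][] visibility = { new int[] { 1, 1, 1, 1 } };                // all four points visible
      Matrix<double>[] cameraMatrix = { new Matrix<double>(3, 3) };     // 3x3 intrinsics per camera
      Matrix<double>[] R = { new Matrix<double>(3, 3) };                // rotation per camera
      Matrix<double>[] T = { new Matrix<double>(3, 1) };                // translation per camera
      Matrix<double>[] distCoefficients = { new Matrix<double>(1, 5) }; // distortion per camera
      MCvTermCriteria termCrit = new MCvTermCriteria(30, 1.0e-12);
      LevMarqSparse.BundleAdjust(points, imagePoints, visibility, cameraMatrix, R, T, distCoefficients, termCrit);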
Example #2
 public static extern void cvCalibrationMatrixValues(
  IntPtr calibMatr,
  int imgWidth,
  int imgHeight,
  double apertureWidth,
  double apertureHeight,
  ref double fovx,
  ref double fovy,
  ref double focalLength,
  ref MCvPoint2D64f principalPoint,
  ref double pixelAspectRatio);
 /// <summary>
 /// Computes various useful camera (sensor/lens) characteristics using the computed camera calibration matrix, image frame resolution in pixels and the physical aperture size
 /// </summary>
 /// <param name="imgWidth">Image width in pixels</param>
 /// <param name="imgHeight">Image height in pixels</param>
 /// <param name="apertureWidth">Aperture width in realworld units (optional input parameter). Set it to 0 if not used</param>
 /// <param name="apertureHeight">Aperture width in realworld units (optional input parameter). Set it to 0 if not used</param>
 /// <param name="fovx">Field of view angle in x direction in degrees</param>
 /// <param name="fovy">Field of view angle in y direction in degrees </param>
 /// <param name="focalLength">Focal length in realworld units </param>
 /// <param name="principalPoint">The principal point in realworld units </param>
 /// <param name="pixelAspectRatio">The pixel aspect ratio ~ fy/f</param>
 public void GetIntrinsicMatrixValues(
    int imgWidth,
    int imgHeight,
    double apertureWidth,
    double apertureHeight,
    out double fovx,
    out double fovy,
    out double focalLength,
    out MCvPoint2D64f principalPoint,
    out double pixelAspectRatio)
 {
    fovx = 0;
    fovy = 0;
    focalLength = 0;
    principalPoint = new MCvPoint2D64f();
    pixelAspectRatio = 0;
    CvInvoke.cvCalibrationMatrixValues(_intrinsicMatrix.Ptr, imgWidth, imgHeight, apertureWidth, apertureHeight, ref fovx, ref fovy, ref focalLength, ref principalPoint, ref pixelAspectRatio);
 }
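
A hedged usage sketch, assuming this method belongs to an already-calibrated IntrinsicCameraParameters instance (as in Emgu CV 2.x); the 4.8 x 3.6 mm sensor size is purely illustrative:

    double fovx, fovy, focalLength, pixelAspectRatio;
    MCvPoint2D64f principalPoint;
    intrinsics.GetIntrinsicMatrixValues(640, 480, 4.8, 3.6,
       out fovx, out fovy, out focalLength, out principalPoint, out pixelAspectRatio);
    // Because a physical aperture size was supplied, focalLength and principalPoint
    // are returned in the same real-world units (here millimeters).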
Example #4
 internal static extern void CvEMPredict(
    IntPtr model,
    IntPtr samples,
    ref MCvPoint2D64f result,
    IntPtr probs);
Example #5
        public void TestContour()
        {
            //Application.EnableVisualStyles();
            //Application.SetCompatibleTextRenderingDefault(false);
            using (Image<Gray, Byte> img = new Image<Gray, Byte>(100, 100, new Gray()))
            {
                Rectangle rect = new Rectangle(10, 10, 80 - 10, 50 - 10);
                img.Draw(rect, new Gray(255.0), -1);
                //ImageViewer.Show(img);
                PointF pIn = new PointF(60, 40);
                PointF pOut = new PointF(80, 100);

                using (MemStorage stor = new MemStorage())
                {
                    Contour<Point> cs = img.FindContours(CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, CvEnum.RETR_TYPE.CV_RETR_LIST, stor);
                    Assert.AreEqual(cs.MCvContour.elem_size, Marshal.SizeOf(typeof(Point)));
                    Assert.AreEqual(rect.Width * rect.Height, cs.Area);

                    Assert.IsTrue(cs.Convex);
                    Assert.AreEqual(rect.Width * 2 + rect.Height * 2, cs.Perimeter);
                    Rectangle rect2 = cs.BoundingRectangle;
                    rect2.Width -= 1;
                    rect2.Height -= 1;
                    //rect2.Center.X -= 0.5;
                    //rect2.Center.Y -= 0.5;
                    Assert.IsTrue(rect2.Equals(rect));
                    Assert.AreEqual(cs.InContour(pIn), 100);
                    Assert.AreEqual(cs.InContour(pOut), -100);
                    Assert.AreEqual(cs.Distance(pIn), 10);
                    Assert.AreEqual(cs.Distance(pOut), -50);
                    img.Draw(cs, new Gray(100), new Gray(100), 0, 1);

                    MCvPoint2D64f rectangleCenter = new MCvPoint2D64f(rect.X + rect.Width / 2.0, rect.Y + rect.Height / 2.0);
                    MCvMoments moment = cs.GetMoments();
                    MCvPoint2D64f center = moment.GravityCenter;
                    Assert.AreEqual(center, rectangleCenter);
                }

                using (MemStorage stor = new MemStorage())
                using (Image<Gray, Byte> img2 = new Image<Gray, byte>(300, 200))
                {
                    Contour<Point> c = img2.FindContours(Emgu.CV.CvEnum.CHAIN_APPROX_METHOD.CV_CHAIN_APPROX_SIMPLE, Emgu.CV.CvEnum.RETR_TYPE.CV_RETR_LIST, stor);
                    Assert.AreEqual(c, null);
                }
            }

            int s1 = Marshal.SizeOf(typeof(MCvSeq));
            int s2 = Marshal.SizeOf(typeof(MCvContour));
            int sizeRect = Marshal.SizeOf(typeof(Rectangle));
            Assert.AreEqual(s1 + sizeRect + 4 * Marshal.SizeOf(typeof(int)), s2);
        }
Example #6
        public void TestLookup()
        {
            double[] b = new double[4] { 0, 1, 2, 3 };
            double[] a = new double[4] { 1, 3, 2, 0 };
            MCvPoint2D64f[] pts = new MCvPoint2D64f[b.Length];
            for (int i = 0; i < pts.Length; i++)
                pts[i] = new MCvPoint2D64f(b[i], a[i]);

            IEnumerable<MCvPoint2D64f> interPts = Toolbox.LinearInterpolate(pts, new double[2] { 1.5, 3.5 });
            IEnumerator<MCvPoint2D64f> enumerator = interPts.GetEnumerator();
            enumerator.MoveNext();
            Assert.AreEqual(1.5, enumerator.Current.x);
            Assert.AreEqual(2.5, enumerator.Current.y);
            enumerator.MoveNext();
            Assert.AreEqual(3.5, enumerator.Current.x);
            Assert.AreEqual(-1, enumerator.Current.y);
        }
Example #7
        public static MCvPoint2D64f[] GetImagePoints(Float3 rawPosition)
        {
            MCvPoint2D64f[] imgPts = new MCvPoint2D64f[4];

            float radiusPx = rawPosition.Z;

            imgPts[0] = new MCvPoint2D64f(rawPosition.X - radiusPx, rawPosition.Y - radiusPx);
            imgPts[1] = new MCvPoint2D64f(rawPosition.X + radiusPx, rawPosition.Y - radiusPx);
            imgPts[2] = new MCvPoint2D64f(rawPosition.X + radiusPx, rawPosition.Y + radiusPx);
            imgPts[3] = new MCvPoint2D64f(rawPosition.X - radiusPx, rawPosition.Y + radiusPx);

            return imgPts;
        }
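
A hypothetical call site for the helper above; Float3 is assumed to expose X, Y as the blob center in pixels and Z as its radius in pixels (the Utils class name follows the commented-out call in DoBundleAdjust below):

            Float3 tracked = new Float3(320f, 240f, 12f);        // assumed ctor: (x, y, radius)
            MCvPoint2D64f[] corners = Utils.GetImagePoints(tracked);
            // corners run top-left, top-right, bottom-right, bottom-left around the center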
Example #8
        public void TestXmlSerialization()
        {
            MCvPoint2D64f pt2d = new MCvPoint2D64f(12.0, 5.5);

            XmlDocument xdoc = Toolbox.XmlSerialize<MCvPoint2D64f>(pt2d);
            //Trace.WriteLine(xdoc.OuterXml);
            pt2d = Toolbox.XmlDeserialize<MCvPoint2D64f>(xdoc);

            CircleF cir = new CircleF(new PointF(0.0f, 1.0f), 2.8f);
            xdoc = Toolbox.XmlSerialize<CircleF>(cir);
            //Trace.WriteLine(xdoc.OuterXml);
            cir = Toolbox.XmlDeserialize<CircleF>(xdoc);

            Image<Bgr, Byte> img1 = new Image<Bgr, byte>("stuff.jpg");
            xdoc = Toolbox.XmlSerialize(img1);
            //Trace.WriteLine(xdoc.OuterXml);
            Image<Bgr, Byte> img2 = Toolbox.XmlDeserialize<Image<Bgr, Byte>>(xdoc);

            Byte[] a1 = img1.Bytes;
            Byte[] a2 = img2.Bytes;
            Assert.AreEqual(a1.Length, a2.Length);
            for (int i = 0; i < a1.Length; i++)
            {
                Assert.AreEqual(a1[i], a2[i]);
            }

            img1.Dispose();
            img2.Dispose();
        }
Example #9
 /// <summary>
 /// Computes various useful camera (sensor/lens) characteristics using the computed camera calibration matrix, image frame resolution in pixels and the physical aperture size
 /// </summary>
 /// <param name="cameraMatrix">The matrix of intrinsic parameters</param>
 /// <param name="imageSize">Image size in pixels</param>
 /// <param name="apertureWidth">Aperture width in real-world units (optional input parameter). Set it to 0 if not used</param>
 /// <param name="apertureHeight">Aperture width in real-world units (optional input parameter). Set it to 0 if not used</param>
 /// <param name="fovx">Field of view angle in x direction in degrees</param>
 /// <param name="fovy">Field of view angle in y direction in degrees </param>
 /// <param name="focalLength">Focal length in real-world units </param>
 /// <param name="principalPoint">The principal point in real-world units </param>
 /// <param name="aspectRatio">The pixel aspect ratio ~ fy/f</param>
 public static void CalibrationMatrixValues(
    IInputArray cameraMatrix, Size imageSize, double apertureWidth, double apertureHeight,
    ref double fovx, ref double fovy, ref double focalLength, ref MCvPoint2D64f principalPoint,
    ref double aspectRatio)
 {
    using (InputArray iaCameraMatrix = cameraMatrix.GetInputArray())
       cveCalibrationMatrixValues(
          iaCameraMatrix, ref imageSize, apertureWidth, apertureHeight, ref fovx, ref fovy, ref focalLength,
          ref principalPoint, ref aspectRatio);
 }
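
A usage sketch, assuming the wrapper is exposed as CvInvoke.CalibrationMatrixValues (Emgu CV 3.x style) and k holds a 3x3 camera matrix from a prior calibration:

    Mat k = new Mat(3, 3, DepthType.Cv64F, 1);
    // ... fill k with [fx 0 cx; 0 fy cy; 0 0 1] ...
    double fovx = 0, fovy = 0, focalLength = 0, aspectRatio = 0;
    MCvPoint2D64f principalPoint = new MCvPoint2D64f();
    CvInvoke.CalibrationMatrixValues(k, new Size(640, 480), 4.8, 3.6,
       ref fovx, ref fovy, ref focalLength, ref principalPoint, ref aspectRatio);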
Example #10
 private static extern void cveCalibrationMatrixValues(
    IntPtr cameraMatrix, ref Size imageSize, double apertureWidth, double apertureHeight,
    ref double fovx, ref double fovy, ref double focalLength, ref MCvPoint2D64f principalPoint,
    ref double aspectRatio);
Example #11
        public ImageAnalysis analyse(string fileName, int lLow, int lHigh, int aLow, int aHigh, int bLow, int bHigh)
        {
            ImageAnalysis analysis = new ImageAnalysis();

            Mat tempMat = new Mat(fileName);

            // pixel mask, erode x2, dilate x2
            //CvInvoke.GaussianBlur(mat, mat, new Size(5, 5), 1.5, 1.5);
            //CvInvoke.GaussianBlur(mat, mat, new Size(5, 5), 1.5, 1.5);
            //GetColorPixelMask(tempMat, tempMat, maskHueUpper, maskHueLower, maskSatUpper, maskSatLower, maskLumUpper, maskLumLower);
            GetLabColorPixelMask(tempMat, tempMat, lLow, lHigh, aLow, aHigh, bLow, bHigh);
            //tempMat.Save(fileName + "temp.jpg");

            CvInvoke.Erode(tempMat, tempMat, null, new Point(-1, -1), 2, BorderType.Constant, CvInvoke.MorphologyDefaultBorderValue);
            Mat temp = new Mat();

            CvInvoke.Dilate(tempMat, temp, null, new Point(-1, -1), 2, BorderType.Constant, CvInvoke.MorphologyDefaultBorderValue);
            tempMat = temp;

            //find largest contour
            Mat           result = new Mat(540, 720, DepthType.Cv8U, 1);
            int           largest_contour_index = 0;
            double        largest_area          = 0;
            VectorOfPoint largestContour;

            VectorOfVectorOfPoint contours = new VectorOfVectorOfPoint();
            Mat hierarchy = new Mat();

            CvInvoke.FindContours(tempMat, contours, hierarchy, RetrType.Tree, ChainApproxMethod.ChainApproxNone);

            if (contours.Size > 0)
            {
                for (int i = 0; i < contours.Size; i++)
                {
                    MCvScalar color = new MCvScalar(0, 0, 255);

                    double a = CvInvoke.ContourArea(contours[i], false);  //  Find the area of contour
                    if (a > largest_area)
                    {
                        largest_area          = a;
                        largest_contour_index = i;                //Store the index of largest contour
                    }

                    //CvInvoke.DrawContours(result, contours, largest_contour_index, new MCvScalar(255, 0, 0));
                }


                //draw largest contour
                CvInvoke.DrawContours(result, contours, largest_contour_index, new MCvScalar(255, 255, 255), 1, LineType.EightConnected, hierarchy);
                largestContour = new VectorOfPoint(contours[largest_contour_index].ToArray());


                Image<Bgr, Byte> tempImg = result.ToImage<Bgr, Byte>();

                //Find center point
                MCvMoments    m      = CvInvoke.Moments(largestContour, true);
                MCvPoint2D64f center = m.GravityCenter;
                //textBox1.AppendText("Center point: " + Math.Round(center.X, 3) + "px, " + Math.Round(center.Y, 3) + "px\n");
                tempImg.Draw(new Cross2DF(new PointF((float)center.X, (float)center.Y), 3, 3), new Bgr(0, 0, 255), 2);

                //Find Area
                double area = CvInvoke.ContourArea(largestContour);
                //textBox1.AppendText("Area: " + area + "px,     " + convertSqPxToSqMm(area) + "sq mm\n");

                //Find Bounding Rectangle
                RotatedRect rect    = CvInvoke.MinAreaRect(largestContour);
                float       width0  = rect.Size.Width;
                float       height0 = rect.Size.Height;

                float length = (height0 >= width0 ? height0 : width0);
                float width  = (height0 < width0 ? height0 : width0);

                tempImg.Draw(rect, new Bgr(255, 0, 0), 2);
                //textBox1.AppendText("Width: " + width + "px  Length: " + length + "px\n");
                //textBox1.AppendText("Width: " + convertPxToMm(width) + "mm  Length: " + convertPxToMm(length) + "mm\n");

                double ratio = Math.Round((length / width), 3);
                //textBox1.AppendText("Ratio (width:length): 1:" + ratio + "\n");

                //save and display
                tempImg.Save(fileName + "_after.bmp");
                tempMat = tempImg.Mat;

                analysis.Contours            = contours;
                analysis.LargestContourIndex = largest_contour_index;
                analysis.LargestContour      = largestContour;
                analysis.Center      = center;
                analysis.Area        = area;
                analysis.BoundingBox = rect;
                analysis.Length      = length;
                analysis.Width       = width;
                analysis.Ratio       = ratio;
                analysis.Result      = tempImg.ToBitmap();
            }


            return analysis;
        }
Example #12
 /// <summary>
 /// Predict the probability of the given <paramref name="samples"/>
 /// </summary>
 /// <param name="samples">The input samples</param>
 /// <param name="probs">Optional output of the posterior probabilities; should have the same # of rows as the <paramref name="samples"/></param>
 /// <returns>A two-element result: the likelihood logarithm value and the index of the most probable mixture component (per OpenCV's EM::predict2)</returns>
 public MCvPoint2D64f Predict(IInputArray samples, IOutputArray probs = null)
 {
    MCvPoint2D64f result = new MCvPoint2D64f();
    using (InputArray iaSamples = samples.GetInputArray())
    using (OutputArray oaProbs = probs == null ? OutputArray.GetEmpty() : probs.GetOutputArray())
       MlInvoke.CvEMPredict(
         _ptr,
         iaSamples,
         ref result,
         oaProbs);
    return result;
 }
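
A hedged sketch of calling Predict, assuming em is a trained Emgu.CV.ML.EM instance with two mixture components:

    using (Matrix<float> sample = new Matrix<float>(1, 2))       // one 2-D feature vector
    using (Matrix<float> posteriors = new Matrix<float>(1, 2))   // one probability per component
    {
       sample[0, 0] = 1.5f; sample[0, 1] = 2.0f;
       MCvPoint2D64f r = em.Predict(sample, posteriors);
       // Per OpenCV's EM::predict2, the second element of the result is the index
       // of the most probable mixture component for the sample.
    }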
Example #13
 internal static extern void cveEMPredict(
     IntPtr model,
     IntPtr samples,
     ref MCvPoint2D64f result,
     IntPtr probs);
Example #14
 /// <summary>
 /// The function is used to detect translational shifts that occur between two images. The operation takes advantage of the Fourier shift theorem for detecting the translational shift in the frequency domain. It can be used for fast image registration as well as motion estimation. 
 /// </summary>
 /// <param name="src1">Source floating point array (CV_32FC1 or CV_64FC1)</param>
 /// <param name="src2">Source floating point array (CV_32FC1 or CV_64FC1)</param>
 /// <param name="window">Floating point array with windowing coefficients to reduce edge effects (optional).</param>
 /// <param name="response">Signal power within the 5x5 centroid around the peak, between 0 and 1 </param>
 /// <returns>The translational shifts that occur between two images</returns>
 public static MCvPoint2D64f PhaseCorrelate(IInputArray src1, IInputArray src2, IInputArray window, out double response)
 {
    MCvPoint2D64f resultPt = new MCvPoint2D64f();
    response = 0;
    using (InputArray iaSrc1 = src1.GetInputArray())
    using (InputArray iaSrc2 = src2.GetInputArray())
    using (InputArray iaWindow = window == null ? InputArray.GetEmpty() : window.GetInputArray())
       cvePhaseCorrelate(iaSrc1, iaSrc2, iaWindow, ref response, ref resultPt);
    return resultPt;
 }
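
A hedged usage sketch, assuming the wrapper is exposed as CvInvoke.PhaseCorrelate and frameA/frameB are already-loaded single-channel Mats (phase correlation requires CV_32FC1 or CV_64FC1 input):

    double response;
    using (Mat a32 = new Mat())
    using (Mat b32 = new Mat())
    {
       frameA.ConvertTo(a32, DepthType.Cv32F);
       frameB.ConvertTo(b32, DepthType.Cv32F);
       MCvPoint2D64f shift = CvInvoke.PhaseCorrelate(a32, b32, null, out response);
       // shift gives the detected sub-pixel translation; a response close to 1
       // indicates a confident correlation peak.
    }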
Example #15
        public void DoBundleAdjust()
        {
            // N = cameras
            // M = point count
            //public static void BundleAdjust(MCvPoint3D64f[M] points,              // Positions of points in global coordinate system (input and output), values will be modified by bundle adjustment
            //                                MCvPoint2D64f[N][M] imagePoints,      // Projections of 3d points for every camera
            //                                int[N][M] visibility,                 // Visibility of 3d points for every camera
            //                                Matrix<double>[N] cameraMatrix,       // Intrinsic matrices of all cameras (input and output), values will be modified by bundle adjustment
            //                                Matrix<double>[N] R,                  // rotation matrices of all cameras (input and output), values will be modified by bundle adjustment
            //                                Matrix<double>[N] T,                  // translation vector of all cameras (input and output), values will be modified by bundle adjustment
            //                                Matrix<double>[N] distCoefficients,   // distortion coefficients of all cameras (input and output), values will be modified by bundle adjustment
            //                                MCvTermCriteria termCrit)             // Termination criteria, a reasonable value will be (30, 1.0e-12)
            _stopwatchGet.Restart();
            if (_cameras.Cameras.Count == 0) return;

            IEnumerable<CameraModel> orderedCameras = _cameras.Cameras.OrderBy(camera => camera.Calibration.Index);
            ObservableCollection<MotionControllerModel> controllers = _cameras.Cameras[0].Controllers;

            if (controllers.Count == 0) return;
            
            float radius = CameraCalibrationModel.SPHERE_RADIUS_CM;
            int cameraCount = _cameras.Cameras.Count;
            int pointCount = 8;
            MCvPoint3D64f[] objectPoints = new MCvPoint3D64f[controllers.Count * pointCount];
            MCvPoint2D64f[][] imagePoints = new MCvPoint2D64f[cameraCount][];
            int[][] visibility = new int[cameraCount][];
            Matrix<double>[] cameraMatrix = new Matrix<double>[cameraCount];
            Matrix<double>[] R = new Matrix<double>[cameraCount];
            Matrix<double>[] T = new Matrix<double>[cameraCount];
            Matrix<double>[] distCoefficients = new Matrix<double>[cameraCount];
            MCvTermCriteria termCrit = new MCvTermCriteria(30, 1.0e-12);

            int visible = 0;

            foreach (CameraModel camera in orderedCameras)
            {
                visibility[camera.Calibration.Index] = new int[controllers.Count * pointCount];
                cameraMatrix[camera.Calibration.Index] = camera.Calibration.IntrinsicParameters.IntrinsicMatrix.Clone();
                distCoefficients[camera.Calibration.Index] = camera.Calibration.IntrinsicParameters.DistortionCoeffs.Clone();
                imagePoints[camera.Calibration.Index] = new MCvPoint2D64f[controllers.Count * pointCount];
                R[camera.Calibration.Index] = camera.Calibration.RotationToWorld.Clone();
                T[camera.Calibration.Index] = camera.Calibration.TranslationToWorld.Clone();

                foreach (MotionControllerModel controller in controllers)
                {
                    float x = controller.RawPosition[camera].x;
                    float y = controller.RawPosition[camera].y;

                    //if (x == 0 && y == 0) return;

                    // controller is not visible
                    if (controller.TrackerStatus[camera] != PSMoveTrackerStatus.Tracking)
                    {
                        for (int i = 0; i < pointCount; i++)
                        {
                            visibility[camera.Calibration.Index][i + controller.Id * pointCount] = 0;
                        }
                    }
                    // controller is visible
                    else
                    {
                        Vector3[] history = controller.PositionHistory[camera];
                        float avgMagnitude = 0f;
                        for (int i = 1; i < history.Length; i++)
                        {
                            avgMagnitude += history[i].magnitude/(history.Length - 1);
                        }
                        // check deviation of newest position (flag if more than 5% off the rolling average)
                        if (Math.Abs(history[0].magnitude * 100 / avgMagnitude - 100) > 5)
                        {
                            for (int i = 0; i < pointCount; i++)
                            {
                                visibility[camera.Calibration.Index][i + controller.Id * pointCount] = 0;
                            }
                            continue;
                        }
                        visible++;
                        //double distance = 0.0;
                        int startIndex = controller.Id * pointCount;

                        //MCvPoint3D64f cameraPositionInWorld = new MCvPoint3D64f
                        //{
                        //    x = camera.Calibration.TranslationToWorld[0, 0],
                        //    y = camera.Calibration.TranslationToWorld[1, 0],
                        //    z = camera.Calibration.TranslationToWorld[2, 0]
                        //};

                        // set visibility for this controller's points (distance calculation left commented out)
                        for (int i = startIndex; i < startIndex + pointCount; i++)
                        {
                            visibility[camera.Calibration.Index][i] = 1;
                            //double d = CvHelper.GetDistanceToPoint(cameraPositionInWorld,objectPoints[i]);
                            //distance += d / pointCount;
                        }

                        // initialize object's world coordinates
                        // calculate as the average of each camera's transformed world coordinate
                        float wx = controller.WorldPosition[camera].x;
                        float wy = controller.WorldPosition[camera].y;
                        float wz = controller.WorldPosition[camera].z;


                        objectPoints[startIndex]     += new MCvPoint3D32f(wx - radius, wy - radius, wz - radius);
                        objectPoints[startIndex + 1] += new MCvPoint3D32f(wx + radius, wy - radius, wz - radius);
                        objectPoints[startIndex + 2] += new MCvPoint3D32f(wx + radius, wy + radius, wz - radius);
                        objectPoints[startIndex + 3] += new MCvPoint3D32f(wx - radius, wy + radius, wz - radius);

                        objectPoints[startIndex + 4] += new MCvPoint3D32f(wx - radius, wy + radius, wz + radius);
                        objectPoints[startIndex + 5] += new MCvPoint3D32f(wx + radius, wy + radius, wz + radius);
                        objectPoints[startIndex + 6] += new MCvPoint3D32f(wx + radius, wy - radius, wz + radius);
                        objectPoints[startIndex + 7] += new MCvPoint3D32f(wx - radius, wy - radius, wz + radius);
                        
                        //imagePoints[scvm.Camera.Calibration.Index] = Utils.GetImagePoints(mcvm.MotionController.RawPosition[scvm.Camera]);
                        imagePoints[camera.Calibration.Index] = Array.ConvertAll(camera.Calibration.ObjectPointsProjected, CvHelper.PointFtoPoint2D);
                    }
                } // foreach controller
            } // foreach camera

            if (visible == 0) return;

            // average object points
            for (int i = 0; i < objectPoints.Length; i++)
            {
                objectPoints[i].x /= visible;
                objectPoints[i].y /= visible;
                objectPoints[i].z /= visible;
            }
            // calculate object's middle
            float prex = 0, prey = 0, prez = 0;
            for (int i = 0; i < objectPoints.Length; i++)
            {
                prex += (float)objectPoints[i].x / objectPoints.Length;
                prey += (float)objectPoints[i].y / objectPoints.Length;
                prez += (float)objectPoints[i].z / objectPoints.Length;
            }
            _stopwatchBA.Restart();
            //LevMarqSparse.BundleAdjust(objectPoints, imagePoints, visibility, cameraMatrix, R, T, distCoefficients, termCrit);
            _stopwatchBA.Stop();
            _stopwatchSet.Restart();

            // check for calculation errors
            for (int i = 0; i < objectPoints.Length; i++)
            {
                if (double.IsNaN(objectPoints[i].x)) return;
                if (double.IsNaN(objectPoints[i].y)) return;
                if (double.IsNaN(objectPoints[i].z)) return;
            }

            // save changed matrices
            foreach (CameraModel camera in orderedCameras)
            {
                if (visibility[camera.Calibration.Index][0] == 1)
                {
                    //RotationVector3D rot1 = new RotationVector3D();
                    //rot1.RotationMatrix = camera.Calibration.RotationToWorld;

                    //RotationVector3D rot2 = new RotationVector3D();
                    //rot2.RotationMatrix = R[camera.Calibration.Index];

                    //Console.WriteLine((int)(rot1[0, 0] * (180 / Math.PI)) + " " + (int)(rot2[0, 0] * (180 / Math.PI)));
                    //Console.WriteLine((int)(rot1[1, 0] * (180 / Math.PI)) + " " + (int)(rot2[1, 0] * (180 / Math.PI)));
                    //Console.WriteLine((int)(rot1[2, 0] * (180 / Math.PI)) + " " + (int)(rot2[2, 0] * (180 / Math.PI)) + Environment.NewLine);

                    //camera.Calibration.IntrinsicParameters.IntrinsicMatrix = cameraMatrix[camera.Calibration.Index];
                    //camera.Calibration.RotationToWorld = R[camera.Calibration.Index];
                    //camera.Calibration.TranslationToWorld = T[camera.Calibration.Index];
                    //camera.Calibration.IntrinsicParameters.DistortionCoeffs = distCoefficients[camera.Calibration.Index];

                    //camera.Calibration.XAngle = (int)(rot2[0, 0] * (180 / Math.PI));
                    //camera.Calibration.YAngle = (int)(rot2[1, 0] * (180 / Math.PI));
                    //camera.Calibration.ZAngle = (int)(rot2[2, 0] * (180 / Math.PI));
                }
            }

            // calculate object's middle
            float preCenterX = 0, preCenterY = 0, preCenterZ = 0;
            for (int i = 0; i < objectPoints.Length; i++)
            {
                preCenterX += (float)objectPoints[i].x / objectPoints.Length;
                preCenterY += (float)objectPoints[i].y / objectPoints.Length;
                preCenterZ += (float)objectPoints[i].z / objectPoints.Length;
            }
            Vector3 prePosition = new Vector3(preCenterX, -preCenterY, preCenterZ);

            if (prePosition != _positionHistory[0])
            {
                for (int i = _positionHistory.Length - 1; i > 0; --i)
                {
                    _positionHistory[i] = _positionHistory[i - 1];
                }
                _positionHistory[0] = prePosition;
            }

            //Vector3 avgPosition = Vector3.zero;
            //for (int i = 0; i < _positionHistory.Length; i++)
            //{
            //    avgPosition += _positionHistory[i] / _positionHistory.Length;
            //}

            // row 0 of the Kalman results holds the prediction, row 1 the correction / estimate

            Matrix<float> kalmanResults = FilterPoints(_kalmanXYZ, prePosition.x, prePosition.y, prePosition.z);

            Vector3 kalmanPosition = new Vector3(kalmanResults[1,0], kalmanResults[1,1], kalmanResults[1,2]);
            _cameras.Position = kalmanPosition;

            _stopwatchGet.Stop();
            _stopwatchSet.Stop();
            for (int i = 0; i < 4; i++)
            {
                CameraModel camera = _cameras.Cameras[i];
                float xr = controllers[0].RawPosition[camera].x;
                float yr = controllers[0].RawPosition[camera].y;
                float zr = controllers[0].RawPosition[camera].z;
                float xc = controllers[0].CameraPosition[camera].x;
                float yc = controllers[0].CameraPosition[camera].y;
                float zc = controllers[0].CameraPosition[camera].z;
                string str = String.Format(new CultureInfo("en-US"), "{0},{1},{2},{3},{4},{5},{6},{7},{8}",
                    iteration,
                    xr,
                    yr,
                    zr,
                    PsMoveApi.psmove_tracker_distance_from_radius(camera.Handle, controllers[0].RawPosition[camera].z),
                    xc,
                    yc,
                    zc,
                    Math.Sqrt(xc * xc + yc * yc + zc * zc)
                    );
                if (camera.Calibration.Index == 0)
                {
                    if (csv0.Count == 0 || !csv0[csv0.Count - 1].Contains(zr.ToString(new CultureInfo("en-US"))))
                        csv0.Add(str);
                }
                else if (camera.Calibration.Index == 1)
                {
                    if (csv1.Count == 0 || !csv1[csv1.Count - 1].Contains(zr.ToString(new CultureInfo("en-US"))))
                        csv1.Add(str);
                }
                else if (camera.Calibration.Index == 2)
                {
                    if (csv2.Count == 0 || !csv2[csv2.Count - 1].Contains(zr.ToString(new CultureInfo("en-US"))))
                        csv2.Add(str);
                }
                else if (camera.Calibration.Index == 3)
                {
                    if (csv3.Count == 0 || !csv3[csv3.Count - 1].Contains(zr.ToString(new CultureInfo("en-US"))))
                        csv3.Add(str);
                }
            }
            csvTime.Add(String.Format(new CultureInfo("en-US"), "{0},{1},{2},{3}",
                    iteration,
                    _stopwatchGet.ElapsedMilliseconds,
                    _stopwatchBA.ElapsedMilliseconds,
                    _stopwatchSet.ElapsedMilliseconds));
            string strBA = String.Format(new CultureInfo("en-US"), "{0},{1},{2},{3},{4},{5},{6},{7},{8}",
                iteration,
                prePosition.x,
                prePosition.y,
                prePosition.z,
                Math.Sqrt(prePosition.x * prePosition.x + prePosition.y * prePosition.y + prePosition.z * prePosition.z),
                kalmanPosition.x,
                kalmanPosition.y,
                kalmanPosition.z,
                Math.Sqrt(kalmanPosition.x * kalmanPosition.x + kalmanPosition.y * kalmanPosition.y + kalmanPosition.z * kalmanPosition.z));
            if (csvBA.Count == 0 || !csvBA[csvBA.Count - 1].Contains(prePosition.x.ToString(new CultureInfo("en-US"))))
                csvBA.Add(strBA);
            iteration++;
            if (csvBA.Count == 100)
            {
                File.WriteAllLines(@"C:\\Users\\Johannes\\Documents\\GitHub\\Thesis\\Source\\avg_time.csv", csvTime);
                File.WriteAllLines(@"C:\\Users\\Johannes\\Documents\\GitHub\\Thesis\\Source\\distance.csv", csvBA);
                File.WriteAllLines(@"C:\\Users\\Johannes\\Documents\\GitHub\\Thesis\\Source\\distance0.csv", csv0);
                File.WriteAllLines(@"C:\\Users\\Johannes\\Documents\\GitHub\\Thesis\\Source\\distance1.csv", csv1);
                File.WriteAllLines(@"C:\\Users\\Johannes\\Documents\\GitHub\\Thesis\\Source\\distance2.csv", csv2);
                File.WriteAllLines(@"C:\\Users\\Johannes\\Documents\\GitHub\\Thesis\\Source\\distance3.csv", csv3);
            }
        }
Example #16
File: Map.cs Project: ersinkecis/EmguCV
        /*
         * /// <summary>
         * /// Create a new Map using the specific image and the rectangle area
         * /// </summary>
         * /// <param name="image">The image of this map</param>
         * /// <param name="area">The area of this map</param>
         * public Map(Image<TColor, TDepth> image, RectangleF area)
         * : base(image.Size)
         * {
         * image.CopyTo(this);
         * _area = area;
         * _resolution = new PointF( area.Width / image.Width, area.Height / image.Height);
         * }*/

        //private delegate Point PointTransformationFunction(PointF point);

        /// <summary>
        /// Map a point to a position in the internal image
        /// </summary>
        /// <param name="pt">The point on the map</param>
        /// <returns>The point on the image</returns>
        public Point MapPointToImagePoint(MCvPoint2D64f pt)
        {
            return new Point(
                       (int)Math.Round((pt.X - Area.Left) / Resolution.X),
                       (int)Math.Round((pt.Y - Area.Top) / Resolution.Y));
        }
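
A brief hypothetical call, assuming map is a Map instance whose Area and Resolution are already set:

            Point pixel = map.MapPointToImagePoint(new MCvPoint2D64f(12.5, -3.0));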
Example #17
File: MapShift.cs Project: v5chn/emgucv
 internal static extern IntPtr cveMapShiftCreate(ref MCvPoint2D64f shift, ref IntPtr map);
Example #18
File: MapShift.cs Project: v5chn/emgucv
 /// <summary>
 /// Create a transformation that consists of a simple displacement
 /// </summary>
 /// <param name="shift">The displacement of the transformation.</param>
 public MapShift(MCvPoint2D64f shift)
 {
     _ptr = RegInvoke.cveMapShiftCreate(ref shift, ref _mapPtr);
 }
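
A hedged construction sketch; the resulting map represents a pure translation of (10, 5) pixels and is meant to be handed to the registration routines in Emgu's Reg module:

     MCvPoint2D64f displacement = new MCvPoint2D64f(10, 5);
     using (MapShift shift = new MapShift(displacement))
     {
         // pass "shift" to an Emgu.CV.Reg pipeline, e.g. as an initial mapper guess
     }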
Example #19
 private static double Dist(MCvPoint2D64f a, MCvPoint2D64f b)
 {
     return Math.Sqrt(Math.Pow(b.X - a.X, 2) + Math.Pow(b.Y - a.Y, 2));
 }
Example #20
 private static extern void cvePhaseCorrelate(IntPtr src1, IntPtr src2, IntPtr window, ref double response, ref MCvPoint2D64f result);
Example #21
        private static Matrix CreateProjectionFrom(Size imageSize, MCvPoint2D64f principalPoint,
                                                  double focalLength, double near, double far)
        {
            var l = near / focalLength * -principalPoint.x;
            var r = near / focalLength * (imageSize.Width - principalPoint.x);
            var t = near / focalLength * principalPoint.y;
            var b = near / focalLength * (principalPoint.y - imageSize.Height);

            return Matrix.CreatePerspectiveOffCenter((float)l, (float)r, (float)b, (float)t, (float)near, (float)far);            
        }
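
A short hypothetical call; the principal point and focal length (in pixels) are assumed to come from a prior camera calibration, and Matrix.CreatePerspectiveOffCenter is the XNA/MonoGame helper, so near and far are in world units:

            MCvPoint2D64f pp = new MCvPoint2D64f(320, 240);
            Matrix projection = CreateProjectionFrom(new Size(640, 480), pp, 525.0, 0.1, 100.0);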