/// <summary>
/// Initializes Kalman tracking data for the 2D cursor: a 4-element state
/// [x, y, vx, vy] seeded from the current cursor position, a
/// constant-velocity transition model, and fixed noise covariances.
/// </summary>
public SyntheticData()
{
    state = new Matrix<float>(4, 1);
    state[0, 0] = Cursor.Position.X; // x-pos
    state[1, 0] = Cursor.Position.Y; // y-pos
    state[2, 0] = 0f; // x-velocity
    state[3, 0] = 0f; // y-velocity

    // Constant-velocity model: pos' = pos + vel; vel' = vel.
    transitionMatrix = new Matrix<float>(new float[,]
    {
        {1, 0, 1, 0},
        {0, 1, 0, 1},
        {0, 0, 1, 0},
        {0, 0, 0, 1}
    });

    // Only position (x, y) is observed, not velocity.
    measurementMatrix = new Matrix<float>(new float[,]
    {
        { 1, 0, 0, 0 },
        { 0, 1, 0, 0 }
    });
    // FIX: removed a redundant measurementMatrix.SetIdentity() call that
    // immediately rewrote the exact values the initializer above already set
    // (ones on the diagonal of the 2x4 matrix, zeros elsewhere).

    processNoise = new Matrix<float>(4, 4); // sized to match the transition matrix
    processNoise.SetIdentity(new MCvScalar(1.0e-4)); // smaller value = more resistance to noise

    measurementNoise = new Matrix<float>(2, 2); // sized to match the measurement vector
    measurementNoise.SetIdentity(new MCvScalar(1.5e-1));

    errorCovariancePost = new Matrix<float>(4, 4); // sized to match the transition matrix
    errorCovariancePost.SetIdentity();
}
/// <summary>
/// Initializes Kalman tracking data with a zeroed 4-element state
/// [x, y, vx, vy], a constant-velocity transition model, and fixed
/// noise covariances.
/// </summary>
public SyntheticData()
{
    state = new Matrix<float>(4, 1);
    state[0, 0] = 0f; // x-pos
    state[1, 0] = 0f; // y-pos
    state[2, 0] = 0f; // x-velocity
    state[3, 0] = 0f; // y-velocity

    // Constant-velocity model over [x-pos, y-pos, x-velocity, y-velocity].
    transitionMatrix = new Matrix<float>(new float[,]
    {
        {1, 0, 1, 0},
        {0, 1, 0, 1},
        {0, 0, 1, 0},
        {0, 0, 0, 1}
    });

    // Only position (x, y) is observed, not velocity.
    measurementMatrix = new Matrix<float>(new float[,]
    {
        { 1, 0, 0, 0 },
        { 0, 1, 0, 0 }
    });
    // FIX: removed a redundant measurementMatrix.SetIdentity() call that
    // immediately rewrote the exact values the initializer above already set.

    processNoise = new Matrix<float>(4, 4); // linked to the size of the transition matrix
    processNoise.SetIdentity(new MCvScalar(1.0e-4)); // the smaller the value the more resistance to noise

    measurementNoise = new Matrix<float>(2, 2); // fixed according to input data
    measurementNoise.SetIdentity(new MCvScalar(1.0e-1));

    errorCovariancePost = new Matrix<float>(4, 4); // linked to the size of the transition matrix
    errorCovariancePost.SetIdentity();
}
/// <summary>
/// Builds a 3x3 homogeneous 2D translation matrix for the offset (x, y).
/// </summary>
/// <param name="x">Translation along the x axis.</param>
/// <param name="y">Translation along the y axis.</param>
/// <returns>Identity matrix with (x, y) in the last column.</returns>
public static Matrix<double> Get2DTranslationMatrix(double x, double y)
{
    Matrix<double> m = new Matrix<double>(3, 3);
    m.SetIdentity();
    // BUG FIX: the translation column of a 3x3 homogeneous matrix is index 2;
    // the original wrote m[0, 3] / m[1, 3], which is out of range for a
    // 3-column matrix (compare the correct 4x4 Get3DTranslationMatrix).
    m[0, 2] = x;
    m[1, 2] = y;
    return m;
}
/// <summary>Inverting the identity matrix must yield the identity matrix.</summary>
public void TestInvert()
{
    Matrix<Single> identity = new Matrix<Single>(3, 3);
    Matrix<Single> inverted = new Matrix<Single>(3, 3);
    identity.SetIdentity();

    CvInvoke.Invert(identity, inverted, Emgu.CV.CvEnum.DecompMethod.LU);

    EmguAssert.IsTrue(identity.Equals(inverted));
}
/// <summary>Solving I * x = b with LU decomposition must return x == b.</summary>
public void TestSolve()
{
    Matrix<Single> lhs = new Matrix<Single>(3, 3);
    lhs.SetIdentity();

    Matrix<Single> rhs = new Matrix<Single>(new float[,] { { 0.1f }, { 0.2f }, { 0.5f } });
    Matrix<Single> solution = new Matrix<float>(3, 1);

    CvInvoke.Solve(lhs, rhs, solution, CvEnum.DecompMethod.LU);

    // With an identity left-hand side the solution equals the right-hand side.
    for (int row = 0; row < 3; row++)
    {
        EmguAssert.AreEqual(rhs[row, 0], solution[row, 0]);
    }
}
/// <summary>
/// Embeds a square matrix into a homogeneous matrix one dimension larger:
/// the input is copied into the upper-left corner and the extra last
/// row/column come from the identity.
/// </summary>
/// <param name="matrix">A square matrix of size at least 2x2.</param>
/// <returns>The (n+1)x(n+1) homogeneous embedding of the input.</returns>
/// <exception cref="ArgumentException">
/// Thrown when the input is not square or is smaller than 2x2.
/// </exception>
public static Matrix<double> ConvertToHomogenous(Matrix<double> matrix)
{
    if (matrix.Rows < 2 || matrix.Cols < 2 || (matrix.Rows != matrix.Cols))
    {
        // FIX: the condition enforces squareness, not symmetry; the old
        // message incorrectly demanded a "symmetric" matrix.
        throw new ArgumentException("square matrix of size at least 2x2 expected");
    }
    Matrix<double> m = new Matrix<double>(matrix.Rows + 1, matrix.Cols + 1);
    m.SetIdentity();
    for (int row = 0; row < matrix.Rows; row++)
    {
        for (int col = 0; col < matrix.Cols; col++)
        {
            m[row, col] = matrix[row, col];
        }
    }
    return m;
}
private bool isInitialized; // true if any data has been fed

/// <summary>
/// Creates a constant-velocity Kalman filter for <paramref name="variables"/>
/// measured quantities. The dynamic state holds each measured variable plus
/// its velocity (2 * variables entries); only the variables themselves are
/// observed by the measurement matrix.
/// </summary>
/// <param name="variables">Number of measured (observable) variables.</param>
public KalmanFilter(int variables)
{
    variablesCount = variables;
    int measurementVariables = variables;
    int dynamicVariables = variables * 2; // state layout: [values..., velocities...]

    // Initial state: all zeros.
    float[] state = new float[dynamicVariables];
    for (int i = 0; i < dynamicVariables; ++i)
        state[i] = 0.0f;

    // Transition matrix: identity plus a 1 coupling each value to its
    // velocity (value_i' = value_i + velocity_i; velocity_i' = velocity_i).
    Matrix<float> transitionMatrix = new Matrix<float>(dynamicVariables, dynamicVariables);
    transitionMatrix.SetZero();
    for (int i = 0; i < dynamicVariables; ++i)
    {
        transitionMatrix[i, i] = 1.0f;
        if (i >= measurementVariables)
            transitionMatrix[i - measurementVariables, i] = 1;
    }

    // Measurement picks out the value entries only (not the velocities).
    Matrix<float> measurementMatrix = new Matrix<float>(measurementVariables, dynamicVariables);
    measurementMatrix.SetZero();
    for (int i = 0; i < measurementVariables; ++i)
        measurementMatrix[i, i] = 1.0f;

    Matrix<float> processNoise = new Matrix<float>(dynamicVariables, dynamicVariables);
    processNoise.SetIdentity(new MCvScalar(1)); // earlier tuning used 1.0e-4
    Matrix<float> measurementNoise = new Matrix<float>(measurementVariables, measurementVariables);
    measurementNoise.SetIdentity(new MCvScalar(4)); // earlier tuning used 1.0e-1
    Matrix<float> errorCovariancePost = new Matrix<float>(dynamicVariables, dynamicVariables);
    errorCovariancePost.SetIdentity();

    // Wire everything into the underlying Emgu Kalman object.
    kalman = new Kalman(dynamicVariables, measurementVariables, 0);
    kalman.CorrectedState = new Matrix<float>(state);
    kalman.TransitionMatrix = transitionMatrix;
    kalman.MeasurementNoiseCovariance = measurementNoise;
    kalman.ProcessNoiseCovariance = processNoise;
    kalman.ErrorCovariancePost = errorCovariancePost;
    kalman.MeasurementMatrix = measurementMatrix;
}
/// <summary>
/// Initializes Kalman tracking matrices with caller-tunable parameters,
/// silently clamping out-of-range values to tested defaults.
/// </summary>
/// <param name="strengthMatrix">Position coefficient on the transition diagonal; must be in [0, 1], otherwise 0.6 is used.</param>
/// <param name="processNoise">Process noise scale; must be in [1.0e-4, 1.0e-1], otherwise 1.0e-2 is used.</param>
/// <param name="measurementNoise">Measurement noise scale; must be in [1.0e-4, 1.0e-1], otherwise 1.0e-1 is used.</param>
public SyntheticData(float strengthMatrix, double processNoise, double measurementNoise)
{
    var newStrength = strengthMatrix;
    var newProcessNoise = processNoise;
    var newMeasurementNoise = measurementNoise;
    // Clamp invalid inputs to defaults rather than throwing.
    if (strengthMatrix > 1.0f || strengthMatrix < 0.0f)
        newStrength = 0.6f;
    if (processNoise > 1.0e-1 || processNoise < 1.0e-4)
        newProcessNoise = 1.0e-2;
    if (measurementNoise > 1.0e-1 || measurementNoise < 1.0e-4)
        newMeasurementNoise = 1.0e-1;

    State = new Matrix<float>(4, 1);
    State[0, 0] = 0f; // x-pos
    State[1, 0] = 0f; // y-pos
    State[2, 0] = 0f; // x-velocity
    State[3, 0] = 0f; // y-velocity

    // Constant-velocity model with a tunable coefficient on the position terms.
    TransitionMatrix = new Matrix<float>(new[,]
    {
        {newStrength, 0, 1, 0},
        {0, newStrength, 0, 1},
        {0, 0, 1, 0},
        {0, 0, 0, 1}
    });

    // Only position (x, y) is observed, not velocity.
    MeasurementMatrix = new Matrix<float>(new float[,]
    {
        { 1, 0, 0, 0 },
        { 0, 1, 0, 0 }
    });
    // FIX: removed a redundant MeasurementMatrix.SetIdentity() call that
    // immediately rewrote the exact values the initializer above already set.

    ProcessNoise = new Matrix<float>(4, 4); // linked to the size of the transition matrix
    ProcessNoise.SetIdentity(new MCvScalar(newProcessNoise)); // the smaller the value the more resistance to noise
    MeasurementNoise = new Matrix<float>(2, 2); // fixed according to input data
    MeasurementNoise.SetIdentity(new MCvScalar(newMeasurementNoise)); // larger value = more resistance to noise, less responsive to velocity
    ErrorCovariancePost = new Matrix<float>(4, 4); // linked to the size of the transition matrix
    ErrorCovariancePost.SetIdentity();
}
/// <summary>
/// Allocates CvKalman and all its matrices and initializes them: the
/// transition matrix and both noise covariances (and the posterior error
/// covariance) start as identity; the measurement matrix, gain and prior
/// error covariance start zero-allocated.
/// </summary>
/// <param name="dynamParams">dimensionality of the state vector</param>
/// <param name="measureParams">dimensionality of the measurement vector</param>
/// <param name="controlParams">dimensionality of the control vector; 0 means no control input</param>
public Kalman(int dynamParams, int measureParams, int controlParams)
{
    _kalman.DP = dynamParams;
    _kalman.MP = measureParams;
    _kalman.CP = controlParams;

    PredictedState = new Matrix<float>(dynamParams, 1);
    CorrectedState = new Matrix<float>(dynamParams, 1);
    TransitionMatrix = new Matrix<float>(dynamParams, dynamParams);
    TransitionMatrix.SetIdentity();
    ProcessNoiseCovariance = new Matrix<float>(dynamParams, dynamParams);
    ProcessNoiseCovariance.SetIdentity();
    MeasurementMatrix = new Matrix<float>(measureParams, dynamParams);
    MeasurementNoiseCovariance = new Matrix<float>(measureParams, measureParams);
    MeasurementNoiseCovariance.SetIdentity();
    ErrorCovariancePre = new Matrix<float>(dynamParams, dynamParams);
    ErrorCovariancePost = new Matrix<float>(dynamParams, dynamParams);
    Gain = new Matrix<float>(dynamParams, measureParams);

    // A control matrix is only meaningful when there is a control vector.
    if (controlParams > 0)
    {
        ControlMatrix = new Matrix<float>(dynamParams, controlParams);
    }

    // Scratch buffers required by the native CvKalman implementation; kept
    // as managed fields so the unmanaged pointers below remain valid for
    // the lifetime of this object.
    _temp1 = new Matrix<float>(dynamParams, dynamParams);
    _kalman.temp1 = _temp1.Ptr;
    _temp2 = new Matrix<float>(measureParams, dynamParams);
    _kalman.temp2 = _temp2.Ptr;
    _temp3 = new Matrix<float>(measureParams, measureParams);
    _kalman.temp3 = _temp3.Ptr;
    _temp4 = new Matrix<float>(measureParams, dynamParams);
    _kalman.temp4 = _temp4.Ptr;
    _temp5 = new Matrix<float>(measureParams, 1);
    _kalman.temp5 = _temp5.Ptr;
    //_kalman.Temp1 = _temp1.MCvMat.data;
    //_kalman.Temp2 = _temp2.MCvMat.data;
}
/// <summary>
/// Smoke test: CUDA perspective warp with an identity transform on a
/// random-noise image must run without error. Skipped when no CUDA device
/// is available.
/// </summary>
public void TestCudaWarpPerspective()
{
    if (!CudaInvoke.HasCuda)
        return;

    Matrix<float> identityWarp = new Matrix<float>(3, 3);
    identityWarp.SetIdentity();

    Image<Gray, byte> source = new Image<Gray, byte>(480, 320);
    source.SetRandNormal(new MCvScalar(), new MCvScalar(255));

    using (GpuMat gpuSource = new GpuMat(source))
    using (CudaImage<Gray, Byte> gpuResult = new CudaImage<Gray, byte>())
    {
        CudaInvoke.WarpPerspective(
            gpuSource,
            gpuResult,
            identityWarp,
            gpuSource.Size,
            CvEnum.Inter.Cubic,
            CvEnum.BorderType.Default,
            new MCvScalar(),
            null);
    }
}
// this is directly from the wikipedia page on Kabsh Algorithm
/// <summary>
/// Kabsch algorithm: computes the optimal 2D rotation aligning point set P
/// onto point set Q via SVD of their cross-covariance matrix, correcting
/// for a possible reflection, and sets Rotation and Offset.
/// </summary>
public void Recompute2()
{
    // NOTE(review): p and q alias P and Q — the centroid subtraction below
    // mutates the underlying matrices in place; confirm callers expect this.
    var p = P;
    var q = Q;
    //1. subtract centroids
    for (int i = 0; i < p.Rows; i++)
    {
        p[i, 0] -= SourceCentroid[0, 0];
        p[i, 1] -= SourceCentroid[1, 0];
        q[i, 0] -= DestCentroid[0, 0];
        q[i, 1] -= DestCentroid[1, 0];
    }
    //2. compute covariance matrix
    var a = p.Transpose()*q;
    //3. compute rotation matrix
    /* perform svd where A = V S WT */
    Matrix<double> V = new Matrix<double>(2, 2);
    Matrix<double> S = new Matrix<double>(2, 2);
    Matrix<double> W = new Matrix<double>(2, 2);
    // NOTE(review): cvSVD's output ordering determines which of V/W holds the
    // left vs. right singular vectors — verify the argument order against the
    // OpenCV cvSVD signature (A, W, U, V).
    CvInvoke.cvSVD(a.Ptr, S.Ptr, V.Ptr, W.Ptr, SVD_TYPE.CV_SVD_DEFAULT);
    // Deal with reflection matrix: if det(W * V^T) < 0 the decomposition
    // yields a reflection; flipping the sign of the last axis restores a
    // proper rotation.
    Matrix<double> m = new Matrix<double>(2, 2);
    m.SetIdentity(new MCvScalar(1));
    m[1,1] = ((W*V.Transpose()).Det<0) ? -1 : 1;
    // Compute the rotation matrix
    Rotation = W*m*V.Transpose();
    // NOTE(review): the rotation-aware offset is commented out — Offset is
    // currently a pure centroid difference; confirm this is intentional.
    //Offset = DestCentroid - (Rotation * SourceCentroid);
    Offset = DestCentroid - SourceCentroid;
    Console.WriteLine("Rotaiton Matrix - Angle ="+Angle);
    Console.WriteLine(FormatMatrix(Rotation));
}
/// <summary>
/// Builds a 4x4 homogeneous 3D translation matrix for the offset (x, y, z).
/// </summary>
/// <param name="x">Translation along the x axis.</param>
/// <param name="y">Translation along the y axis.</param>
/// <param name="z">Translation along the z axis.</param>
/// <returns>Identity matrix with (x, y, z) in the last column.</returns>
public static Matrix<double> Get3DTranslationMatrix(double x, double y, double z)
{
    var translation = new Matrix<double>(4, 4);
    translation.SetIdentity();
    translation[0, 3] = x;
    translation[1, 3] = y;
    translation[2, 3] = z;
    return translation;
}
/// <summary>
/// Runs SGBM stereo matching on a left/right image pair, reprojects the
/// disparity to 3D with an identity Q matrix, and logs the timing plus the
/// observed depth range.
/// </summary>
public void TestStereoSGBMCorrespondence()
{
    Image<Gray, Byte> left = new Image<Gray, byte>("left.jpg");
    Image<Gray, Byte> right = new Image<Gray, byte>("right.jpg");
    Size size = left.Size;

    Image<Gray, Int16> disparity = new Image<Gray, Int16>(size);
    StereoSGBM bm = new StereoSGBM(10, 64, 0, 0, 0, 0, 0, 0, 0, 0, false);
    Stopwatch watch = Stopwatch.StartNew();
    bm.FindStereoCorrespondence(left, right, disparity);
    watch.Stop();
    Trace.WriteLine(String.Format("Time used: {0} milliseconds", watch.ElapsedMilliseconds));

    Matrix<double> q = new Matrix<double>(4, 4);
    q.SetIdentity();
    MCvPoint3D32f[] points = PointCollection.ReprojectImageTo3D(disparity * (-16), q);

    float min = (float)1.0e10, max = 0;
    foreach (MCvPoint3D32f p in points)
    {
        // BUG FIX: originally `if ... else if ...` — a point that updated min
        // was never considered for max (so e.g. a largest-z point arriving
        // first was silently dropped). Check both bounds independently.
        if (p.z < min)
            min = p.z;
        if (p.z > max)
            max = p.z;
    }
    Trace.WriteLine(String.Format("Min : {0}\r\nMax : {1}", min, max));
}
/// <summary>
/// Initializes a 2-state Kalman model [phi, delta_phi]: an angle plus its
/// per-step change, where only phi itself is measured.
/// </summary>
public SyntheticData()
{
    _state = new Matrix<float>(2, 1);
    // start with random position and velocity
    //_state.SetRandNormal(new MCvScalar(0.0), new MCvScalar(1.0));
    _state[0, 0] = 0.0f;
    _state[1, 0] = 0.05f;

    _measurementNoise = new Matrix<float>(1, 1);
    _measurementNoise.SetIdentity(new MCvScalar(1.0e-2));
    _processNoise = new Matrix<float>(2, 2);
    _processNoise.SetIdentity(new MCvScalar(1.0e-5));
    _errorCovariancePost = new Matrix<float>(2, 2);
    _errorCovariancePost.SetIdentity();

    // phi_t = phi_{t-1} + delta_phi
    _transitionMatrix = new Matrix<float>(new float[,] { { 1, 1 }, { 0, 1 } });

    // The measurement is [ phi ].
    // FIX: removed a redundant _measurementMatrix.SetIdentity() call — on a
    // 1x2 matrix it writes [1, 0], exactly what the initializer already set.
    _measurementMatrix = new Matrix<float>(new float[,] { {1, 0}});
}
/// <summary>
/// Runs SGBM stereo matching on the aloe test pair, reprojects the scaled
/// disparity to 3D with an identity Q matrix, and logs the timing plus the
/// observed depth range.
/// </summary>
public void TestStereoSGBMCorrespondence()
{
    Image<Gray, Byte> left = EmguAssert.LoadImage<Gray, byte>("aloeL.jpg");
    Image<Gray, Byte> right = EmguAssert.LoadImage<Gray, byte>("aloeR.jpg");
    Size size = left.Size;

    Image<Gray, Int16> disparity = new Image<Gray, Int16>(size);
    StereoSGBM bm = new StereoSGBM(10, 64, 0, 0, 0, 0, 0, 0, 0, 0, StereoSGBM.Mode.SGBM);
    Stopwatch watch = Stopwatch.StartNew();
    bm.Compute(left, right, disparity);
    watch.Stop();
    EmguAssert.WriteLine(String.Format("Time used: {0} milliseconds", watch.ElapsedMilliseconds));

    Matrix<double> q = new Matrix<double>(4, 4);
    q.SetIdentity();
    Image<Gray, Int16> disparityScaled = disparity * (-16);
    MCvPoint3D32f[] points = PointCollection.ReprojectImageTo3D(disparityScaled.Mat, q);

    float min = (float) 1.0e10, max = 0;
    foreach (MCvPoint3D32f p in points)
    {
        // BUG FIX: originally `if ... else if ...` — a point that updated min
        // was never considered for max, so the reported max could be wrong.
        // Check both bounds independently.
        if (p.Z < min)
            min = p.Z;
        if (p.Z > max)
            max = p.Z;
    }
    EmguAssert.WriteLine(String.Format("Min : {0}\r\nMax : {1}", min, max));
}
/// <summary>Inverting the identity matrix (legacy cvInvert API) must yield the identity matrix.</summary>
public void TestInvert()
{
    Matrix<Single> identity = new Matrix<Single>(3, 3);
    identity.SetIdentity();
    Matrix<Single> result = new Matrix<Single>(3, 3);

    CvInvoke.cvInvert(identity, result, Emgu.CV.CvEnum.INVERT_METHOD.CV_LU);

    Assert.IsTrue(identity.Equals(result));
}
//SEARCH: RENDER STEREO FRAME TO VIDEO FORM
/// <summary>
/// Renders one full stereo frame to the video form: optionally rectifies
/// both images with the calibrated stereo maps, optionally computes and
/// shows a depth map, optionally runs visual odometry (fused with MEMS
/// orientation when available) to accumulate position/rotation, and draws
/// tracked features. Partial frames are ignored.
/// </summary>
/// <param name="stereoFrame">The incoming left/right frame pair; disposed at the end of a full-frame render.</param>
private void StereoStreamFrameRender(VideoSource.StereoFrameSequenceElement stereoFrame)
{
    this.UpdateCurPrevMEMSOrient();
    if (!stereoFrame.IsNotFullFrame)
    {
        var leftImg = new Image<Bgr, byte>(stereoFrame.LeftRawFrame);
        var rightImg = new Image<Bgr, byte>(stereoFrame.RightRawFrame);
        Bitmap stuff1Bmp = null; // auxiliary render (depth map), shown only if produced
        if (this.useCalibratedStereoRenderCheckBox.Checked)
        {
            if (this.StereoCameraParams != null)
            {
                // Undistort, then remap back into the original buffers so the
                // rest of the pipeline sees rectified images.
                var tmpLeft = this.StereoCameraParams.LeftIntrinsicCameraParameters.Undistort(leftImg);
                var tmpRight = this.StereoCameraParams.RightIntrinsicCameraParameters.Undistort(rightImg);
                CvInvoke.cvRemap(tmpLeft, leftImg, this.StereoCameraParams.LeftMapX, this.StereoCameraParams.LeftMapY,
                    (int)INTER.CV_INTER_LINEAR | (int)WARP.CV_WARP_FILL_OUTLIERS, new MCvScalar(0));
                CvInvoke.cvRemap(tmpRight, rightImg, this.StereoCameraParams.RightMapX, this.StereoCameraParams.RightMapY,
                    (int)INTER.CV_INTER_LINEAR | (int)WARP.CV_WARP_FILL_OUTLIERS, new MCvScalar(0));
                //Image<Gray, short> dispImg;
                //var points = this.Get3DFeatures(this.StereoCameraParams, stereoFrame, out dispImg);
                //var centroid = this.GetPoint3DCloudCentroid(points);
                //Console.WriteLine("Centr: {0}, {1}, {2};", centroid.x, centroid.y, centroid.z);
                //this.videoForm.RenderStereoFrame(dispImg.ToBitmap(), null);
            }
        }
        var leftGrayImg = leftImg.Convert<Gray, byte>();
        var rightGrayImg = rightImg.Convert<Gray, byte>();
        Bitmap leftFrameRender;
        Bitmap rightFrameRender;
        if (this.renderGrayCheckBox.Checked)
        {
            leftFrameRender = new Bitmap(leftGrayImg.ToBitmap());
            rightFrameRender = new Bitmap(rightGrayImg.ToBitmap());
        }
        else
        {
            leftFrameRender = new Bitmap(leftImg.ToBitmap());
            rightFrameRender = new Bitmap(rightImg.ToBitmap());
        }
        if (this.showDepthMapCheckBox.Checked)
        {
            //var features = this.opticFlowProcessor.GetFeaturesToTrack(
            //    stereoFrame: frame,
            //    useGpu: true);
            var depthMap = this.opticFlowProcessor.GetDispMap(leftGrayImg, rightGrayImg, this.useGPUCheckBox.Checked,
                this.GetParametersForStereoMapSolver(this.useGPUCheckBox.Checked));
            stuff1Bmp = depthMap.ToBitmap();
            //update frame: shift current -> previous so odometry below can
            //compare consecutive depth frames
            this.prevStereoDepthFrame = this.currStereoDepthFrame;
            this.currStereoDepthFrame = new DataSource.OpticFlowFrameContainer()
            {
                DepthMapImg = depthMap,
                StereoFrame = new VideoSource.StereoFrameSequenceElement(stereoFrame)
            };
            ////
        }
        //try to use odometry
        if (this.perfOdometryCheckBox.Checked)
        {
            if (this.StereoCameraParams != null)
            {
                // Default inter-frame rotation: identity (flattened to an array).
                var rotMatrix = new Matrix<double>(3, 3);
                rotMatrix.SetIdentity();
                var rotMatr = Utils.CvHelper.MatrixToArray(rotMatrix);
                rotMatrix.Dispose();
                // Prefer the MEMS-derived rotation between the two states when
                // both orientation snapshots are available.
                if (this.prevMEMSRotMatr != null && this.currentMEMSRotMatr != null)
                {
                    rotMatr = this.OrientationCalc.GetRotationMatrixBetweenTwoStates(this.prevMEMSRotMatr,
                        this.currentMEMSRotMatr, this.orientCalibMatrix);
                }
                List<PointF> currFreatures;
                List<PointF> prevFeatures;
                Matrix<double> resRotation;
                var featuresToTrackParams = this.GetVisualOdometerFeaturesToTrackParams();
                var featuresOpticFlowParams = this.GetVisualOdometerFeaturesOpticFlowParams();
                var disparitiesParams = this.GetVisualOdometerDisparitiesParams();
                // NOTE(review): depends on prev/currStereoDepthFrame set in the
                // depth-map branch above — presumably odometry is only useful
                // when showDepthMapCheckBox is also checked; confirm.
                var tDiff = this.visualOdometer.GetTranslationAndRotation(
                    rotMatrArray: rotMatr,
                    prevFrame: this.prevStereoDepthFrame,
                    currFrame: this.currStereoDepthFrame,
                    cameraParams: this.StereoCameraParams,
                    currFeaturesList: out currFreatures,
                    prevFeaturesList: out prevFeatures,
                    resRotation: out resRotation,
                    featuresToTrackParams: featuresToTrackParams,
                    featuresOpticFlowParams: featuresOpticFlowParams,
                    disparitiesParams: disparitiesParams
                );
                // Accumulate the incremental rotation into the running estimate.
                if (resRotation != null)
                {
                    this.svdDiffRotMatrix = resRotation.Mul(this.svdDiffRotMatrix);
                }
                if (tDiff != null)
                {
                    // Skip NaN translations but still log them below.
                    if (!(double.IsNaN(tDiff.Value.x) || double.IsNaN(tDiff.Value.y) || double.IsNaN(tDiff.Value.z)))
                    {
                        this.position3d.x += tDiff.Value.x;
                        this.position3d.y += tDiff.Value.y;
                        this.position3d.z += tDiff.Value.z;
                        this.RenderTranslatoin(this.position3d);
                    }
                    Console.WriteLine("TRANSLATION: X={0}; Y={1}; Z={2}", tDiff.Value.x, tDiff.Value.y,
                        tDiff.Value.z);
                    Console.WriteLine("POSITION: X={0}; Y={1}; Z={2}", position3d.x, position3d.y, position3d.z);
                }
                if (this.renderFraturesCheckBox.Checked)
                {
                    if (currFreatures != null && prevFeatures != null)
                    {
                        // Draw each tracked feature and its motion since the previous frame.
                        var dotSize = new Size(10, 10);
                        var g = Graphics.FromImage(leftFrameRender);
                        for (int i = 0; i < currFreatures.Count; ++i)
                        {
                            g.DrawEllipse(Pens.Red, currFreatures[i].X - dotSize.Width / 2,
                                currFreatures[i].Y - dotSize.Height / 2, dotSize.Width, dotSize.Height);
                            g.DrawLine(Pens.Red, currFreatures[i], prevFeatures[i]);
                        }
                    }
                }
            }
        }
        ////
        // Marshal the orientation render onto the MEMS form's UI thread.
        this.memsRenderForm.Invoke((MethodInvoker)delegate
        {
            this.RenderOrientationTransformation(Utils.CvHelper.MatrixToArray(this.svdDiffRotMatrix));
        });
        //general lr render
        this.videoForm.RenderStereoFrame(leftFrameRender, rightFrameRender);
        if (stuff1Bmp != null)
        {
            this.videoForm.RenderToStuffPictureBox1(stuff1Bmp);
        }
        stereoFrame.Dispose();
    }
}
/// <summary>
/// Integrates a gyroscope reading into an incremental 3D rotation matrix.
/// Returns identity for the very first reading (no previous timestamp to
/// integrate against); otherwise builds per-axis rotations from the angular
/// rates scaled by the elapsed time and combines them as Rx * Ry * Rz.
/// </summary>
/// <param name="gyroVect">Current gyro reading (per-axis angular rates plus timestamp).</param>
/// <param name="gyroSpeedMul">Extra scale factor applied to each integrated angle.</param>
/// <returns>A 3x3 rotation matrix for the elapsed interval.</returns>
public Matrix<double> GetGyroRotationMatrix(ReadingsVector3f gyroVect, double gyroSpeedMul)
{
    if (this.OldGyroReadings == null)
    {
        // First sample: nothing to integrate against yet.
        Matrix<double> identity = new Matrix<double>(3, 3);
        identity.SetIdentity();
        return identity;
    }

    // FIX: the original unconditionally pre-allocated a 3x3 Matrix<double>
    // and then discarded it (undisposed) on this path; each result matrix is
    // now allocated only where it is actually produced.
    long diffMS = gyroVect.TimeStampI - this.OldGyroReadings.TimeStampI;
    // Angle = rate * elapsed-seconds * multiplier.
    // NOTE(review): the X and Z rates are negated — presumably to match the
    // camera/world axis convention; confirm against the sensor orientation.
    double xr = -((double)gyroVect.Values[0] * diffMS) / 1000 * gyroSpeedMul;
    double yr = ((double)gyroVect.Values[1] * diffMS) / 1000 * gyroSpeedMul;
    double zr = -((double)gyroVect.Values[2] * diffMS) / 1000 * gyroSpeedMul;

    double sinxr = Math.Sin(xr);
    double cosxr = Math.Cos(xr);
    double sinyr = Math.Sin(yr);
    double cosyr = Math.Cos(yr);
    double sinzr = Math.Sin(zr);
    double coszr = Math.Cos(zr);

    Matrix<double> xM = new Matrix<double>(new double[,]
    {
        {1, 0, 0 },
        {0, cosxr, sinxr},
        {0, -sinxr, cosxr}
    });
    Matrix<double> yM = new Matrix<double>(new double[,]
    {
        {cosyr, 0, sinyr},
        {0, 1, 0 },
        {-sinyr, 0, cosyr}
    });
    Matrix<double> zM = new Matrix<double>(new double[,]
    {
        {coszr, sinzr, 0},
        {-sinzr, coszr, 0},
        {0, 0, 1}
    });

    // Combined incremental rotation.
    return xM.Mul(yM).Mul(zM);
}