/// <summary>
        /// Computes the BFGS correction formula for the inverse Hessian approximation.
        /// </summary>
        /// <param name="func">Function whose inverse Hessian is being approximated.</param>
        /// <param name="b">Current inverse Hessian approximation.</param>
        /// <param name="x">Current vector of the Quasi-Newton minimization algorithm.</param>
        /// <param name="x1">Next vector of the Quasi-Newton minimization algorithm.</param>
        /// <returns>A matrix representing the next step of the inverse Hessian approximation.</returns>
        public static Matrix Bfgs(CompiledFunc func, Matrix b, Vector x, Vector x1)
        {
            var sk = new Matrix(x1 - x);                                          // step: s_k = x_{k+1} - x_k
            var yk = new Matrix(func.Differentiate(x1) - func.Differentiate(x));  // gradient change: y_k = g_{k+1} - g_k

            var t = b.Dot(sk.Transpose()).Dot(sk).Dot(b)/sk.Dot(b).Dot(sk.Transpose())[0,0];
            var t1 = yk.Transpose().Dot(yk)/yk.Dot(sk.Transpose())[0,0];

            return b - t + t1;
        }
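For reference, the two correction terms computed above match the textbook BFGS update of the approximation matrix, with s_k the step and y_k the corresponding change in gradient (a sketch of the standard formula, treating s_k and y_k as column vectors; not taken from the original source):

\[
B_{k+1} = B_k \;-\; \frac{B_k\, s_k s_k^{T} B_k}{s_k^{T} B_k\, s_k} \;+\; \frac{y_k\, y_k^{T}}{y_k^{T} s_k}
\]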
        private Matrix Transformation(Matrix A, Matrix U, Matrix axt, Matrix wxt, Matrix ayt, Matrix wyt)
        {
            var fx_aff = axt * MatrixUtils.RankVertical(Ones(1, A.Rows), A.Transpose());
            var fx_wrp = wxt * U;
            var fx = fx_aff + fx_wrp; //fx=fx_aff+fx_wrp;

            // fy_aff=cy(n_good+1:n_good+3)'*[ones(1,nsamp1); X'];
            var fy_aff = ayt * MatrixUtils.RankVertical(Ones(1, A.Rows), A.Transpose());
            var fy_wrp = wyt * U; // fy_wrp=cy(1:n_good)'*U;
            var fy = fy_aff + fy_wrp; // fy=fy_aff+fy_wrp;

            return MatrixUtils.RankVertical(fx, fy).Transpose();
        }
        static void Main()
        {
            int[,] matrix1 = new int[,]
            {
                {1, 2, -3},
                {2, 1, 3},
                {3, 1, 2}
            };

            int[,] matrix2 = new int[,]
            {
                {4, 5, 6},
                {-1, 0, 7},
                {3, 2, 1}
            };

            Matrix<int> m1 = new Matrix<int>(matrix1);
            Matrix<int> m2 = new Matrix<int>(matrix2);

            Console.WriteLine(m1 + m2);
            Console.WriteLine(m1 - m2);
            Console.WriteLine(m1 * m2);
            Console.WriteLine(m1.Transpose());
            Console.WriteLine(m1 * 5);
        }
        public static void Draw(RenderContext11 renderContext, PositionColoredTextured[] points, int count, Matrix mat, bool triangleStrip)
        {
            if (VertexBuffer == null)
            {
                VertexBuffer = new Buffer(renderContext.Device, System.Runtime.InteropServices.Marshal.SizeOf(points[0]) * 2500, ResourceUsage.Dynamic, BindFlags.VertexBuffer, CpuAccessFlags.Write, ResourceOptionFlags.None, System.Runtime.InteropServices.Marshal.SizeOf(points[0]));
                VertexBufferBinding = new VertexBufferBinding(VertexBuffer, System.Runtime.InteropServices.Marshal.SizeOf((points[0])), 0);

            }

            renderContext.devContext.InputAssembler.PrimitiveTopology = triangleStrip ? SharpDX.Direct3D.PrimitiveTopology.TriangleStrip : SharpDX.Direct3D.PrimitiveTopology.TriangleList;
            renderContext.BlendMode = BlendMode.Alpha;
            renderContext.setRasterizerState(TriangleCullMode.Off);

            mat.Transpose();

            WarpOutputShader.MatWVP = mat;
            WarpOutputShader.Use(renderContext.devContext, false);

            renderContext.SetVertexBuffer(VertexBufferBinding);

            DataBox box = renderContext.devContext.MapSubresource(VertexBuffer, 0, MapMode.WriteDiscard, MapFlags.None);
            Utilities.Write(box.DataPointer, points, 0, count);

            renderContext.devContext.UnmapSubresource(VertexBuffer, 0);

            renderContext.devContext.PixelShader.SetShaderResource(0, null);

            renderContext.devContext.Draw(count, 0);
        }
Example #5
        private static Tuple<Vector<double>, double[]> GradientDescent(Matrix<double> x, Vector<double> y, Vector<double> theta, double alpha, int numberOfIterations)
        {
            var m = y.Count;
            var jHistory = new double[numberOfIterations];

            for (int i = 0; i < numberOfIterations; i++)
            {
                var error = x * theta - y;
                theta = theta - (alpha / m) * x.Transpose() * error;

                // Record the least-squares cost so the returned history is actually populated.
                jHistory[i] = error.DotProduct(error) / (2.0 * m);
            }

            return Tuple.Create(theta, jHistory);
        }
        /// <summary>
        /// Uses gradient descent to calculate the "theta" vector.
        /// On each iteration theta is updated as
        /// 
        ///		theta = theta - (alpha/m)*(X*theta - Y)'*X
        /// 
        /// </summary>
        private void GradientDescent(Input input)
        {
            var monitor = new CostFunctionMonitor(input);

            _theta = new DenseMatrix(1, input.FeaturesCount);

            var multiplier = (Settings.LearningRate / input.SamplesCount);
            for (int i = 0; i < Settings.MaxIterations; i++) {
                _theta -= multiplier * ((input.X * _theta.Transpose() - input.Y).Transpose() * input.X);
                if (monitor.IsConverged(_theta)) {
                    break;
                }
            }
        }
Example #7
        static void Main(string[] args)
        {
            //test constructor
            var matr = new Matrix<int>(4, 5);
            //test indexer
            matr[3, 4] = 8;
            Console.WriteLine("{0}", matr[3, 4]);
            //test ToString override
            Console.WriteLine(matr);

            double[,] first = { { 0, 2, 3, 8 }, { 1, 2, 3, 4 }, { 1, 2, 3, 4 }, { 1, 2, 3, 4 } };
            double[,] second = { { 1, 2, 3, 8 }, { 1, 2, 3, 6 }, { 1, 2, 8, 4 }, { 1, 0, 3, 4 } };
            //test constructor2
            Matrix<double> arrFirst = new Matrix<double>(first);
            Matrix<double> arrSecond = new Matrix<double>(second);
            Console.WriteLine(arrFirst);
            Console.WriteLine(arrSecond);

            //test operator true
            if (arrFirst)
            {
                Console.WriteLine("There is no zero inside");
            }
            else Console.WriteLine("There is at least one zero inside\n");
            Console.WriteLine("Sum of the two matrices");
            //test operator +
            Console.WriteLine(arrFirst + arrSecond);
            Console.WriteLine("Subtraction of the two matrices");
            //test operator -
            Console.WriteLine(arrFirst - arrSecond);
            Console.WriteLine("Multiplication of the two matrices");
            //test operator *
            Console.WriteLine(arrFirst * arrSecond);

            Console.WriteLine("Transposed matrix:");
            Matrix<double> transposed = arrFirst.Transpose();
            Console.WriteLine(transposed);
        }
Example #8
        private static Vector<double> AdvancedOptimization(Matrix<double> x, Vector<double> y, Vector<double> theta, double lambda)
        {
            var m = x.RowCount;
            var featureCount = theta.Count;
            var xTranspose = x.Transpose();

            // TODO: convert all of this to use MicrosoftResearch.Infer.Maths instead of MathNet.Numerics
            var solver = new MicrosoftResearch.Infer.Maths.BFGS();
            var minTheta = solver.Run(
                MicrosoftResearch.Infer.Maths.DenseVector.FromList(theta),
                10000,
                (Vector vector, ref Vector dX) =>
                    {
                        var newTheta = DenseVector.Create(featureCount, n => vector[n]);

                        var regTheta = DenseVector.OfVector(newTheta);
                        regTheta[0] = 0;

                        var regThetaSq = DenseVector.OfVector(regTheta);
                        regThetaSq.MapInplace(t => t * t);

                        var h = x * newTheta - y;

                        var cost = ((h * h) / (2D * m)) + ((lambda / (2D * m)) * regThetaSq.Sum());

                        var grad = ((1D / m) * (xTranspose * h)) + ((lambda / m) * regTheta);

                        for (var j = 0; j < grad.Count; j++)
                        {
                            dX[j] = grad[j];
                        }

                        return cost;
                    });

            return DenseVector.Create(theta.Count, i => minTheta[i]);
        }
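The cost and gradient computed inside the callback above follow the standard regularized linear-regression form, where the bias term is excluded from the penalty (a sketch of the math, not taken from the original source):

\[
J(\theta) = \frac{1}{2m}\,\lVert X\theta - y\rVert^{2} + \frac{\lambda}{2m}\sum_{j\ge 1}\theta_j^{2},
\qquad
\nabla J(\theta) = \frac{1}{m}\,X^{T}(X\theta - y) + \frac{\lambda}{m}\,\theta_{\mathrm{reg}},
\]

where \(\theta_{\mathrm{reg}}\) equals \(\theta\) with its first entry set to zero.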
Example #9
        public void Simple()
        {
            var matrix = new Matrix(3, 3);

            // [ 1,  4,  7 ]
            // [ 0,  5,  8 ]
            // [ 0,  0,  0 ]

            matrix[0, 0] = 1;
            matrix[0, 1] = 4;
            matrix[0, 2] = 7;

            matrix[1, 0] = 0;
            matrix[1, 1] = 5;
            matrix[1, 2] = 8;

            matrix[2, 0] = 0;
            matrix[2, 1] = 0;
            matrix[2, 2] = 0;

            Assert.AreEqual(matrix.IsTriangular, TriangularMatrixType.Upper);
            matrix = matrix.Transpose();
            Assert.AreEqual(matrix.IsTriangular, TriangularMatrixType.Lower);
        }
Example #10
        public void Render(DeviceContext deviceContext, int indexCount, Matrix worldMatrix, Matrix viewMatrix, Matrix projectionMatrix, ShaderResourceView texture)
        {
            worldMatrix.Transpose();
            viewMatrix.Transpose();
            projectionMatrix.Transpose();
            // Lock the constant memory buffer so it can be written to.
            DataStream mappedResource;
            deviceContext.MapSubresource(ConstantMatrixBuffer, MapMode.WriteDiscard, SharpDX.Direct3D11.MapFlags.None, out mappedResource);

            // Copy the transposed matrices (because they are stored in column-major order on the GPU by default) into the constant buffer.
            var matrixBuffer = new MatrixBuffer
            {
                world = worldMatrix,
                view = viewMatrix,
                projection = projectionMatrix
            };
            mappedResource.Write(matrixBuffer);

            // Unlock the constant buffer.
            deviceContext.UnmapSubresource(ConstantMatrixBuffer, 0);

            // Set the position of the constant buffer in the vertex shader.
            const int bufferNumber = 0;

            // Finally set the constant buffer in the vertex shader with the updated values.
            deviceContext.VertexShader.SetConstantBuffer(bufferNumber, ConstantMatrixBuffer);

            // Set shader resource in the pixel shader.
            deviceContext.PixelShader.SetShaderResource(0, texture);

            // Set the vertex input layout.
            deviceContext.InputAssembler.InputLayout = Layout;

            // Set the vertex and pixel shaders that will be used to render this triangle.
            deviceContext.VertexShader.Set(VertexShader);
            deviceContext.PixelShader.Set(PixelShader);

            // Set the sampler state in the pixel shader.
            deviceContext.PixelShader.SetSampler(0, SamplerState);

            // Render the triangle.
            deviceContext.DrawIndexed(indexCount, 0, 0);
        }
Example #11
      public void TestTransposeFloatMatrix()
      {
         using (Matrix<float> mat = new Matrix<float>(1, 3))
         {
            mat.SetRandUniform(new MCvScalar(-1000.0), new MCvScalar(1000.0));

            Matrix<float> matT = mat.Transpose();

            for (int i = 0; i < matT.Rows; i++)
               for (int j = 0; j < matT.Cols; j++)
                  EmguAssert.AreEqual(matT[i, j], mat[j, i]);
         }
      }
Example #12
        public static Vector<double> GetWeights(Matrix<double> data, Vector<double> targetClassification)
        {            
            var features = data.ColumnCount;

            // these are things we are trying to solve for
            Vector<double> weights = DenseVector.Create(features, i => 1.0);

            var alpha = 0.001;

            foreach (var cycle in Enumerable.Range(0, 500))
            {
                #region Sigmoid Explanation

                /*
                 * Multiply all the data by the weights; this gives you the estimate of the current function
                 * given those weights. Passing that result through the sigmoid assigns each point to one class
                 * or the other: values above 0.5 fall in class 1, values below 0.5 fall in class 0, and the
                 * closer the value is to 1 the more probable it is (under the current weights) that the point
                 * belongs to class 1.
                 * 
                 * It does not matter whether an instance really is the class the sigmoid says it is;
                 * the error shifts the weight gradient, so over the iterations of the cycles
                 * the weights move each data point towards its actual expected class.
                 * 
                 * For example, take a data point with values
                 * 
                 * [1.0, -0.017612, 14.053064]
                 * 
                 * where the first value is the initial weight factor and the second and third are the x, y coordinates,
                 * 
                 * and say the point is categorized as class 0.
                 * 
                 * Calculating the initial sigmoid gives you something like 0.9999998,
                 * 
                 * which says it is class 1, but that is obviously wrong. However, the error here is large,
                 * 
                 * meaning that the gradient wants to move towards the expected data.
                 * 
                 * As you run the ascent this value gets smaller and smaller and eventually
                 * 
                 * the sigmoid will classify the point properly.
                 */

                #endregion

                var currentData = DenseVector.OfEnumerable(data.Multiply(weights).Select(Sigmoid));
               
                #region Error Explanation

                // find out how far off we are from the actual expectation. this is
                // like the x2 - x1 part of a derivative

                #endregion

                var error = targetClassification.Subtract(currentData);

                #region Gradient Explanation

                // this gives you the direction of change from the current 
                // set of data.  At this point every point is moving in the direction
                // of the error rate.  A large error means we are far off and trying to move
                // towards the actual data, a low error rate means we are really close
                // to the target data (so the gradient will be smaller, meaning less delta)

                #endregion

                var gradient = data.Transpose() * error;

                #region Weights Update Explanation

                // multiplying by alpha means we'll take a small step in the direction
                // of the gradient and add it to the current weights. An initial weights of 1.0
                // means we're going to start at the current location of where we are.

                #endregion

                weights = weights + alpha * gradient;
            }

            return weights;
        }
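The loop above relies on a Sigmoid helper that the snippet does not show. A minimal sketch of what such a helper typically looks like (an assumption, not part of the original source):

        // Hypothetical helper; assumes the usual logistic function.
        private static double Sigmoid(double z)
        {
            // Squashes any real value into the open interval (0, 1).
            return 1.0 / (1.0 + Math.Exp(-z));
        }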
Example #13
        /// <summary>
        /// Binds the shader and world matrices.
        /// </summary>
        /// <param name="context">The context to draw from.</param>
        /// <param name="world">The world <see cref="Matrix"/> to draw from.</param>
        /// <param name="view">The view <see cref="Matrix"/> to use for transforms.</param>
        /// <param name="projection">The projection <see cref="Matrix"/> to use for transforms.</param>
        /// <returns>True if the shader parameters were set successfully; otherwise, false.</returns>
        private bool SetShaderParameters(DeviceContext context, Matrix world, Matrix view, Matrix projection)
        {
            try
            {
                world.Transpose();
                view.Transpose();
                projection.Transpose();

                context.VertexShader.Set(vertexShader);
                context.PixelShader.Set(pixelShader);

                DataStream mappedResource;
                context.MapSubresource(matrixBuffer, 0, MapMode.WriteDiscard, MapFlags.None, out mappedResource);

                mappedResource.Write(world);
                mappedResource.Write(view);
                mappedResource.Write(projection);

                context.UnmapSubresource(matrixBuffer, 0);

                context.VertexShader.SetConstantBuffers(0, matrixBuffer);

                context.InputAssembler.InputLayout = layout;

                return true;
            }
            catch(Exception)
            {
                return false;
            }
        }
        public void MatrixTransposeDimensions()
        {
            var matrix = new Matrix(2, 3);

            var transpose = matrix.Transpose();

            Assert.AreEqual(3, transpose.Rows);
            Assert.AreEqual(2, transpose.Columns);
        }
        public void MatrixTransposeValues()
        {
            var matrix = new Matrix(new []{1, 2}, new []{3, 4});

            var transpose = matrix.Transpose();

            Assert.AreEqual(1, transpose[0, 0]);
            Assert.AreEqual(2, transpose[1, 0]);
            Assert.AreEqual(3, transpose[0, 1]);
            Assert.AreEqual(4, transpose[1, 1]);
        }
Example #16
        /// <summary>
        /// Compute the Thin Plate Spline of the image and return a 2D array of interpolated heights
        /// </summary>
        /// <param name="control_points">Control points</param>
        /// <param name="AssociatedRegion">Region providing the x/y dimensions</param>
        /// <param name="Regularization">Regularization weight (lambda)</param>
        public double[,] calc_tps(List<cPoint3D> control_points, cDRC_Region AssociatedRegion, double Regularization)
        {
            int p = control_points.Count;
            if (p < 3) return null;
            double[,] grid = new double[AssociatedRegion.SizeX, AssociatedRegion.SizeY];
            Matrix mtx_l = new Matrix(p + 3, p + 3);
            Matrix mtx_v = new Matrix(p + 3, 1);
            Matrix mtx_orig_k = new Matrix(p, p);
            double a = 0.0;
            for (int i = 0; i < p; ++i)
            {
                for (int j = i + 1; j < p; ++j)
                {
                    cPoint3D pt_i = new cPoint3D(control_points[i].X, control_points[i].Y, control_points[i].Z);
                    cPoint3D pt_j = new cPoint3D(control_points[j].X, control_points[j].Y, control_points[j].Z);

                    pt_i.Y = pt_j.Y = 0;

                    //double elen = Math.Sqrt((pt_i.X - pt_j.X) * (pt_i.X - pt_j.X) + (pt_i.Z - pt_j.Z) * (pt_i.Z - pt_j.Z));
                    double elen = pt_i.DistTo(pt_j);
                    mtx_l[i, j] = mtx_l[j, i] = mtx_orig_k[i, j] = mtx_orig_k[j, i] = tps_base_func(elen);
                    a += elen * 2; // same for upper & lower tri
                }
            }
            a /= (double)(p * p);
            //regularization = 0.3f;
            //Fill the rest of L
            for (int i = 0; i < p; ++i)
            {
                // diagonal: regularization parameter (lambda * a^2)

                mtx_l[i, i] = mtx_orig_k[i, i] = Regularization * (a * a);

                // P (p x 3, upper right)
                mtx_l[i, p + 0] = 1.0;
                mtx_l[i, p + 1] = control_points[i].X;
                mtx_l[i, p + 2] = control_points[i].Z;

                // P transposed (3 x p, bottom left)
                mtx_l[p + 0, i] = 1.0;
                mtx_l[p + 1, i] = control_points[i].X;
                mtx_l[p + 2, i] = control_points[i].Z;
            }
            // O (3 x 3, lower right)
            for (int i = p; i < p + 3; ++i)
                for (int j = p; j < p + 3; ++j)
                    mtx_l[i, j] = 0.0;

            // Fill the right hand vector V
            for (int i = 0; i < p; ++i)
                mtx_v[i, 0] = control_points[i].Y;

            mtx_v[p + 0, 0] = mtx_v[p + 1, 0] = mtx_v[p + 2, 0] = 0.0;
            // Solve the linear system "inplace"
            Matrix mtx_v_res = new Matrix(p + 3, 1);

            LuDecomposition ty = new LuDecomposition(mtx_l);

            mtx_v_res = ty.Solve(mtx_v);
            if (mtx_v_res == null)
            {
                return null;
            }

            // Interpolate grid heights
            for (int x = 0; x < AssociatedRegion.SizeX; ++x)
            {
                for (int z = 0; z < AssociatedRegion.SizeY; ++z)
                {

                    //float x = 0f; float z = 0.5f;
                    double h = mtx_v_res[p + 0, 0] + mtx_v_res[p + 1, 0] * (float)x / (float)AssociatedRegion.SizeX + mtx_v_res[p + 2, 0] * (float)z / (float)AssociatedRegion.SizeY;
                    //double h = mtx_v[p + 0, 0] + mtx_v[p + 1, 0] * (float)x + mtx_v[p + 2, 0] * (float)z ;
                    cPoint3D pt_ia;
                    cPoint3D pt_cur = new cPoint3D((float)x / (float)AssociatedRegion.SizeX, 0, (float)z / (float)AssociatedRegion.SizeY);
                    //Vector3 pt_cur = new Vector3((float)x , 0, (float)z);
                    for (int i = 0; i < p; ++i)
                    {
                        pt_ia = control_points[i];
                        pt_ia.Y = 0;
                        h += mtx_v_res[i, 0] * tps_base_func(pt_ia.DistTo(pt_cur));
                    }

                    grid[x, z] = h;
                }
            }
            // Calc bending energy
            Matrix w = new Matrix(p, 1);
            for (int i = 0; i < p; ++i)
                w[i, 0] = mtx_v_res[i, 0];

            Matrix be;

            be = Matrix.Multiply(Matrix.Multiply(w.Transpose(), mtx_orig_k), w);
            bending_energy = be[0, 0];

            Console.WriteLine("be= " + be[0, 0]);
            return grid;
        }
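The matrix assembled above is the standard thin-plate-spline system, with \(K_{ij} = U(\lVert p_i - p_j \rVert)\), \(U(r) = r^{2}\log r\), and \(P\) holding rows \((1, x_i, z_i)\); written out (a sketch of the formulation, not taken from the original source):

\[
\begin{bmatrix} K + \lambda a^{2} I & P \\ P^{T} & 0 \end{bmatrix}
\begin{bmatrix} w \\ c \end{bmatrix}
=
\begin{bmatrix} v \\ 0 \end{bmatrix},
\qquad
E_{\mathrm{bend}} = w^{T} K\, w,
\]

where \(v\) holds the control-point heights, \(a\) is the mean pairwise distance used for regularization, and the bending energy is the value printed at the end of the method.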
Example #17
      static void Run()
      {
         Image<Gray, Byte> modelImage = new Image<Gray, byte>("box.png");

         #region extract features from the object image
         MCvSURFParams param1 = new MCvSURFParams(500, false);
         SURFFeature[] modelFeatures = modelImage.ExtractSURF(ref param1);
         SURFFeature[] modelFeaturesPositiveLaplacian = Array.FindAll<SURFFeature>(modelFeatures, delegate(SURFFeature f) { return f.Point.laplacian >= 0; });
         SURFFeature[] modelFeaturesNegativeLaplacian = Array.FindAll<SURFFeature>(modelFeatures, delegate(SURFFeature f) { return f.Point.laplacian < 0; });

         //Create feature trees for the given features
         FeatureTree featureTreePositiveLaplacian = new FeatureTree(
            Array.ConvertAll<SURFFeature, Matrix<float>>(
               modelFeaturesPositiveLaplacian,
               delegate(SURFFeature f) { return f.Descriptor; }));
         FeatureTree featureTreeNegativeLaplacian = new FeatureTree(
            Array.ConvertAll<SURFFeature, Matrix<float>>(
               modelFeaturesNegativeLaplacian,
               delegate(SURFFeature f) { return f.Descriptor; }));
         #endregion

         Image<Gray, Byte> observedImage = new Image<Gray, byte>("box_in_scene.png");

         #region extract features from the observed image
         MCvSURFParams param2 = new MCvSURFParams(500, false);
         SURFFeature[] imageFeatures = observedImage.ExtractSURF(ref param2);
         SURFFeature[] imageFeaturesPositiveLaplacian = Array.FindAll<SURFFeature>(imageFeatures, delegate(SURFFeature f) { return f.Point.laplacian >= 0; });
         SURFFeature[] imageFeaturesNegativeLaplacian = Array.FindAll<SURFFeature>(imageFeatures, delegate(SURFFeature f) { return f.Point.laplacian < 0; });
         #endregion

         #region Merge the object image and the observed image into one image for display
         Image<Gray, Byte> res = new Image<Gray, byte>(Math.Max(modelImage.Width, observedImage.Width), modelImage.Height + observedImage.Height);
         res.ROI = new System.Drawing.Rectangle(0, 0, modelImage.Width, modelImage.Height);
         modelImage.Copy(res, null);
         res.ROI = new System.Drawing.Rectangle(0, modelImage.Height, observedImage.Width, observedImage.Height);
         observedImage.Copy(res, null);
         res.ROI = Rectangle.Empty;
         #endregion

         double matchDistanceRatio = 0.8;
         List<PointF> modelPoints = new List<PointF>();
         List<PointF> observePoints = new List<PointF>();

         #region using Feature Tree to match feature
         Matrix<float>[] imageFeatureDescriptorsPositiveLaplacian = Array.ConvertAll<SURFFeature, Matrix<float>>(
            imageFeaturesPositiveLaplacian,
            delegate(SURFFeature f) { return f.Descriptor; });
         Matrix<float>[] imageFeatureDescriptorsNegativeLaplacian = Array.ConvertAll<SURFFeature, Matrix<float>>(
            imageFeaturesNegativeLaplacian,
            delegate(SURFFeature f) { return f.Descriptor; });
         Matrix<Int32> result1;
         Matrix<double> dist1;

         featureTreePositiveLaplacian.FindFeatures(imageFeatureDescriptorsPositiveLaplacian, out result1, out dist1, 2, 20);
         MatchSURFFeatureWithFeatureTree(
           modelFeaturesPositiveLaplacian,
           imageFeaturesPositiveLaplacian,
           matchDistanceRatio, result1.Data, dist1.Data, modelPoints, observePoints);

         featureTreeNegativeLaplacian.FindFeatures(imageFeatureDescriptorsNegativeLaplacian, out result1, out dist1, 2, 20);
         MatchSURFFeatureWithFeatureTree(
              modelFeaturesNegativeLaplacian,
              imageFeaturesNegativeLaplacian,
              matchDistanceRatio, result1.Data, dist1.Data, modelPoints, observePoints);
         #endregion

         Matrix<float> homographyMatrix = CameraCalibration.FindHomography(
            modelPoints.ToArray(), //points on the object image
            observePoints.ToArray(), //points on the observed image
            HOMOGRAPHY_METHOD.RANSAC,
            3).Convert<float>();

         #region draw the projected object in observed image
         for (int i = 0; i < modelPoints.Count; i++)
         {
            PointF p = observePoints[i];
            p.Y += modelImage.Height;
            res.Draw(new LineSegment2DF(modelPoints[i], p), new Gray(0), 1);
         }

         System.Drawing.Rectangle rect = modelImage.ROI;
         Matrix<float> orginalCornerCoordinate = new Matrix<float>(new float[,] 
            {{  rect.Left, rect.Bottom, 1.0f},
               { rect.Right, rect.Bottom, 1.0f},
               { rect.Right, rect.Top, 1.0f},
               { rect.Left, rect.Top, 1.0f}});

         Matrix<float> destCornerCoordinate = homographyMatrix * orginalCornerCoordinate.Transpose();
         float[,] destCornerCoordinateArray = destCornerCoordinate.Data;

         Point[] destCornerPoints = new Point[4];
         for (int i = 0; i < destCornerPoints.Length; i++)
         {
            float denominator = destCornerCoordinateArray[2, i];
            destCornerPoints[i] = new Point(
               (int)(destCornerCoordinateArray[0, i] / denominator),
               (int)(destCornerCoordinateArray[1, i] / denominator) + modelImage.Height);
         }

         res.DrawPolyline(destCornerPoints, true, new Gray(255.0), 5);
         #endregion

         ImageViewer.Show(res);
      }
        // Set up the pipeline for drawing a shape then draw it.
        public void Draw(Shape shape)
        {
            // Set pipeline components to suit the shape if necessary.
            if (currVertexBinding.Buffer != shape.vertexBinding.Buffer) { context.InputAssembler.SetVertexBuffers(0, shape.vertexBinding); currVertexBinding = shape.vertexBinding; }
            if (currLayout != shape.style.layout) { context.InputAssembler.InputLayout = shape.style.layout; currLayout = shape.style.layout; }
            if (currTopology != shape.topology) { context.InputAssembler.PrimitiveTopology = shape.topology; currTopology = shape.topology; }
            if (currVertexShader != shape.style.vertexShader) { context.VertexShader.Set(shape.style.vertexShader); currVertexShader = shape.style.vertexShader; }
            if (currPixelShader != shape.style.pixelShader) { context.PixelShader.Set(shape.style.pixelShader); currPixelShader = shape.style.pixelShader; }
            if (currTextureView != shape.textureView) { context.PixelShader.SetShaderResource(0, shape.textureView); currTextureView = shape.textureView; }

            // Calculate the vertex transformation and update the constant buffer.
            worldViewProj = world * view * proj;
            worldViewProj.Transpose();
            context.UpdateSubresource(ref worldViewProj, constantBuffer);

            // Draw the shape.
            context.Draw(shape.vertexCount, 0);
        }
Example #19
		public void TestTranspose()
		{
			Matrix m = new Matrix(2, 3);
			m.SetRow(0, -1.1F, 2.6F, -7.1F);
			m.SetRow(1, 4.6F, -3.7F, 9.1F);

			Matrix result = new Matrix(3, 2);
			result.SetColumn(0, -1.1F, 2.6F, -7.1F);
			result.SetColumn(1, 4.6F, -3.7F, 9.1F);

			Assert.IsTrue(Matrix.AreEqual(m.Transpose(), result));
		}
        // this is directly from the Wikipedia page on the Kabsch algorithm
        public void Recompute2()
        {
            var p = P;
            var q = Q;

            //1. subtract centroids
            for (int i = 0; i < p.Rows; i++) {
                p[i, 0] -= SourceCentroid[0, 0];
                p[i, 1] -= SourceCentroid[1, 0];
                q[i, 0] -= DestCentroid[0, 0];
                q[i, 1] -= DestCentroid[1, 0];
            }

            //2. compute covariance matrix
            var a = p.Transpose()*q;

            //3. compute rotation matrix
            /* perform svd  where A =  V S WT */
            Matrix<double> V = new Matrix<double>(2, 2);
            Matrix<double> S = new Matrix<double>(2, 2);
            Matrix<double> W = new Matrix<double>(2, 2);
            CvInvoke.cvSVD(a.Ptr, S.Ptr, V.Ptr, W.Ptr, SVD_TYPE.CV_SVD_DEFAULT);

            // Deal with reflection matrix
            Matrix<double> m = new Matrix<double>(2, 2);
            m.SetIdentity(new MCvScalar(1));
            m[1,1] = ((W*V.Transpose()).Det<0) ? -1 : 1;

            // Compute the rotation matrix
            Rotation = W*m*V.Transpose();
            //Offset = DestCentroid - (Rotation * SourceCentroid);
            Offset = DestCentroid - SourceCentroid;

            Console.WriteLine("Rotation Matrix - Angle = " + Angle);
            Console.WriteLine(FormatMatrix(Rotation));
        }
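For comparison, the Wikipedia formulation of the Kabsch algorithm referenced above computes the covariance \(H = P^{T} Q\), takes its SVD \(H = U \Sigma V^{T}\), and builds the rotation with a determinant correction that rules out reflections (a sketch; the letter conventions differ from the cvSVD call in the snippet):

\[
d = \operatorname{sign}\bigl(\det(V U^{T})\bigr),
\qquad
R = V \begin{bmatrix} 1 & 0 \\ 0 & d \end{bmatrix} U^{T}.
\]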
Example #21
        public void TestTranspose()
        {
            var matrix = new Matrix<int>(5, 6);
            matrix[1, 1] = 3;
            matrix[1, 3] = 4;
            matrix[3, 1] = 5;

            var transpose = matrix.Transpose();
            Assert.AreEqual(6, transpose.NumberOfRows);
            Assert.AreEqual(5, transpose.NumberOfColumns);
            Assert.AreEqual(3, transpose[1, 1]);
            Assert.AreEqual(4, transpose[3, 1]);
            Assert.AreEqual(5, transpose[1, 3]);

            // check whether it is really a copy
            transpose[1, 1] = 0;
            Assert.AreEqual(3, matrix[1, 1]);
        }
Example #22
      public void TestTransposeByteMatrix()
      {
         using (Matrix<Byte> mat = new Matrix<Byte>(1, 10))
         {
            mat.SetRandUniform(new MCvScalar(0.0), new MCvScalar(255.0));

            Matrix<Byte> matT = mat.Transpose();

            for (int i = 0; i < matT.Rows; i++)
               for (int j = 0; j < matT.Cols; j++)
                  EmguAssert.AreEqual(matT[i, j], mat[j, i]);
         }
      }
        public Matrix KLTransform(Matrix matrix)
        {
            matrix = (Matrix)matrix.Transpose();

            int columnNumber = matrix.ColumnCount;

            Matrix mean = GetMean(matrix);

            // Don't print it because it contains a lot of values
            // PrintMatrix(mean, null, " mean matrix");

            float[,] oneD = new float[1, columnNumber];
            for (int i = 0; i < columnNumber; i++)
            {
                oneD[0, i] = 1;
            }

            // a 1 x n row of ones, used to replicate the mean vector across all columns
            Matrix onesMatrix = DenseMatrix.OfArray(oneD);

            // center the data
            Matrix xm = (DenseMatrix)matrix.Subtract(mean.Multiply(onesMatrix));
           // PrintMatrix(null, xm.ToArray(), "center the data");

            // Calculate covariance matrix

            DenseMatrix cov = (DenseMatrix)(xm.Multiply(xm.Transpose())).Multiply(1.0f / columnNumber);
          //  PrintMatrix(cov, null, "Calculate covariance matrix");
            PrintText(cov.ColumnCount.ToString() + " Calculate covariance  ColumnCount");
            PrintText(cov.RowCount.ToString() + " Calculate covariance  RowCount");

            // this is from another library, Accord.NET
            EigenvalueDecomposition v = new EigenvalueDecomposition(FromFloatMatrixToDouble(cov.ToArray()));

            double[,] eigVectors = v.Eigenvectors;
            double[,] eidDiagonal = v.DiagonalMatrix;

            Matrix eigVectorsTransposedMatrix = (DenseMatrix)DenseMatrix.OfArray(FromDoubleMatrixToFlaot(eigVectors)).Transpose();
            Matrix eidDiagonalMatrix = DenseMatrix.OfArray(FromDoubleMatrixToFlaot(eidDiagonal));

            int rowsCountDiagonals = eidDiagonal.GetLength(0);
            int maxLambdaIndex = 0;
            for (int i = 1; i < rowsCountDiagonals; i++)
            {
                // Track the index of the largest eigenvalue on the diagonal.
                if (eidDiagonal[i, i] > eidDiagonal[maxLambdaIndex, maxLambdaIndex])
                {
                    maxLambdaIndex = i;
                }
            }

            double maxAlpha = eidDiagonal[maxLambdaIndex, maxLambdaIndex];

            double sumAlpha = 0;
            for (int i = 0; i < rowsCountDiagonals; i++)
            {
                sumAlpha += eidDiagonal[i, i];
            }

            CalculateAndPrintError(maxAlpha, sumAlpha);
           PrintText("Max lambda index " + maxLambdaIndex);

           // PrintMatrix(eidDiagonalMatrix, null, "Eign vals");

            // //PCA

            float[,] arr = new float[1, eigVectorsTransposedMatrix.ColumnCount];
            for (int i = 0; i < eigVectorsTransposedMatrix.ColumnCount; i++)
            {
                arr[0, i] = eigVectorsTransposedMatrix[maxLambdaIndex, i];
            }

            Matrix mainComponentMatrix = DenseMatrix.OfArray(arr);
          //  PrintMatrix(mainComponentMatrix, null, "Main Component");


            Matrix pca = (DenseMatrix)mainComponentMatrix.Multiply(xm);
          //  PrintMatrix(pca, null, "PCA");

            return pca;

        }
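The steps above follow the usual Karhunen-Loève / PCA recipe; written out (a sketch of the math, not taken from the original source):

\[
X_m = X - \mu\,\mathbf{1}^{T},\qquad
C = \tfrac{1}{n}\, X_m X_m^{T},\qquad
C = E \Lambda E^{T},\qquad
y = e_{\max}^{T} X_m,
\]

where \(\mu\) is the per-row mean, \(e_{\max}\) is the eigenvector with the largest eigenvalue, and \(y\) is the one-dimensional principal component the method returns.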
Example #24
        protected double GetSVDRotation(Matrix<double>[] p1, Matrix<double>[] p2, out Matrix<double> rotation)
        {
            var centr1 = GetCentroid(p1);
            var centr2 = GetCentroid(p2);

            var q1 = new List<Matrix<double>>();
            var q2 = new List<Matrix<double>>();
            var H = new Matrix<double>(3, 3);

            for (int i = 0; i < Math.Min(p1.Count(), p2.Count()); ++i)
            {
                var q1d = new Matrix<double>(new double[,]
                {
                    {p1[i][0, 0] - centr1[0, 0]},
                    {p1[i][1, 0] - centr1[1, 0]},
                    {p1[i][2, 0] - centr1[2, 0]}
                });

                q1.Add(q1d);

                var q2d = new Matrix<double>(new double[,]
                {
                    {p2[i][0, 0] - centr2[0, 0]},
                    {p2[i][1, 0] - centr2[1, 0]},
                    {p2[i][2, 0] - centr2[2, 0]}
                });

                q2.Add(q2d);

                H = H.Add(q1d.Mul(q2d.Transpose()));
            }

            var U = new Matrix<double>(3, 3);
            var W = new Matrix<double>(3, 3);
            var V = new Matrix<double>(3, 3);

            CvInvoke.cvSVD(H, W, U, V, SVD_TYPE.CV_SVD_DEFAULT);

            var X = V.Mul(U.Transpose());

            var detX = CvInvoke.cvDet(X);

            rotation = X;
            return detX;
        }
        // this is from the blog entry
        public void Recompute()
        {
            if (source == null || dest == null || source.Count != dest.Count)
                throw new Exception("Input data null or not equal in length");

            // compute covariance matrix
            Matrix<double> H = new Matrix<double>(2, 2);

            H.SetZero();
            for (int i = 0; i < source.Count; i++) {
                var a = source[i].ToMatrix() - SourceCentroid;
                var b = dest[i].ToMatrix() - DestCentroid;
                H += a * b.Transpose();
            }

            /* perform svd  where A =  U W VT
             *  A  IntPtr  Source MxN matrix
             *  W  IntPtr  Resulting singular value matrix (MxN or NxN) or vector (Nx1).
             *  U  IntPtr  Optional left orthogonal matrix (MxM or MxN). If CV_SVD_U_T is specified, the number of rows and columns in the sentence above should be swapped
             *  V  IntPtr  Optional right orthogonal matrix (NxN)
             */

            Matrix<double> U = new Matrix<double>(2, 2);
            Matrix<double> W = new Matrix<double>(2, 2);
            Matrix<double> V = new Matrix<double>(2, 2);
            CvInvoke.cvSVD(H.Ptr, W.Ptr, U.Ptr, V.Ptr, SVD_TYPE.CV_SVD_DEFAULT);

            // compute rotational matrix R=V*UT
            Rotation = V * U.Transpose();

            // find translation
            //Offset = DestCentroid - ( Rotation * SourceCentroid);
            Offset = DestCentroid - SourceCentroid;

            if (Angle > 5) {
                Global.Instance.mainForm.ShowSimpleMessageBox("Excessive Angle Detected - Problem detecting rotation\nOffset = " + new PartLocation(Offset-DestCentroid) + "\nAngle=" + Angle);
            }
        }
 public static void QrDecomposition(this Matrix<double, RealSpace> matrix, out Matrix<double, RealSpace> q, out Matrix<double, RealSpace> r)
 {
     q = matrix.GramSchmidt();
     r = q.Transpose() * matrix;
 }
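This works because Gram-Schmidt yields a \(Q\) with orthonormal columns spanning the same space as the input, and each column of the input lies in the span of the first columns of \(Q\) (a sketch of the identity being used, not taken from the original source):

\[
Q^{T} Q = I
\quad\Longrightarrow\quad
R = Q^{T} A \text{ is upper triangular and } A = Q R.
\]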
        private bool SetShaderParameters(DeviceContext deviceContext, Matrix worldMatrix, Matrix viewMatrix, Matrix projectionMatrix, ShaderResourceView[] textures, Vector3 lightDirection, Vector4 diffuseColor, Vector3 cameraPosition, Vector4 specularColor, float specularPower)
        {
            try
            {
                #region Constant Matrix Buffer
                // Transpose the matrices to prepare them for shader.
                worldMatrix.Transpose();
                viewMatrix.Transpose();
                projectionMatrix.Transpose();

                // Lock the constant buffer so it can be written to.
                DataStream mappedResource;
                deviceContext.MapSubresource(ConstantMatrixBuffer, MapMode.WriteDiscard, SharpDX.Direct3D11.MapFlags.None, out mappedResource);

                // Copy the matrices into the constant buffer.
                var matrixBuffer = new MatrixBuffer()
                {
                    world = worldMatrix,
                    view = viewMatrix,
                    projection = projectionMatrix
                };

                mappedResource.Write(matrixBuffer);

                // Unlock the constant buffer.
                deviceContext.UnmapSubresource(ConstantMatrixBuffer, 0);

                // Set the position of the constant buffer in the vertex shader.
                var bufferNumber = 0;

                // Finally set the constant buffer in the vertex shader with the updated values.
                deviceContext.VertexShader.SetConstantBuffer(bufferNumber, ConstantMatrixBuffer);

                // Set shader resource in the pixel shader.
                deviceContext.PixelShader.SetShaderResources(0, textures);
                #endregion

                #region Constant Light Buffer
                // Lock the light constant buffer so it can be written to.
                deviceContext.MapSubresource(ConstantLightBuffer, MapMode.WriteDiscard, SharpDX.Direct3D11.MapFlags.None, out mappedResource);

                // Copy the lighting variables into the constant buffer.
                var lightBuffer = new LightBuffer()
                {
                    diffuseColor = diffuseColor,
                    lightDirection = lightDirection,
                    specularColor = specularColor,
                    specularPower = specularPower,
                };

                mappedResource.Write(lightBuffer);

                // Unlock the constant buffer.
                deviceContext.UnmapSubresource(ConstantLightBuffer, 0);

                // Set the position of the light constant buffer in the pixel shader.
                bufferNumber = 0;

                // Finally set the light constant buffer in the pixel shader with the updated values.
                deviceContext.PixelShader.SetConstantBuffer(bufferNumber, ConstantLightBuffer);
                #endregion

                #region Constant Camera Buffer
                // Lock the camera constant buffer so it can be written to.
                deviceContext.MapSubresource(ConstantCameraBuffer, MapMode.WriteDiscard, SharpDX.Direct3D11.MapFlags.None, out mappedResource);

                // Copy the lighting variables into the constant buffer.
                var cameraBuffer = new CameraBuffer()
                {
                    cameraPosition = cameraPosition,
                    padding = 0.0f
                };

                mappedResource.Write(cameraBuffer);

                // Unlock the constant buffer.
                deviceContext.UnmapSubresource(ConstantCameraBuffer, 0);

                // Set the position of the light constant buffer in the pixel shader.
                bufferNumber = 1;

                // Now set the camera constant buffer in the vertex shader with the updated values.
                deviceContext.VertexShader.SetConstantBuffer(bufferNumber, ConstantCameraBuffer);
                #endregion

                return true;
            }
            catch (Exception)
            {
                return false;
            }
        }
        /// <summary>
        /// Each column is one vertex - CORRECTED
        /// </summary>
        /// <param name="PointsModel1">First point set, one vertex per column</param>
        /// <param name="PointsModel2">Second point set, one vertex per column</param>
        /// <param name="p_mRotationMatrix">Resulting rotation matrix</param>
        /// <param name="p_mTranslationMatrix">Resulting translation vector</param>
        public static void CalculateRotationAndTranslation(Matrix PointsModel1, Matrix PointsModel2, out Iridium.Numerics.LinearAlgebra.Matrix p_mRotationMatrix, out Iridium.Numerics.LinearAlgebra.Matrix p_mTranslationMatrix)
        {
            if (PointsModel1.ColumnCount != PointsModel2.ColumnCount || PointsModel1.RowCount != PointsModel2.RowCount)
                throw new Exception("The two matrices do not have the same dimensions");

            Matrix MeanValModel1 = new Matrix(PointsModel1.RowCount, 1);
            Matrix MeanValModel2 = new Matrix(PointsModel1.RowCount, 1);

            for (int j = 0; j < PointsModel1.RowCount; j++)
            {
                for (int i = 0; i < PointsModel1.ColumnCount; i++)
                {
                    MeanValModel1[j, 0] += PointsModel1[j, i];
                    MeanValModel2[j, 0] += PointsModel2[j, i];
                }
            }
            MeanValModel1 *= (1.0f / PointsModel1.ColumnCount);
            MeanValModel2 *= (1.0f / PointsModel1.ColumnCount);

            Matrix CenteredModel1 = new Matrix(PointsModel1.RowCount, PointsModel1.ColumnCount);
            Matrix CenteredModel2 = new Matrix(PointsModel1.RowCount, PointsModel1.ColumnCount);

            for (int i = 0; i < PointsModel1.RowCount; i++)
            {
                for (int j = 0; j < PointsModel1.ColumnCount; j++)
                {
                    CenteredModel1[i, j] = PointsModel1[i, j] - MeanValModel1[i, 0];
                    CenteredModel2[i, j] = PointsModel2[i, j] - MeanValModel2[i, 0];
                }
            }

            CenteredModel2.Transpose();

            Iridium.Numerics.LinearAlgebra.Matrix Covariance = CenteredModel1 * CenteredModel2;

            Iridium.Numerics.LinearAlgebra.SingularValueDecomposition SVD = new Iridium.Numerics.LinearAlgebra.SingularValueDecomposition(Covariance);
            Iridium.Numerics.LinearAlgebra.Matrix U = SVD.LeftSingularVectors;
            Iridium.Numerics.LinearAlgebra.Matrix V = SVD.RightSingularVectors;

            Iridium.Numerics.LinearAlgebra.Matrix s = new Iridium.Numerics.LinearAlgebra.Matrix(PointsModel1.RowCount, 1.0);

            if (Covariance.Rank() < 2)
                throw new Exception("Cannot align generic model (covariance rank is less than 2)");

            if (Covariance.Rank() == 2) // m-1 where m is dimension space (3D)
            {
                double detU = Math.Round(U.Determinant());
                double detV = Math.Round(V.Determinant());
                double detC = Covariance.Determinant();
                if ((int)detU * (int)detV == 1)
                    s[PointsModel1.RowCount - 1, PointsModel1.RowCount - 1] = 1;
                else if ((int)detU * (int)detV == -1)
                    s[PointsModel1.RowCount - 1, PointsModel1.RowCount - 1] = -1;
                else
                    throw new Exception("Determinant of U and V are not in conditions");
            }
            else
            {
                if (Covariance.Determinant() < 0)
                    s[PointsModel1.RowCount - 1, PointsModel1.RowCount - 1] = -1;
            }

            V.Transpose();
            Iridium.Numerics.LinearAlgebra.Matrix Rotation = U * s * V;

            Iridium.Numerics.LinearAlgebra.Matrix Translation = MeanValModel1 - Rotation * MeanValModel2;

            p_mRotationMatrix = Rotation;
            p_mTranslationMatrix = Translation;
        }
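In matrix form, the alignment computed above is (a sketch of the underlying formulas, not taken from the original source):

\[
H = \tilde{P}_1 \tilde{P}_2^{\,T} = U \Sigma V^{T},
\qquad
R = U\, S\, V^{T},
\qquad
T = \mu_1 - R\,\mu_2,
\]

where \(\tilde{P}_1, \tilde{P}_2\) are the centered point sets, \(\mu_1, \mu_2\) their centroids, and \(S\) is the identity with its last diagonal entry replaced by \(\pm 1\) to rule out a reflection.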
        private bool SetShaderParameters(DeviceContext deviceContext, Matrix worldMatrix, Matrix viewMatrix, Matrix projectionMatrix, ShaderResourceView texture)
        {
            try
            {
                // Transpose the matrices to prepare them for shader.
                worldMatrix.Transpose();
                viewMatrix.Transpose();
                projectionMatrix.Transpose();

                // Lock the constant buffer so it can be written to.
                DataStream mappedResource;
                deviceContext.MapSubresource(ConstantMatrixBuffer, MapMode.WriteDiscard, SharpDX.Direct3D11.MapFlags.None, out mappedResource);

                // Copy the matrices into the constant buffer.
                var matrixBuffer = new MatrixBuffer()
                {
                    world = worldMatrix,
                    view = viewMatrix,
                    projection = projectionMatrix
                };

                mappedResource.Write(matrixBuffer);

                // Unlock the constant buffer.
                deviceContext.UnmapSubresource(ConstantMatrixBuffer, 0);

                // Set the position of the constant buffer in the vertex shader.
                var bufferNumber = 0;

                // Finally set the constant buffer in the vertex shader with the updated values.
                deviceContext.VertexShader.SetConstantBuffer(bufferNumber, ConstantMatrixBuffer);

                // Set shader resource in the pixel shader.
                deviceContext.PixelShader.SetShaderResource(0, texture);

                return true;
            }
            catch (Exception)
            {
                return false;
            }
        }
Example #30
        public void TransposeMatrixTest()
        {
            var matrix = new Matrix(new[]
            {
                new Vector(new[] { new Number(1), new Number(2) }),
                new Vector(new[] { new Number(3), new Number(4) }),
                new Vector(new[] { new Number(5), new Number(6) })
            });

            var expected = new Matrix(new[]
            {
                new Vector(new[] { new Number(1), new Number(3), new Number(5) }),
                new Vector(new[] { new Number(2), new Number(4), new Number(6) })
            });
            var result = matrix.Transpose();

            Assert.AreEqual(expected, result);
        }