Ejemplo n.º 1
0
 public static Character CreateCharacter(Skeleton skeleton, Matrix<double> kinectToGlobal)
 {
     // Builds a Character positioned at the skeleton's right-hand joint,
     // transformed from Kinect camera coordinates into the global frame.
     Character res = new Character();

     // BUG FIX: the original code first assigned skeleton.Position to
     // res.Position and then immediately overwrote it with the right-hand
     // position — the first Multiply was a dead store doing wasted work.
     // Only the right-hand transform is kept. (If the torso position was
     // meant to land in a different property of Character, that property
     // is not visible here — TODO confirm against the Character type.)
     var rightHand = skeleton.Joints[JointType.HandRight];
     res.Position = kinectToGlobal.Multiply(new DenseVector(new double[] { rightHand.Position.X, rightHand.Position.Y, rightHand.Position.Z }));
     return res;
 }
Ejemplo n.º 2
0
        /// <summary>
        /// Applies the linear model's decision function to a batch of samples.
        /// </summary>
        /// <param name="x">(n_samples, n_features)Samples.</param>
        /// <returns>Predicted values.</returns>
        /// <exception cref="ArgumentException">
        /// If the number of columns in <paramref name="x"/> does not match the
        /// number of features the model was fitted with.
        /// </exception>
        public Matrix<double> DecisionFunction(Matrix<double> x)
        {
            // The fitted coefficient matrix fixes how many features we accept.
            int expectedFeatures = this.Coef.ColumnCount;
            if (x.ColumnCount != expectedFeatures)
            {
                throw new ArgumentException(
                    string.Format(
                    "X has {0} features per sample; expecting {1}",
                    x.ColumnCount,
                    expectedFeatures));
            }

            // todo: use TransposeAndMultiply. But there's bug in Math.Net
            // which appears with sparse matrices.
            var scores = x.Multiply(this.Coef.Transpose());

            // Add the per-output intercept to every row, in place.
            scores.AddRowVector(this.Intercept, scores);
            return scores;
        }
Ejemplo n.º 3
0
        /// <summary>
        /// Logistic-regression batch gradient ascent: learns a weight vector that
        /// separates the rows of <paramref name="data"/> into the two classes
        /// given by <paramref name="targetClassification"/> (0/1 labels).
        /// </summary>
        /// <param name="data">Sample matrix, one row per instance.</param>
        /// <param name="targetClassification">Expected class (0 or 1) per row.</param>
        /// <returns>The learned weight vector after 500 ascent cycles.</returns>
        public static Vector<double> GetWeights(Matrix<double> data, Vector<double> targetClassification)
        {
            int featureCount = data.ColumnCount;

            // The unknowns we are solving for; every weight starts at 1.0,
            // i.e. the ascent begins from wherever the raw data puts us.
            Vector<double> weights = DenseVector.Create(featureCount, i => 1.0);

            // Learning rate: the fraction of the gradient added per cycle.
            const double alpha = 0.001;

            for (int cycle = 0; cycle < 500; cycle++)
            {
                // Sigmoid of (data * weights) maps each sample onto (0, 1):
                // above 0.5 reads as class 1, below as class 0, and values near
                // the extremes mean high confidence under the current weights.
                //
                // It does not matter if a sample is misclassified right now —
                // e.g. a class-0 point like [1.0, -0.017612, 14.053064] may
                // initially score ~0.9999998 ("class 1") — because that large
                // error feeds the gradient, and over the cycles the ascent
                // drags the prediction toward the true class.
                var predictions = DenseVector.OfEnumerable(data.Multiply(weights).Select(Sigmoid));

                // How far we are from the expected labels; this plays the role
                // of the (x2 - x1) difference in a derivative.
                var error = targetClassification.Subtract(predictions);

                // Direction of change for the weights. A large error means we
                // are far off and take a bigger step toward the target data; a
                // small error means we are close, so the delta shrinks.
                var gradient = data.Transpose() * error;

                // Take a small, alpha-scaled step along the gradient.
                weights = weights + alpha * gradient;
            }

            return weights;
        }
        /// <summary>
        /// Multiplies each element of the matrix by a scalar and places results into the result matrix.
        /// </summary>
        /// <param name="scalar">The scalar to multiply the matrix with.</param>
        /// <param name="result">The matrix to multiply.</param>
        /// <exception cref="ArgumentNullException">If the result matrix is <see langword="null" />.</exception>
        /// <exception cref="ArgumentException">If the result matrix's dimensions are not the same as this matrix.</exception>
        public virtual void Multiply(double scalar, Matrix result)
        {
            // Validate the destination before touching any data; the check
            // order (null, rows, columns) determines which exception wins.
            if (result == null)
            {
                throw new ArgumentNullException("result");
            }

            if (RowCount != result.RowCount)
            {
                throw new ArgumentException(Resources.ArgumentMatrixSameRowDimension, "result");
            }

            if (ColumnCount != result.ColumnCount)
            {
                throw new ArgumentException(Resources.ArgumentMatrixSameColumnDimension, "result");
            }

            // Copy this matrix into the destination, then scale it in place.
            CopyTo(result);
            result.Multiply(scalar);
        }
Ejemplo n.º 5
0
 // Gathers the entries of LeftV selected by Index into a dense vector of
 // length Index.Count, then returns RightM multiplied by that gathered vector.
 private Vector MultiplyByIndex(Matrix RightM, Vector LeftV, List<int> Index)
 {
     Vector gathered = new DenseVector(Index.Count);
     for (int pos = 0; pos < Index.Count; pos++)
     {
         gathered[pos] = LeftV[Index[pos]];
     }

     return (Vector)RightM.Multiply(gathered);
 }