Example #1
        public void Learn(
            Vector <double>[] trainingInput,
            Vector <double>[] trainingOutput,
            Vector <double>[] testInput,
            Vector <double>[] testOutput,
            int batch,
            double minError,
            int epochs)
        {
            Contract.Requires(trainingInput.Length == trainingOutput.Length);
            Contract.Requires(testInput.Length == testOutput.Length);
            Contract.Requires(trainingInput[0].Count == N + 1);
            Contract.Requires(testInput[0].Count == N + 1);

            double[] desiredTrainingOutput = trainingOutput.Select(o => o.At(0)).ToArray();
            double[] desiredTestOutput     = testOutput.Select(o => o.At(0)).ToArray();

            // Prepend the value 1 to the beginning of each input vector (bias term).
            Vector <double>[] input = new Vector <double> [trainingInput.Length];
            for (int i = 0; i < input.Length; i++)
            {
                input[i] = Vector <double> .Build.DenseOfEnumerable(new double[] { 1 }.Concat(trainingInput[i]));
            }
            int             p = input.Length;
            Vector <double> w = CreateVector.Random <double>(N + 1, new ContinuousUniform(-1d, 1d));
            Vector <double> deltaW = null;
            double          error = 1, error_min = p * 2;
            Vector <double> w_min = w;

            for (int i = 0, n = 0; i < epochs && error_min > minError; i++, n++)
            {
                if (n > 100 * p)
                {
                    w = CreateVector.Random <double>(N + 1, new ContinuousUniform(-1d, 1d));
                    n = 0;
                }

                int[]  rand = Combinatorics.GeneratePermutation(input.Length);
                double lr   = this.AdaptiveLearningRate ? optimizing(w, input, desiredTrainingOutput, batch, rand) : LearningRate;
                int    j;
                for (j = 0; j < input.Length; j++)
                {
                    int             ix    = rand[j];
                    double          h     = input[ix] * w;
                    double          act   = ActivationFunction(h);
                    Vector <double> delta = lr * (desiredTrainingOutput[ix] - act) * input[ix] * ActivationFunctionDerivative(h);
                    deltaW = deltaW == null ? delta : deltaW + delta;
                    if (j % batch == 0)
                    {
                        w     += deltaW;
                        deltaW = null;
                        error  = CalculateError(input, desiredTrainingOutput, w);
                        if (error < error_min)
                        {
                            error_min = error;
                            w_min     = w;
                        }
                    }
                }
                if (deltaW != null)
                {
                    w     += deltaW;
                    deltaW = null;
                    error  = CalculateError(input, desiredTrainingOutput, w);
                    if (error < error_min)
                    {
                        error_min = error;
                        w_min     = w;
                    }
                }
            }
            W = w_min;
        }
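
For readers new to MathNet.Numerics, the two calls doing the numeric work above are CreateVector.Random (a weight vector drawn from ContinuousUniform(-1, 1)) and the '*' operator, which for two vectors is the dot product. A minimal sketch of just those calls, with illustrative sizes and values:

// Minimal sketch of the MathNet.Numerics calls used in Learn (sizes and values are illustrative).
using System;
using System.Linq;
using MathNet.Numerics.Distributions;
using MathNet.Numerics.LinearAlgebra;

class DotProductSketch
{
    static void Main()
    {
        // Weight vector with components drawn uniformly from [-1, 1].
        Vector<double> w = CreateVector.Random<double>(4, new ContinuousUniform(-1d, 1d));

        // Input vector with a leading 1 acting as the bias term, as in Learn.
        Vector<double> x = Vector<double>.Build.DenseOfEnumerable(
            new double[] { 1 }.Concat(new double[] { 0.2, -0.5, 0.7 }));

        // For two vectors, '*' is the dot product and returns a double.
        double h = x * w;
        Console.WriteLine(h);
    }
}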
Example #2
        public double[] SolveEquationsSystem()
        {
            double[] solution = new double[_eqSystem.EquationFunctions.Count];

            // Perform the zeroth iteration using the initial values supplied by the settings.

            // Evaluate the Jacobian at the initial values.
            double epsJacobian = 0.00001;

            double[,] jacob = _eqSystem.GetJacobianMatrix(_settings.InitialValues, epsJacobian);
            double[] eqVals = _eqSystem.GetEquationsValues(_settings.InitialValues);

            Matrix <double> matrJacobian        = CreateMatrix.DenseOfArray(jacob);
            Vector <double> vectEquationsValues = CreateVector.DenseOfArray(eqVals);

            Matrix <double> matrInvJacobian = matrJacobian.Inverse();
            Vector <double> deltaX          = matrInvJacobian.Multiply(-1.0).Multiply(vectEquationsValues);

            Vector <double> vectPrevIterX = CreateVector.DenseOfArray(_settings.InitialValues);

            for (int i = 0; i < _settings.MaxIterations; i++)
            {
                Vector <double> vectCurrIterX = vectPrevIterX.Add(deltaX);
                solution = vectCurrIterX.ToArray();

                jacob  = _eqSystem.GetJacobianMatrix(vectCurrIterX.ToArray(), epsJacobian);
                eqVals = _eqSystem.GetEquationsValues(vectCurrIterX.ToArray());

                matrJacobian        = CreateMatrix.DenseOfArray(jacob);
                vectEquationsValues = CreateVector.DenseOfArray(eqVals);

                matrInvJacobian = matrJacobian.Inverse();
                deltaX          = matrInvJacobian.Multiply(-1.0).Multiply(vectEquationsValues);

                double[] currIterEpsX = new double[vectCurrIterX.Count];

                for (int j = 0; j < vectCurrIterX.Count; j++)
                {
                    currIterEpsX[j] = Math.Abs(vectCurrIterX[j] - vectPrevIterX[j]);
                }

                bool isPreciseSolution = true;

                for (int j = 0; j < currIterEpsX.Length; j++)
                {
                    if (currIterEpsX[j] > _settings.Precision)
                    {
                        isPreciseSolution = false;
                        break;
                    }
                }

                if (isPreciseSolution == true)
                {
                    break;
                }
                else
                {
                    vectPrevIterX = CreateVector.DenseOfArray(vectCurrIterX.ToArray());

                    bool isZeroEqVals = true;

                    for (int k = 0; k < eqVals.Length; k++)
                    {
                        if (Math.Abs(eqVals[k]) > _settings.Precision)
                        {
                            isZeroEqVals = false;
                            break;
                        }
                    }

                    if (isZeroEqVals == true)
                    {
                        break;
                    }
                }
            }

            return(solution);
        }
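
Each iteration above forms deltaX = -J^(-1) * F(x) by explicitly inverting the Jacobian. MathNet.Numerics can perform the same step without the explicit inverse via Matrix<double>.Solve, which is generally cheaper and more numerically stable. A hedged sketch of one Newton step on a small system chosen only for illustration:

// One Newton-Raphson step using Matrix.Solve instead of an explicit inverse.
// The 2x2 system (x^2 + y^2 = 4, x*y = 1) is purely illustrative.
using System;
using MathNet.Numerics.LinearAlgebra;

class NewtonStepSketch
{
    static void Main()
    {
        Vector<double> x = CreateVector.DenseOfArray(new[] { 1.5, 0.5 });

        // Residuals F(x) of the two equations at the current point.
        Vector<double> F = CreateVector.DenseOfArray(new[]
        {
            x[0] * x[0] + x[1] * x[1] - 4.0,
            x[0] * x[1] - 1.0
        });

        // Analytic Jacobian of the system at the current point.
        Matrix<double> J = CreateMatrix.DenseOfArray(new[,]
        {
            { 2.0 * x[0], 2.0 * x[1] },
            { x[1],       x[0]       }
        });

        // Solve J * deltaX = -F; same result as -J^(-1) * F, without forming the inverse.
        Vector<double> deltaX = J.Solve(-F);
        Console.WriteLine(x + deltaX);
    }
}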
Example #3
        // data
        static void LoadData()
        {
            // read file & parse header
            string[] lines = File.ReadAllLines(filePath);
            if (lines.Length < 4)
            {
                filePath = "none";
                return;
            }
            if (!lines[0].StartsWith("DatLen"))
            {
                filePath = "none";
                return;
            }
            size = Convert.ToInt32(lines[0].Substring(7));
            if (!lines[1].StartsWith("InpDim"))
            {
                filePath = "none";
                return;
            }
            int inpDim = Convert.ToInt32(lines[1].Substring(7));

            if (!lines[2].StartsWith("OutDim"))
            {
                filePath = "none";
                return;
            }
            int outDim = Convert.ToInt32(lines[2].Substring(7));

            // read points
            if (lines.Length != 3 + size)
            {
                filePath = "none";
                return;
            }
            points = new DataPoint[size];
            for (int i = 0; i < size; i++)
            {
                // get numbers
                string[] numbers = lines[i + 3].Split(' ', '|', '\t');
                if (numbers.Length != inpDim + outDim)
                {
                    Console.WriteLine($"WARNING: incomplete line #{i+3} in data file '{filePath}' was ignored.");
                    continue;
                }
                // parse numbers
                double[] input  = new double[inpDim];
                double[] output = new double[outDim];
                for (int j = 0; j < inpDim; j++)
                {
                    input[j] = Convert.ToDouble(numbers[j]);
                }
                for (int j = inpDim; j < numbers.Length; j++)
                {
                    output[j - inpDim] = Convert.ToDouble(numbers[j]);
                }
                // create data point
                Vector <double> x = CreateVector.DenseOfArray(input);
                Vector <double> y = CreateVector.DenseOfArray(output);
                points[i] = new DataPoint(x, y);
            }

            // preprocess
            decorrelated = false;
            standartized = false;
            if (doDecorrelation)
            {
                Decorrelate(points);
                decorrelated = true;
            }
            Normalize(points);
            if (doStandartization)
            {
                Standartize(points);
                standartized = true;
            }
        }
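
One caveat with the parsing above: Convert.ToDouble uses the current culture, so a file written with '.' decimal separators can fail to parse on locales that use ','. A sketch of a culture-invariant alternative for turning one separated line into a MathNet vector (the line content is hypothetical):

// Culture-invariant parsing of one data line into a Vector<double> (input is illustrative).
using System;
using System.Globalization;
using System.Linq;
using MathNet.Numerics.LinearAlgebra;

class ParseLineSketch
{
    static void Main()
    {
        string line = "1.5 -0.25 3.0";
        double[] values = line
            .Split(new[] { ' ', '|', '\t' }, StringSplitOptions.RemoveEmptyEntries)
            .Select(s => double.Parse(s, CultureInfo.InvariantCulture))
            .ToArray();
        Vector<double> x = CreateVector.DenseOfArray(values);
        Console.WriteLine(x);
    }
}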
Example #4
        // for Proposed_local
        public static void Proposed_local(string fn, int v_init)
        {
            var H = Hypergraph.Open(fn);

            var time = new System.Diagnostics.Stopwatch();

            time.Start();

            int n = H.n;
            int m = H.m;

            const double eps = 0.9;

            const double dt = 1.0;
            const double T  = 30.0;

            var A_cand = new List <double>();

            for (int i = 0; i <= Math.Log(n * m) / Math.Log(1 + eps); i++)
            {
                A_cand.Add(Math.Pow(1 + eps, i) / (n * m));
            }

            var edge_size = new Dictionary <int, int>();

            for (int eid = 0; eid < H.m; eid++)
            {
                edge_size.Add(eid, H.ID_rev[eid].Count());
            }

            double min_conductance = double.MaxValue;

            foreach (double alpha in A_cand)
            {
                var vec = CreateVector.Dense <double>(n);

                vec[v_init] = 1.0;

                vec = Hypergraph.Simulate(H, vec, v_init, dt, T, alpha);

                for (int i = 0; i < n; i++)
                {
                    vec[i] /= H.w_Degree(i);
                }

                int[] index = Enumerable.Range(0, n).ToArray <int>();
                Array.Sort <int>(index, (a, b) => vec[a].CompareTo(vec[b]));

                Array.Reverse(index);

                double vol_V = 0;
                for (int i = 0; i < n; i++)
                {
                    vol_V += H.w_Degree(i);
                }

                var num_contained_nodes = new Dictionary <int, int>();
                for (int eid = 0; eid < H.m; eid++)
                {
                    num_contained_nodes.Add(eid, 0);
                }

                double cut_val     = 0;
                double vol_S       = 0;
                double conductance = double.MaxValue;
                int    best_index  = -1;

                foreach (int i in index)
                {
                    vol_S += H.w_Degree(i);
                    if (vol_S <= vol_V / 10.0)
                    {
                        foreach (var e in H.incident_edges[i])
                        {
                            if (num_contained_nodes[e] == 0)
                            {
                                cut_val += H.weights[e];
                            }
                            if (num_contained_nodes[e] == edge_size[e] - 1)
                            {
                                cut_val -= H.weights[e];
                            }
                            num_contained_nodes[e] += 1;
                        }
                        conductance = cut_val / Math.Min(vol_S, vol_V - vol_S);
                        if (conductance < min_conductance)
                        {
                            min_conductance = conductance;
                            best_index      = i;
                        }
                    }
                    else
                    {
                        break;
                    }
                }
            }
            time.Stop();
            TimeSpan ts = time.Elapsed;

            Console.WriteLine("conductance: " + min_conductance);
            Console.WriteLine("time(s): " + time.ElapsedMilliseconds / 1000.0);
        }
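
Outside of the Hypergraph-specific parts, the sweep above orders vertices by their degree-normalized score in descending order before accumulating cut and volume. That ordering pattern in isolation, on a hypothetical score vector:

// Sorting indices by descending score, as done before the sweep above (scores are illustrative).
using System;
using System.Linq;
using MathNet.Numerics.LinearAlgebra;

class SweepOrderSketch
{
    static void Main()
    {
        Vector<double> vec = CreateVector.DenseOfArray(new[] { 0.1, 0.7, 0.3, 0.5 });

        int[] index = Enumerable.Range(0, vec.Count).ToArray();
        Array.Sort(index, (a, b) => vec[a].CompareTo(vec[b])); // ascending by score
        Array.Reverse(index);                                  // descending by score

        Console.WriteLine(string.Join(", ", index));           // 1, 3, 2, 0
    }
}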
Example #5
        // Data must be sorted before using this method
        internal static void ApproximateExcessDistributionParametersBFGS(List <double> data, out double a, out double c, out double u)
        {
            double Fitness(Vector <double> input) // Input is assumed to be (a,c,u)
            {
                double sum = 0;
                //double weightsum = 0;

                // Compute the index of the next largest element that is at or after u in the data
                int nextLargestIndex = data.BinarySearch(input[2]);

                if (nextLargestIndex < 0)
                {
                    nextLargestIndex = ~nextLargestIndex + 1;
                }
                int knotCount = data.Count - nextLargestIndex;

                /* ECDF version MSE
                 * for (int i = 0; i < knotCount; i++)
                 * {
                 *  // The largest deviation should occur at one of the step points
                 *  double GHat = TailCDF(data[nextLargestIndex + i] - input[2], input[0], input[1]); // Args: x - u, a, c
                 *  double residual = i * 1.0 / knotCount - GHat; // Deviation from the top of the step at x_i
                 *  //double weight = knotCount - i + 1;
                 *  sum += residual * residual;
                 *  //sum += Math.Abs(residual) * weight;
                 *  //weightsum += weight;
                 * }
                 * return sum / knotCount; // Consider dividing by n or n^2 here
                 */
                //return sum / (weightsum * knotCount);

                // Smoothed version MSE
                for (int i = 0; i < knotCount - 1; i++)
                {
                    double GHat     = TailCDF(0.5 * (data[nextLargestIndex + i] + data[nextLargestIndex + i + 1]) - input[2], input[0], input[1]);
                    double residual = (2.0 * i + 3) / (2.0 * knotCount) - GHat;
                    sum += residual * residual;
                }
                //return sum / knotCount;
                return((1 + Math.Abs(input[1])) * sum / knotCount); // Weighted so that smaller magnitudes of c are preferred
            }

            // Get Pickands' estimates of a and c for m = n/16 + 1 as starting guesses, consistent with Z4M at the 75th percentile
            //double pickandsEstA, pickandsEstC;
            EstimateParams(data, data.Count / 16 + 1, out double pickandsEstC, out double pickandsEstA);
            double lowerBoundA = 0;
            double upperBoundA = 3 * pickandsEstA + 1;
            double lowerBoundC = Math.Min(3 * pickandsEstC, -3 * pickandsEstC) - 1;
            double upperBoundC = -lowerBoundC;
            // Initial guess for u is at data[(3/4)n]

            var optimum = FindMinimum.OfFunctionConstrained(Fitness,
                                                            lowerBound: CreateVector.DenseOfArray(new double[] { lowerBoundA, lowerBoundC, data[0] }),
                                                            upperBound: CreateVector.DenseOfArray(new double[] { upperBoundA, upperBoundC, data[data.Count - 3] }),
                                                            initialGuess: CreateVector.DenseOfArray(new double[] { pickandsEstA, pickandsEstC /*Math.Min(pickandsEstC, 0)*/, data[data.Count * 3 / 4] }));

            // Return parameters
            a = optimum[0];
            c = optimum[1];
            u = optimum[2];
        }
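
The optimizer call here is FindMinimum.OfFunctionConstrained, which runs a bound-constrained minimization over the box [lowerBound, upperBound]. A minimal hedged sketch of the same call on a simple quadratic objective; the bounds, starting point, and objective are illustrative only:

// Minimal bound-constrained minimization with FindMinimum.OfFunctionConstrained (objective is illustrative).
using System;
using MathNet.Numerics;
using MathNet.Numerics.LinearAlgebra;

class ConstrainedMinSketch
{
    static void Main()
    {
        // Objective: (x - 2)^2 + (y + 1)^2, minimized at (2, -1).
        Func<Vector<double>, double> f = v => Math.Pow(v[0] - 2.0, 2) + Math.Pow(v[1] + 1.0, 2);

        Vector<double> optimum = FindMinimum.OfFunctionConstrained(
            f,
            lowerBound: CreateVector.DenseOfArray(new[] { -5.0, -5.0 }),
            upperBound: CreateVector.DenseOfArray(new[] { 5.0, 5.0 }),
            initialGuess: CreateVector.DenseOfArray(new[] { 0.0, 0.0 }));

        Console.WriteLine(optimum); // expected to be close to (2, -1)
    }
}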
Example #6
        // BFGF Minimizer
        public static Value Argmin(Value function, Value initial, Value tolerance, Netlist netlist, Style style, int s)
        {
            if (!(initial is ListValue <Value>))
            {
                throw new Error("argmin: expecting a list for second argument");
            }
            Vector <double> initialGuess = CreateVector.Dense((initial as ListValue <Value>).ToDoubleArray("argmin: expecting a list of numbers for second argument"));

            if (!(tolerance is NumberValue))
            {
                throw new Error("argmin: expecting a number for third argument");
            }
            double toler = (tolerance as NumberValue).value;

            if (!(function is FunctionValue))
            {
                throw new Error("argmin: expecting a function as first argument");
            }
            FunctionValue closure = function as FunctionValue;

            if (closure.parameters.parameters.Count != 1)
            {
                throw new Error("argmin: initial values and function parameters have different lengths");
            }

            IObjectiveFunction objectiveFunction = ObjectiveFunction.Gradient(
                (Vector <double> objParameters) => {
                const string badResult  = "argmin: objective function should return a list with a number (cost) and a list of numbers (partial derivatives of cost)";
                List <Value> parameters = new List <Value>();
                foreach (double parameter in objParameters)
                {
                    parameters.Add(new NumberValue(parameter));
                }
                ListValue <Value> arg1 = new ListValue <Value>(parameters);
                List <Value> arguments = new List <Value>();
                arguments.Add(arg1);
                bool autoContinue = netlist.autoContinue;
                netlist.autoContinue = true;
                Value result           = closure.ApplyReject(arguments, netlist, style, s);
                if (result == null)
                {
                    throw new Error(badResult);
                }
                netlist.autoContinue = autoContinue;
                if (!(result is ListValue <Value>))
                {
                    throw new Error(badResult);
                }
                List <Value> results = (result as ListValue <Value>).elements;
                if (results.Count != 2 || !(results[0] is NumberValue) || !(results[1] is ListValue <Value>))
                {
                    throw new Error(badResult);
                }
                double cost = (results[0] as NumberValue).value;
                ListValue <Value> gradients = results[1] as ListValue <Value>;
                KGui.gui.GuiOutputAppendText("argmin: parameters=" + arg1.Format(style) + " => cost=" + style.FormatDouble(cost) + ", gradients=" + results[1].Format(style) + Environment.NewLine);
                return(new Tuple <double, Vector <double> >(cost, CreateVector.Dense(gradients.ToDoubleArray(badResult))));
            });

            try {
                BfgsMinimizer      minimizer = new BfgsMinimizer(toler, toler, toler);
                MinimizationResult result    = minimizer.FindMinimum(objectiveFunction, initialGuess);
                if (result.ReasonForExit == ExitCondition.Converged || result.ReasonForExit == ExitCondition.AbsoluteGradient || result.ReasonForExit == ExitCondition.RelativeGradient)
                {
                    List <Value> elements = new List <Value>();
                    for (int i = 0; i < result.MinimizingPoint.Count; i++)
                    {
                        elements.Add(new NumberValue(result.MinimizingPoint[i]));
                    }
                    ListValue <Value> list = new ListValue <Value>(elements);
                    KGui.gui.GuiOutputAppendText("argmin: converged with parameters " + list.Format(style) + " and reason '" + result.ReasonForExit + "'" + Environment.NewLine);
                    return(list);
                }
                else
                {
                    throw new Error("reason '" + result.ReasonForExit.ToString() + "'");
                }
            } catch (Exception e) { throw new Error("argmin ended: " + ((e.InnerException == null) ? e.Message : e.InnerException.Message)); } // somehow we need to recatch the inner exception coming from CostAndGradient
        }
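
Stripped of the Value/Netlist plumbing, the pattern here is: wrap a cost-and-gradient callback with ObjectiveFunction.Gradient, then hand it to BfgsMinimizer.FindMinimum. A hedged sketch on a simple quadratic with an analytic gradient; the tolerances and objective are illustrative:

// BFGS minimization of a simple quadratic with an analytic gradient (objective is illustrative).
using System;
using MathNet.Numerics.LinearAlgebra;
using MathNet.Numerics.Optimization;

class BfgsSketch
{
    static void Main()
    {
        // Cost (x - 3)^2 + y^2 and its gradient (2(x - 3), 2y).
        IObjectiveFunction objective = ObjectiveFunction.Gradient((Vector<double> v) =>
        {
            double cost = Math.Pow(v[0] - 3.0, 2) + v[1] * v[1];
            Vector<double> gradient = CreateVector.DenseOfArray(new[] { 2.0 * (v[0] - 3.0), 2.0 * v[1] });
            return new Tuple<double, Vector<double>>(cost, gradient);
        });

        var minimizer = new BfgsMinimizer(1e-8, 1e-8, 1e-8);
        MinimizationResult result = minimizer.FindMinimum(
            objective, CreateVector.DenseOfArray(new[] { 0.0, 0.0 }));

        Console.WriteLine(result.MinimizingPoint); // expected to be close to (3, 0)
        Console.WriteLine(result.ReasonForExit);
    }
}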
Example #7
 public Layer(int size, int sizeofPreviousLayer) : this(CreateVector.Random <double>(size, new Normal(0.0, 1.0)),
                                                        sizeofPreviousLayer > 0 ? CreateMatrix.Random <double>(size, sizeofPreviousLayer, new Normal(0.0, 1.0 / Math.Sqrt(sizeofPreviousLayer))) : null)
 {
 }
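
This constructor draws biases from N(0, 1) and weights from N(0, 1/sqrt(fan-in)), the common scaled-Gaussian initialization; for an input layer (sizeofPreviousLayer == 0) the weight matrix is left null. The MathNet.Numerics initializers it uses, shown in isolation with illustrative layer sizes:

// The CreateVector/CreateMatrix initializers used by the constructor above (sizes are illustrative).
using System;
using MathNet.Numerics.Distributions;
using MathNet.Numerics.LinearAlgebra;

class LayerInitSketch
{
    static void Main()
    {
        int size = 30, sizeofPreviousLayer = 784;

        // Biases ~ N(0, 1); weights ~ N(0, 1/sqrt(fan-in)).
        Vector<double> biases  = CreateVector.Random<double>(size, new Normal(0.0, 1.0));
        Matrix<double> weights = CreateMatrix.Random<double>(size, sizeofPreviousLayer,
            new Normal(0.0, 1.0 / Math.Sqrt(sizeofPreviousLayer)));

        Console.WriteLine(biases.Count + " biases, " + weights.RowCount + "x" + weights.ColumnCount + " weights");
    }
}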