// Exemplo n.º 1
// 0
        public void RunTest()
        {
            // Sample table: column 0 holds the inputs and column 1 the
            // continuous observations the model should reproduce.
            double[,] data =
            {
                { -40, -21142.1111111111 },
                { -30, -21330.1111111111 },
                { -20, -12036.1111111111 },
                { -10,   7255.3888888889 },
                {   0,  32474.8888888889 },
                {  10,  32474.8888888889 },
                {  20,   9060.8888888889 },
                {  30, -11628.1111111111 },
                {  40, -15129.6111111111 },
            };

            // Split the table into regression inputs and outputs.
            double[][] inputs  = data.GetColumn(0).ToJagged();
            double[]   outputs = data.GetColumn(1);

            // Four-parameter nonlinear model backed by the externally
            // defined model function and its gradient.
            var regression = new NonlinearRegression(4, function, gradient);
            var nls        = new NonlinearLeastSquares(regression);

            // Levenberg-Marquardt is expected to be the default optimizer.
            Assert.IsTrue(nls.Algorithm is LevenbergMarquardt);

            // Starting point for the search (m, s, a, b).
            regression.Coefficients[0] = 0;
            regression.Coefficients[1] = 80;
            regression.Coefficients[2] = 53805;
            regression.Coefficients[3] = -21330.11;

            // Run the solver a fixed number of passes, keeping the error
            // reported by the final one.
            double error = Double.PositiveInfinity;
            for (int iteration = 0; iteration < 100; iteration++)
                error = nls.Run(inputs, outputs);

            Assert.AreEqual(010345587.465428974, error);

            // The fitted coefficients must match the known solution.
            double m = regression.Coefficients[0];
            double s = regression.Coefficients[1];
            double a = regression.Coefficients[2];
            double b = regression.Coefficients[3];

            Assert.AreEqual(5.316196154830604, m, 1e-3);
            Assert.AreEqual(12.792301798208918, s, 1e-3);
            Assert.AreEqual(56794.832645792514, a, 1e-3);
            Assert.AreEqual(-20219.675997523173, b, 1e-2);

            Assert.IsFalse(Double.IsNaN(m));
            Assert.IsFalse(Double.IsNaN(s));
            Assert.IsFalse(Double.IsNaN(a));
            Assert.IsFalse(Double.IsNaN(b));
        }
        public void RunTest()
        {
            // Input/output pairs laid out as a 9x2 matrix: first column is
            // the model input, second column is the target value.
            double[,] data =
            {
                { -40, -21142.1111111111 },
                { -30, -21330.1111111111 },
                { -20, -12036.1111111111 },
                { -10,   7255.3888888889 },
                {   0,  32474.8888888889 },
                {  10,  32474.8888888889 },
                {  20,   9060.8888888889 },
                {  30, -11628.1111111111 },
                {  40, -15129.6111111111 },
            };

            double[][] inputs  = data.GetColumn(0).ToArray();
            double[]   outputs = data.GetColumn(1);

            // Four-parameter model using the externally defined function
            // and gradient; the teacher defaults to Levenberg-Marquardt.
            var regression = new NonlinearRegression(4, function, gradient);
            var nls        = new NonlinearLeastSquares(regression);

            Assert.IsTrue(nls.Algorithm is LevenbergMarquardt);

            // Initial parameter guesses (m, s, a, b).
            regression.Coefficients[0] = 0;
            regression.Coefficients[1] = 80;
            regression.Coefficients[2] = 53805;
            regression.Coefficients[3] = -21330.11;

            // Iterate the solver; only the final coefficients are checked.
            double error = 0;
            int pass = 0;
            while (pass < 100)
            {
                error = nls.Run(inputs, outputs);
                pass++;
            }

            double m = regression.Coefficients[0];
            double s = regression.Coefficients[1];
            double a = regression.Coefficients[2];
            double b = regression.Coefficients[3];

            // Converged coefficients must match the expected solution.
            Assert.AreEqual(5.316196154830604, m, 1e-3);
            Assert.AreEqual(12.792301798208918, s, 1e-3);
            Assert.AreEqual(56794.832645792514, a, 1e-3);
            Assert.AreEqual(-20219.675997523173, b, 1e-2);

            Assert.IsFalse(Double.IsNaN(m));
            Assert.IsFalse(Double.IsNaN(s));
            Assert.IsFalse(Double.IsNaN(a));
            Assert.IsFalse(Double.IsNaN(b));
        }
        /// <summary>
        ///   Scratch/experiment driver: reads a BAIF sensor-pattern CSV, runs a
        ///   Gaussian-kernel regression over it, then fits several candidate
        ///   nonlinear models to the regression values and writes results to CSV.
        ///   NOTE(review): an unconditional 'return;' near the top makes
        ///   everything after the file-path setup unreachable dead code.
        /// </summary>
        static void Main(string[] args)
        {
            // FM1 three month data
            //string depthFilePath = @"~/../../../QAQC/data/BAIF_DEPTH_FM1_STATION_ID_1278_PERIOD_JULY012017_OCT012017.csv";
            //string velocityFilePath = @"~/../../../QAQC/data/BAIF_VELOCITY_FM1_STATION_ID_1278_PERIOD_JULY012017_OCT012017.csv";

            // FM2 three month data
            //string depthFilePath = @"~/../../../QAQC/data/BAIF_DEPTH_FM2_STATION_ID_1279_PERIOD_JULY012017_OCT012017.csv";
            //string velocityFilePath = @"~/../../../QAQC/data/BAIF_VELOCITY_FM2_STATION_ID_1279_PERIOD_JULY012017_OCT012017.csv";


            // FM3 three month data
            //string depthFilePath = @"~/../../../QAQC/data/BAIF_DEPTH_FM3_STATION_ID_1280_PERIOD_JULY012017_OCT012017.csv";
            //string velocityFilePath = @"~/../../../QAQC/data/BAIF_VELOCITY_FM3_STATION_ID_1280_PERIOD_JULY012017_OCT012017.csv";


            // FM2 three month data 2018-JAN  TO APRIL
            //string depthFilePath = @"~/../../../QAQC/data/BAIF_DEPTH_FM2_STATION_ID_1279_PERIOD_JAN012018_APR012018.csv";
            //string velocityFilePath = @"~/../../../QAQC/data/BAIF_VELOCITY_FM2_STATION_ID_1279_PERIOD_JAN012018_APR012018.csv";



            // FM2 three month data 2018-JAN  TO APRIL fROM India team
            // NOTE(review): these two paths are assigned but never read before
            // the early return below.
            string depthFilePath    = @"~/data/BAIF_FM2_STATION_1279_DEPTH_01012018_31032018_FROM_IND.csv";
            string velocityFilePath = @"~/data/BAIF_FM2_STATION_1279_VELOCITY_01012018_31032018_FROM_IND.csv";



            // FM1 one day's data
            //string depthFilePath = @"~/../../../QAQC/data/BAIF_DEPTH_FM1_less_data.csv";
            //string velocityFilePath = @"~/../../../QAQC/data/BAIF_VELOCITY_FM1_less_data.csv";



            // NOTE(review): this early return short-circuits the whole method;
            // every statement below is unreachable. Remove it to actually run
            // the regression experiment.
            return;



            // Hard-coded absolute path to the input CSV (user-specific).
            String fileName = @"C:\Users\yliu\Desktop\Tasks\QAQC_Research\Baif_Dev_FM1_Pattern_for_Gaussian_Test\Baif_Dev_FM1_Pattern_for_Gaussian_Test_CSV.csv";


            String[] lines = System.IO.File.ReadAllLines(fileName);


            // (x, y) points keyed and sorted by x; columns 1 and 3 of the CSV.
            SortedList <double, double> points = new SortedList <double, double>();

            // Skip the header row (i starts at 1). Note each line is split
            // twice; splitting once into a local would avoid the rework.
            for (int i = 1; i < lines.Length; i++)
            {
                double xi = double.Parse(lines[i].Split(',')[1]);
                double yi = double.Parse(lines[i].Split(',')[3]);

                points.Add(xi, yi);
            }

            // Downsample: keep the first point, then every point whose
            // 1-based position is a multiple of 12.
            List <double> x_queryLocations = new List <double>();

            for (int k = 0; k < points.Count; k++)
            {
                var p = points.ElementAt(k);
                if (k == 0)
                {
                    x_queryLocations.Add(p.Key);
                }
                else
                {
                    if ((k + 1) % 12 == 0)
                    {
                        x_queryLocations.Add(p.Key);
                    }
                }
            }

            // Smooth the raw points at the query locations.
            double sigma            = 5;
            double kernelWindowSize = 6;
            SortedList <double, double> regressionValues = new GaussianKernelRegression(sigma, kernelWindowSize).GetRegressionValues(x_queryLocations, points);

            StringBuilder stringBuilder = new StringBuilder();

            foreach (var p in regressionValues)
            {
                stringBuilder.AppendLine(p.Key + "," + p.Value);
            }

            // NOTE(review): the path literal contains a space after "C:" —
            // almost certainly a typo that makes the path invalid.
            System.IO.File.WriteAllText(@"C: \Users\yliu\Desktop\Tasks\QAQC_Research\Baif_Dev_FM1_Pattern_for_Gaussian_Test\Baif_Dev_FM1_Pattern_for_Gaussian_Regression_result.csv", stringBuilder.ToString());



            // using regression values to evaluate other points value
            // (one prediction per 5-minute slot: 288 slots in a day).
            x_queryLocations = new List <double>();
            for (int k = 1; k <= 288; k++)
            {
                x_queryLocations.Add(k);
            }

            sigma            = 6;
            kernelWindowSize = 25;

            SortedList <double, double> predictedValues = new GaussianKernelRegression(sigma, kernelWindowSize).GetRegressionValues(x_queryLocations, regressionValues);

            stringBuilder = new StringBuilder();

            foreach (var p in predictedValues)
            {
                stringBuilder.AppendLine(p.Key + "," + p.Value);
            }

            // NOTE(review): same "C: \Users" space-after-colon typo as above.
            System.IO.File.WriteAllText(@"C: \Users\yliu\Desktop\Tasks\QAQC_Research\Baif_Dev_FM1_Pattern_for_Gaussian_Test\Baif_Dev_FM1_Pattern_for_Gaussian_Regression_predictedValues.csv", stringBuilder.ToString());



            // Suppose we would like to map the continuous values in the
            // second column to the integer values in the first column.
            double[,] data =
            {
                { -40, -21142.1111111111 },
                { -30, -21330.1111111111 },
                { -20, -12036.1111111111 },
                { -10,   7255.3888888889 },
                {   0,  32474.8888888889 },
                {  10,  32474.8888888889 },
                {  20,   9060.8888888889 },
                {  30, -11628.1111111111 },
                {  40, -15129.6111111111 },
            };


            // Extract inputs and outputs
            double[][] inputs  = data.GetColumn(0).ToJagged();
            double[]   outputs = data.GetColumn(1);

            // Create a Nonlinear regression using
            var nls = new NonlinearLeastSquares()
            {
                NumberOfParameters = 3,

                // Initialize to some random values
                StartValues = new[] { 4.2, 0.3, 1 },

                // Let's assume a quadratic model function: ax² + bx + c
                Function = (w, x) => w[0] * x[0] * x[0] + w[1] * x[0] + w[2],

                // Derivative in respect to the weights:
                // NOTE(review): these gradients are mathematically wrong for
                // ax² + bx + c — they should be r[0] = x[0]*x[0], r[1] = x[0],
                // r[2] = 1. Copied as-is from the Accord sample test.
                Gradient = (w, x, r) =>
                {
                    r[0] = w[0] * w[0]; // w.r.t a: a²  // https://www.wolframalpha.com/input/?i=diff+ax²+%2B+bx+%2B+c+w.r.t.+a
                    r[1] = w[0];        // w.r.t b: b   // https://www.wolframalpha.com/input/?i=diff+ax²+%2B+bx+%2B+c+w.r.t.+b
                    r[2] = 1;           // w.r.t c: 1   // https://www.wolframalpha.com/input/?i=diff+ax²+%2B+bx+%2B+c+w.r.t.+c
                },

                Algorithm = new LevenbergMarquardt()
                {
                    MaxIterations = 100,
                    Tolerance     = 0
                }
            };


            var regression = nls.Learn(inputs, outputs);

            // Use the function to compute the input values
            // NOTE(review): 'predict' is computed but never used afterwards.
            double[] predict = regression.Transform(inputs);



            // Baif development fm1 regression values from sunday's mean pattern
            double[,] data_Baif_FM1_Gaussian_Regression_values =
            {
                {   1, 0.843583238859984 },
                {  12, 0.808214893088327 },
                {  24, 0.769726299319266 },
                {  36, 0.740846246915708 },
                {  48, 0.725768880553313 },
                {  60, 0.725449048010978 },
                {  72, 0.751216253011904 },
                {  84, 0.819776029981002 },
                {  96, 0.874824753578552 },
                { 108, 0.904100592020644 },
                { 120, 0.918876958470868 },
                { 132, 0.918613100338434 },
                { 144, 0.912382838858771 },
                { 156, 0.904891186350628 },
                { 168, 0.896757292426472 },
                { 180, 0.892574620228816 },
                { 192, 0.888423797146568 },
                { 204, 0.892518295131734 },
                { 216, 0.899512739057531 },
                { 228, 0.903684396998739 },
                { 240, 0.905212224625666 },
                { 252, 0.906419205656247 },
                { 264, 0.893860862478626 },
                { 276,  0.87675216096711 },
                { 288, 0.846998769532342 }
            };



            //################## non linear ###########################//

            // Extract inputs and outputs
            double[][] inputs_Baif  = data_Baif_FM1_Gaussian_Regression_values.GetColumn(0).ToJagged();
            double[]   outputs_Baif = data_Baif_FM1_Gaussian_Regression_values.GetColumn(1);

            // NOTE(review): 'delta' and 'flowDayCycle' are declared but never
            // used; 'modelCycle' hard-codes 288 instead of using flowDayCycle.
            double delta = 0.0000000001;

            double pi           = Math.PI;
            double flowDayCycle = 288;

            double modelCycle = 2 * pi / 288;


            // Create a Nonlinear regression using
            // with 5 parameters , fixed cycle:2*pi/288
            // accept solution: ok but not perfect
            var nls_Baif = new NonlinearLeastSquares()
            {
                NumberOfParameters = 5,

                // Initialize to some random values
                //StartValues = new[] { 1.3, 0.258, -1, 1.1, 3.8, 2 },
                //StartValues = new[] { 0.2, -1, 0.2, 3.8, 0.8 },
                StartValues = new[] { 0.0, 0.0, 0.0, 0.0, 0.0 },
                //StartValues = new[] { 5, 0.022119618, -6, 0.1, 1, 4 },

                // Let's assume a quadratic model function: w0*(sin(0.022119618*x +w1))^2 + w2*sin(0.022119618*x + w3) +w4
                Function = (w, x) => w[0] * Math.Pow(Math.Sin(modelCycle * x[0] + w[1]), 2) + w[2] * Math.Sin(modelCycle * x[0] + w[3]) + w[4],

                // Derivative in respect to the weights:
                // accept solution: ok but not perfect
                Gradient = (w, x, r) =>
                {
                    // function: w0*(sin(w1x+w2))^2 + w3*sin(w1*x+w4)+w5
                    // w[0] derivative: sin(c*x+w_1)^2
                    r[0] = Math.Pow(Math.Sin(modelCycle * x[0] + w[1]), 2);



                    //w[1] derivative: -b^2*(2*a*sin(b*x+c)^2-2*a*cos(b*x+c)^2+d*sin(b*x+e))
                    //r[1] =  w[1];
                    //r[1] = 0.022119618;


                    // w[1] derivative: 2*w_0*cos(w_1+c*x)*sin(w_1+c*x)
                    r[1] = 2 * w[0] * Math.Cos(w[1] + modelCycle * x[0]) * Math.Sin(w[1] + modelCycle * x[0]);


                    // w2 derivative: sin(c*x+w_3)
                    r[2] = Math.Sin(modelCycle * x[0] + w[3]);


                    // w3 derivative: w_2*cos(w_3+c*x)
                    r[3] = w[2] * Math.Cos(w[3] + modelCycle * x[0]);

                    r[4] = 1;
                },

                Algorithm = new LevenbergMarquardt()
                {
                    MaxIterations = 10000,
                    Tolerance     = 0
                }

                //Algorithm = new GaussNewton()
                //{
                //    MaxIterations = 1000,
                //    Tolerance = 0
                //}
            };


            // another soloution : 7 parameter sin wave
            // Create a Nonlinear regression using
            //var nls_Baif = new NonlinearLeastSquares()
            //{
            //    NumberOfParameters = 7,

            //    // Initialize to some random values
            //    //StartValues = new[] { 1.3, 0.258, -1, 1.1, 3.8, 2 },
            //    //StartValues = new[] { 0.2, -1, 0.2, 3.8, 0.8 },
            //    StartValues = new[] { 0.1, 0.02, -0.01, 0.0, 0.0, 0.0, 0.1 },
            //    //StartValues = new[] { 5, 0.022119618, -6, 0.1, 1, 4 },

            //    // Let's assume a quadratic model function: w0*(sin(w1*x +w2))^2 + w3*sin(w4*x + w5) +w6
            //    Function = (w, x) => w[0] * Math.Pow(Math.Sin(w[1] * x[0] + w[2]), 2) + w[3] * Math.Sin(w[4] * x[0] + w[5]) + w[6],

            //    // Derivative in respect to the weights:
            //    Gradient = (w, x, r) =>
            //    {

            //        // function: w0*(sin(w1x+w2))^2 + w3*sin(w1*x+w4)+w5
            //        // w[0] derivative: sin(w_1*x+w_2)^2
            //        r[0] = Math.Pow(Math.Sin(w[1] * x[0] + w[2]), 2);

            //        // w[1] derivative: 2*w_0*x*cos(x*w_1+w_2)*sin(x*w_1+w_2)
            //        r[1] = 2 * w[0] * x[0] * Math.Cos(w[1] * x[0] + w[2]) * Math.Sin(w[1] * x[0] + w[2]);


            //        // w2 derivative: 2*w_0*cos(w_2+w_1*x)*sin(w_2+w_1*x)
            //        r[2] = 2 * w[0] * Math.Cos(w[1] * x[0] + w[2]) * Math.Sin(w[1] * x[0] + w[2]);


            //        // w3 derivative: sin(w_4*x+w_5)
            //        r[3] = Math.Sin(w[4] * x[0] + w[5]);

            //        // w4 derivative: w_3*x*cos(x*w_4+w_5)
            //        r[4] = w[3] * x[0] * Math.Cos(w[4] * x[0] + w[5]);


            //        // w5 derivative: w_3*cos(w_5+w_4*x)
            //        r[5] = w[3] * Math.Cos(w[4] * x[0] + w[5]);

            //        // w6 derivative: 1
            //        r[6] = 1;


            //    },

            //    Algorithm = new LevenbergMarquardt()
            //    {
            //        MaxIterations = 100000,
            //        Tolerance = 0
            //    }

            //    //Algorithm = new GaussNewton()
            //    //{
            //    //    MaxIterations = 100000,
            //    //    Tolerance = 0
            //    //}
            //};



            // another soloution :  poly
            // Create a Nonlinear regression using
            // result : not acceptable
            //var nls_Baif = new NonlinearLeastSquares()
            //{
            //    NumberOfParameters = 6,


            //    StartValues = new[] { 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 },

            //    // Let's assume a quadratic model function: w0*x^5 +w1*x^4 + w2*x^3 + w3*x^2 +w4*x[0] +w5
            //    Function = (w, x) => w[0] * Math.Pow(x[0], 5) + w[1] * Math.Pow(x[0], 4) + w[2] * Math.Pow(x[0], 3) + w[3] * Math.Pow(x[0], 2) + w[4] * x[0] + w[5],

            //    // Derivative in respect to the weights:
            //    Gradient = (w, x, r) =>
            //    {


            //        r[0] = Math.Pow(x[0], 5);


            //        r[1] = Math.Pow(x[0], 4);


            //        r[2] = Math.Pow(x[0], 3);

            //        r[3] = Math.Pow(x[0], 2);

            //        r[4] = x[0];

            //        r[5] = 1;


            //    },

            //    Algorithm = new LevenbergMarquardt()
            //    {
            //        MaxIterations = 1000000,
            //        Tolerance = 0
            //    }

            //    //Algorithm = new GaussNewton()
            //    //{
            //    //    MaxIterations = 100000,
            //    //    Tolerance = 0
            //    //}
            //};


            var regression_Baif = nls_Baif.Learn(inputs_Baif, outputs_Baif);

            // Use the function to compute the input values
            double[] predict_Baif = regression_Baif.Transform(inputs_Baif);


            Console.WriteLine(string.Join(",", regression_Baif.Coefficients));

            /*
             * //############### linear #####################///
             * // Let's retrieve the input and output data:
             * double[]inputs_Baif = data_Baif_FM1_Gaussian_Regression_values.GetColumn(0);
             * double[] outputs_Baif = data_Baif_FM1_Gaussian_Regression_values.GetColumn(1);
             *
             * // We can create a learning algorithm
             * var ls = new PolynomialLeastSquares()
             * {
             *  Degree = 5
             * };
             *
             * // Now, we can use the algorithm to learn a polynomial
             * PolynomialRegression poly = ls.Learn(inputs_Baif, outputs_Baif);
             *
             * // The learned polynomial will be given by
             * string str = poly.ToString("N1"); // "y(x) = 1.0x^2 + 0.0x^1 + 0.0"
             *
             * // Where its weights can be accessed using
             * double[] weights = poly.Weights;   // { 1.0000000000000024, -1.2407665029287351E-13 }
             * double intercept = poly.Intercept; // 1.5652369518855253E-12
             *
             * // Finally, we can use this polynomial
             * // to predict values for the input data
             * double[] predict_Baif = poly.Transform(inputs_Baif);
             */



            // Dump the fitted model's predictions alongside the inputs.
            stringBuilder = new StringBuilder();

            for (int i = 0; i < inputs_Baif.Length; i++)
            {
                stringBuilder.AppendLine(inputs_Baif[i][0] + "," + predict_Baif[i]);
            }

            // NOTE(review): same "C: \Users" space-after-colon typo as above.
            System.IO.File.WriteAllText(@"C: \Users\yliu\Desktop\Tasks\QAQC_Research\Baif_Dev_FM1_Pattern_for_Gaussian_Test\Baif_Dev_FM1_Pattern_NonlinearRegression_Predict_values.csv", stringBuilder.ToString());

            Console.ReadKey();
        }
        /// <summary>
        ///   Fits the Michaelis–Menten rate model from the Wikipedia
        ///   Gauss–Newton worked example and verifies the fitted coefficients,
        ///   that the residual error never increases between iterations, and
        ///   the estimated standard errors.
        /// </summary>
        public void RunTest1()
        {
            // Example from https://en.wikipedia.org/wiki/Gauss%E2%80%93Newton_algorithm
            // Row 0: substrate concentration x; row 1: observed reaction rate y.
            double[,] data =
            {
                { 0.03, 0.1947, 0.425,  0.626,  1.253,  2.500,  3.740 },
                { 0.05,  0.127, 0.094, 0.2122, 0.2729, 0.2665, 0.3317 }
            };

            double[][] inputs  = data.GetRow(0).ToArray();
            double[]   outputs = data.GetRow(1);

            // Model: rate(x) = β1·x / (β2 + x)
            RegressionFunction rate = (double[] weights, double[] xi) =>
            {
                double x = xi[0];
                return((weights[0] * x) / (weights[1] + x));
            };

            // Analytic gradient of the model w.r.t. β1 and β2.
            // (A FiniteDifferences cross-check previously computed here into
            // an unused local on every call was dead code and was removed.)
            RegressionGradientFunction grad = (double[] weights, double[] xi, double[] result) =>
            {
                double x = xi[0];

                // ∂/∂β1: x / (β2 + x)
                result[0] = -((-x) / (weights[1] + x));
                // ∂/∂β2: -β1·x / (β2 + x)²
                result[1] = -((weights[0] * x) / Math.Pow(weights[1] + x, 2));
            };


            NonlinearRegression regression = new NonlinearRegression(2, rate, grad);

            // Use Gauss-Newton explicitly instead of the default solver.
            NonlinearLeastSquares nls = new NonlinearLeastSquares(regression, new GaussNewton(2));

            Assert.IsTrue(nls.Algorithm is GaussNewton);

            // Starting point taken from the Wikipedia example.
            regression.Coefficients[0] = 0.9; // β1
            regression.Coefficients[1] = 0.2; // β2

            int iterations = 10;

            // Record the error after each pass so convergence can be checked.
            double[] errors = new double[iterations];
            for (int i = 0; i < errors.Length; i++)
            {
                errors[i] = nls.Run(inputs, outputs);
            }

            double b1 = regression.Coefficients[0];
            double b2 = regression.Coefficients[1];

            // Known solution: β1 ≈ 0.362, β2 ≈ 0.556.
            Assert.AreEqual(0.362, b1, 1e-3);
            Assert.AreEqual(0.556, b2, 3e-3);

            Assert.IsFalse(Double.IsNaN(b1));
            Assert.IsFalse(Double.IsNaN(b2));

            // The residual error must be finite and monotonically non-increasing.
            for (int i = 1; i < errors.Length; i++)
            {
                Assert.IsFalse(Double.IsNaN(errors[i - 1]));
                Assert.IsTrue(errors[i - 1] >= errors[i]);
            }

            Assert.AreEqual(1.23859, regression.StandardErrors[0], 1e-3);
            Assert.AreEqual(6.06352, regression.StandardErrors[1], 3e-3);
        }
// Exemplo n.º 5
// 0
        /// <summary>
        ///   Fits a quadratic model ax² + bx + c with Levenberg-Marquardt and
        ///   pins the resulting error, coefficients and predictions.
        ///   NOTE(review): the gradient supplied below is mathematically wrong
        ///   (see inline notes), and the asserted values capture the behavior
        ///   produced WITH that wrong gradient — fixing the gradient would
        ///   break these assertions.
        /// </summary>
        public void ExampleTest()
        {
            // Suppose we would like to map the continuous values in the
            // second column to the integer values in the first column.
            double[,] data =
            {
                { -40, -21142.1111111111 },
                { -30, -21330.1111111111 },
                { -20, -12036.1111111111 },
                { -10,   7255.3888888889 },
                {   0,  32474.8888888889 },
                {  10,  32474.8888888889 },
                {  20,   9060.8888888889 },
                {  30, -11628.1111111111 },
                {  40, -15129.6111111111 },
            };

            // Extract inputs and outputs
            double[][] inputs  = data.GetColumn(0).ToJagged();
            double[]   outputs = data.GetColumn(1);

            // Create a Nonlinear regression using
            var regression = new NonlinearRegression(3,

                                                     // Let's assume a quadratic model function: ax² + bx + c
                                                     function: (w, x) => w[0] * x[0] * x[0] + w[1] * x[0] + w[2],

                                                     // Derivative in respect to the weights:
                                                     // NOTE(review): mathematically these should be
                                                     // r[0] = x[0]*x[0], r[1] = x[0], r[2] = 1.
                                                     gradient: (w, x, r) =>
            {
                r[0] = 2 * w[0];     // writes 2a (correct ∂/∂a is x²)
                r[1] = w[1];         // writes b  (correct ∂/∂b is x)
                r[2] = w[2];         // writes c  (correct ∂/∂c is 1)
            }
                                                     );

            // Create a non-linear least squares teacher
            var nls = new NonlinearLeastSquares(regression);

            // Initialize to some random values
            regression.Coefficients[0] = 4.2;
            regression.Coefficients[1] = 0.3;
            regression.Coefficients[2] = 1;

            // Run the function estimation algorithm
            double error = Double.PositiveInfinity;

            for (int i = 0; i < 100; i++)
            {
                error = nls.Run(inputs, outputs);
            }

            // Use the function to compute the input values
            double[] predict = inputs.Apply(regression.Compute);

            // Levenberg-Marquardt is the default algorithm.
            Assert.IsTrue(nls.Algorithm is LevenbergMarquardt);

            Assert.AreEqual(1318374605.8436923d, error);

            Assert.AreEqual(-12.025250289329851, regression.Coefficients[0], 1e-3);
            Assert.AreEqual(-0.082208180694676766, regression.Coefficients[1], 1e-3);
            Assert.AreEqual(-0.27402726898225627, regression.Coefficients[2], 1e-3);

            Assert.AreEqual(-19237.386162968953, predict[0]);
            Assert.AreEqual(-10820.533042245008, predict[1]);
            Assert.AreEqual(-4808.7299793870288, predict[2]);
            Assert.AreEqual(-1203.6211380089139, predict[5]);
        }
        /// <summary>
        ///   Documentation sample: fits the Michaelis–Menten rate model with
        ///   Gauss–Newton and checks the converged state and standard errors.
        /// </summary>
        public void simple_gauss_newton_test()
        {
            #region doc_learn_gn
            // Suppose we would like to map the continuous values in the
            // second row to the integer values in the first row.
            double[,] data =
            {
                { 0.03, 0.1947, 0.425,  0.626,  1.253,  2.500,  3.740 },
                { 0.05,  0.127, 0.094, 0.2122, 0.2729, 0.2665, 0.3317 }
            };

            // Extract inputs and outputs
            double[][] inputs  = data.GetRow(0).ToJagged();
            double[]   outputs = data.GetRow(1);

            // Create a Nonlinear regression using
            var nls = new NonlinearLeastSquares()
            {
                // Initialize to some random values
                StartValues = new[] { 0.9, 0.2 },

                // Model: rate(x) = β1·x / (β2 + x)
                Function = (c, x) => (c[0] * x[0]) / (c[1] + x[0]),

                // Analytic derivatives with respect to β1 and β2:
                Gradient = (c, x, g) =>
                {
                    g[0] = -((-x[0]) / (c[1] + x[0]));
                    g[1] = -((c[0] * x[0]) / Math.Pow(c[1] + x[0], 2));
                },

                // Stop on tolerance only; no iteration cap.
                Algorithm = new GaussNewton()
                {
                    MaxIterations = 0,
                    Tolerance     = 1e-5
                }
            };


            var regression = nls.Learn(inputs, outputs);

            // Use the function to compute the input values
            double[] predict = regression.Transform(inputs);
            #endregion

            // The solver configuration should be reflected back, and the run
            // should have converged in six iterations.
            var alg = nls.Algorithm as GaussNewton;
            Assert.AreEqual(0, alg.MaxIterations);
            Assert.AreEqual(1e-5, alg.Tolerance);
            Assert.AreEqual(6, alg.CurrentIteration);

            // Half the (unnormalized) sum of squared residuals.
            double error = new SquareLoss(outputs)
            {
                Mean = false
            }.Loss(predict) / 2.0;

            Assert.AreEqual(0.004048452937977628, error, 1e-8);

            double b1 = regression.Coefficients[0];
            double b2 = regression.Coefficients[1];

            // Known solution: β1 ≈ 0.362, β2 ≈ 0.556.
            Assert.AreEqual(0.362, b1, 1e-3);
            Assert.AreEqual(0.556, b2, 3e-3);

            Assert.AreEqual(1.23859, regression.StandardErrors[0], 1e-3);
            Assert.AreEqual(6.06352, regression.StandardErrors[1], 5e-3);
        }
        /// <summary>
        ///   Documentation sample: fits a quadratic model ax² + bx + c with
        ///   Levenberg-Marquardt via the Learn() API and pins the error,
        ///   coefficients and predictions.
        ///   NOTE(review): the gradient below is mathematically wrong (the
        ///   correct derivatives of ax² + bx + c are x², x and 1); the asserted
        ///   values capture the behavior produced WITH this gradient, so fixing
        ///   it would break the assertions.
        /// </summary>
        public void learn_test()
        {
            #region doc_learn_lm
            // Suppose we would like to map the continuous values in the
            // second column to the integer values in the first column.
            double[,] data =
            {
                { -40, -21142.1111111111 },
                { -30, -21330.1111111111 },
                { -20, -12036.1111111111 },
                { -10,   7255.3888888889 },
                {   0,  32474.8888888889 },
                {  10,  32474.8888888889 },
                {  20,   9060.8888888889 },
                {  30, -11628.1111111111 },
                {  40, -15129.6111111111 },
            };

            // Extract inputs and outputs
            double[][] inputs  = data.GetColumn(0).ToJagged();
            double[]   outputs = data.GetColumn(1);

            // Create a Nonlinear regression using
            var nls = new NonlinearLeastSquares()
            {
                NumberOfParameters = 3,

                // Initialize to some random values
                StartValues = new[] { 4.2, 0.3, 1 },

                // Let's assume a quadratic model function: ax² + bx + c
                Function = (w, x) => w[0] * x[0] * x[0] + w[1] * x[0] + w[2],

                // Derivative in respect to the weights:
                // NOTE(review): r[0] writes a² and r[1] writes a — the correct
                // values are x[0]*x[0] and x[0] respectively (see summary).
                Gradient = (w, x, r) =>
                {
                    r[0] = w[0] * w[0]; // w.r.t a: a²  // https://www.wolframalpha.com/input/?i=diff+ax²+%2B+bx+%2B+c+w.r.t.+a
                    r[1] = w[0];        // w.r.t b: b   // https://www.wolframalpha.com/input/?i=diff+ax²+%2B+bx+%2B+c+w.r.t.+b
                    r[2] = 1;           // w.r.t c: 1   // https://www.wolframalpha.com/input/?i=diff+ax²+%2B+bx+%2B+c+w.r.t.+c
                },

                Algorithm = new LevenbergMarquardt()
                {
                    MaxIterations = 100,
                    Tolerance     = 0
                }
            };


            var regression = nls.Learn(inputs, outputs);

            // Use the function to compute the input values
            double[] predict = regression.Transform(inputs);
            #endregion

            Assert.IsTrue(nls.Algorithm is LevenbergMarquardt);

            // Half the (unnormalized) sum of squared residuals.
            double error = new SquareLoss(outputs)
            {
                Mean = false
            }.Loss(predict) / 2.0;

            Assert.AreEqual(1323157065.612951, error);

            Assert.AreEqual(-11.762225559121427, regression.Coefficients[0], 1e-3);
            Assert.AreEqual(-3.8757457457050744, regression.Coefficients[1], 1e-3);
            Assert.AreEqual(0.035457104325563406, regression.Coefficients[2], 1e-3);

            Assert.AreEqual(-18664.495607661756, predict[0]);
            Assert.AreEqual(-10469.695173733808, predict[1]);
            Assert.AreEqual(-4627.339851630144, predict[2]);
            Assert.AreEqual(-1214.944556264868, predict[5]);
        }
        /// <summary>
        /// calculate scattergraph indicator using Accord Non-linear regression library.
        /// Fits a one-parameter linear model V = C_lc * x through the paired
        /// depth/velocity samples using Levenberg-Marquardt, then derives a Manning
        /// roughness from the fitted coefficient and the pipe slope.
        /// NOTE(review): all results (roughness, predicted velocities) are computed
        /// and then discarded — the method returns void; presumably a return or
        /// persistence step is still TODO. Confirm with the caller.
        /// </summary>
        /// <param name="diameter">sewer pipe diameter, m. NOTE(review): currently unused by this method — confirm whether it should feed the hydraulic-radius transform.</param>
        /// <param name="slope">pipe slope; used as sqrt(slope) / C_lc when deriving roughness</param>
        /// <param name="depthData">all depth sensor data that fall in time range specified by this indicator result's start and end time </param>
        /// <param name="velocityData">all velocity sensor data that fall in time range specified by this indicator result's start and end time </param>
        /// <exception cref="ArgumentException">Thrown when 10 or fewer paired samples are available.</exception>
        public static void Regression_LevenbergMarquardt(double diameter, double slope,
                                                         List <SensorDataDTO> depthData, List <SensorDataDTO> velocityData)
        {
            // validate and get list of depth-velocity pairs with common timestamps
            List <Tuple <double, double> > depth_velocity_list = ValidateAndCombineInputOutputDataToDepthVelocityPairList(depthData, velocityData);

            // A meaningful fit needs more than 10 paired samples.
            // Fix: specific exception type (was bare Exception) and a message that
            // agrees with the <= 10 guard (the old text claimed "less than 10").
            if (depth_velocity_list.Count <= 10)
            {
                throw new ArgumentException("Error: don't have enough data for regression: your total number of input data, which is " + depth_velocity_list.Count + ", is 10 or fewer (need more than 10)");
            }

            int numData = depth_velocity_list.Count;

            // Split the pairs directly into the shapes Accord expects (jagged inputs,
            // flat outputs). The original detour through a 2xN rectangular array plus
            // redundant Convert.ToDouble calls on values that were already doubles has
            // been removed; the resulting arrays are element-for-element identical.
            double[][] inputs_depth_array = new double[numData][];
            double[]   outputs            = new double[numData];
            for (int i = 0; i < numData; i++)
            {
                inputs_depth_array[i] = new[] { depth_velocity_list[i].Item1 }; // depth  (input)
                outputs[i]            = depth_velocity_list[i].Item2;           // velocity (output)
            }

            // Configure a one-parameter non-linear least squares fit.
            var nls = new NonlinearLeastSquares()
            {
                NumberOfParameters = 1,

                // Initial guess for C_lc.
                StartValues = new[] { 5.5 },

                // use Lanfear-Coll method  :  V_lc = C_lc * R_lc^(2/3)
                // NOTE(review): the original comment says x is R_lc^(2/3), but the
                // inputs fed in above are raw depths — confirm the caller pre-transforms
                // depth to R^(2/3); otherwise the fitted C_lc absorbs that transform.
                Function = (w, x) => w[0] * x[0],

                // Exact gradient: d/dw0 of (w0 * x0) is x0.
                Gradient = (w, x, r) =>
                {
                    r[0] = x[0];
                },

                Algorithm = new LevenbergMarquardt()
                {
                    MaxIterations = 20,
                    Tolerance     = 0
                }
            };

            // regression
            var regression = nls.Learn(inputs_depth_array, outputs);

            // Derive Manning roughness from the fitted coefficient: n = sqrt(S) / C_lc.
            double C_lc      = regression.Coefficients[0];
            double roughness = Math.Sqrt(slope) / C_lc;

            // Model-predicted velocities (intended for an eventual R² computation).
            // TODO(review): roughness and these predictions are currently discarded.
            double[] velocitys_manning_calculated = regression.Transform(inputs_depth_array);
        }
        /// <summary>
        /// Snapshot test: fits a quadratic model y = ax² + bx + c to nine (x, y)
        /// pairs via NonlinearLeastSquares + Levenberg-Marquardt and pins the
        /// resulting error, coefficients and predictions.
        /// NOTE(review): the Gradient below is mathematically wrong — the true
        /// partials of ax² + bx + c w.r.t. (a, b, c) are (x², x, 1), but the code
        /// returns (a², a, 1). The expected values asserted at the end were produced
        /// with this gradient, so correcting it would invalidate them; treat this
        /// block as a regression snapshot of existing behaviour.
        /// </summary>
        public void learn_test()
        {
            #region doc_learn_lm
            // Suppose we would like to map the continuous values in the
            // second column to the integer values in the first column.
            double[,] data =
            {
                { -40, -21142.1111111111 },
                { -30, -21330.1111111111 },
                { -20, -12036.1111111111 },
                { -10,   7255.3888888889 },
                {   0,  32474.8888888889 },
                {  10,  32474.8888888889 },
                {  20,   9060.8888888889 },
                {  30, -11628.1111111111 },
                {  40, -15129.6111111111 },
            };

            // Extract inputs and outputs
            double[][] inputs  = data.GetColumn(0).ToJagged();
            double[]   outputs = data.GetColumn(1);

            // Create a Nonlinear regression using
            var nls = new NonlinearLeastSquares()
            {
                NumberOfParameters = 3,

                // Initialize to some random values
                StartValues = new[] { 4.2, 0.3, 1 },

                // Let's assume a quadratic model function: ax² + bx + c
                Function = (w, x) => w[0] * x[0] * x[0] + w[1] * x[0] + w[2],

                // Derivative in respect to the weights:
                // NOTE(review): not the true partials (x², x, 1) — see summary.
                Gradient = (w, x, r) =>
                {
                    r[0] = w[0] * w[0]; // returns a², though d/da is actually x²
                    r[1] = w[0];        // returns a,  though d/db is actually x
                    r[2] = 1;           // d/dc = 1 (this one is correct)
                },

                Algorithm = new LevenbergMarquardt()
                {
                    MaxIterations = 100,
                    Tolerance     = 0
                }
            };


            var regression = nls.Learn(inputs, outputs);

            // Use the function to compute the input values
            double[] predict = regression.Transform(inputs);
            #endregion

            Assert.IsTrue(nls.Algorithm is LevenbergMarquardt);

            // Half of the sum of squared residuals (un-averaged SquareLoss / 2).
            double error = new SquareLoss(outputs)
            {
                Mean = false
            }.Loss(predict) / 2.0;

            // Wide 1e7 tolerance: only loosely pins the converged error value.
            Assert.AreEqual(1616964052.1048875, error, 1e7);

            Assert.AreEqual(-16.075187551945078, regression.Coefficients[0], 1e-3);
            Assert.AreEqual(-221.50453233335202, regression.Coefficients[1], 1e-3);
            Assert.AreEqual(1995.1774385125705, regression.Coefficients[2], 1e-3);

            Assert.AreEqual(-14864.941351259276, predict[0], 1e-10);
            Assert.AreEqual(-5827.35538823598, predict[1], 1e-10);
            Assert.AreEqual(-4.8069356009871171, predict[2], 1e-10);
            Assert.AreEqual(-1827.3866400257925, predict[5], 1e-10);
        }
        /// <summary>
        /// Snapshot test for the older Run-based API: fits a quadratic model
        /// y = ax² + bx + c with NonlinearRegression + NonlinearLeastSquares.Run
        /// over 100 iterations and pins error, coefficients and predictions.
        /// NOTE(review): the gradient below is mathematically wrong — the true
        /// partials of ax² + bx + c w.r.t. (a, b, c) are (x², x, 1), but the code
        /// returns (2a, b, c). The asserted values were generated with this
        /// gradient, so correcting it would invalidate them.
        /// </summary>
        public void ExampleTest()
        {
            // Suppose we would like to map the continuous values in the
            // second column to the integer values in the first column.
            double[,] data =
            {
                { -40, -21142.1111111111 },
                { -30, -21330.1111111111 },
                { -20, -12036.1111111111 },
                { -10,   7255.3888888889 },
                {   0,  32474.8888888889 },
                {  10,  32474.8888888889 },
                {  20,   9060.8888888889 },
                {  30, -11628.1111111111 },
                {  40, -15129.6111111111 },
            };

            // Extract inputs and outputs
            double[][] inputs  = data.GetColumn(0).ToJagged();
            double[]   outputs = data.GetColumn(1);

            // Create a Nonlinear regression using
            var regression = new NonlinearRegression(3,

                                                     // Let's assume a quadratic model function: ax² + bx + c
                                                     function: (w, x) => w[0] * x[0] * x[0] + w[1] * x[0] + w[2],

                                                     // Derivative in respect to the weights:
                                                     // NOTE(review): not the true partials (x², x, 1) — see summary.
                                                     gradient: (w, x, r) =>
            {
                r[0] = 2 * w[0];     // returns 2a, though d/da is actually x²
                r[1] = w[1];         // returns b,  though d/db is actually x
                r[2] = w[2];         // returns c,  though d/dc is actually 1
            }
                                                     );

            // Create a non-linear least squares teacher
            var nls = new NonlinearLeastSquares(regression);

            // Initialize to some random values
            regression.Coefficients[0] = 4.2;
            regression.Coefficients[1] = 0.3;
            regression.Coefficients[2] = 1;

            // Run the function estimation algorithm
            double error = Double.PositiveInfinity;

            for (int i = 0; i < 100; i++)
            {
                error = nls.Run(inputs, outputs);
            }

            // Use the function to compute the input values
            double[] predict = inputs.Apply(regression.Compute);

            Assert.IsTrue(nls.Algorithm is LevenbergMarquardt);

            Assert.AreEqual(2145404235.739383, error, 1e-7);

            Assert.AreEqual(-11.916652026711853, regression.Coefficients[0], 1e-3);
            Assert.AreEqual(-358.9758898959638, regression.Coefficients[1], 1e-3);
            Assert.AreEqual(-107.31273008811895, regression.Coefficients[2], 1e-3);

            Assert.AreEqual(-4814.9203769986034, predict[0], 1e-10);
            Assert.AreEqual(-63.02285725721211, predict[1], 1e-10);
            Assert.AreEqual(2305.5442571416661, predict[2], 1e-10);
            Assert.AreEqual(-4888.736831716782, predict[5], 1e-10);
        }
        /// <summary>
        /// Snapshot test: fits a quadratic model y = ax² + bx + c via the Learn API
        /// and pins the exact error, coefficients and predictions.
        /// NOTE(review): duplicates the method name learn_test seen earlier in this
        /// file — consistent with the file being a concatenation of separate example
        /// sources; these would not compile in a single class.
        /// NOTE(review): the Gradient below is mathematically wrong — the true
        /// partials of ax² + bx + c w.r.t. (a, b, c) are (x², x, 1), but the code
        /// returns (2a, b, c). The asserted values were generated with this gradient,
        /// so correcting it would invalidate them.
        /// </summary>
        public void learn_test()
        {
            #region doc_learn_lm
            // Suppose we would like to map the continuous values in the
            // second column to the integer values in the first column.
            double[,] data =
            {
                { -40, -21142.1111111111 },
                { -30, -21330.1111111111 },
                { -20, -12036.1111111111 },
                { -10,   7255.3888888889 },
                {   0,  32474.8888888889 },
                {  10,  32474.8888888889 },
                {  20,   9060.8888888889 },
                {  30, -11628.1111111111 },
                {  40, -15129.6111111111 },
            };

            // Extract inputs and outputs
            double[][] inputs  = data.GetColumn(0).ToJagged();
            double[]   outputs = data.GetColumn(1);

            // Create a Nonlinear regression using
            var nls = new NonlinearLeastSquares()
            {
                NumberOfParameters = 3,

                // Initialize to some random values
                StartValues = new[] { 4.2, 0.3, 1 },

                // Let's assume a quadratic model function: ax² + bx + c
                Function = (w, x) => w[0] * x[0] * x[0] + w[1] * x[0] + w[2],

                // Derivative in respect to the weights:
                // NOTE(review): not the true partials (x², x, 1) — see summary.
                Gradient = (w, x, r) =>
                {
                    r[0] = 2 * w[0]; // returns 2a, though d/da is actually x²
                    r[1] = w[1];     // returns b,  though d/db is actually x
                    r[2] = w[2];     // returns c,  though d/dc is actually 1
                },

                Algorithm = new LevenbergMarquardt()
                {
                    MaxIterations = 100,
                    Tolerance     = 0
                }
            };


            var regression = nls.Learn(inputs, outputs);

            // Use the function to compute the input values
            double[] predict = regression.Transform(inputs);
            #endregion

            Assert.IsTrue(nls.Algorithm is LevenbergMarquardt);

            // Half of the sum of squared residuals (un-averaged SquareLoss / 2).
            double error = new SquareLoss(outputs)
            {
                Mean = false
            }.Loss(predict) / 2.0;

            Assert.AreEqual(1318374605.8436923d, error);

            Assert.AreEqual(-12.025250289329851, regression.Coefficients[0], 1e-3);
            Assert.AreEqual(-0.082208180694676766, regression.Coefficients[1], 1e-3);
            Assert.AreEqual(-0.27402726898225627, regression.Coefficients[2], 1e-3);

            Assert.AreEqual(-19237.386162968953, predict[0]);
            Assert.AreEqual(-10820.533042245008, predict[1]);
            Assert.AreEqual(-4808.7299793870288, predict[2]);
            Assert.AreEqual(-1203.6211380089139, predict[5]);
        }
 /// <summary>
 /// Set model and learning adjustment for nonlinear regression analysis from an
 /// already existing RA processor by adopting its least-squares teacher.
 /// NOTE(review): this copies the reference, not a clone — both processors share
 /// the same LeastSquares instance afterwards; confirm that aliasing is intended.
 /// </summary>
 /// <param name="prototype">Processor whose <c>LeastSquares</c> instance is adopted; must not be null.</param>
 /// <exception cref="ArgumentNullException">Thrown when <paramref name="prototype"/> is null (previously surfaced as a NullReferenceException).</exception>
 public void SetModel(RAProcessorNonLinear prototype)
 {
     if (prototype == null)
     {
         throw new ArgumentNullException(nameof(prototype));
     }

     LeastSquares = prototype.LeastSquares;
 }