Example #1
        public void LinearRegresionTrainingMomentumNesterovWithTimeDecayTest()
        {
            Console.WriteLine("Linear regression");
            // Parameters
            var learning_rate   = 0.01f;
            var training_epochs = 2;

            // Training data
            var train_x = new float[] {
                3.3f, 4.4f, 5.5f, 6.71f, 6.93f, 4.168f, 9.779f, 6.182f, 7.59f, 2.167f,
                7.042f, 10.791f, 5.313f, 7.997f, 5.654f, 9.27f, 3.1f
            };
            var train_y = new float[] {
                1.7f, 2.76f, 2.09f, 3.19f, 1.694f, 1.573f, 3.366f, 2.596f, 2.53f, 1.221f,
                2.827f, 3.465f, 1.65f, 2.904f, 2.42f, 2.94f, 1.3f
            };
            var n_samples = train_x.Length;

            using (var graph = new TFGraph())
            {
                var rng = new Random(0);
                // tf Graph Input

                var X = graph.Placeholder(TFDataType.Float, TFShape.Scalar);
                var Y = graph.Placeholder(TFDataType.Float, TFShape.Scalar);

                var W    = graph.Variable(graph.Const(0.1f), operName: "weight");
                var b    = graph.Variable(graph.Const(0.1f), operName: "bias");
                var pred = graph.Add(graph.Mul(X, W.Read, "x_w"), b.Read);

                // Mean squared error cost: sum((pred - Y)^2) / (2 * n_samples).
                var cost = graph.Div(graph.ReduceSum(graph.Pow(graph.Sub(pred, Y), graph.Const(2f))), graph.Mul(graph.Const(2f), graph.Const((float)n_samples), "2_n_samples"));

                var sgd       = new SGD(graph, learning_rate, 0.9f, 0.5f, nesterov: true);
                var updateOps = sgd.Minimize(cost);

                var readIter = sgd.Iterations.ReadAfter(updateOps);
                var readW    = W.ReadAfter(updateOps);
                var readb    = b.ReadAfter(updateOps);

                using (var session = new TFSession(graph))
                {
                    session.GetRunner().AddTarget(graph.GetGlobalVariablesInitializer()).Run();

                    var expectedLines = File.ReadAllLines(Path.Combine(_testDataPath, "MomentumNesterovTimeDecay", "expected.txt"));
                    for (int i = 0; i < training_epochs; i++)
                    {
                        for (int j = 0; j < n_samples; j++)
                        {
                            var tensors = session.GetRunner()
                                          .AddInput(X, new TFTensor(train_x[j]))
                                          .AddInput(Y, new TFTensor(train_y[j]))
                                          .AddTarget(updateOps)
                                          .Fetch(readIter, cost, readW, readb, sgd.LearningRate).Run();
                            var output = Invariant($"step: {tensors[0].GetValue():D}, loss: {tensors[1].GetValue():F4}, W: {tensors[2].GetValue():F4}, b: {tensors[3].GetValue():F4}, lr: {tensors[4].GetValue():F8}");
                            Assert.Equal(expectedLines[i * n_samples + j], output);
                        }
                    }
                }
            }
        }
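
A note on the constructor call above: if SGD's parameters are (graph, learning_rate, momentum, decay, nesterov), which the test name suggests but which is an assumption here, the fetched sgd.LearningRate should follow the classic time-based schedule lr_t = lr0 / (1 + decay * t). A minimal sketch of that schedule under that assumption:

        // Hypothetical time-based decay schedule: lr_t = lr0 / (1 + decay * t).
        // Assumes the SGD constructor argument order (graph, lr, momentum, decay, nesterov).
        float lr0 = 0.01f, decay = 0.5f;
        for (int t = 1; t <= 3; t++)
            Console.WriteLine($"step {t}: lr = {lr0 / (1 + decay * t):F8}");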
Example #2
        public static void XorNeuroTry()
        {
            var trainData = new double[] { 0.0, 1.0 };

            var n_samples = trainData.Length;


            using (var g = new TFGraph())
            {
                var rng = new Random(0);
                // tf Graph Input


                var X1 = g.Placeholder(TFDataType.Double);
                var X2 = g.Placeholder(TFDataType.Double);

                // initial weight, chosen randomly
                var W = g.Variable(g.Const(rng.NextDouble()), operName: "weight");

                // not sure that computing the bias randomly is a good idea.
                var b = g.Variable(g.Const(rng.NextDouble()), operName: "bias");

                // input times weight plus bias: the op that computes the weighted sum.
                var predX1 = g.Add(g.Mul(X1, W.Read, "x1_w"), b.Read);
                var predX2 = g.Add(g.Mul(X2, W.Read, "x2_w"), b.Read);

                var pred = g.Add(predX1, predX2);

                var cost = g.Sigmoid(pred);

                var learning_rate   = 0.001f;
                var training_epochs = 100;
                var sgd             = new SGD(g, learning_rate);
                var updateOps       = sgd.Minimize(cost);

                using (var session = new TFSession(g))
                {
                    session.GetRunner().AddTarget(g.GetGlobalVariablesInitializer()).Run();

                    for (int i = 0; i < training_epochs; i++)
                    {
                        double avgLoss = 0;
                        for (int j = 0; j < n_samples; j++)
                        {
                            var tensors = session.GetRunner()
                                          .AddInput(X1, new TFTensor(trainData[j]))
                                          .AddInput(X2, new TFTensor(trainData[j]))
                                          .AddTarget(updateOps).Fetch(sgd.Iterations.Read, cost, W.Read, b.Read, sgd.LearningRate).Run();
                            avgLoss += (double)tensors[1].GetValue();
                        }
                        var tensors2 = session.GetRunner()
                                       .Fetch(W.Read, b.Read).Run();
                        var output = $"Epoch: {i + 1:D}, loss: {avgLoss / n_samples:F4}, W: {tensors2[0].GetValue():F4}, b: {tensors2[1].GetValue():F4}";
                        Console.WriteLine(output);
                    }
                }
            }
        }
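
Note that g.Sigmoid(pred) is used directly as the cost above, so the optimizer merely drives the activation toward zero; nothing ties the output to an XOR label. A more conventional choice would be binary cross-entropy composed from primitive ops. A sketch, assuming a hypothetical 0/1 label placeholder YLabel added next to X1 and X2:

        // Binary cross-entropy composed from primitives (sketch):
        // bce = -(y * log(p) + (1 - y) * log(1 - p)), with p = sigmoid(pred).
        var YLabel = g.Placeholder(TFDataType.Double);   // hypothetical label input
        var p      = g.Sigmoid(pred);
        var one    = g.Const(1.0);
        var bce    = g.Neg(g.Add(
                         g.Mul(YLabel, g.Log(p)),
                         g.Mul(g.Sub(one, YLabel), g.Log(g.Sub(one, p)))));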
Example #3
        public Q_Learning(List<int> actions)
        {
            graf = new TFGraph();
            // One-hot state input (1 x 16) feeding a single linear layer of Q-values.
            TFOutput s = graf.Placeholder(TFDataType.Float, new TFShape(1, 16));

            va      = graf.Variable(graf.RandomUniform(new TFShape(16, 4), 0, 0.01));
            Qout    = graf.MatMul(s, va);
            Predict = graf.ArgMax(Qout, graf.Constant(1, new TFShape(1)));

            // Target Q-values and the squared-error loss minimized by plain SGD.
            nextQ       = graf.Placeholder(TFDataType.Float, new TFShape(1, 4));
            Loss        = graf.ReduceSum(graf.SquaredDifference(nextQ, Qout));
            Trainer     = new SGD(graf, 0.1f);
            UpdateModel = Trainer.Minimize(Loss);
        }
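
A minimal sketch of how this class might be driven for one update, assuming the state placeholder s is kept as a field like the others (in the constructor above it is local) and that graf, Predict, Qout, nextQ and UpdateModel are accessible:

        // Hypothetical single Q-learning step (sketch): pick the greedy action,
        // build a target Q row, then apply one SGD update through UpdateModel.
        using (var session = new TFSession(graf))
        {
            session.GetRunner().AddTarget(graf.GetGlobalVariablesInitializer()).Run();

            var state = new float[1, 16];                // one-hot encoded state
            state[0, 3] = 1f;
            var outs  = session.GetRunner()
                        .AddInput(s, new TFTensor(state))
                        .Fetch(Predict, Qout).Run();

            var target = (float[,])outs[1].GetValue();   // copy Qout, then overwrite one entry
            target[0, 0] = 1.0f;                         // e.g. reward observed for action 0
            session.GetRunner()
                   .AddInput(s, new TFTensor(state))
                   .AddInput(nextQ, new TFTensor(target))
                   .AddTarget(UpdateModel)
                   .Run();
        }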
Example #4
        //
        // Port of https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/2_BasicModels/linear_regression.py
        //
        void LinearRegression()
        {
            Console.WriteLine("Linear regression");
            // Parameters
            var learning_rate   = 0.001f;
            var training_epochs = 100;
            var display_step    = 50;

            // Training data
            var train_x = new double [] {
                3.3, 4.4, 5.5, 6.71, 6.93, 4.168, 9.779, 6.182, 7.59, 2.167,
                7.042, 10.791, 5.313, 7.997, 5.654, 9.27, 3.1
            };
            var train_y = new double [] {
                1.7, 2.76, 2.09, 3.19, 1.694, 1.573, 3.366, 2.596, 2.53, 1.221,
                2.827, 3.465, 1.65, 2.904, 2.42, 2.94, 1.3
            };
            var n_samples = train_x.Length;

            using (var g = new TFGraph()) {
                var rng = new Random(0);
                // tf Graph Input

                var X = g.Placeholder(TFDataType.Double);
                var Y = g.Placeholder(TFDataType.Double);

                var W    = g.Variable(g.Const(rng.NextDouble()), operName: "weight");
                var b    = g.Variable(g.Const(rng.NextDouble()), operName: "bias");
                var pred = g.Add(g.Mul(X, W.Read, "x_w"), b.Read);

                var cost = g.Div(g.ReduceSum(g.Pow(g.Sub(pred, Y), g.Const(2.0))), g.Mul(g.Const(2.0), g.Const((double)n_samples), "2_n_samples"));

                // SOLVED: the bindings now surface gradient support; this was previously
                // blocked on https://github.com/migueldeicaza/TensorFlowSharp/issues/25

                var sgd       = new SGD(g, learning_rate);
                var updateOps = sgd.Minimize(cost);

                using (var session = new TFSession(g))
                {
                    session.GetRunner().AddTarget(g.GetGlobalVariablesInitializer()).Run();

                    for (int i = 0; i < training_epochs; i++)
                    {
                        double avgLoss = 0;
                        for (int j = 0; j < n_samples; j++)
                        {
                            var tensors = session.GetRunner()
                                          .AddInput(X, new TFTensor(train_x[j]))
                                          .AddInput(Y, new TFTensor(train_y[j]))
                                          .AddTarget(updateOps).Fetch(sgd.Iterations.Read, cost, W.Read, b.Read, sgd.LearningRate).Run();
                            avgLoss += (double)tensors[1].GetValue();
                        }
                        var tensors2 = session.GetRunner()
                                       .Fetch(W.Read, b.Read).Run();
                        var output = $"Epoch: {i + 1:D}, loss: {avgLoss / n_samples:F4}, W: {tensors2[0].GetValue():F4}, b: {tensors2[1].GetValue():F4}";
                        Console.WriteLine(output);
                    }
                }
            }
        }
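
As a sanity check on where the loop should converge, the least-squares line for this data also has a closed form. A sketch (assumes using System.Linq):

        // Closed-form least-squares fit for y = W*x + b:
        // W = (n*Sxy - Sx*Sy) / (n*Sxx - Sx^2),  b = (Sy - W*Sx) / n.
        double sx  = train_x.Sum();
        double sy  = train_y.Sum();
        double sxx = train_x.Sum(v => v * v);
        double sxy = train_x.Zip(train_y, (a, c) => a * c).Sum();
        double wHat = (n_samples * sxy - sx * sy) / (n_samples * sxx - sx * sx);
        double bHat = (sy - wHat * sx) / n_samples;
        Console.WriteLine($"closed-form W: {wHat:F4}, b: {bHat:F4}");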
Example #5
        public void MNISTTwoHiddenLayerNetworkTest()
        {
            // Parameters
            var learningRate = 0.1f;
            var epochs       = 5;


            var mnist = new Mnist();

            mnist.ReadDataSets("/tmp");
            int batchSize  = 100;
            int numBatches = mnist.TrainImages.Length / batchSize;

            using (var graph = new TFGraph())
            {
                var X = graph.Placeholder(TFDataType.Float, new TFShape(-1, 784));
                var Y = graph.Placeholder(TFDataType.Float, new TFShape(-1, 10));

                graph.Seed = 1;
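                // Glorot-style uniform init for sigmoid layers: limit = 4 * sqrt(6 / (fan_in + fan_out)).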
                var initB  = (float)(4 * Math.Sqrt(6) / Math.Sqrt(784 + 500));
                var W1     = graph.Variable(graph.RandomUniform(new TFShape(784, 500), minval: -initB, maxval: initB), operName: "W1");
                var b1     = graph.Variable(graph.Constant(0f, new TFShape(500), TFDataType.Float), operName: "b1");
                var layer1 = graph.Sigmoid(graph.Add(graph.MatMul(X, W1.Read), b1.Read, operName: "layer1"));

                initB = (float)(4 * Math.Sqrt(6) / Math.Sqrt(500 + 100));
                var W2     = graph.Variable(graph.RandomUniform(new TFShape(500, 100), minval: -initB, maxval: initB), operName: "W2");
                var b2     = graph.Variable(graph.Constant(0f, new TFShape(100), TFDataType.Float), operName: "b2");
                var layer2 = graph.Sigmoid(graph.Add(graph.MatMul(layer1, W2.Read), b2.Read, operName: "layer2"));

                initB = (float)(4 * Math.Sqrt(6) / Math.Sqrt(100 + 10));
                var W3     = graph.Variable(graph.RandomUniform(new TFShape(100, 10), minval: -initB, maxval: initB), operName: "W3");
                var b3     = graph.Variable(graph.Constant(0f, new TFShape(10), TFDataType.Float), operName: "b3");
                var layer3 = graph.Add(graph.MatMul(layer2, W3.Read), b3.Read, operName: "layer3");

                // No gradient support for SparseSoftmaxCrossEntropyWithLogits,
                // so SoftmaxCrossEntropyWithLogits is used instead.
                var cost = graph.ReduceMean(graph.SoftmaxCrossEntropyWithLogits(layer3, Y, "cost").loss);

                var prediction = graph.ArgMax(graph.Softmax(layer3), graph.Const(1));
                var labels     = graph.ArgMax(Y, graph.Const(1));
                var areCorrect = graph.Equal(prediction, labels);
                var accuracy   = graph.ReduceMean(graph.Cast(areCorrect, TFDataType.Float));

                var sgd       = new SGD(graph, learningRate, 0.9f);
                var updateOps = sgd.Minimize(cost);

                using (var session = new TFSession(graph))
                {
                    session.GetRunner().AddTarget(graph.GetGlobalVariablesInitializer()).Run();

                    var expectedLines = File.ReadAllLines(Path.Combine(_testDataPath, "SGDMnist", "expected.txt"));

                    for (int i = 0; i < epochs; i++)
                    {
                        var   reader      = mnist.GetTrainReader();
                        float avgLoss     = 0;
                        float avgAccuracy = 0;
                        for (int j = 0; j < numBatches; j++)
                        {
                            var batch   = reader.NextBatch(batchSize);
                            var tensors = session.GetRunner()
                                          .AddInput(X, batch.Item1)
                                          .AddInput(Y, batch.Item2)
                                          .AddTarget(updateOps).Fetch(cost, accuracy, prediction, labels).Run();

                            avgLoss     += (float)tensors[0].GetValue();
                            avgAccuracy += (float)tensors[1].GetValue();
                        }
                        var output = $"Epoch: {i}, loss(Cross-Entropy): {avgLoss / numBatches:F4}, Accuracy:{avgAccuracy / numBatches:F4}";
                        Assert.Equal(expectedLines[i], output);
                    }
                }
            }
        }
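
The loop above reports training accuracy only. A held-out evaluation would fetch accuracy without targeting updateOps, so no weights change; a sketch, assuming a hypothetical mnist.GetTestReader() that mirrors GetTrainReader():

        // Hypothetical test-set evaluation (sketch): no AddTarget(updateOps), so no updates.
        var testReader = mnist.GetTestReader();          // assumed API, mirrors GetTrainReader()
        var testBatch  = testReader.NextBatch(10000);
        var testOut    = session.GetRunner()
                         .AddInput(X, testBatch.Item1)
                         .AddInput(Y, testBatch.Item2)
                         .Fetch(accuracy).Run();
        Console.WriteLine($"test accuracy: {(float)testOut[0].GetValue():F4}");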