Example #1
        private (TFOutput cost, TFOutput model, TFOutput accuracy) CreateNetwork(TFGraph graph, TFOutput X, TFOutput Y, Dictionary <Variable, List <TFOutput> > variables)
        {
            graph.Seed = 1;
            var initB  = (float)(4 * Math.Sqrt(6) / Math.Sqrt(784 + 500));
            var W1     = GetVariable(graph, variables, graph.RandomUniform(new TFShape(784, 500), minval: -initB, maxval: initB), operName: "W1");
            var b1     = GetVariable(graph, variables, graph.Constant(0f, new TFShape(500), TFDataType.Float), operName: "b1");
            var layer1 = graph.Sigmoid(graph.Add(graph.MatMul(X, W1.Read), b1.Read));

            initB = (float)(4 * Math.Sqrt(6) / Math.Sqrt(500 + 100));
            var W2     = GetVariable(graph, variables, graph.RandomUniform(new TFShape(500, 100), minval: -initB, maxval: initB), operName: "W2");
            var b2     = GetVariable(graph, variables, graph.Constant(0f, new TFShape(100), TFDataType.Float), operName: "b2");
            var layer2 = graph.Sigmoid(graph.Add(graph.MatMul(layer1, W2.Read), b2.Read));

            initB = (float)(4 * Math.Sqrt(6) / Math.Sqrt(100 + 10));
            var W3    = GetVariable(graph, variables, graph.RandomUniform(new TFShape(100, 10), minval: -initB, maxval: initB), operName: "W3");
            var b3    = GetVariable(graph, variables, graph.Constant(0f, new TFShape(10), TFDataType.Float), operName: "b3");
            var model = graph.Add(graph.MatMul(layer2, W3.Read), b3.Read);

            // There is no support for computing the gradient of SparseSoftmaxCrossEntropyWithLogits,
            // so SoftmaxCrossEntropyWithLogits is used instead
            var cost = graph.ReduceMean(graph.SoftmaxCrossEntropyWithLogits(model, Y).loss);

            var prediction = graph.ArgMax(graph.Softmax(model), graph.Const(1));
            var labels     = graph.ArgMax(Y, graph.Const(1));
            var areCorrect = graph.Equal(prediction, labels);
            var accuracy   = graph.ReduceMean(graph.Cast(areCorrect, TFDataType.Float));

            return(cost, model, accuracy);
        }
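The initB bound used for each weight matrix above is the Glorot/Xavier uniform initialization range, scaled by 4 as is conventional for sigmoid units:

\[ r = 4\sqrt{\frac{6}{n_{\text{in}} + n_{\text{out}}}}, \qquad W \sim \mathcal{U}(-r, r), \]

e.g. \(n_{\text{in}} = 784\), \(n_{\text{out}} = 500\) for the first layer.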
        public static TFOutput KLUnivariateNormal(TFGraph graph, TFOutput mu1, TFOutput sigma1, TFOutput mu0, TFOutput sigma0)
        {
            TFOutput t1 = graph.Div(graph.Add(graph.Square(sigma1), graph.Square(graph.Sub(mu1, mu0))), graph.Mul(graph.Const(2F), graph.Square(sigma0)));
            TFOutput t2 = graph.Log(graph.Div(sigma0, graph.Add(sigma1, graph.Const(NUMERICALCONSTANT))));

            return(graph.Sub(graph.Add(t1, t2), graph.Const(0.5F)));
        }
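For reference, KLUnivariateNormal implements the KL divergence between two univariate Gaussians (the NUMERICALCONSTANT added to \(\sigma_1\) only guards against division by zero):

\[ \mathrm{KL}\big(\mathcal{N}(\mu_1,\sigma_1^2)\,\|\,\mathcal{N}(\mu_0,\sigma_0^2)\big) = \log\frac{\sigma_0}{\sigma_1} + \frac{\sigma_1^2 + (\mu_1-\mu_0)^2}{2\sigma_0^2} - \frac{1}{2}. \]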
Example #3
        public static void XorNeuroTry()
        {
            var trainData = new double[] { 0.0, 1.0 };

            var n_samples = trainData.Length;


            using (var g = new TFGraph())
            {
                var s   = new TFSession(g);
                var rng = new Random(0);
                // tf Graph Input


                var X1 = g.Placeholder(TFDataType.Double);
                var X2 = g.Placeholder(TFDataType.Double);

                // initial weight value
                var W = g.Variable(g.Const(rng.NextDouble()), operName: "weight");

                // not sure that initializing the bias randomly is a good idea.
                var b = g.Variable(g.Const(rng.NextDouble()), operName: "bias");

                // input times the weight plus the bias = the operation that computes the weighted sum.
                var predX1 = g.Add(g.Mul(X1, W.Read, "x1_w"), b.Read);
                var predX2 = g.Add(g.Mul(X2, W.Read, "x2_w"), b.Read);

                var pred = g.Add(predX1, predX2);

                var cost = g.Sigmoid(pred);

                var learning_rate   = 0.001f;
                var training_epochs = 100;
                var sgd             = new SGD(g, learning_rate);
                var updateOps       = sgd.Minimize(cost);

                using (var session = new TFSession(g))
                {
                    session.GetRunner().AddTarget(g.GetGlobalVariablesInitializer()).Run();

                    for (int i = 0; i < training_epochs; i++)
                    {
                        double avgLoss = 0;
                        for (int j = 0; j < n_samples; j++)
                        {
                            var tensors = session.GetRunner()
                                          .AddInput(X1, new TFTensor(trainData[j]))
                                          .AddInput(X2, new TFTensor(trainData[j]))
                                          .AddTarget(updateOps).Fetch(sgd.Iterations.Read, cost, W.Read, b.Read, sgd.LearningRate).Run();
                            avgLoss += (double)tensors[1].GetValue();
                        }
                        var tensors2 = session.GetRunner()
                                       .Fetch(W.Read, b.Read).Run();
                        var output = $"Epoch: {i + 1:D}, loss: {avgLoss / n_samples:F4}, W: {tensors2[0].GetValue():F4}, b: {tensors2[1].GetValue():F4}";
                        Console.WriteLine(output);
                    }
                }
            }
        }
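As written, XorNeuroTry feeds the same value into X1 and X2 and minimizes Sigmoid(pred) directly rather than a distance to a target, so it cannot learn XOR. A minimal sketch of a squared-error cost against a label placeholder, using only ops already seen in these samples (a single linear unit still cannot represent XOR, so a hidden layer would also be needed; this only illustrates the loss):

                // Hypothetical label placeholder for the XOR target of the current input pair,
                // fed with AddInput alongside X1 and X2.
                var YL   = g.Placeholder(TFDataType.Double);
                // Squared error between the network output and the target, replacing the cost above.
                var cost = g.Square(g.Sub(g.Sigmoid(pred), YL));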
        public TFOutput Samplez(TFGraph graph, TFOutput act)
        {
            TFOutput mu    = graph.Add(graph.MatMul(act, Mu_W), Mu_b);
            TFOutput sigma = graph.Sqrt(graph.Add(graph.MatMul(graph.Square(act), graph.Square(NNOperations.LogTrans(graph, Phi_W))), graph.Square(NNOperations.LogTrans(graph, Phi_b))));
            int      seed  = Global.Random.Next(0, int.MaxValue);
            TFOutput eps   = graph.Cast(graph.RandomNormal(new TFShape(Global.TFMAXBATCHSIZE, graph.GetShape(mu)[1]), 0, 1, seed), TFDataType.Float);

            eps = graph.Slice(eps, graph.Const(new int[] { 0, 0 }), graph.Shape(mu));
            return(graph.Add(graph.Mul(eps, sigma), mu));
        }
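Samplez (like Sampleb further below) draws from a Gaussian via the reparameterization trick, which keeps the sample differentiable with respect to \(\mu\) and \(\sigma\):

\[ z = \mu + \sigma \odot \varepsilon, \qquad \varepsilon \sim \mathcal{N}(0, I). \]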
Example #5
        public void VariableArrayExample(float[] a, float[] b)
        {
            using (var graph = new TFGraph())
            {
                var aVar  = graph.VariableV2(new TFShape(a.Length), TFDataType.Float, operName: "aVar");
                var initA = graph.Assign(aVar, graph.Const(a, TFDataType.Float));

                var bVar  = graph.VariableV2(new TFShape(b.Length), TFDataType.Float, operName: "bVar");
                var initB = graph.Assign(bVar, graph.Const(b));

                var adderNode = graph.Add(aVar, bVar);

                using (var session = new TFSession(graph))
                {
                    var runInit = session.GetRunner()
                                  .AddTarget(initA.Operation).Run();
                    var runInit2 = session.GetRunner()
                                   .AddTarget(initB.Operation).Run();

                    var output  = session.GetRunner().Run(adderNode);
                    var results = (float[])output.GetValue();

                    var printValue = "";
                    foreach (var result in results)
                    {
                        printValue += $"{result},";
                    }
                    Console.WriteLine($"VariableArrayExample: {printValue}");
                }
            }
        }
Example #6
        public void VariableExample(float a, float b)
        {
            using (var graph = new TFGraph())
            {
                var aVar  = graph.VariableV2(TFShape.Scalar, TFDataType.Float, operName: "aVar");
                var initA = graph.Assign(aVar, graph.Const(a));

                var bVar  = graph.VariableV2(TFShape.Scalar, TFDataType.Float, operName: "bVar");
                var initB = graph.Assign(bVar, graph.Const(b));

                var adderNode = graph.Add(aVar, bVar);

                using (var session = new TFSession(graph))
                {
                    var runInit = session.GetRunner()
                                  .AddTarget(initA.Operation)
                                  .AddTarget(initB.Operation).Run();

                    var output = session.GetRunner().Run(adderNode);
                    var result = (float)output.GetValue();

                    Console.WriteLine($"VariableExample: {result}");
                }
            }
        }
Example #7
        static void Main(string[] args)
        {
            // Create the graph
            var g = new TFGraph();

            // Create placeholders so values can be supplied at run time
            var x = g.Placeholder(TFDataType.Float); // 1
            var y = g.Placeholder(TFDataType.Float); // 2
            var z = g.Placeholder(TFDataType.Float); // 3

            // Perform various math operations
            var a = g.Add(x, y);
            var b = g.Mul(a, z);
            var c = g.Pow(b, g.Const(2.0f)); // Note: the data types must match, otherwise an error is raised
            var d = g.Div(c, x);
            var e = g.Sqrt(d);

            // Create the session
            var sess = new TFSession(g);

            // Feed the placeholders and run the graph
            var result = sess.GetRunner()
                         .AddInput(x, 1.0f) // Note: the data types must match, otherwise an error is raised
                         .AddInput(y, 2.0f)
                         .AddInput(z, 3.0f)
                         .Run(e).GetValue();

            // Print the result
            // sqrt(((1+2)*3)^2/1) = 9
            Console.WriteLine("e={0}", result);
        }
Example #8
        static void Main(string[] args)
        {
            // Create the graph
            var g = new TFGraph();

            // Define constants
            var a = g.Const(2);
            var b = g.Const(3);

            // Addition and multiplication operations
            var add = g.Add(a, b);
            var mul = g.Mul(a, b);

            // Create the session
            var sess = new TFSession(g);

            // Compute the addition
            var result1 = sess.GetRunner().Run(add).GetValue();

            Console.WriteLine("a+b={0}", result1);

            // Compute the multiplication
            var result2 = sess.GetRunner().Run(mul).GetValue();

            Console.WriteLine("a*b={0}", result2);

            // Close the session
            sess.CloseSession();
        }
Example #9
        public void Should_RunPartialRun()
        {
            using (var graph = new TFGraph())
                using (var session = new TFSession(graph))
                {
                    float aValue = 1;
                    float bValue = 2;

                    var a = graph.Placeholder(TFDataType.Float);
                    var b = graph.Placeholder(TFDataType.Float);
                    var c = graph.Placeholder(TFDataType.Float);

                    var r1 = graph.Add(a, b);
                    var r2 = graph.Mul(r1, c);

                    var h          = session.PartialRunSetup(new[] { a, b, c }, new[] { r1, r2 }, new[] { r1.Operation, r2.Operation });
                    var res        = session.PartialRun(h, new[] { a, b }, new TFTensor[] { aValue, bValue }, new TFOutput[] { r1 }, new[] { r1.Operation }); // 1+2=3
                    var calculated = (float)res[0].GetValue();
                    Assert.Equal(3, calculated);

                    float temp = calculated * 17;                                                                                   // 3*17=51
                    res        = session.PartialRun(h, new[] { c }, new TFTensor[] { temp }, new[] { r2 }, new[] { r2.Operation }); // 51*3=153
                    calculated = (float)res[0].GetValue();
                    Assert.Equal(153, calculated);
                }
        }
Example #10
        public TFTensor Think(TFTensor inputs)
        {
            TFTensor previousLayerOutput = inputs;

            //Feed Forward through each layer in the network
            for (int i = 0; i < Weights.Count; i++)
            {
                using (TFGraph graph = new TFGraph())
                {
                    TFOutput layerinputs  = graph.Const(previousLayerOutput); //The inputs to this layer are the outputs from the previous layer.
                    TFOutput layerweights = graph.Const(Weights[i]);          //Get the weights for this layer.
                    TFOutput biases       = graph.Const(Biases[i]);           //Get the biases for this layer.

                    //Matrix Multiply the weights and inputs, then add the biases, then relu.
                    TFOutput inputsTimesWeights = graph.MatMul(layerweights, layerinputs);
                    TFOutput biasesAdded        = graph.Add(inputsTimesWeights, biases);
                    TFOutput activation         = graph.Relu(biasesAdded);

                    //Get the tensor to feed to the next layer
                    TFSession session = new TFSession(graph);
                    previousLayerOutput = session.GetRunner().Run(activation);
                }
            }


            return(previousLayerOutput);
        }
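Think builds and discards a new graph and session for every layer on every call. A minimal sketch of the same forward pass assembled once in a single graph, assuming the same Weights and Biases collections used above:

        public TFTensor ThinkSingleGraph(TFTensor inputs)
        {
            using (TFGraph graph = new TFGraph())
            using (TFSession session = new TFSession(graph))
            {
                TFOutput current = graph.Const(inputs);
                for (int i = 0; i < Weights.Count; i++)
                {
                    // weights * activations + biases, followed by ReLU, exactly as in Think above.
                    TFOutput weights = graph.Const(Weights[i]);
                    TFOutput biases  = graph.Const(Biases[i]);
                    current = graph.Relu(graph.Add(graph.MatMul(weights, current), biases));
                }
                return session.GetRunner().Run(current);
            }
        }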
Example #11
        public void DevicePlacementTest()
        {
            using (var graph = new TFGraph())
                using (var session = new TFSession(graph))
                {
                    var X = graph.Placeholder(TFDataType.Float, new TFShape(-1, 784));
                    var Y = graph.Placeholder(TFDataType.Float, new TFShape(-1, 10));

                    int numGPUs  = 4;
                    var Xs       = graph.Split(graph.Const(0), X, numGPUs);
                    var Ys       = graph.Split(graph.Const(0), Y, numGPUs);
                    var products = new TFOutput[numGPUs];
                    for (int i = 0; i < numGPUs; i++)
                    {
                        using (var device = graph.WithDevice("/device:GPU:" + i))
                        {
                            var W = graph.Constant(0.1f, new TFShape(784, 500), TFDataType.Float);
                            var b = graph.Constant(0.1f, new TFShape(500), TFDataType.Float);
                            products[i] = graph.Add(graph.MatMul(Xs[i], W), b);
                        }
                    }
                    var   stacked = graph.Concat(graph.Const(0), products);
                    Mnist mnist   = new Mnist();
                    mnist.ReadDataSets("/tmp");
                    int batchSize = 1000;
                    for (int i = 0; i < 100; i++)
                    {
                        var reader = mnist.GetTrainReader();
                        (var trainX, var trainY) = reader.NextBatch(batchSize);
                        var outputTensors = session.Run(new TFOutput[] { X }, new TFTensor[] { new TFTensor(trainX) }, new TFOutput[] { stacked });
                        Assert.Equal(1000, outputTensors[0].Shape[0]);
                        Assert.Equal(500, outputTensors[0].Shape[1]);
                    }
                }
        }
Example #12
        public void LinearRegresionTrainingMomentumNesterovWithTimeDecayTest()
        {
            Console.WriteLine("Linear regression");
            // Parameters
            var learning_rate   = 0.01f;
            var training_epochs = 2;

            // Training data
            var train_x = new float[] {
                3.3f, 4.4f, 5.5f, 6.71f, 6.93f, 4.168f, 9.779f, 6.182f, 7.59f, 2.167f,
                7.042f, 10.791f, 5.313f, 7.997f, 5.654f, 9.27f, 3.1f
            };
            var train_y = new float[] {
                1.7f, 2.76f, 2.09f, 3.19f, 1.694f, 1.573f, 3.366f, 2.596f, 2.53f, 1.221f,
                2.827f, 3.465f, 1.65f, 2.904f, 2.42f, 2.94f, 1.3f
            };
            var n_samples = train_x.Length;

            using (var graph = new TFGraph())
            {
                var rng = new Random(0);
                // tf Graph Input

                var X = graph.Placeholder(TFDataType.Float, TFShape.Scalar);
                var Y = graph.Placeholder(TFDataType.Float, TFShape.Scalar);

                var W    = graph.Variable(graph.Const(0.1f), operName: "weight");
                var b    = graph.Variable(graph.Const(0.1f), operName: "bias");
                var pred = graph.Add(graph.Mul(X, W.Read, "x_w"), b.Read);

                var cost = graph.Div(graph.ReduceSum(graph.Pow(graph.Sub(pred, Y), graph.Const(2f))), graph.Mul(graph.Const(2f), graph.Const((float)n_samples), "2_n_samples"));

                var sgd       = new SGD(graph, learning_rate, 0.9f, 0.5f, nesterov: true);
                var updateOps = sgd.Minimize(cost);

                var readIter = sgd.Iterations.ReadAfter(updateOps);
                var readW    = W.ReadAfter(updateOps);
                var readb    = b.ReadAfter(updateOps);

                using (var session = new TFSession(graph))
                {
                    session.GetRunner().AddTarget(graph.GetGlobalVariablesInitializer()).Run();

                    var expectedLines = File.ReadAllLines(Path.Combine(_testDataPath, "MomentumNesterovTimeDecay", "expected.txt"));
                    for (int i = 0; i < training_epochs; i++)
                    {
                        for (int j = 0; j < n_samples; j++)
                        {
                            var tensors = session.GetRunner()
                                          .AddInput(X, new TFTensor(train_x[j]))
                                          .AddInput(Y, new TFTensor(train_y[j]))
                                          .AddTarget(updateOps)
                                          .Fetch(readIter, cost, readW, readb, sgd.LearningRate).Run();
                            var output = Invariant($"step: {tensors[0].GetValue():D}, loss: {tensors[1].GetValue():F4}, W: {tensors[2].GetValue():F4}, b: {tensors[3].GetValue():F4}, lr: {tensors[4].GetValue():F8}");
                            Assert.Equal(expectedLines[i * n_samples + j], output);
                        }
                    }
                }
            }
        }
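For reference, the usual formulation of SGD with Nesterov momentum and time-based decay, which the SGD helper here appears to be configured with (reading 0.9f as the momentum \(m\) and 0.5f as the decay \(d\) is an assumption):

\[ \eta_t = \frac{\eta_0}{1 + d\,t}, \qquad v_t = m\,v_{t-1} - \eta_t \nabla J(\theta_t), \qquad \theta_{t+1} = \theta_t + m\,v_t - \eta_t \nabla J(\theta_t). \]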
Example #13
        //
        // Shows how to use placeholders to pass values
        //
        void BasicVariables()
        {
            Console.WriteLine("Using placerholders");
            using (var g = new TFGraph()) {
                var s = new TFSession(g);

                // We use "shorts" here, so notice the casting to short to get the
                // tensor with the right data type.
                var var_a = g.Placeholder(TFDataType.Int16);
                var var_b = g.Placeholder(TFDataType.Int16);

                var add = g.Add(var_a, var_b);
                var mul = g.Mul(var_a, var_b);

                var runner = s.GetRunner();
                runner.AddInput(var_a, new TFTensor((short)3));
                runner.AddInput(var_b, new TFTensor((short)2));
                Console.WriteLine("a+b={0}", runner.Run(add).GetValue());

                runner = s.GetRunner();
                runner.AddInput(var_a, new TFTensor((short)3));
                runner.AddInput(var_b, new TFTensor((short)2));

                Console.WriteLine("a*b={0}", runner.Run(mul).GetValue());

                // TODO
                // Would be nice to have an API that allows me to pass the values at Run time, easily:
                // s.Run (add, { var_a: 3, var_b: 2 })
                // C# allows something with Dictionary constructors, but you still must provide the type
                // signature.
            }
        }
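A minimal sketch of the helper the TODO above asks for, using only the TFSession.GetRunner/AddInput/Run calls already shown in these samples (the extension-method name and tuple parameter are illustrative):

    static class SessionExtensions
    {
        // Runs a single output after feeding the given placeholder/value pairs.
        public static TFTensor Run(this TFSession session, TFOutput output,
                                   params (TFOutput placeholder, TFTensor value)[] inputs)
        {
            var runner = session.GetRunner();
            foreach (var (placeholder, value) in inputs)
                runner.AddInput(placeholder, value);
            return runner.Run(output);
        }
    }

With it, the example above reduces to s.Run(add, (var_a, new TFTensor((short)3)), (var_b, new TFTensor((short)2))).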
Example #14
        //
        // Shows the use of Variable
        //
        void TestVariable()
        {
            Console.WriteLine("Variables");
            var status = new TFStatus();

            using (var g = new TFGraph()) {
                var         initValue = g.Const(1.5);
                var         increment = g.Const(0.5);
                TFOperation init;
                TFOutput    value;
                var         handle = g.Variable(initValue, out init, out value);

                // Add 0.5 and assign to the variable.
                // Perhaps using op.AssignAddVariable would be better,
                // but demonstrating with Add and Assign for now.
                var update = g.AssignVariableOp(handle, g.Add(value, increment));

                var s = new TFSession(g);
                // Must first initialize all the variables.
                s.GetRunner().AddTarget(init).Run(status);
                Assert(status);
                // Now print the value, run the update op and repeat
                // Ignore errors.
                for (int i = 0; i < 5; i++)
                {
                    // Read and update
                    var result = s.GetRunner().Fetch(value).AddTarget(update).Run();

                    Console.WriteLine("Result of variable read {0} -> {1}", i, result [0].GetValue());
                }
            }
        }
Example #15
        // Placeholders
        private static void placeholder(TFGraph g, TFSession sess)
        {
            Console.WriteLine("。。。。。。。。。。。。。。。。。占位符。。。。。。。。。。。。。。。。。。");
            //var g = new TFGraph();
            //var sess = new TFSession();

            var x = g.Placeholder(TFDataType.Float);
            var y = g.Placeholder(TFDataType.Float);
            var z = g.Placeholder(TFDataType.Float);


            var a1 = g.Add(x, y);
            var b1 = g.Mul(a1, z);
            var c1 = g.Pow(b1, g.Const(2.0f));
            var d1 = g.Div(c1, x);
            var e1 = g.Sqrt(d1);

            //var mn=

            var result3 = sess.GetRunner().AddInput(x, 1.0f)
                          .AddInput(y, 2.0f)
                          .AddInput(z, 3.0f)
                          .Run(e1).GetValue();


            Console.WriteLine("e={0}", result3);

            //sess.CloseSession();
        }
        public static TFOutput LogOfNormal(TFGraph graph, TFOutput x, TFOutput mu, TFOutput sigma)
        {
            TFOutput term1 = graph.Mul(graph.Const(-0.5F), graph.Log(graph.Square(sigma)));
            TFOutput term2 = graph.Div(graph.SquaredDifference(x, mu), graph.Mul(graph.Const(-2F), graph.Square(sigma)));

            return(graph.Add(term1, term2));
        }
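LogOfNormal is the Gaussian log-density up to its additive constant, i.e. it drops the \(-\tfrac{1}{2}\log(2\pi)\) term:

\[ \log \mathcal{N}(x;\mu,\sigma^2) = -\tfrac{1}{2}\log(\sigma^2) - \frac{(x-\mu)^2}{2\sigma^2} - \tfrac{1}{2}\log(2\pi). \]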
        public static TFOutput LogOfLaplace(TFGraph graph, TFOutput x, TFOutput mu, TFOutput sigma)
        {
            TFOutput term1 = graph.Mul(graph.Const(-1F), graph.Log(sigma));
            TFOutput term2 = graph.Div(graph.Abs(graph.Sub(x, mu)), graph.Mul(graph.Const(-1F), sigma));

            return(graph.Add(term1, term2));
        }
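Likewise, LogOfLaplace is the Laplace log-density with scale \(\sigma\) up to the constant \(-\log 2\):

\[ \log \mathrm{Laplace}(x;\mu,\sigma) = -\log(2\sigma) - \frac{|x-\mu|}{\sigma}. \]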
        public TFOutput Sampleb(TFGraph graph)
        {
            int      seed    = Global.Random.Next(0, int.MaxValue);
            TFOutput eps     = graph.Cast(graph.RandomNormal(graph.GetTensorShape(Mu_b), 0, 1, seed), TFDataType.Float);
            TFOutput sigma_b = NNOperations.LogTrans(graph, Phi_b);

            return(graph.Add(graph.Mul(eps, sigma_b), Mu_b));
        }
Example #19
        static void Main(string[] args)
        {
            // Load the MNIST handwritten digit data
            var mnist = Mnist.Load();

            // Number of training and test samples
            var trainCount = 5000;
            var testCount  = 200;

            // Get the training images, training labels, test images, and test labels
            float[,] trainingImages, trainingLabels, testImages, testLabels;
            mnist.GetTrainReader().NextBatch(trainCount, out trainingImages, out trainingLabels);
            mnist.GetTestReader().NextBatch(testCount, out testImages, out testLabels);

            // Create the graph
            var g = new TFGraph();

            // Placeholders for the training images and for a single test image
            var trainingInput = g.Placeholder(TFDataType.Float, new TFShape(-1, 784)); // an arbitrary number of 28*28-pixel (784-value) images
            var xte           = g.Placeholder(TFDataType.Float, new TFShape(784));

            // Build the graph that computes the distance and the prediction
            var distance = g.ReduceSum(g.Abs(g.Add(trainingInput, g.Neg(xte))), axis: g.Const(1));
            var pred     = g.ArgMin(distance, g.Const(0));

            // Create the session
            var sess = new TFSession(g);

            // Accuracy
            var accuracy = 0.0f;

            // Loop over the test samples, printing the prediction each time
            for (int i = 0; i < testCount; i++)
            {
                var runner = sess.GetRunner();

                // Compute and fetch the distance and the prediction
                var result = runner.
                             Fetch(pred).
                             Fetch(distance).
                             AddInput(trainingInput, trainingImages).
                             AddInput(xte, Extract(testImages, i)).Run();
                var r  = result[0].GetValue();
                var tr = result[1].GetValue();

                var nn_index = (int)(long)result[0].GetValue();

                Console.WriteLine($"训练次数 {i}: 预测: { ArgMax(trainingLabels, nn_index) } 真实值: { ArgMax(testLabels, i)} (nn_index= { nn_index })");
                if (ArgMax(trainingLabels, nn_index) == ArgMax(testLabels, i))
                {
                    accuracy += 1f / testCount;
                }
            }

            // Overall accuracy
            Console.WriteLine("Accuracy: " + accuracy);
        }
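The classifier above is a 1-nearest-neighbour rule under the L1 distance: for a test image \(x\) it returns the label of the training image \(\hat{\jmath}\) with the smallest summed absolute difference,

\[ \hat{\jmath} = \arg\min_j \sum_k \big| X^{\text{train}}_{jk} - x_k \big|. \]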
Example #20
        /// <summary>
        /// Compiles the model into an executable graph
        /// </summary>
        /// <param name="optimizer">The optimization algorithm to use for training the model</param>
        /// <param name="losses">The losses for each of the outputs of the model</param>
        /// <remarks>The list of loss functions should be in order of the outputs of the model</remarks>
        public void Compile(Optimizer optimizer, IEnumerable <LossFunction> losses)
        {
            if (optimizer == null)
            {
                throw new ArgumentNullException(
                          "The optimizer must be specified",
                          nameof(optimizer));
            }

            if (losses.Count() != _outputs.Count())
            {
                throw new ArgumentException(
                          "The number of loss functions does not match the number of outputs of the model",
                          nameof(losses));
            }

            _graph = new TFGraph();

            var compilationContext = new ModelCompilationContext(_graph);

            _optimizer          = optimizer;
            _inputMapping       = new Dictionary <Input, TFOutput>();
            _outputMapping      = new Dictionary <Layer, TFOutput>();
            _placeholderMapping = new Dictionary <Layer, TFOutput>();

            var compiledLosses = new List <TFOutput>();

            var layersWithLosses = Enumerable.Zip(_outputs, losses, (layer, loss) => (layer, loss));

            // By compiling the outputs, the layers that are connected
            // to the outputs are also compiled. This goes all the way back to the inputs.
            foreach (var (layer, loss) in layersWithLosses)
            {
                var placeholder = _graph.Placeholder(TFDataType.Double, new TFShape(layer.OutputShape));
                var output      = layer.Compile(compilationContext);

                _outputMapping.Add(layer, output);
                _placeholderMapping.Add(layer, placeholder);

                var compiledLoss = loss.Compile(compilationContext, output, placeholder);

                compiledLosses.Add(compiledLoss);
            }

            foreach (var input in _inputs)
            {
                _inputMapping.Add(input, input.Configuration.Output);
            }

            _modelLoss = compiledLosses.Aggregate((left, right) => _graph.Add(left, right));
            _optimizer.Compile(compilationContext, _modelLoss, compilationContext.Parameters);

            _initializers = compilationContext.Initializers;
            _parameters   = compilationContext.Parameters;
        }
Example #21
        // This sample has a bug. I suspect the data loaded is incorrect, because the returned
        // values in distance are wrong, and so is the prediction computed from it.
        void NearestNeighbor()
        {
            // Get the Mnist data

            var mnist = Mnist.Load();

            // 5000 for training
            const int trainCount = 5000;
            const int testCount  = 200;
            var       Xtr        = mnist.GetBatchReader(mnist.TrainImages).ReadAsTensor(trainCount);
            var       Ytr        = mnist.OneHotTrainLabels;
            var       Xte        = mnist.GetBatchReader(mnist.TestImages).Read(testCount);
            var       Yte        = mnist.OneHotTestLabels;



            Console.WriteLine("Nearest neighbor on Mnist images");
            using (var g = new TFGraph()) {
                var s = new TFSession(g);


                TFOutput xtr = g.Placeholder(TFDataType.Float, new TFShape(-1, 784));

                TFOutput xte = g.Placeholder(TFDataType.Float, new TFShape(784));

                // Nearest Neighbor calculation using L1 Distance
                // Calculate L1 Distance
                TFOutput distance = g.ReduceSum(g.Abs(g.Add(xtr, g.Neg(xte))), axis: g.Const(1));

                // Prediction: Get min distance index (Nearest neighbor)
                TFOutput pred = g.ArgMin(distance, g.Const(0));

                var accuracy = 0f;
                // Loop over the test data
                for (int i = 0; i < testCount; i++)
                {
                    var runner = s.GetRunner();

                    // Get nearest neighbor

                    var result   = runner.Fetch(pred).Fetch(distance).AddInput(xtr, Xtr).AddInput(xte, Xte [i].DataFloat).Run();
                    var r        = result [0].GetValue();
                    var tr       = result [1].GetValue();
                    var nn_index = (int)(long)result [0].GetValue();

                    // Get nearest neighbor class label and compare it to its true label
                    Console.WriteLine($"Test {i}: Prediction: {ArgMax (Ytr, nn_index)} True class: {ArgMax (Yte, i)} (nn_index={nn_index})");
                    if (ArgMax(Ytr, nn_index) == ArgMax(Yte, i))
                    {
                        accuracy += 1f / Xte.Length;
                    }
                }
                Console.WriteLine("Accuracy: " + accuracy);
            }
        }
Example #22
        public TFOutput Predict(TFOutput x, TFOutput[] w, TFOutput[] b, TFGraph graph)
        {
            TFOutput LayerOut = x;

            for (int i = 0; i < w.Length; i++)
            {
                LayerOut = graph.Add(graph.MatMul(LayerOut, w[i]), b[i]);
            }

            return(LayerOut);
        }
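Since Predict applies only affine layers, the whole loop collapses to a single affine map of x. A minimal sketch with a ReLU between hidden layers (the choice of activation is an assumption, using only ops already seen above):

        public TFOutput PredictWithRelu(TFOutput x, TFOutput[] w, TFOutput[] b, TFGraph graph)
        {
            TFOutput layerOut = x;
            for (int i = 0; i < w.Length; i++)
            {
                layerOut = graph.Add(graph.MatMul(layerOut, w[i]), b[i]);
                // Apply the nonlinearity to every layer except the final, linear output layer.
                if (i < w.Length - 1)
                {
                    layerOut = graph.Relu(layerOut);
                }
            }
            return layerOut;
        }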
Example #23
        static void Main(string[] args)
        {
            // Create the graph
            var g = new TFGraph();

            // Create a placeholder; it will be fed with the xValue matrix
            var x      = g.Placeholder(TFDataType.Int32);
            var xValue = new int[, ]
            {
                { 0, 1 },
                { 2, 3 }
            };

            // Create constants
            var a = g.Const(2);
            var b = g.Const(new int[, ]
            {
                { 1, 1 },
                { 1, 1 }
            });
            var c = g.Const(new int[, ]
            {
                { 1, 0 },
                { 0, 1 }
            });

            // Multiply the matrix by a constant
            var d = g.Mul(x, a);

            // Add the two matrices
            var e = g.Add(d, b);

            // Multiply the two matrices
            var f = g.MatMul(e, c);

            // Create the session
            var sess = new TFSession(g);

            // Evaluate matrix f and store the result in result
            var result = (int[, ])sess.GetRunner()
                         .AddInput(x, xValue)
                         .Fetch(f).Run()[0].GetValue();

            // Print matrix f
            for (var i = 0; i < xValue.GetLength(0); i++)
            {
                for (var j = 0; j < xValue.GetLength(1); j++)
                {
                    Console.Write("{0}\t", result[i, j].ToString());
                }
                Console.Write("\r\n");
            }
        }
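With the xValue fed above, the intermediate results are:

\[ d = 2x = \begin{pmatrix} 0 & 2 \\ 4 & 6 \end{pmatrix}, \qquad e = d + \begin{pmatrix} 1 & 1 \\ 1 & 1 \end{pmatrix} = \begin{pmatrix} 1 & 3 \\ 5 & 7 \end{pmatrix}, \qquad f = e\,I = \begin{pmatrix} 1 & 3 \\ 5 & 7 \end{pmatrix}, \]

so the program prints 1 3 / 5 7.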
Example #24
        static void Main(string[] args)
        {
            // Create the graph
            var g = new TFGraph();

            var xValue = 1.0f;
            var yValue = 2.0f;

            // Create placeholders
            var x = g.Placeholder(TFDataType.Float);
            var y = g.Placeholder(TFDataType.Float);
            var z = g.Placeholder(TFDataType.Float);

            // Create the operations
            var a = g.Add(x, y); // x+y
            var b = g.Mul(a, z); // a*z

            // Create the session
            var sess = new TFSession(g);

            // Partial run setup: a = x + y, b = (x + y) * z
            var setup = sess.PartialRunSetup(
                new[] { x, y, z },
                new[] { a, b },
                new[] { a.Operation, b.Operation });

            // Partial run
            var result1 = sess.PartialRun(setup,
                                          new[] { x, y },
                                          new TFTensor[] { xValue, yValue },
                                          new TFOutput[] { a },
                                          new[] { a.Operation }); // a = x + y = 1 + 2 = 3

            // Computed result
            var aValue = (float)result1[0].GetValue(); // 3

            Console.WriteLine("a = {0}", aValue);

            // Partial run
            var result2 = sess.PartialRun(setup,
                                          new[] { z },
                                          new TFTensor[] { aValue * 17 }, // 3 * 17 = 51
                                          new[] { b },
                                          new[] { b.Operation });         // 51 * 3=153

            // Computed result
            var bValue = (float)result2[0].GetValue();

            Console.WriteLine("b = {0}", bValue);
        }
Example #25
        public void ConstructLayer(TFGraph graph, Layer previousLayer)
        {
            var shape     = graph.GetShape(previousLayer.Output);
            var inputSize = shape[shape.Length - 1];

            if (Alphas.Length != inputSize)
            {
                throw new ArgumentException($"The {nameof(Alphas)} must have a size of {inputSize} instead of {Alphas.Length}", nameof(Alphas));
            }
            using (var scope = graph.WithScope(Name))
            {
                _alphaWeights = graph.Const(new TFTensor(Alphas), TFDataType.Float);
                Output        = graph.Add(graph.Relu(previousLayer.Output), graph.Mul(_alphaWeights, graph.Neg(graph.Relu(graph.Neg(previousLayer.Output)))));
            }
        }
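The expression built in ConstructLayer is a parametric ReLU with a per-channel slope vector \(\alpha\):

\[ \mathrm{PReLU}(x) = \mathrm{relu}(x) - \alpha \cdot \mathrm{relu}(-x) = \max(0, x) + \alpha \min(0, x). \]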
Example #26
        public void PlaceholderAddExample(float a, float b)
        {
            using (var graph = new TFGraph())
            {
                var node1 = graph.Placeholder(TFDataType.Float);
                var node2 = graph.Placeholder(TFDataType.Float);
                var node3 = graph.Add(node1, node2);

                using (var session = new TFSession(graph))
                {
                    var output = session.GetRunner().AddInput(node1, a).AddInput(node2, b).Run(node3);
                    var result = output.GetValue();

                    Console.WriteLine($"PlaceholderAddExample: {result}");
                }
            }
        }
Example #27
        public void ConstantAddExample(float a, float b)
        {
            using (var graph = new TFGraph())
            {
                var node1 = graph.Const(a);
                var node2 = graph.Const(b);
                var node3 = graph.Add(node1, node2);

                using (var session = new TFSession(graph))
                {
                    var output = session.GetRunner().Run(node3);
                    var result = output.GetValue();

                    Console.WriteLine($"ConstantAddExample: {result}");
                }
            }
        }
Example #28
        void LinearRegression()
        {
            Console.WriteLine("Linear regression");
            // Parameters
            var learning_rate   = 0.01;
            var training_epochs = 1000;
            var display_step    = 50;

            // Training data
            var train_x = new double [] {
                3.3, 4.4, 5.5, 6.71, 6.93, 4.168, 9.779, 6.182, 7.59, 2.167,
                7.042, 10.791, 5.313, 7.997, 5.654, 9.27, 3.1
            };
            var train_y = new double [] {
                1.7, 2.76, 2.09, 3.19, 1.694, 1.573, 3.366, 2.596, 2.53, 1.221,
                2.827, 3.465, 1.65, 2.904, 2.42, 2.94, 1.3
            };
            var n_samples = train_x.Length;

            using (var g = new TFGraph()) {
                var s   = new TFSession(g);
                var rng = new Random();
                // tf Graph Input

                var X = g.Placeholder(TFDataType.Float);
                var Y = g.Placeholder(TFDataType.Float);
                var W = g.Variable(g.Const(rng.Next()), operName: "weight");
                var b = g.Variable(g.Const(rng.Next()), operName: "bias");

                var pred = g.Add(g.Mul(X, W), b);

                // Struggling with the following:
                // The call to g.Pow returns a TFOutput, but g.ReduceSum expects a TFTensor
                // Python seems to return operation definitions, and somehow those can be
                // passed as tensors:
                // tensorflow/python/framework/op_def_library.py
                //  (apply_op)
                //
                //https://github.com/aymericdamien/TensorFlow-Examples/blob/master/examples/2_BasicModels/linear_regression.py
                var cost = g.Div(g.ReduceSum(g.Pow(g.Sub(pred, Y), g.Const(2))), g.Mul(g.Const(2), g.Const(n_samples)));


                // Stuck here: need gradient support
            }
        }
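The cost being assembled here (the same one Example #12 above actually trains with SGD) is the halved mean squared error:

\[ J(W, b) = \frac{1}{2n} \sum_{i=1}^{n} \big( W x_i + b - y_i \big)^2. \]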
Example #29
        public void LinearModelWithTrainingExample()
        {
            using (var graph = new TFGraph())
            {
                var wData = new float[] { 0.3f };
                var bData = new float[] { -0.3f };


                var w     = graph.VariableV2(new TFShape(wData.Length), TFDataType.Float, operName: "w");
                var initW = graph.Assign(w, graph.Const(wData, TFDataType.Float));

                var b     = graph.VariableV2(new TFShape(bData.Length), TFDataType.Float, operName: "b");
                var initB = graph.Assign(b, graph.Const(bData));

                var x           = graph.Placeholder(TFDataType.Float);
                var linearModel = graph.Add(b, graph.Mul(w, x));

                var y             = graph.Placeholder(TFDataType.Float);
                var squaredDeltas = graph.SquaredDifference(linearModel, y);
                var loss          = graph.ReduceSum(squaredDeltas);

                //var optimizer = graph.ApplyGradientDescent(loss, 0.01, );

                using (var session = new TFSession(graph))
                {
                    var runInit = session.GetRunner()
                                  .AddTarget(initW.Operation).Run();
                    var runInit2 = session.GetRunner()
                                   .AddTarget(initB.Operation).Run();
                    var output = session.GetRunner()
                                 .AddInput(x, new float[] { 1, 2, 3, 4 })
                                 .AddInput(y, new float[] { 0, -1, -2, -3 })
                                 .Run(loss);
                    var results = (float)output.GetValue();

                    var printValue = "";
                    //foreach (var result in results)
                    //{
                    //    printValue += $"{result},";
                    //}
                    printValue = $"{results}";
                    Console.WriteLine($"LinearModelWithTrainingExample: {printValue}");
                }
            }
        }
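With W = 0.3, b = -0.3 and the inputs fed above, the printed loss can be checked by hand: the model outputs are (0, 0.3, 0.6, 0.9), the targets are (0, -1, -2, -3), and

\[ \text{loss} = 0^2 + 1.3^2 + 2.6^2 + 3.9^2 = 0 + 1.69 + 6.76 + 15.21 = 23.66. \]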
Example #30
    void TryTensorflow()
    {
        using (var graph = new TFGraph())
        {
            var x = graph.Const(3.0);
            var y = graph.Const(2.0);
            var z = graph.Const(5.0);

            var h = graph.Add(x, y);
            var g = graph.Mul(h, z);

            using (var sess = new TFSession(graph))
            {
                var      runner = sess.GetRunner();
                TFTensor result = runner.Run(g);
                displayText = $"It worked!\n(x+y)*z\n= {result}";
            }
        }
    }