Code Example #1
        [Test] public void MultinomialRegression()
        {
            CleanMem_();
            const long batchSize = 1000L;
            const long epochs    = 3;

            var model = MultinomialRegressionModel();
            var ctx   = Context.GpuContext(0);
            var opt   = new GradientDescentOptimizer(ctx, model.Loss.Loss, 0.0005);

            opt.Initalize(); // note: "Initalize" (sic) is how the library itself spells this method

            var mnist   = new MNIST();
            var batcher = new Batcher(ctx, mnist.TrainImages, mnist.TrainLabels);

            for (var e = 1; e <= epochs; ++e)
            {
                var i = 0;
                while (batcher.Next(batchSize, opt, model.Images, model.Labels))
                {
                    i++;
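                    // One training step: Forward evaluates the loss, Backward computes
                    // the gradients, and Optimize applies the gradient-descent update.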
                    opt.Forward();
                    opt.Backward();
                    opt.Optimize();

                    if ((i % 10 == 0) || ((i == 1) && (e == 1)))
                    {
                        PrintStatus(e, i, opt, model, mnist.ValidationImages, mnist.ValidationLabels);
                    }
                }
            }
            PrintResult(opt, model, mnist.TestImages, mnist.TestLabels);

            CleanMem_();
        }
Code Example #2
        public static void Example2()
        {
            var cns = ConvNetSharp<double>.Instance;

            // Graph creation
            var x = cns.PlaceHolder("x");
            var y = cns.PlaceHolder("y");

            var b = cns.Variable(2.0, "b");

            var fun = x + b;

            var cost = (y - fun) * (y - fun);

            var optimizer = new GradientDescentOptimizer<double>(learningRate: 0.01);

            using (var session = new Session<double>())
            {
                session.Differentiate(cost); // computes dCost/dW at every node of the graph

                double currentCost;
                do
                {
                    var xx = BuilderInstance<double>.Volume.From(new[] { -2.0, -3.0, -10.0 }, new Shape(1, 1, 1, 3));

                    var yy = BuilderInstance<double>.Volume.From(new[] { -5.0, -6.0, -13.0 }, new Shape(1, 1, 1, 3));

                    var dico = new Dictionary<string, Volume<double>> {
                        { "x", xx }, { "y", yy }
                    };

                    currentCost = Math.Abs(session.Run(cost, dico).ToArray().Sum());
                    Console.WriteLine($"cost: {currentCost}");

                    var result = session.Run(fun, dico);
                    session.Run(optimizer, dico);
                } while (currentCost > 1e-5);
            }

            // Display the derivative at b
            var vm  = new ViewModel<double>(b.Derivate);
            var app = new Application();

            app.Run(new GraphControl {
                DataContext = vm
            });

            double finalb = b.Result;

            Console.WriteLine($"fun = x + {finalb}");
            Console.ReadLine();
        }
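A quick sanity check on this example: all three training pairs satisfy y = x - 3, so the squared-error cost is minimized at b = -3; starting from b = 2.0, gradient descent drives b there, and the final line prints a value of b close to -3 (up to the 1e-5 cost tolerance).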
Code Example #3
        [Test] public void CompareMultiLayerPerceptron()
        {
            var model = MultiLayerPerceptronModel();
            var ctx   = Context.GpuContext(0);

            var memMb = ctx.ToGpuContext().Gpu.Device.TotalMemory / 1024.0 / 1024.0;

            if (memMb < 4096.0)
            {
                Assert.Inconclusive("Need more Gpu memory.");
            }

            var opt = new GradientDescentOptimizer(ctx, model.Loss.Loss, 0.00008);

            // now we need to initialize the parameters for the optimizer
            opt.Initalize();

            // load mnist data
            var mnist   = new MNIST();
            var batcher = new Batcher(ctx, mnist.TrainImages, mnist.TrainLabels);

            var timer = Stopwatch.StartNew();

            for (var i = 0; i < 1; ++i)
            {
                batcher.Next(5000, opt, model.Images, model.Labels);
                opt.Forward();
                opt.Backward();
                opt.Optimize();
            }
            timer.Stop();
            Console.WriteLine(timer.Elapsed);
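
            // The single-iteration pass above absorbs JIT/warm-up overhead; the loop below
            // measures steady-state throughput, and Stream.Synchronize() ensures all queued
            // GPU work has completed before the stopwatch is stopped.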

            timer.Restart();
            for (var i = 0; i < 5; ++i)
            {
                batcher.Next(10000, opt, model.Images, model.Labels);
                opt.Forward();
                opt.Backward();
                opt.Optimize();
            }
            ctx.ToGpuContext().Stream.Synchronize();
            timer.Stop();
            Console.WriteLine(timer.Elapsed);

            timer.Restart();
            PrintResult(opt, model, mnist.TestImages, mnist.TestLabels);
            timer.Stop();
            Console.WriteLine(timer.Elapsed);

            CleanMem_();
        }
Code Example #4
        public void GradientDescentOptimizerReturnsValueCloseToOptimumOneParameter()
        {
            var expected          = 42.0;
            var parameterSettings = new[]
            {
                new ParameterSetting(0, 50, 1, 30)
            };

            Func<double[], double> costFunc = parameters => Math.Abs(expected - parameters[0]);
            var actual = GradientDescentOptimizer.Optimize(costFunc, parameterSettings, parameterSettings[0].StepSize);

            Assert.That(actual.Parameters[0], Is.EqualTo(expected).Within(parameterSettings[0].StepSize));
        }
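This Optimize overload appears to take a cost function, per-parameter settings (ParameterSetting's arguments look like a lower bound, upper bound, step size, and start value), and a step-size-derived tolerance. As a rough sketch of the behaviour the test asserts, not this library's actual implementation, and with every name below (DescentSketch, Minimize, min, max) made up for illustration, one-parameter descent with a numerical gradient could look like:

        using System;

        static class DescentSketch
        {
            // Illustrative only: fixed-size descent steps driven by a central-difference
            // gradient, clamped to the allowed range [min, max].
            public static double Minimize(Func<double, double> cost, double start,
                                          double stepSize, double min, double max,
                                          int maxIterations = 1000)
            {
                var p = start;
                for (var i = 0; i < maxIterations; i++)
                {
                    var h        = stepSize / 2.0;
                    var gradient = (cost(p + h) - cost(p - h)) / (2.0 * h);
                    if (gradient == 0.0)
                    {
                        break; // flat cost: treat as converged (see example #5)
                    }
                    p -= stepSize * Math.Sign(gradient); // step against the slope
                    p  = Math.Max(min, Math.Min(max, p)); // stay inside the range
                }
                return p;
            }
        }

Called as DescentSketch.Minimize(p => Math.Abs(42.0 - p), start: 30, stepSize: 1, min: 0, max: 50), the sketch lands on 42 exactly, comfortably within the one-step-size tolerance the assertion uses.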
Code Example #5
        public void GradientDescentOptimizerHandlesConstantCostFunction()
        {
            var parameterSettings = new[]
            {
                new ParameterSetting(0, 50, 1, 30)
            };

            Func<double[], double> costFunc = parameters => 0;
            OptimizationResult      actual   = null;

            Assert.That(() => actual = GradientDescentOptimizer.Optimize(costFunc, parameterSettings, parameterSettings[0].StepSize), Throws.Nothing);
            Assert.That(actual, Is.Not.Null);
            Assert.That(actual.Parameters[0], Is.Not.NaN);
        }
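The point of this test: a constant cost has a zero gradient everywhere, and the optimizer must treat that as converged rather than, say, normalizing the gradient into a step direction (a division by zero that would produce the NaN the last assertion guards against). The sketch after example #4 handles this by breaking out of its loop when the numerical gradient is zero.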
Code Example #6
        [Test] public void CompareConvolutionalNeuralNetwork()
        {
            var model = ConvolutionalNeuralNetworkModel();
            var ctx   = Context.GpuContext(0);

            var memMb = ctx.ToGpuContext().Gpu.Device.TotalMemory / 1024.0 / 1024.0;

            if (memMb < 4096.0)
            {
                Assert.Inconclusive("Need more Gpu memory.");
            }

            var opt = new GradientDescentOptimizer(ctx, model.Loss.Loss, 0.000008);

            opt.Initalize();

            var mnist   = new MNIST();
            var batcher = new Batcher(ctx, mnist.TrainImages, mnist.TrainLabels);

            var timer = Stopwatch.StartNew();

            for (var i = 0; i < 2; ++i)
            {
                batcher.Next(2500, opt, model.Images, model.Labels);
                opt.Forward();
                opt.Backward();
                opt.Optimize();
            }
            timer.Stop();
            Console.WriteLine(timer.Elapsed);

            timer.Restart();
            for (var i = 0; i < 20; ++i)
            {
                batcher.Next(2500, opt, model.Images, model.Labels);
                opt.Forward();
                opt.Backward();
                opt.Optimize();
            }
            ctx.ToGpuContext().Stream.Synchronize();
            timer.Stop();
            Console.WriteLine(timer.Elapsed);

            timer.Restart();
            PrintResult(opt, model, mnist.TestImages, mnist.TestLabels);
            timer.Stop();
            Console.WriteLine(timer.Elapsed);

            CleanMem_();
        }
Code Example #7
        //runs the backbone of the network: forward and backward propagation plus the parameter update
        [Test] public void LinearNeuron(Datas data)
        {
            CleanMem_();
            const long BatchSize = 1000L;
            const long Epoch     = 5;

            var model = LinearModel();
            var ctx   = Context.GpuContext(0);


            //makes sure the GPU has 4 GB or more of RAM
            var memMB = ctx.ToGpuContext().Gpu.Device.TotalMemory / 1024.0 / 1024.0;

            if (memMB < 4096.0)
            {
                Assert.Inconclusive("Need more gpu mem");
            }

            var opt = new GradientDescentOptimizer(ctx, model.Loss.Loss, 0.00005);

            opt.Initalize();

            var batcher = new Batcher(ctx, data.TrainText, data.TrainStory);

            for (var e = 1; e <= Epoch; e++)
            {
                int i = 0;

                while (batcher.Next(BatchSize, opt, model.Text, model.Story))
                {
                    i++;

                    opt.Forward();
                    opt.Backward();
                    opt.Optimize();

                    if ((i % 10 == 0) || (i == 1 && e == 1))
                    {
                        PrintStatus(e, i, opt, model, data.TrainText, data.TrainStory);
                    }
                }
            }
            //PrintResult(opt, model, data.TestText, data.TestStory);
            //TODO: find a place to dump the trained weights and biases; maybe just have the model write a story right away?

            CleanMem_();
        }
Code Example #8
        public static void SimpleLogisticRegression()
        {
            //const int N = 8;
            //const int D = 5;
            //const int P = 3;
            //const double learn = 0.001;

            const int    N     = 100;
            const int    D     = 784;
            const int    P     = 10;
            const double learn = 0.00005;

            var input   = Variable<double>();
            var label   = Variable<double>();
            var weights = Parameter(0.01 * RandomUniform<double>(Shape.Create(D, P)));
            var pred    = Dot(input, weights);
            var loss    = L2Loss(pred, label);

            var ctx = Context.GpuContext(0);
            var opt = new GradientDescentOptimizer(ctx, loss, learn);

            // set some data
            var inputData = new double[N, D];
            var matA      = new double[D, P];
            var matB      = new double[N, P];

            NormalRandomArray(inputData);
            NormalRandomArray(matA);
            NormalRandomArray(matB);
            var labelData = Dot(inputData, matA).Add(matB.Mul(0.1));

            opt.AssignTensor(input, inputData.AsTensor());
            opt.AssignTensor(label, labelData.AsTensor());

            opt.Initalize();
            for (var i = 0; i < 800; ++i)
            {
                opt.Forward();
                opt.Backward();
                opt.Optimize();
                if (i % 20 == 0)
                {
                    Console.WriteLine($"loss = {opt.GetTensor(loss).ToScalar()}");
                }
            }
        }
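Despite the method's name, this graph is least-squares linear regression: pred = Dot(input, weights) with an L2 loss against labels synthesized as inputData * matA + 0.1 * matB. For L(W) = sum((X*W - Y)^2), each Optimize() call takes one step of the form W <- W - lr * grad with grad = 2 * X^T * (X*W - Y), up to the library's scaling of the loss. A hand-rolled sketch of that single step in plain C# (illustrative only, not AleaTK code):

        // One gradient-descent step for L(W) = sum((X*W - Y)^2), dense arrays throughout.
        static void GradientStep(double[,] x, double[,] w, double[,] y, double learningRate)
        {
            int n = x.GetLength(0), d = x.GetLength(1), p = w.GetLength(1);

            // Residual R = X*W - Y.
            var r = new double[n, p];
            for (var i = 0; i < n; i++)
                for (var j = 0; j < p; j++)
                {
                    var s = 0.0;
                    for (var k = 0; k < d; k++) s += x[i, k] * w[k, j];
                    r[i, j] = s - y[i, j];
                }

            // Gradient dL/dW = 2 * X^T * R, applied in place: W -= lr * grad.
            for (var k = 0; k < d; k++)
                for (var j = 0; j < p; j++)
                {
                    var g = 0.0;
                    for (var i = 0; i < n; i++) g += x[i, k] * r[i, j];
                    w[k, j] -= learningRate * 2.0 * g;
                }
        }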
Code Example #9
        public void GradientDescentOptimizerReturnsValueCloseToOptimumThreeParameters()
        {
            var expected          = new double[] { 1, 13, -4 };
            var parameterSettings = new[]
            {
                new ParameterSetting(0, 10, 0.1, Double.NaN),
                new ParameterSetting(5, 20, 0.5, 19),
                new ParameterSetting(-10, 10, 0.2, 1.1)
            };

            Func<double[], double> costFunc = parameters => Math.Abs(expected[0] - parameters[0]) + Math.Abs(expected[1] - parameters[1]) + Math.Abs(expected[2] - parameters[2]);
            var actual = GradientDescentOptimizer.Optimize(costFunc, parameterSettings, parameterSettings.Sum(p => p.StepSize));

            for (int p = 0; p < expected.Length; p++)
            {
                Assert.That(actual.Parameters[p], Is.EqualTo(expected[p]).Within(parameterSettings[p].StepSize));
            }
        }
Code Example #10
        /// <summary>
        /// Solves y = x * W + b (GPU version)
        /// for y = 1 and x = -2
        /// </summary>
        public static void Example1()
        {
            var cns = ConvNetSharp<float>.Instance;

            BuilderInstance<float>.Volume = new VolumeBuilder();

            // Graph creation
            var x = cns.PlaceHolder("x");
            var y = cns.PlaceHolder("y");

            var W = cns.Variable(1.0f, "W");
            var b = cns.Variable(2.0f, "b");

            var fun = x * W + b;

            var cost = (fun - y) * (fun - y);

            var optimizer = new GradientDescentOptimizer<float>(learningRate: 0.01f);

            using (var session = new Session<float>())
            {
                session.Differentiate(cost); // computes dCost/dW at every node of the graph

                double currentCost;
                do
                {
                    var dico = new Dictionary<string, Volume<float>> {
                        { "x", -2.0f }, { "y", 1.0f }
                    };

                    currentCost = session.Run(cost, dico);
                    Console.WriteLine($"cost: {currentCost}");

                    var result = session.Run(fun, dico);
                    session.Run(optimizer, dico);
                } while (currentCost > 1e-5);

                double finalW = W.Result;
                double finalb = b.Result;
                Console.WriteLine($"fun = x * {finalW} + {finalb}");
                Console.ReadKey();
            }
        }
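A worked check for this example: with x fixed at -2 and y at 1, cost = (b - 2W - 1)^2, so every pair with b - 2W = 1 is a global minimum. From the start W = 1, b = 2 (where fun = 0 and cost = 1), the gradient always points along the same direction, and the loop converges to roughly W = 0.6, b = 2.2 rather than to a unique solution.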
Code Example #11
    public Model()
    {
        float[,] aX = LoadCsv("dataX.csv");
        float[,] aY = LoadCsv("dataY.csv");
        _dataX      = new TFTensor(aX);
        _dataY      = new TFTensor(aY);

        _session = new TFSession();
        _graph   = _session.Graph;
        _input   = _graph.Placeholder(TFDataType.Float);
        _output  = _graph.Placeholder(TFDataType.Float);
        _y_out   = new LinearLayer(_graph, _input, (int)_dataX.Shape[0], 1);
        _cost    = _graph.ReduceMean(_graph.SigmoidCrossEntropyWithLogits(_y_out.Result, _output));
        _gradientDescentOptimizer = new GradientDescentOptimizer(_graph, _cost, _y_out.W, _y_out.b);
        _gradientDescentOptimizer.ApplyGradientDescent(_graph);
        var runner = _session.GetRunner();

        runner.AddTarget(_y_out.InitB.Operation);
        runner.Run();
    }
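This constructor appears to wire up a TensorFlowSharp graph end to end: placeholders for features and labels, a linear layer producing logits, sigmoid cross-entropy reduced to a scalar cost, and a gradient-descent update over the layer's W and b. LinearLayer and this particular GradientDescentOptimizer look like helper classes defined elsewhere in the same project rather than TensorFlowSharp built-ins.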
Code Example #12
        public void EstimateAminoAcidHelixAffinity()
        {
            var annotatedSequencesFile = @"G:\Projects\HumanGenome\fullPdbSequencesHelixMarked.txt";
            var annotatedSequences     = ParseHelixSequences(annotatedSequencesFile);
            //var aminoAcidPairs = GetAminoAcidPairs(annotatedSequences);
            //var leastCommonPair = aminoAcidPairs.OrderBy(kvp => kvp.Value).First();

            Func <double[], double> costFunc = parameters => HelixSequenceCostFunc(parameters, annotatedSequences);
            var parameterSettings            = GeneratePairwiseAminoAcidParameters();

            var randomizedStartValueIterations = 100;

            //Parallel.For(0L, randomizedStartValueIterations, idx =>
            for (int idx = 0; idx < randomizedStartValueIterations; idx++)
            {
                RandomizeStartValues(parameterSettings, 2);
                var optimizationResult = GradientDescentOptimizer.Optimize(costFunc, parameterSettings, double.NegativeInfinity);
                WriteOptimizationResult(@"G:\Projects\HumanGenome\helixAffinityOptimizationResults.dat", optimizationResult);
            }
        }
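A note on the third argument: in examples #4 and #9 it is a step-size-derived tolerance, so passing double.NegativeInfinity here presumably disables that stopping criterion and lets each randomized restart run until the optimizer's internal iteration limit. Treat that reading as an assumption, since the Optimize signature itself is not shown.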
Code Example #13
        static void Main()
        {
            GradientLog.OutputWriter = Console.Out;
            GradientEngine.UseEnvironmentFromVariable();

            var input  = tf.placeholder(tf.float32, new TensorShape(null, 1), name: "x");
            var output = tf.placeholder(tf.float32, new TensorShape(null, 1), name: "y");

            var hiddenLayer = tf.layers.dense(input, hiddenSize,
                                              activation: tf.sigmoid_fn,
                                              kernel_initializer: new ones_initializer(),
                                              bias_initializer: new random_uniform_initializer(minval: -x1, maxval: -x0),
                                              name: "hidden");

            var model = tf.layers.dense(hiddenLayer, units: 1, name: "output");

            var cost = tf.losses.mean_squared_error(output, model);

            var training = new GradientDescentOptimizer(learning_rate: learningRate).minimize(cost);

            dynamic init    = tf.global_variables_initializer();
            var     session = new Session();

            using var _ = session.StartUsing();
            session.run(init);

            foreach (int iteration in Enumerable.Range(0, iterations))
            {
                var (trainInputs, trainOutputs) = GenerateTestValues();
                var iterationDataset = new Dictionary<dynamic, object> {
                    [input]  = trainInputs,
                    [output] = trainOutputs,
                };
                session.run(training, feed_dict: iterationDataset);

                if (iteration % 100 == 99)
                {
                    Console.WriteLine($"cost = {session.run(cost, feed_dict: iterationDataset)}");
                }
            }

            var (testInputs, testOutputs) = GenerateTestValues();

            var testValues = session.run(model, feed_dict: new Dictionary<object, object> {
                [input] = testInputs,
            });

            using (new variable_scope("hidden", reuse: true).StartUsing()) {
                Variable w = tf.get_variable("kernel");
                Variable b = tf.get_variable("bias");
                Console.WriteLine("hidden:");
                Console.WriteLine($"kernel= {w.eval()}");
                Console.WriteLine($"bias  = {b.eval()}");
            }

            using (new variable_scope("output", reuse: true).StartUsing()) {
                Variable w = tf.get_variable("kernel");
                Variable b = tf.get_variable("bias");
                Console.WriteLine("output:");
                Console.WriteLine($"kernel= {w.eval()}");
                Console.WriteLine($"bias  = {b.eval()}");
            }
        }
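The two variable_scope blocks at the end reopen the "hidden" and "output" scopes with reuse: true, the standard TensorFlow 1.x idiom for fetching trained variables by name, so the learned kernels and biases can be printed.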
Code Example #14
        public int Run()
        {
            dynamic datasets = Py.Import("sklearn.datasets");
            dynamic slice    = PythonEngine.Eval("slice");
            var     iris     = datasets.load_iris();
            dynamic firstTwoFeaturesIndex = new PyTuple(new PyObject[] {
                slice(null),
                slice(null, 2)
            });
            var         input          = iris.data.__getitem__(firstTwoFeaturesIndex);
            IEnumerable target         = iris.target;
            var         expectedOutput = target.Cast<dynamic>()
                                         .Select(l => (int)l == 0 ? 1 : -1)
                                         .ToArray();
            int trainCount = expectedOutput.Length * 4 / 5;
            var trainIn    = numpy.np.array(((IEnumerable)input).Cast<dynamic>().Take(trainCount));
            var trainOut   = numpy.np.array(expectedOutput.Take(trainCount));
            var testIn     = numpy.np.array(((IEnumerable)input).Cast<dynamic>().Skip(trainCount));
            var testOut    = numpy.np.array(expectedOutput.Skip(trainCount));

            var inPlace  = tf.placeholder(shape: new TensorShape(null, input.shape[1]), dtype: tf.float32);
            var outPlace = tf.placeholder(shape: new TensorShape(null, 1), dtype: tf.float32);
            var w        = new Variable(tf.random_normal(shape: new TensorShape((int)input.shape[1], 1)));
            var b        = new Variable(tf.random_normal(shape: new TensorShape(1, 1)));

            var totalLoss = Loss(w, b, inPlace, outPlace);
            var accuracy  = Inference(w, b, inPlace, outPlace);

            var trainOp = new GradientDescentOptimizer(this.flags.InitialLearningRate).minimize(totalLoss);

            var expectedTrainOut = trainOut.reshape((trainOut.Length, 1));
            var expectedTestOut  = testOut.reshape((testOut.Length, 1));

            new Session().UseSelf(sess =>
            {
                var init = tensorflow.tf.global_variables_initializer();
                sess.run(init);
                for (int step = 0; step < this.flags.StepCount; step++)
                {
                    (numpy.ndarray @in, numpy.ndarray @out) = NextBatch(trainIn, trainOut, sampleCount: this.flags.BatchSize);
                    var feed = new PythonDict<object, object> {
                        [inPlace]  = @in,
                        [outPlace] = @out,
                    };
                    sess.run(trainOp, feed_dict: feed);

                    var loss     = sess.run(totalLoss, feed_dict: feed);
                    var trainAcc = sess.run(accuracy, new PythonDict<object, object>
                    {
                        [inPlace]  = trainIn,
                        [outPlace] = expectedTrainOut,
                    });
                    var testAcc = sess.run(accuracy, new PythonDict<object, object>
                    {
                        [inPlace]  = testIn,
                        [outPlace] = expectedTestOut,
                    });

                    if ((step + 1) % 100 == 0)
                    {
                        Console.WriteLine($"Step {step}: test acc {testAcc}, train acc {trainAcc}");
                    }
                }

                //if (this.flags.IsEvaluation)
                //{

                //}
            });

            return 0;
        }
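This example turns the iris data set into a binary problem (class 0 mapped to +1, everything else to -1) using only the first two features, holds out the last fifth of the samples for testing, and trains a linear classifier with gradient descent, reporting train and test accuracy every 100 steps.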
Code Example #15
File: PTB.cs, Project: vishalbelsare/AleaTK
            public Model(Context ctx, Config cfg, bool isTraining = true, bool usingCuDnn = true)
            {
                Config     = cfg;
                IsTraining = isTraining;
                UsingCuDnn = usingCuDnn;

                Inputs  = Variable<int>(PartialShape.Create(cfg.NumSteps, cfg.BatchSize));
                Targets = Variable<int>(PartialShape.Create(cfg.NumSteps, cfg.BatchSize));

                // embedding
                Embedding = new Embedding<float>(Inputs, cfg.VocabSize, cfg.HiddenSize, initScale: cfg.InitScale);

                // add dropout
                EmbeddedOutput = Embedding.Output;
                if (isTraining && cfg.KeepProb < 1.0)
                {
                    var dropout = new Dropout<float>(EmbeddedOutput, dropoutProb: 1.0 - cfg.KeepProb);
                    EmbeddedOutput = dropout.Output;
                }

                // rnn layer, dropout for intermediate lstm layers and for output
                if (usingCuDnn)
                {
                    RnnAccelerated = new Rnn<float>(new LstmRnnType(forgetBiasInit: 0.0), EmbeddedOutput, cfg.NumLayers, cfg.HiddenSize, isTraining: isTraining, dropout: isTraining && cfg.KeepProb < 1.0 ? 1.0 - Config.KeepProb : 0.0);
                    RnnOutput      = RnnAccelerated.Y;
                    if (isTraining && cfg.KeepProb < 1.0)
                    {
                        var dropout = new Dropout<float>(RnnOutput, dropoutProb: 1.0 - cfg.KeepProb);
                        RnnOutput = dropout.Output;
                    }
                }
                else
                {
                    RnnDirect = new Lstm<float>[cfg.NumLayers];
                    for (var i = 0; i < cfg.NumLayers; ++i)
                    {
                        var lstm = new Lstm<float>(i == 0 ? EmbeddedOutput : RnnOutput, cfg.HiddenSize, forgetBiasInit: 0.0);
                        RnnDirect[i] = lstm;
                        RnnOutput    = lstm.Y;
                        if (isTraining && cfg.KeepProb < 1.0)
                        {
                            var dropout = new Dropout<float>(RnnOutput, dropoutProb: 1.0 - cfg.KeepProb);
                            RnnOutput = dropout.Output;
                        }
                    }
                }

                FC = new FullyConnected<float>(RnnOutput.Reshape(RnnOutput.Shape[0] * RnnOutput.Shape[1], RnnOutput.Shape[2]), cfg.VocabSize);

                Loss = new SoftmaxCrossEntropySparse<float>(FC.Output, Targets.Reshape(Targets.Shape[0] * Targets.Shape[1]));

                Optimizer = new GradientDescentOptimizer(ctx, Loss.Loss, cfg.LearningRate, new GlobalNormGradientClipper(cfg.MaxGradNorm));

                // warmup to force JIT compilation to get timings without JIT overhead
                Optimizer.Initalize();
                ResetStates();
                Optimizer.AssignTensor(Inputs, Fill(Shape.Create(Inputs.Shape.AsArray), 0));
                Optimizer.AssignTensor(Targets, Fill(Shape.Create(Targets.Shape.AsArray), 0));
                Optimizer.Forward();
                if (isTraining)
                {
                    Optimizer.Backward();
                }

                // now reset states
                Optimizer.Initalize();
                ResetStates();
            }
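Once this constructor returns, training proceeds batch by batch: the caller assigns data with Optimizer.AssignTensor(Inputs, ...) and Optimizer.AssignTensor(Targets, ...), then calls Forward, Backward, and Optimize, the same loop structure as in examples #1 and #3.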
Code Example #16
        /// <summary>
        ///     This sample shows how to serialize and deserialize a ConvNetSharp.Flow network
        ///     1) Graph creation
        ///     2) Dummy Training (only use a single data point)
        ///     3) Serialization
        ///     4) Deserialization
        /// </summary>
        private static void Main()
        {
            var cns = new ConvNetSharp<double>();

            // 1) Graph creation
            var input = cns.PlaceHolder("x"); // input

            var dense1  = cns.Dense(input, 20) + cns.Variable(BuilderInstance<double>.Volume.From(new double[20].Populate(0.1), new Shape(20)), "bias1", true);
            var relu    = cns.Relu(dense1);
            var dense2  = cns.Dense(relu, 10) + cns.Variable(new Shape(10), "bias2", true);
            var softmax = cns.Softmax(dense2); // output

            var output = cns.PlaceHolder("y"); // ground truth
            var cost   = new SoftmaxCrossEntropy<double>(cns, softmax, output);

            var x = BuilderInstance<double>.Volume.From(new[] { 0.3, -0.5 }, new Shape(2));

            var y = BuilderInstance<double>.Volume.From(new[] { 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 }, new Shape(10));

            var dico = new Dictionary<string, Volume<double>> {
                { "x", x }, { "y", y }
            };

            var count     = 0;
            var optimizer = new GradientDescentOptimizer<double>(cns, 0.01);

            using (var session = new Session<double>())
            {
                session.Differentiate(cost); // computes dCost/dW at every node of the graph

                // 2) Dummy Training (only use a single data point)
                double currentCost;
                do
                {
                    currentCost = Math.Abs(session.Run(cost, dico, false).ToArray().Sum());
                    Console.WriteLine($"cost: {currentCost}");

                    session.Run(optimizer, dico);
                    count++;
                } while (currentCost > 1e-2);

                Console.WriteLine($"{count}");

                // Forward pass with original network
                var result = session.Run(softmax, new Dictionary<string, Volume<double>> {
                    { "x", x }
                });
                Console.WriteLine("probability that x is class 0: " + result.Get(0));
            }

            // 3) Serialization
            softmax.Save("MyNetwork");

            // 4) Deserialization
            var deserialized = SerializationExtensions.Load<double>("MyNetwork", false)[0]; // first element is the model (second element is the cost if it was saved along)

            using (var session = new Session<double>())
            {
                // Forward pass with deserialized network
                var result = session.Run(deserialized, new Dictionary<string, Volume<double>> {
                    { "x", x }
                });
                Console.WriteLine("probability that x is class 0: " + result.Get(0)); // This should give exactly the same result as previous network evaluation
            }

            Console.ReadLine();
        }
Code Example #17
        /// <summary>
        /// Solves y = x * W + b (CPU single version)
        /// for y = 1 and x = -2
        ///
        /// This also demonstrates how to save and load a graph
        /// </summary>
        public static void Example1()
        {
            var cns = ConvNetSharp<float>.Instance;

            // Graph creation
            Op<float> cost;
            Op<float> fun;

            if (File.Exists("test.graphml"))
            {
                Console.WriteLine("Loading graph from disk.");
                var ops = SerializationExtensions.Load<float>("test", true);

                fun  = ops[0];
                cost = ops[1];
            }
            else
            {
                var x = cns.PlaceHolder("x");
                var y = cns.PlaceHolder("y");

                var W = cns.Variable(1.0f, "W");
                var b = cns.Variable(2.0f, "b");

                fun = x * W + b;

                cost = (fun - y) * (fun - y);
            }


            var optimizer = new GradientDescentOptimizer<float>(0.01f);

            using (var session = new Session<float>())
            {
                session.Differentiate(cost); // computes dCost/dW at every node of the graph

                float currentCost;
                do
                {
                    var dico = new Dictionary<string, Volume<float>> {
                        { "x", -2.0f }, { "y", 1.0f }
                    };

                    currentCost = session.Run(cost, dico);
                    Console.WriteLine($"cost: {currentCost}");

                    var result = session.Run(fun, dico);
                    session.Run(optimizer, dico);
                } while (currentCost > 1e-5);

                float finalW = session.GetVariableByName(fun, "W").Result;
                float finalb = session.GetVariableByName(fun, "b").Result;
                Console.WriteLine($"fun = x * {finalW} + {finalb}");

                fun.Save("test", cost);

                // Display graph
                var vm  = new ViewModel<float>(cost);
                var app = new Application();
                app.Run(new GraphControl {
                    DataContext = vm
                });
            }

            Console.ReadKey();
        }