public void PrepareData()
 {
     // Load MNIST (one-hot labels) with the configured split sizes, blocking until done.
     mnist = MnistModelLoader.LoadAsync(".resources/mnist", oneHot: true, trainSize: TrainSize, validationSize: ValidationSize, testSize: TestSize, showProgressInConsole: true).Result;

     // This example keeps only a small slice: 1% of each configured size,
     // falling back to fixed defaults when no size was configured.
     var trainCount = TrainSize.HasValue ? TrainSize.Value / 100 : 5000; // 5000 for training (nn candidates)
     var testCount  = TestSize.HasValue ? TestSize.Value / 100 : 200;    // 200 for testing

     (Xtr, Ytr) = mnist.Train.GetNextBatch(trainCount);
     (Xte, Yte) = mnist.Test.GetNextBatch(testCount);
 }
Esempio n. 2
0
        private static void fun1()
        {
            // Trains a single-layer softmax (multinomial logistic regression) classifier
            // on MNIST with plain gradient descent, reporting test loss/accuracy per epoch.
            var path = "../../../../data/MNIST_data/";
            // Requires a reference to the Tensorflow.Hub project.
            var mnist = MnistModelLoader.LoadAsync(path, oneHot: true).Result;

            // Placeholders: flattened 28x28 images (x) and one-hot digit labels (y).
            var x = tf.placeholder(TF_DataType.TF_DOUBLE, (-1, 784), name: "x");
            var y = tf.placeholder(TF_DataType.TF_DOUBLE, (-1, 10), name: "y");

            var w = tf.Variable(tf.random_normal((784, 10), dtype: TF_DataType.TF_DOUBLE), dtype: TF_DataType.TF_DOUBLE, name: "w");
            var b = tf.Variable(tf.zeros(10, dtype: TF_DataType.TF_DOUBLE), dtype: TF_DataType.TF_DOUBLE, name: "b");

            var forward = tf.matmul(x, w) + b;
            var pred    = tf.nn.softmax(forward);

            var train_epochs  = 50;
            var batch_size    = 100;
            var total_batch   = (int)mnist.Train.NumOfExamples / batch_size;
            var display_step  = 1;
            var learning_rate = 0.01f;

            // Cross-entropy of the softmax output, averaged over the batch.
            var loss_function = tf.reduce_mean(-tf.reduce_sum(y * tf.log(pred), reduction_indices: 1));

            var optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss_function);

            // Accuracy: fraction of samples whose predicted class matches the label.
            var correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1));
            var accuracy           = tf.reduce_mean(tf.cast(correct_prediction, TF_DataType.TF_DOUBLE));

            using (var sess = tf.Session())
            {
                var init = tf.global_variables_initializer();
                sess.run(init);

                for (var epoch = 0; epoch < train_epochs; epoch++)
                {
                    for (var batch = 0; batch < total_batch; batch++)
                    {
                        var(xs, ys) = mnist.Train.GetNextBatch(batch_size);
                        sess.run(optimizer, new FeedItem(x, xs), new FeedItem(y, ys));
                    }

                    // Per-epoch loss/accuracy measured on the test split.
                    // FIX: the original additionally ran `accu_test` (an exact duplicate of
                    // `acc` on the same test feed) and `accu_validation` every epoch and
                    // discarded both results — dead, expensive evaluations removed.
                    var(loss, acc) = sess.run((loss_function, accuracy),
                                              new FeedItem(x, mnist.Test.Data),
                                              new FeedItem(y, mnist.Test.Labels));

                    if ((epoch + 1) % display_step == 0)
                    {
                        Console.WriteLine($"train epoch:{epoch + 1}   loss={loss} accuracy={acc}");
                    }
                }
            }
        }
Esempio n. 3
0
 public override void PrepareData()
 {
     // Synchronously load the MNIST dataset (one-hot labels) using the configured
     // split sizes, showing download/extraction progress on the console.
     mnistV1 = MnistModelLoader
               .LoadAsync(".resources/mnist",
                          oneHot: true,
                          trainSize: train_size,
                          validationSize: validation_size,
                          testSize: test_size,
                          showProgressInConsole: true)
               .Result;
 }
        public override void PrepareData()
        {
            // Load the full MNIST dataset with one-hot encoded labels, blocking until done.
            var loaded = MnistModelLoader.LoadAsync(".resources/mnist", oneHot: true, showProgressInConsole: true).Result;
            mnist = loaded;

            // Report how many examples ended up in each split.
            print("Size of:");
            print($"- Training-set:\t\t{len(loaded.Train.Data)}");
            print($"- Validation-set:\t{len(loaded.Validation.Data)}");
            print($"- Test-set:\t\t{len(loaded.Test.Data)}");
        }
        public void PrepareData()
        {
            // Load MNIST (one-hot labels) and cache each split's images and labels in fields.
            mnist = MnistModelLoader.LoadAsync(".resources/mnist", oneHot: true, showProgressInConsole: true).Result;

            x_train = mnist.Train.Data;
            y_train = mnist.Train.Labels;
            x_valid = mnist.Validation.Data;
            y_valid = mnist.Validation.Labels;
            x_test  = mnist.Test.Data;
            y_test  = mnist.Test.Labels;

            // Report how many examples ended up in each split.
            print("Size of:");
            print($"- Training-set:\t\t{len(x_train)}");
            print($"- Validation-set:\t{len(x_valid)}");
            print($"- Test-set:\t\t{len(x_test)}");
        }
        public async Task TestLoad()
        {
            // Loading MNIST with a 5000-example validation split should produce
            // a non-null dataset object.
            var setting = new ModelLoadSetting
            {
                TrainDir       = "mnist",
                OneHot         = true,
                ValidationSize = 5000,
            };

            var result = await new MnistModelLoader().LoadAsync(setting);

            Assert.IsNotNull(result);
        }
Esempio n. 7
0
        public void PrepareData()
        {
            // Ensure the output directory for this example exists (no-op if it already does).
            Directory.CreateDirectory(Name);

            mnist = MnistModelLoader.LoadAsync(".resources/mnist", oneHot: true, showProgressInConsole: true).Result;

            // Reformat each split into the shape/encoding this model expects.
            (x_train, y_train) = Reformat(mnist.Train.Data, mnist.Train.Labels);
            (x_valid, y_valid) = Reformat(mnist.Validation.Data, mnist.Validation.Labels);
            (x_test, y_test)   = Reformat(mnist.Test.Data, mnist.Test.Labels);

            Console.WriteLine("Size of:");
            Console.WriteLine($"- Training-set:\t\t{mnist.Train.Data.Shape.Size}");
            Console.WriteLine($"- Validation-set:\t{mnist.Validation.Data.Shape.Size}");
            // FIX: the test split was loaded and reformatted above but its size was never
            // reported — added for consistency with the other PrepareData implementations.
            Console.WriteLine($"- Test-set:\t\t{mnist.Test.Data.Shape.Size}");
        }
Esempio n. 8
0
        public override void PrepareData()
        {
            // Load MNIST with one-hot labels, then cache each split.
            mnist = MnistModelLoader.LoadAsync(".resources/mnist", oneHot: true, showProgressInConsole: true).Result;

            // Training split keeps one-hot labels and flat images.
            x_train = mnist.Train.Data;
            y_train = mnist.Train.Labels;

            // Validation split: class indices instead of one-hot labels, and images
            // reshaped to (batch, n_steps, n_inputs) for the recurrent model.
            x_valid = mnist.Validation.Data.reshape(-1, n_steps, n_inputs);
            y_valid = np.argmax(mnist.Validation.Labels, axis: 1);

            // Test split: same treatment as validation.
            x_test = mnist.Test.Data.reshape(-1, n_steps, n_inputs);
            y_test = np.argmax(mnist.Test.Labels, axis: 1);

            // Report how many examples ended up in each split.
            print("Size of:");
            print($"- Training-set:\t\t{len(mnist.Train.Data)}");
            print($"- Validation-set:\t{len(mnist.Validation.Data)}");
            print($"- Test-set:\t\t{len(mnist.Test.Data)}");
        }
Esempio n. 9
0
        private static void fun1()
        {
            // Loads MNIST and prints basic dataset statistics plus the first training image.
            var path = "../../../../data/MNIST_data/";
            // Requires a reference to the Tensorflow.Hub project.
            var mnist = MnistModelLoader.LoadAsync(path, oneHot: true).Result;

            // Example counts per split.
            Console.WriteLine($"训练集 train 数量:{mnist.Train.NumOfExamples}");
            Console.WriteLine($"验证集 validation 数量:{mnist.Validation.NumOfExamples}");
            Console.WriteLine($"测试集 test 数量:{mnist.Test.NumOfExamples}");

            // Shapes of the training images and labels.
            Console.WriteLine($"train images shape: {mnist.Train.Data.Shape}");
            Console.WriteLine($"labels shape:{ mnist.Train.Labels.Shape}");

            // Inspect the first training image, both flat and as a 28x28 grid.
            var firstImage = mnist.Train.Data[0];

            Console.WriteLine($"image shape={firstImage.Shape}");
            Console.WriteLine($"image data={firstImage}");
            Console.WriteLine($"image.reshape={firstImage.reshape(28,28)}");

            // (Plotting is not attempted here.)
        }
Esempio n. 10
0
        static async Task Main(string[] args)
        {
            // Load Fashion-MNIST with one-hot labels (asynchronously, unlike the
            // blocking .Result pattern used elsewhere in these samples).
            var mnist = await MnistModelLoader.LoadAsync(".resources/fashion_mnist", oneHot : true);

            // Cache each split's images and labels.
            NDArray x_train = mnist.Train.Data,      y_train = mnist.Train.Labels;
            NDArray x_valid = mnist.Validation.Data, y_valid = mnist.Validation.Labels;
            NDArray x_test  = mnist.Test.Data,       y_test  = mnist.Test.Labels;

            // Simple MLP: 784 -> 128 (relu) -> 10 (softmax).
            var model = keras.Sequential();
            model.add(keras.layers.Dense(28 * 28));
            model.add(keras.layers.Dense(128, activation: "relu"));
            model.add(keras.layers.Dense(10, activation: "softmax"));
        }
Esempio n. 11
0
        public void PrepareData()
        {
            var loader = new MnistModelLoader();

            // Load MNIST through the explicit settings object, using the configured split sizes.
            mnist = loader.LoadAsync(new ModelLoadSetting
            {
                TrainDir       = ".resources/mnist",
                OneHot         = true,
                TrainSize      = train_size,
                ValidationSize = validation_size,
                TestSize       = test_size
            }).Result;

            // K-means clusters the raw training images directly.
            full_data_x = mnist.Train.Data;

            // Fetch the pre-built graph meta file used by this example.
            string url = "https://raw.githubusercontent.com/SciSharp/TensorFlow.NET/master/graph/kmeans.meta";
            loader.DownloadAsync(url, ".resources/graph", "kmeans.meta").Wait();
        }
 public void PrepareData()
 {
     // Blocking load of the full MNIST dataset, one-hot labels, with console progress.
     mnist = MnistModelLoader
             .LoadAsync(".resources/mnist", oneHot: true, showProgressInConsole: true)
             .Result;
 }
Esempio n. 13
0
 public void PrepareData()
 {
     // Blocking load of the full MNIST dataset with one-hot labels (no progress output).
     mnist = MnistModelLoader
             .LoadAsync(".resources/mnist", oneHot: true)
             .Result;
 }
Esempio n. 14
0
        private static void fun1()
        {
            // Trains a 3-hidden-layer (256, 64, 32) fully-connected MNIST classifier and
            // periodically checkpoints the session with tf.train.Saver.
            var path = "../../../../data/MNIST_data/";
            // Requires a reference to the Tensorflow.Hub project.
            var mnist = MnistModelLoader.LoadAsync(path, oneHot: true).Result;

            // var savePath = "../../../../netSaver/";
            var savePath = @"..\..\..\..\netSaver";

            if (!Directory.Exists(savePath))
            {
                Directory.CreateDirectory(savePath);
            }

            // Input layer: flattened 28x28 images (x) and one-hot labels (y).
            var x = tf.placeholder(TF_DataType.TF_DOUBLE, (-1, 784), name: "x");
            var y = tf.placeholder(TF_DataType.TF_DOUBLE, (-1, 10), name: "y");

            // Hidden layer widths.
            var H1_NN = 256;
            var H2_NN = 64;
            var H3_NN = 32;

            var h1 = fcn_layer(inputs: x, input_dim: 784, output_dim: H1_NN, activation: tf.nn.relu);
            var h2 = fcn_layer(h1, H1_NN, H2_NN, tf.nn.relu);
            var h3 = fcn_layer(h2, H2_NN, H3_NN, tf.nn.relu);

            // Output layer: raw logits; softmax applied separately for prediction.
            var forward = fcn_layer(h3, H3_NN, 10, null);
            var pred    = tf.nn.softmax(forward);

            var train_epochs  = 10;
            var batch_size    = 100;
            var total_batch   = (int)mnist.Train.NumOfExamples / batch_size;
            var display_step  = 1;
            var save_step     = 10; // with train_epochs = 10, the in-loop save fires only once, on the final epoch
            var learning_rate = 0.01f;

            // Cross-entropy computed from the logits (numerically preferable to log(pred)).
            var loss_function = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels: y, logits: forward));
            // var optimizer = tf.train.AdamOptimizer(learning_rate, dtype: TF_DataType.TF_DOUBLE).minimize(loss_function);
            var optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss_function);

            // Accuracy: fraction of samples whose predicted class matches the label.
            var correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(pred, 1));
            var accuracy           = tf.reduce_mean(tf.cast(correct_prediction, TF_DataType.TF_DOUBLE));

            using (var sess = tf.Session())
            {
                var init = tf.global_variables_initializer();
                sess.run(init);
                var saver = tf.train.Saver();

                for (var epoch = 0; epoch < train_epochs; epoch++)
                {
                    for (var batch = 0; batch < total_batch; batch++)
                    {
                        var(xs, ys) = mnist.Train.GetNextBatch(batch_size);
                        sess.run(optimizer, new FeedItem(x, xs), new FeedItem(y, ys));
                    }

                    // Per-epoch loss/accuracy measured on the validation split.
                    var(loss, acc) = sess.run((loss_function, accuracy),
                                              new FeedItem(x, mnist.Validation.Data),
                                              new FeedItem(y, mnist.Validation.Labels));

                    if ((epoch + 1) % display_step == 0)
                    {
                        Console.WriteLine($"train epoch:{epoch + 1}   loss={loss} accuracy={acc}");
                    }

                    if ((epoch + 1) % save_step == 0)
                    {
                        saver.save(sess, Path.Combine(savePath, $"mnist_model.{(epoch + 1)}.ckpt"));
                    }
                }

                // Final accuracy on the held-out test split.
                var accu_test = sess.run(accuracy, (x, mnist.Test.Data), (y, mnist.Test.Labels));
                Console.WriteLine($"准确率:{accu_test}");

                // NOTE(review): this passes the *directory* as the checkpoint path, unlike the
                // in-loop save which uses Path.Combine(savePath, "...ckpt") — confirm intended.
                saver.save(sess, savePath);
                Console.WriteLine("保存完毕!");
            }
        }
Esempio n. 15
0
        private static void fun1()
        {
            // Trains a 2-hidden-layer (256, 64) fully-connected MNIST classifier using
            // manually declared weight/bias variables and plain gradient descent.
            var path = "../../../../data/MNIST_data/";
            // Requires a reference to the Tensorflow.Hub project.
            var mnist = MnistModelLoader.LoadAsync(path, oneHot: true).Result;

            // Input layer: flattened 28x28 images (x) and one-hot labels (y).
            var x = tf.placeholder(TF_DataType.TF_DOUBLE, (-1, 784), name: "x");
            var y = tf.placeholder(TF_DataType.TF_DOUBLE, (-1, 10), name: "y");

            // Hidden layer widths.
            var H1_NN = 256;
            var H2_NN = 64;

            // Input layer => hidden layer 1.
            var w1 = tf.Variable(tf.random_normal((784, H1_NN), dtype: TF_DataType.TF_DOUBLE));
            var b1 = tf.Variable(tf.zeros((H1_NN), dtype: TF_DataType.TF_DOUBLE));

            // Hidden layer 1 => hidden layer 2.
            var w2 = tf.Variable(tf.random_normal((H1_NN, H2_NN), dtype: TF_DataType.TF_DOUBLE));
            var b2 = tf.Variable(tf.zeros((H2_NN), dtype: TF_DataType.TF_DOUBLE));

            // Hidden layer 2 => output layer.
            var w3 = tf.Variable(tf.random_normal((H2_NN, 10), dtype: TF_DataType.TF_DOUBLE));
            var b3 = tf.Variable(tf.zeros((10), dtype: TF_DataType.TF_DOUBLE));

            var y1 = tf.nn.relu(tf.matmul(x, w1) + b1);
            var y2 = tf.nn.relu(tf.matmul(y1, w2) + b2);

            // Raw logits; softmax applied separately for prediction.
            var forward = tf.matmul(y2, w3) + b3;
            var pred    = tf.nn.softmax(forward);

            var train_epochs  = 50;
            var batch_size    = 100;
            var total_batch   = (int)mnist.Train.NumOfExamples / batch_size;
            var display_step  = 1;
            var learning_rate = 0.01f;

            // var loss_function = tf.reduce_mean(-tf.reduce_sum(y * tf.log(pred), reduction_indices: 1));
            // Cross-entropy computed from the logits (numerically preferable to log(pred)).
            var loss_function = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels: y, logits: forward));
            var optimizer     = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss_function);
            // var optimizer = tf.train.AdamOptimizer(learning_rate, dtype: TF_DataType.TF_DOUBLE).minimize(loss_function);

            // Accuracy: fraction of samples whose predicted class matches the label.
            var correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(pred, 1));
            var accuracy           = tf.reduce_mean(tf.cast(correct_prediction, TF_DataType.TF_DOUBLE));

            using (var sess = tf.Session())
            {
                var init = tf.global_variables_initializer();
                sess.run(init);

                for (var epoch = 0; epoch < train_epochs; epoch++)
                {
                    for (var batch = 0; batch < total_batch; batch++)
                    {
                        var(xs, ys) = mnist.Train.GetNextBatch(batch_size);
                        sess.run(optimizer, new FeedItem(x, xs), new FeedItem(y, ys));
                    }

                    // Per-epoch loss/accuracy measured on the validation split.
                    var(loss, acc) = sess.run((loss_function, accuracy),
                                              new FeedItem(x, mnist.Validation.Data),
                                              new FeedItem(y, mnist.Validation.Labels));

                    if ((epoch + 1) % display_step == 0)
                    {
                        Console.WriteLine($"train epoch:{epoch + 1}   loss={loss} accuracy={acc}");
                    }
                }

                // Final accuracy on the held-out test split.
                var accu_test = sess.run(accuracy, (x, mnist.Test.Data), (y, mnist.Test.Labels));
                Console.WriteLine($"准确率:{accu_test}");
            }
        }
Esempio n. 16
0
        private static void fun1()
        {
            // Rebuilds the 3-hidden-layer MNIST graph, restores the latest checkpoint from
            // disk, and evaluates accuracy on the test split — no training happens here.
            var path = "../../../../data/MNIST_data/";
            // Requires a reference to the Tensorflow.Hub project.
            var mnist = MnistModelLoader.LoadAsync(path, oneHot: true).Result;

            // var savePath = "../../../../netSaver";
            var savePath = @"..\..\..\..\netSaver";

            // var savePath = @"S:\MOOC_TF_Study\netSaver";
            if (!Directory.Exists(savePath))
            {
                Directory.CreateDirectory(savePath);
            }

            // Input layer: flattened 28x28 images (x) and one-hot labels (y).
            var x = tf.placeholder(TF_DataType.TF_DOUBLE, (-1, 784), name: "x");
            var y = tf.placeholder(TF_DataType.TF_DOUBLE, (-1, 10), name: "y");

            // Hidden layer widths — must match the graph that produced the checkpoint.
            var H1_NN = 256;
            var H2_NN = 64;
            var H3_NN = 32;

            var h1 = fcn_layer(inputs: x, input_dim: 784, output_dim: H1_NN, activation: tf.nn.relu);
            var h2 = fcn_layer(h1, H1_NN, H2_NN, tf.nn.relu);
            var h3 = fcn_layer(h2, H2_NN, H3_NN, tf.nn.relu);

            var forward = fcn_layer(h3, H3_NN, 10, null);
            var pred    = tf.nn.softmax(forward);

            // NOTE(review): the training hyper-parameters and optimizer below are built
            // but never run in this function — only restore + evaluate is executed.
            var train_epochs  = 10;
            var batch_size    = 100;
            var total_batch   = (int)mnist.Train.NumOfExamples / batch_size;
            var display_step  = 1;
            var save_step     = 10;
            var learning_rate = 0.01f;

            var loss_function = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels: y, logits: forward));
            // var optimizer = tf.train.AdamOptimizer(learning_rate, dtype: TF_DataType.TF_DOUBLE).minimize(loss_function);
            var optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss_function);

            // Accuracy: fraction of samples whose predicted class matches the label.
            var correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(pred, 1));
            var accuracy           = tf.reduce_mean(tf.cast(correct_prediction, TF_DataType.TF_DOUBLE));

            using (var sess = tf.Session())
            {
                var init = tf.global_variables_initializer();
                sess.run(init);
                var saver = tf.train.Saver();

                // Restore the most recent checkpoint from savePath, if one exists.
                var ckpt = tf.train.get_checkpoint_state(savePath);
                if (ckpt != null && !string.IsNullOrWhiteSpace(ckpt.ModelCheckpointPath))
                {
                    saver.restore(sess, ckpt.ModelCheckpointPath);
                    Console.WriteLine("模型已恢复");
                }

                var lastFile = tf.train.latest_checkpoint(savePath);
                Console.WriteLine(lastFile);

                // Accuracy of the restored model on the held-out test split.
                var accu_test = sess.run(accuracy, (x, mnist.Test.Data), (y, mnist.Test.Labels));
                Console.WriteLine($"准确率:{accu_test}");
            }
        }
Esempio n. 17
0
        static async Task Main(string[] args)
        {
            // Demonstrates basic eager-mode tensor operations, then switches to graph
            // mode and trains a softmax regression classifier on MNIST.

            // Constants
            var x  = tf.constant(2);
            var y  = tf.constant(10);
            var hi = tf.constant("hi");

            Console.WriteLine(x);
            Console.WriteLine(y);
            Console.WriteLine(hi);

            print(hi);

            print((int)x.numpy());
            print((int)y.numpy());
            print((string)hi.numpy());

            // Basic operations
            var add   = tf.add(x, y);
            var multi = tf.multiply(x, y);

            print((int)add.numpy());
            print((int)multi.numpy());

            var mean = tf.reduce_mean(new[] { x, y });
            var sum  = tf.reduce_sum(new[] { x, y });

            print((int)mean.numpy());
            print((int)sum.numpy());

            // MNIST example

            // Eager execution is turned on by default.
            // Turn it off to be able to create placeholders.
            // Eager execution evaluates operations immediately without building graphs.
            tf.compat.v1.disable_eager_execution();

            var mnistData = await MnistModelLoader.LoadAsync("./train", oneHot : true, trainSize : null, validationSize : 5000, testSize : null, showProgressInConsole : true);

            // Placeholders for flattened 28x28 images and one-hot digit labels.
            var mnistShape  = tf.placeholder(tf.float32, new TensorShape(-1, 28 * 28));
            var mnistOutput = tf.placeholder(tf.float32, new TensorShape(-1, 10));

            var W = tf.Variable(tf.zeros(new Shape(784, 10)));
            var b = tf.Variable(tf.zeros(new Shape(10)));

            // Single-layer softmax model (multinomial logistic regression).
            var model = tf.nn.softmax(tf.matmul(mnistShape, W) + b);

            // Cross-entropy of the softmax output, averaged over the batch.
            var crossEntropy = tf.reduce_mean(-tf.reduce_sum(mnistOutput * tf.log(model), reduction_indices: 1));

            var optimizer = tf.train.GradientDescentOptimizer(0.01f).minimize(crossEntropy);

            var totalBatches = mnistData.Train.NumOfExamples / 100;

            using var session = tf.Session();

            session.run(tf.global_variables_initializer());

            var accuracy = 0f;

            // Train
            foreach (var epoch in range(20))
            {
                var avg_cost = 0.0f;

                foreach (var i in range(totalBatches))
                {
                    // Each batch covers 100 consecutive training examples.
                    var start = i * 100;
                    var end   = (i + 1) * 100;
                    var(batch_xs, batch_ys) = mnistData.GetNextBatch(mnistData.Train.Data, mnistData.Train.Labels, start, end);
                    // Run optimization op (backprop) and cost op (to get loss value)
                    (_, float c) = session.run((optimizer, crossEntropy),
                                               (mnistShape, batch_xs),
                                               (mnistOutput, batch_ys));

                    // Compute average loss
                    avg_cost += c / totalBatches;
                }

                // Display logs per epoch step
                var newEpoc = epoch + 1;

                if (newEpoc % 1 == 0)
                {
                    print($"Epoch: {newEpoc} Cost: {avg_cost:G9}");
                }
            }

            print("Optimization Finished!");
            // SaveModel(sess);

            // Test model
            var predictions = tf.equal(tf.argmax(model, 1), tf.argmax(mnistOutput, 1));
            // Calculate accuracy
            var acc = tf.reduce_mean(tf.cast(predictions, tf.float32));

            accuracy = acc.eval(session, (mnistShape, mnistData.Test.Data), (mnistOutput, mnistData.Test.Labels));
            print($"Accuracy: {accuracy:F4}");
        }
Esempio n. 18
0
        private static void fun1()
        {
            // Trains a 1-hidden-layer (256) MNIST classifier, then inspects the test-set
            // predictions and lists every misclassified example.
            var path = "../../../../data/MNIST_data/";
            // Requires a reference to the Tensorflow.Hub project.
            var mnist = MnistModelLoader.LoadAsync(path, oneHot: true).Result;

            // Input layer: flattened 28x28 images (x) and one-hot labels (y).
            var x = tf.placeholder(TF_DataType.TF_DOUBLE, (-1, 784), name: "x");
            var y = tf.placeholder(TF_DataType.TF_DOUBLE, (-1, 10), name: "y");

            // Hidden layer width.
            var H1_NN = 256;

            var w1 = tf.Variable(tf.random_normal((784, H1_NN), dtype: TF_DataType.TF_DOUBLE));
            var b1 = tf.Variable(tf.zeros((H1_NN), dtype: TF_DataType.TF_DOUBLE));

            var y1 = tf.nn.relu(tf.matmul(x, w1) + b1);

            // Output layer: raw logits; softmax applied separately for prediction.
            var w2 = tf.Variable(tf.random_normal((H1_NN, 10), dtype: TF_DataType.TF_DOUBLE));
            var b2 = tf.Variable(tf.zeros((10), dtype: TF_DataType.TF_DOUBLE));

            var forward = tf.matmul(y1, w2) + b2;
            var pred    = tf.nn.softmax(forward);

            var train_epochs  = 50;
            var batch_size    = 100;
            var total_batch   = (int)mnist.Train.NumOfExamples / batch_size;
            var display_step  = 1;
            var learning_rate = 0.01f;

            // var loss_function = tf.reduce_mean(-tf.reduce_sum(y * tf.log(pred), reduction_indices: 1));
            // Cross-entropy computed from the logits (numerically preferable to log(pred)).
            var loss_function = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels: y, logits: forward));
            var optimizer     = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss_function);
            // var optimizer = tf.train.AdamOptimizer(learning_rate, TF_DataType.TF_DOUBLE).minimize(loss_function);

            // Accuracy: fraction of samples whose predicted class matches the label.
            var correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1));
            var accuracy           = tf.reduce_mean(tf.cast(correct_prediction, TF_DataType.TF_DOUBLE));

            using (var sess = tf.Session())
            {
                var init = tf.global_variables_initializer();
                sess.run(init);

                for (var epoch = 0; epoch < train_epochs; epoch++)
                {
                    for (var batch = 0; batch < total_batch; batch++)
                    {
                        var(xs, ys) = mnist.Train.GetNextBatch(batch_size);
                        sess.run(optimizer, new FeedItem(x, xs), new FeedItem(y, ys));
                    }

                    // Per-epoch loss/accuracy measured on the validation split.
                    var(loss, acc) = sess.run((loss_function, accuracy),
                                              new FeedItem(x, mnist.Validation.Data),
                                              new FeedItem(y, mnist.Validation.Labels));

                    if ((epoch + 1) % display_step == 0)
                    {
                        Console.WriteLine($"train epoch:{epoch + 1}   loss={loss} accuracy={acc}");
                    }
                }

                // Evaluate the trained model on the held-out test split.
                var accu_test = sess.run(accuracy, (x, mnist.Test.Data), (y, mnist.Test.Labels));
                Console.WriteLine("准确率:{0}", accu_test.ToString());

                // Predicted class index for every test image (first ten shown below).
                var prediction_result = sess.run(tf.argmax(pred, 1), new FeedItem(x, mnist.Test.Data));
                Console.WriteLine($"测试结果:{prediction_result["0:10"]}");

                // Element-wise comparison of predictions against the true labels.
                var compare_lists = prediction_result == np.argmax(mnist.Test.Labels, 1);
                Console.WriteLine("预测与实际结果的比较 {0}", compare_lists);

                var compareResult = (bool[])compare_lists;

                // (Equivalent imperative form: loop over compareResult and append each
                // index whose entry is false to err_list.)

                // Indices of the misclassified test examples.
                var err_list = compareResult.Select((result, index) => (result, index))
                               .Where(o => !o.result)
                               .Select(o => o.index).ToArray();

                // Print the true label and the prediction for every misclassified example.
                var labels     = mnist.Test.Labels;
                var prediction = (long[])prediction_result;
                foreach (var errIndex in err_list)
                {
                    Console.WriteLine($"index={errIndex} 标签值={np.argmax(labels[errIndex])} 预测值={prediction[errIndex]}");
                }
            }
        }
Esempio n. 19
0
 public void PrepareData()
 {
     // Blocking load of MNIST (one-hot labels) using the configured split sizes.
     mnist = MnistModelLoader
             .LoadAsync(".resources/mnist", oneHot: true, trainSize: train_size, validationSize: validation_size, testSize: test_size)
             .Result;
 }