public void PrepareData()
 {
     mnist = MnistModelLoader.LoadAsync(".resources/mnist", oneHot: true, showProgressInConsole: true).Result;
 }
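
A minimal sketch of the class context these PrepareData snippets assume, with the mnist field declared explicitly. The Datasets&lt;MnistDataSet&gt; field type and the Tensorflow.Hub namespace are assumptions based on what MnistModelLoader.LoadAsync appears to return in TensorFlow.NET's hub package:

 using Tensorflow.Hub;

 public class MnistExample
 {
     // Holds the train/validation/test splits once PrepareData has run.
     Datasets<MnistDataSet> mnist;

     public void PrepareData()
     {
         // .Result blocks synchronously on the async loader; fine for a demo,
         // but prefer await in asynchronous code.
         mnist = MnistModelLoader.LoadAsync(".resources/mnist", oneHot: true, showProgressInConsole: true).Result;
     }
 }
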
Example #2
        private static void fun1()
        {
            var path = "../../../../data/MNIST_data/";
            // Requires a reference to the Tensorflow.Hub project
            var mnist = MnistModelLoader.LoadAsync(path, oneHot: true).Result;

            // Input layer
            var x = tf.placeholder(TF_DataType.TF_DOUBLE, (-1, 784), name: "x");
            var y = tf.placeholder(TF_DataType.TF_DOUBLE, (-1, 10), name: "y");

            // Hidden layer
            var H1_NN = 256;

            var w1 = tf.Variable(tf.random_normal((784, H1_NN), dtype: TF_DataType.TF_DOUBLE));
            var b1 = tf.Variable(tf.zeros((H1_NN), dtype: TF_DataType.TF_DOUBLE));

            var y1 = tf.nn.relu(tf.matmul(x, w1) + b1);

            // Output layer
            var w2 = tf.Variable(tf.random_normal((H1_NN, 10), dtype: TF_DataType.TF_DOUBLE));
            var b2 = tf.Variable(tf.zeros((10), dtype: TF_DataType.TF_DOUBLE));

            var forward = tf.matmul(y1, w2) + b2;
            var pred    = tf.nn.softmax(forward);

            var train_epochs  = 50;
            var batch_size    = 100;
            var total_batch   = (int)mnist.Train.NumOfExamples / batch_size;
            var display_step  = 1;
            var learning_rate = 0.01f;

            // var loss_function = tf.reduce_mean(-tf.reduce_sum(y * tf.log(pred), reduction_indices: 1));
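            // softmax_cross_entropy_with_logits (below) fuses softmax and cross-entropy,
            // which is numerically more stable than the manual formulation above.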
            var loss_function = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels: y, logits: forward));
            var optimizer     = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss_function);
            // var optimizer = tf.train.AdamOptimizer(learning_rate, TF_DataType.TF_DOUBLE).minimize(loss_function);

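            // Accuracy: the fraction of samples whose predicted class (argmax of pred)
            // matches the class encoded by the one-hot label (argmax of y).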
            var correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1));
            var accuracy           = tf.reduce_mean(tf.cast(correct_prediction, TF_DataType.TF_DOUBLE));

            using (var sess = tf.Session())
            {
                var init = tf.global_variables_initializer();
                sess.run(init);

                for (var epoch = 0; epoch < train_epochs; epoch++)
                {
                    for (var batch = 0; batch < total_batch; batch++)
                    {
                        var (xs, ys) = mnist.Train.GetNextBatch(batch_size);
                        sess.run(optimizer, new FeedItem(x, xs), new FeedItem(y, ys));
                    }

                    var (loss, acc) = sess.run((loss_function, accuracy),
                                               new FeedItem(x, mnist.Validation.Data),
                                               new FeedItem(y, mnist.Validation.Labels));

                    if ((epoch + 1) % display_step == 0)
                    {
                        Console.WriteLine($"train epoch:{epoch + 1}   loss={loss} accuracy={acc}");
                    }
                }

                // Evaluate the model
                var accu_test = sess.run(accuracy, (x, mnist.Test.Data), (y, mnist.Test.Labels));
                Console.WriteLine("Accuracy: {0}", accu_test);

                var prediction_result = sess.run(tf.argmax(pred, 1), new FeedItem(x, mnist.Test.Data));
                Console.WriteLine($"Test results (first 10): {prediction_result["0:10"]}");

                var compare_lists = prediction_result == np.argmax(mnist.Test.Labels, 1);
                Console.WriteLine("Comparison of predictions vs. actual labels: {0}", compare_lists);

                var compareResult = (bool[])compare_lists;

                /*
                 * var err_list = new List<int>();
                 * for(var i = 0;i < compareResult.Length; i++)
                 * {
                 *  if (!compareResult[i])
                 *      err_list.Add(i);
                 * }
                 */

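                // LINQ equivalent of the commented-out loop above:
                // collect the indices of every misclassified test sample.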
                var err_list = compareResult.Select((result, index) => (result, index))
                               .Where(o => !o.result)
                               .Select(o => o.index).ToArray();

                var labels     = mnist.Test.Labels;
                var prediction = (long[])prediction_result;
                foreach (var errIndex in err_list)
                {
                    Console.WriteLine($"index={errIndex} label={np.argmax(labels[errIndex])} predicted={prediction[errIndex]}");
                }
            }
        }
Example #3
 public override void PrepareData()
 {
     mnist = MnistModelLoader.LoadAsync(".resources/mnist", oneHot: true, trainSize: train_size, validationSize: validation_size, testSize: test_size, showProgressInConsole: true).Result;
 }
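
This overload reads train_size, validation_size, and test_size from the surrounding class. A minimal sketch of plausible declarations follows; the field names come from the snippet, while the int? type and the values are assumptions (Example #5 passes null for sizes, which appears to mean "use the full split"):

 // Assumed fields referenced by PrepareData above; values are illustrative only.
 int? train_size = 55000;
 int? validation_size = 5000;
 int? test_size = 10000;
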
Example #4
 public void PrepareData()
 {
     mnist = MnistModelLoader.LoadAsync(".resources/mnist", oneHot: true, trainSize: train_size, validationSize: validation_size, testSize: test_size).Result;
 }
Example #5
        static async Task Main(string[] args)
        {
            // Constants
            var x  = tf.constant(2);
            var y  = tf.constant(10);
            var hi = tf.constant("hi");

            Console.WriteLine(x);
            Console.WriteLine(y);
            Console.WriteLine(hi);

            print(hi);

            print((int)x.numpy());
            print((int)y.numpy());
            print((string)hi.numpy());

            // Basic operations
            var add   = tf.add(x, y);
            var multi = tf.multiply(x, y);

            print((int)add.numpy());
            print((int)multi.numpy());

            var mean = tf.reduce_mean(new[] { x, y });
            var sum  = tf.reduce_sum(new[] { x, y });

            print((int)mean.numpy());
            print((int)sum.numpy());

            // MNIST example

            // Eager execution is turned on by default.
            // Turn it off to be able to create placeholders.
            // Eager execution evaluates operations immediately, without building graphs.
            tf.compat.v1.disable_eager_execution();

            var mnistData = await MnistModelLoader.LoadAsync("./train", oneHot: true, trainSize: null, validationSize: 5000, testSize: null, showProgressInConsole: true);

            var mnistShape  = tf.placeholder(tf.float32, new TensorShape(-1, 28 * 28));
            var mnistOutput = tf.placeholder(tf.float32, new TensorShape(-1, 10));

            var W = tf.Variable(tf.zeros(new Shape(784, 10)));
            var b = tf.Variable(tf.zeros(new Shape(10)));

            var model = tf.nn.softmax(tf.matmul(mnistShape, W) + b);

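            // Cross-entropy computed manually from the softmax output; other examples on this
            // page use the fused tf.nn.softmax_cross_entropy_with_logits, which is numerically safer.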
            var crossEntropy = tf.reduce_mean(-tf.reduce_sum(mnistOutput * tf.log(model), reduction_indices: 1));

            var optimizer = tf.train.GradientDescentOptimizer(0.01f).minimize(crossEntropy);

            var totalBatches = mnistData.Train.NumOfExamples / 100;

            using var session = tf.Session();

            session.run(tf.global_variables_initializer());

            var accuracy = 0f;

            // Train
            foreach (var epoch in range(20))
            {
                var avg_cost = 0.0f;

                foreach (var i in range(totalBatches))
                {
                    var start = i * 100;
                    var end   = (i + 1) * 100;
                    var (batch_xs, batch_ys) = mnistData.GetNextBatch(mnistData.Train.Data, mnistData.Train.Labels, start, end);
                    // Run optimization op (backprop) and cost op (to get loss value)
                    (_, float c) = session.run((optimizer, crossEntropy),
                                               (mnistShape, batch_xs),
                                               (mnistOutput, batch_ys));

                    // Compute average loss
                    avg_cost += c / totalBatches;
                }

                // Display logs per epoch step (a display step of 1 logs every epoch)
                var newEpoch = epoch + 1;

                if (newEpoch % 1 == 0)
                {
                    print($"Epoch: {newEpoch} Cost: {avg_cost:G9}");
                }
            }

            print("Optimization Finished!");
            // SaveModel(sess);

            // Test model
            var predictions = tf.equal(tf.argmax(model, 1), tf.argmax(mnistOutput, 1));
            // Calculate accuracy
            var acc = tf.reduce_mean(tf.cast(predictions, tf.float32));

            accuracy = acc.eval(session, (mnistShape, mnistData.Test.Data), (mnistOutput, mnistData.Test.Labels));
            print($"Accuracy: {accuracy:F4}");
        }
Example #6
        private static void fun1()
        {
            var path = "../../../../data/MNIST_data/";
            // Requires a reference to the Tensorflow.Hub project
            var mnist = MnistModelLoader.LoadAsync(path, oneHot: true).Result;

            // var savePath = "../../../../netSaver/";
            var savePath = @"..\..\..\..\netSaver";

            if (!Directory.Exists(savePath))
            {
                Directory.CreateDirectory(savePath);
            }

            // Input layer
            var x = tf.placeholder(TF_DataType.TF_DOUBLE, (-1, 784), name: "x");
            var y = tf.placeholder(TF_DataType.TF_DOUBLE, (-1, 10), name: "y");

            // Hidden layers
            var H1_NN = 256;
            var H2_NN = 64;
            var H3_NN = 32;

            var h1 = fcn_layer(inputs: x, input_dim: 784, output_dim: H1_NN, activation: tf.nn.relu);
            var h2 = fcn_layer(h1, H1_NN, H2_NN, tf.nn.relu);
            var h3 = fcn_layer(h2, H2_NN, H3_NN, tf.nn.relu);

            var forward = fcn_layer(h3, H3_NN, 10, null);
            var pred    = tf.nn.softmax(forward);

            var train_epochs  = 10;
            var batch_size    = 100;
            var total_batch   = (int)mnist.Train.NumOfExamples / batch_size;
            var display_step  = 1;
            var save_step     = 10;
            var learning_rate = 0.01f;

            var loss_function = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels: y, logits: forward));
            // var optimizer = tf.train.AdamOptimizer(learning_rate, dtype: TF_DataType.TF_DOUBLE).minimize(loss_function);
            var optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss_function);

            // Define accuracy
            var correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(pred, 1));
            var accuracy           = tf.reduce_mean(tf.cast(correct_prediction, TF_DataType.TF_DOUBLE));

            using (var sess = tf.Session())
            {
                var init = tf.global_variables_initializer();
                sess.run(init);
                var saver = tf.train.Saver();

                for (var epoch = 0; epoch < train_epochs; epoch++)
                {
                    for (var batch = 0; batch < total_batch; batch++)
                    {
                        var (xs, ys) = mnist.Train.GetNextBatch(batch_size);
                        sess.run(optimizer, new FeedItem(x, xs), new FeedItem(y, ys));
                    }

                    var (loss, acc) = sess.run((loss_function, accuracy),
                                               new FeedItem(x, mnist.Validation.Data),
                                               new FeedItem(y, mnist.Validation.Labels));

                    if ((epoch + 1) % display_step == 0)
                    {
                        Console.WriteLine($"train epoch:{epoch + 1}   loss={loss} accuracy={acc}");
                    }

                    if ((epoch + 1) % save_step == 0)
                    {
                        saver.save(sess, Path.Combine(savePath, $"mnist_model.{(epoch + 1)}.ckpt"));
                    }
                }

                var accu_test = sess.run(accuracy, (x, mnist.Test.Data), (y, mnist.Test.Labels));
                Console.WriteLine($"Accuracy: {accu_test}");

                saver.save(sess, savePath);
                Console.WriteLine("Save complete!");
            }
        }
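
Example #6 calls a fcn_layer helper that is not shown on this page. Below is a hypothetical reconstruction, inferred from its call sites and from the weight/bias pattern of Example #2; the signature and body are assumptions, not the original helper:

        // Hypothetical fcn_layer: builds one fully connected layer.
        // Signature inferred from the call sites: fcn_layer(inputs, input_dim, output_dim, activation).
        private static Tensor fcn_layer(Tensor inputs, int input_dim, int output_dim,
                                        Func<Tensor, string, Tensor> activation)
        {
            var w = tf.Variable(tf.random_normal((input_dim, output_dim), dtype: TF_DataType.TF_DOUBLE));
            var b = tf.Variable(tf.zeros(output_dim, dtype: TF_DataType.TF_DOUBLE));
            var z = tf.matmul(inputs, w) + b;

            // A null activation yields a linear output layer (raw logits for the softmax).
            return activation == null ? z : activation(z, null);
        }
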
Example #7
        private static void fun1()
        {
            var path = "../../../../data/MNIST_data/";
            // Requires a reference to the Tensorflow.Hub project
            var mnist = MnistModelLoader.LoadAsync(path, oneHot: true).Result;

            // Input layer
            var x = tf.placeholder(TF_DataType.TF_DOUBLE, (-1, 784), name: "x");
            var y = tf.placeholder(TF_DataType.TF_DOUBLE, (-1, 10), name: "y");

            // Hidden layer
            var H1_NN = 256;

            var w1 = tf.Variable(tf.random_normal((784, H1_NN), dtype: TF_DataType.TF_DOUBLE));
            var b1 = tf.Variable(tf.zeros((H1_NN), dtype: TF_DataType.TF_DOUBLE));

            var y1 = tf.nn.relu(tf.matmul(x, w1) + b1);

            // Output layer
            var w2 = tf.Variable(tf.random_normal((H1_NN, 10), dtype: TF_DataType.TF_DOUBLE));
            var b2 = tf.Variable(tf.zeros((10), dtype: TF_DataType.TF_DOUBLE));

            var forward = tf.matmul(y1, w2) + b2;
            var pred    = tf.nn.softmax(forward);

            var train_epochs  = 50;
            var batch_size    = 100;
            var total_batch   = (int)mnist.Train.NumOfExamples / batch_size;
            var display_step  = 1;
            var learning_rate = 0.01f;

            // var loss_function = tf.reduce_mean(-tf.reduce_sum(y * tf.log(pred), reduction_indices: 1));
            var loss_function = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels: y, logits: forward));
            var optimizer     = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss_function);

            var correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1));
            var accuracy           = tf.reduce_mean(tf.cast(correct_prediction, TF_DataType.TF_DOUBLE));

            using (var sess = tf.Session())
            {
                var init = tf.global_variables_initializer();
                sess.run(init);

                for (var epoch = 0; epoch < train_epochs; epoch++)
                {
                    for (var batch = 0; batch < total_batch; batch++)
                    {
                        var (xs, ys) = mnist.Train.GetNextBatch(batch_size);
                        sess.run(optimizer, new FeedItem(x, xs), new FeedItem(y, ys));
                    }

                    var (loss, acc) = sess.run((loss_function, accuracy),
                                               new FeedItem(x, mnist.Validation.Data),
                                               new FeedItem(y, mnist.Validation.Labels));

                    if ((epoch + 1) % display_step == 0)
                    {
                        Console.WriteLine($"train epoch:{epoch + 1}   loss={loss} accuracy={acc}");
                    }
                }

                var prediction_result = sess.run(tf.argmax(pred, 1), new FeedItem(x, mnist.Test.Data));
                Console.WriteLine($"Test set results (first 10): {prediction_result["0:10"]}");

                // Since plotting is not available here, just print a simple text comparison
                int num        = 10;
                int startIndex = 0;
                var labels     = mnist.Test.Labels;

                for (var i = startIndex; i < num; i++)
                {
                    Console.WriteLine($"Label: {np.argmax(labels[i])}  Predicted: {prediction_result[i]}");
                }
            }
        }
Example #8
 public void PrepareData()
 {
     mnist = MnistModelLoader.LoadAsync(".resources/mnist", oneHot: true).Result;
 }