Example #1
0
        float accuracy_test = 0f;                 // accuracy measured on the held-out test set

        public void Main()
        {
            // Download MNIST (or load the locally cached copy).
            ((x_train, y_train), (x_test, y_test)) = tf.keras.datasets.mnist.load_data();

            // Flatten each image into a 1-D feature vector.
            x_train = x_train.reshape((-1, num_features));
            x_test  = x_test.reshape((-1, num_features));

            // Rescale pixel values from [0, 255] into [0, 1].
            x_train = x_train / 255f;
            x_test  = x_test / 255f;

            // Input pipeline: endless repeat -> shuffle -> batch -> prefetch,
            // truncated to training_steps batches.
            train_data = tf.data.Dataset.from_tensor_slices(x_train, y_train)
                         .repeat()
                         .shuffle(5000)
                         .batch(batch_size)
                         .prefetch(1)
                         .take(training_steps);

            // Draw initial weights from a normal distribution; biases start at
            // zero. Pack everything into one array so the gradient computation
            // can take it as a single argument.
            var initializer = tf.initializers.random_normal_initializer();

            h1   = tf.Variable(initializer.Apply(new InitializerArgs((num_features, n_hidden_1))));
            h2   = tf.Variable(initializer.Apply(new InitializerArgs((n_hidden_1, n_hidden_2))));
            wout = tf.Variable(initializer.Apply(new InitializerArgs((n_hidden_2, num_classes))));
            b1   = tf.Variable(tf.zeros(n_hidden_1));
            b2   = tf.Variable(tf.zeros(n_hidden_2));
            bout = tf.Variable(tf.zeros(num_classes));
            var weights = new IVariableV1[] { h1, h2, wout, b1, b2, bout };

            // Plain stochastic gradient descent.
            var optimizer = tf.optimizers.SGD(learning_rate);

            // Training loop: one optimizer step per batch, with a periodic
            // progress report every display_step batches.
            foreach (var(step, (batch_x, batch_y)) in enumerate(train_data, 1))
            {
                // Update the weights w and biases b.
                run_optimization(optimizer, batch_x, batch_y, weights);

                if (step % display_step != 0)
                {
                    continue;
                }

                var pred = neural_net(batch_x);
                var loss = cross_entropy(pred, batch_y);
                var acc  = accuracy(pred, batch_y);
                print($"step: {step}, loss: {(float)loss}, accuracy: {(float)acc}");
            }

            // Evaluate the trained model's accuracy on the held-out test set.
            {
                var pred = neural_net(x_test);
                accuracy_test = (float)accuracy(pred, y_test);
                print($"Test Accuracy: {accuracy_test}");
            }
        }
Example #2
0
        public override void PrepareData()
        {
            // Fetch MNIST (downloaded on first use, cached afterwards).
            ((x_train, y_train), (x_test, y_test)) = keras.datasets.mnist.load_data();

            // Normalize images value from [0, 255] to [0, 1].
            x_train = x_train / 255.0f;
            x_test  = x_test / 255.0f;

            // tf.data pipeline: repeat forever, shuffle, batch, prefetch one
            // batch ahead, and cap iteration at training_steps batches.
            train_data = tf.data.Dataset.from_tensor_slices(x_train, y_train)
                         .repeat()
                         .shuffle(5000)
                         .batch(batch_size)
                         .prefetch(1)
                         .take(training_steps);
        }
Example #3
0
        public override void PrepareData()
        {
            // Load MNIST (downloaded on first use, cached afterwards).
            ((x_train, y_train), (x_test, y_test)) = tf.keras.datasets.mnist.load_data();

            // Flatten each 28x28 image into a num_features-element vector.
            x_train = x_train.reshape((-1, num_features));
            x_test  = x_test.reshape((-1, num_features));

            // Rescale pixel intensities from [0, 255] to [0, 1].
            x_train = x_train / 255f;
            x_test  = x_test / 255f;

            // tf.data pipeline: repeat -> shuffle -> batch -> prefetch,
            // truncated to training_steps batches.
            train_data = tf.data.Dataset.from_tensor_slices(x_train, y_train)
                         .repeat()
                         .shuffle(5000)
                         .batch(batch_size)
                         .prefetch(1)
                         .take(training_steps);
        }
        public void Main()
        {
            // ---- Data -------------------------------------------------------
            // Load MNIST, flatten every image into a num_features vector and
            // rescale the pixel values into [0, 1].
            ((x_train, y_train), (x_test, y_test)) = tf.keras.datasets.mnist.load_data();
            x_train = x_train.reshape((-1, num_features));
            x_test  = x_test.reshape((-1, num_features));
            x_train = x_train / 255f;
            x_test  = x_test / 255f;

            // tf.data pipeline: endless repeat, shuffle, batch, prefetch,
            // limited to training_steps batches.
            train_data = tf.data.Dataset.from_tensor_slices(x_train, y_train)
                         .repeat()
                         .shuffle(5000)
                         .batch(batch_size)
                         .prefetch(1)
                         .take(training_steps);

            // ---- Model ------------------------------------------------------
            // Two hidden ReLU layers (128 and 256 units).
            var neural_net = new NeuralNet(new NeuralNetArgs
            {
                NumClasses      = num_classes,
                NeuronOfHidden1 = 128,
                Activation1     = tf.keras.activations.Relu,
                NeuronOfHidden2 = 256,
                Activation2     = tf.keras.activations.Relu
            });

            // Softmax cross-entropy averaged over the batch. Labels are cast
            // to int64 because the sparse cross-entropy op requires it.
            Tensor cross_entropy_loss(Tensor x, Tensor y)
            {
                y = tf.cast(y, tf.int64);
                var loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels: y, logits: x);
                return tf.reduce_mean(loss);
            }

            // Fraction of samples whose argmax prediction matches the label.
            Tensor accuracy(Tensor y_pred, Tensor y_true)
            {
                var correct_prediction = tf.equal(tf.argmax(y_pred, 1), tf.cast(y_true, tf.int64));
                return tf.reduce_mean(tf.cast(correct_prediction, tf.float32), axis: -1);
            }

            // Stochastic gradient descent optimizer.
            var optimizer = tf.optimizers.SGD(learning_rate);

            // One optimization step: run the forward pass and loss under a
            // GradientTape, then apply the gradients to the trainable variables.
            void run_optimization(Tensor x, Tensor y)
            {
                using var g = tf.GradientTape();
                var pred = neural_net.Apply(x, is_training: true);
                var loss = cross_entropy_loss(pred, y);

                var gradients = g.gradient(loss, neural_net.trainable_variables);
                optimizer.apply_gradients(zip(gradients, neural_net.trainable_variables.Select(v => v as ResourceVariable)));
            }

            // ---- Training loop ----------------------------------------------
            foreach (var(step, (batch_x, batch_y)) in enumerate(train_data, 1))
            {
                // Run the optimization to update W and b values.
                run_optimization(batch_x, batch_y);

                if (step % display_step != 0)
                {
                    continue;
                }

                var pred = neural_net.Apply(batch_x, is_training: true);
                var loss = cross_entropy_loss(pred, batch_y);
                var acc  = accuracy(pred, batch_y);
                print($"step: {step}, loss: {(float)loss}, accuracy: {(float)acc}");
            }

            // ---- Evaluation on the held-out test set ------------------------
            {
                var pred = neural_net.Apply(x_test, is_training: false);
                this.accuracy = (float)accuracy(pred, y_test);
                print($"Test Accuracy: {this.accuracy}");
            }
        }
        public void Run(IEnumerable <RushingRaw> rushing)
        {
            // Group the raw per-player rows into plays: all rows of one play
            // share (GameId, Season, PlayId, Yards). GroupBy already yields one
            // group per key in first-occurrence order, so the original detour
            // through ToDictionary + Select(x => x.Value) was redundant.
            var plays = rushing
                        .GroupBy(x => (x.GameId, x.Season, x.PlayId, x.Yards))
                        .Select(g => g.ToList())
                        .ToList();

            // Hold out the last 20% of plays for testing.
            var trainSplit = plays.Count - (int)(0.2 * plays.Count);

            #region Train-Test Data

            // One feature vector per play: four play-level values, then eight
            // kinematic features for each of the 22 players (previously a
            // 200-line hand-unrolled list for x[0]..x[21]). Player 0's features
            // stay in the head literal so the array element type is inferred
            // over the same member set as before.
            // NOTE(review): assumes the feature properties share one numeric
            // type — true for the original single array literal to compile.
            var metrics = plays.Select(x => new[]
            {
                x[0].Quarter,
                x[0].Down,
                x[0].MinutesRemainingInQuarter,
                x[0].YardsFromOwnGoal,

                x[0].StandardisedX,
                x[0].StandardisedY,
                x[0].StandardisedDir,
                x[0].StandardisedOrientation,
                x[0].RelativeX,
                x[0].RelativeY,
                x[0].RelativeSpeedX,
                x[0].RelativeSpeedY
            }
            .Concat(x.Skip(1).Take(21).SelectMany(p => new[]
            {
                p.StandardisedX,
                p.StandardisedY,
                p.StandardisedDir,
                p.StandardisedOrientation,
                p.RelativeX,
                p.RelativeY,
                p.RelativeSpeedX,
                p.RelativeSpeedY
            }))
            .ToArray()).ToList();

            XTrain = np.array(metrics.Take(trainSplit).ToArray());
            YTrain = np.array(plays.Take(trainSplit).Select(x => x[0].Yards));

            XTest = np.array(metrics.Skip(trainSplit).ToArray());
            YTest = np.array(plays.Skip(trainSplit).Select(x => x[0].Yards));

            #endregion

            tf.enable_eager_execution();

            // tf.data pipeline: repeat -> shuffle -> batch -> prefetch,
            // truncated to TrainingSteps batches.
            TrainData = tf.data.Dataset.from_tensor_slices(XTrain, YTrain);
            TrainData = TrainData
                        .repeat()
                        .shuffle(5000)
                        .batch(BatchSize)
                        .prefetch(1)
                        .take(TrainingSteps);

            // Build neural network model: four hidden ReLU layers with a
            // sigmoid output over the 199 yardage classes.
            var neuralNet = new NeuralNet(new NeuralNetArgs
            {
                NumClasses       = 199,
                NeuronOfHidden1  = 128,
                Activation1      = tf.keras.activations.Relu,
                NeuronOfHidden2  = 256,
                Activation2      = tf.keras.activations.Relu,
                NeuronOfHidden3  = 512,
                Activation3      = tf.keras.activations.Relu,
                NeuronOfHidden4  = 512,
                Activation4      = tf.keras.activations.Relu,
                ActivationOutput = tf.keras.activations.Sigmoid,
            });

            /*
             * Cross-Entropy Loss
             * - Convert labels to int64 for the tf cross-entropy op.
             * - Apply softmax to logits and compute cross-entropy.
             *
             * BUG FIX: the previous body returned tf.reduce_mean(tf.nn.softmax(x)),
             * which never consulted the labels y at all, so the "loss" carried
             * no training signal. Restore the real sparse softmax cross-entropy
             * (the same op the sibling example uses).
             *
             * Returns the average loss across the batch.
             */
            Tensor CrossEntropyLoss(Tensor x, Tensor y)
            {
                y = tf.cast(y, tf.int64);
                var loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels: y, logits: x);

                return(tf.reduce_mean(loss));
            }

            /*
             * Accuracy metric
             *
             * Predicted class is the index of highest score in prediction vector (i.e. argmax).
             */
            Tensor Accuracy(Tensor yPred, Tensor yTrue)
            {
                var correctPrediction = tf.equal(tf.argmax(yPred, 1), tf.cast(yTrue, tf.int64));

                return(tf.reduce_mean(tf.cast(correctPrediction, tf.float32), -1));
            }

            // Adam optimizer (the old comment wrongly said SGD).
            // NOTE(review): beta2 = 0.9 is unusually low (Adam's default is
            // 0.999) — confirm this is intentional.
            var optimizer = tf.optimizers.Adam(LearningRate, 0.9f, 0.9f);

            /*
             * Optimization process
             *
             * Wrap the forward pass and loss inside a GradientTape for
             * automatic differentiation, then apply the gradients.
             */
            void RunOptimization(Tensor x, Tensor y)
            {
                using var g = tf.GradientTape();

                var pred = neuralNet.Apply(x, is_training: true);
                var loss = CrossEntropyLoss(pred, y);

                var gradients = g.gradient(loss, neuralNet.trainable_variables);

                // Update W and b following gradients
                optimizer.apply_gradients(
                    zip(gradients, neuralNet.trainable_variables.Select(i => i as ResourceVariable)));
            }

            // Run training for the given number of steps.
            foreach (var(step, (batchX, batchY)) in enumerate(TrainData, 1))
            {
                // Run the optimization to update W and b values.
                RunOptimization(batchX, batchY);

                if (step % DisplayStep != 0)
                {
                    continue;
                }

                var pred = neuralNet.Apply(batchX, is_training: true);
                var loss = CrossEntropyLoss(pred, batchY);
                var acc  = Accuracy(pred, batchY);

                Console.WriteLine($"step: {step}, loss: {(float) loss}, accuracy: {(float) acc}");
            }

            // Test model on validation set.
            {
                var pred = neuralNet.Apply(XTest, is_training: false);
                accuracy = (float)Accuracy(pred, YTest);
                Console.WriteLine($"Test Accuracy: {accuracy}");
            }

            Console.WriteLine($"Model accuracy: {accuracy}");
        }