Example #1
        public void Run()
        {
            // Parameters
            double learning_rate   = 0.01;
            int    training_epochs = 1000;
            int    display_step    = 50;

            // Training Data
            var train_X = np.array(3.3, 4.4, 5.5, 6.71, 6.93, 4.168, 9.779, 6.182, 7.59, 2.167,
                                   7.042, 10.791, 5.313, 7.997, 5.654, 9.27, 3.1);
            var train_Y = np.array(1.7, 2.76, 2.09, 3.19, 1.694, 1.573, 3.366, 2.596, 2.53, 1.221,
                                   2.827, 3.465, 1.65, 2.904, 2.42, 2.94, 1.3);
            var n_samples = train_X.shape[0];

            // tf Graph Input
            var X = tf.placeholder(tf.float64);
            var Y = tf.placeholder(tf.float64);

            // Set model weights
            var W = tf.Variable(rng.randn <double>(), name: "weight");
            var b = tf.Variable(rng.randn <double>(), name: "bias");

            var part1 = tf.multiply(X, W);
            var pred  = tf.add(part1, b);

            // Mean squared error
            var sub = pred - Y;
            var pow = tf.pow(sub, 2);

            var reduce = tf.reduce_sum(pow);
            var cost   = reduce / (2d * n_samples);

            // Gradient descent
            // Note: minimize() knows to modify W and b because Variable objects are trainable=True by default
            var optimizer = tf.train.GradientDescentOptimizer(learning_rate);

            var train_op = optimizer.minimize(cost);

            // Initialize the variables (i.e. assign their default value)
            var init = tf.global_variables_initializer();

            // Start training
            Python.with <Session>(tf.Session(), sess =>
            {
                // Run the initializer
                sess.run(init);

                // Fit all training data
                for (int i = 0; i < training_epochs; i++)
                {
                    foreach (var(x, y) in Python.zip <double>(train_X, train_Y))
                    {
                        // Feed one (x, y) pair per gradient-descent step, as in Example #5.
                        sess.run(train_op, feed_dict: new FeedItem[]
                        {
                            new FeedItem(X, x),
                            new FeedItem(Y, y)
                        });
                    }
                }
            });
        }
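
For intuition, the graph built in Example #1 amounts to the following dependency-free loop. This is a sketch for illustration only (the name FitLine is hypothetical, not part of the scraped example); it optimizes the same cost, sum((W*x + b - y)^2) / (2n), defined above.

        // Plain-C# sketch of the per-sample gradient descent the graph performs.
        // For cost = sum((W*x + b - y)^2) / (2n), one sample contributes the
        // gradients dW = x * (W*x + b - y) / n and db = (W*x + b - y) / n.
        public (double W, double b) FitLine(double[] xs, double[] ys,
                                            double lr = 0.01, int epochs = 1000)
        {
            var rnd = new Random();
            double W = rnd.NextDouble(), b = rnd.NextDouble();
            int n = xs.Length;

            for (int epoch = 0; epoch < epochs; epoch++)
            {
                for (int i = 0; i < n; i++)
                {
                    double err = W * xs[i] + b - ys[i];
                    W -= lr * err * xs[i] / n;  // step along -dcost/dW
                    b -= lr * err / n;          // step along -dcost/db
                }
            }

            return (W, b);
        }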
Example #2
        public void Run()
        {
            // Parameters
            double learning_rate   = 0.01;
            int    training_epochs = 1000;
            int    display_step    = 50;

            // Training Data
            var train_X = np.array(3.3, 4.4, 5.5, 6.71, 6.93, 4.168, 9.779, 6.182, 7.59, 2.167,
                                   7.042, 10.791, 5.313, 7.997, 5.654, 9.27, 3.1);
            var train_Y = np.array(1.7, 2.76, 2.09, 3.19, 1.694, 1.573, 3.366, 2.596, 2.53, 1.221,
                                   2.827, 3.465, 1.65, 2.904, 2.42, 2.94, 1.3);
            var n_samples = train_X.shape[0];

            // tf Graph Input
            var X = tf.placeholder(tf.float64);
            var Y = tf.placeholder(tf.float64);

            // Set model weights
            var W = tf.Variable(rng.randn <double>(), name: "weight");
            var b = tf.Variable(rng.randn <double>(), name: "bias");

            var part1 = tf.multiply(X, W);
            var pred  = tf.add(part1, b);

            // Mean squared error
            var sub    = pred - Y;
            var pow    = tf.pow(sub, 2);
            var reduce = tf.reduce_sum(pow);
            var cost   = reduce / (2d * n_samples);

            // Gradient descent
            // Note: minimize() knows to modify W and b because Variable objects are trainable=True by default
            var optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost);
        }
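
Example #2 stops after building the cost and optimizer. For reference, the W and b that this gradient descent converges toward have a closed form (ordinary least squares); a minimal sketch, with the helper name LeastSquares being hypothetical:

        // Closed-form least-squares fit (sketch, not from the scraped source):
        // W = cov(x, y) / var(x) and b = mean(y) - W * mean(x) minimize
        // sum((W*x + b - y)^2) / (2n), the same cost defined above.
        public (double W, double b) LeastSquares(double[] xs, double[] ys)
        {
            double meanX = xs.Average(), meanY = ys.Average();
            double cov = 0, varX = 0;

            for (int i = 0; i < xs.Length; i++)
            {
                cov  += (xs[i] - meanX) * (ys[i] - meanY);
                varX += (xs[i] - meanX) * (xs[i] - meanX);
            }

            double W = cov / varX;
            return (W, meanY - W * meanX);
        }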
Example #3
        /// <summary>
        /// Neural network
        /// </summary>
        /// <param name="neuronsCounts">Number of neurons in each layer of the network</param>
        public Network(int[] neuronsCounts)
        {
            this.LayersCount   = neuronsCounts.Length;
            this.NeuronsCounts = neuronsCounts;

            // Gaussian (normal) distribution random number generator
            var pyRandom = new NumPyRandom();

            this.Biases = neuronsCounts.Skip(1).Select(count => pyRandom.randn(count, 1)).ToArray();

            // Weights[l] has shape (neurons in layer l+1, neurons in layer l)
            this.Weights = neuronsCounts.Skip(1)
                               .Zip(neuronsCounts.SkipLast(1))
                               .Select(counts => pyRandom.randn(counts.First, counts.Second))
                               .ToArray();
        }
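
A hypothetical usage of this constructor, to make the shapes concrete (the variable net and the layer sizes are illustrative, not from the scraped source):

        // For layer sizes { 2, 3, 1 }:
        var net = new Network(new[] { 2, 3, 1 });
        // net.Biases[0]  -> shape (3, 1)    net.Biases[1]  -> shape (1, 1)
        // net.Weights[0] -> shape (3, 2)    net.Weights[1] -> shape (1, 3)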
Example #4
        /// <summary>
        /// Initialize the neural network
        /// </summary>
        /// <param name="neuronCounts">Neuron count for each layer of the network</param>
        public Network(IEnumerable <int> neuronCounts)
        {
            // Initialize the network's per-layer neuron collections
            this.NeuronPool = new List <List <TNeuron> >(
                neuronCounts.Select(
                    count =>
                    Enumerable.Range(0, count).
                    Select(index => Activator.CreateInstance <TNeuron>())
                    .ToList())
                );

            // Gaussian (normal) distribution random number generator
            var pyRandom = new NumPyRandom();

            // Initialize biases for the non-input layers
            _ = this.NeuronPool.Skip(1).All(neurons =>
            {
                foreach (var(neuron, bias) in neurons.Zip(pyRandom.randn(new[] { neurons.Count, 1 }).Array as double[]))
                {
                    neuron.Bias = bias;
                }

                return true;
            });

            // For non-input layers, initialize the weights on inputs from the previous layer's neurons
            _ = this.NeuronPool.SkipLast(1).Zip(this.NeuronPool.Skip(1)).All(neurons =>
            {
                var weights = pyRandom.randn(new int[] { neurons.Second.Count, neurons.First.Count });
                for (int index = 0; index < neurons.Second.Count; index++)
                {
                    neurons.Second[index].Weight = weights[index].Array as double[];
                }

                return true;
            });
        }
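
A feed-forward pass over this layout could look as follows. This is a sketch under the assumption that TNeuron exposes the Bias and Weight members initialized above; the method name FeedForward is hypothetical:

        // Hypothetical feed-forward pass (sketch): each non-input neuron computes
        // sigmoid(w . a + b) over the previous layer's activations, using the
        // Bias and Weight values set in the constructor above.
        public double[] FeedForward(double[] input)
        {
            var activations = input;

            foreach (var layer in this.NeuronPool.Skip(1))
            {
                activations = layer.Select(neuron =>
                {
                    // Weighted sum of previous activations plus the neuron's bias.
                    var z = neuron.Weight.Zip(activations, (w, a) => w * a).Sum() + neuron.Bias;
                    return 1.0 / (1.0 + Math.Exp(-z));  // sigmoid activation
                }).ToArray();
            }

            return activations;
        }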
Example #5
        public void Run()
        {
            // Parameters
            double learning_rate   = 0.01;
            int    training_epochs = 1000;
            int    display_step    = 50;

            // Training Data
            var train_X = np.array(3.3, 4.4, 5.5, 6.71, 6.93, 4.168, 9.779, 6.182, 7.59, 2.167,
                                   7.042, 10.791, 5.313, 7.997, 5.654, 9.27, 3.1);
            var train_Y = np.array(1.7, 2.76, 2.09, 3.19, 1.694, 1.573, 3.366, 2.596, 2.53, 1.221,
                                   2.827, 3.465, 1.65, 2.904, 2.42, 2.94, 1.3);
            var n_samples = train_X.shape[0];

            // tf Graph Input
            var X = tf.placeholder(tf.float64);
            var Y = tf.placeholder(tf.float64);

            // Set model weights
            var W = tf.Variable(rng.randn <double>(), name: "weight");
            var b = tf.Variable(rng.randn <double>(), name: "bias");

            var mul  = tf.multiply(X, W);
            var pred = tf.add(mul, b);

            // Mean squared error
            var sub = pred - Y;
            var pow = tf.pow(sub, 2);

            var reduce = tf.reduce_sum(pow);
            var cost   = reduce / (2d * n_samples);

            // Gradient descent
            // Note: minimize() knows to modify W and b because Variable objects are trainable=True by default
            var grad      = tf.train.GradientDescentOptimizer(learning_rate);
            var optimizer = grad.minimize(cost);

            // Initialize the variables (i.e. assign their default value)
            var init = tf.global_variables_initializer();

            // Start training
            Python.with <Session>(tf.Session(), sess =>
            {
                // Run the initializer
                sess.run(init);

                // Fit all training data
                for (int epoch = 0; epoch < training_epochs; epoch++)
                {
                    foreach (var(x, y) in Python.zip <double>(train_X, train_Y))
                    {
                        sess.run(optimizer, feed_dict: new FeedItem[]
                        {
                            new FeedItem(X, x),
                            new FeedItem(Y, y)
                        });
                    }

                    // Display logs per epoch step
                    if ((epoch + 1) % display_step == 0)
                    {
                        var c = sess.run(cost, feed_dict: new FeedItem[]
                        {
                            new FeedItem(X, train_X),
                            new FeedItem(Y, train_Y)
                        });
                        var rW = sess.run(W);
                        Console.WriteLine($"Epoch: {epoch + 1} cost={c} " +
                                          $"W={rW} b={sess.run(b)}");
                    }
                }
            });
        }
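
A typical epilogue for this kind of training loop, placed just after the epoch loop inside the session, reports the final cost and fitted parameters. This is a sketch and an assumption, not part of the scraped snippet, using only the FeedItem calls already shown above:

            // Hypothetical epilogue inside the session (sketch): report the final fit.
            var training_cost = sess.run(cost, feed_dict: new FeedItem[]
            {
                new FeedItem(X, train_X),
                new FeedItem(Y, train_Y)
            });
            Console.WriteLine($"Optimization Finished! cost={training_cost} " +
                              $"W={sess.run(W)} b={sess.run(b)}");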