Example #1
        public LSTMModel(LSTMModelArgs args)
            : base(args)
        {
            optimizer = tf.optimizers.Adam(args.LearningRate);
            lstm      = LSTM(args.NumUnits);
            output    = Dense(args.NumClasses);
        }
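
A minimal usage sketch for this constructor, assuming LSTMModelArgs exposes NumUnits, NumClasses, and LearningRate as settable properties (the snippet only shows them being read):

        // Hypothetical values; the real configuration depends on the surrounding project.
        var model = new LSTMModel(new LSTMModelArgs
        {
            NumUnits     = 128,    // hidden units of the LSTM layer
            NumClasses   = 10,     // width of the Dense output layer
            LearningRate = 0.001f  // passed to the Adam optimizer
        });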
Example #2
        public LSTMModel(LSTMModelArgs args)
            : base(args)
        {
            optimizer = keras.optimizers.Adam(args.LearningRate);

            var layers = keras.layers;

            lstm   = layers.LSTM(args.NumUnits);
            output = layers.Dense(args.NumClasses);
        }
Example #3

        void _minimize(GradientTape tape, OptimizerV2 optimizer, Tensor loss, List<IVariableV1> trainable_variables)
        {
            var gradients = tape.gradient(loss, trainable_variables);

            gradients = optimizer._aggregate_gradients(zip(gradients, trainable_variables));
            gradients = optimizer._clip_gradients(gradients);

            optimizer.apply_gradients(zip(gradients, trainable_variables.Select(x => x as ResourceVariable)),
                                      experimental_aggregate_gradients: false);
        }
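
A hedged sketch of how _minimize might be driven from a training step; `model` and `compute_loss` are hypothetical stand-ins, not part of the example above:

        // Sketch only: assumes a Model-like object and some loss function.
        void train_step(OptimizerV2 optimizer, Tensor x, Tensor y)
        {
            using var tape = tf.GradientTape();
            var pred = model.Apply(x, training: true);
            var loss = compute_loss(pred, y);

            // _minimize aggregates, clips, and applies the gradients.
            _minimize(tape, optimizer, loss, model.trainable_variables);
        }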
Example #4
        void run_optimization(OptimizerV2 optimizer, Tensor x, Tensor y, IVariableV1[] trainable_variables)
        {
            using var g = tf.GradientTape();
            var pred = neural_net(x);
            var loss = cross_entropy(pred, y);

            // Compute gradients.
            var gradients = g.gradient(loss, trainable_variables);

            // Update W and b following gradients.
            optimizer.apply_gradients(zip(gradients, trainable_variables.Select(v => v as ResourceVariable)));
        }
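
For context, a minimal loop that could drive this method over a dataset; `train_data` is assumed to be an enumerable of (x, y) batches and is not part of the snippet:

        // Sketch only: one optimization step per batch.
        foreach (var (batch_x, batch_y) in train_data)
        {
            run_optimization(optimizer, batch_x, batch_y, trainable_variables);
        }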
Example #5

        void run_optimization(ConvNet conv_net, OptimizerV2 optimizer, Tensor x, Tensor y)
        {
            using var g = tf.GradientTape();
            var pred = conv_net.Apply(x, training: true);
            var loss = cross_entropy_loss(pred, y);

            // Compute gradients.
            var gradients = g.gradient(loss, conv_net.trainable_variables);

            // Update W and b following gradients.
            optimizer.apply_gradients(zip(gradients, conv_net.trainable_variables.Select(v => v as ResourceVariable)));
        }
Example #6
        void run_optimization(OptimizerV2 optimizer, Tensor x, Tensor y)
        {
            using var g = tf.GradientTape();
            var pred = conv_net(x);
            var loss = cross_entropy(pred, y);

            // Compute gradients.
            var trainable_variables = new IVariableV1[] { wc1, wc2, wd1, wout, bc1, bc2, bd1, bout };
            var gradients           = g.gradient(loss, trainable_variables);

            // Update W and b following gradients.
            optimizer.apply_gradients(zip(gradients, trainable_variables.Select(v => v as ResourceVariable)));
        }
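
The variables wc1 through bout are created elsewhere; a plausible declaration for the two conv layers, assuming a LeNet-style network on 28x28 grayscale input (the shapes are illustrative guesses, and TF.NET's implicit tuple-to-Shape conversion is assumed):

        // Hypothetical shapes, e.g. for MNIST.
        var wc1 = tf.Variable(tf.random.normal((5, 5, 1, 32)));   // conv1: 5x5 kernel, 1 -> 32 channels
        var bc1 = tf.Variable(tf.zeros(32));
        var wc2 = tf.Variable(tf.random.normal((5, 5, 32, 64)));  // conv2: 5x5 kernel, 32 -> 64 channels
        var bc2 = tf.Variable(tf.zeros(64));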
Example #7
        // Run the optimizer
        void run_optimization(OptimizerV2 optimizer, Tensor x, Tensor y, IVariableV1[] trainable_variables)
        {
            using var g = tf.GradientTape();
            var pred = neural_net(x);
            var loss = cross_entropy(pred, y);

            // Compute gradients
            var gradients = g.gradient(loss, trainable_variables);

            // Update the model weights w and b
            optimizer.apply_gradients(zip(gradients, trainable_variables.Select(v => v as ResourceVariable)));
        }
Example #8

        public void compile(ILossFunc loss, OptimizerV2 optimizer, string[] metrics)
        {
            this.optimizer   = optimizer;
            compiled_loss    = new LossesContainer(loss, output_names: output_names);
            compiled_metrics = new MetricsContainer(metrics, output_names: output_names);

            int experimental_steps_per_execution = 1;

            _configure_steps_per_execution(experimental_steps_per_execution);

            // Initialize cache attrs.
            _reset_compile_cache();
            _is_compiled = true;
            this.loss    = loss;
        }
Example #9

        public void compile(OptimizerV2 optimizer = null,
                            ILossFunc loss        = null,
                            string[] metrics      = null)
        {
            // Fall back to library defaults when no optimizer or loss is supplied.
            this.optimizer = optimizer ?? new RMSprop(new RMSpropArgs());

            this.loss = loss ?? new MeanSquaredError();

            compiled_loss    = new LossesContainer(this.loss, output_names: output_names);
            compiled_metrics = new MetricsContainer(metrics, output_names: output_names);

            int experimental_steps_per_execution = 1;

            _configure_steps_per_execution(experimental_steps_per_execution);

            // Initialize cache attrs.
            _reset_compile_cache();
            _is_compiled = true;
        }
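
A usage sketch for the two overloads; the loss and optimizer choices below are illustrative, not taken from the snippets:

        // Explicit configuration (first overload).
        model.compile(loss: new MeanSquaredError(),
                      optimizer: keras.optimizers.Adam(0.001f),
                      metrics: new[] { "accuracy" });

        // Built-in defaults (second overload): RMSprop with MeanSquaredError.
        model.compile();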