Example #1
            /// <summary>
            /// Add a param group to the Optimizer's param_groups.
            /// </summary>
            /// <param name="param_group">The parameter group to add.</param>
            /// <remarks>This can be useful when fine-tuning a pre-trained network: frozen layers can be made trainable and added to the Optimizer as training progresses.</remarks>
            public override void add_param_group(Modules.ParamGroup param_group)
            {
                var def = _defaults as Options;

                if (param_group.Options is null)
                {
                    param_group.Options = new Options();
                }

                var opt = param_group.Options as Options;

                // Make sure all the options are set.
                if (!opt.LearningRate.HasValue)
                {
                    opt.LearningRate = def.LearningRate;
                }
                if (!opt.beta1.HasValue)
                {
                    opt.beta1 = def.beta1;
                }
                if (!opt.beta2.HasValue)
                {
                    opt.beta2 = def.beta2;
                }
                if (!opt.eps.HasValue)
                {
                    opt.eps = def.eps;
                }
                if (!opt.weight_decay.HasValue)
                {
                    opt.weight_decay = def.weight_decay;
                }
                if (!opt.amsgrad.HasValue)
                {
                    opt.amsgrad = def.amsgrad;
                }
                if (!opt.maximize.HasValue)
                {
                    opt.maximize = def.maximize;
                }

                // Record the group's starting learning rate; LR schedulers use it as a reference point.
                opt.InitialLearningRate = opt.LearningRate.Value;

                _parameter_groups.Add(param_group);

                // Allocate fresh state for every parameter in the new group: first and
                // second moment estimates, plus a running max of the second moment when
                // amsgrad is enabled.
                foreach (var p in param_group.Parameters)
                {
                    var state = new State();
                    _state[p.Handle] = state;
                    state.step       = 0;
                    state.exp_avg    = torch.zeros_like(p);
                    state.exp_avg_sq = torch.zeros_like(p);
                    if (opt.amsgrad.Value)
                    {
                        state.max_exp_avg_sq = torch.zeros_like(p);
                    }
                }
            }
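
The option names (beta1, beta2, amsgrad) and the exp_avg/exp_avg_sq state buffers indicate this override comes from TorchSharp's Adam optimizer. Below is a minimal sketch of the fine-tuning scenario the remarks describe; `model`, its `head` and `encoder` submodules, and the `ParamGroup` constructor taking a parameter sequence are all assumptions, and the exact construction may vary between TorchSharp versions.

            // Sketch only: `model.head` / `model.encoder` are hypothetical modules, and
            // the ParamGroup constructor is assumed to accept the parameter sequence.
            var optimizer = torch.optim.Adam(model.head.parameters(), lr: 1e-3);

            // Later in training: unfreeze a previously frozen block and give it its own,
            // smaller learning rate. Any option left unset falls back to the defaults.
            foreach (var p in model.encoder.parameters())
                p.requires_grad = true;

            optimizer.add_param_group(new Modules.ParamGroup(model.encoder.parameters())
            {
                Options = new Options { LearningRate = 1e-4 }   // `Options` = the nested type above
            });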
Example #2
            /// <summary>
            /// Add a param group to the Optimizer's param_groups.
            /// </summary>
            /// <param name="param_group">The parameter group to add.</param>
            /// <remarks>This can be useful when fine-tuning a pre-trained network: frozen layers can be made trainable and added to the Optimizer as training progresses.</remarks>
            public override void add_param_group(Modules.ParamGroup param_group)
            {
                var def = _defaults as Options;

                if (param_group.Options is null)
                {
                    param_group.Options = new Options();
                }

                var opt = param_group.Options as Options;

                // Make sure all the options are set.
                if (!opt.LearningRate.HasValue)
                {
                    opt.LearningRate = def.LearningRate;
                }
                if (!opt.momentum.HasValue)
                {
                    opt.momentum = def.momentum;
                }
                if (!opt.eps.HasValue)
                {
                    opt.eps = def.eps;
                }
                if (!opt.alpha.HasValue)
                {
                    opt.alpha = def.alpha;
                }
                if (!opt.weight_decay.HasValue)
                {
                    opt.weight_decay = def.weight_decay;
                }
                if (!opt.centered.HasValue)
                {
                    opt.centered = def.centered;
                }

                opt.InitialLearningRate = opt.LearningRate.Value;

                _parameter_groups.Add(param_group);

                foreach (var p in param_group.Parameters)
                {
                    var state = new State();
                    _state[p.Handle]      = state;
                    state.square_avg      = torch.zeros_like(p);
                    state.grad_avg        = torch.zeros_like(p);
                    state.momentum_buffer = torch.zeros_like(p);
                }
            }
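
Here the momentum/alpha/centered options and the square_avg/grad_avg buffers match TorchSharp's RMSProp variant. The merging step above means callers only set the fields that should differ from the optimizer-wide defaults; a hedged sketch, where `layer` and the `ParamGroup` constructor are assumptions:

            // Only LearningRate is set explicitly; momentum, alpha, eps, weight_decay,
            // and centered are copied from the optimizer's defaults by add_param_group.
            var group = new Modules.ParamGroup(layer.parameters())
            {
                Options = new Options { LearningRate = 5e-4 }
            };
            optimizer.add_param_group(group);
            // group.Options is now fully populated, and square_avg, grad_avg, and
            // momentum_buffer have been allocated for every parameter in the group.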
Example #3
            /// <summary>
            /// Add a param group to the Optimizer's param_groups.
            /// </summary>
            /// <param name="param_group">The parameter group to add.</param>
            /// <remarks>This can be useful when fine-tuning a pre-trained network: frozen layers can be made trainable and added to the Optimizer as training progresses.</remarks>
            public override void add_param_group(Modules.ParamGroup param_group)
            {
                var def = _defaults as Options;

                if (param_group.Options is null)
                {
                    param_group.Options = new Options();
                }

                var opt = param_group.Options as Options;

                // Make sure all the options are set.
                if (!opt.LearningRate.HasValue)
                {
                    opt.LearningRate = def.LearningRate;
                }
                if (!opt.momentum.HasValue)
                {
                    opt.momentum = def.momentum;
                }
                if (!opt.dampening.HasValue)
                {
                    opt.dampening = def.dampening;
                }
                if (!opt.weight_decay.HasValue)
                {
                    opt.weight_decay = def.weight_decay;
                }
                if (!opt.nesterov.HasValue)
                {
                    opt.nesterov = def.nesterov;
                }
                if (!opt.maximize.HasValue)
                {
                    opt.maximize = def.maximize;
                }

                opt.InitialLearningRate = opt.LearningRate.Value;

                _parameter_groups.Add(param_group);

                foreach (var p in param_group.Parameters)
                {
                    var state = new State();
                    _state[p.Handle]      = state;
                    state.momentum_buffer = null;
                }
            }
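
The momentum/dampening/nesterov trio marks this as the SGD variant. Unlike the other examples, the state starts with a null momentum_buffer rather than a zeros tensor: the buffer is only needed when momentum is non-zero, so it is typically allocated lazily. A sketch of that common lazy-initialization pattern (not the library's actual step() code; `grad` is a stand-in for the parameter's gradient):

            // Inside a hypothetical step(), once the first gradient is available:
            if (state.momentum_buffer is null)
            {
                // PyTorch-style lazy init: seed the buffer with a copy of the gradient.
                state.momentum_buffer = grad.clone().detach();
            }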
Example #4
            /// <summary>
            /// Add a param group to the Optimizer's param_groups.
            /// </summary>
            /// <param name="param_group">The parameter group to add.</param>
            /// <remarks>This can be useful when fine-tuning a pre-trained network: frozen layers can be made trainable and added to the Optimizer as training progresses.</remarks>
            public override void add_param_group(Modules.ParamGroup param_group)
            {
                var def = _defaults as Options;

                if (param_group.Options is null)
                {
                    param_group.Options = new Options();
                }

                var opt = param_group.Options as Options;

                // Make sure all the options are set.
                if (!opt.LearningRate.HasValue)
                {
                    opt.LearningRate = def.LearningRate;
                }
                if (!opt.rho.HasValue)
                {
                    opt.rho = def.rho;
                }
                if (!opt.eps.HasValue)
                {
                    opt.eps = def.eps;
                }
                if (!opt.weight_decay.HasValue)
                {
                    opt.weight_decay = def.weight_decay;
                }

                opt.InitialLearningRate = opt.LearningRate.Value;

                _parameter_groups.Add(param_group);

                foreach (var p in param_group.Parameters)
                {
                    var state = new State();
                    _state[p.Handle] = state;
                    state.step       = 0;
                    state.square_avg = torch.zeros_like(p);
                    state.acc_delta  = torch.zeros_like(p);
                }
            }
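
Finally, rho, eps, and the square_avg/acc_delta pair identify Adadelta; the two buffers hold the running averages its update rule needs. The textbook recurrence, written against this State as a rough sketch (the library's actual step() may differ, and `p`, `grad`, `rho`, `eps`, and `lr` are stand-ins):

            // Textbook Adadelta update for one parameter `p` with gradient `grad`:
            state.square_avg = rho * state.square_avg + (1 - rho) * grad * grad;
            var delta = grad * (state.acc_delta + eps).sqrt() / (state.square_avg + eps).sqrt();
            state.acc_delta  = rho * state.acc_delta + (1 - rho) * delta * delta;
            p.sub_(delta * lr);   // lr scales the step; 1.0 in the original paper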