Пример #1
0
        /// <summary>
        /// Base for updaters that keep per-weight state: allocates a zero-filled
        /// cache matrix with the same dimensions as the wrapped layer's weight matrix.
        /// </summary>
        /// <param name="layerUpdater">The layer updater being decorated</param>
        /// <param name="lap">Provider used to allocate the cache matrix</param>
        internal PerWeightUpdateBase(INeuralNetworkLayerUpdater layerUpdater, ILinearAlgebraProvider lap)
        {
            _layerUpdater = layerUpdater;

            var weight = layerUpdater.Layer.Weight;
            _cache = lap.Create(weight.RowCount, weight.ColumnCount, (j, k) => 0f);
        }
Пример #2
0
 /// <summary>
 /// Creates a trainer that stochastically filters updates (dropout / drop connect)
 /// using a Bernoulli distribution with keep-probability (1 - ratio).
 /// </summary>
 /// <param name="lap">Linear algebra provider</param>
 /// <param name="layerUpdater">The layer updater to wrap</param>
 /// <param name="ratio">Fraction of values to drop, in [0, 1]</param>
 public FilteredTrainer(ILinearAlgebraProvider lap, INeuralNetworkLayerUpdater layerUpdater, float ratio)
     : base(layerUpdater)
 {
     _lap = lap;

     // BUG FIX: "- ratio" had been commented out ("= 1;// - ratio;"), which left the
     // ratio parameter unused and made Bernoulli(1) keep everything — the filter was
     // a no-op. Restore the inverted keep-probability.
     _invertedMultiplier      = 1 - ratio;
     _probabilityDistribution = new Bernoulli(_invertedMultiplier);
 }
Пример #3
0
        /// <summary>
        /// Adam updater: stores the two decay rates and allocates a second zero-filled
        /// per-weight cache (the base constructor allocates the first).
        /// </summary>
        /// <param name="layerUpdater">The layer updater being decorated</param>
        /// <param name="lap">Provider used to allocate the cache matrix</param>
        /// <param name="decay">First-moment decay rate</param>
        /// <param name="decay2">Second-moment decay rate</param>
        public AdamUpdater(INeuralNetworkLayerUpdater layerUpdater, ILinearAlgebraProvider lap, float decay, float decay2) : base(layerUpdater, lap)
        {
            _decay  = decay;
            _decay2 = decay2;

            var weight = layerUpdater.Layer.Weight;
            _cache2 = lap.Create(weight.RowCount, weight.ColumnCount, (j, k) => 0f);
        }
Пример #4
0
            // Computes the gradient contributions for the input-facing (w) and
            // recurrent (u) layers from the error signal and records both on the
            // update accumulator.
            void _Update(IMatrix error, INeuralNetworkLayerUpdater w, INeuralNetworkLayerUpdater u, INeuralNetworkUpdateAccumulator updateAccumulator)
            {
                var inputDelta     = _input.TransposeThisAndMultiply(error);
                var recurrentDelta = _pc.TransposeThisAndMultiply(error);

                updateAccumulator.Record(w, error, inputDelta);
                // the second record gets its own copy of the error matrix
                updateAccumulator.Record(u, error.Clone(), recurrentDelta);
            }
Пример #5
0
 /// <summary>
 /// Captures the forward-pass state (activation, the two layer updaters and the
 /// input/memory/output matrices) needed to backpropagate through this step.
 /// </summary>
 public Backpropagation(IActivationFunction activation, INeuralNetworkLayerUpdater inputUpdater, INeuralNetworkLayerUpdater memoryUpdater, IMatrix input, IMatrix memory, IMatrix output)
 {
     _activation = activation;
     _inputUpdater = inputUpdater;
     _memoryUpdater = memoryUpdater;
     _input = input;
     _memory = memory;
     _output = output;
 }
Пример #6
0
        // Wraps the layer updater in the trainer type requested by the descriptor:
        // drop connect, dropout, or the plain standard trainer as the default.
        INeuralNetworkLayerTrainer _CreateLayerUpdater(INeuralNetworkLayerUpdater layerUpdater, LayerDescriptor init)
        {
            if (init.LayerTrainer == LayerTrainerType.DropConnect)
                return _trainer.DropConnect(layerUpdater, init.Dropout);

            if (init.LayerTrainer == LayerTrainerType.Dropout)
                return _trainer.Dropout(layerUpdater, init.Dropout);

            return _trainer.Standard(layerUpdater);
        }
Пример #7
0
        /// <summary>
        /// Builds an LSTM cell: one input-facing layer (w*, inputSize → hiddenSize) and
        /// one hidden-facing layer (u*, hiddenSize → hiddenSize) for each of the four
        /// gates — candidate (c), input (i), forget (f) and output (o).
        /// </summary>
        /// <param name="inputSize">Number of input features</param>
        /// <param name="hiddenSize">Size of the hidden/cell state</param>
        /// <param name="factory">Factory providing the LA provider, activation and layers</param>
        /// <param name="template">Descriptor used to create each gate layer</param>
        public Lstm(int inputSize, int hiddenSize, INeuralNetworkFactory factory, LayerDescriptor template)
        {
            _lap        = factory.LinearAlgebraProvider;
            _activation = factory.GetActivation(template.Activation);

            // input-facing gate layers (inputSize → hiddenSize)
            _wc = CreateLayer(inputSize, hiddenSize, factory, template);
            _wi = CreateLayer(inputSize, hiddenSize, factory, template);
            _wf = CreateLayer(inputSize, hiddenSize, factory, template);
            _wo = CreateLayer(inputSize, hiddenSize, factory, template);

            // recurrent gate layers (hiddenSize → hiddenSize)
            _uc = CreateLayer(hiddenSize, hiddenSize, factory, template);
            _ui = CreateLayer(hiddenSize, hiddenSize, factory, template);
            _uf = CreateLayer(hiddenSize, hiddenSize, factory, template);
            _uo = CreateLayer(hiddenSize, hiddenSize, factory, template);
        }
Пример #8
0
 /// <summary>
 /// Captures the LSTM forward-pass state needed for backpropagation: the gate
 /// activations (c, ca, pc, o, a, i, f), the input matrix, a ones matrix, and the
 /// recurrent (u*) and input-facing (w*) updaters for all four gates.
 /// </summary>
 public Backpropagation(IActivationFunction activation, IMatrix ones, IMatrix c, IMatrix ca, IMatrix pc, IMatrix o, IMatrix a, IMatrix i, IMatrix f, IMatrix input,
                        INeuralNetworkLayerUpdater uc, INeuralNetworkLayerUpdater wc,
                        INeuralNetworkLayerUpdater ui, INeuralNetworkLayerUpdater wi,
                        INeuralNetworkLayerUpdater uf, INeuralNetworkLayerUpdater wf,
                        INeuralNetworkLayerUpdater uo, INeuralNetworkLayerUpdater wo
                        )
 {
     _activation = activation;
     _ones = ones;
     _input = input;

     // forward-pass gate state
     _c = c;
     _ca = ca;
     _pc = pc;
     _o = o;
     _a = a;
     _i = i;
     _f = f;

     // per-gate updaters: recurrent (u*) and input-facing (w*)
     _uc = uc;
     _wc = wc;
     _ui = ui;
     _wi = wi;
     _uf = uf;
     _wf = wf;
     _uo = uo;
     _wo = wo;
 }
Пример #9
0
 /// <summary>
 /// Decorates the updater with Adagrad (adaptive per-weight learning rates).
 /// </summary>
 /// <param name="primary">The updater to decorate</param>
 public INeuralNetworkLayerUpdater Adagrad(INeuralNetworkLayerUpdater primary)
 {
     return new AdagradUpdater(primary, _lap);
 }
Пример #10
0
 /// <summary>
 /// Creates the plain trainer with no dropout/drop-connect filtering.
 /// </summary>
 /// <param name="layerUpdater">The layer updater to train with</param>
 public INeuralNetworkLayerTrainer Standard(INeuralNetworkLayerUpdater layerUpdater)
 {
     return new StandardTrainer(layerUpdater);
 }
Пример #11
0
 /// <summary>
 /// Creates a drop-connect trainer around the given updater.
 /// </summary>
 /// <param name="layerUpdater">The layer updater to train with</param>
 /// <param name="ratio">Drop ratio passed to the trainer</param>
 public INeuralNetworkLayerTrainer DropConnect(INeuralNetworkLayerUpdater layerUpdater, float ratio)
 {
     return new DropConnectTrainer(_lap, layerUpdater, ratio);
 }
Пример #12
0
 /// <summary>
 /// Plain trainer: stores the updater and the optional derivative-verification flag.
 /// </summary>
 /// <param name="layerUpdater">The layer updater to apply</param>
 /// <param name="verifyDerivatives">When true, derivatives are verified (debugging aid)</param>
 public StandardTrainer(INeuralNetworkLayerUpdater layerUpdater, bool verifyDerivatives = false)
 {
     _layerUpdater = layerUpdater;
     _verifyDerivatives = verifyDerivatives;
 }
Пример #13
0
 /// <summary>
 /// Drop-connect trainer: pure pass-through to the filtered base constructor.
 /// </summary>
 /// <param name="lap">Linear algebra provider</param>
 /// <param name="layerUpdater">The layer updater to wrap</param>
 /// <param name="ratio">Drop ratio forwarded to the base class</param>
 public DropConnectTrainer(ILinearAlgebraProvider lap, INeuralNetworkLayerUpdater layerUpdater, float ratio)
     : base(lap, layerUpdater, ratio)
 {
 }
Пример #14
0
 /// <summary>
 /// Momentum updater: stores the momentum coefficient; the base class allocates
 /// the per-weight cache.
 /// </summary>
 /// <param name="layerUpdater">The layer updater being decorated</param>
 /// <param name="lap">Linear algebra provider for the base cache</param>
 /// <param name="momentum">Momentum coefficient</param>
 public MomentumUpdater(INeuralNetworkLayerUpdater layerUpdater, ILinearAlgebraProvider lap, float momentum) : base(layerUpdater, lap)
 {
     _momentum = momentum;
 }
Пример #15
0
 /// <summary>
 /// Simple recurrent cell: one input layer (inputSize → hiddenSize) and one
 /// recurrent memory layer (hiddenSize → hiddenSize).
 /// </summary>
 /// <param name="inputSize">Number of input features</param>
 /// <param name="hiddenSize">Size of the hidden state</param>
 /// <param name="factory">Factory providing the activation and layers</param>
 /// <param name="template">Descriptor used to create each layer</param>
 public SimpleRecurrent(int inputSize, int hiddenSize, INeuralNetworkFactory factory, LayerDescriptor template)
 {
     _activation = factory.GetActivation(template.Activation);
     _input = CreateLayer(inputSize, hiddenSize, factory, template);
     _memory = CreateLayer(hiddenSize, hiddenSize, factory, template);
 }
Пример #16
0
 /// <summary>
 /// Decorates the updater with Adam using the two supplied decay rates.
 /// </summary>
 /// <param name="primary">The updater to decorate</param>
 /// <param name="beta1">First-moment decay rate</param>
 /// <param name="beta2">Second-moment decay rate</param>
 public INeuralNetworkLayerUpdater Adam(INeuralNetworkLayerUpdater primary, float beta1, float beta2)
 {
     return new AdamUpdater(primary, _lap, beta1, beta2);
 }
Пример #17
0
 /// <summary>
 /// Nesterov momentum updater: pure pass-through to the momentum base constructor.
 /// </summary>
 /// <param name="layerUpdater">The layer updater being decorated</param>
 /// <param name="lap">Linear algebra provider for the base cache</param>
 /// <param name="momentum">Momentum coefficient forwarded to the base class</param>
 public NesterovMomentumUpdater(INeuralNetworkLayerUpdater layerUpdater, ILinearAlgebraProvider lap, float momentum) : base(layerUpdater, lap, momentum)
 {
 }
Пример #18
0
 /// <summary>
 /// Adagrad updater: pure pass-through to the per-weight base constructor,
 /// which allocates the gradient-accumulation cache.
 /// </summary>
 /// <param name="layerUpdater">The layer updater being decorated</param>
 /// <param name="lap">Linear algebra provider for the base cache</param>
 public AdagradUpdater(INeuralNetworkLayerUpdater layerUpdater, ILinearAlgebraProvider lap) : base(layerUpdater, lap)
 {
 }
Пример #19
0
 /// <summary>
 /// Decorates the updater with Nesterov momentum.
 /// </summary>
 /// <param name="primary">The updater to decorate</param>
 /// <param name="momentumAmount">Momentum coefficient</param>
 public INeuralNetworkLayerUpdater NesterovMomentum(INeuralNetworkLayerUpdater primary, float momentumAmount)
 {
     return new NesterovMomentumUpdater(primary, _lap, momentumAmount);
 }
Пример #20
0
 /// <summary>
 /// RMSprop updater: stores the decay rate; the base class allocates the
 /// per-weight cache.
 /// </summary>
 /// <param name="layerUpdater">The layer updater being decorated</param>
 /// <param name="lap">Linear algebra provider for the base cache</param>
 /// <param name="decayRate">Decay rate for the running gradient average</param>
 public RMSpropUpdater(INeuralNetworkLayerUpdater layerUpdater, ILinearAlgebraProvider lap, float decayRate) : base(layerUpdater, lap)
 {
     _decayRate = decayRate;
 }
Пример #21
0
 /// <summary>
 /// Decorates the updater with RMSprop using the supplied decay rate.
 /// </summary>
 /// <param name="primary">The updater to decorate</param>
 /// <param name="decayRate">Decay rate for the running gradient average</param>
 public INeuralNetworkLayerUpdater RMSprop(INeuralNetworkLayerUpdater primary, float decayRate)
 {
     return new RMSpropUpdater(primary, _lap, decayRate);
 }
Пример #22
0
 /// <summary>
 /// Queues a pending (updater, bias delta, weight delta) triple to be applied later.
 /// </summary>
 /// <param name="updater">The updater the deltas belong to</param>
 /// <param name="bias">Bias delta matrix</param>
 /// <param name="weights">Weight delta matrix</param>
 public void Record(INeuralNetworkLayerUpdater updater, IMatrix bias, IMatrix weights)
 {
     var pending = Tuple.Create(updater, bias, weights);
     _update.Add(pending);
 }