// Switches the active model between the standard model and the dead-end-filled
// wrapper. Selecting DeadEndFilled before the wrapper has been initialised throws.
public void SetState(ModelMode mode)
{
    switch (mode)
    {
        case ModelMode.Standard:
            ModelMode = ModelMode.Standard;
            ModelBuilder = _mainModel;
            break;

        case ModelMode.DeadEndFilled:
            // Guard clause first: the wrapper is created lazily elsewhere.
            if (_deadEndModelWrapper == null)
            {
                throw new ArgumentException("Dead end Mode has not been initialised");
            }
            ModelMode = ModelMode.DeadEndFilled;
            ModelBuilder = _deadEndModelWrapper;
            break;

        default:
            // Name the offending argument instead of throwing a bare exception.
            throw new ArgumentOutOfRangeException(nameof(mode));
    }
}
// Initialises a dual-key record. Outside Add mode the secondary ID must be
// present (it is trimmed and stored); in Add mode it must be empty and is
// forced to null. Finishes with the shared normalisation step.
public DualData(string id1, TData1 data1, string? id2, TData2 data2, ModelMode mode) : base(id1, data1, mode)
{
    if (mode != ModelMode.Add)
    {
        if (id2._IsEmpty())
        {
            throw new ArgumentNullException(nameof(id2));
        }
        id2 = id2._NonNullTrim();
    }
    else
    {
        if (id2._IsFilled())
        {
            // Fix: the single-argument ArgumentOutOfRangeException ctor takes a
            // paramName, so the original reported the message AS the parameter
            // name. Pass paramName and message separately.
            throw new ArgumentOutOfRangeException(nameof(id2), $"{nameof(id2)} is specified.");
        }
        id2 = null;
    }
    this.Id2 = id2;
    this.Data2 = data2;
    NormalizeImpl();
}
// Convolution backward pass: computes the three gradients via the DNN
// convolution backward primitives. `mode` is unused here.
public override NDArray Backward(NDArray input, NDArray gradOutput, ModelMode mode)
{
    // Gradient w.r.t. the layer input.
    DNN.ConvolutionBackwardData(bwdDataAlgo, cd, workspace, weight, gradOutput, gradInput);
    // Gradient w.r.t. the convolution weights.
    DNN.ConvolutionBackwardFilter(bwdFilterAlgo, cd, workspace, input, gradOutput, gradWeight);
    // Gradient w.r.t. the bias.
    DNN.ConvolutionBackwardBias(cd, gradOutput, gradBias);
    return gradInput;
}
// Initialises a single-key record. Outside Add mode the ID must be present
// (it is trimmed and stored); in Add mode it must be empty and is forced to
// null. Finishes with the shared normalisation step.
public SingleData(string? id, TData data, ModelMode mode)
{
    if (mode != ModelMode.Add)
    {
        if (id._IsEmpty())
        {
            throw new ArgumentNullException(nameof(id));
        }
        id = id._NonNullTrim();
    }
    else
    {
        // Fix: use the _IsFilled() extension (matching the sibling DualData
        // constructor) instead of the suspect "(IsFilled)id" cast, and give
        // ArgumentOutOfRangeException its paramName and message separately.
        if (id._IsFilled())
        {
            throw new ArgumentOutOfRangeException(nameof(id), $"{nameof(id)} is specified.");
        }
        id = null;
    }
    this.Id = id;
    this.Data = data;
    this.Mode = mode;
    NormalizeImpl();
}
// Threshold forward: elements above `threshold` pass through unchanged;
// every other element is replaced by `val`.
public override NDArray Forward(NDArray input, ModelMode mode)
{
    var mask = input.TVar() > threshold;
    var kept = input.TVar().CMul(mask);
    var replaced = (1 - mask) * val;
    (kept + replaced).Evaluate(activation);
    return activation;
}
// Parameterless constructor: an empty record with no ID and Unknown mode,
// followed by the shared normalisation step.
public SingleData()
{
    this.Mode = ModelMode.Unknown;
    this.Id = null;
    this.Data = new TData();
    NormalizeImpl();
}
// Captures the two model file paths (plus display names derived from the
// file names), the mode, and an optional interpolation amount.
public ModelData(string model1, string model2, ModelMode modelMode, int interpolation = 0)
{
    model1Path = model1;
    model2Path = model2;
    model1Name = Path.GetFileNameWithoutExtension(model1);
    model2Name = Path.GetFileNameWithoutExtension(model2);
    mode = modelMode;
    interp = interpolation;
}
// Threshold forward: elements above `threshold` pass through unchanged;
// every other element is replaced by `val`.
public override Tensor Forward(Tensor input, ModelMode mode)
{
    var mask = input.TVar() > threshold;
    var kept = input.TVar().CMul(mask);
    var replaced = (1 - mask) * val;
    (kept + replaced).Evaluate(activation);
    return activation;
}
// Reshape backward: the gradient is viewed back to the input's original
// size. Any previously held gradient buffer is released first.
public override NDArray Backward(NDArray input, NDArray gradOutput, ModelMode mode)
{
    gradInput?.Dispose();
    gradInput = gradOutput.View(lastInputSize);
    return gradInput;
}
// Backward pass: gradInput = gradOutput - exp(activation) * sum(gradOutput, dim 1).
public override Tensor Backward(Tensor input, Tensor gradOutput, ModelMode mode)
{
    var gradOut = gradOutput.TVar();
    var rowSums = gradOut.Sum(1).Expand(activation.Sizes);
    var scaled = activation.TVar().Exp().CMul(rowSums);
    (gradOut - scaled).Evaluate(gradInput);
    return gradInput;
}
// Runs the DNN log-softmax forward kernel; the kernel works on 4-D views,
// so input and activation are temporarily wrapped via As4d.
public override Tensor Forward(Tensor input, ModelMode mode)
{
    using (var in4d = As4d(input))
    using (var act4d = As4d(activation))
    {
        DNN.SoftmaxForward(DNNSoftmaxAlgorithm.Log, DNNSoftmaxMode.Instance, in4d, act4d);
    }
    return activation;
}
// Backward pass: gradInput = gradOutput - exp(activation) * sum(gradOutput, dim 1).
public override NDArray Backward(NDArray input, NDArray gradOutput, ModelMode mode)
{
    var gradOut = gradOutput.TVar();
    var rowSums = gradOut.Sum(1).Expand(activation.Shape);
    var scaled = activation.TVar().Exp().CMul(rowSums);
    (gradOut - scaled).Evaluate(gradInput);
    return gradInput;
}
// Reshape forward: view the input as `resultSize`, remembering the original
// shape so Backward can view the gradient back. Old activation is released.
public override NDArray Forward(NDArray input, ModelMode mode)
{
    activation?.Dispose();
    activation = input.View(resultSize);
    lastInputSize = input.Shape;
    return activation;
}
// Reshape forward: view the input as `resultSize`, remembering the original
// sizes so Backward can view the gradient back. Old activation is released.
public override Tensor Forward(Tensor input, ModelMode mode)
{
    activation?.Dispose();
    activation = input.View(resultSize);
    lastInputSize = input.Sizes;
    return activation;
}
// Reshape backward: the gradient is viewed back to the input's original
// size. Any previously held gradient buffer is released first.
public override Tensor Backward(Tensor input, Tensor gradOutput, ModelMode mode)
{
    gradInput?.Dispose();
    gradInput = gradOutput.View(lastInputSize);
    return gradInput;
}
// Linear layer forward: activation = bias (broadcast across the batch) + input * weights.
public override Tensor Forward(Tensor input, ModelMode mode)
{
    var broadcastBias = bias.TVar().Expand(batchSize, nOutput);
    broadcastBias.Addmm(1, 1, input, weights).Evaluate(activation);
    return activation;
}
// Runs the DNN log-softmax backward kernel; the kernel works on 4-D views,
// so all three tensors are temporarily wrapped via As4d.
public override Tensor Backward(Tensor input, Tensor gradOutput, ModelMode mode)
{
    using (var act4d = As4d(activation))
    using (var gradIn4d = As4d(gradInput))
    using (var gradOut4d = As4d(gradOutput))
    {
        DNN.SoftmaxBackward(DNNSoftmaxAlgorithm.Log, DNNSoftmaxMode.Instance, act4d, gradIn4d, gradOut4d);
    }
    return gradInput;
}
// Sequential forward: feed the input through every layer in order and
// remember the final output.
public override Tensor Forward(Tensor input, ModelMode mode)
{
    var output = input;
    foreach (var layer in layers)
    {
        output = layer.Forward(output, mode);
    }
    lastOutput = output;
    return output;
}
// Sequential forward: feed the input through every layer in order and
// remember the final output.
public override NDArray Forward(NDArray input, ModelMode mode)
{
    var output = input;
    foreach (var layer in layers)
    {
        output = layer.Forward(output, mode);
    }
    lastOutput = output;
    return output;
}
// Numerically stable log-softmax along dimension 1:
// activation = input - (rowMax + log(sum(exp(input - rowMax)))).
public override Tensor Forward(Tensor input, ModelMode mode)
{
    var rowMax = input.TVar().Max(1);
    var rowMaxExpanded = rowMax.Expand(input.Sizes);
    var logSumExp = (input - rowMaxExpanded).Exp().Sum(1).Log();
    var normalizer = (logSumExp + rowMax).Expand(input.Sizes);
    (input - normalizer).Evaluate(activation);
    return activation;
}
// Sequential backward: propagate the gradient from the last layer down to
// the first. Each hidden layer receives its predecessor's cached Output as
// its input; the first layer receives the network input itself.
public override NDArray Backward(NDArray input, NDArray gradOutput, ModelMode mode)
{
    var grad = gradOutput;
    for (var i = layers.Count - 1; i >= 1; --i)
    {
        grad = layers[i].Backward(layers[i - 1].Output, grad, mode);
    }
    grad = layers[0].Backward(input, grad, mode);
    lastGradInput = grad;
    return grad;
}
// Inverted dropout. In Train mode each element is kept with probability
// (1 - pRemove) and the kept values are scaled by 1/keepProb so the expected
// activation is unchanged; in any other mode the input is copied through.
public override NDArray Forward(NDArray input, ModelMode mode)
{
    Ops.Copy(activation, input);
    if (mode != ModelMode.Train)
    {
        return activation;
    }

    var keepProb = 1 - pRemove;
    Variable.RandomBernoulli(seedSource, keepProb, allocator, elementType, noise.Shape)
        .Div(keepProb)
        .Evaluate(noise);
    activation.TVar().CMul(noise).Evaluate(activation);
    return activation;
}
// Inverted dropout. In Train mode each element is kept with probability
// (1 - pRemove) and the kept values are scaled by 1/keepProb so the expected
// activation is unchanged; in any other mode the input is copied through.
public override Tensor Forward(Tensor input, ModelMode mode)
{
    Ops.Copy(activation, input);
    if (mode != ModelMode.Train)
    {
        return activation;
    }

    var keepProb = 1 - pRemove;
    TVar.RandomBernoulli(seedSource, keepProb, allocator, elementType, noise.Sizes)
        .Div(keepProb)
        .Evaluate(noise);
    activation.TVar().CMul(noise).Evaluate(activation);
    return activation;
}
// Handles a trim selection change: looks up the trade-in info for the chosen
// trim and either resets the form (no data -> New mode) or populates it from
// the stored record (Edit mode).
private void OnTrimChange(object parameter)
{
    // Pattern match instead of a null check followed by an unchecked cast;
    // a non-TrimItem parameter is now ignored rather than throwing.
    if (!(parameter is TrimItem trimItem))
    {
        return;
    }

    var tradeinInfo = ChromeHelper.GetTradeInInfo(trimItem.TrimId);
    if (tradeinInfo == null)
    {
        _mode = ModelMode.New;
        SampleVin = null;
        TradeinValue = null;
    }
    else
    {
        MessageBox.Show("Loading data from database");
        _mode = ModelMode.Edit;
        // Update the data fields from the stored record.
        EstimatedMileage = tradeinInfo.EstimatedZeroPointMileage;
        SampleVin = tradeinInfo.SampleVIN;
        TradeinValue = tradeinInfo.TradeInValue;
    }
}
// Max-pooling forward: delegates to the spatial max-pooling kernel, which
// also fills `indices` (the argmax positions used by the backward pass).
public override NDArray Forward(NDArray input, ModelMode mode)
{
    maxPool.SpatialMaxPoolingForward(input, activation, indices, cd, ceilMode);
    return activation;
}
// Max-pooling backward: routes each output gradient back to the input
// position recorded in `indices` during the forward pass.
public override NDArray Backward(NDArray input, NDArray gradOutput, ModelMode mode)
{
    CpuMaxPoolingOps.SpatialMaxPoolingBackward(input, gradOutput, gradInput, indices, cd, ceilMode);
    return gradInput;
}
// Pooling backward: delegates to the DNN pooling backward kernel using the
// descriptor configured at construction time.
public override NDArray Backward(NDArray input, NDArray gradOutput, ModelMode mode)
{
    DNN.PoolingBackward(poolingDesc, input, activation, gradInput, gradOutput);
    return gradInput;
}
// Pooling forward: delegates to the DNN pooling forward kernel using the
// descriptor configured at construction time.
public override NDArray Forward(NDArray input, ModelMode mode)
{
    DNN.PoolingForward(poolingDesc, input, activation);
    return activation;
}
// Linear layer backward: first compute the gradient w.r.t. the input, then
// accumulate the parameter gradients from this batch.
public override Tensor Backward(Tensor input, Tensor gradOutput, ModelMode mode)
{
    UpdateGradInput(gradOutput);
    AccWeightGrads(input, gradOutput);
    return gradInput;
}
// Max-pooling forward: delegates to the spatial max-pooling kernel, which
// also fills `indices` (the argmax positions used by the backward pass).
public override Tensor Forward(Tensor input, ModelMode mode)
{
    maxPool.SpatialMaxPoolingForward(input, activation, indices, cd, ceilMode);
    return activation;
}