Example #1
        public void SetState(ModelMode mode)
        {
            switch (mode)
            {
            case ModelMode.Standard:
                ModelMode    = ModelMode.Standard;
                ModelBuilder = _mainModel;
                break;

            case ModelMode.DeadEndFilled:
                if (_deadEndModelWrapper != null)
                {
                    ModelMode    = ModelMode.DeadEndFilled;
                    ModelBuilder = _deadEndModelWrapper;
                }
                else
                {
                    throw new ArgumentException("Dead end Mode has not been initialised");
                }
                break;

            default:
                throw new ArgumentOutOfRangeException();
            }
        }
Example #2
        public DualData(string id1, TData1 data1, string? id2, TData2 data2, ModelMode mode) : base(id1, data1, mode)
        {
            if (mode != ModelMode.Add)
            {
                if (id2._IsEmpty())
                {
                    throw new ArgumentNullException(nameof(id2));
                }

                id2 = id2._NonNullTrim();
            }
            else
            {
                if (id2._IsFilled())
                {
                    throw new ArgumentOutOfRangeException($"{nameof(id2)} is specified.");
                }
                id2 = null;
            }

            this.Id2   = id2;
            this.Data2 = data2;

            NormalizeImpl();
        }
Example #3
 public override NDArray Backward(NDArray input, NDArray gradOutput, ModelMode mode)
 {
     DNN.ConvolutionBackwardData(bwdDataAlgo, cd, workspace, weight, gradOutput, gradInput);
     DNN.ConvolutionBackwardFilter(bwdFilterAlgo, cd, workspace, input, gradOutput, gradWeight);
     DNN.ConvolutionBackwardBias(cd, gradOutput, gradBias);
     return(gradInput);
 }
Example #4
        public SingleData(string? id, TData data, ModelMode mode)
        {
            if (mode != ModelMode.Add)
            {
                if (id._IsEmpty())
                {
                    throw new ArgumentNullException(nameof(id));
                }

                id = id._NonNullTrim();
            }
            else
            {
                if (id._IsFilled())
                {
                    throw new ArgumentOutOfRangeException($"{nameof(id)} is specified.");
                }
                id = null;
            }

            this.Id   = id;
            this.Data = data;
            this.Mode = mode;

            NormalizeImpl();

            //            if (this.Data is IErrorCheckable check) check.CheckError();
        }
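
Examples #2 and #4 depend on string extension helpers (_IsEmpty, _IsFilled, _NonNullTrim) whose definitions are not shown. The stand-ins below are only an assumption about their behaviour, sketched so the null/whitespace checks above read on their own; the real library may differ.

// Hypothetical stand-ins for the helpers used by SingleData/DualData above.
internal static class StringHelperExtensions
{
    // Assumed: true when the string is null, empty, or whitespace only.
    public static bool _IsEmpty(this string? s) => string.IsNullOrWhiteSpace(s);

    // Assumed: true when the string has at least one non-whitespace character.
    public static bool _IsFilled(this string? s) => !string.IsNullOrWhiteSpace(s);

    // Assumed: never returns null; trims the input, or returns "" for null.
    public static string _NonNullTrim(this string? s) => s?.Trim() ?? string.Empty;
}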
Example #5
 public override NDArray Forward(NDArray input, ModelMode mode)
 {
     var keepElements = input.TVar() > threshold;
     (input.TVar().CMul(keepElements) + (1 - keepElements) * val)
         .Evaluate(activation);
     
     return activation;
 }
Example #6
        public SingleData()
        {
            this.Data = new TData();
            this.Id   = null;
            this.Mode = ModelMode.Unknown;

            NormalizeImpl();
        }
Example #7
 public ModelData(string model1, string model2, ModelMode modelMode, int interpolation = 0)
 {
     model1Name = Path.GetFileNameWithoutExtension(model1);
     model2Name = Path.GetFileNameWithoutExtension(model2);
     model1Path = model1;
     model2Path = model2;
     mode       = modelMode;
     interp     = interpolation;
 }
Example #8
        public override Tensor Forward(Tensor input, ModelMode mode)
        {
            var keepElements = input.TVar() > threshold;

            (input.TVar().CMul(keepElements) + (1 - keepElements) * val)
            .Evaluate(activation);

            return(activation);
        }
Example #9
        public override NDArray Backward(NDArray input, NDArray gradOutput, ModelMode mode)
        {
            if (gradInput != null)
            {
                gradInput.Dispose();
            }

            gradInput = gradOutput.View(lastInputSize);
            return(gradInput);
        }
Example #10
        public override Tensor Backward(Tensor input, Tensor gradOutput, ModelMode mode)
        {
            var go = gradOutput.TVar();
            var a  = activation.TVar().Exp().CMul(go.Sum(1).Expand(activation.Sizes));

            (go - a)
            .Evaluate(gradInput);

            return(gradInput);
        }
Example #11
        public override Tensor Forward(Tensor input, ModelMode mode)
        {
            using (var input4d = As4d(input))
                using (var activation4d = As4d(activation))
                {
                    DNN.SoftmaxForward(DNNSoftmaxAlgorithm.Log, DNNSoftmaxMode.Instance, input4d, activation4d);
                }

            return(activation);
        }
Example #12
        public override NDArray Backward(NDArray input, NDArray gradOutput, ModelMode mode)
        {
            var go = gradOutput.TVar();
            var a  = activation.TVar().Exp().CMul(go.Sum(1).Expand(activation.Shape));

            (go - a)
            .Evaluate(gradInput);

            return(gradInput);
        }
Example #13
 public override NDArray Forward(NDArray input, ModelMode mode)
 {
     if (activation != null)
     {
         activation.Dispose();
     }
     activation    = input.View(resultSize);
     lastInputSize = input.Shape;
     return(activation);
 }
Example #14
 public override Tensor Forward(Tensor input, ModelMode mode)
 {
     if (activation != null)
     {
         activation.Dispose();
     }
     activation    = input.View(resultSize);
     lastInputSize = input.Sizes;
     return(activation);
 }
Example #15
        public override Tensor Backward(Tensor input, Tensor gradOutput, ModelMode mode)
        {
            if (gradInput != null)
            {
                gradInput.Dispose();
            }

            gradInput = gradOutput.View(lastInputSize);
            return(gradInput);
        }
Example #16
        public override Tensor Forward(Tensor input, ModelMode mode)
        {
            // activation = [bias] + input * weights
            // where [bias] means broadcast the bias vector
            bias.TVar().Expand(batchSize, nOutput)
            .Addmm(1, 1, input, weights)
            .Evaluate(activation);

            return(activation);
        }
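
The comment in example #16 describes an affine map: each output row is the bias vector plus the input row multiplied by the weight matrix. Below is a framework-free illustration of that same math, with shapes assumed as input [batchSize, nInput], weights [nInput, nOutput], bias [nOutput]; it is a sketch of the formula, not the library's Addmm implementation.

// activation[b, o] = bias[o] + sum_i input[b, i] * weights[i, o]
static double[,] LinearForward(double[,] input, double[,] weights, double[] bias)
{
    int batchSize = input.GetLength(0);
    int nInput    = input.GetLength(1);
    int nOutput   = weights.GetLength(1);

    var activation = new double[batchSize, nOutput];

    for (int b = 0; b < batchSize; b++)
    {
        for (int o = 0; o < nOutput; o++)
        {
            double sum = bias[o];               // broadcast bias across the batch
            for (int i = 0; i < nInput; i++)
            {
                sum += input[b, i] * weights[i, o];
            }
            activation[b, o] = sum;
        }
    }

    return activation;
}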
Example #17
        public override Tensor Backward(Tensor input, Tensor gradOutput, ModelMode mode)
        {
            using (var activation4d = As4d(activation))
                using (var gradInput4d = As4d(gradInput))
                    using (var gradOutput4d = As4d(gradOutput))
                    {
                        DNN.SoftmaxBackward(DNNSoftmaxAlgorithm.Log, DNNSoftmaxMode.Instance, activation4d, gradInput4d, gradOutput4d);
                    }

            return(gradInput);
        }
Example #18
        public override Tensor Forward(Tensor input, ModelMode mode)
        {
            Tensor curOutput = input;

            foreach (var layer in layers)
            {
                curOutput = layer.Forward(curOutput, mode);
            }

            lastOutput = curOutput;
            return(curOutput);
        }
Example #19
        public override NDArray Forward(NDArray input, ModelMode mode)
        {
            NDArray curOutput = input;

            foreach (var layer in layers)
            {
                curOutput = layer.Forward(curOutput, mode);
            }

            lastOutput = curOutput;
            return(curOutput);
        }
Example #20
        public override Tensor Forward(Tensor input, ModelMode mode)
        {
            var maxes    = input.TVar().Max(1);
            var maxesExp = maxes.Expand(input.Sizes);

            var d      = (input - maxesExp).Exp().Sum(1).Log();
            var logSum = (d + maxes).Expand(input.Sizes);

            (input - logSum)
            .Evaluate(activation);

            return(activation);
        }
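
Example #20 computes log-softmax with the usual max-subtraction trick: log-softmax(x)_i = x_i - (max + log(sum_j exp(x_j - max))), which keeps the exponentials from overflowing. A plain-array sketch of the same formula, independent of the tensor API above:

using System;
using System.Linq;

// Numerically stable log-softmax over a single row of scores.
static double[] LogSoftmax(double[] row)
{
    double max    = row.Max();                          // shift by the row maximum
    double sumExp = row.Sum(x => Math.Exp(x - max));
    double logSum = max + Math.Log(sumExp);
    return row.Select(x => x - logSum).ToArray();
}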
Example #21
        public override NDArray Backward(NDArray input, NDArray gradOutput, ModelMode mode)
        {
            var curGradOutput = gradOutput;

            for (int i = layers.Count - 1; i > 0; --i)
            {
                var layer     = layers[i];
                var prevLayer = layers[i - 1];

                curGradOutput = layer.Backward(prevLayer.Output, curGradOutput, mode);
            }

            curGradOutput = layers[0].Backward(input, curGradOutput, mode);

            lastGradInput = curGradOutput;
            return(curGradOutput);
        }
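
Examples #18, #19 and #21 together form a sequential container: Forward runs the layers in order, while Backward walks them in reverse, handing each layer the forward output of the layer before it plus the running gradient. A minimal sketch of that pattern follows; the ILayer/Sequential names are illustrative, not taken from the original source.

using System.Collections.Generic;

public interface ILayer<T>
{
    T Output { get; }
    T Forward(T input, ModelMode mode);
    T Backward(T input, T gradOutput, ModelMode mode);
}

public sealed class Sequential<T>
{
    private readonly List<ILayer<T>> layers = new List<ILayer<T>>();

    public void Add(ILayer<T> layer) => layers.Add(layer);

    public T Forward(T input, ModelMode mode)
    {
        T cur = input;
        foreach (var layer in layers)
        {
            cur = layer.Forward(cur, mode);   // outputs flow forward in order
        }
        return cur;
    }

    public T Backward(T input, T gradOutput, ModelMode mode)
    {
        T curGrad = gradOutput;
        for (int i = layers.Count - 1; i > 0; --i)
        {
            // Each layer differentiates w.r.t. the input it saw in Forward,
            // i.e. the forward output of the layer just before it.
            curGrad = layers[i].Backward(layers[i - 1].Output, curGrad, mode);
        }
        return layers[0].Backward(input, curGrad, mode);
    }
}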
Example #22
        public override NDArray Forward(NDArray input, ModelMode mode)
        {
            Ops.Copy(activation, input);

            if (mode == ModelMode.Train)
            {
                var p = 1 - pRemove;

                Variable.RandomBernoulli(seedSource, p, allocator, elementType, noise.Shape)
                .Div(p)
                .Evaluate(noise);

                activation.TVar()
                .CMul(noise)
                .Evaluate(activation);
            }

            return(activation);
        }
Example #23
        public override Tensor Forward(Tensor input, ModelMode mode)
        {
            Ops.Copy(activation, input);

            if (mode == ModelMode.Train)
            {
                var p = 1 - pRemove;

                TVar.RandomBernoulli(seedSource, p, allocator, elementType, noise.Sizes)
                .Div(p)
                .Evaluate(noise);

                activation.TVar()
                .CMul(noise)
                .Evaluate(activation);
            }

            return(activation);
        }
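
Examples #22 and #23 implement inverted dropout: in Train mode each element is kept with probability p = 1 - pRemove and scaled by 1/p, so the expected activation equals the input and no rescaling is needed outside training. A framework-free sketch of the same idea, using plain arrays and System.Random in place of the library's Bernoulli sampler:

using System;

static double[] DropoutForward(double[] input, double pRemove, ModelMode mode, Random rng)
{
    var activation = (double[])input.Clone();

    if (mode == ModelMode.Train)
    {
        double p = 1 - pRemove;                  // keep probability
        for (int i = 0; i < activation.Length; i++)
        {
            // Keep with probability p and scale by 1/p, drop (zero) otherwise.
            double noise = rng.NextDouble() < p ? 1.0 / p : 0.0;
            activation[i] *= noise;
        }
    }

    return activation;                           // identity outside training
}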
Example #24
 private void OnTrimChange(object parameter)
 {
     if (parameter != null)
     {
         var trimItem    = (TrimItem)parameter;
         var tradeinInfo = ChromeHelper.GetTradeInInfo(trimItem.TrimId);
         if (tradeinInfo == null)
         {
             _mode        = ModelMode.New;
             SampleVin    = null;
             TradeinValue = null;
         }
         else
         {
             MessageBox.Show("Loading data from database");
             _mode            = ModelMode.Edit;
             EstimatedMileage = tradeinInfo.EstimatedZeroPointMileage;
             SampleVin        = tradeinInfo.SampleVIN;
             TradeinValue     = tradeinInfo.TradeInValue;
             // Update data fields
         }
     }
 }
Example #25
 public override NDArray Forward(NDArray input, ModelMode mode)
 {
     maxPool.SpatialMaxPoolingForward(input, activation, indices, cd, ceilMode);
     return(activation);
 }
Example #26
 public override NDArray Backward(NDArray input, NDArray gradOutput, ModelMode mode)
 {
     CpuMaxPoolingOps.SpatialMaxPoolingBackward(input, gradOutput, gradInput, indices, cd, ceilMode);
     return(gradInput);
 }
Example #27
 public override NDArray Backward(NDArray input, NDArray gradOutput, ModelMode mode)
 {
     DNN.PoolingBackward(poolingDesc, input, activation, gradInput, gradOutput);
     return(gradInput);
 }
Example #28
 public override NDArray Forward(NDArray input, ModelMode mode)
 {
     DNN.PoolingForward(poolingDesc, input, activation);
     return(activation);
 }
Example #29
 public override Tensor Backward(Tensor input, Tensor gradOutput, ModelMode mode)
 {
     UpdateGradInput(gradOutput);
     AccWeightGrads(input, gradOutput);
     return(gradInput);
 }
Example #30
 public override Tensor Forward(Tensor input, ModelMode mode)
 {
     maxPool.SpatialMaxPoolingForward(input, activation, indices, cd, ceilMode);
     return(activation);
 }