Example #1
        public static Tensor Conv(IGraphNodeBase input, int[] filtersShape,
                                  Func<Tensor, Tensor>? activation,
                                  bool downsample = false,
                                  bool batchNorm  = true
                                  )
        {
            if (input is null)
            {
                throw new ArgumentNullException(nameof(input));
            }
            if (filtersShape is null)
            {
                throw new ArgumentNullException(nameof(filtersShape));
            }

            int            strides          = 1;
            IGraphNodeBase convolutionInput = input;
            string         padding          = "same";

            if (downsample)
            {
                // stride-2 downsampling: zero-pad one row/column at the top/left and switch to "valid" padding
                convolutionInput = ZeroPadding2D.NewDyn(padding: ((1, 0), (1, 0))).__call__(input);
                padding          = "valid";
                strides          = 2;
            }

            var convLayer = new Conv2D(filters: filtersShape[^1], kernel_size: filtersShape[0],
Example #2
        static dynamic Inference(IGraphNodeBase W, IGraphNodeBase b, dynamic inputData, dynamic targetData)
        {
            var prediction = tf.sign_dyn(tf.subtract(tf.matmul(inputData, W), b));
            var accuracy   = tf.reduce_mean(tf.cast(tf.equal(prediction, targetData), tf.float32));

            return(accuracy);
        }
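The accuracy above is the mean of element-wise equality between sign(inputData·W − b) and targetData. A plain scalar sketch of the same computation (a hypothetical helper assuming ±1 labels, independent of the tensor API):

        // predict sign(x·w - b) for every sample and report the fraction that matches the labels
        static float Accuracy(float[][] inputs, float[] w, float b, float[] targets)
        {
            int correct = 0;
            for (int i = 0; i < inputs.Length; i++)
            {
                float score = -b;
                for (int j = 0; j < w.Length; j++)
                {
                    score += inputs[i][j] * w[j];
                }
                if (Math.Sign(score) == Math.Sign(targets[i]))
                {
                    correct++;
                }
            }
            return (float)correct / inputs.Length;
        }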
Example #3
        Tensor CallImpl(IGraphNodeBase inputs, dynamic? training)
        {
            IGraphNodeBase result = inputs;

            var batchNormExtraArgs = new Dictionary<string, object>();

            if (!(training is null))
            {
                batchNormExtraArgs["training"] = training;
            }

            // conv -> batch norm for every part; the activation is applied between parts,
            // while the last part is only activated after the skip connection below
            for (int part = 0; part < PartCount; part++)
            {
                result = this.convs[part].__call__(result);
                result = this.batchNorms[part].__call__(result, kwargs: batchNormExtraArgs);
                if (part + 1 != PartCount)
                {
                    result = this.activation.Invoke(result)!;
                }
            }

            // residual skip connection
            result = (Tensor)result + inputs;

            return(this.activation.Invoke(result)!);
        }
Example #4
        object callImpl(IGraphNodeBase inputs, dynamic training)
        {
            IGraphNodeBase result = inputs;

            var batchNormExtraArgs = new PythonDict<string, object>();

            if (training != null)
            {
                batchNormExtraArgs["training"] = training;
            }

            for (int part = 0; part < PartCount; part++)
            {
                result = this.convs[part].apply(result);
                result = this.batchNorms[part].apply(result, kwargs: batchNormExtraArgs);
                if (part + 1 != PartCount)
                {
                    result = tf.nn.relu(result);
                }
            }

            // residual skip connection
            result = (Tensor)result + inputs;

            return(tf.nn.relu(result));
        }
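The two call implementations above (Examples #3 and #4) follow the standard residual-block pattern: the skip connection is added back before the final activation. A minimal scalar sketch of that pattern, with hypothetical F and activation delegates standing in for the conv/batch-norm stack and the block's nonlinearity:

        // y = activation(F(x) + x): run the block, add the input back, then activate
        static float ResidualBlock(float x, Func<float, float> F, Func<float, float> activation)
            => activation(F(x) + x);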
Example #5
 public override dynamic __call__(IGraphNodeBase step)
 // linear warmup to initialLR over the first warmupSteps, then cosine decay down to finalLR
 => tf.cond(step < this.warmupSteps,
            PythonFunctionContainer.Of<Tensor>(() => (step / this.warmupSteps) * this.initialLR),
            PythonFunctionContainer.Of<Tensor>(() => this.finalLR
                                                + 0.5f * (this.initialLR - this.finalLR)
                                                * (1 + tf.cos(
                                                       (step - this.warmupSteps) / (this.totalSteps - this.warmupSteps)
                                                       * Math.PI)))
            );
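This schedule raises the learning rate linearly to initialLR over the first warmupSteps, then follows a half cosine from initialLR down to finalLR over the remaining steps. The same formula on plain floats, as a hypothetical helper outside the graph:

 // LR(step) = step/warmup * initialLR while warming up,
 // then finalLR + 0.5 * (initialLR - finalLR) * (1 + cos(pi * progress)) afterwards
 static float LearningRate(float step, float warmupSteps, float totalSteps, float initialLR, float finalLR)
     => step < warmupSteps
         ? step / warmupSteps * initialLR
         : finalLR + 0.5f * (initialLR - finalLR)
                   * (1 + (float)Math.Cos((step - warmupSteps) / (totalSteps - warmupSteps) * Math.PI));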
Example #6
        Tensor CallImpl(IGraphNodeBase input)
        {
            var result = (Tensor)input;

            for (int layerIndex = 0; layerIndex < this.innerLayers.Length; layerIndex++)
            {
                var   layer          = this.innerLayers[layerIndex];
                float frequencyScale = layerIndex == 0
                    ? this.InputFrequencyScale
                    : this.InnerFrequencyScale;
                result = tf.sin(layer.__call__(result) * frequencyScale);
            }

            return(result);
        }
Example #7
        Tensor CallImpl(IGraphNodeBase input, object? mask)
        {
            if (mask != null)
            {
                throw new NotImplementedException("mask");
            }
            var result = (Tensor)input;

            for (int layerIndex = 0; layerIndex < this.innerLayers.Length; layerIndex++)
            {
                var   layer          = this.innerLayers[layerIndex];
                float frequencyScale = layerIndex == 0
                    ? this.InputFrequencyScale
                    : this.InnerFrequencyScale;
                result = tf.sin(layer.__call__(result) * frequencyScale);
            }

            return(result);
        }
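The two CallImpl overloads above (Examples #6 and #7) apply the same forward pass: each inner layer's output is scaled by a per-layer frequency and passed through a sine, in the style of sinusoidal representation networks. A scalar sketch with hypothetical delegates standing in for the inner layers:

        // out = sin(frequencyScale * layer(in)), applied layer after layer;
        // the first layer uses inputFrequencyScale, the rest use innerFrequencyScale
        static float SineForward(float input, Func<float, float>[] layers,
                                 float inputFrequencyScale, float innerFrequencyScale)
        {
            float result = input;
            for (int i = 0; i < layers.Length; i++)
            {
                float frequencyScale = i == 0 ? inputFrequencyScale : innerFrequencyScale;
                result = (float)Math.Sin(layers[i](result) * frequencyScale);
            }
            return result;
        }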
Example #8
        public Tensor call(IEnumerable<IGraphNodeBase> trainableOutputs)
        {
            var output = trainableOutputs.ToArray();
            var loss   = YOLO.Loss.Zero;

            // accumulate the loss over every detection scale; the outputs arrive as (conv, pred) pairs
            for (int scaleIndex = 0; scaleIndex < this.strides.Length; scaleIndex++)
            {
                IGraphNodeBase conv = output[scaleIndex * 2];
                IGraphNodeBase pred = output[scaleIndex * 2 + 1];

                loss += YOLO.ComputeLoss((Tensor)pred, (Tensor)conv,
                                         targetLabels: this.trueLabels[scaleIndex],
                                         targetBBoxes: this.trueBoxes[scaleIndex],
                                         strideSize: this.strides[scaleIndex],
                                         classCount: this.classCount,
                                         intersectionOverUnionLossThreshold: YOLO.DefaultIntersectionOverUnionLossThreshold);
            }

            // register each loss component separately, then return their sum
            this.add_loss(loss.Conf);
            this.add_loss(loss.GIUO);
            this.add_loss(loss.Prob);
            return(loss.Conf + loss.GIUO + loss.Prob);
        }
Example #9
 public override Tensor call(IGraphNodeBase inputs, IGraphNodeBase? training = null, IEnumerable<IGraphNodeBase>? mask = null)
 {
     return(this.CallImpl((Tensor)inputs, training));
 }
Example #10
 public override Tensor call(IGraphNodeBase inputs, bool training, IGraphNodeBase? mask = null)
 {
     return(this.CallImpl((Tensor)inputs, training));
 }
Example #11
 public override Tensor call(IGraphNodeBase inputs, bool training, IGraphNodeBase? mask = null)
 => this.CallImpl(inputs, mask);
Example #12
 public override Tensor call(IGraphNodeBase inputs, bool training)
 => base.call(inputs, this.trainable && training);
Example #13
 public override Tensor call(IGraphNodeBase inputs, IGraphNodeBase? training = null)
 => base.call(inputs, this.ShouldTrain(training));
Example #14
 public override Tensor call(IGraphNodeBase inputs, IGraphNodeBase? training = null, IEnumerable<IGraphNodeBase>? mask = null)
 => this.CallImpl(inputs, mask);
Example #15
 public static Tensor Mish(IGraphNodeBase input)
 // https://github.com/hunglc007/tensorflow-yolov4-tflite/commit/a61f81f9118df9cec4d53736648174f6fb113e5f#diff-69d62c22a92472901b83e55ac7c153317c649564d4ae9945dcaed27d37295867R41
 => input * tf.tanh(tf.nn.softplus(input));
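Mish(x) = x · tanh(softplus(x)), where softplus(x) = ln(1 + eˣ). As a plain scalar sketch (a hypothetical helper, not part of the tensor API above):

 // Mish(x) = x * tanh(ln(1 + e^x))
 static float MishScalar(float x)
     => x * MathF.Tanh(MathF.Log(1 + MathF.Exp(x)));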
Example #16
 public IGraphNodeBase __call__(IGraphNodeBase input) => this.__call___dyn(input);
Example #17
 public override Tensor call(IGraphNodeBase inputs, params object[] args)
 => this.CallImpl(inputs);
Example #18
 public override Tensor call(IGraphNodeBase inputs, IGraphNodeBase training, IGraphNodeBase mask)
 => this.CallImpl(inputs, mask);
Example #19
 public Tensor Get(IGraphNodeBase step) => this.__call__(step);
Example #20
File: YOLOv4.cs Project: molekm/YOLOv4
 public static Output Apply(IGraphNodeBase input, int classCount)
 {
     if (classCount <= 0)
     {
         throw new ArgumentOutOfRangeException(nameof(classCount));
     }
Example #21
 public override dynamic call(IEnumerable<IGraphNodeBase> inputs, ImplicitContainer<IGraphNodeBase> training, IGraphNodeBase mask)
 {
     return(this.callImpl((Tensor)inputs.Single(), training));
 }
Example #22
 public override object call(object inputs, bool training, IGraphNodeBase mask = null)
 {
     return(this.callImpl((Tensor)inputs, training));
 }