public static Tensor Conv(IGraphNodeBase input, int[] filtersShape,
                          Func<Tensor, Tensor>? activation,
                          bool downsample = false, bool batchNorm = true) {
    if (input is null) { throw new ArgumentNullException(nameof(input)); }
    if (filtersShape is null) { throw new ArgumentNullException(nameof(filtersShape)); }

    int strides = 1;
    IGraphNodeBase convolutionInput = input;
    string padding = "same";
    if (downsample) {
        // pad one row on top and one column on the left, then convolve with stride 2 and "valid" padding
        convolutionInput = ZeroPadding2D.NewDyn(padding: ((1, 0), (1, 0))).__call__(input);
        padding = "valid";
        strides = 2;
    }
    var convLayer = new Conv2D(filters: filtersShape[^1], kernel_size: filtersShape[0],
static dynamic Inference(IGraphNodeBase W, IGraphNodeBase b, dynamic inputData, dynamic targetData) {
    var prediction = tf.sign_dyn(tf.subtract(tf.matmul(inputData, W), b));
    var accuracy = tf.reduce_mean(tf.cast(tf.equal(prediction, targetData), tf.float32));
    return accuracy;
}
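// For reference, a minimal plain-C# sketch of the same decision rule on scalar features,
// assuming labels in {-1, +1}: predict sign(x·w - b) and report the fraction of matches.
// The names below (ClassificationAccuracy, weights, bias) are illustrative, not part of the API used above.
static double ClassificationAccuracy(double[][] inputs, double[] weights, double bias, double[] labels) {
    int correct = 0;
    for (int i = 0; i < inputs.Length; i++) {
        double dot = 0;
        for (int j = 0; j < weights.Length; j++) { dot += inputs[i][j] * weights[j]; }
        // sign(x·w - b) is the predicted label
        if (Math.Sign(dot - bias) == labels[i]) { correct++; }
    }
    return (double)correct / inputs.Length;
}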
Tensor CallImpl(IGraphNodeBase inputs, dynamic? training) {
    IGraphNodeBase result = inputs;
    var batchNormExtraArgs = new Dictionary<string, object>();
    if (!(training is null)) { batchNormExtraArgs["training"] = training; }

    for (int part = 0; part < PartCount; part++) {
        result = this.convs[part].__call__(result);
        result = this.batchNorms[part].__call__(result, kwargs: batchNormExtraArgs);
        if (part + 1 != PartCount) {
            result = this.activation.Invoke(result)!;
        }
    }
    result = (Tensor)result + inputs;
    return this.activation.Invoke(result)!;
}
object callImpl(IGraphNodeBase inputs, dynamic training) {
    IGraphNodeBase result = inputs;
    var batchNormExtraArgs = new PythonDict<string, object>();
    if (training != null) { batchNormExtraArgs["training"] = training; }

    for (int part = 0; part < PartCount; part++) {
        result = this.convs[part].apply(result);
        result = this.batchNorms[part].apply(result, kwargs: batchNormExtraArgs);
        if (part + 1 != PartCount) {
            result = tf.nn.relu(result);
        }
    }
    // residual (skip) connection: add the block input back before the final activation
    result = (Tensor)result + inputs;
    return tf.nn.relu(result);
}
public override dynamic __call__(IGraphNodeBase step)
    => tf.cond(step < this.warmupSteps,
        PythonFunctionContainer.Of<Tensor>(() => (step / this.warmupSteps) * this.initialLR),
        PythonFunctionContainer.Of<Tensor>(() => this.finalLR
            + 0.5f * (this.initialLR - this.finalLR)
            * (1 + tf.cos((step - this.warmupSteps) / (this.totalSteps - this.warmupSteps) * Math.PI))));
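// The same schedule written as plain C# (no TensorFlow), to make the two branches of the
// tf.cond explicit: linear warmup to initialLR, then cosine decay to finalLR. The method
// name and parameters below are illustrative stand-ins for the fields used above.
static float LearningRateAt(int step, int warmupSteps, int totalSteps, float initialLR, float finalLR) {
    if (step < warmupSteps)
        return (float)step / warmupSteps * initialLR;   // linear warmup from 0 to initialLR
    // cosine decay from initialLR (at step == warmupSteps) down to finalLR (at step == totalSteps)
    double progress = (double)(step - warmupSteps) / (totalSteps - warmupSteps);
    return finalLR + 0.5f * (initialLR - finalLR) * (float)(1 + Math.Cos(progress * Math.PI));
}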
Tensor CallImpl(IGraphNodeBase input) {
    var result = (Tensor)input;
    for (int layerIndex = 0; layerIndex < this.innerLayers.Length; layerIndex++) {
        var layer = this.innerLayers[layerIndex];
        float frequencyScale = layerIndex == 0 ? this.InputFrequencyScale : this.InnerFrequencyScale;
        result = tf.sin(layer.__call__(result) * frequencyScale);
    }
    return result;
}
Tensor CallImpl(IGraphNodeBase input, object? mask) {
    if (mask != null) { throw new NotImplementedException("mask"); }

    var result = (Tensor)input;
    for (int layerIndex = 0; layerIndex < this.innerLayers.Length; layerIndex++) {
        var layer = this.innerLayers[layerIndex];
        float frequencyScale = layerIndex == 0 ? this.InputFrequencyScale : this.InnerFrequencyScale;
        result = tf.sin(layer.__call__(result) * frequencyScale);
    }
    return result;
}
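// Both CallImpl variants above stack dense layers with sine activations, scaling each
// pre-activation by a per-layer frequency factor. A scalar sketch of one such step, with
// illustrative names (weight, bias) standing in for layer.__call__:
static double SineLayerStep(double x, double weight, double bias, double frequencyScale)
    => Math.Sin((weight * x + bias) * frequencyScale);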
public Tensor call(IEnumerable<IGraphNodeBase> trainableOutputs) {
    var output = trainableOutputs.ToArray();
    var loss = YOLO.Loss.Zero;
    for (int scaleIndex = 0; scaleIndex < this.strides.Length; scaleIndex++) {
        // raw convolution output and decoded prediction are interleaved per scale
        IGraphNodeBase conv = output[scaleIndex * 2];
        IGraphNodeBase pred = output[scaleIndex * 2 + 1];
        loss += YOLO.ComputeLoss((Tensor)pred, (Tensor)conv,
            targetLabels: this.trueLabels[scaleIndex],
            targetBBoxes: this.trueBoxes[scaleIndex],
            strideSize: this.strides[scaleIndex],
            classCount: this.classCount,
            intersectionOverUnionLossThreshold: YOLO.DefaultIntersectionOverUnionLossThreshold);
    }
    this.add_loss(loss.Conf);
    this.add_loss(loss.GIUO);
    this.add_loss(loss.Prob);
    return loss.Conf + loss.GIUO + loss.Prob;
}
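// A hedged sketch of the per-component loss accumulator the loop above relies on: a value
// with GIUO, Conf, and Prob parts, a Zero element, and component-wise addition. The real
// YOLO.Loss type operates on tensors; LossParts below only illustrates the pattern.
readonly struct LossParts {
    public readonly float GIUO, Conf, Prob;
    public LossParts(float giuo, float conf, float prob) { this.GIUO = giuo; this.Conf = conf; this.Prob = prob; }
    public static LossParts Zero => new LossParts(0, 0, 0);
    public static LossParts operator +(LossParts a, LossParts b)
        => new LossParts(a.GIUO + b.GIUO, a.Conf + b.Conf, a.Prob + b.Prob);
    public float Total => this.GIUO + this.Conf + this.Prob;
}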
public override Tensor call(IGraphNodeBase inputs, IGraphNodeBase? training = null, IEnumerable<IGraphNodeBase>? mask = null) { return this.CallImpl((Tensor)inputs, training); }
public override Tensor call(IGraphNodeBase inputs, bool training, IGraphNodeBase? mask = null) { return this.CallImpl((Tensor)inputs, training); }
public override Tensor call(IGraphNodeBase inputs, bool training, IGraphNodeBase? mask = null) => this.CallImpl(inputs, mask);
public override Tensor call(IGraphNodeBase inputs, bool training) => base.call(inputs, this.trainable && training);
public override Tensor call(IGraphNodeBase inputs, IGraphNodeBase? training = null) => base.call(inputs, this.ShouldTrain(training));
public override Tensor call(IGraphNodeBase inputs, IGraphNodeBase? training = null, IEnumerable<IGraphNodeBase>? mask = null) => this.CallImpl(inputs, mask);
public static Tensor Mish(IGraphNodeBase input)
    // https://github.com/hunglc007/tensorflow-yolov4-tflite/commit/a61f81f9118df9cec4d53736648174f6fb113e5f#diff-69d62c22a92472901b83e55ac7c153317c649564d4ae9945dcaed27d37295867R41
    => input * tf.tanh(tf.nn.softplus(input));
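// Mish is x * tanh(softplus(x)), with softplus(x) = ln(1 + e^x). A plain-C# scalar sketch of
// the same activation, for reference only:
static double MishScalar(double x)
    => x * Math.Tanh(Math.Log(1 + Math.Exp(x)));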
public IGraphNodeBase __call__(IGraphNodeBase input) => this.__call___dyn(input);
public override Tensor call(IGraphNodeBase inputs, params object[] args) => this.CallImpl(inputs);
public override Tensor call(IGraphNodeBase inputs, IGraphNodeBase training, IGraphNodeBase mask) => this.CallImpl(inputs, mask);
public Tensor Get(IGraphNodeBase step) => this.__call__(step);
public static Output Apply(IGraphNodeBase input, int classCount) {
    if (classCount <= 0) { throw new ArgumentOutOfRangeException(nameof(classCount)); }
public override dynamic call(IEnumerable<IGraphNodeBase> inputs, ImplicitContainer<IGraphNodeBase> training, IGraphNodeBase mask) { return this.callImpl((Tensor)inputs.Single(), training); }
public override object call(object inputs, bool training, IGraphNodeBase mask = null) { return this.callImpl((Tensor)inputs, training); }