Example 1
0
        public (Tensor, Tensor) __call__(Tensor inputs,
                                         Tensor training     = null,
                                         VariableScope scope = null)
        {
            _set_scope(scope);
            _graph = ops._get_graph_from_inputs(new Tensor[] { inputs }, graph: _graph);

            // A scope manager is only needed on the first call, before the layer
            // is built; once built, no variable scope is re-entered here.
            variable_scope scopeManager = null;
            if (!built)
            {
                scopeManager = tf.variable_scope(_scope,
                                                 reuse: _reuse,
                                                 auxiliary_name_scope: false);
            }

            (Tensor, Tensor) result = (null, null);
            tf_with(scopeManager, innerScope =>
            {
                _current_scope = innerScope;
                // Delegate the actual computation to the base layer.
                result = base.__call__(new Tensor[] { inputs }, training: training);
            });

            // Publish pending update ops into the global UPDATE_OPS collection.
            _add_elements_to_collection(_updates.ToArray(), new string[] { tf.GraphKeys.UPDATE_OPS });

            return result;
        }
Example 2
0
        static (Tensor xywh, Tensor conf, Tensor prob) DecodeCommon(
            Tensor convOut, int outputSize, int classCount,
            ReadOnlySpan <int> strides, Tensor <int> anchors,
            int scaleIndex, ReadOnlySpan <float> xyScale)
        {
            // Every op for this detection scale lives under its own variable scope.
            var scaleScope = new variable_scope("scale" + scaleIndex.ToString(System.Globalization.CultureInfo.InvariantCulture));

            using var _ = scaleScope.StartUsing();
            Tensor batch = tf.shape(convOut)[0];

            // Reshape to [batch, grid, grid, 3 anchors, 4 box + 1 objectness + classCount].
            convOut = tf.reshape_dyn(convOut, new object[] { batch, outputSize, outputSize, 3, 5 + classCount });
            Tensor[] parts = tf.split(convOut, new[] { 2, 2, 1, classCount }, axis: -1);
            var (rawDxDy, rawDwDh, rawConf, rawProb) = parts;

            // Integer cell coordinates, tiled across batch and the 3 anchors.
            var grid = tf.meshgrid(tf.range_dyn(outputSize), tf.range_dyn(outputSize));
            grid = tf.expand_dims(tf.stack(grid, axis: -1), axis: 2); // [gx, gy, 1, 2]
            Tensor xyGrid = tf.tile_dyn(
                tf.expand_dims(grid, axis: 0),
                new object[] { tf.shape(convOut)[0], 1, 1, 3, 1 });
            xyGrid = tf.cast(xyGrid, tf.float32);

            float scale = xyScale[scaleIndex];

            // Decode offsets into absolute xy (in input pixels) and wh (anchor-scaled).
            var xy   = ((tf.sigmoid(rawDxDy) * scale) - 0.5 * (scale - 1) + xyGrid) * strides[scaleIndex];
            var wh   = tf.exp(rawDwDh) * tf.cast(anchors[scaleIndex], tf.float32);
            var xywh = tf.concat(new[] { xy, wh }, axis: -1);

            return (xywh, conf: tf.sigmoid(rawConf), prob: tf.sigmoid(rawProb));
        }
Example 3
0
        public Tensor __call__(Tensor inputs,
                               Tensor training     = null,
                               VariableScope scope = null)
        {
            _set_scope(scope);
            _graph = ops._get_graph_from_inputs(new List <Tensor> { inputs }, graph: _graph);

            // Re-entering an already-built layer needs no variable-scope manager.
            variable_scope scopeManager = null;
            if (!built)
            {
                scopeManager = tf.variable_scope(_scope, auxiliary_name_scope: false);
            }

            Python.with(scopeManager, innerScope => _current_scope = innerScope);

            // Delegate the actual computation to the base layer.
            Tensor result = base.__call__(inputs, training: training);

            // Publish pending update ops into the global UPDATE_OPS collection.
            _add_elements_to_collection(_updates.ToArray(), new string[] { ops.GraphKeys.UPDATE_OPS });

            return result;
        }
Example 4
0
        public Tensor __call__(Tensor inputs,
                               VariableScope scope = null)
        {
            _set_scope(scope);
            _graph = ops._get_graph_from_inputs(new List <Tensor> { inputs }, graph: _graph);

            // Re-entering an already-built layer needs no variable-scope manager.
            variable_scope scopeManager = null;
            if (!built)
            {
                scopeManager = tf.variable_scope(_scope, auxiliary_name_scope: false);
            }

            Python.with(scopeManager, innerScope => _current_scope = innerScope);

            // The base layer is invoked for its side effects, but this overload
            // is not finished: it always throws after the call.
            var unused = base.__call__(inputs);

            throw new NotImplementedException("");
        }
Example 5
0
        public static Tensor ClipButPassGradient(Tensor input, float min, float max)
        {
            using var _ = new variable_scope("clip_val_pass_grad").StartUsing();

            // Masks selecting the elements that fall outside [min, max].
            Tensor aboveMax = tf.cast(input > max, tf.float32);
            Tensor belowMin = tf.cast(input < min, tf.float32);

            // Forward pass clamps to [min, max]; stop_gradient makes the
            // backward pass behave like the identity function.
            Tensor correction = ((max - input) * aboveMax)
                                + ((min - input) * belowMin);

            return input + tf.stop_gradient(correction);
        }
Example 6
0
        public void ReenterVariableScope()
        {
            variable_scope vs = null;

            with(tf.variable_scope("foo"), v => vs = v);

            // Re-enter the variable scope.
            with(tf.variable_scope(vs, auxiliary_name_scope: false), v =>
            {
                var vs1 = (VariableScope)v;
                // Restore the original name_scope.
                with(tf.name_scope(vs1.original_name_scope), delegate
                {
                    var v1 = tf.get_variable("v", new TensorShape(1));
                    // FIX: Assert.AreEqual takes (expected, actual); the original
                    // passed them reversed, which inverts the failure message.
                    Assert.AreEqual("foo/v:0", v1.name);
                    var c1 = tf.constant(new int[] { 1 }, name: "c");
                    Assert.AreEqual("foo/c:0", c1.name);
                });
            });
        }
Example 7
0
        public Tensors __call__(Tensors inputs,
                                Tensor state        = null,
                                Tensor training     = null,
                                VariableScope scope = null)
        {
            _set_scope(scope);
            _graph = ops._get_graph_from_inputs(inputs, graph: _graph);

            // First call builds the variables; subsequent calls re-enter the
            // scope with reuse forced on.
            variable_scope scope_context_manager = null;

            if (built)
            {
                scope_context_manager = tf.variable_scope(_scope,
                                                          reuse: true,
                                                          auxiliary_name_scope: false);
            }
            else
            {
                scope_context_manager = tf.variable_scope(_scope,
                                                          reuse: _reuse,
                                                          auxiliary_name_scope: false);
            }

            Tensors outputs = null;

            tf_with(scope_context_manager, scope2 =>
            {
                _current_scope = scope2;
                // Actually call layer.
                // FIX: the original ternary was "training == null ? false : false",
                // which evaluates to false in both branches, so training mode was
                // never enabled. A non-null training tensor now enables it.
                outputs = base.Apply(inputs,
                                     state: state,
                                     is_training: training == null ? false : true);
            });

            // Publish pending update ops into the global UPDATE_OPS collection.
            _add_elements_to_collection(updates.ToArray(), new string[] { tf.GraphKeys.UPDATE_OPS });

            return outputs;
        }
Example 8
0
        public static Tensor GaussianLikelihood(Tensor input, Tensor mu, Tensor logStd, string?name = null)
        {
            if (input is null)
            {
                throw new ArgumentNullException(nameof(input));
            }
            if (mu is null)
            {
                throw new ArgumentNullException(nameof(mu));
            }
            if (logStd is null)
            {
                throw new ArgumentNullException(nameof(logStd));
            }

            using var _ = new variable_scope("gaussian_likelihood").StartUsing();

            // Per-element log density of a diagonal Gaussian:
            // -0.5 * (((x - mu) / std)^2 + 2 * logStd + log(2*pi)),
            // with Epsilon keeping the division away from zero.
            var std        = tf.exp(logStd) + Epsilon;
            var squaredZ   = tf.square((input - mu) / std);
            var logDensity = (squaredZ + logStd * 2 + MathF.Log(2 * MathF.PI)) * -0.5;

            // Sum over the feature axis: one log-likelihood per sample.
            return tf.reduce_sum(logDensity, axis: 1, name: name);
        }