Code example #1
        public static Tensor softmax_cross_entropy_with_logits_v2_helper(Tensor labels,
                                                                         Tensor logits,
                                                                         int axis    = -1,
                                                                         string name = null)
        {
            return(Python.with(ops.name_scope(name, "softmax_cross_entropy_with_logits", new { }), scope =>
            {
                var precise_logits = logits;
                var input_rank = array_ops.rank(precise_logits);
                var shape = logits.GetShape();

                if (axis != -1)
                {
                    throw new NotImplementedException("softmax_cross_entropy_with_logits_v2_helper axis != -1");
                }

                var input_shape = array_ops.shape(precise_logits);

                // Do the actual op computation.
                // The second output tensor contains the gradients.  We use it in
                // _CrossEntropyGrad() in nn_grad but not here.

                var(cost, unused_backprop) = gen_nn_ops.softmax_cross_entropy_with_logits(precise_logits, labels, name: name);

                // The output cost shape should be the input minus axis.
                var output_shape = array_ops.slice(input_shape,
                                                   new int[] { 0 },
                                                   new Tensor[] { math_ops.subtract(input_rank, 1) });

                cost = array_ops.reshape(cost, output_shape);

                return cost;
            }));
        }
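The slice above implements the comment: the cost keeps every input dimension except the trailing class axis. A minimal standalone sketch of that shape arithmetic (plain C# on assumed static shapes, not library code):

        // using System;
        static int[] CostShapeSketch(int[] logitsShape)
        {
            // Keep dimensions [0, rank - 1): everything except the trailing class axis.
            var outputShape = new int[logitsShape.Length - 1];
            Array.Copy(logitsShape, outputShape, logitsShape.Length - 1);
            return outputShape;
        }
        // CostShapeSketch(new[] { 32, 10 }) -> [32]: one loss value per example.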
Code example #2
        public (Tensor, Tensor, Tensor) _remove_squeezable_dimensions(Tensor labels,
                                                                      Tensor predictions,
                                                                      float weights          = 0,
                                                                      int expected_rank_diff = 0)
        {
            (labels, predictions) = confusion_matrix.remove_squeezable_dimensions(
                labels, predictions, expected_rank_diff: expected_rank_diff);

            if (weights > 0)
            {
                var weights_tensor = ops.convert_to_tensor(weights);
                var labels_rank    = labels.GetShape().NDim;
                var weights_shape  = weights_tensor.GetShape();
                var weights_rank   = weights_shape.NDim;

                if (labels_rank > -1 && weights_rank > -1)
                {
                    // Use static rank.
                    var rank_diff = weights_rank - labels_rank;
                    if (rank_diff == 1)
                    {
                        weights_tensor = array_ops.squeeze(weights_tensor, new int[] { -1 });
                    }
                    return(labels, predictions, weights_tensor);
                }

                // Use dynamic rank.
                throw new NotImplementedException("_remove_squeezable_dimensions dynamic rank");
            }

            throw new NotImplementedException("_remove_squeezable_dimensions");
        }
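For intuition, the static-rank branch handles the common case where per-example weights arrive with a trailing singleton dimension, e.g. weights of shape [batch, 1] against labels of shape [batch]. A hypothetical plain-C# sketch of that squeeze on static shapes (not part of the library):

        // using System; using System.Linq;
        static int[] SqueezeLastSketch(int[] shape)
        {
            // Squeezing axis -1 is only valid when the trailing dimension is 1.
            if (shape[shape.Length - 1] != 1)
                throw new ArgumentException("last dimension must be 1 to squeeze");
            return shape.Take(shape.Length - 1).ToArray();
        }
        // SqueezeLastSketch(new[] { 32, 1 }) -> [32], matching labels of rank 1.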
Code example #3
        /// <summary>
        /// Squeeze last dim if ranks differ from expected by exactly 1.
        /// </summary>
        /// <param name="labels"></param>
        /// <param name="predictions"></param>
        /// <param name="expected_rank_diff"></param>
        /// <param name="name"></param>
        /// <returns></returns>
        public static (Tensor, Tensor) remove_squeezable_dimensions(Tensor labels,
                                                                    Tensor predictions,
                                                                    int expected_rank_diff = 0,
                                                                    string name            = null)
        {
            return(with(ops.name_scope(name, default_name: "remove_squeezable_dimensions", (labels, predictions)), delegate
            {
                predictions = ops.convert_to_tensor(predictions);
                labels = ops.convert_to_tensor(labels);
                var predictions_shape = predictions.GetShape();
                var predictions_rank = predictions_shape.NDim;
                var labels_shape = labels.GetShape();
                var labels_rank = labels_shape.NDim;
                if (labels_rank > -1 && predictions_rank > -1)
                {
                    // Use static rank.
                    var rank_diff = predictions_rank - labels_rank;
                    if (rank_diff == expected_rank_diff + 1)
                    {
                        predictions = array_ops.squeeze(predictions, new int[] { -1 });
                    }
                    else if (rank_diff == expected_rank_diff - 1)
                    {
                        labels = array_ops.squeeze(labels, new int[] { -1 });
                    }
                    return (labels, predictions);
                }

                // Use dynamic rank.
                throw new NotImplementedException("remove_squeezable_dimensions dynamic rank");
            }));
        }
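The static-rank rule above reduces to a three-way decision on the rank difference. A minimal sketch using assumed ranks only (hypothetical helper, not part of the library):

        static string SqueezeDecision(int predictionsRank, int labelsRank, int expectedRankDiff)
        {
            int rankDiff = predictionsRank - labelsRank;
            if (rankDiff == expectedRankDiff + 1) return "squeeze predictions' last dim";
            if (rankDiff == expectedRankDiff - 1) return "squeeze labels' last dim";
            return "leave both unchanged";
        }
        // SqueezeDecision(2, 1, 0) -> "squeeze predictions' last dim"
        //   e.g. predictions [batch, 1] against labels [batch].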
Code example #4
        public static Tensor broadcast_weights(Tensor weights, Tensor values)
        {
            return(with(ops.name_scope(null, "broadcast_weights", (weights, values)), scope =>
            {
                values = ops.convert_to_tensor(values, name: "values");
                weights = ops.convert_to_tensor(
                    weights, dtype: values.dtype.as_base_dtype(), name: "weights");

                // Try static check for exact match.
                var weights_shape = weights.GetShape();
                var values_shape = values.GetShape();
                if (weights_shape.is_fully_defined() &&
                    values_shape.is_fully_defined())
                {
                    return weights;
                }

                return math_ops.multiply(
                    weights, array_ops.ones_like(values), name: scope);
            }));
        }
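When the static check cannot prove a match, multiplying by ones_like(values) materializes the broadcast. A plain-C# sketch of the effect in the simplest case, a scalar weight stretched to the values' shape (illustrative only; real broadcasting also covers higher-rank weights):

        static float[,] BroadcastScalarWeightSketch(float weight, int rows, int cols)
        {
            // weight * ones_like(values): every position receives the same weight.
            var result = new float[rows, cols];
            for (int r = 0; r < rows; r++)
                for (int c = 0; c < cols; c++)
                    result[r, c] = weight * 1.0f;
            return result;
        }
        // BroadcastScalarWeightSketch(0.5f, 2, 3) -> a 2x3 array filled with 0.5.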
Code example #5
        /// <summary>
        /// Computes sparse softmax cross entropy between `logits` and `labels`.
        /// </summary>
        /// <param name="labels"></param>
        /// <param name="logits"></param>
        /// <param name="name"></param>
        /// <returns></returns>
        public static Tensor sparse_softmax_cross_entropy_with_logits(Tensor labels = null,
                                                                      Tensor logits = null, string name = null)
        {
            // Reshape logits and labels to rank 2.
            return(with(ops.name_scope(name, default_name: "SparseSoftmaxCrossEntropyWithLogits", (labels, logits)), delegate
            {
                labels = ops.convert_to_tensor(labels);
                logits = ops.convert_to_tensor(logits);
                var precise_logits = logits.dtype == TF_DataType.TF_HALF ? math_ops.cast(logits, dtypes.float32) : logits;

                // Store label shape for result later.
                var labels_static_shape = labels.GetShape();
                var labels_shape = array_ops.shape(labels);

                // bool static_shapes_fully_defined = (
                //     labels_static_shape.is_fully_defined() &&
                //     logits.get_shape()[:-1].is_fully_defined());

                // Check if no reshapes are required.
                if (logits.GetShape().NDim == 2)
                {
                    var(cost, _) = gen_nn_ops.sparse_softmax_cross_entropy_with_logits(
                        precise_logits, labels, name: name);
                    if (logits.dtype == dtypes.float16)
                    {
                        return math_ops.cast(cost, dtypes.float32);
                    }
                    else
                    {
                        return cost;
                    }
                }

                // Perform a check of the dynamic shapes if the static shapes are not fully
                // defined.
                throw new NotImplementedException("sparse_softmax_cross_entropy_with_logits");
            }));
        }
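For reference, the rank-2 fast path computes, per row, the negative log of the softmax probability at the label index. A standalone numerically-stable sketch of that formula for a single row (plain C#, not the library kernel):

        // using System; using System.Linq;
        static float SparseXentRowSketch(float[] logits, int label)
        {
            // Subtract the max before exponentiating for numerical stability.
            double max = logits.Max();
            double sumExp = logits.Sum(v => Math.Exp(v - max));
            double logProb = (logits[label] - max) - Math.Log(sumExp);
            return (float)(-logProb);   // loss = -log softmax(logits)[label]
        }
        // SparseXentRowSketch(new[] { 1.0f, 2.0f, 3.0f }, 2) ≈ 0.4076f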
Code example #6
        /// <summary>
        /// Computes dropout.
        /// </summary>
        /// <param name="x"></param>
        /// <param name="rate"></param>
        /// <param name="noise_shape"></param>
        /// <param name="seed"></param>
        /// <param name="name"></param>
        /// <returns></returns>
        public static Tensor dropout_v2(Tensor x, Tensor rate, Tensor noise_shape = null, int? seed = null, string name = null)
        {
            return(with(ops.name_scope(name, "dropout", x), scope =>
            {
                name = scope;
                x = ops.convert_to_tensor(x, name: "x");
                if (!x.dtype.is_floating())
                {
                    throw new NotImplementedException($"x has to be a floating point tensor since it's going to" +
                                                      $" be scaled. Got a {x.dtype} tensor instead.");
                }

                rate = ops.convert_to_tensor(rate, dtype: x.dtype, name: "rate");
                // Do nothing if we know rate == 0
                var val = tensor_util.constant_value(rate);
                if (!(val is null) && val.Data<float>(0) == 0)
                {
                    return x;
                }

                noise_shape = _get_noise_shape(x, noise_shape);

                // Sample a uniform distribution on [0.0, 1.0) and select values larger than
                // rate.
                //
                // NOTE: Random uniform actually can only generate 2^23 floats on [1.0, 2.0)
                // and subtract 1.0.
                var random_tensor = random_ops.random_uniform(noise_shape, seed: seed, dtype: x.dtype);
                var keep_prob = 1.0f - rate;
                var scale = 1.0f / keep_prob;
                // NOTE: if (1.0 + rate) - 1 is equal to rate, then we want to consider that
                // float to be selected, hence we use a >= comparison.
                var keep_mask = random_tensor >= rate;
                var ret = x * scale * math_ops.cast(keep_mask, x.dtype);
                ret.SetShape(x.GetShape());
                return ret;
            }));
        }
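The sampling step above is standard inverted dropout: keep a unit with probability 1 - rate and scale survivors by 1 / (1 - rate) so the expected value is unchanged. A minimal standalone sketch on a plain float array (assumed shapes; not the library op):

        // using System;
        static float[] DropoutSketch(float[] x, float rate, Random rng)
        {
            float scale = 1.0f / (1.0f - rate);
            var ret = new float[x.Length];
            for (int i = 0; i < x.Length; i++)
            {
                // Keep when the uniform sample lands at or above `rate`,
                // mirroring the >= comparison used above.
                bool keep = rng.NextDouble() >= rate;
                ret[i] = keep ? x[i] * scale : 0.0f;
            }
            return ret;
        }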
Code example #7
        private void _init_from_args(object initial_value,
                                     bool trainable            = true,
                                     List<string> collections  = null,
                                     bool validate_shape       = true,
                                     string caching_device     = "",
                                     string name       = null,
                                     TF_DataType dtype = TF_DataType.DtInvalid)
        {
            if (initial_value is null)
            {
                throw new ValueError("initial_value must be specified.");
            }

            var init_from_fn = initial_value.GetType().Name == "Func`1";

            if (collections == null)
            {
                collections = new List<string> {
                    ops.GraphKeys.GLOBAL_VARIABLES
                };
            }

            // Store the graph key so optimizers know how to only retrieve variables from
            // this graph.
            _graph_key = ops.get_default_graph().graph_key;

            _trainable = trainable;
            if (trainable && !collections.Contains(ops.GraphKeys.TRAINABLE_VARIABLES))
            {
                collections.Add(ops.GraphKeys.TRAINABLE_VARIABLES);
            }

            ops.init_scope();
            var values = init_from_fn ? new object[0] : new object[] { initial_value };

            with(ops.name_scope(name, "Variable", values), scope =>
            {
                name = scope;
                if (init_from_fn)
                {
                    // Use attr_scope and device(None) to simulate the behavior of
                    // colocate_with when the variable we want to colocate with doesn't
                    // yet exist.
                    string true_name = ops._name_from_scope_name(name);
                    var attr         = new AttrValue
                    {
                        List = new AttrValue.Types.ListValue()
                    };
                    attr.List.S.Add(ByteString.CopyFromUtf8($"loc:{true_name}"));
                    with(ops.name_scope("Initializer"), scope2 =>
                    {
                        _initial_value = (initial_value as Func<Tensor>)();
                        _initial_value = ops.convert_to_tensor(_initial_value, name: "initial_value", dtype: dtype);
                    });
                    _variable = state_ops.variable_op_v2(_initial_value.shape, _initial_value.dtype.as_base_dtype(), name: name);
                }
                // Or get the initial value from a Tensor or Python object.
                else
                {
                    _initial_value = ops.convert_to_tensor(initial_value, name: "initial_value");

                    var shape = _initial_value.shape;
                    dtype     = _initial_value.dtype;
                    _variable = gen_state_ops.variable_v2(shape, dtype.as_base_dtype(), scope);
                }

                // Manually overrides the variable's shape with the initial value's.
                if (validate_shape)
                {
                    var initial_value_shape = _initial_value.GetShape();
                    if (!initial_value_shape.is_fully_defined())
                    {
                        throw new ValueError($"initial_value must have a shape specified: {_initial_value}");
                    }
                }

                // If 'initial_value' makes use of other variables, make sure we don't
                // have an issue if these other variables aren't initialized first by
                // using their initialized_value() method.
                var _initial_value2 = _try_guard_against_uninitialized_dependencies(_initial_value);

                _initializer_op = gen_state_ops.assign(_variable, _initial_value2, validate_shape).op;

                if (!String.IsNullOrEmpty(caching_device))
                {
                    // TODO: caching devices are not handled in this port yet.
                }
                else
                {
                    ops.colocate_with(_initializer_op);

                    _snapshot = gen_array_ops.identity(_variable, name = "read");
                }

                ops.add_to_collections(collections, this as VariableV1);
            });
        }
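The init_from_fn test above relies on the CLR's type-name convention: a Func<T> with a single generic argument reports its runtime type name as "Func`1". A small illustrative check:

        // using System;
        Func<int> factory = () => 42;
        Console.WriteLine(factory.GetType().Name);   // prints "Func`1"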