Example #1
 public static Tensor embedding_lookup(RefVariable @params,
                                       Tensor ids,
                                       string partition_strategy = "mod",
                                       string name = null) => embedding_ops._embedding_lookup_and_transform(@params,
                                                                                                            ids,
                                                                                                            partition_strategy: partition_strategy,
                                                                                                            name: name);
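A minimal usage sketch for this wrapper (not from the source): the shapes, the `tf.random_normal` initializer, and the calling surface are illustrative assumptions.

    // Sketch: look up three rows of an embedding matrix by integer id.
    // Vocab size 1000 and dimension 64 are made-up values.
    var embeddings = tf.Variable(tf.random_normal(new[] { 1000, 64 }), name: "embeddings");
    var ids        = tf.constant(new[] { 3, 7, 42 });
    var vectors    = embedding_lookup(embeddings, ids);   // Tensor of shape (3, 64)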
Example #2
 public static Tensor assign_sub(RefVariable @ref,
                                 Tensor value,
                                 bool use_locking = false,
                                 string name      = null) => gen_state_ops.assign_sub(@ref,
                                                                                      value,
                                                                                      use_locking: use_locking,
                                                                                      name: name);
Example #3
        private static Tensor op_helper <T>(string default_name, RefVariable x, T y)
        {
            var xVal = x.value();

            return(tf_with(ops.name_scope(null, default_name, new { xVal, y }), scope =>
            {
                string name = scope;
                var yTensor = ops.convert_to_tensor(y, xVal.dtype.as_base_dtype(), "y");
                Tensor result = null;
                switch (default_name)
                {
                case "add":
                    result = gen_math_ops.add(xVal, yTensor, name);
                    break;

                case "sub":
                    result = gen_math_ops.sub(xVal, yTensor, name);
                    break;

                default:
                    throw new NotImplementedException("");
                }
                return result;
            }));
        }
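This private helper is presumably the backing for arithmetic operator overloads on RefVariable; a hedged sketch of how such wiring could look (the operator declarations below are assumptions, not from the source):

    public partial class RefVariable
    {
        // Sketch: operators forward to op_helper, so `v + y` builds an "add"
        // op on v.value() and `v - y` builds a "sub" op, each under a name scope.
        public static Tensor operator +(RefVariable x, Tensor y) => op_helper("add", x, y);
        public static Tensor operator -(RefVariable x, Tensor y) => op_helper("sub", x, y);
    }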
Example #4
 public static Tensor bias_add(Tensor value, RefVariable bias, string data_format = null, string name = null)
 {
     return(Python.with(ops.name_scope(name, "BiasAdd", new { value, bias }), scope =>
     {
         name = scope;
         return gen_nn_ops.bias_add(value, bias, data_format: data_format, name: name);
     }));
 }
Example #5
        public static Tensor assign_sub(RefVariable @ref,
                                        Tensor value,
                                        bool use_locking = false,
                                        string name      = null)
        {
            var _op = _op_def_lib._apply_op_helper("AssignSub", name: name, args: new { @ref, value, use_locking });

            return(_op.outputs[0]);
        }
Example #6
        private static Tensor op_helper <T>(string default_name, RefVariable x, T y)
        {
            var tensor1 = x.value();

            return(with(ops.name_scope(null, default_name, new { tensor1, y }), scope => {
                var tensor2 = ops.convert_to_tensor(y, tensor1.dtype.as_base_dtype(), "y");
                return gen_math_ops.add(tensor1, tensor2, scope);
            }));
        }
Example #7
        public static Tensor _clip(RefVariable @params, Tensor ids, string max_norm = null)
        {
            if (max_norm == null)
            {
                return(@params);
            }

            throw new NotImplementedException("_clip");
        }
Example #8
        public static Tensor scatter_add(RefVariable @ref, Tensor indices, Tensor updates, bool use_locking = false, string name = null)
        {
            if (@ref.dtype.is_ref_dtype())
            {
                return(gen_state_ops.scatter_add(@ref, indices, updates, use_locking: use_locking, name: name));
            }

            throw new NotImplementedException("scatter_add");
        }
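A usage sketch (shapes, values, and the NumSharp initializers are invented for illustration): `scatter_add` accumulates `updates` into the rows of `@ref` selected by `indices`.

    // Sketch: add ones into rows 0 and 2 of a 4x3 variable, in place.
    var table    = tf.Variable(np.zeros(4, 3), name: "table");
    var indices  = tf.constant(new[] { 0, 2 });
    var updates  = tf.constant(np.ones(2, 3));
    var add_rows = state_ops.scatter_add(table, indices, updates);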
Example #9
 public static Tensor is_variable_initialized(RefVariable @ref, string name = null)
 {
     if (@ref.dtype.is_ref_dtype())
     {
         return(gen_state_ops.is_variable_initialized(@ref: @ref, name: name));
     }
     throw new NotImplementedException("");
     //return @ref.is_initialized(name: name);
 }
Example #10
 //"""Update 'ref' by adding 'value' to it.
 //
 //  This operation outputs "ref" after the update is done.
 //  This makes it easier to chain operations that need to use the reset value.
 //
 //  Args:
 //    ref: A mutable `Tensor`. Must be one of the following types:
 //      `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `int16`,
 //      `int8`, `complex64`, `complex128`, `qint8`, `quint8`, `qint32`, `half`.
 //      Should be from a `Variable` node.
 //    value: A `Tensor`. Must have the same type as `ref`.
 //      The value to be added to the variable.
 //    use_locking: An optional `bool`. Defaults to `False`.
 //      If True, the addition will be protected by a lock;
 //      otherwise the behavior is undefined, but may exhibit less contention.
 //    name: A name for the operation (optional).
 //
 //  Returns:
 //    Same as "ref".  Returned as a convenience for operations that want
 //    to use the new value after the variable has been updated.
 public static Tensor assign_add <T>(RefVariable @ref,
                                     T value,
                                     bool use_locking = false,
                                     string name      = null)
 {
     if (@ref.dtype.is_ref_dtype())
     {
         return(gen_state_ops.assign_add(@ref, value, use_locking: use_locking, name: name));
     }
     throw new NotImplementedException("assign_add");
 }
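Example #25 below uses exactly this entry point to bump a `global_step` counter; a minimal sketch of that pattern:

    // Sketch: increment a step counter; the returned tensor carries the
    // updated value so later ops can chain on it.
    var global_step = tf.Variable(0, trainable: false, name: "global_step");
    var increment   = state_ops.assign_add(global_step, tf.constant(1), name: "increment_step");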
Example #11
 public static Tensor assign(RefVariable @ref, object value,
                             bool validate_shape = true,
                             bool use_locking    = true,
                             string name         = null)
 {
     return(gen_state_ops.assign(@ref,
                                 value,
                                 validate_shape: validate_shape,
                                 use_locking: use_locking,
                                 name: name));
 }
Example #12
        public static Tensor apply_gradient_descent(RefVariable var, Tensor alpha, Tensor delta, bool use_locking = false, string name = null)
        {
            var _op = tf.OpDefLib._apply_op_helper("ApplyGradientDescent", name, new
            {
                var,
                alpha,
                delta,
                use_locking
            });

            return(_op.outputs[0]);
        }
Example #13
 /// <summary>
 /// Adds `bias` to `value`.
 /// </summary>
 /// <param name="value"></param>
 /// <param name="bias"></param>
 /// <param name="data_format"></param>
 /// <param name="name"></param>
 /// <returns></returns>
 public static Tensor bias_add(Tensor value,
                               RefVariable bias,
                               string data_format = null,
                               string name        = null)
 {
     return(Python.with(ops.name_scope(name, "BiasAdd", new { value, bias }), scope =>
     {
         value = ops.convert_to_tensor(value, name: "input");
         var bias_tensor = ops.convert_to_tensor(bias, dtype: value.dtype, name: "bias");
         return gen_nn_ops.bias_add(value, bias_tensor, data_format: data_format, name: name);
     }));
 }
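A usage sketch (shapes and the `nn_ops` class name are assumptions): `bias_add` broadcasts a 1-D bias over the last dimension of `value`.

    // Sketch: add a per-channel bias to a fake batch of activations.
    var x = tf.constant(np.ones(2, 128));          // batch of 2, 128 channels
    var b = tf.Variable(np.zeros(128), name: "bias");
    var y = nn_ops.bias_add(x, b);                 // broadcasts b over the last axis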
Example #14
 public static Tensor[] fused_batch_norm(Tensor x,
                                         RefVariable scale,
                                         RefVariable offset,
                                         Tensor mean        = null,
                                         Tensor variance    = null,
                                         float epsilon      = 0.001f,
                                         string data_format = "NHWC",
                                         bool is_training   = true,
                                         string name        = null) => nn_impl.fused_batch_norm(x, scale, offset, mean, variance,
                                                                                                epsilon: epsilon,
                                                                                                data_format: data_format,
                                                                                                is_training: is_training,
                                                                                                name: name);
Example #15
        /// <summary>
        /// Converts the given `value` to a `Tensor`.
        /// </summary>
        /// <param name="value"></param>
        /// <param name="dtype"></param>
        /// <param name="name"></param>
        /// <returns></returns>
        public static Tensor convert_to_tensor(object value,
                                               TF_DataType dtype           = TF_DataType.DtInvalid,
                                               string name                 = null,
                                               bool as_ref                 = false,
                                               TF_DataType preferred_dtype = TF_DataType.DtInvalid,
                                               Context ctx                 = null)
        {
            if (dtype == TF_DataType.DtInvalid)
            {
                dtype = preferred_dtype;
            }

            if (value is EagerTensor eager_tensor)
            {
                if (tf.executing_eagerly())
                {
                    return(eager_tensor);
                }
                else
                {
                    var graph = get_default_graph();
                    if (!graph.building_function)
                    {
                        throw new RuntimeError("Attempting to capture an EagerTensor without building a function.");
                    }
                    return((graph as FuncGraph).capture(eager_tensor, name: name));
                }
            }

            Tensor ret = value switch
            {
                NDArray nd => constant_op.constant(nd, dtype: dtype, name: name),
                EagerTensor tensor => tensor.dtype == TF_DataType.TF_RESOURCE
                            ? tensor.AsPlaceholder(name: name)
                            : tensor.AsConstant(name: name),
                Tensor tensor => tensor,
                Tensor[] tensors => array_ops._autopacking_helper(tensors, dtype, name == null ? "packed" : name),
                RefVariable varVal => varVal._TensorConversionFunction(dtype: dtype, name: name, as_ref: as_ref),
                ResourceVariable varVal => varVal._TensorConversionFunction(dtype: dtype, name: name, as_ref: as_ref),
                TensorShape ts => constant_op.constant(ts.dims, dtype: dtype, name: name),
                int[] dims => constant_op.constant(dims, dtype: dtype, name: name),
                string str => constant_op.constant(str, dtype: tf.@string, name: name),
                string[] str => constant_op.constant(str, dtype: tf.@string, name: name),
                IEnumerable <object> objects => array_ops._autopacking_conversion_function(objects, dtype: dtype, name: name),
                _ => constant_op.constant(value, dtype: dtype, name: name)
            };

            return(ret);
        }
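The switch above routes many input kinds to the right conversion; a short sketch exercising a few of its arms (the NumSharp call is illustrative):

    // Sketch: each call below hits a different arm of the switch.
    var t1 = ops.convert_to_tensor(new[] { 2, 3 });               // int[]   -> constant
    var t2 = ops.convert_to_tensor("hello");                      // string  -> tf.@string constant
    var t3 = ops.convert_to_tensor(np.arange(6).reshape(2, 3));   // NDArray -> constant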
Example #16
        public static Tensor assign(RefVariable @ref, object value,
                                    bool validate_shape = true,
                                    bool use_locking    = true,
                                    string name         = null)
        {
            var _op = tf.OpDefLib._apply_op_helper("Assign", name: name, args: new { @ref, value, validate_shape, use_locking });

            var _result      = _op.outputs;
            var _inputs_flat = _op.inputs;

            var _attrs = new Dictionary <string, object>();

            _attrs["T"] = _op.get_attr("T");
            _attrs["validate_shape"] = _op.get_attr("validate_shape");
            _attrs["use_locking"]    = _op.get_attr("use_locking");

            return(_result[0]);
        }
Example #17
            public Tensor polynomial_decay(float learning_rate,
                                           RefVariable global_step,
                                           float decay_steps,
                                           float end_learning_rate = 0.0001f,
                                           float power             = 1.0f,
                                           bool cycle  = false,
                                           string name = null)
            {
                var decayed = new PolynomialDecay(learning_rate,
                                                  decay_steps,
                                                  end_learning_rate: end_learning_rate,
                                                  power: power,
                                                  cycle: cycle,
                                                  name: name);

                var decayed_lr = decayed.__call__(global_step);

                return(decayed_lr);
            }
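A hedged usage sketch, assuming this method is surfaced as `tf.train.polynomial_decay` (the numbers are illustrative):

    // Sketch: decay the learning rate from 0.1 toward 0.001 over 10000 steps.
    var global_step = tf.Variable(0, trainable: false, name: "global_step");
    var lr = tf.train.polynomial_decay(0.1f, global_step, 10000f,
                                       end_learning_rate: 0.001f,
                                       power: 0.5f);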
Example #18
        /// <summary>
        /// Add operations to minimize `loss` by updating `var_list`
        /// </summary>
        /// <param name="loss"></param>
        /// <returns>
        /// An Operation that updates the variables in `var_list`.  If `global_step`
        /// was not `None`, that operation also increments `global_step`.
        /// </returns>
        public Operation minimize(Tensor loss,
                                  RefVariable global_step          = null,
                                  GateGradientType gate_gradients  = GateGradientType.GATE_OP,
                                  bool colocate_gradients_with_ops = false)
        {
            var grads_and_vars = compute_gradients(loss,
                                                   gate_gradients: gate_gradients,
                                                   colocate_gradients_with_ops: colocate_gradients_with_ops);

            var vars_with_grad = grads_and_vars.Where(x => x.Item1 != null).Select(x => x.Item2).ToArray();

            if (vars_with_grad.Length == 0)
            {
                throw new ValueError($"No gradients provided for any variable, check your graph for ops" +
                                     $" that do not support gradients, between variables {string.Join(",", vars_with_grad.Select(x => x.name))} and loss {loss}.");
            }

            return(apply_gradients(grads_and_vars));
        }
Example #19
        private RefVariable _get_single_variable(string name,
                                                 TensorShape shape   = null,
                                                 TF_DataType dtype   = TF_DataType.DtInvalid,
                                                 Tensor initializer  = null,
                                                 bool reuse          = false,
                                                 bool? trainable     = null,
                                                 bool validate_shape = false,
                                                 bool? use_resource  = null,
                                                 VariableSynchronization synchronization = VariableSynchronization.Auto,
                                                 VariableAggregation aggregation         = VariableAggregation.None)
        {
            if (use_resource == null)
            {
                use_resource = false;
            }

            if (_vars.ContainsKey(name))
            {
                if (!reuse)
                {
                    var var = _vars[name];
                }
                throw new NotImplementedException("_get_single_variable");
            }

            RefVariable v = null;

            // Create the variable.
            ops.init_scope();
            {
                var init_val = initializer;
                v = new RefVariable(init_val,
                                    name: name,
                                    validate_shape: validate_shape,
                                    trainable: trainable.Value);
            }

            _vars[name] = v;

            return(v);
        }
Example #20
        public static Tensor cast(RefVariable x, TF_DataType dtype = TF_DataType.DtInvalid, string name = null)
        {
            var base_type = dtype.as_base_dtype();

            if (base_type == x.dtype)
            {
                return(x);
            }

            return(tf_with(ops.name_scope(name, "Cast", new { x }), scope =>
            {
                name = scope;
                var t_x = ops.convert_to_tensor(x, name: "x");
                if (t_x.dtype.as_base_dtype() != base_type)
                {
                    t_x = gen_math_ops.cast(t_x, base_type, name: name);
                }

                // Return the converted (and possibly cast) tensor, not the original variable.
                return t_x;
            }));
        }
Example #21
            public Tensor conv2d(Tensor input, RefVariable filter, int[] strides, string padding, bool use_cudnn_on_gpu = true,
                                 string data_format = "NHWC", int[] dilations = null, string name = null)
            {
                var parameters = new Conv2dParams
                {
                    Input         = input,
                    Filter        = filter,
                    Strides       = strides,
                    Padding       = padding,
                    UseCudnnOnGpu = use_cudnn_on_gpu,
                    DataFormat    = data_format,
                    Name          = name
                };

                if (dilations != null)
                {
                    parameters.Dilations = dilations;
                }

                return(gen_nn_ops.conv2d(parameters));
            }
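A usage sketch assuming this method is exposed as `tf.nn.conv2d` (shapes and the initializer are invented):

    // Sketch: 3x3 filters, 1 input channel, 32 output channels, stride 1, SAME padding.
    var images  = tf.placeholder(tf.float32, shape: new TensorShape(-1, 28, 28, 1));
    var filters = tf.Variable(tf.truncated_normal(new[] { 3, 3, 1, 32 }, stddev: 0.1f));
    var conv    = tf.nn.conv2d(images, filters, strides: new[] { 1, 1, 1, 1 }, padding: "SAME");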
Example #22
        /// <summary>
        /// Helper function for embedding_lookup and _compute_sampled_logits.
        /// </summary>
        /// <param name="params"></param>
        /// <param name="ids"></param>
        /// <param name="partition_strategy"></param>
        /// <param name="name"></param>
        /// <returns></returns>
        public static Tensor _embedding_lookup_and_transform(RefVariable @params,
                                                             Tensor ids,
                                                             string partition_strategy = "mod",
                                                             string name     = null,
                                                             string max_norm = null)
        {
            return(with(ops.name_scope(name, "embedding_lookup", new { @params, ids }), scope =>
            {
                name = scope;
                int np = 1;
                ids = ops.convert_to_tensor(ids, name: "ids");
                if (np == 1)
                {
                    var gather = array_ops.gather(@params, ids, name: name);
                    var result = _clip(gather, ids, max_norm);

                    return array_ops.identity(result);
                }

                throw new NotImplementedException("_embedding_lookup_and_transform");
            }));
        }
Example #23
        public static Tensor apply_adam(RefVariable var, RefVariable m, RefVariable v, Tensor beta1_power, Tensor beta2_power,
                                        Tensor lr, Tensor beta1, Tensor beta2, Tensor epsilon, Tensor grad,
                                        bool use_locking = false, bool use_nesterov = false, string name = null)
        {
            var _op = _op_def_lib._apply_op_helper("ApplyAdam", name, new
            {
                var,
                m,
                v,
                beta1_power,
                beta2_power,
                lr,
                beta1,
                beta2,
                epsilon,
                grad,
                use_locking,
                use_nesterov
            });

            return(_op.outputs[0]);
        }
Example #24
        /// <summary>
        /// Add operations to minimize `loss` by updating `var_list`
        ///
        ///  This method simply combines calls `compute_gradients()` and
        ///  `apply_gradients()`. If you want to process the gradient before applying
        ///  them call `compute_gradients()` and `apply_gradients()` explicitly instead
        ///  of using this function.
        /// </summary>
        /// <param name="loss">A `Tensor` containing the value to minimize.</param>
        /// <param name="global_step">Optional `Variable` to increment by one after the
        /// variables have been updated.</param>
        /// <param name="var_list">Optional list or tuple of `Variable` objects to update to
        /// minimize `loss`.  Defaults to the list of variables collected in
        /// the graph under the key `GraphKeys.TRAINABLE_VARIABLES`.</param>
        /// <param name="gate_gradients">
        /// How to gate the computation of gradients.  Can be
        /// `GATE_NONE`, `GATE_OP`, or  `GATE_GRAPH`.
        /// </param>
        /// <param name="aggregation_method">
        /// Specifies the method used to combine gradient terms.
        /// Valid values are defined in the class `AggregationMethod`.
        /// </param>
        /// <param name="colocate_gradients_with_ops"></param>
        /// <param name="name">Optional name for the returned operation.</param>
        /// <param name="grad_loss">Optional. A `Tensor` holding the gradient computed for `loss`.</param>
        /// <returns>
        /// An Operation that updates the variables in `var_list`.  If `global_step`
        /// was not `None`, that operation also increments `global_step`.
        /// </returns>
        public Operation minimize(Tensor loss,
                                  RefVariable global_step          = null,
                                  List <RefVariable> var_list      = null,
                                  GateGradientType gate_gradients  = GateGradientType.GATE_OP,
                                  int? aggregation_method          = null,
                                  bool colocate_gradients_with_ops = false,
                                  string name                      = null,
                                  Tensor grad_loss                 = null)
        {
            // TODO: strongly type aggregation_method
            var grads_and_vars = compute_gradients(loss, var_list: var_list,
                                                   gate_gradients: gate_gradients,
                                                   aggregation_method: aggregation_method,
                                                   colocate_gradients_with_ops: colocate_gradients_with_ops);

            var vars_with_grad = grads_and_vars.Where(x => x.Item1 != null).Select(x => x.Item2).ToArray();

            if (vars_with_grad.Length == 0)
            {
                throw new ValueError($"No gradients provided for any variable, check your graph for ops" +
                                     $" that do not support gradients, between variables {string.Join(",", vars_with_grad.Select(x => x.name))} and loss {loss}.");
            }

            return(apply_gradients(grads_and_vars, global_step: global_step, name: name));
        }
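A sketch of the usual call site (assumes `loss` is a scalar tensor defined elsewhere and a `tf.train.GradientDescentOptimizer` factory, as in the SciSharp samples):

    // Sketch: build a one-call training op; global_step increments per update.
    var global_step = tf.Variable(0, trainable: false, name: "global_step");
    var optimizer   = tf.train.GradientDescentOptimizer(0.01f);
    var train_op    = optimizer.minimize(loss, global_step: global_step);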
Example #25
        /// <summary>
        /// Apply gradients to variables.
        ///
        /// This is the second part of `minimize()`. It returns an `Operation` that
        /// applies gradients.
        /// </summary>
        /// <param name="grads_and_vars">List of (gradient, variable) pairs as returned by
        /// `compute_gradients()`.</param>
        /// <param name="global_step">Optional `Variable` to increment by one after the
        /// variables have been updated.</param>
        /// <param name="name">Optional name for the returned operation.  Default to the
        /// name passed to the `Optimizer` constructor.</param>
        /// <returns>
        /// An `Operation` that applies the specified gradients. If `global_step`
        /// was not None, that operation also increments `global_step`.</returns>
        public Operation apply_gradients(Tuple <Tensor, RefVariable>[] grads_and_vars, RefVariable global_step = null, string name = null)
        {
            // No DistributionStrategy case.
            var converted_grads_and_vars = new List <(Tensor, RefVariable, _OptimizableVariable)>();

            foreach (var (g, v) in grads_and_vars)
            {
                if (g != null)
                {
                    // Convert the grad to Tensor or IndexedSlices if necessary.
                    var gR = ops.convert_to_tensor_or_indexed_slices(g);
                    var p  = _get_processor(v);
                    converted_grads_and_vars.Add((gR, v, p));
                }
            }

            var var_list = converted_grads_and_vars.Where(x => x.Item1 != null).Select(x => x.Item2).ToArray();

            if (var_list.Length == 0)
            {
                throw new ValueError($"No gradients provided for any variable");
            }

            ops.init_scope();
            _create_slots(var_list);

            var update_ops = new List <Operation>();

            return(tf_with(ops.name_scope(name, Name), scope =>
            {
                name = scope;
                _prepare();

                foreach (var(grad, var, processor) in converted_grads_and_vars)
                {
                    if (grad == null)
                    {
                        continue;
                    }

                    var scope_name = var.op.name;
                    tf_with(ops.name_scope("update_" + scope_name), scope2 =>
                    {
                        var op = processor.update_op(this, grad);
                        update_ops.Add(op);
                    });
                }

                Operation apply_updates = null;
                if (global_step == null)
                {
                    apply_updates = _finish(update_ops.ToArray(), name);
                }
                else
                {
                    tf_with(ops.control_dependencies(new object[] { _finish(update_ops.ToArray(), "update") }), dep =>
                    {
                        ops.colocate_with(global_step);
                        // TODO: port this if branch once ResourceVariable has been ported!
                        //if (global_step is ResourceVariable)
                        //{
                        //        # TODO(apassos): the implicit read in assign_add is slow; consider
                        //        # making it less so.
                        //        apply_updates = resource_variable_ops.assign_add_variable_op(
                        //            global_step.handle,
                        //            ops.convert_to_tensor(1, dtype = global_step.dtype),
                        //            name = name)
                        //}
                        //else
                        {
                            apply_updates = state_ops.assign_add(global_step, tf.constant(1), name: name);
                        }
                    });
                }

                if (!tf.context.executing_eagerly())
                {
                    var train_op = ops.get_collection_ref(ops.GraphKeys.TRAIN_OP) as List <ITensorOrOperation>;
                    if (train_op != null && train_op.Contains(apply_updates))
                    {
                        train_op.Add(apply_updates);
                    }
                }

                return apply_updates;
            }));
        }
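The two halves of `minimize()` can also be called explicitly, e.g. to inspect or transform gradients before applying them; a hedged sketch (again assuming `loss`, `global_step`, and the optimizer factory from the previous example):

    // Sketch: compute gradients first, then apply them as a second step.
    var optimizer      = tf.train.GradientDescentOptimizer(0.01f);
    var grads_and_vars = optimizer.compute_gradients(loss);
    var train_op       = optimizer.apply_gradients(grads_and_vars, global_step: global_step);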
Example #26
 public _RefVariableProcessor(RefVariable v)
 {
     _v = v;
 }
Example #27
 public static _OptimizableVariable _get_processor(RefVariable v)
 {
     return(new _RefVariableProcessor(v));
 }
Example #28
 public Tensor assign(RefVariable @ref, object value, bool validate_shape = true, bool use_locking = true, string name = null)
 => state_ops.assign(@ref, value, validate_shape, use_locking, name);
Example #29
        public static (Dictionary <string, IVariableV1>, ITensorOrOperation[]) import_scoped_meta_graph_with_return_elements(MetaGraphDef meta_graph_or_file,
                                                                                                                             bool clear_devices  = false,
                                                                                                                             string import_scope = "",
                                                                                                                             Dictionary <string, Tensor> input_map = null,
                                                                                                                             string unbound_inputs_col_name        = "unbound_inputs",
                                                                                                                             string[] return_elements = null)
        {
            var meta_graph_def = meta_graph_or_file;

            if (!string.IsNullOrEmpty(unbound_inputs_col_name))
            {
                foreach (var col in meta_graph_def.CollectionDef)
                {
                    if (col.Key == unbound_inputs_col_name)
                    {
                        throw new NotImplementedException("import_scoped_meta_graph_with_return_elements");
                    }
                }
            }

            // Sets graph to default graph if it's not passed in.
            var graph = ops.get_default_graph();

            // Gathers the list of nodes we are interested in.
            OpList producer_op_list = null;

            if (meta_graph_def.MetaInfoDef.StrippedOpList != null)
            {
                producer_op_list = meta_graph_def.MetaInfoDef.StrippedOpList;
            }
            var input_graph_def = meta_graph_def.GraphDef;

            // Remove all the explicit device specifications for this node. This helps to
            // make the graph more portable.
            if (clear_devices)
            {
                foreach (var node in input_graph_def.Node)
                {
                    node.Device = "";
                }
            }

            var scope_to_prepend_to_names = graph.unique_name("", mark_as_used: false);
            var imported_return_elements  = importer.import_graph_def(input_graph_def,
                                                                      name: scope_to_prepend_to_names,
                                                                      input_map: input_map,
                                                                      producer_op_list: producer_op_list,
                                                                      return_elements: return_elements);

            // Restores all the other collections.
            var variable_objects = new Dictionary <ByteString, IVariableV1>();

            foreach (var col in meta_graph_def.CollectionDef.OrderBy(x => x.Key))
            {
                // Don't add unbound_inputs to the new graph.
                if (col.Key == unbound_inputs_col_name)
                {
                    continue;
                }

                switch (col.Value.KindCase)
                {
                case KindOneofCase.NodeList:
                    foreach (var value in col.Value.NodeList.Value)
                    {
                        var col_op = graph.as_graph_element(ops.prepend_name_scope(value, scope_to_prepend_to_names));
                        graph.add_to_collection(col.Key, col_op);
                    }
                    break;

                case KindOneofCase.BytesList:
                    //var proto_type = ops.get_collection_proto_type(key)
                    if (tf.GraphKeys._VARIABLE_COLLECTIONS.Contains(col.Key))
                    {
                        foreach (var value in col.Value.BytesList.Value)
                        {
                            IVariableV1 variable = null;
                            if (!variable_objects.ContainsKey(value))
                            {
                                var proto = VariableDef.Parser.ParseFrom(value);
                                if (proto.IsResource)
                                {
                                    variable = new ResourceVariable(variable_def: proto, import_scope: scope_to_prepend_to_names);
                                }
                                else
                                {
                                    variable = new RefVariable(variable_def: proto, import_scope: scope_to_prepend_to_names);
                                }
                                variable_objects[value] = variable;
                            }
                            variable = variable_objects[value];
                            graph.add_to_collection(col.Key, variable);
                        }
                    }
                    else
                    {
                        foreach (var value in col.Value.BytesList.Value)
                        {
                            switch (col.Key)
                            {
                            case "cond_context":
                            {
                                var proto       = CondContextDef.Parser.ParseFrom(value);
                                var condContext = new CondContext().from_proto(proto, import_scope);
                                graph.add_to_collection(col.Key, condContext);
                            }
                            break;

                            case "while_context":
                            {
                                var proto        = WhileContextDef.Parser.ParseFrom(value);
                                var whileContext = new WhileContext().from_proto(proto, import_scope);
                                graph.add_to_collection(col.Key, whileContext);
                            }
                            break;

                            default:
                                Console.WriteLine($"import_scoped_meta_graph_with_return_elements {col.Key}");
                                continue;
                            }
                        }
                    }

                    break;

                default:
                    Console.WriteLine($"Cannot identify data type for collection {col.Key}. Skipping.");
                    break;
                }
            }

            var variables = graph.get_collection <IVariableV1>(tf.GraphKeys.GLOBAL_VARIABLES,
                                                               scope: scope_to_prepend_to_names);
            var var_list = new Dictionary <string, IVariableV1>();

            variables.ForEach(v => var_list[ops.strip_name_scope(v.Name, scope_to_prepend_to_names)] = v);

            return(var_list, imported_return_elements);
        }
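A hedged usage sketch: parse a serialized `MetaGraphDef` with the generated protobuf parser and import it (the file path and the containing class name `meta_graph` are assumptions):

    // Sketch: deserialize a MetaGraphDef, then import it into the default
    // graph, stripping explicit device placements for portability.
    var meta = MetaGraphDef.Parser.ParseFrom(File.ReadAllBytes("model.meta"));
    var (var_list, return_elements) =
        meta_graph.import_scoped_meta_graph_with_return_elements(meta, clear_devices: true);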
Example #30
        public static Tensor is_variable_initialized(RefVariable @ref, string name = null)
        {
            var _op = tf.OpDefLib._apply_op_helper("IsVariableInitialized", name: name, args: new { @ref });

            return(_op.output);
        }