public static unsafe Tensor constant(object value)
{
    var g = ops.get_default_graph();
    var tensor_value = new attr_value_pb2.AttrValue();
    var tensor_pb = tensor_util.make_tensor_proto(value);
    tensor_value.Tensor = tensor_pb;
    var dtype_value = new attr_value_pb2.AttrValue
    {
        Type = tensor_value.Tensor.Dtype,
    };

    var attrs = new Dictionary<string, AttrValue>();
    attrs["dtype"] = dtype_value;
    attrs["value"] = tensor_value;

    var const_tensor = g.create_op("Const",
        null,
        new TF_DataType[] { (TF_DataType)dtype_value.Type },
        attrs: attrs).outputs[0];

    return const_tensor;
}
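A minimal usage sketch, assuming this overload is reachable and a default graph is active; tf.Graph()/as_default() and Tensor.op.type are assumptions drawn from the surrounding TensorFlow.NET-style API, not from this snippet.

// Hedged usage sketch: tf.Graph()/as_default() and c.op.type are assumed API names.
var graph = tf.Graph().as_default();   // ensure a default graph exists
var c = constant(42);                  // emits a "Const" op carrying "value"/"dtype" attrs
// c.op.type is expected to be "Const"; c.dtype mirrors the dtype inferred for the proto.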
private void _init_from_args(object initial_value,
    bool trainable = true,
    List<string> collections = null,
    bool validate_shape = true,
    string caching_device = "",
    string name = null,
    TF_DataType dtype = TF_DataType.DtInvalid)
{
    if (initial_value is null)
        throw new ValueError("initial_value must be specified.");

    var init_from_fn = initial_value.GetType().Name == "Func`1";

    if (collections == null)
        collections = new List<string> { tf.GraphKeys.GLOBAL_VARIABLES };

    // Store the graph key so optimizers know how to only retrieve variables from
    // this graph.
    _graph_key = ops.get_default_graph().graph_key;

    _trainable = trainable;
    if (trainable && !collections.Contains(tf.GraphKeys.TRAINABLE_VARIABLES))
        collections.Add(tf.GraphKeys.TRAINABLE_VARIABLES);

    tf_with(ops.init_scope2(), delegate
    {
        var values = init_from_fn ? new object[0] : new object[] { initial_value };
        tf_with(ops.name_scope(name, "Variable", values), scope =>
        {
            name = scope;

            if (init_from_fn)
            {
                // Use attr_scope and device(None) to simulate the behavior of
                // colocate_with when the variable we want to colocate with doesn't
                // yet exist.
                string true_name = ops.name_from_scope_name(name);
                var attr = new AttrValue
                {
                    List = new AttrValue.Types.ListValue()
                };
                attr.List.S.Add(ByteString.CopyFromUtf8($"loc:@{true_name}"));
                tf_with(ops.name_scope("Initializer"), scope2 =>
                {
                    _initial_value = (initial_value as Func<Tensor>)();
                    _initial_value = ops.convert_to_tensor(_initial_value, name: "initial_value", dtype: dtype);
                });
                _variable = state_ops.variable_op_v2(_initial_value.shape, _initial_value.dtype.as_base_dtype(), name: name);
            }
            // Or get the initial value from a Tensor or Python object.
            else
            {
                _initial_value = ops.convert_to_tensor(initial_value, name: "initial_value", dtype: dtype);

                var shape = _initial_value.shape;
                dtype = _initial_value.dtype;
                _variable = gen_state_ops.variable_v2(shape, dtype.as_base_dtype(), scope);
            }

            // Manually overrides the variable's shape with the initial value's.
            if (validate_shape)
            {
                var initial_value_shape = _initial_value.TensorShape;
                if (!initial_value_shape.is_fully_defined())
                    throw new ValueError($"initial_value must have a shape specified: {_initial_value}");
            }

            // If 'initial_value' makes use of other variables, make sure we don't
            // have an issue if these other variables aren't initialized first by
            // using their initialized_value() method.
            var _initial_value2 = _try_guard_against_uninitialized_dependencies(name, _initial_value);

            _initializer_op = gen_state_ops.assign(_variable, _initial_value2, validate_shape).op;

            // caching_device is not supported here; without one, colocate the
            // snapshot read with the initializer.
            if (string.IsNullOrEmpty(caching_device))
            {
                ops.colocate_with(_initializer_op);
                _snapshot = gen_array_ops.identity(_variable, name: "read");
            }

            ops.add_to_collections(collections, this as IVariableV1);
        });
    });
}
private AttrValue SetAttrValue(OpDef op_def, AttrDef attr_def, object value)
{
    var attr_value = new AttrValue();

    if (attr_def.Type.StartsWith("list("))
    {
        // attr_def.HasMinimum is not enforced for list attrs here.
        attr_value.List = new AttrValue.Types.ListValue();
    }

    switch (attr_def.Type)
    {
        case "string":
            attr_value.S = Google.Protobuf.ByteString.CopyFromUtf8((string)value);
            break;
        case "type":
            attr_value.Type = _MakeType((TF_DataType)value, attr_def);
            break;
        case "list(type)":
            attr_value.List.Type.AddRange((value as IList<TF_DataType>).Select(x => _MakeType(x, attr_def)));
            break;
        case "list(int)":
            attr_value.List.I.AddRange((value as int[]).Select(x => Convert.ToInt64(x)));
            break;
        case "bool":
            attr_value.B = (bool)value;
            break;
        case "float":
            attr_value.F = (float)value;
            break;
        case "int":
            attr_value.I = (int)value;
            if (attr_def.HasMinimum && attr_value.I < attr_def.Minimum)
                throw new ValueError($"Attr '{attr_def.Name}' of '{op_def.Name}' Op passed {attr_value.I} less than minimum {attr_def.Minimum}.");
            break;
        case "shape":
            if (value == null && attr_def.DefaultValue != null)
                attr_value.Shape = attr_def.DefaultValue.Shape;

            if (value is TensorShape val1)
                attr_value.Shape = val1.as_proto();
            else if (value is long[] val2)
                attr_value.Shape = tensor_util.as_shape(val2);
            else if (value is int[] val3)
                attr_value.Shape = tensor_util.as_shape(val3);
            break;
        default:
            throw new TypeError($"SetAttrValue: can't convert attr_def.Type '{attr_def.Type}' to protos.");
    }

    return attr_value;
}
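For illustration, here is what the "list(int)" branch above produces when built directly against the Google.Protobuf-generated AttrValue types; the sample values are arbitrary.

// Equivalent AttrValue for a "list(int)" attr with value = new[] { 1, 2, 3 }.
// Purely illustrative; it mirrors the "list(int)" case in the switch above.
var list_attr = new AttrValue { List = new AttrValue.Types.ListValue() };
list_attr.List.I.AddRange(new[] { 1, 2, 3 }.Select(x => Convert.ToInt64(x)));
// list_attr now serializes to: list { i: 1 i: 2 i: 3 }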
private void _init_from_args(object initial_value = null,
    bool trainable = true,
    List<string> collections = null,
    string caching_device = "",
    string name = null,
    TF_DataType dtype = TF_DataType.DtInvalid,
    VariableAggregation aggregation = VariableAggregation.None,
    TensorShape shape = null)
{
    var init_from_fn = initial_value.GetType().Name == "Func`1"
        || initial_value.GetType().GetInterface("IInitializer") != null;

    if (collections == null)
        collections = new List<string>() { tf.GraphKeys.GLOBAL_VARIABLES };

    _trainable = trainable;
    if (trainable && !collections.Contains(tf.GraphKeys.TRAINABLE_VARIABLES))
        collections.Add(tf.GraphKeys.TRAINABLE_VARIABLES);

    _in_graph_mode = !tf.Context.executing_eagerly();
    tf_with(ops.init_scope(), init_scope =>
    {
        var values = init_from_fn ? new object[0] : new object[] { initial_value };
        tf_with(ops.name_scope(name, "Variable", values), scope =>
        {
            name = scope;
            var handle_name = ops.name_from_scope_name(name);
            string unique_id = "";
            string shared_name = "";

            if (_in_graph_mode)
            {
                shared_name = handle_name;
                unique_id = shared_name;
            }
            else
            {
                unique_id = $"{handle_name}_{ops.uid()}";
                shared_name = tf.Context.shared_name();
            }

            var attr = new AttrValue();
            attr.List = new AttrValue.Types.ListValue();
            attr.List.S.Add(ByteString.CopyFromUtf8($"loc:@{handle_name}"));
            tf_with(ops.name_scope("Initializer"), delegate
            {
                if (initial_value.GetType().GetInterface("IInitializer") != null)
                {
                    initial_value = ops.convert_to_tensor((initial_value as IInitializer).Apply(new InitializerArgs(shape, dtype: dtype)));
                }
                else
                {
                    var value = init_from_fn ? (initial_value as Func<Tensor>)() : initial_value;
                    initial_value = ops.convert_to_tensor(value, name: "initial_value", dtype: dtype);
                }
            });
            _shape = shape ?? (initial_value as Tensor).TensorShape;
            _initial_value = initial_value as Tensor;

            if (_in_graph_mode)
            {
                handle = state_ops.variable_op_v2(_initial_value.shape, _initial_value.dtype.as_base_dtype(), name: name);
                initializer_op = gen_state_ops.assign(handle, _initial_value, true).op;
                ops.colocate_with(initializer_op);

                _graph_element = gen_array_ops.identity(handle, name: "read");
                ops.add_to_collections<IVariableV1>(collections, this);
                _dtype = handle.dtype;
            }
            else
            {
                handle = resource_variable_ops.eager_safe_variable_handle(
                    initial_value: _initial_value,
                    shape: _shape,
                    shared_name: shared_name,
                    name: name,
                    graph_mode: _in_graph_mode);
                gen_resource_variable_ops.assign_variable_op(handle, _initial_value);
                is_initialized_op = null;
                initializer_op = null;
                _graph_element = null;
                _dtype = _initial_value.dtype.as_base_dtype();
                initial_value = _in_graph_mode ? initial_value : null;
            }

            base.__init__(trainable: trainable,
                handle: handle,
                name: name,
                unique_id: unique_id,
                handle_name: handle_name);
        });
    });
}
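A hedged usage sketch: the public tf.Variable factory is assumed to forward into this _init_from_args overload, and the exact parameter list shown is an assumption rather than something taken from this snippet.

// Hedged sketch: tf.Variable(...) is assumed to route into _init_from_args.
var v = tf.Variable(1.0f, trainable: true, name: "my_var");
// Eager mode: the resource handle is created and the initial value assigned immediately.
// Graph mode: the variable is also registered in GLOBAL_VARIABLES / TRAINABLE_VARIABLES.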
/// <param name="verify_shape">Boolean that enables verification of a shape of values.</param> public static Tensor _constant_impl(object value, TF_DataType dtype, TensorShape shape, string name, bool verify_shape, bool allow_broadcast) { if (tf.context.executing_eagerly()) { var t = convert_to_eager_tensor(value, tf.context, dtype: dtype); if (shape == null) { return(t); } if (t.shape.SequenceEqual(shape.dims)) { return(t); } if (verify_shape) { throw new TypeError($"Expected Tensor's shape: {shape}, got {t.shape}."); } var num_t = t.TensorShape.num_elements(); if (num_t == shape.num_elements()) { throw new NotImplementedException(""); } if (num_t == 1) { if (t.dtype == dtypes.@bool) { throw new NotImplementedException(""); } else { return(_eager_fill(shape, t, tf.context)); } } } Graph g = ops.get_default_graph(); var tensor_value = new AttrValue(); tensor_value.Tensor = tensor_util.make_tensor_proto(value, dtype: dtype, shape: shape, verify_shape: verify_shape, allow_broadcast: allow_broadcast); var dtype_value = new AttrValue { Type = tensor_value.Tensor.Dtype, }; var attrs = new Dictionary <string, AttrValue>(); attrs["value"] = tensor_value; attrs["dtype"] = dtype_value; var op = g.create_op("Const", new Tensor[0], new TF_DataType[] { dtype_value.Type.as_tf_dtype() }, attrs: attrs, name: name); return(op.outputs[0]); }
public Operation _apply_op_helper(string op_type_name, string name = "", dynamic args = null)
{
    var keywords = ConvertToDict(args);
    var g = ops.get_default_graph();
    var op_def = g.GetOpDef(op_type_name);

    // Default name if not specified.
    if (String.IsNullOrEmpty(name))
        name = op_type_name;

    // Check for deprecation
    if (op_def.Deprecation != null && op_def.Deprecation.Version > 0)
    {
    }

    var default_type_attr_map = new Dictionary<string, object>();
    foreach (var attr_def in op_def.Attr)
    {
        if (attr_def.Type != "type")
            continue;

        var key = attr_def.Name;
        if (attr_def.DefaultValue != null)
            default_type_attr_map[key] = attr_def.DefaultValue.Type;
    }

    var attrs = new Dictionary<string, object>();
    var inputs = new List<Tensor>();
    var input_types = new List<TF_DataType>();

    Operation op = null;
    Python.with<ops.name_scope>(new ops.name_scope(name), scope =>
    {
        // Perform input type inference
        foreach (var input_arg in op_def.InputArg)
        {
            var input_name = input_arg.Name;
            if (keywords[input_name] is double int_value)
                keywords[input_name] = constant_op.constant(int_value, input_name);

            if (keywords[input_name] is Tensor value)
            {
                if (keywords.ContainsKey(input_name))
                    inputs.Add(value);

                if (!String.IsNullOrEmpty(input_arg.TypeAttr))
                    attrs[input_arg.TypeAttr] = value.dtype;

                if (input_arg.IsRef)
                {
                }
                else
                {
                    var base_type = value.dtype.as_base_dtype();
                    input_types.Add(base_type);
                }
            }
        }

        // Process remaining attrs
        foreach (var attr in op_def.Attr)
        {
            if (keywords.ContainsKey(attr.Name))
                attrs[attr.Name] = keywords[attr.Name];
        }

        // Convert attr values to AttrValue protos.
        var attr_protos = new Dictionary<string, AttrValue>();
        foreach (var attr_def in op_def.Attr)
        {
            var key = attr_def.Name;
            var value = attrs[key];
            var attr_value = new AttrValue();
            switch (attr_def.Type)
            {
                case "string":
                    attr_value.S = Google.Protobuf.ByteString.CopyFromUtf8((string)value);
                    break;
                case "type":
                    attr_value.Type = _MakeType((TF_DataType)value, attr_def);
                    break;
                case "bool":
                    attr_value.B = (bool)value;
                    break;
                case "shape":
                    attr_value.Shape = value == null
                        ? attr_def.DefaultValue.Shape
                        : tensor_util.as_shape((long[])value);
                    break;
                default:
                    throw new InvalidDataException($"attr_def.Type {attr_def.Type}");
            }

            attr_protos[key] = attr_value;
        }

        // Determine output types (possibly using attrs)
        var output_types = new List<TF_DataType>();
        foreach (var arg in op_def.OutputArg)
        {
            if (!String.IsNullOrEmpty(arg.NumberAttr))
            {
            }
            else if (!String.IsNullOrEmpty(arg.TypeAttr))
            {
                output_types.Add((TF_DataType)attr_protos[arg.TypeAttr].Type);
            }
        }

        // Add Op to graph
        op = g.create_op(op_type_name, inputs, output_types.ToArray(),
            name: scope,
            input_types: input_types.ToArray(),
            attrs: attr_protos,
            op_def: op_def);
    });

    return op;
}
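For orientation, a hedged sketch of how a generated op wrapper might call this helper; the "Identity" op and the anonymous args object are illustrative assumptions inferred from the ConvertToDict(args) call above, not confirmed call sites.

// Hedged sketch: keyword-style args are implied by ConvertToDict(args) above;
// the "Identity" op and the "input" argument name are assumptions.
var x = constant_op.constant(3.0, "x");
var op = _apply_op_helper("Identity", name: "identity", args: new { input = x });
var y = op.outputs[0];   // the Identity op's single output tensor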
private void _init_from_args(object initial_value = null,
    bool trainable = true,
    List<string> collections = null,
    string caching_device = "",
    string name = null,
    TF_DataType dtype = TF_DataType.DtInvalid,
    TensorShape shape = null)
{
    var init_from_fn = initial_value.GetType().Name == "Func`1";

    if (collections == null)
        collections = new List<string>() { tf.GraphKeys.GLOBAL_VARIABLES };

    _trainable = trainable;
    if (trainable && !collections.Contains(tf.GraphKeys.TRAINABLE_VARIABLES))
        collections.Add(tf.GraphKeys.TRAINABLE_VARIABLES);

    ops.init_scope();
    _in_graph_mode = !tf.context.executing_eagerly();
    tf_with(ops.name_scope(name, "Variable"), scope =>
    {
        name = scope;
        var handle_name = ops.name_from_scope_name(name);
        var unique_id = $"{handle_name}_{ops.uid()}";
        var shared_name = tf.context.shared_name();
        if (_in_graph_mode)
        {
            shared_name = handle_name;
            unique_id = shared_name;
        }

        var attr = new AttrValue();
        attr.List = new AttrValue.Types.ListValue();
        attr.List.S.Add(ByteString.CopyFromUtf8($"loc:@{handle_name}"));
        tf_with(ops.name_scope("Initializer"), delegate
        {
            initial_value = ops.convert_to_tensor(init_from_fn ? (initial_value as Func<Tensor>)() : initial_value,
                name: "initial_value",
                dtype: dtype);
        });
        _shape = shape ?? (initial_value as Tensor).TensorShape;
        _initial_value = initial_value as Tensor;

        _handle = resource_variable_ops.eager_safe_variable_handle(
            initial_value: _initial_value,
            shape: _shape,
            shared_name: shared_name,
            name: name,
            graph_mode: _in_graph_mode);
        _unique_id = unique_id;
        _handle_name = handle_name + ":0";
        _dtype = _initial_value.dtype.as_base_dtype();
        // _constraint = constraint;

        if (_in_graph_mode)
        {
            tf_with(ops.name_scope("IsInitialized"), delegate
            {
                _is_initialized_op = gen_resource_variable_ops.var_is_initialized_op(_handle);
            });

            if (initial_value != null)
            {
                tf_with(ops.name_scope("Assign"), scope1 =>
                {
                    string n = scope1;
                    _initializer_op = gen_resource_variable_ops.assign_variable_op(_handle,
                        variables._try_guard_against_uninitialized_dependencies(name, _initial_value),
                        name: n);
                });
            }

            // Manually assign reads to the handle's device to avoid log
            // messages.
            tf_with(ops.name_scope("Read"), delegate
            {
                var value = _read_variable_op();
                _graph_element = value;
            });
        }

        ops.add_to_collections(collections, this);
    });
}