/// <summary>
/// Gets or creates a variable from <paramref name="var_store"/>, qualifying its name
/// with this scope's name. <paramref name="use_resource"/> and
/// <paramref name="validate_shape"/> are accepted but not forwarded to the store.
/// NOTE(review): `resue` below looks like a misspelling of `reuse` on the declaring
/// class — confirm the field name before renaming it.
/// </summary>
/// <param name="var_store">Backing store performing the actual lookup/creation.</param>
/// <param name="name">Unqualified variable name.</param>
/// <returns>The variable from the store, cast to RefVariable (null if it is another kind).</returns>
public RefVariable get_variable(_VariableStore var_store,
    string name,
    TensorShape shape = null,
    TF_DataType dtype = TF_DataType.DtInvalid,
    object initializer = null, // IInitializer or Tensor
    bool? trainable = null,
    List<string> collections = null,
    bool? use_resource = null,
    bool validate_shape = true,
    VariableSynchronization synchronization = VariableSynchronization.Auto,
    VariableAggregation aggregation = VariableAggregation.None)
{
    // Prefix with the enclosing scope name when one is set.
    var full_name = string.IsNullOrEmpty(this.name) ? name : this.name + "/" + name;
    return tf_with(ops.name_scope(null), scope =>
    {
        // Fall back to the scope's dtype when the caller did not specify one.
        if (dtype == TF_DataType.DtInvalid)
            dtype = _dtype;

        return var_store.get_variable(full_name,
            shape: shape,
            dtype: dtype,
            initializer: initializer,
            reuse: resue,
            trainable: trainable,
            collections: collections,
            synchronization: synchronization,
            aggregation: aggregation) as RefVariable;
    });
}
/// <summary>
/// Adds a new weight variable to the layer, entering the layer's variable scope
/// (with reuse when already built) and name scope before delegating to the base
/// implementation, whose getter routes to tf.get_variable.
/// </summary>
/// <param name="name">Weight name (unqualified).</param>
/// <param name="shape">Weight dimensions.</param>
/// <param name="dtype">Element type; DtInvalid defaults to TF_FLOAT.</param>
/// <param name="initializer">Optional initializer for the new variable.</param>
/// <param name="trainable">Whether the variable should be trainable; null lets downstream decide.</param>
/// <returns>The created (or reused) RefVariable.</returns>
/// <exception cref="NotImplementedException">when called while building a function graph.</exception>
protected virtual RefVariable add_weight(string name,
    int[] shape,
    TF_DataType dtype = TF_DataType.DtInvalid,
    IInitializer initializer = null,
    bool? trainable = null,
    VariableSynchronization synchronization = VariableSynchronization.AUTO,
    VariableAggregation aggregation = VariableAggregation.NONE)
{
    var default_graph = ops.get_default_graph();
    Graph init_graph = null;
    RefVariable[] existing_variables = null;

    if (default_graph.building_function)
    {
        throw new NotImplementedException("add_weight");
    }
    else
    {
        init_graph = default_graph;
        existing_variables = variables.global_variables().ToArray();
    }

    if (dtype == TF_DataType.DtInvalid)
        dtype = TF_DataType.TF_FLOAT;

    _set_scope();
    var reuse = built || (_reuse != null && _reuse.Value);
    return Python.with(tf.variable_scope(_scope, reuse: reuse, auxiliary_name_scope: false), scope =>
    {
        _current_scope = scope;
        return Python.with(ops.name_scope(_name_scope()), delegate
        {
            var variable = base.add_weight(name,
                shape,
                dtype: dtype,
                initializer: initializer,
                trainable: trainable,
                getter: (name1, shape1, dtype1, initializer1, trainable1) =>
                {
                    return tf.get_variable(name1,
                        shape: new TensorShape(shape1),
                        dtype: dtype1,
                        initializer: initializer1,
                        trainable: trainable1);
                });
            // FIX: previously the result of variables.trainable_variables() was
            // computed here (guarded by `init_graph != null`) and immediately
            // discarded; the dead call has been removed — the sibling overload
            // already keeps it commented out.
            return variable;
        });
    });
}
/// <summary>
/// Looks up or creates a variable in <paramref name="var_store"/>, prefixing the
/// name with this scope's `_name` when one is set.
/// </summary>
/// <param name="var_store">Backing store performing the actual lookup/creation.</param>
/// <returns>The variable returned by the store.</returns>
public RefVariable get_variable(_VariableStore var_store,
    string name,
    TensorShape shape = null,
    TF_DataType dtype = TF_DataType.DtInvalid,
    IInitializer initializer = null,
    bool? trainable = null,
    VariableSynchronization synchronization = VariableSynchronization.AUTO,
    VariableAggregation aggregation = VariableAggregation.NONE)
{
    var full_name = string.IsNullOrEmpty(this._name) ? name : this._name + "/" + name;
    return with(new ops.name_scope(null), scope =>
    {
        // Default to the scope's dtype when the caller did not choose one.
        if (dtype == TF_DataType.DtInvalid)
            dtype = _dtype;

        return var_store.get_variable(full_name,
            shape: shape,
            dtype: dtype,
            initializer: initializer,
            trainable: trainable,
            synchronization: synchronization,
            aggregation: aggregation);
    });
}
/// <summary>
/// Default creator for ref variables. Resolves the trainable flag from the
/// synchronization mode and decides between resource and ref variables;
/// resource variables are not implemented yet.
/// </summary>
/// <param name="initial_value">Initial value (Tensor or convertible).</param>
/// <returns>A newly constructed RefVariable.</returns>
/// <exception cref="NotImplementedException">when a resource variable is requested.</exception>
public static RefVariable default_variable_creator(object initial_value,
    string name = null,
    bool? trainable = null,
    TF_DataType dtype = TF_DataType.DtInvalid,
    bool validate_shape = false,
    bool? use_resource = null,
    VariableSynchronization synchronization = VariableSynchronization.Auto,
    VariableAggregation aggregation = VariableAggregation.None)
{
    trainable = _get_trainable_value(synchronization, trainable);

    // Resolve use_resource: explicit argument, then the active scope, then the global default.
    if (use_resource == null)
        use_resource = get_variable_scope().use_resource;
    if (use_resource == null)
        use_resource = _DEFAULT_USE_RESOURCE;

    if (use_resource.Value)
        throw new NotImplementedException();

    return new RefVariable(initial_value,
        trainable: trainable.Value,
        validate_shape: validate_shape,
        name: name,
        dtype: dtype);
}
/// <summary>
/// Adds a state variable, defaulting to a scalar shape, inside an init scope.
/// NOTE(review): the base call hard-codes trainable: false and the
/// regularizer/getter arguments are not forwarded — presumably this is metric
/// state that must never be trainable; confirm intent before changing.
/// </summary>
/// <returns>The variable created by the base implementation.</returns>
protected override IVariableV1 add_weight(string name,
    TensorShape shape = null,
    TF_DataType dtype = TF_DataType.TF_FLOAT,
    IInitializer initializer = null,
    IRegularizer regularizer = null,
    VariableSynchronization synchronization = VariableSynchronization.OnRead,
    VariableAggregation aggregation = VariableAggregation.Sum,
    bool trainable = true,
    Func<VariableArgs, IVariableV1> getter = null)
{
    // Scalar by default.
    shape = shape ?? new TensorShape(new int[0]);

    return tf_with(ops.init_scope(), delegate
    {
        return base.add_weight(name, shape,
            dtype: dtype,
            trainable: false,
            initializer: initializer,
            synchronization: synchronization,
            aggregation: aggregation);
    });
}
/// <summary>
/// Creates a single RefVariable initialized from an IInitializer and records it
/// in _vars. Reusing an existing entry is not implemented yet.
/// </summary>
/// <param name="name">Store key and variable name.</param>
/// <param name="initializer">Produces the initial value via call(shape, dtype); must not be null.</param>
/// <returns>The newly created variable.</returns>
/// <exception cref="NotImplementedException">when the name already exists in the store.</exception>
private RefVariable _get_single_variable(string name,
    TensorShape shape = null,
    TF_DataType dtype = TF_DataType.DtInvalid,
    IInitializer initializer = null,
    bool reuse = false,
    bool? trainable = null,
    bool validate_shape = false,
    bool? use_resource = null,
    VariableSynchronization synchronization = VariableSynchronization.AUTO,
    VariableAggregation aggregation = VariableAggregation.NONE)
{
    bool initializing_from_value = false;
    if (_vars.ContainsKey(name))
    {
        if (!reuse)
        {
            var var = _vars[name];
        }
        throw new NotImplementedException("_get_single_variable");
    }

    Tensor init_val = null;
    // Resolved base dtype for the new variable; stays DtInvalid if no initializer branch runs.
    var variable_dtype = TF_DataType.DtInvalid;
    ops.init_scope();
    {
        if (initializing_from_value)
        {
        }
        else
        {
            init_val = initializer.call(shape, dtype);
            variable_dtype = dtype.as_base_dtype();
        }
    }

    // Create the variable.
    if (use_resource == null)
        use_resource = false;

    // FIX: TF_DataType.DtInvalid was passed to the creator even though the base
    // dtype had just been computed (and left unused); forward the resolved dtype,
    // matching the Tensor-initializer overload and variable_scope semantics.
    var v = variable_scope.default_variable_creator(init_val,
        name: name,
        trainable: trainable,
        dtype: variable_dtype,
        validate_shape: validate_shape,
        synchronization: synchronization,
        aggregation: aggregation);
    _vars[name] = v;
    return v;
}
/// <summary>
/// Dispatches to the matching _get_single_variable overload based on the runtime
/// type of <paramref name="initializer"/> (IInitializer, Tensor, or null).
/// </summary>
/// <returns>The variable produced by the chosen overload.</returns>
private IVariableV1 _true_getter(string name,
    TensorShape shape = null,
    TF_DataType dtype = TF_DataType.TF_FLOAT,
    object initializer = null,
    bool? trainable = null,
    List<string> collections = null,
    bool validate_shape = true,
    VariableSynchronization synchronization = VariableSynchronization.Auto,
    VariableAggregation aggregation = VariableAggregation.None)
{
    bool is_scalar = !(shape is null) && shape.ndim == 0;

    switch (initializer)
    {
        case IInitializer init:
            return _get_single_variable(name: name, shape: shape, dtype: dtype,
                initializer: init, trainable: trainable, collections: collections,
                validate_shape: validate_shape, synchronization: synchronization,
                aggregation: aggregation);
        case Tensor tensor:
            // NOTE: the Tensor overload takes no collections argument.
            return _get_single_variable(name: name, shape: shape, dtype: dtype,
                initializer: tensor, trainable: trainable,
                validate_shape: validate_shape, synchronization: synchronization,
                aggregation: aggregation);
        default:
            // Null (or unrecognized) initializer: let the overload pick a default.
            IInitializer none = null;
            return _get_single_variable(name: name, shape: shape, dtype: dtype,
                initializer: none, trainable: trainable,
                validate_shape: validate_shape, synchronization: synchronization,
                aggregation: aggregation);
    }
}
/// <summary>
/// Module-level get_variable: resolves the active variable scope and the default
/// variable store, then delegates lookup/creation to them.
/// </summary>
/// <param name="initializer">IInitializer or Tensor used to initialize a new variable.</param>
/// <returns>The variable from the active scope.</returns>
public static RefVariable get_variable(string name,
    TensorShape shape = null,
    TF_DataType dtype = TF_DataType.DtInvalid,
    object initializer = null, // IInitializer or Tensor
    bool? trainable = null,
    VariableSynchronization synchronization = VariableSynchronization.Auto,
    VariableAggregation aggregation = VariableAggregation.None)
{
    var scope = Tensorflow.variable_scope.get_variable_scope();
    var store = Tensorflow.variable_scope._get_default_variable_store();
    // FIX: synchronization/aggregation were accepted but silently dropped when
    // delegating, so callers' settings were replaced by the scope's defaults.
    return scope.get_variable(store, name,
        shape: shape,
        dtype: dtype,
        initializer: initializer,
        trainable: trainable,
        synchronization: synchronization,
        aggregation: aggregation);
}
/// <summary>
/// Restore-on-create for a variable to be saved with this `Checkpointable`.
/// Creates the variable via <paramref name="getter"/> and, unless it is being
/// overwritten by a RefVariable-incompatible value, registers it as a
/// checkpoint dependency.
/// </summary>
/// <param name="getter">Factory invoked as (name, shape, dtype, initializer, trainable).</param>
/// <param name="overwrite">Whether an existing dependency of the same name may be replaced.</param>
/// <returns>The tracked variable (or the raw variable when overwriting a non-RefVariable).</returns>
protected virtual IVariableV1 _add_variable_with_custom_getter(string name,
    int[] shape,
    TF_DataType dtype = TF_DataType.TF_FLOAT,
    IInitializer initializer = null,
    Func<string, int[], TF_DataType, IInitializer, bool, IVariableV1> getter = null,
    bool overwrite = false,
    bool trainable = false,
    bool use_resource = false,
    VariableSynchronization synchronization = VariableSynchronization.Auto,
    VariableAggregation aggregation = VariableAggregation.None)
{
    ops.init_scope();

    // FIX: the original declared a `checkpoint_initializer` that was always null
    // and never consumed, propped up by #pragma suppressions around an empty
    // `if (...) ;` statement. The dead local and pragma scaffolding are removed;
    // deferred-restore initializer logic remains TODO.

    var new_variable = getter(name, shape, dtype, initializer, trainable);

    // If we set an initializer and the variable processed it, tracking will not
    // assign again. It will add this variable to our dependencies, and if there
    // is a non-trivial restoration queued, it will handle that. This also
    // handles slot variables.
    if (!overwrite || new_variable is RefVariable)
        return _track_checkpointable(new_variable, name: name, overwrite: overwrite);

    return new_variable;
}
/// <summary>
/// Resolves the effective `trainable` flag for a variable given its
/// synchronization mode. ON_READ-synchronized variables must not be trainable;
/// otherwise an unspecified flag defaults to true.
/// </summary>
/// <param name="synchronization">Requested synchronization mode.</param>
/// <param name="trainable">Caller's trainable preference; null means "decide for me".</param>
/// <returns>The resolved trainable flag.</returns>
/// <exception cref="ValueError">when trainable=true is combined with OnRead.</exception>
public static bool _get_trainable_value(VariableSynchronization synchronization, bool? trainable = true)
{
    if (synchronization == VariableSynchronization.OnRead)
    {
        // FIX: `trainable.Value` threw InvalidOperationException when trainable
        // was null; compare against true instead, and default OnRead variables
        // to non-trainable (mirrors TensorFlow's Python _get_trainable_value).
        if (trainable == true)
        {
            throw new ValueError("Synchronization value can be set to " +
                "VariableSynchronization.ON_READ only for non-trainable variables. " +
                "You have specified trainable=True and " +
                "synchronization=VariableSynchronization.ON_READ.");
        }
        trainable = false;
    }
    else if (!trainable.HasValue)
    {
        trainable = true;
    }
    return trainable.Value;
}
/// <summary>
/// Default V1 variable creator: returns a ResourceVariable when resource
/// variables are enabled (explicitly, by the active scope, or by the global
/// default), otherwise a RefVariable.
/// </summary>
/// <param name="initial_value">Initial value (Tensor or convertible).</param>
/// <returns>The newly constructed variable.</returns>
public static VariableV1 default_variable_creator(object initial_value,
    string name = null,
    bool? trainable = null,
    List<string> collections = null,
    TF_DataType dtype = TF_DataType.DtInvalid,
    int[] shape = null,
    bool validate_shape = false,
    bool? use_resource = null,
    VariableSynchronization synchronization = VariableSynchronization.Auto,
    VariableAggregation aggregation = VariableAggregation.None)
{
    trainable = _get_trainable_value(synchronization, trainable);

    // Resolve use_resource: explicit argument, then the active scope, then the global default.
    if (use_resource == null)
        use_resource = get_variable_scope().use_resource;
    if (use_resource == null)
        use_resource = _DEFAULT_USE_RESOURCE;

    if (use_resource.Value)
    {
        return new ResourceVariable(initial_value,
            trainable: trainable.Value,
            validate_shape: validate_shape,
            collections: collections,
            name: name,
            dtype: dtype,
            shape: shape);
    }

    return new RefVariable(initial_value,
        trainable: trainable.Value,
        validate_shape: validate_shape,
        collections: collections,
        name: name,
        dtype: dtype);
}
/// <summary>
/// Thin wrapper over _get_single_variable for IInitializer-backed variables.
/// </summary>
/// <returns>The variable produced by _get_single_variable.</returns>
private RefVariable _true_getter(string name,
    TensorShape shape = null,
    TF_DataType dtype = TF_DataType.TF_FLOAT,
    IInitializer initializer = null,
    bool? trainable = null,
    bool validate_shape = true,
    VariableSynchronization synchronization = VariableSynchronization.AUTO,
    VariableAggregation aggregation = VariableAggregation.NONE)
{
    // FIX: `shape` defaults to null, so the unconditional `shape.NDim` access
    // threw a NullReferenceException; guard like the sibling object-initializer
    // overload does.
    bool is_scalar = !(shape is null) && shape.NDim == 0;
    return _get_single_variable(name: name,
        shape: shape,
        dtype: dtype,
        initializer: initializer,
        trainable: trainable,
        validate_shape: validate_shape,
        synchronization: synchronization,
        aggregation: aggregation);
}
/// <summary>
/// Creates a RefVariable initialized directly from a Tensor and records it in
/// _vars. Reuse of an existing entry is not implemented. The dtype, shape,
/// use_resource, synchronization, and aggregation arguments are currently
/// accepted but unused by this overload.
/// </summary>
/// <param name="name">Store key and variable name.</param>
/// <param name="initializer">Tensor providing the initial value.</param>
/// <returns>The newly created variable.</returns>
/// <exception cref="NotImplementedException">when the name already exists in the store.</exception>
private RefVariable _get_single_variable(string name,
    TensorShape shape = null,
    TF_DataType dtype = TF_DataType.DtInvalid,
    Tensor initializer = null,
    bool reuse = false,
    bool? trainable = null,
    bool validate_shape = false,
    bool? use_resource = null,
    VariableSynchronization synchronization = VariableSynchronization.Auto,
    VariableAggregation aggregation = VariableAggregation.None)
{
    if (use_resource == null)
        use_resource = false;

    if (_vars.ContainsKey(name))
    {
        if (!reuse)
        {
            var var = _vars[name];
        }
        throw new NotImplementedException("_get_single_variable");
    }

    RefVariable v = null;
    // Create the variable.
    ops.init_scope();
    {
        var init_val = initializer;
        // FIX: `trainable.Value` threw when trainable was null; default to true,
        // matching default_variable_creator's behavior for an unspecified flag.
        v = new RefVariable(init_val,
            name: name,
            validate_shape: validate_shape,
            trainable: trainable ?? true);
    }
    _vars[name] = v;
    return v;
}
/// <summary>
/// Store entry point: normalizes dtype to its base type, resolves the effective
/// trainable flag via variable_scope, then defers to _true_getter.
/// </summary>
/// <returns>The variable produced by _true_getter.</returns>
public RefVariable get_variable(string name,
    TensorShape shape = null,
    TF_DataType dtype = TF_DataType.TF_FLOAT,
    IInitializer initializer = null,
    bool? trainable = null,
    bool validate_shape = true,
    VariableSynchronization synchronization = VariableSynchronization.AUTO,
    VariableAggregation aggregation = VariableAggregation.NONE)
{
    var base_dtype = dtype.as_base_dtype();
    bool resolved_trainable = variable_scope._get_trainable_value(synchronization, trainable);
    return _true_getter(name,
        shape: shape,
        dtype: base_dtype,
        initializer: initializer,
        trainable: resolved_trainable,
        validate_shape: validate_shape,
        synchronization: synchronization,
        aggregation: aggregation);
}
/// <summary>
/// Dispatches variable creation on the runtime type of
/// <paramref name="initializer"/> (IInitializer or Tensor); any other value,
/// including null, is not implemented.
/// </summary>
/// <returns>The variable produced by the matching overload.</returns>
/// <exception cref="NotImplementedException">for unsupported initializer types.</exception>
private RefVariable _true_getter(string name,
    TensorShape shape = null,
    TF_DataType dtype = TF_DataType.TF_FLOAT,
    object initializer = null,
    bool? trainable = null,
    bool validate_shape = true,
    VariableSynchronization synchronization = VariableSynchronization.AUTO,
    VariableAggregation aggregation = VariableAggregation.NONE)
{
    var is_scalar = !(shape is null) && shape.NDim == 0;

    switch (initializer)
    {
        case IInitializer init:
            return _get_single_variable(name: name, shape: shape, dtype: dtype,
                initializer: init, trainable: trainable,
                validate_shape: validate_shape, synchronization: synchronization,
                aggregation: aggregation);
        case Tensor tensor:
            return _get_single_variable(name: name, shape: shape, dtype: dtype,
                initializer: tensor, trainable: trainable,
                validate_shape: validate_shape, synchronization: synchronization,
                aggregation: aggregation);
        default:
            throw new NotImplementedException("_true_getter");
    }
}
/// <summary>
/// Adds a new variable to the layer, or gets an existing one; returns it.
/// NOTE(review): after performing the scope bookkeeping this overload returns a
/// fresh tf.Variable(0) placeholder instead of building the requested weight —
/// looks incomplete; confirm before relying on it.
/// </summary>
/// <param name="name"></param>
/// <param name="shape"></param>
/// <param name="dtype"></param>
/// <param name="initializer"></param>
/// <param name="trainable"></param>
/// <param name="synchronization"></param>
/// <param name="aggregation"></param>
/// <returns></returns>
protected virtual IVariableV1 add_weight(string name,
    int[] shape,
    TF_DataType dtype = TF_DataType.DtInvalid,
    IInitializer initializer = null,
    bool trainable = true,
    VariableSynchronization synchronization = VariableSynchronization.Auto,
    VariableAggregation aggregation = VariableAggregation.None)
{
    var graph = ops.get_default_graph();
    Graph init_graph = null;
    IVariableV1[] existing_variables = null;

    // Variables synchronized on read are never trained directly.
    if (synchronization == VariableSynchronization.OnRead)
        trainable = false;

    if (graph.building_function)
        throw new NotImplementedException("add_weight");

    init_graph = graph;
    existing_variables = variables.global_variables().ToArray();

    if (dtype == TF_DataType.DtInvalid)
        dtype = TF_DataType.TF_FLOAT;

    _set_scope();
    var should_reuse = built || (_reuse != null && _reuse.Value);

    // Placeholder result (see NOTE in the summary).
    return tf.Variable(0);
}
/// <summary>
/// Gets or creates a variable in this store: normalizes dtype, resolves the
/// trainable flag, then defers to _true_getter.
/// NOTE(review): <paramref name="reuse"/> is accepted but not forwarded —
/// confirm whether reuse handling is intentionally deferred.
/// </summary>
/// <param name="initializer">IInitializer or Tensor used to initialize a new variable.</param>
/// <returns>The variable produced by _true_getter.</returns>
public IVariableV1 get_variable(string name,
    TensorShape shape = null,
    TF_DataType dtype = TF_DataType.TF_FLOAT,
    object initializer = null, // IInitializer or Tensor
    bool? reuse = null,
    bool? trainable = null,
    List<string> collections = null,
    bool validate_shape = true,
    VariableSynchronization synchronization = VariableSynchronization.Auto,
    VariableAggregation aggregation = VariableAggregation.None)
{
    var base_dtype = dtype.as_base_dtype();
    bool resolved_trainable = variable_scope._get_trainable_value(synchronization, trainable);
    return _true_getter(name,
        shape: shape,
        dtype: base_dtype,
        initializer: initializer,
        trainable: resolved_trainable,
        collections: collections,
        validate_shape: validate_shape,
        synchronization: synchronization,
        aggregation: aggregation);
}
/// <summary>
/// get_variable wrapper: resolves the active variable scope and default
/// variable store, then delegates lookup/creation to them.
/// </summary>
/// <param name="initializer">IInitializer or Tensor used to initialize a new variable.</param>
/// <returns>The variable from the active scope.</returns>
public RefVariable get_variable(string name,
    TensorShape shape = null,
    TF_DataType dtype = TF_DataType.DtInvalid,
    object initializer = null, // IInitializer or Tensor
    bool? trainable = null,
    List<string> collections = null,
    bool? use_resource = null,
    bool validate_shape = true,
    VariableSynchronization synchronization = VariableSynchronization.Auto,
    VariableAggregation aggregation = VariableAggregation.None)
{
    var scope = Tensorflow.variable_scope.get_variable_scope();
    var store = Tensorflow.variable_scope._get_default_variable_store();
    // FIX: synchronization/aggregation were accepted but never forwarded, so
    // callers' settings were silently replaced by the scope's defaults.
    return scope.get_variable(store, name,
        shape: shape,
        dtype: dtype,
        use_resource: use_resource,
        validate_shape: validate_shape,
        initializer: initializer,
        trainable: trainable,
        collections: collections,
        synchronization: synchronization,
        aggregation: aggregation);
}
/// <summary>
/// Minimal default variable creator: validates the trainable/synchronization
/// combination, then builds a RefVariable. Resource variables are not supported
/// yet.
/// </summary>
/// <param name="initial_value">Initial value (Tensor or convertible).</param>
/// <returns>A newly constructed RefVariable.</returns>
/// <exception cref="NotImplementedException">when a resource variable is requested.</exception>
public static RefVariable default_variable_creator(object initial_value,
    string name = "",
    TF_DataType dtype = TF_DataType.DtInvalid,
    bool? use_resource = null,
    VariableSynchronization synchronization = VariableSynchronization.AUTO)
{
    // Result unused here, but the call validates the synchronization mode.
    var trainable = _get_trainable_value(synchronization);

    // Resolve use_resource: explicit argument, then the active scope, then the global default.
    if (use_resource == null)
        use_resource = get_variable_scope().use_resource;
    if (use_resource == null)
        use_resource = _DEFAULT_USE_RESOURCE;

    if (use_resource.Value)
        throw new NotImplementedException();

    return new RefVariable(initial_value, name: name, dtype: dtype);
}
/// <summary>
/// Adds a weight to the layer. Not implemented yet — every call throws.
/// The broad parameter list mirrors the Keras add_weight contract
/// (constraint/partitioner/kwargs included) for future implementation.
/// </summary>
public void add_weight(string name = null, TensorShape shape = null, string dtype = null, Initializer initializer = null, Regularizer regularizer = null, bool?trainable = null, ConstraintBase constraint = null, dynamic partitioner = null, bool?use_resource = null, VariableSynchronization synchronization = VariableSynchronization.Auto, VariableAggregation aggregation = VariableAggregation.None, Dictionary <string, object> kwargs = null) => throw new NotImplementedException();
/// <summary>
/// Adds a new variable to the layer, or gets an existing one; returns it.
/// Enters the layer's variable scope (reusing when the layer is already built)
/// and its name scope before delegating creation to the base implementation,
/// whose getter routes to tf.compat.v1.get_variable.
/// </summary>
/// <param name="name"></param>
/// <param name="shape"></param>
/// <param name="dtype"></param>
/// <param name="initializer"></param>
/// <param name="trainable"></param>
/// <param name="synchronization"></param>
/// <param name="aggregation"></param>
/// <returns></returns>
protected virtual IVariableV1 add_weight(string name,
    int[] shape,
    TF_DataType dtype = TF_DataType.DtInvalid,
    IInitializer initializer = null,
    bool trainable = true,
    VariableSynchronization synchronization = VariableSynchronization.Auto,
    VariableAggregation aggregation = VariableAggregation.None)
{
    var graph = ops.get_default_graph();
    Graph init_graph = null;
    IVariableV1[] existing_variables = null;

    // Variables synchronized on read are never trained directly.
    if (synchronization == VariableSynchronization.OnRead)
        trainable = false;

    if (graph.building_function)
        throw new NotImplementedException("add_weight");

    init_graph = graph;
    existing_variables = variables.global_variables().ToArray();

    if (dtype == TF_DataType.DtInvalid)
        dtype = TF_DataType.TF_FLOAT;

    _set_scope();
    var should_reuse = built || (_reuse != null && _reuse.Value);
    return tf_with(tf.variable_scope(_scope, reuse: should_reuse, auxiliary_name_scope: false), scope =>
    {
        _current_scope = scope;
        return tf_with(ops.name_scope(_name_scope()), delegate
        {
            var created = base.add_weight(name, shape,
                dtype: dtype,
                initializer: initializer,
                trainable: trainable,
                getter: (args) => tf.compat.v1.get_variable(args.Name,
                    shape: args.Shape,
                    dtype: args.DType,
                    initializer: args.Initializer,
                    trainable: args.Trainable));
            return created;
        });
    });
}
/// <summary>
/// Adds a weight to the layer. Not implemented yet — every call throws.
/// Defaults (Sum aggregation, OnRead synchronization) suggest this overload is
/// intended for read-synchronized state; TODO confirm when implementing.
/// </summary>
public void add_weight(string name, TensorShape shape = null, VariableAggregation aggregation = VariableAggregation.Sum, VariableSynchronization synchronization = VariableSynchronization.OnRead, Initializers.Initializer initializer = null, string dtype = null) => throw new NotImplementedException();
/// <summary>
/// Creates a new IVariableV1 from an IInitializer (falling back to
/// glorot-uniform for floating dtypes when none is supplied) and registers it in
/// _vars. Reusing an existing entry is not implemented yet.
/// </summary>
/// <param name="name">Store key and variable name.</param>
/// <param name="initializer">Produces the initial value via call(shape, dtype).</param>
/// <returns>The newly created variable (null when no creation branch ran).</returns>
/// <exception cref="NotImplementedException">when the name already exists in the store.</exception>
private IVariableV1 _get_single_variable(string name,
    TensorShape shape = null,
    TF_DataType dtype = TF_DataType.DtInvalid,
    IInitializer initializer = null,
    bool reuse = false,
    bool? trainable = null,
    List<string> collections = null,
    bool validate_shape = false,
    bool? use_resource = null,
    VariableSynchronization synchronization = VariableSynchronization.Auto,
    VariableAggregation aggregation = VariableAggregation.None)
{
    var initializing_from_value = false;
    if (!use_resource.HasValue)
        use_resource = false;

    if (_vars.ContainsKey(name))
    {
        if (!reuse)
        {
            var existing = _vars[name];
        }
        throw new NotImplementedException("_get_single_variable");
    }

    IVariableV1 v = null;
    // Pick a default initializer when none was supplied (floating dtypes only).
    if (initializer == null && dtype.is_floating())
    {
        initializer = tf.glorot_uniform_initializer;
        initializing_from_value = false;
    }

    // Create the variable.
    ops.init_scope();
    {
        if (!initializing_from_value)
        {
            // Defer initial-value computation until the creator invokes it.
            Func<Tensor> init_val = () => initializer.call(shape, dtype);
            var variable_dtype = dtype.as_base_dtype();
            v = variable_scope.default_variable_creator(init_val,
                name: name,
                trainable: trainable,
                collections: collections,
                dtype: variable_dtype,
                validate_shape: validate_shape,
                synchronization: synchronization,
                aggregation: aggregation);
        }
    }
    _vars[name] = v;
    return v;
}
/// <summary>
/// Adds a new variable to the layer: picks a default initializer when needed,
/// forces read-synchronized variables non-trainable, creates the variable via
/// the custom-getter path, applies any weight regularizer, and registers the
/// result in trainable_weights or non_trainable_weights.
/// </summary>
/// <param name="getter">Optional variable factory; defaults to base_layer_utils.make_variable.</param>
/// <returns>The created variable.</returns>
/// <exception cref="ValueError">when no initializer exists for a non-float, non-integer dtype.</exception>
protected virtual IVariableV1 add_weight(string name,
    Shape shape,
    TF_DataType dtype = TF_DataType.TF_FLOAT,
    IInitializer initializer = null,
    IRegularizer regularizer = null,
    VariableSynchronization synchronization = VariableSynchronization.Auto,
    VariableAggregation aggregation = VariableAggregation.None,
    bool trainable = true,
    Func<VariableArgs, IVariableV1> getter = null)
{
    // Choose a default initializer when the caller supplied none:
    // glorot-uniform for floats, zeros for integers, otherwise fail.
    if (initializer == null)
    {
        if (dtype.is_floating())
            initializer = tf.glorot_uniform_initializer;
        else if (dtype.is_integer())
            initializer = tf.zeros_initializer;
        else
            throw new ValueError($"An initializer for variable {name} of type {dtype.as_base_dtype()} is required for layer {name}");
    }

    // Read-synchronized variables are never trained directly.
    if (synchronization == VariableSynchronization.OnRead)
        trainable = false;

    var args = new VariableArgs
    {
        Name = name,
        Shape = shape,
        DType = dtype,
        Getter = getter ?? base_layer_utils.make_variable,
        Overwrite = true,
        Initializer = initializer,
        Synchronization = synchronization,
        Aggregation = aggregation,
        Trainable = trainable
    };
    var variable = _add_variable_with_custom_getter(args);

    if (regularizer != null)
    {
        // Regularizers are keyed by the name without the output index suffix.
        var name_in_scope = variable.Name.Split(':')[0];
        _handle_weight_regularization(name_in_scope, variable, regularizer);
    }

    //backend.track_variable(variable);
    if (trainable)
        trainable_weights.Add(variable);
    else
        non_trainable_weights.Add(variable);

    return variable;
}
/// <summary>
/// Default Keras-style variable factory. Not implemented yet — every call
/// throws. The parameter list mirrors TensorFlow's make_variable contract so
/// callers can already bind to the intended signature.
/// </summary>
public static VariableV1 make_variable(string name, TensorShape shape = null, TF_DataType dtype = TF_DataType.TF_FLOAT, Initializer initializer = null, bool trainable = true, string caching_device = null, bool validate_shape = true, Constraints.ConstraintBase constraint = null, bool use_resource = false, Graph[] collections = null, VariableSynchronization synchronization = VariableSynchronization.Auto, VariableAggregation aggregation = VariableAggregation.None) => throw new NotImplementedException();