Example #1
        public static Tensor convert_image_dtype(Tensor image, TF_DataType dtype, bool saturate = false, string name = null)
        {
            if (dtype == image.dtype)
            {
                return array_ops.identity(image, name: name);
            }

            return tf_with(ops.name_scope(name, "convert_image", image), scope =>
            {
                name = scope;

                if (image.dtype.is_integer() && dtype.is_integer())
                {
                    throw new NotImplementedException("convert_image_dtype is_integer");
                }
                else if (image.dtype.is_floating() && dtype.is_floating())
                {
                    throw new NotImplementedException("convert_image_dtype is_floating");
                }
                else
                {
                    if (image.dtype.is_integer())
                    {
                        // Converting to float: first cast, then scale. No saturation possible.
                        var cast = math_ops.cast(image, dtype);
                        var scale = 1.0f / image.dtype.max();
                        return math_ops.multiply(cast, scale, name: name);
                    }
                    else
                    {
                        // Remaining case: floating-point image to an integer dtype (scale then cast); not implemented yet.
                        throw new NotImplementedException("convert_image_dtype float to int");
                    }
                }
            });
        }
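A minimal sketch of how the integer-to-float branch above might be exercised. The static class hosting convert_image_dtype is not visible in this snippet, so the tf.image.convert_image_dtype call path and the byte-array constant are assumptions rather than confirmed API.

        // Hypothetical call site: assumes convert_image_dtype is reachable as tf.image.convert_image_dtype
        // (mirroring the Python API) and that tf.constant turns a byte[] into an unsigned 8-bit tensor.
        using static Tensorflow.Binding;

        var image  = tf.constant(new byte[] { 0, 64, 128, 255 });
        var floats = tf.image.convert_image_dtype(image, TF_DataType.TF_FLOAT);
        // Integer-to-float path: each value is cast to float and multiplied by 1 / 255, landing in [0, 1].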
Example #2
        protected virtual IVariableV1 add_weight(string name,
                                                 TensorShape shape,
                                                 TF_DataType dtype        = TF_DataType.DtInvalid,
                                                 IInitializer initializer = null,
                                                 bool? trainable          = null,
                                                 Func<VariableArgs, IVariableV1> getter = null)
        {
            if (dtype == TF_DataType.DtInvalid)
            {
                dtype = TF_DataType.TF_FLOAT;
            }

            if (trainable == null)
            {
                trainable = true;
            }

            // Initialize variable when no initializer provided
            if (initializer == null)
            {
                // If dtype is DT_FLOAT, provide a uniform unit scaling initializer
                if (dtype.is_floating())
                {
                    initializer = tf.glorot_uniform_initializer;
                }
                else if (dtype.is_integer())
                {
                    initializer = tf.zeros_initializer;
                }
                else
                {
                    throw new ValueError($"An initializer for variable {name} of type {dtype.as_base_dtype()} is required for layer {this.Name}");
                }
            }

            var args = new VariableArgs
            {
                Name        = name,
                Shape       = shape,
                DType       = dtype,
                Getter      = getter ?? base_layer_utils.make_variable,
                Overwrite   = true,
                Initializer = initializer,
                Trainable   = trainable.Value
            };
            var variable = _add_variable_with_custom_getter(args);

            //backend.track_variable(variable);
            if (trainable == true)
            {
                trainableWeights.Add(variable);
            }
            else
            {
                nonTrainableWeights.Add(variable);
            }

            return variable;
        }
Example #3
 public Tensor uniform(TensorShape shape,
                       float minval      = 0,
                       float maxval      = 1,
                       TF_DataType dtype = TF_DataType.TF_FLOAT,
                       int? seed         = null,
                       string name       = null)
 {
     if (dtype.is_integer())
     {
         return random_ops.random_uniform_int(shape, (int)minval, (int)maxval, dtype, seed, name);
     }
     else
     {
         return random_ops.random_uniform(shape, minval, maxval, dtype, seed, name);
     }
 }
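A hedged usage sketch of the dtype dispatch above; the class that declares uniform() is not shown, so rnd below stands in for a hypothetical instance of it.

     // rnd is a hypothetical instance of whatever class declares uniform() above.
     var floats = rnd.uniform(new TensorShape(2, 3));                               // floating dtype -> random_ops.random_uniform
     var ints   = rnd.uniform(new TensorShape(2, 3), 0, 10, TF_DataType.TF_INT32);  // integer dtype  -> random_ops.random_uniform_int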
Example #4
        protected virtual RefVariable add_weight(string name,
                                                 int[] shape,
                                                 TF_DataType dtype        = TF_DataType.DtInvalid,
                                                 IInitializer initializer = null,
                                                 bool? trainable          = null,
                                                 Func<string, int[], TF_DataType, IInitializer, bool, RefVariable> getter = null)
        {
            if (dtype == TF_DataType.DtInvalid)
            {
                dtype = TF_DataType.TF_FLOAT;
            }

            if (trainable == null)
            {
                trainable = true;
            }

            // Initialize variable when no initializer provided
            if (initializer == null)
            {
                // If dtype is DT_FLOAT, provide a uniform unit scaling initializer
                if (dtype.is_floating())
                {
                    initializer = tf.glorot_uniform_initializer;
                }
                else if (dtype.is_integer())
                {
                    initializer = tf.zeros_initializer;
                }
                else
                {
                    throw new ValueError($"An initializer for variable {name} of type {dtype.as_base_dtype()} is required for layer {this.name}");
                }
            }
            var variable = _add_variable_with_custom_getter(name,
                                                            shape,
                                                            dtype: dtype,
                                                            getter: (getter == null) ? base_layer_utils.make_variable : getter,
                                                            overwrite: true,
                                                            initializer: initializer,
                                                            trainable: trainable.Value);

            backend.track_variable(variable);
            _trainable_weights.Add(variable);

            return variable;
        }
Example #5
 public static Tensor random_uniform(Tensor shape,
                                     int minval        = 0,
                                     Tensor maxval     = null,
                                     TF_DataType dtype = TF_DataType.TF_FLOAT,
                                     int? seed         = null,
                                     string name       = null)
 {
     return tf_with(ops.name_scope(name, "random_uniform", new { shape, minval, maxval }), scope =>
     {
         name = scope;
         var minTensor = ops.convert_to_tensor(minval, dtype: dtype, name: "min");
         var maxTensor = ops.convert_to_tensor(maxval == null ? 1 : (int)maxval, dtype: dtype, name: "max");
         var (seed1, seed2) = random_seed.get_seed(seed);
         if (dtype.is_integer())
         {
             return gen_random_ops.random_uniform_int(shape, minTensor, maxTensor, seed: seed1, seed2: seed2, name: name);
         }
         else
         {
             var rnd = gen_random_ops.random_uniform(shape, dtype);
             return math_ops.add(rnd * (maxTensor - minTensor), minTensor, name: name);
         }
     });
 }
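A short sketch of the two branches above; the static class defining this overload is not shown, so the bare random_uniform calls and the tf.constant shape tensor are assumptions.

     var shape = tf.constant(new[] { 2, 3 });
     // Floating dtype (default): samples from [0, 1) are rescaled as rnd * (maxval - minval) + minval.
     var floats = random_uniform(shape);
     // Integer dtype: an explicit maxval tensor is required and gen_random_ops.random_uniform_int is used.
     var ints = random_uniform(shape, minval: 0, maxval: tf.constant(10), dtype: TF_DataType.TF_INT32);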
Example #6
        protected virtual IVariableV1 add_weight(string name,
                                                 Shape shape,
                                                 TF_DataType dtype        = TF_DataType.TF_FLOAT,
                                                 IInitializer initializer = null,
                                                 IRegularizer regularizer = null,
                                                 VariableSynchronization synchronization = VariableSynchronization.Auto,
                                                 VariableAggregation aggregation         = VariableAggregation.None,
                                                 bool trainable = true,
                                                 Func<VariableArgs, IVariableV1> getter = null)
        {
            // Initialize variable when no initializer provided
            if (initializer == null)
            {
                // If dtype is DT_FLOAT, provide a uniform unit scaling initializer
                if (dtype.is_floating())
                {
                    initializer = tf.glorot_uniform_initializer;
                }
                else if (dtype.is_integer())
                {
                    initializer = tf.zeros_initializer;
                }
                else
                {
                    throw new ValueError($"An initializer for variable {name} of type {dtype.as_base_dtype()} is required for layer {name}");
                }
            }

            if (synchronization == VariableSynchronization.OnRead)
            {
                trainable = false;
            }

            var args = new VariableArgs
            {
                Name            = name,
                Shape           = shape,
                DType           = dtype,
                Getter          = getter ?? base_layer_utils.make_variable,
                Overwrite       = true,
                Initializer     = initializer,
                Synchronization = synchronization,
                Aggregation     = aggregation,
                Trainable       = trainable
            };
            var variable = _add_variable_with_custom_getter(args);

            if (regularizer != null)
            {
                var name_in_scope = variable.Name.Split(':')[0];
                _handle_weight_regularization(name_in_scope, variable, regularizer);
            }

            //backend.track_variable(variable);
            if (trainable)
            {
                trainable_weights.Add(variable);
            }
            else
            {
                non_trainable_weights.Add(variable);
            }

            return variable;
        }
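To show how a derived layer might call this add_weight overload, here is a hedged sketch; the build override name and signature and the Shape construction are assumptions that differ between TensorFlow.NET versions.

        // Hypothetical derived-layer code; the exact build hook signature varies by version.
        protected override void build(Shape input_shape)
        {
            // Floating-point weight: initializer defaults to tf.glorot_uniform_initializer above.
            var kernel = add_weight("kernel", new Shape(64, 32));

            // Integer weight with OnRead synchronization: initializer defaults to tf.zeros_initializer
            // and trainable is forced to false by the overload above, so it lands in non_trainable_weights.
            var step = add_weight("step", new Shape(1),
                                  dtype: TF_DataType.TF_INT64,
                                  synchronization: VariableSynchronization.OnRead);
        }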