Example #1

        public static Operation resource_apply_gradient_descent(EagerTensor var, EagerTensor alpha, EagerTensor delta, bool use_locking = false, string name = null)
        {
            if (tf.context.executing_eagerly())
            {
                // Fast-path eager execution: the op runs immediately against the
                // native runtime and no graph Operation is produced.
                Status status = c_api.TFE_FastPathExecute(tf.context, tf.context.device_name,
                                                          "ResourceApplyGradientDescent", name,
                                                          new IntPtr[] { var, alpha, delta }, 3,
                                                          op => wrap_tfe_src.SetOpAttrs(op, "use_locking", use_locking),
                                                          null, 0);
                status.Check(true);
                return null;
            }

            // Graph mode: record a ResourceApplyGradientDescent node instead.
            var _op = _op_def_lib._apply_op_helper("ResourceApplyGradientDescent", name, new
            {
                var,
                alpha,
                delta,
                use_locking
            });

            return _op.outputs[0];
        }
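Both variants of this wrapper (here and in Example #10) follow the same two-path shape: run the op immediately through the native fast path when eager execution is on, otherwise record a graph node. A minimal sketch of that dispatch, with ExecuteEagerly and BuildGraphNode as hypothetical stand-ins for TFE_FastPathExecute and _apply_op_helper:

using System;

static class OpDispatchSketch
{
    // Hypothetical stand-ins for TFE_FastPathExecute / _apply_op_helper.
    static object ExecuteEagerly(string opName, object[] inputs)
    {
        Console.WriteLine($"eager: ran {opName} with {inputs.Length} input(s)");
        return null; // apply-ops return nothing useful on the eager path
    }

    static object BuildGraphNode(string opName, object[] inputs)
    {
        Console.WriteLine($"graph: recorded {opName} node");
        return new object(); // stands in for _op.outputs[0]
    }

    public static object ApplyOp(bool executingEagerly, string opName, params object[] inputs)
        => executingEagerly ? ExecuteEagerly(opName, inputs) : BuildGraphNode(opName, inputs);
}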
Example #2

        public static Tensor[] _MulGrad(EagerOperation op, IntPtr[] grads)
        {
            var x = op.InputHandles[0];
            var y = op.InputHandles[1];
            var grad = grads[0];

            // If the gradient w.r.t. the second input is not needed and the
            // incoming gradient is a scalar, skip the shape bookkeeping.
            if (op.SkipInputIndices.Contains(1) &&
                EagerTensor.GetRank(grad) == 0)
            {
                return new Tensor[]
                {
                    null, // gen_math_ops.mul(grad, math_ops.conj(y)),
                    null
                };
            }

            // When both inputs and the gradient have identical, fully known
            // shapes, the product rule reduces to two element-wise multiplies.
            if (_ShapesFullySpecifiedAndEqual(x, y, grad))
            {
                return new Tensor[]
                {
                    gen_math_ops.mul(grad, y),
                    gen_math_ops.mul(grad, x)
                };
            }

            throw new NotImplementedException("_MulGrad with broadcasting shapes");
        }
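_MulGrad is the product rule for z = x * y: the gradient w.r.t. x is grad * y and w.r.t. y is grad * x. A scalar sketch of the rule, independent of any tensor types:

// Product rule per element, as used by _MulGrad when shapes match exactly.
// MulGrad(x: 3f, y: 4f, upstream: 1f) returns (4f, 3f).
static (float dx, float dy) MulGrad(float x, float y, float upstream)
    => (upstream * y, upstream * x);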
Example #3

 public void RawTensorV1()
 {
     // Builds a 2x2 eager tensor directly from a raw float array.
     var c = new EagerTensor(new float[,]
     {
         { 3.0f, 1.0f },
         { 1.0f, 2.0f }
     }, "");
 }
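A hedged usage sketch for the constructor above: the second argument appears to be a device-name string (empty for the default device), and the resulting tensor should expose the usual Tensor surface, though properties such as shape and dtype should be verified against the Tensorflow.NET version in use.

// Assumption: EagerTensor exposes shape/dtype like any Tensor, and ""
// selects the default device. Verify against your TF.NET build.
var c = new EagerTensor(new float[,] { { 3.0f, 1.0f }, { 1.0f, 2.0f } }, "");
Console.WriteLine(c.shape);  // expected: (2, 2)
Console.WriteLine(c.dtype);  // expected: TF_FLOAT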
Example #4

        private static Tensor _eager_fill(int[] dims, EagerTensor value, Context ctx)
        {
            // Executes the Fill op eagerly: dims gives the output shape and
            // value is the scalar broadcast into every element.
            var attr_t = value.dtype.as_datatype_enum();
            var dims_t = convert_to_eager_tensor(dims, ctx, dtypes.int32);
            var inputs_flat = new[] { dims_t, value };
            var attrs = new object[] { "T", attr_t, "index_type", TF_DataType.TF_INT32 };
            var result = tf.Runner.Execute(ctx, "Fill", 1, inputs_flat, attrs);

            return result[0];
        }
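Fill takes a shape and a scalar and materializes a buffer where every element equals that scalar; _eager_fill just forwards that contract to the native kernel. A plain-C# model of the semantics:

using System;

// Plain-C# model of Fill: dims give the element count (row-major),
// and every slot gets the same scalar value.
static float[] Fill(int[] dims, float value)
{
    int count = 1;
    foreach (var d in dims)
        count *= d;

    var buffer = new float[count];
    Array.Fill(buffer, value);
    return buffer;
}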
Example #5

        private static Tensor _eager_reshape(EagerTensor tensor, int[] shape, Context ctx)
        {
            // Executes the Reshape op eagerly; the target shape is first
            // converted into an int32 eager tensor.
            var attr_t = tensor.dtype.as_datatype_enum();
            var dims_t = convert_to_eager_tensor(shape, ctx, dtypes.int32);
            var inputs_flat = new[] { tensor, dims_t };
            var attrs = new object[] { "T", attr_t, "Tshape", TF_DataType.TF_INT32 };
            var result = tf.Runner.Execute(ctx, "Reshape", 1, inputs_flat, attrs);

            return result[0];
        }
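Reshape only reinterprets the same row-major buffer under a new shape, which is why the op needs nothing beyond the input and a shape tensor; the one invariant is that element counts match:

using System.Linq;

// Reshape is valid iff the element counts agree; data is never moved.
static bool CanReshape(int[] fromShape, int[] toShape)
    => fromShape.Aggregate(1, (a, d) => a * d)
       == toShape.Aggregate(1, (a, d) => a * d);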
Example #6

        public EagerTensor GetTensor(IntPtr handle)
        {
            // One managed wrapper per native handle: return the cached
            // EagerTensor if this handle has been seen before.
            if (tensors.TryGetValue(handle, out var tensor))
            {
                return tensor;
            }

            tensors[handle] = new EagerTensor(handle);
            return tensors[handle];
        }
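GetTensor is a memoizing factory: each native handle maps to exactly one managed wrapper, so two lookups of the same handle are reference-equal. The same pattern in a generic, standalone form:

using System;
using System.Collections.Generic;

// Generic handle cache: one managed wrapper per native handle.
class HandleCache<T> where T : class
{
    readonly Dictionary<IntPtr, T> _cache = new Dictionary<IntPtr, T>();
    readonly Func<IntPtr, T> _factory;

    public HandleCache(Func<IntPtr, T> factory) => _factory = factory;

    public T Get(IntPtr handle)
    {
        if (!_cache.TryGetValue(handle, out var value))
        {
            value = _factory(handle);
            _cache[handle] = value;
        }
        return value;
    }
}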
Example #7

        protected override Operation _resource_apply_dense(ResourceVariable var, EagerTensor grad, Dictionary<DeviceDType, Dictionary<string, Tensor>> _apply_state)
        {
            if (_momentum)
            {
                throw new NotImplementedException("_resource_apply_dense");
            }

            // Pick the per-(device, dtype) state slot matching this variable,
            // then apply plain gradient descent with its cached learning rate.
            var device_dtype = _apply_state.Keys.FirstOrDefault(x => x.Device == var.Device && x.DType == var.dtype.as_base_dtype());

            return gen_training_ops.resource_apply_gradient_descent(var.Handle as EagerTensor,
                                                                    _apply_state[device_dtype]["lr_t"] as EagerTensor,
                                                                    grad,
                                                                    use_locking: _use_locking);
        }
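_apply_state caches per-(device, dtype) hyperparameter tensors such as "lr_t", so the optimizer resolves the right learning rate for each variable placement. A reduced sketch with a value-tuple key standing in for the DeviceDType class:

using System.Collections.Generic;

// Reduced model of _apply_state: (device, dtype) -> {"lr_t": value}.
class ApplyStateSketch
{
    readonly Dictionary<(string Device, string DType), Dictionary<string, float>> _state
        = new Dictionary<(string, string), Dictionary<string, float>>();

    public void SetLearningRate(string device, string dtype, float lr)
        => _state[(device, dtype)] = new Dictionary<string, float> { ["lr_t"] = lr };

    public float LearningRateFor(string device, string dtype)
        => _state[(device, dtype)]["lr_t"];
}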
Example #8

        public static bool _ShapesFullySpecifiedAndEqual(IntPtr x, IntPtr y, IntPtr grad)
        {
            var x_shape = EagerTensor.GetDims(x);
            var y_shape = EagerTensor.GetDims(y);
            var grad_shape = EagerTensor.GetDims(grad);

            // All three shapes must be known, identical, and free of the
            // unknown-dimension marker (-1).
            return x_shape != null &&
                   y_shape != null &&
                   Enumerable.SequenceEqual(x_shape, y_shape) &&
                   Enumerable.SequenceEqual(y_shape, grad_shape) &&
                   !x_shape.Contains(-1);
        }
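The same predicate over plain int[] shapes makes the contract easier to see: every shape must be known, all three must be identical, and no dimension may be the unknown marker -1.

using System.Linq;

// Demo-only analogue of the IntPtr-based check above.
static bool ShapesFullySpecifiedAndEqual(int[] x, int[] y, int[] grad)
    => x != null && y != null
       && x.SequenceEqual(y)
       && y.SequenceEqual(grad)
       && !x.Contains(-1);

// ShapesFullySpecifiedAndEqual(new[] { 2, 3 }, new[] { 2, 3 }, new[] { 2, 3 })   -> true
// ShapesFullySpecifiedAndEqual(new[] { 2, -1 }, new[] { 2, -1 }, new[] { 2, -1 }) -> false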
Example #9

        /// <summary>
        /// Converts the given `value` to a `Tensor`.
        /// </summary>
        /// <param name="value">The object to convert.</param>
        /// <param name="dtype">Optional element type for the returned tensor.</param>
        /// <param name="name">Optional name for the created op.</param>
        /// <returns>A `Tensor` based on `value`.</returns>
        public static Tensor convert_to_tensor(object value,
                                               TF_DataType dtype = TF_DataType.DtInvalid,
                                               string name = null,
                                               bool as_ref = false,
                                               TF_DataType preferred_dtype = TF_DataType.DtInvalid,
                                               Context ctx = null)
        {
            if (dtype == TF_DataType.DtInvalid)
            {
                dtype = preferred_dtype;
            }

            // Eager tensors pass through untouched in eager mode; inside a
            // function graph they must be captured explicitly.
            if (value is EagerTensor eager_tensor)
            {
                if (tf.executing_eagerly())
                {
                    return eager_tensor;
                }

                var graph = get_default_graph();
                if (!graph.building_function)
                {
                    throw new RuntimeError("Attempting to capture an EagerTensor without building a function.");
                }
                return (graph as FuncGraph).capture(eager_tensor, name: name);
            }

            // Type-based dispatch; more specific arms come before general ones.
            Tensor ret = value switch
            {
                NDArray nd => constant_op.constant(nd, dtype: dtype, name: name),
                EagerTensor tensor => tensor.dtype == TF_DataType.TF_RESOURCE
                            ? tensor.AsPlaceholder(name: name)
                            : tensor.AsConstant(name: name),
                Tensor tensor => tensor,
                Tensor[] tensors => array_ops._autopacking_helper(tensors, dtype, name == null ? "packed" : name),
                RefVariable varVal => varVal._TensorConversionFunction(dtype: dtype, name: name, as_ref: as_ref),
                ResourceVariable varVal => varVal._TensorConversionFunction(dtype: dtype, name: name, as_ref: as_ref),
                TensorShape ts => constant_op.constant(ts.dims, dtype: dtype, name: name),
                int[] dims => constant_op.constant(dims, dtype: dtype, name: name),
                string str => constant_op.constant(str, dtype: tf.@string, name: name),
                string[] str => constant_op.constant(str, dtype: tf.@string, name: name),
                IEnumerable<object> objects => array_ops._autopacking_conversion_function(objects, dtype: dtype, name: name),
                _ => constant_op.constant(value, dtype: dtype, name: name)
            };

            return ret;
        }
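The switch expression matches arms top-down, so more derived types must precede their bases (EagerTensor before Tensor here, since an EagerTensor is a Tensor). A standalone illustration of why that ordering matters:

using System;

abstract class Node { }
class Leaf : Node { public int Value; }

static class DispatchSketch
{
    // Arms match top-down: if the Node arm came first, Leaf could never match.
    public static string Describe(object value) => value switch
    {
        Leaf leaf => $"leaf({leaf.Value})", // most specific first
        Node _    => "interior node",
        _         => throw new ArgumentException("not a node")
    };
}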
Example #10

        public static Operation resource_apply_gradient_descent(EagerTensor var, EagerTensor alpha, EagerTensor delta, bool use_locking = false, string name = null)
        {
            if (tf.context.executing_eagerly())
            {
                // Newer fast-path variant of Example #1: inputs and attributes
                // are passed as a single params list through tf.Runner.
                var result = tf.Runner.TFE_FastPathExecute(tf.context, tf.context.device_name,
                                                           "ResourceApplyGradientDescent", name,
                                                           null,
                                                           var, alpha, delta,
                                                           "use_locking", use_locking);
                return null;
            }

            var _op = tf._op_def_lib._apply_op_helper("ResourceApplyGradientDescent", name, new
            {
                var,
                alpha,
                delta,
                use_locking
            });

            return _op.outputs[0];
        }
Example #11

 public override string ToString()
 {
     // In eager mode the current value can be read back immediately, so it
     // is included in the representation; in graph mode only metadata is.
     if (tf.context.executing_eagerly())
     {
         return $"tf.Variable: '{Name}' shape={string.Join(",", shape)}, dtype={dtype.as_numpy_name()}, numpy={EagerTensor.GetFormattedString(dtype, numpy())}";
     }

     return $"tf.Variable: '{Name}' shape={string.Join(",", shape)}, dtype={dtype.as_numpy_name()}";
 }
Example #12

 public void watch(EagerTensor x)
 {
     // Marks x as watched so the tape records the operations that use it.
     c_api.TFE_TapeWatch(_handle, x.EagerTensorHandle);
 }
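watch is what the public tape API calls for tensors the tape would not otherwise track (e.g. constants). A hedged usage sketch, assuming the Python-style tape surface (tf.GradientTape, tape.watch, tape.gradient) that Tensorflow.NET mirrors; exact names and types may differ by version:

 // Sketch only; verify the tape API against your Tensorflow.NET version.
 var x = tf.constant(3.0f);
 using (var tape = tf.GradientTape())
 {
     tape.watch(x);                    // constants must be watched explicitly
     var y = x * x;
     var dy_dx = tape.gradient(y, x);  // expected: 6.0
 }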
Example #13

        private unsafe void InitGradientEnvironment()
        {
            // Registers the managed "vector space" with the native runtime:
            // the first callback produces a ones tensor to seed a gradient,
            // the second sums a list of incoming gradients with add_n.
            var vspace = c_api.VSpace_Handle((shape, dims, dtype) =>
            {
                var ones = constant_op.constant(1.0f, dtype: dtype) as EagerTensor;
                return ones.EagerTensorHandle;
            }, (gradients, num_grads) =>
            {
                var input_grads = new EagerTensor[num_grads];
                for (int i = 0; i < num_grads; i++)
                {
                    input_grads[i] = new EagerTensor(*((IntPtr*)gradients + i));
                }

                var add_n = gen_math_ops.add_n(input_grads);
                return (add_n as EagerTensor).EagerTensorHandle;
            });

            ops.RegisterFromAssembly();

            // Bridges native gradient requests to managed gradient functions:
            // unwrap the raw handle arrays into EagerTensor arrays, look up
            // the function registered for op_name, then wrap the results.
            c_api.TFE_RegisterGradientFunction((op_name, op_inputs, op_outputs, num_attrs, output_grads, skip_input_indices) =>
            {
                var input_tensors = new EagerTensor[op_inputs.length];
                for (int i = 0; i < op_inputs.length; i++)
                {
                    input_tensors[i] = new EagerTensor(*((IntPtr*)op_inputs.array + i));
                }

                var output_tensors = new EagerTensor[op_outputs.length];
                for (int i = 0; i < op_outputs.length; i++)
                {
                    if (op_outputs.array != IntPtr.Zero)
                    {
                        output_tensors[i] = new EagerTensor(*((IntPtr*)op_outputs.array + i));
                    }
                }

                var output_grad_tensors = new EagerTensor[output_grads.length];
                for (int i = 0; i < output_grads.length; i++)
                {
                    output_grad_tensors[i] = new EagerTensor(*((IntPtr*)output_grads.array + i));
                }

                var skip_input_indices_param = new int[skip_input_indices.length];
                for (int i = 0; i < skip_input_indices.length; i++)
                {
                    skip_input_indices_param[i] = *((int*)skip_input_indices.array + i);
                }

                var gradients = ops.gradientFunctions[op_name](new EagerOperation
                {
                    NumInputs = input_tensors.Length,
                    Inputs = input_tensors,
                    Outputs = output_tensors,
                    SkipInputIndices = skip_input_indices_param
                }, output_grad_tensors);

                // A null managed gradient maps to IntPtr.Zero on the native side.
                var gradients_handles = gradients.Select(x => x == null ? IntPtr.Zero : (x as EagerTensor).EagerTensorHandle).ToArray();
                return c_api.TFE_WrapGradientResult(gradients_handles, gradients.Length);
            });
        }
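Most of InitGradientEnvironment is marshalling between raw handles and managed EagerTensor arrays; the dispatch itself is a plain name-to-delegate registry (ops.gradientFunctions). Its shape, reduced to standalone C#:

using System;
using System.Collections.Generic;

// Reduced model of ops.gradientFunctions: op name -> gradient function.
// The real registry maps (EagerOperation, output grads) -> input grads;
// floats stand in for tensors here.
static class GradientRegistrySketch
{
    static readonly Dictionary<string, Func<float[], float[]>> _registry
        = new Dictionary<string, Func<float[], float[]>>();

    public static void Register(string opName, Func<float[], float[]> fn)
        => _registry[opName] = fn;

    public static float[] Dispatch(string opName, float[] outputGrads)
        => _registry[opName](outputGrads);
}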