/// <summary>
/// Long short-term memory cell (LSTM). Performs one step of the cell:
/// concatenates the input with the previous hidden state, applies the
/// combined gate kernel, and produces the new cell/hidden state.
/// </summary>
/// <param name="inputs">Input tensor for this time step.</param>
/// <param name="state">Previous state; only used by the non-tuple path, which is not implemented.</param>
/// <param name="is_training">Unused by this implementation.</param>
/// <returns>
/// When <c>_state_is_tuple</c> is true, the new cell state <c>new_c</c>;
/// otherwise the concatenation of <c>new_c</c> and <c>new_h</c> along axis 1.
/// </returns>
protected Tensors Call(Tensors inputs, Tensor state = null, bool is_training = false)
{
    var one = constant_op.constant(1, dtype: dtypes.int32);
    // Parameters of gates are concatenated into one multiply for efficiency.
    Tensor c = null;
    Tensor h = null;
    if (_state_is_tuple)
    {
        // State is kept on the instance as a (c, h) tuple.
        (c, h) = ((Tensor)_state.c, (Tensor)_state.h);
    }
    else
    {
        // Non-tuple state would need to be split into (c, h) halves:
        // array_ops.split(value: state, num_or_size_splits: 2, axis: one);
        throw new NotImplementedException("BasicLstmCell call");
    }
    // Single matmul computes all four gates at once: [x, h] @ kernel + bias.
    var gate_inputs = math_ops.matmul(array_ops.concat(new[] { (Tensor)inputs, h }, 1), _kernel.AsTensor());
    gate_inputs = nn_ops.bias_add(gate_inputs, _bias);

    // i = input_gate, j = new_input, f = forget_gate, o = output_gate
    var tensors = array_ops.split(value: gate_inputs, num_split: 4, axis: one);
    var (i, j, f, o) = (tensors[0], tensors[1], tensors[2], tensors[3]);

    var forget_bias_tensor = constant_op.constant(_forget_bias, dtype: f.dtype);
    // Note that using `add` and `multiply` instead of `+` and `*` gives a
    // performance improvement. So using those at the cost of readability.
    var new_c = gen_math_ops.add(
        math_ops.multiply(c, math_ops.sigmoid(gen_math_ops.add(f, forget_bias_tensor))),
        math_ops.multiply(math_ops.sigmoid(i), _activation.Activate(j)));

    var new_h = math_ops.multiply(_activation.Activate(new_c), math_ops.sigmoid(o));

    if (_state_is_tuple)
    {
        // NOTE(review): only new_c is returned here and new_h is discarded;
        // upstream TF returns both (new_c, new_h) as an LSTMStateTuple —
        // confirm whether the hidden state should also be propagated.
        return (new_c);
    }
    else
    {
        return (array_ops.concat(new[] { new_c, new_h }, 1));
    }
}
/// <summary>
/// Executes the `Einsum` op for the given equation over <paramref name="inputs"/>.
/// </summary>
/// <param name="equation">Einsum equation string, e.g. "ij,jk->ik".</param>
/// <param name="inputs">Operand tensors, in equation order.</param>
/// <param name="name">Optional op name; defaults to "einsum".</param>
/// <returns>The tensor produced by the einsum contraction.</returns>
public static Tensor einsum(string equation, Tensors inputs, string name = null)
{
    return tf_with(ops.name_scope(name, "einsum", inputs), scope =>
    {
        name = scope;
        // Gradient registration needs the equation, operand count and dtype
        // recorded on the op so they can be read back during backprop.
        var args = new ExecuteOpArgs
        {
            OpInputArgs = new object[] { inputs.ToArray() },
            GetGradientAttrs = (op) => new
            {
                equation = op.get_attr<string>("equation"),
                N = op.get_attr<int>("N"),
                T = op.get_attr<TF_DataType>("T")
            }
        };
        return tf.Context.ExecuteOp("Einsum", name, args.SetAttributes(new { equation }));
    });
}
/// <summary>
/// Clips the values of multiple tensors by the ratio of <paramref name="clip_norm"/>
/// to their joint (global) L2 norm, mirroring TensorFlow's clip_by_global_norm.
/// </summary>
/// <param name="t_list">Tensors to clip.</param>
/// <param name="clip_norm">Maximum allowed global norm.</param>
/// <param name="use_norm">Optional precomputed global norm of <paramref name="t_list"/>; computed when null.</param>
/// <param name="name">Optional name scope for the created ops.</param>
/// <returns>The clipped tensors and the global norm that was used.</returns>
public static (Tensors, Tensor) clip_by_global_norm(Tensor[] t_list, float clip_norm, Tensor use_norm = null, string name = null)
{
    // Fix: only compute the global norm when the caller did not supply one.
    // Previously the `use_norm` argument was unconditionally overwritten,
    // making the parameter useless and recomputing the norm every call.
    if (use_norm is null)
        use_norm = global_norm(t_list, name);

    return tf_with(ops.name_scope(name, "clip_by_global_norm", t_list), delegate
    {
        // Calculate L2-norm, clip elements by ratio of clip_norm to L2-norm
        var scale_for_finite = clip_norm * math_ops.minimum(
            1.0f / use_norm,
            constant_op.constant(1.0, dtype: use_norm.dtype) / clip_norm);
        // If use_norm is any finite number, this is a no-op. For inf/-inf/NaN,
        // this will make scale NaN.
        var scale = scale_for_finite + (use_norm - use_norm);
        Tensors values_clipped = new Tensors();
        foreach (var (i, v) in enumerate(t_list))
        {
            values_clipped.Add(array_ops.identity(v * scale, name: $"{name}_{i}"));
        }
        return (values_clipped, use_norm);
    });
}
/// <summary>
/// Groups a flat tensor array back into per-argument sequences according to
/// the op definition: list-typed input args (NumberAttr / TypeListAttr)
/// become one Tensors group spanning their declared length; plain args
/// become single-tensor groups.
/// </summary>
/// <param name="op_def">Op definition describing the input arguments.</param>
/// <param name="inputs">Flat list of input tensors to regroup.</param>
/// <param name="attrs">Attribute values used to resolve sequence lengths.</param>
/// <returns>One Tensors entry per input argument of the op.</returns>
public static Tensors[] _reconstruct_sequence_inputs(OpDef op_def, Tensor[] inputs, MapField<string, AttrValue> attrs)
{
    var grouped = new List<Tensors>();
    int offset = 0;
    foreach (var arg in op_def.InputArg)
    {
        int length = 1;
        bool isSequence = false;
        if (!string.IsNullOrEmpty(arg.NumberAttr))
        {
            // Homogeneous list input: length comes from an integer attr.
            length = (int)attrs[arg.NumberAttr].I;
            isSequence = true;
        }
        else if (!string.IsNullOrEmpty(arg.TypeListAttr))
        {
            // Heterogeneous list input: length is the number of listed dtypes.
            length = attrs[arg.TypeListAttr].List.Type.Count;
            isSequence = true;
        }

        if (isSequence)
        {
            var sequence = new Tensors(inputs.Skip(offset).Take(length).ToArray());
            sequence.IsList = true;
            grouped.Add(sequence);
        }
        else
        {
            grouped.Add(inputs[offset]);
        }

        offset += length;
    }
    return grouped.ToArray();
}
/// <summary>
/// Invokes the layer inside its variable scope, reusing variables once the
/// layer has been built.
/// </summary>
/// <param name="inputs">Input tensors for the layer.</param>
/// <param name="state">Unused by this implementation.</param>
/// <param name="training">Unused by this implementation.</param>
/// <param name="scope">Optional variable scope to run under.</param>
/// <returns>
/// The layer outputs. NOTE(review): `outputs` is never assigned inside the
/// scope body below, so this currently always returns null — the actual
/// layer invocation appears to be stubbed out.
/// </returns>
public Tensors __call__(Tensors inputs, Tensor state = null, Tensor training = null, VariableScope scope = null)
{
    _set_scope(scope);
    _graph = ops._get_graph_from_inputs(inputs, graph: _graph);
    variable_scope scope_context_manager = null;
    if (built)
    {
        // Already built: re-enter the existing scope with reuse enabled.
        scope_context_manager = tf.variable_scope(_scope, reuse: true, auxiliary_name_scope: false);
    }
    else
    {
        // First call: honor the configured reuse flag.
        scope_context_manager = tf.variable_scope(_scope, reuse: _reuse, auxiliary_name_scope: false);
    }
    Tensors outputs = null;
    tf_with(scope_context_manager, scope2 =>
    {
        _current_scope = scope2;
        // Actually call layer
    });
    // Update global default collections.
    return (outputs);
}
/// <summary>
/// Dataset that keeps only the elements of <paramref name="input_dataset"/>
/// for which <paramref name="predicate_func"/> holds. The predicate is traced
/// into a ConcreteFunction over placeholder inputs matching the element spec.
/// </summary>
/// <param name="input_dataset">Source dataset to filter.</param>
/// <param name="predicate_func">Predicate traced over the dataset's element structure.</param>
public FilterDataset(IDatasetV2 input_dataset, Func<Tensors, Tensors> predicate_func) :
    base(input_dataset)
{
    var func = new ConcreteFunction($"{predicate_func.Method.Name}_{Tensorflow.ops.uid_function()}");
    func.Enter();
    // One placeholder per component of the dataset's element structure.
    var placeholders = new Tensors();
    foreach (var spec in input_dataset.element_spec)
        placeholders.Add(tf.placeholder(spec.dtype, shape: spec.shape, name: "arg"));
    var result = predicate_func(placeholders);
    func.ToGraph(placeholders, result);
    func.Exit();
    structure = func.OutputStructure;
    variant_tensor = ops.filter_dataset(input_dataset.variant_tensor, func, output_types, output_shapes);
}
/// <summary>
/// `Model` groups layers into an object with training and inference features.
/// </summary>
/// <param name="inputs">Input tensor(s) of the model.</param>
/// <param name="outputs">Output tensor(s) of the model.</param>
/// <param name="name">Optional name for the model.</param>
/// <returns>A <see cref="Functional"/> model connecting <paramref name="inputs"/> to <paramref name="outputs"/>.</returns>
public Functional Model(Tensors inputs, Tensors outputs, string name = null)
    => new Functional(inputs, outputs, name: name);
/// <summary>
/// Resolves the graph for the given inputs; convenience overload that
/// forwards to the two-argument version with no explicit graph.
/// </summary>
/// <param name="op_input_list">Tensors whose graph should be determined.</param>
/// <returns>The graph the inputs belong to.</returns>
public static Graph _get_graph_from_inputs(Tensors op_input_list)
{
    return _get_graph_from_inputs(op_input_list: op_input_list, graph: null);
}
/// <summary>
/// Creates a dataset containing the given tensors as a single element.
/// </summary>
/// <param name="tensors">Tensors that form the dataset's one element.</param>
/// <returns>A <see cref="TensorDataset"/> wrapping <paramref name="tensors"/>.</returns>
public IDatasetV2 from_tensors(Tensors tensors)
{
    return new TensorDataset(tensors);
}
/// <summary>
/// Evaluates the einsum contraction described by <paramref name="equation"/>;
/// thin wrapper around <c>math_ops.einsum</c>.
/// </summary>
/// <param name="equation">Einsum equation string, e.g. "ij,jk->ik".</param>
/// <param name="inputs">Operand tensors, in equation order.</param>
/// <param name="name">Optional op name.</param>
/// <returns>The tensor produced by the contraction.</returns>
public Tensor einsum(string equation, Tensors inputs, string name = null)
{
    return math_ops.einsum(equation, inputs, name: name);
}
/// <summary>
/// `Model` groups layers into an object with training and inference features.
/// </summary>
/// <param name="inputs">Input tensor(s) of the model.</param>
/// <param name="outputs">Output tensor(s) of the model.</param>
/// <returns>A <see cref="Functional"/> model connecting <paramref name="inputs"/> to <paramref name="outputs"/>.</returns>
public Functional Model(Tensors inputs, Tensors outputs)
    => new Functional(inputs, outputs);