public TensorArray scatter(Tensor indices, Tensor value, string name = null)
{
    return tf_with(ops.name_scope(name, "TensorArrayScatter", new { _handle, value, indices }), delegate
    {
        value = ops.convert_to_tensor(value, preferred_dtype: _dtype, name: "value");

        if (_infer_shape)
        {
            var shape = new TensorShape(value.TensorShape.dims.Skip(1).ToArray());
            _merge_element_shape(shape);
        }

        _maybe_colocate_with(value);

        var flow_out = gen_data_flow_ops.tensor_array_scatter_v3(
            handle: _handle,
            indices: indices,
            value: value,
            flow_in: _flow,
            name: name);

        var ta = new TensorArray(_dtype,
            infer_shape: _infer_shape,
            element_shape: _element_shape[0],
            dynamic_size: _dynamic_size,
            handle: _handle,
            flow: flow_out,
            colocate_with_first_write_call: _colocate_with_first_write_call);

        return ta;
    });
}
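// Hedged usage sketch, not part of the original source: it illustrates the intended
// contract of scatter, i.e. row i of `value` is written to the array slot named by
// indices[i], and the updated flow is threaded through the returned TensorArray.
// It reuses members that appear elsewhere in this file (array_ops.shape, slice, the
// TensorArray constructor, read); math_ops.range and the constructor defaults are
// assumptions about this codebase, and the method name itself is hypothetical.
public TensorArray scatter_rows_example(Tensor value)
{
    // Number of rows to scatter, taken from the leading dimension of `value`.
    var n = array_ops.shape(value).slice(0);

    // Assumed helper: produces the index vector 0, 1, ..., n-1 as an int32 tensor.
    var indices = math_ops.range(n);

    // One slot per row; remaining constructor arguments are assumed to have defaults.
    var ta = new TensorArray(dtype: value.dtype, size: n);

    // After this, ta.read(i) yields value[i]; the flow is carried by the returned array.
    return ta.scatter(indices, value);
}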
/// <summary>
/// Internal implementation of Dynamic RNN.
/// </summary>
/// <param name="cell">The RNN cell to run at each time step.</param>
/// <param name="inputs">Time-major input tensor of shape [time, batch, depth].</param>
/// <param name="initial_state">Initial state for the cell.</param>
/// <param name="parallel_iterations">Number of iterations allowed to run in parallel in the while loop.</param>
/// <param name="swap_memory">Whether to swap tensors between GPU and CPU memory during the loop.</param>
/// <param name="sequence_length">Optional int32 vector of per-batch sequence lengths.</param>
/// <param name="dtype">Data type for the initial state and outputs when it cannot be inferred.</param>
/// <returns>A tuple of (outputs, final state).</returns>
private static (Tensor, Tensor) _dynamic_rnn_loop(RnnCell cell, Tensor inputs, Tensor initial_state,
    int parallel_iterations, bool swap_memory, Tensor sequence_length = null, TF_DataType dtype = TF_DataType.DtInvalid)
{
    var state = initial_state;
    var state_size = cell.state_size;

    var flat_input = nest.flatten(inputs);
    var flat_output_size = nest.flatten(cell.output_size);

    // Construct an initial output
    var input_shape = array_ops.shape(flat_input[0]);
    var time_steps = input_shape.slice(0);
    var batch_size = _best_effort_input_batch_size(flat_input);
    var inputs_got_shape = flat_input.Select(input_ => input_.TensorShape.with_rank_at_least(3)).ToArray();

    var dims = inputs_got_shape[0].dims.Take(2).ToArray();
    var (const_time_steps, const_batch_size) = (dims[0], dims[1]);

    foreach (var shape in inputs_got_shape)
    {
        if (shape.dims[2] == -1)
        {
            throw new ValueError("Input size (depth of inputs) must be accessible via shape inference," +
                " but saw value None.");
        }

        var got_time_steps = shape.dims[0];
        var got_batch_size = shape.dims[1];

        if (const_time_steps != got_time_steps)
        {
            throw new ValueError("Time steps is not the same for all the elements in the input in a " +
                "batch.");
        }

        if (const_batch_size != got_batch_size)
        {
            throw new ValueError("Batch_size is not the same for all the elements in the input.");
        }
    }

    Func<int, Tensor> _create_zero_arrays = (size_) =>
    {
        var size = rnn_cell_impl._concat(batch_size, size_);
        return array_ops.zeros(
            array_ops.stack(size),
            dtype: _infer_state_dtype(dtype, state));
    };

    // Prepare dynamic conditional copying of state & output
    var flat_zero_output = flat_output_size.Select(output => _create_zero_arrays(output)).ToArray();
    var zero_output = nest.pack_sequence_as(structure: cell.output_size, flat_sequence: flat_zero_output);

    Tensor min_sequence_length = null, max_sequence_length = null;
    if (sequence_length != null)
    {
        min_sequence_length = math_ops.reduce_min(sequence_length);
        max_sequence_length = math_ops.reduce_max(sequence_length);
    }
    else
    {
        max_sequence_length = time_steps;
    }

    var time = array_ops.constant(0, dtype: dtypes.int32, name: "time");

    string base_name = null;
    tf_with(ops.name_scope("dynamic_rnn"), scope => base_name = scope);

    Func<string, TensorShape, TF_DataType, TensorArray> _create_ta = (name, element_shape, dtype_) =>
    {
        var ta = new TensorArray(dtype: dtype_,
            size: time_steps,
            element_shape: element_shape,
            tensor_array_name: base_name + name);
        return ta;
    };

    bool in_graph_mode = true;
    var output_ta = new List<TensorArray>();
    var input_ta = new List<TensorArray>();

    if (in_graph_mode)
    {
        // One output TensorArray per flattened output, one input TensorArray per flattened input.
        foreach (var (i, out_size) in enumerate(flat_output_size))
        {
            output_ta.Add(_create_ta($"output_{i}",
                new TensorShape(const_batch_size).concatenate(
                    _maybe_tensor_shape_from_tensor(out_size)),
                _infer_state_dtype(dtype, state)));
        }

        foreach (var (i, flat_input_i) in enumerate(flat_input))
        {
            input_ta.Add(_create_ta($"input_{i}",
                new TensorShape(flat_input_i.dims.Skip(1).ToArray()),
                flat_input_i.dtype));
        }

        // Unstack the inputs along the time dimension into the input TensorArrays.
        input_ta = zip(input_ta, flat_input).Select(x =>
        {
            var (ta, input_) = (x.Item1, x.Item2);
            return ta.unstack(input_);
        }).ToList();
    }

    // Make sure that we run at least 1 step, if necessary, to ensure
    // the TensorArrays pick up the dynamic shape.
    Tensor loop_bound = null;
    if (in_graph_mode)
    {
        loop_bound = math_ops.minimum(
            time_steps, math_ops.maximum(1, max_sequence_length));
    }

    Func<BodyItemInRnnWhileLoop, Tensor> cond = (item) =>
    {
        return item.time < loop_bound;
    };

    // Take a time step of the dynamic RNN.
    Func<BodyItemInRnnWhileLoop, BodyItemInRnnWhileLoop> _time_step = (item) =>
    {
        Tensor[] input_t = null;
        var (time1, output_ta_t, state1) = (item.time, item.output_ta_t, item.state);

        if (in_graph_mode)
        {
            // Read this step's input slice from every input TensorArray.
            input_t = input_ta.Select(ta => ta.read(time1)).ToArray();

            // Restore some shape information
            foreach (var (input_, shape) in zip(input_t, inputs_got_shape))
                input_.set_shape(shape[new Slice(1)]);
        }
        else
        {
            // input_t = tuple(ta[time.numpy()] for ta in input_ta)
        }

        var input_t_t = nest.pack_sequence_as2(structure: inputs, flat_sequence: input_t);

        // Keras RNN cells only accept state as list, even if it's a single tensor.
        // var is_keras_rnn_cell = _is_keras_rnn_cell(cell);
        Tensor[] outputs = null;
        if (sequence_length != null)
            throw new NotImplementedException("sequence_length != null");
        else
            outputs = cell.__call__(input_t_t, state: state1);

        var (output, new_state) = (outputs[0], outputs[1]);

        // Keras cells always wrap state as list, even if it's a single tensor.
        // if(is_keras_rnn_cell && len(new_state)) == 1

        // Pack state if using state tuples
        outputs = nest.flatten2(output).Select(x => x as Tensor).ToArray();

        // Write this step's outputs into the output TensorArrays.
        output_ta_t = zip(output_ta_t, outputs).Select(x =>
        {
            var (ta, @out) = (x.Item1, x.Item2);
            return ta.write(item.time, @out);
        }).ToArray();

        return new BodyItemInRnnWhileLoop(item.time + 1, output_ta_t, new_state);
    };
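    // Hedged sketch of the remaining wiring, not in the original snippet (which ends
    // here): the commented-out call in the variant below suggests `cond` and `_time_step`
    // are meant to be handed to control_flow_ops.while_loop, after which each output
    // TensorArray would be stacked back into a time-major tensor, e.g.:
    //
    //   var final_item = control_flow_ops.while_loop(
    //       cond: cond,
    //       body: _time_step,
    //       loop_vars: new BodyItemInRnnWhileLoop(time, output_ta.ToArray(), state),
    //       parallel_iterations: parallel_iterations,
    //       swap_memory: swap_memory);
    //   var final_outputs = final_item.output_ta_t.Select(ta => ta.stack()).ToArray();
    //
    // The exact while_loop overload and the stack() helper are assumptions about this
    // codebase, not confirmed by the snippet.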
/// <summary>
/// Internal implementation of Dynamic RNN.
/// </summary>
/// <param name="cell">The RNN cell to run at each time step.</param>
/// <param name="inputs">Time-major input tensor of shape [time, batch, depth].</param>
/// <param name="initial_state">Initial state for the cell.</param>
/// <param name="parallel_iterations">Number of iterations allowed to run in parallel in the while loop.</param>
/// <param name="swap_memory">Whether to swap tensors between GPU and CPU memory during the loop.</param>
/// <param name="sequence_length">Optional int32 vector of per-batch sequence lengths.</param>
/// <param name="dtype">Data type for the initial state and outputs when it cannot be inferred.</param>
/// <returns>A tuple of (outputs, final state).</returns>
private static (Tensor, Tensor) _dynamic_rnn_loop(RNNCell cell, Tensor inputs, Tensor initial_state,
    int parallel_iterations, bool swap_memory, Tensor sequence_length = null, TF_DataType dtype = TF_DataType.DtInvalid)
{
    var state = initial_state;
    var state_size = cell.state_size;

    var flat_input = nest.flatten(inputs);
    var flat_output_size = nest.flatten(cell.output_size);

    // Construct an initial output
    var input_shape = array_ops.shape(flat_input[0]);
    var time_steps = input_shape.slice(0);
    var batch_size = _best_effort_input_batch_size(flat_input);
    var inputs_got_shape = flat_input.Select(input_ => input_.TensorShape.with_rank_at_least(3)).ToArray();

    var dims = inputs_got_shape[0].dims.Take(2).ToArray();
    var (const_time_steps, const_batch_size) = (dims[0], dims[1]);

    foreach (var shape in inputs_got_shape)
    {
        if (shape.dims[2] == -1)
        {
            throw new ValueError("Input size (depth of inputs) must be accessible via shape inference," +
                " but saw value None.");
        }

        var got_time_steps = shape.dims[0];
        var got_batch_size = shape.dims[1];

        if (const_time_steps != got_time_steps)
        {
            throw new ValueError("Time steps is not the same for all the elements in the input in a " +
                "batch.");
        }

        if (const_batch_size != got_batch_size)
        {
            throw new ValueError("Batch_size is not the same for all the elements in the input.");
        }
    }

    Func<int, Tensor> _create_zero_arrays = (size_) =>
    {
        var size = rnn_cell_impl._concat(batch_size, size_);
        return array_ops.zeros(
            array_ops.stack(size),
            dtype: _infer_state_dtype(dtype, state));
    };

    // Prepare dynamic conditional copying of state & output
    var flat_zero_output = flat_output_size.Select(output => _create_zero_arrays(output)).ToArray();
    var zero_output = nest.pack_sequence_as(structure: cell.output_size, flat_sequence: flat_zero_output);

    Tensor min_sequence_length = null, max_sequence_length = null;
    if (sequence_length != null)
    {
        min_sequence_length = math_ops.reduce_min(sequence_length);
        max_sequence_length = math_ops.reduce_max(sequence_length);
    }
    else
    {
        max_sequence_length = time_steps;
    }

    var time = array_ops.constant(0, dtype: dtypes.int32, name: "time");

    string base_name = null;
    tf_with(ops.name_scope("dynamic_rnn"), scope => base_name = scope);

    Func<string, TensorShape, TF_DataType, TensorArray> _create_ta = (name, element_shape, dtype_) =>
    {
        var ta = new TensorArray(dtype: dtype_,
            size: time_steps,
            element_shape: new[] { element_shape },
            tensor_array_name: base_name + name);
        return ta;
    };

    bool in_graph_mode = true;
    var output_ta = new List<TensorArray>();
    var input_ta = new List<TensorArray>();

    if (in_graph_mode)
    {
        // One output TensorArray per flattened output, one input TensorArray per flattened input.
        foreach (var (i, out_size) in enumerate(flat_output_size))
        {
            output_ta.Add(_create_ta($"output_{i}",
                new TensorShape(const_batch_size).concatenate(
                    _maybe_tensor_shape_from_tensor(out_size)),
                _infer_state_dtype(dtype, state)));
        }

        foreach (var (i, flat_input_i) in enumerate(flat_input))
        {
            input_ta.Add(_create_ta($"input_{i}",
                new TensorShape(flat_input_i.dims.Skip(1).ToArray()),
                flat_input_i.dtype));
        }

        // Unstack each flat input along the time dimension into its TensorArray.
        for (int i = 0; i < input_ta.Count; i++)
            input_ta[i] = input_ta[i].unstack(flat_input[i]);
    }

    // Make sure that we run at least 1 step, if necessary, to ensure
    // the TensorArrays pick up the dynamic shape.
    Tensor loop_bound = null;
    if (in_graph_mode)
    {
        loop_bound = math_ops.minimum(
            time_steps, math_ops.maximum(1, max_sequence_length));
    }

    /*Func<Tensor, Tensor> cond = (ctime) =>
    {
        return null;
    };

    control_flow_ops.while_loop(
        cond: cond,
        body = );*/

    throw new NotImplementedException("");
}