/// <summary>
/// Creates a new variable handle for the given shape/dtype, tagging the handle
/// with shape-and-type metadata (optionally derived from <paramref name="initial_value"/>).
/// </summary>
/// <param name="shape">Static shape recorded on the handle.</param>
/// <param name="dtype">Element dtype recorded on the handle.</param>
/// <param name="shared_name">Shared name passed to the VarHandleOp.</param>
/// <param name="name">Op name for the VarHandleOp.</param>
/// <param name="graph_mode">True when building a graph; false for eager execution.</param>
/// <param name="initial_value">Optional tensor whose handle data is merged in; defaults to the handle itself.</param>
/// <returns>The resource handle tensor.</returns>
public static Tensor variable_handle_from_shape_and_dtype(TensorShape shape, TF_DataType dtype, string shared_name, string name, bool graph_mode, Tensor initial_value = null)
{
    var handle = gen_resource_variable_ops.var_handle_op(shape: shape,
        dtype: dtype,
        shared_name: shared_name,
        name: name,
        container: ops.get_default_graph().Container);

    // With no explicit initializer, derive handle data from the handle itself.
    initial_value = initial_value ?? handle;

    if (graph_mode)
    {
        var combined = _combine_handle_data(handle, initial_value);
        _set_handle_shapes_and_types(handle, combined, graph_mode);
        return handle;
    }

    // Eager path. We do not want two distinct ResourceVariable objects for the
    // same underlying resource in the runtime, so we explicitly check here.
    // (In graph mode this is guaranteed by always generating distinct names.)
    var exists = gen_resource_variable_ops.var_is_initialized_op(handle);

    // An assert Op (rather than an immediate check) keeps this compatible with
    // ASYNC execution; since not all devices support string tensors, the
    // assertion message would be encoded in the Op name.
    /*gen_logging_ops.assert(gen_math_ops.logical_not(exists),
     * new[] { exists },
     * name: "EagerVariableNameReuse");*/

    var eagerHandleData = new HandleData { IsSet = true };
    eagerHandleData.ShapeAndType.Add(new HandleShapeAndType
    {
        Dtype = dtype.as_datatype_enum(),
        Shape = shape.as_proto()
    });
    _set_handle_shapes_and_types(handle, eagerHandleData, graph_mode);
    return handle;
}
/// <summary>
/// Converts a <see cref="TensorShape"/> into its proto representation.
/// The <paramref name="attr_def"/> is accepted for signature compatibility but unused.
/// </summary>
/// <param name="shape">Shape to serialize.</param>
/// <param name="attr_def">Attribute definition (ignored).</param>
/// <returns>The shape as a <see cref="TensorShapeProto"/>.</returns>
public TensorShapeProto _MakeShape(TensorShape shape, AttrDef attr_def)
    => shape.as_proto();