public Tensor resize_images_v2(Tensor images, TensorShape size, string method = ResizeMethod.BILINEAR,
    bool preserve_aspect_ratio = false, bool antialias = false, string name = null)
    => image_ops_impl.resize_images(images, tf.constant(size.dims), method, preserve_aspect_ratio, antialias, name);
public unsafe Tensor placeholder(TF_DataType dtype, TensorShape shape = null, string name = null)
    => gen_array_ops.placeholder(dtype, shape, name);
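// Added usage sketch (not from the original source): feeding a placeholder in graph
// mode. Assumes `using static Tensorflow.Binding;` and `using NumSharp;`, plus the
// TensorFlow.NET-style tf.placeholder / tf.Session / FeedItem surface.
void PlaceholderUsageSketch()
{
    var x = tf.placeholder(tf.int32, shape: new TensorShape(2, 2));
    var y = x * 2;
    using (var sess = tf.Session())
    {
        var data = np.array(new[,] { { 1, 2 }, { 3, 4 } });
        var result = sess.run(y, new FeedItem(x, data));
    }
}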
public Tensor resize(Tensor image, TensorShape size, string method = ResizeMethod.BILINEAR)
    => image_ops_impl.resize_images_v2(image, size, method: method);
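// Added usage sketch for the resize wrappers above; the tf.image entry point and the
// ResizeMethod.NEAREST_NEIGHBOR constant are assumptions based on how these instance
// methods are typically exposed in this codebase.
void ResizeUsageSketch(Tensor image)
{
    // Default bilinear resize to 224x224.
    var bilinear = tf.image.resize(image, new TensorShape(224, 224));
    // Explicit method selection through resize_images_v2.
    var nearest = tf.image.resize_images_v2(image, new TensorShape(64, 64),
        method: ResizeMethod.NEAREST_NEIGHBOR);
}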
private RefVariable _get_single_variable(string name,
    TensorShape shape = null,
    TF_DataType dtype = TF_DataType.DtInvalid,
    IInitializer initializer = null,
    bool reuse = false,
    bool? trainable = null,
    List<string> collections = null,
    bool validate_shape = false,
    bool? use_resource = null,
    VariableSynchronization synchronization = VariableSynchronization.Auto,
    VariableAggregation aggregation = VariableAggregation.None)
{
    bool initializing_from_value = false;
    if (use_resource == null)
        use_resource = false;

    if (_vars.ContainsKey(name))
    {
        // Returning an existing variable (reuse == true) is not supported yet.
        throw new NotImplementedException("_get_single_variable");
    }

    RefVariable v = null;
    // Create the tensor to initialize the variable with default value.
    if (initializer == null)
    {
        if (dtype.is_floating())
        {
            initializer = tf.glorot_uniform_initializer;
            initializing_from_value = false;
        }
    }

    // Create the variable inside an init scope.
    ops.init_scope();
    {
        if (initializing_from_value)
        {
            // Initializing directly from a value tensor is not implemented.
        }
        else
        {
            Func<Tensor> init_val = () => initializer.call(shape, dtype);
            var variable_dtype = dtype.as_base_dtype();
            v = variable_scope.default_variable_creator(init_val,
                name: name,
                trainable: trainable,
                collections: collections,
                dtype: variable_dtype,
                validate_shape: validate_shape,
                synchronization: synchronization,
                aggregation: aggregation);
        }
    }

    _vars[name] = v;
    return v;
}
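// Added, hedged sketch: _get_single_variable is normally reached through a
// get_variable-style front end; the tf.get_variable name and signature here are an
// assumption, not confirmed by this snippet.
void VariableUsageSketch()
{
    // A floating-point dtype with no initializer falls back to glorot_uniform above.
    var w = tf.get_variable("w", shape: new TensorShape(784, 10), dtype: tf.float32);
}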
/// <param name="verify_shape">Boolean that enables verification of a shape of values.</param> public static Tensor _constant_impl(object value, TF_DataType dtype, TensorShape shape, string name, bool verify_shape, bool allow_broadcast) { if (tf.Context.executing_eagerly()) { var t = convert_to_eager_tensor(value, tf.Context, dtype: dtype); if (shape == null) { return(t); } if (t.shape.SequenceEqual(shape.dims)) { return(t); } if (verify_shape) { throw new TypeError($"Expected Tensor's shape: {shape}, got {t.shape}."); } var num_t = t.TensorShape.num_elements(); if (num_t == shape.num_elements()) { throw new NotImplementedException(""); } if (num_t == 1) { if (t.dtype == dtypes.@bool) { throw new NotImplementedException(""); } else { return(_eager_fill(shape, t, tf.Context)); } } } Graph g = ops.get_default_graph(); var tensor_value = new AttrValue(); tensor_value.Tensor = tensor_util.make_tensor_proto(value, dtype: dtype, shape: shape, verify_shape: verify_shape, allow_broadcast: allow_broadcast); var dtype_value = new AttrValue { Type = tensor_value.Tensor.Dtype, }; var attrs = new Dictionary <string, AttrValue>(); attrs["value"] = tensor_value; attrs["dtype"] = dtype_value; var op = g.create_op("Const", new Tensor[0], new TF_DataType[] { dtype_value.Type.as_tf_dtype() }, attrs: attrs, name: name); return(op.outputs[0]); }
/// <summary> /// Create a TensorProto. /// </summary> /// <param name="values"></param> /// <param name="dtype"></param> /// <param name="shape"></param> /// <param name="verify_shape"></param> /// <param name="allow_broadcast"></param> /// <returns></returns> public static TensorProto make_tensor_proto(object values, TF_DataType dtype = TF_DataType.DtInvalid, int[] shape = null, bool verify_shape = false, bool allow_broadcast = false) { if (allow_broadcast && verify_shape) { throw new ValueError("allow_broadcast and verify_shape are not both allowed."); } if (values is TensorProto tp) { return(tp); } // We first convert value to a numpy array or scalar. NDArray nparray = null; var np_dt = dtype.as_numpy_dtype(); if (values is NDArray nd) { nparray = nd; } else { if (values == null) { throw new ValueError("None values not supported."); } nparray = convert_to_numpy_ndarray(values); if (np_dt != null && np_dt != typeof(string)) { nparray = nparray.astype(np_dt); } } var numpy_dtype = nparray.dtype.as_dtype(dtype: dtype); if (numpy_dtype == TF_DataType.DtInvalid) { throw new TypeError($"Unrecognized data type: {nparray.dtype}"); } // If dtype was specified and is a quantized type, we convert // numpy_dtype back into the quantized version. if (quantized_types.Contains(dtype)) { numpy_dtype = dtype; } bool is_same_size = false; int shape_size = 0; // If shape is not given, get the shape from the numpy array. if (shape == null) { if (numpy_dtype == TF_DataType.TF_STRING) { // scalar string shape = new int[0]; shape_size = 0; } else { shape = nparray.shape; is_same_size = true; shape_size = nparray.size; } } else { shape_size = new TensorShape(shape).size; is_same_size = shape_size == nparray.size; } var tensor_proto = new TensorProto { Dtype = numpy_dtype.as_datatype_enum(), TensorShape = tensor_util.as_shape(shape) }; if (is_same_size && _TENSOR_CONTENT_TYPES.Contains(numpy_dtype) && shape_size > 1) { byte[] bytes = nparray.ToByteArray(); tensor_proto.TensorContent = Google.Protobuf.ByteString.CopyFrom(bytes.ToArray()); return(tensor_proto); } if (numpy_dtype == TF_DataType.TF_STRING && !(values is NDArray)) { if (values is string str) { tensor_proto.StringVal.Add(Google.Protobuf.ByteString.CopyFromUtf8(str)); tensor_proto.TensorShape = tensor_util.as_shape(new int[0]); } else if (values is string[] str_values) { tensor_proto.StringVal.AddRange(str_values.Select(x => Google.Protobuf.ByteString.CopyFromUtf8(x))); } else if (values is byte[] byte_values) { tensor_proto.TensorContent = Google.Protobuf.ByteString.CopyFrom(byte_values); } return(tensor_proto); } var proto_values = nparray.ravel(); switch (nparray.dtype.Name) { case "Bool": case "Boolean": tensor_proto.BoolVal.AddRange(proto_values.Data <bool>()); break; case "Int32": tensor_proto.IntVal.AddRange(proto_values.Data <int>()); break; case "Int64": tensor_proto.Int64Val.AddRange(proto_values.Data <long>()); break; case "Single": tensor_proto.FloatVal.AddRange(proto_values.Data <float>()); break; case "Double": tensor_proto.DoubleVal.AddRange(proto_values.Data <double>()); break; /*case "String": * tensor_proto.StringVal.AddRange(proto_values.Data<string>().Select(x => Google.Protobuf.ByteString.CopyFromUtf8(x.ToString()))); * break;*/ default: throw new Exception("make_tensor_proto Not Implemented"); } return(tensor_proto); }
public static TensorShape constant_value_as_shape(Tensor tensor)
{
    // Reflection-based stand-in for Python's hasattr.
    bool hasattr(Graph property, string attr)
    {
        var t = property.GetType().GetProperties();
        foreach (System.Reflection.PropertyInfo pi in t)
        {
            if (pi.Name == attr)
                return true;
        }
        return false;
    }

    if (tensor.GetType() == typeof(EagerTensor))
        return new TensorShape(tensor.numpy().ToArray<int>());

    if (tensor.TensorShape.ndim == 0)
    {
        var value_ = constant_value(tensor);
        if (value_ == null)
            throw new ValueError(
                @"Received a scalar with unknown value as shape; require a statically known scalar with value '-1' to describe an unknown shape.");
        if (value_ != -1)
            throw new ValueError(
                String.Format(@"Received a scalar value {0} as shape; require a statically known scalar with value '-1' to describe an unknown shape.", value_));
        return tensor.TensorShape.unknown_shape(-1);
    }

    var shape = tensor.TensorShape.with_rank(1);
    if (shape == new TensorShape(new int[] { 1 }))
    {
        return new TensorShape(new int[] { });
    }
    else if (tensor.op.type == "Cast")
    {
        var pre_cast = constant_value_as_shape(tensor.op.inputs[0]);
        if (pre_cast.dims == null)
            return pre_cast;
        var cast_dtype = dtypes.as_dtype((Type)tensor.op.get_attr("DstT"));
        if (!Array.Exists(new[] { dtypes.int32, dtypes.int64 }, cast_dtype_ => cast_dtype_ == cast_dtype))
            return tensor.TensorShape.unknown_shape(shape.dims[0]);

        // Map each dimension through the cast, keeping -1 (unknown) unchanged.
        var x_ = new List<int>();
        foreach (var x in pre_cast.as_list())
            x_.Add(x != -1 ? x : -1);
        var dest_dtype_shape_array = np.array(x_.ToArray()).astype(cast_dtype.as_numpy_dtype());

        var y_ = new List<int>();
        foreach (int y in dest_dtype_shape_array)
            y_.Add(y >= 0 ? y : -1);
        return new TensorShape(y_.ToArray());
    }
    else if (tensor.op.type == "Shape")
    {
        return tensor.op.inputs[0].shape;
    }
    else if (tensor.op.type == "Pack")
    {
        var ret_ = new TensorShape(new int[] { });
        if ((int)tensor.op.get_attr("axis") != 0)
            throw new ValueError(String.Format(
                @"Since rank 1 inputs are expected, Pack's axis: {0} must be 0, otherwise it would not be rank 1.",
                tensor.op.get_attr("axis")));
        foreach (Tensor pack_input in tensor.op.inputs)
        {
            var pack_input_val = constant_value(pack_input);
            // An unknown (null) or negative packed value contributes an unknown dimension.
            Dimension new_dim;
            if (pack_input_val == null || pack_input_val < 0)
                new_dim = new Dimension(-1);
            else
                new_dim = new Dimension(pack_input_val);
            ret_ = ret_.concatenate(new int[] { new_dim });
        }
        return ret_;
    }
    else if (tensor.op.type == "Concat")
    {
        var ret_ = new TensorShape(new int[] { });
        // Skip the first input, which is the concat axis.
        var inputlist_ = new ArraySegment<Tensor>(tensor.op.inputs, 1, tensor.op.inputs.Length - 1);
        foreach (var concat_input in inputlist_)
            ret_ = ret_.concatenate(constant_value_as_shape(concat_input));
        return ret_;
    }
    else if (tensor.op.type == "StridedSlice")
    {
        try
        {
            var begin = constant_value(tensor.op.inputs[1]);
            var end = constant_value(tensor.op.inputs[2]);
            var strides = constant_value(tensor.op.inputs[3]);
            // All three must be statically known before we can slice the shape.
            if (new[] { begin, end, strides }.All(x => x != null))
            {
                begin = begin[0];
                end = end[0];
                strides = strides[0];
                var begin_mask = tensor.op.get_attr("begin_mask");
                if ((int)begin_mask == 1)
                    begin = null;
                var end_mask = tensor.op.get_attr("end_mask");
                if ((int)end_mask == 1)
                    end = null;
                var ellipsis_mask = tensor.op.get_attr("ellipsis_mask");
                var new_axis_mask = tensor.op.get_attr("new_axis_mask");
                var shrink_axis_mask = tensor.op.get_attr("shrink_axis_mask");
                bool valid_attributes =
                    !(bool)ellipsis_mask && !(bool)new_axis_mask && !(bool)shrink_axis_mask
                    && !((bool)begin_mask || (int)begin_mask == 1)
                    && !((bool)end_mask || (int)end_mask == 1);
                if (valid_attributes)
                {
                    // sorry for the mess here, but this hacky solution was the best way
                    // i could come up with to implement the things done in python in c#
                    var prev_ = constant_value_as_shape(tensor.op.inputs[0]).dims;
                    var prev = new List<int>(prev_.Skip(begin).Take(end - begin));
                    // 100 being the comparison doesn't really matter here; it's going to break anyway
                    for (int iter = 0; iter != 100; iter = iter + strides)
                    {
                        prev.Add(prev_[iter]);
                        if ((iter + strides) > prev_.Length)
                            break;
                    }
                    return new TensorShape(prev.ToArray());
                }
            }
        }
        catch (Exception ex)
        {
            // Fall through to the generic path below on known conversion errors.
            if (ex is ValueError || ex is TypeError)
            {
            }
        }
    }
    else if (tensor.op.type == "Placeholder"
        && tensor.op.graph.building_function
        && hasattr(tensor.op.graph, "internal_captures"))
    {
        int i = 0;
        foreach (Tensor capture in tensor.op.graph.internal_captures())
        {
            if (capture.GetType() == typeof(Tensor))
            {
                var external_capture = tensor.op.graph.external_captures()[i];
                return constant_value_as_shape(external_capture);
            }
            i++;
        }
    }

    var ret = tensor.TensorShape.unknown_shape(shape.dims[0]);
    var value = constant_value(tensor);
    if (!(value is null))
    {
        var d_ = new List<int>();
        foreach (int d in value)
            d_.Add(d >= 0 ? d : -1); // -1 means None/unknown
        ret = ret.merge_with(new TensorShape(d_.ToArray()));
    }
    return ret;
}
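// Added illustration of the "Shape" branch above, assuming this helper lives on
// tensor_util like its siblings: a shape tensor produced by tf.shape folds straight
// back into the input's statically known TensorShape (graph mode; eager tensors take
// the EagerTensor fast path instead).
void ShapeFoldingSketch()
{
    var x = tf.constant(new float[,] { { 1f, 2f }, { 3f, 4f } });
    var s = tf.shape(x);
    var folded = tensor_util.constant_value_as_shape(s);  // (2, 2)
}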
public TensorShapeProto _MakeShape(TensorShape shape, AttrDef attr_def)
    => shape.as_proto();
public static void _SetShapeInvariants(Tensor[] input_vars, Tensor[] enter_vars, TensorShape shapes = null)
{
    if (shapes == null)
        return;
    throw new NotImplementedException("_SetShapeInvariants");
}
public bool is_compatible_with(TensorShape shape2)
    => throw new NotImplementedException("TensorShape is_compatible_with");
/// <summary> /// Broadcast an array for a compatible shape. /// </summary> /// <param name="input"></param> /// <param name="shape"></param> /// <param name="name"></param> /// <returns></returns> public Tensor broadcast_to(Tensor input, TensorShape shape, string name = null) => gen_array_ops.broadcast_to(input, shape, name: name);
/// <summary> /// Create a TensorProto. /// </summary> /// <param name="values"></param> /// <param name="dtype"></param> /// <param name="shape"></param> /// <param name="verify_shape"></param> /// <param name="allow_broadcast"></param> /// <returns></returns> public static TensorProto make_tensor_proto(object values, TF_DataType dtype = TF_DataType.DtInvalid, int[] shape = null, bool verify_shape = false, bool allow_broadcast = false) { if (allow_broadcast && verify_shape) { throw new ValueError("allow_broadcast and verify_shape are not both allowed."); } if (values is TensorProto tp) { return(tp); } if (dtype != TF_DataType.DtInvalid) { ; } bool is_quantized = new TF_DataType[] { TF_DataType.TF_QINT8, TF_DataType.TF_QUINT8, TF_DataType.TF_QINT16, TF_DataType.TF_QUINT16, TF_DataType.TF_QINT32 }.Contains(dtype); // We first convert value to a numpy array or scalar. NDArray nparray = null; var np_dt = dtype.as_numpy_datatype(); if (values is NDArray nd) { nparray = nd; } else { if (values == null) { throw new ValueError("None values not supported."); } if (np_dt == null) { switch (values) { case bool boolVal: nparray = boolVal; break; case int intVal: nparray = intVal; break; case long intVal: nparray = intVal; break; case int[] intVals: nparray = np.array(intVals); break; case float floatVal: nparray = floatVal; break; case float[] floatVals: nparray = floatVals; break; case double doubleVal: nparray = doubleVal; break; case string strVal: nparray = strVal; break; case string[] strVals: nparray = strVals; break; case byte[] byteValues: nparray = byteValues; break; default: throw new NotImplementedException("make_tensor_proto Not Implemented"); } } else { // convert data type switch (np_dt.Name) { case "Int32": if (values.GetType().IsArray) { nparray = np.array((int[])values, np_dt); } else { nparray = Convert.ToInt32(values); } break; case "Single": if (values.GetType().IsArray) { nparray = np.array((float[])values, np_dt); } else { nparray = Convert.ToSingle(values); } break; case "Double": if (values.GetType().IsArray) { nparray = np.array((double[])values, np_dt); } else { nparray = Convert.ToDouble(values); } break; case "String": if (values.GetType().IsArray) { nparray = np.array((string[])values, np_dt); } else { nparray = Convert.ToString(values); } break; default: throw new NotImplementedException("make_tensor_proto Not Implemented"); } } } var numpy_dtype = dtypes.as_dtype(nparray.dtype); if (numpy_dtype == TF_DataType.DtInvalid) { throw new TypeError($"Unrecognized data type: {nparray.dtype}"); } // If dtype was specified and is a quantized type, we convert // numpy_dtype back into the quantized version. if (is_quantized) { numpy_dtype = dtype; } bool is_same_size = false; int shape_size = 0; // If shape is not given, get the shape from the numpy array. 
if (shape == null) { shape = nparray.shape; is_same_size = true; shape_size = nparray.size; } else { shape_size = new TensorShape(shape).Size; is_same_size = shape_size == nparray.size; } var tensor_proto = new tensor_pb2.TensorProto { Dtype = numpy_dtype.as_datatype_enum(), TensorShape = tensor_util.as_shape(shape) }; if (is_same_size && _TENSOR_CONTENT_TYPES.Contains(numpy_dtype) && shape_size > 1) { byte[] bytes = nparray.ToByteArray(); tensor_proto.TensorContent = Google.Protobuf.ByteString.CopyFrom(bytes.ToArray()); return(tensor_proto); } if (numpy_dtype == TF_DataType.TF_STRING && !(values is NDArray)) { if (values is string str) { tensor_proto.StringVal.Add(Google.Protobuf.ByteString.CopyFromUtf8(str)); } else if (values is string[] str_values) { tensor_proto.StringVal.AddRange(str_values.Select(x => Google.Protobuf.ByteString.CopyFromUtf8(x))); } else if (values is byte[] byte_values) { tensor_proto.TensorContent = Google.Protobuf.ByteString.CopyFrom(byte_values); } return(tensor_proto); } var proto_values = nparray.ravel(); switch (nparray.dtype.Name) { case "Bool": tensor_proto.BoolVal.AddRange(proto_values.Data <bool>()); break; case "Int32": tensor_proto.IntVal.AddRange(proto_values.Data <int>()); break; case "Int64": tensor_proto.Int64Val.AddRange(proto_values.Data <long>()); break; case "Single": tensor_proto.FloatVal.AddRange(proto_values.Data <float>()); break; case "Double": tensor_proto.DoubleVal.AddRange(proto_values.Data <double>()); break; case "String": tensor_proto.StringVal.AddRange(proto_values.Data <string>().Select(x => Google.Protobuf.ByteString.CopyFromUtf8(x.ToString()))); break; default: throw new Exception("make_tensor_proto Not Implemented"); } return(tensor_proto); }
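// Added sketch for the string path of this variant: a plain string is converted via
// Convert.ToString and then appended to StringVal as UTF-8 bytes. The unqualified
// call assumes the example sits in the same class as the method above.
void StringProtoSketch()
{
    var proto = make_tensor_proto("hello", dtype: TF_DataType.TF_STRING);
    // proto.StringVal now holds a single ByteString with the UTF-8 payload.
}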