/// <summary>
/// Create a TensorProto from an NDArray, copying its elements into the
/// repeated value field that matches the array's element type.
/// </summary>
/// <param name="nd">Source array; its dtype and shape drive the proto fields.</param>
/// <param name="verify_shape">Accepted for API compatibility; not consulted here.</param>
/// <returns>A populated TensorProto.</returns>
/// <exception cref="NotImplementedException">When the array's dtype has no handler below.</exception>
public static TensorProto make_tensor_proto(NDArray nd, bool verify_shape = false)
{
    var shape = nd.Storage.Shape;
    var numpy_dtype = dtypes.as_dtype(nd.dtype);

    var tensor_proto = new tensor_pb2.TensorProto
    {
        Dtype = numpy_dtype.as_datatype_enum(),
        TensorShape = shape.reshape(nd.shape).as_proto()
    };

    switch (nd.dtype.Name)
    {
        case "Int32":
            tensor_proto.IntVal.AddRange(nd.Data<int>());
            break;
        case "Single":
            tensor_proto.FloatVal.AddRange(nd.Data<float>());
            break;
        case "Double":
            tensor_proto.DoubleVal.AddRange(nd.Data<double>());
            break;
        case "String":
            // Strings are stored UTF-8 encoded.
            tensor_proto.StringVal.AddRange(nd.Data<string>().Select(x => Google.Protobuf.ByteString.CopyFromUtf8(x)));
            break;
        default:
            // Throw NotImplementedException (consistent with MakeNdarray) instead of a
            // bare Exception, and name the offending dtype in the message.
            throw new NotImplementedException($"make_tensor_proto: Support for dtype {nd.dtype.Name} Not Implemented");
    }

    return tensor_proto;
}
/// <summary>
/// Convert a TensorProto back into an NDArray.
/// Only the packed TensorContent representation is supported by this version.
/// </summary>
/// <param name="tensor">Proto whose TensorContent holds the raw element bytes.</param>
/// <returns>An NDArray with the proto's dtype and shape.</returns>
/// <exception cref="NotImplementedException">When TensorContent is empty (decoding the per-field value lists is not implemented).</exception>
public static NDArray MakeNdarray(TensorProto tensor)
{
    var shape = tensor.TensorShape.Dim.Select(x => (int)x.Size).ToArray();
    var tensor_dtype = tensor.Dtype.as_numpy_dtype();

    // Removed an unused `num_elements` local (np.prod(shape)) that was never read.
    if (tensor.TensorContent.Length > 0)
    {
        return np.frombuffer(tensor.TensorContent.ToByteArray(), tensor_dtype)
            .reshape(shape);
    }

    throw new NotImplementedException("MakeNdarray");
}
/// <summary>
/// Create a TensorProto from a scalar float, double, or string.
/// </summary>
/// <param name="values">Scalar value to encode.</param>
/// <param name="dtype">Accepted for interface compatibility only — the proto dtype
/// is inferred from the runtime type of <paramref name="values"/>.</param>
/// <returns>A populated TensorProto.</returns>
/// <exception cref="ValueError">When <paramref name="values"/> is null (consistent with the other overloads).</exception>
/// <exception cref="NotImplementedException">For unsupported value types (previously this silently returned null).</exception>
public static TensorProto make_tensor_proto(object values, Type dtype = null)
{
    if (values == null)
        throw new ValueError("None values not supported.");

    // Scalars are encoded with a rank-0 shape.
    var tensor_shape = new TensorShape(0);
    TensorProto tensor_proto;

    // Removed unused `nparray` locals that were assigned per-case but never read.
    switch (values)
    {
        case float val:
            tensor_proto = new tensor_pb2.TensorProto
            {
                Dtype = DataType.DtFloat,
                TensorShape = tensor_shape.as_shape().as_proto()
            };
            tensor_proto.FloatVal.Add(val);
            break;
        case double val:
            tensor_proto = new tensor_pb2.TensorProto
            {
                Dtype = DataType.DtDouble,
                TensorShape = tensor_shape.as_shape().as_proto()
            };
            tensor_proto.DoubleVal.Add(val);
            break;
        case string val:
            tensor_proto = new tensor_pb2.TensorProto
            {
                Dtype = DataType.DtString,
                TensorShape = tensor_shape.as_shape().as_proto()
            };
            tensor_proto.StringVal.Add(Google.Protobuf.ByteString.CopyFrom(val, Encoding.UTF8));
            break;
        default:
            // Fail loudly instead of returning null for unsupported types.
            throw new NotImplementedException($"make_tensor_proto: Support for type {values.GetType()} Not Implemented");
    }

    return tensor_proto;
}
/// <summary>
/// Convert a TensorProto into an NDArray. Supports the packed TensorContent
/// form, plus broadcasting a single stored value for int32/uint8 and bool protos.
/// </summary>
/// <param name="tensor">Proto to decode.</param>
/// <returns>An NDArray with the proto's dtype and shape.</returns>
/// <exception cref="NotImplementedException">For dtypes / encodings not handled below.</exception>
public static NDArray MakeNdarray(TensorProto tensor)
{
    var shape = tensor.TensorShape.Dim.Select(x => (int)x.Size).ToArray();
    int num_elements = np.prod(shape);
    var tensor_dtype = tensor.Dtype.as_numpy_dtype();

    if (tensor.TensorContent.Length > 0)
    {
        // Packed representation: reinterpret the raw bytes and restore the shape.
        return np.frombuffer(tensor.TensorContent.ToByteArray(), tensor_dtype).reshape(shape);
    }
    // Removed dead placeholder branches for DtHalf/DtBfloat16/DtFloat that contained
    // only an empty statement (plus the pragmas silencing CS0642); those dtypes still
    // reach the NotImplementedException below exactly as before.
    else if (new DataType[] { DataType.DtInt32, DataType.DtUint8 }.Contains(tensor.Dtype))
    {
        // A single stored value is broadcast across the full shape.
        if (tensor.IntVal.Count == 1)
            return np.repeat(np.array(tensor.IntVal[0]), num_elements).reshape(shape);
    }
    else if (tensor.Dtype == DataType.DtBool)
    {
        if (tensor.BoolVal.Count == 1)
            return np.repeat(np.array(tensor.BoolVal[0]), num_elements).reshape(shape);
    }

    throw new NotImplementedException("MakeNdarray");
}
/// <summary>
/// Create a TensorProto, invoked in graph mode
/// </summary>
/// <param name="values">Value to encode: TensorProto (returned as-is), NDArray,
/// string / string[] / byte[], a numeric array, Axis, Shape, or a numeric scalar.</param>
/// <param name="dtype">Requested element dtype; DtInvalid means "infer from values".</param>
/// <param name="shape">Explicit shape; null means "infer from values".</param>
/// <param name="verify_shape">Mutually exclusive with <paramref name="allow_broadcast"/>.</param>
/// <param name="allow_broadcast">Mutually exclusive with <paramref name="verify_shape"/>.</param>
/// <returns>A populated TensorProto.</returns>
public static TensorProto make_tensor_proto(object values, TF_DataType dtype = TF_DataType.DtInvalid, Shape? shape = null, bool verify_shape = false, bool allow_broadcast = false)
{
    if (allow_broadcast && verify_shape)
        throw new ValueError("allow_broadcast and verify_shape are not both allowed.");

    if (values is TensorProto tp)
        return tp;

    var origin_dtype = values.GetDataType();
    if (dtype == TF_DataType.DtInvalid)
    {
        dtype = origin_dtype;
    }
    else if (origin_dtype != dtype)
    {
        // Convert values to the requested dtype before encoding.
        var new_system_dtype = dtype.as_system_dtype();
        if (values is long[] long_values)
        {
            // NOTE(review): only the long[] -> int[] narrowing is handled for arrays;
            // other array conversions leave `values` untouched — confirm intent.
            if (dtype == TF_DataType.TF_INT32)
                values = long_values.Select(x => (int)Convert.ChangeType(x, new_system_dtype)).ToArray();
        }
        else
        {
            values = Convert.ChangeType(values, new_system_dtype);
        }

        dtype = values.GetDataType();
    }

    shape = shape ?? values.GetShape();

    var tensor_proto = new TensorProto
    {
        Dtype = dtype.as_datatype_enum(),
        TensorShape = shape.as_shape_proto()
    };

    if (values is NDArray nd)
    {
        if (nd.shape.IsScalar)
        {
            // Scalars go into the dtype-specific repeated value field.
            switch (nd.dtype)
            {
                case TF_DataType.TF_BOOL:
                    tensor_proto.BoolVal.AddRange(nd.ToArray<bool>());
                    break;
                case TF_DataType.TF_UINT8:
                    tensor_proto.IntVal.AddRange(nd.ToArray<byte>().Select(x => (int)x).ToArray());
                    break;
                case TF_DataType.TF_INT32:
                    tensor_proto.IntVal.AddRange(nd.ToArray<int>());
                    break;
                case TF_DataType.TF_INT64:
                    tensor_proto.Int64Val.AddRange(nd.ToArray<long>());
                    break;
                case TF_DataType.TF_FLOAT:
                    tensor_proto.FloatVal.AddRange(nd.ToArray<float>());
                    break;
                case TF_DataType.TF_DOUBLE:
                    tensor_proto.DoubleVal.AddRange(nd.ToArray<double>());
                    break;
                default:
                    throw new Exception("make_tensor_proto Not Implemented");
            }
        }
        else
        {
            // Non-scalar arrays are serialized wholesale into TensorContent.
            // (Removed an unused `len` local that was computed here but never read.)
            byte[] bytes = nd.ToByteArray();
            tensor_proto.TensorContent = Google.Protobuf.ByteString.CopyFrom(bytes);
        }
    }
    else if (dtype == TF_DataType.TF_STRING)
    {
        // The NDArray case was handled above, so the old `!(values is NDArray)`
        // guard here was redundant and has been dropped.
        if (values is string str)
            tensor_proto.StringVal.Add(Google.Protobuf.ByteString.CopyFromUtf8(str));
        else if (values is string[] str_values)
            tensor_proto.StringVal.AddRange(str_values.Select(x => Google.Protobuf.ByteString.CopyFromUtf8(x)));
        else if (values is byte[] byte_values)
            tensor_proto.TensorContent = Google.Protobuf.ByteString.CopyFrom(byte_values);
    }
    else if (values is Array array)
    {
        // Raw numeric arrays: block-copy exactly the payload bytes implied by shape.
        var len = dtype.get_datatype_size() * (int)shape.size;
        byte[] bytes = new byte[len];
        System.Buffer.BlockCopy(array, 0, bytes, 0, len);
        tensor_proto.TensorContent = Google.Protobuf.ByteString.CopyFrom(bytes);
    }
    else
    {
        // Plain scalars and shape-like values.
        switch (values)
        {
            case Axis val:
                tensor_proto.IntVal.AddRange(val.axis);
                break;
            case Shape val:
                tensor_proto.Int64Val.AddRange(val.dims);
                break;
            case bool val:
                tensor_proto.BoolVal.AddRange(new[] { val });
                break;
            case sbyte val:
                tensor_proto.IntVal.AddRange(new[] { (int)val });
                break;
            case int val:
                tensor_proto.IntVal.AddRange(new[] { val });
                break;
            case long val:
                tensor_proto.Int64Val.AddRange(new[] { val });
                break;
            case float val:
                tensor_proto.FloatVal.AddRange(new[] { val });
                break;
            case double val:
                tensor_proto.DoubleVal.AddRange(new[] { val });
                break;
            default:
                throw new Exception("make_tensor_proto Not Implemented");
        }
    }

    return tensor_proto;
}
/// <summary>
/// Create a TensorProto.
/// </summary>
/// <param name="values">Value to encode: TensorProto (returned as-is), NDArray,
/// scalar, 1-D/2-D array, or string(s).</param>
/// <param name="dtype">Requested element dtype; DtInvalid means "infer from values".</param>
/// <param name="shape">Explicit shape; null means "use the array's own shape".</param>
/// <param name="verify_shape">Mutually exclusive with <paramref name="allow_broadcast"/>.</param>
/// <param name="allow_broadcast">Mutually exclusive with <paramref name="verify_shape"/>.</param>
/// <returns>A populated TensorProto.</returns>
public static TensorProto make_tensor_proto(object values, TF_DataType dtype = TF_DataType.DtInvalid, int[] shape = null, bool verify_shape = false, bool allow_broadcast = false)
{
    if (allow_broadcast && verify_shape)
        throw new ValueError("allow_broadcast and verify_shape are not both allowed.");

    if (values is TensorProto tp)
        return tp;

    // (Removed a dead `if (dtype != DtInvalid) { ; }` empty statement.)

    bool is_quantized = new TF_DataType[]
    {
        TF_DataType.TF_QINT8, TF_DataType.TF_QUINT8,
        TF_DataType.TF_QINT16, TF_DataType.TF_QUINT16,
        TF_DataType.TF_QINT32
    }.Contains(dtype);

    // We first convert value to a numpy array or scalar.
    NDArray nparray = null;
    var np_dt = dtype.as_numpy_datatype();

    if (values is NDArray nd)
    {
        nparray = nd;
    }
    else
    {
        if (values == null)
            throw new ValueError("None values not supported.");

        if (np_dt == null)
        {
            // No target dtype: wrap based on the runtime type of `values`.
            switch (values)
            {
                case bool boolVal: nparray = boolVal; break;
                case int intVal: nparray = intVal; break;
                case int[] intVals: nparray = np.array(intVals); break;
                case int[,] intVals: nparray = np.array(intVals); break;
                case long intVal: nparray = intVal; break;
                case long[] intVals: nparray = np.array(intVals); break;
                case long[,] intVals: nparray = np.array(intVals); break;
                case float floatVal: nparray = floatVal; break;
                case float[] floatVals: nparray = floatVals; break;
                case float[,] floatVals: nparray = np.array(floatVals); break;
                case double doubleVal: nparray = doubleVal; break;
                case double[] doubleVals: nparray = np.array(doubleVals); break;
                case double[,] doubleVals: nparray = np.array(doubleVals); break;
                case string strVal: nparray = strVal; break;
                case string[] strVals: nparray = strVals; break;
                case byte[] byteValues: nparray = byteValues; break;
                case byte[,] byteValues: nparray = np.array(byteValues); break;
                default:
                    throw new NotImplementedException($"make_tensor_proto: Support for type {values.GetType()} Not Implemented");
            }
        }
        else
        {
            // Convert values to the requested dtype.
            switch (np_dt.Name)
            {
                case "Int32":
                    if (values.GetType().IsArray)
                        nparray = np.array((int[])values, np_dt);
                    else
                        nparray = Convert.ToInt32(values);
                    break;
                case "Int64":
                    if (values.GetType().IsArray)
                    {
                        // Fixed: a long[] input was cast to (int[]), which throws
                        // InvalidCastException at runtime. int[] inputs still work.
                        if (values is long[] longVals)
                            nparray = np.array(longVals, np_dt);
                        else
                            nparray = np.array((int[])values, np_dt);
                    }
                    else
                    {
                        nparray = Convert.ToInt64(values);
                    }
                    break;
                case "Single":
                    if (values.GetType().IsArray)
                        nparray = np.array((float[])values, np_dt);
                    else
                        nparray = Convert.ToSingle(values);
                    break;
                case "Double":
                    if (values.GetType().IsArray)
                        nparray = np.array((double[])values, np_dt);
                    else
                        nparray = Convert.ToDouble(values);
                    break;
                case "String":
                    if (values.GetType().IsArray)
                        nparray = np.array((string[])values, np_dt);
                    else
                        nparray = Convert.ToString(values);
                    break;
                default:
                    throw new NotImplementedException($"make_tensor_proto: Support for type {np_dt.Name} Not Implemented");
            }
        }
    }

    var numpy_dtype = dtypes.as_dtype(nparray.dtype);
    if (numpy_dtype == TF_DataType.DtInvalid)
        throw new TypeError($"Unrecognized data type: {nparray.dtype}");

    // If dtype was specified and is a quantized type, we convert
    // numpy_dtype back into the quantized version.
    if (is_quantized)
        numpy_dtype = dtype;

    bool is_same_size = false;
    int shape_size = 0;

    // If shape is not given, get the shape from the numpy array.
    if (shape == null)
    {
        shape = nparray.shape;
        is_same_size = true;
        shape_size = nparray.size;
    }
    else
    {
        shape_size = new TensorShape(shape).Size;
        is_same_size = shape_size == nparray.size;
    }

    var tensor_proto = new TensorProto
    {
        Dtype = numpy_dtype.as_datatype_enum(),
        TensorShape = tensor_util.as_shape(shape)
    };

    if (is_same_size && _TENSOR_CONTENT_TYPES.Contains(numpy_dtype) && shape_size > 1)
    {
        // Fast path: serialize raw bytes. (Dropped a redundant bytes.ToArray() copy.)
        byte[] bytes = nparray.ToByteArray();
        tensor_proto.TensorContent = Google.Protobuf.ByteString.CopyFrom(bytes);
        return tensor_proto;
    }

    if (numpy_dtype == TF_DataType.TF_STRING && !(values is NDArray))
    {
        if (values is string str)
            tensor_proto.StringVal.Add(Google.Protobuf.ByteString.CopyFromUtf8(str));
        else if (values is string[] str_values)
            tensor_proto.StringVal.AddRange(str_values.Select(x => Google.Protobuf.ByteString.CopyFromUtf8(x)));
        else if (values is byte[] byte_values)
            tensor_proto.TensorContent = Google.Protobuf.ByteString.CopyFrom(byte_values);
        return tensor_proto;
    }

    // Slow path: flatten and fill the dtype-specific repeated value field.
    var proto_values = nparray.ravel();

    switch (nparray.dtype.Name)
    {
        case "Bool":
        case "Boolean":
            tensor_proto.BoolVal.AddRange(proto_values.Data<bool>());
            break;
        case "Int32":
            tensor_proto.IntVal.AddRange(proto_values.Data<int>());
            break;
        case "Int64":
            tensor_proto.Int64Val.AddRange(proto_values.Data<long>());
            break;
        case "Single":
            tensor_proto.FloatVal.AddRange(proto_values.Data<float>());
            break;
        case "Double":
            tensor_proto.DoubleVal.AddRange(proto_values.Data<double>());
            break;
        case "String":
            tensor_proto.StringVal.AddRange(proto_values.Data<string>().Select(x => Google.Protobuf.ByteString.CopyFromUtf8(x.ToString())));
            break;
        default:
            throw new Exception("make_tensor_proto Not Implemented");
    }

    return tensor_proto;
}
/// <summary>
/// Create a TensorProto.
/// </summary>
/// <param name="values">Value to encode: TensorProto (returned as-is), NDArray,
/// or anything convert_to_numpy_ndarray accepts.</param>
/// <param name="dtype">Requested element dtype; DtInvalid means "infer from values".</param>
/// <param name="shape">Explicit shape; null means "use the array's own shape".</param>
/// <param name="verify_shape">Mutually exclusive with <paramref name="allow_broadcast"/>.</param>
/// <param name="allow_broadcast">Mutually exclusive with <paramref name="verify_shape"/>.</param>
/// <returns>A populated TensorProto.</returns>
public static TensorProto make_tensor_proto(object values, TF_DataType dtype = TF_DataType.DtInvalid, int[] shape = null, bool verify_shape = false, bool allow_broadcast = false)
{
    if (allow_broadcast && verify_shape)
        throw new ValueError("allow_broadcast and verify_shape are not both allowed.");

    if (values is TensorProto tp)
        return tp;

    // We first convert value to a numpy array or scalar.
    NDArray nparray = null;
    var np_dt = dtype.as_numpy_dtype();

    if (values is NDArray nd)
    {
        nparray = nd;
    }
    else
    {
        if (values == null)
            throw new ValueError("None values not supported.");

        nparray = convert_to_numpy_ndarray(values);

        // Cast to the requested dtype unless it is string-typed.
        if (np_dt != null && np_dt != typeof(string))
            nparray = nparray.astype(np_dt);
    }

    var numpy_dtype = nparray.dtype.as_dtype(dtype: dtype);
    if (numpy_dtype == TF_DataType.DtInvalid)
        throw new TypeError($"Unrecognized data type: {nparray.dtype}");

    // If dtype was specified and is a quantized type, we convert
    // numpy_dtype back into the quantized version.
    if (quantized_types.Contains(dtype))
        numpy_dtype = dtype;

    bool is_same_size = false;
    int shape_size = 0;

    // If shape is not given, get the shape from the numpy array.
    if (shape == null)
    {
        shape = nparray.shape;
        is_same_size = true;
        shape_size = nparray.size;
    }
    else
    {
        shape_size = new TensorShape(shape).size;
        is_same_size = shape_size == nparray.size;
    }

    var tensor_proto = new TensorProto
    {
        Dtype = numpy_dtype.as_datatype_enum(),
        TensorShape = tensor_util.as_shape(shape)
    };

    if (is_same_size && _TENSOR_CONTENT_TYPES.Contains(numpy_dtype) && shape_size > 1)
    {
        // Fast path: serialize raw bytes. (Dropped a redundant bytes.ToArray() copy —
        // `bytes` is already a byte[].)
        byte[] bytes = nparray.ToByteArray();
        tensor_proto.TensorContent = Google.Protobuf.ByteString.CopyFrom(bytes);
        return tensor_proto;
    }

    if (numpy_dtype == TF_DataType.TF_STRING && !(values is NDArray))
    {
        if (values is string str)
        {
            tensor_proto.StringVal.Add(Google.Protobuf.ByteString.CopyFromUtf8(str));
            // A single string is a scalar: force a rank-0 shape.
            tensor_proto.TensorShape = tensor_util.as_shape(new int[0]);
        }
        else if (values is string[] str_values)
        {
            tensor_proto.StringVal.AddRange(str_values.Select(x => Google.Protobuf.ByteString.CopyFromUtf8(x)));
        }
        else if (values is byte[] byte_values)
        {
            tensor_proto.TensorContent = Google.Protobuf.ByteString.CopyFrom(byte_values);
        }

        return tensor_proto;
    }

    // Slow path: flatten and fill the dtype-specific repeated value field.
    var proto_values = nparray.ravel();

    switch (nparray.dtype.Name)
    {
        case "Bool":
        case "Boolean":
            tensor_proto.BoolVal.AddRange(proto_values.Data<bool>());
            break;
        case "Int32":
            tensor_proto.IntVal.AddRange(proto_values.Data<int>());
            break;
        case "Int64":
            tensor_proto.Int64Val.AddRange(proto_values.Data<long>());
            break;
        case "Single":
            tensor_proto.FloatVal.AddRange(proto_values.Data<float>());
            break;
        case "Double":
            tensor_proto.DoubleVal.AddRange(proto_values.Data<double>());
            break;
        // NOTE(review): the "String" case is intentionally disabled here — strings
        // are handled by the TF_STRING branch above; NDArray-held strings fall
        // through to the exception below, as in the original.
        default:
            throw new Exception("make_tensor_proto Not Implemented");
    }

    return tensor_proto;
}