Example #1
        /// <summary>
        /// Converts a class vector (integers) to binary class matrix.
        /// </summary>
        /// <param name="y">Class vector to be converted into a matrix (integers from 0 to num_classes - 1).</param>
        /// <param name="num_classes">Total number of classes, i.e. the width of the returned matrix.</param>
        /// <param name="dtype">Element type of the returned matrix; defaults to float32.</param>
        /// <returns>A binary (one-hot) matrix with shape (y.size, num_classes).</returns>
        public static NDArray to_categorical(NDArray y, int num_classes = -1, TF_DataType dtype = TF_DataType.TF_FLOAT)
        {
            var y1 = y.astype(NPTypeCode.Int32).ToArray <int>();
            // var input_shape = y.shape[..^1];
            var categorical = np.zeros((y.size, num_classes), dtype: dtype.as_numpy_dtype());

            // categorical[np.arange(y.size), y] = 1;
            for (int i = 0; i < y.size; i++)
            {
                categorical[i][y1[i]] = 1;
            }

            return(categorical);
        }
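
A minimal usage sketch (the call is written unqualified since the containing class is not shown; the labels and class count are illustrative):

        // Hypothetical labels for three samples: classes 0, 2 and 1.
        NDArray labels = np.array(new[] { 0, 2, 1 });

        // One-hot encode into a (3, 3) float32 matrix.
        NDArray one_hot = to_categorical(labels, num_classes: 3);
        // Expected rows: [1,0,0], [0,0,1], [0,1,0]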
Example #2
        /// <summary>
        ///     Convert given <see cref="Array"/> to <see cref="Tensor"/>.
        /// </summary>
        /// <param name="array">The array to convert, can be regular, jagged or multi-dim array.</param>
        /// <param name="astype">Convert <see cref="Array"/> to given <paramref name="astype"/> before inserting it into a <see cref="Tensor"/>.</param>
        /// <exception cref="NotSupportedException"></exception>
        public static Tensor ToTensor(Array array, TF_DataType? astype = null)
        {
            if (array == null)
            {
                throw new ArgumentNullException(nameof(array));
            }
            var arrtype = array.ResolveElementType();

            var astype_type = astype?.as_numpy_dtype() ?? arrtype;

            if (astype_type == arrtype)
            {
                //no conversion required
                if (astype == TF_DataType.TF_STRING)
                {
                    throw new NotSupportedException(); //TODO! when string is fully implemented.
                }

                if (astype == TF_DataType.TF_INT8)
                {
                    if (array.Rank != 1 || array.GetType().GetElementType()?.IsArray == true) //is multidim or jagged
                    {
                        array = Arrays.Flatten(array);
                    }

                    return(new Tensor((sbyte[])array));
                }

                //is multidim or jagged, if so - use NDArrays constructor as it records shape.
                if (array.Rank != 1 || array.GetType().GetElementType().IsArray)
                {
                    return(new Tensor(new NDArray(array)));
                }

#if _REGEN
                #region Compute
                switch (arrtype)
                {
                    % foreach supported_dtypes, supported_dtypes_lowercase %
                case NPTypeCode.#1: return(new Tensor((#2[])arr));

                    %
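
The listing breaks off inside the _REGEN code-generation template; a short usage sketch for ToTensor (written unqualified since the containing class is not shown; the inputs are illustrative):

        // A 2-D array is not rank-1, so it takes the NDArray-constructor path
        // above and keeps its (2, 3) shape.
        var grid = new float[,] { { 1f, 2f, 3f }, { 4f, 5f, 6f } };
        Tensor t = ToTensor(grid);

        // A flat double[] with no astype is expected to be wrapped directly by
        // the generated switch (truncated above).
        Tensor v = ToTensor(new double[] { 0.1, 0.2, 0.3 });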
Example #3
        /// <summary>
        /// Create a TensorProto.
        /// </summary>
        /// <param name="values">Values to put into the proto: an NDArray, or a CLR scalar, array or string.</param>
        /// <param name="dtype">Optional target data type; DtInvalid means the type is taken from <paramref name="values"/>.</param>
        /// <param name="shape">Optional tensor dimensions; taken from <paramref name="values"/> when null.</param>
        /// <param name="verify_shape">Whether the shape of <paramref name="values"/> must match <paramref name="shape"/>; cannot be combined with <paramref name="allow_broadcast"/>.</param>
        /// <param name="allow_broadcast">Whether <paramref name="values"/> may be broadcast to <paramref name="shape"/>; cannot be combined with <paramref name="verify_shape"/>.</param>
        /// <returns>A <see cref="TensorProto"/> holding <paramref name="values"/>.</returns>
        public static TensorProto make_tensor_proto(object values, TF_DataType dtype = TF_DataType.DtInvalid, int[] shape = null, bool verify_shape = false, bool allow_broadcast = false)
        {
            if (allow_broadcast && verify_shape)
            {
                throw new ValueError("allow_broadcast and verify_shape are not both allowed.");
            }
            if (values is TensorProto tp)
            {
                return(tp);
            }

            // We first convert value to a numpy array or scalar.
            NDArray nparray = null;
            var     np_dt   = dtype.as_numpy_dtype();

            if (values is NDArray nd)
            {
                nparray = nd;
            }
            else
            {
                if (values == null)
                {
                    throw new ValueError("None values not supported.");
                }

                nparray = convert_to_numpy_ndarray(values);

                if (np_dt != null && np_dt != typeof(string))
                {
                    nparray = nparray.astype(np_dt);
                }
            }

            var numpy_dtype = nparray.dtype.as_dtype(dtype: dtype);

            if (numpy_dtype == TF_DataType.DtInvalid)
            {
                throw new TypeError($"Unrecognized data type: {nparray.dtype}");
            }

            // If dtype was specified and is a quantized type, we convert
            // numpy_dtype back into the quantized version.
            if (quantized_types.Contains(dtype))
            {
                numpy_dtype = dtype;
            }

            bool is_same_size = false;
            int  shape_size   = 0;

            // If shape is not given, get the shape from the numpy array.
            if (shape == null)
            {
                shape        = nparray.shape;
                is_same_size = true;
                shape_size   = nparray.size;
            }
            else
            {
                shape_size   = new TensorShape(shape).size;
                is_same_size = shape_size == nparray.size;
            }

            var tensor_proto = new TensorProto
            {
                Dtype       = numpy_dtype.as_datatype_enum(),
                TensorShape = tensor_util.as_shape(shape)
            };

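            // Fast path: when the shape matches and the dtype supports raw byte packing,
            // serialize the whole buffer into TensorContent instead of filling the
            // per-element *Val fields below.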
            if (is_same_size && _TENSOR_CONTENT_TYPES.Contains(numpy_dtype) && shape_size > 1)
            {
                byte[] bytes = nparray.ToByteArray();
                tensor_proto.TensorContent = Google.Protobuf.ByteString.CopyFrom(bytes.ToArray());
                return(tensor_proto);
            }

            if (numpy_dtype == TF_DataType.TF_STRING && !(values is NDArray))
            {
                if (values is string str)
                {
                    tensor_proto.StringVal.Add(Google.Protobuf.ByteString.CopyFromUtf8(str));
                    tensor_proto.TensorShape = tensor_util.as_shape(new int[0]);
                }
                else if (values is string[] str_values)
                {
                    tensor_proto.StringVal.AddRange(str_values.Select(x => Google.Protobuf.ByteString.CopyFromUtf8(x)));
                }
                else if (values is byte[] byte_values)
                {
                    tensor_proto.TensorContent = Google.Protobuf.ByteString.CopyFrom(byte_values);
                }

                return(tensor_proto);
            }

            var proto_values = nparray.ravel();

            switch (nparray.dtype.Name)
            {
            case "Bool":
            case "Boolean":
                tensor_proto.BoolVal.AddRange(proto_values.Data <bool>());
                break;

            case "Int32":
                tensor_proto.IntVal.AddRange(proto_values.Data <int>());
                break;

            case "Int64":
                tensor_proto.Int64Val.AddRange(proto_values.Data <long>());
                break;

            case "Single":
                tensor_proto.FloatVal.AddRange(proto_values.Data <float>());
                break;

            case "Double":
                tensor_proto.DoubleVal.AddRange(proto_values.Data <double>());
                break;

            /*case "String":
             *  tensor_proto.StringVal.AddRange(proto_values.Data<string>().Select(x => Google.Protobuf.ByteString.CopyFromUtf8(x.ToString())));
             *  break;*/
            default:
                throw new Exception("make_tensor_proto Not Implemented");
            }

            return(tensor_proto);
        }
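
A minimal call sketch (tensor_util is assumed to be the containing class, matching the as_shape helper used above, and convert_to_numpy_ndarray is assumed to accept these CLR values; the numbers are illustrative):

        // Scalar float: ends up in the FloatVal field of the proto.
        TensorProto scalar_proto = tensor_util.make_tensor_proto(3.14f);

        // Four ints reshaped to (2, 2): more than one element of a raw-packable
        // dtype, so it is expected to take the TensorContent fast path.
        TensorProto matrix_proto = tensor_util.make_tensor_proto(
            new[] { 1, 2, 3, 4 }, shape: new[] { 2, 2 });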
Example #4
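        /// <summary>
        /// Wraps a CLR value (scalar, 1- to 3-D array, <see cref="NDArray"/>, <see cref="TensorShape"/> or string)
        /// in an <see cref="EagerTensor"/> on the given eager <see cref="Context"/>,
        /// coercing the value to the requested <paramref name="dtype"/> where needed.
        /// </summary>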
        private static EagerTensor convert_to_eager_tensor(object value, Context ctx, TF_DataType dtype = TF_DataType.DtInvalid)
        {
            ctx.ensure_initialized();
            // If a concrete dtype was requested and the value is a plain scalar whose
            // CLR type does not already match, coerce the scalar before wrapping it.
            if (dtype != TF_DataType.DtInvalid &&
                value.GetType().Name != "NDArray" &&
                value.GetType().BaseType.Name != "Array" &&
                dtypes.as_base_dtype(dtype) != dtypes.as_dtype(value.GetType()))
            {
                switch (dtype)
                {
                case TF_DataType.TF_DOUBLE:
                    value = Convert.ToDouble(value);
                    break;

                case TF_DataType.TF_FLOAT:
                    value = Convert.ToSingle(value);
                    break;

                case TF_DataType.TF_INT64:
                    value = Convert.ToInt64(value);
                    break;

                default:
                    break;
                }
            }
            else if (dtype != TF_DataType.DtInvalid &&
                     value is NDArray nd &&
                     dtypes.as_dtype(nd.dtype) != dtype)
            {
                value = nd.astype(dtype.as_numpy_dtype());
            }

            if (dtype == TF_DataType.TF_STRING && value is byte[] bytes)
            {
                return(new EagerTensor(bytes, ctx.DeviceName, TF_DataType.TF_STRING));
            }

            switch (value)
            {
            case EagerTensor val:
                return(val);

            case NDArray val:
                return(new EagerTensor(val, ctx.DeviceName));

            case TensorShape val:
                return(new EagerTensor(val.dims, ctx.DeviceName));

            case string val:
                return(new EagerTensor(val, ctx.DeviceName));

            case string[] val:
                return(new EagerTensor(val, ctx.DeviceName));

            case bool val:
                return(new EagerTensor(val, ctx.DeviceName));

            case byte val:
                return(new EagerTensor(val, ctx.DeviceName));

            case byte[] val:
                return(new EagerTensor(val, ctx.DeviceName));

            case byte[,] val:
                return(new EagerTensor(val, ctx.DeviceName));

            case byte[,,] val:
                return(new EagerTensor(val, ctx.DeviceName));

            case int val:
                return(new EagerTensor(val, ctx.DeviceName));

            case int[] val:
                return(new EagerTensor(val, ctx.DeviceName));

            case int[,] val:
                return(new EagerTensor(val, ctx.DeviceName));

            case int[,,] val:
                return(new EagerTensor(val, ctx.DeviceName));

            case long val:
                return(new EagerTensor(val, ctx.DeviceName));

            case long[] val:
                return(new EagerTensor(val, ctx.DeviceName));

            case long[,] val:
                return(new EagerTensor(val, ctx.DeviceName));

            case long[,,] val:
                return(new EagerTensor(val, ctx.DeviceName));

            case float val:
                return(new EagerTensor(val, ctx.DeviceName));

            case float[] val:
                return(new EagerTensor(val, ctx.DeviceName));

            case float[,] val:
                return(new EagerTensor(val, ctx.DeviceName));

            case float[,,] val:
                return(new EagerTensor(val, ctx.DeviceName));

            case double val:
                return(new EagerTensor(val, ctx.DeviceName));

            case double[] val:
                return(new EagerTensor(val, ctx.DeviceName));

            case double[,] val:
                return(new EagerTensor(val, ctx.DeviceName));

            case double[,,] val:
                return(new EagerTensor(val, ctx.DeviceName));

            default:
                throw new NotImplementedException($"convert_to_eager_tensor {value.GetType()}");
            }
        }
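
A sketch of how the dispatch above behaves for a few inputs (written as if called from inside the declaring class, since the method is private; obtaining the Context via tf.Context is an assumption):

        Context ctx = tf.Context;   // assumption: the ambient eager context

        // Scalar int: handled by the `case int val` branch.
        EagerTensor a = convert_to_eager_tensor(42, ctx);

        // Same scalar requested as TF_INT64: coerced via Convert.ToInt64 first.
        EagerTensor b = convert_to_eager_tensor(42, ctx, TF_DataType.TF_INT64);

        // 2-D float array: handled by the `case float[,] val` branch, shape preserved.
        EagerTensor c = convert_to_eager_tensor(new float[,] { { 1f, 2f }, { 3f, 4f } }, ctx);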
Example #5
        /// <summary>
        /// Create a TensorProto.
        /// </summary>
        /// <param name="values">Values to put into the proto: an NDArray, or a CLR scalar, array or string.</param>
        /// <param name="dtype">Optional target data type; DtInvalid means the type is taken from <paramref name="values"/>.</param>
        /// <param name="shape">Optional tensor dimensions; taken from <paramref name="values"/> when null.</param>
        /// <param name="verify_shape">Whether the shape of <paramref name="values"/> must match <paramref name="shape"/>; cannot be combined with <paramref name="allow_broadcast"/>.</param>
        /// <param name="allow_broadcast">Whether <paramref name="values"/> may be broadcast to <paramref name="shape"/>; cannot be combined with <paramref name="verify_shape"/>.</param>
        /// <returns>A <see cref="TensorProto"/> holding <paramref name="values"/>.</returns>
        public static TensorProto make_tensor_proto(object values, TF_DataType dtype = TF_DataType.DtInvalid, int[] shape = null, bool verify_shape = false, bool allow_broadcast = false)
        {
            if (allow_broadcast && verify_shape)
            {
                throw new ValueError("allow_broadcast and verify_shape are not both allowed.");
            }
            if (values is TensorProto tp)
            {
                return(tp);
            }

            bool is_quantized = new TF_DataType[]
            {
                TF_DataType.TF_QINT8, TF_DataType.TF_QUINT8, TF_DataType.TF_QINT16, TF_DataType.TF_QUINT16,
                TF_DataType.TF_QINT32
            }.Contains(dtype);

            // We first convert value to a numpy array or scalar.
            NDArray nparray = null;
            var     np_dt   = dtype.as_numpy_dtype();

            if (values is NDArray nd)
            {
                nparray = nd;
            }
            else
            {
                if (values == null)
                {
                    throw new ValueError("None values not supported.");
                }

                if (np_dt == null)
                {
                    switch (values)
                    {
                    case bool boolVal:
                        nparray = boolVal;
                        break;

                    case int intVal:
                        nparray = intVal;
                        break;

                    case int[] intVals:
                        nparray = np.array(intVals);
                        break;

                    case int[,] intVals:
                        nparray = np.array(intVals);
                        break;

                    case long intVal:
                        nparray = intVal;
                        break;

                    case long[] intVals:
                        nparray = np.array(intVals);
                        break;

                    case long[,] intVals:
                        nparray = np.array(intVals);
                        break;

                    case float floatVal:
                        nparray = floatVal;
                        break;

                    case float[] floatVals:
                        nparray = floatVals;
                        break;

                    case float[,] floatVals:
                        nparray = np.array(floatVals);
                        break;

                    case double doubleVal:
                        nparray = doubleVal;
                        break;

                    case double[] doubleVals:
                        nparray = np.array(doubleVals);
                        break;

                    case double[,] doubleVals:
                        nparray = np.array(doubleVals);
                        break;

                    case string strVal:
                        nparray = strVal;
                        break;

                    case string[] strVals:
                        nparray = strVals;
                        break;

                    case byte[] byteValues:
                        nparray = byteValues;
                        break;

                    case byte[,] byteValues:
                        nparray = np.array(byteValues);
                        break;

                    default:
                        throw new NotImplementedException($"make_tensor_proto: Support for type {values.GetType()} Not Implemented");
                    }
                }
                else
                {
                    // convert data type
                    switch (np_dt.Name)
                    {
                    case "Int32":
                        if (values.GetType().IsArray)
                        {
                            nparray = np.array((int[])values, np_dt);
                        }
                        else
                        {
                            nparray = Converts.ToInt32(values);
                        }
                        break;

                    case "Int64":
                        if (values.GetType().IsArray)
                        {
                            nparray = np.array((int[])values, np_dt);
                        }
                        else
                        {
                            nparray = Converts.ToInt64(values);
                        }
                        break;

                    case "Single":
                        if (values.GetType().IsArray)
                        {
                            nparray = np.array((float[])values, np_dt);
                        }
                        else
                        {
                            nparray = Converts.ToSingle(values);
                        }
                        break;

                    case "Double":
                        if (values.GetType().IsArray)
                        {
                            nparray = np.array((double[])values, np_dt);
                        }
                        else
                        {
                            nparray = Converts.ToDouble(values);
                        }
                        break;

                    case "String":
                        if (values.GetType().IsArray)
                        {
                            nparray = np.array((string[])values, np_dt);
                        }
                        else
                        {
                            nparray = NDArray.FromString(Converts.ToString(values));
                        }
                        break;

                    case "Boolean":
                        if (values.GetType().IsArray)
                        {
                            nparray = np.array((bool[])values, np_dt);
                        }
                        else
                        {
                            nparray = Converts.ToBoolean(values);
                        }
                        break;

                    default:
                        throw new NotImplementedException($"make_tensor_proto: Support for type {np_dt.Name} Not Implemented");
                    }
                }
            }

            var numpy_dtype = dtypes.as_dtype(nparray.dtype, dtype: dtype);

            if (numpy_dtype == TF_DataType.DtInvalid)
            {
                throw new TypeError($"Unrecognized data type: {nparray.dtype}");
            }

            // If dtype was specified and is a quantized type, we convert
            // numpy_dtype back into the quantized version.
            if (is_quantized)
            {
                numpy_dtype = dtype;
            }

            bool is_same_size = false;
            int  shape_size   = 0;

            // If shape is not given, get the shape from the numpy array.
            if (shape == null)
            {
                shape        = nparray.shape;
                is_same_size = true;
                shape_size   = nparray.size;
            }
            else
            {
                shape_size   = new TensorShape(shape).size;
                is_same_size = shape_size == nparray.size;
            }

            var tensor_proto = new TensorProto
            {
                Dtype       = numpy_dtype.as_datatype_enum(),
                TensorShape = tensor_util.as_shape(shape)
            };

            if (is_same_size && _TENSOR_CONTENT_TYPES.Contains(numpy_dtype) && shape_size > 1)
            {
                byte[] bytes = nparray.ToByteArray();
                tensor_proto.TensorContent = Google.Protobuf.ByteString.CopyFrom(bytes.ToArray());
                return(tensor_proto);
            }

            if (numpy_dtype == TF_DataType.TF_STRING && !(values is NDArray))
            {
                if (values is string str)
                {
                    tensor_proto.StringVal.Add(Google.Protobuf.ByteString.CopyFromUtf8(str));
                    tensor_proto.TensorShape = tensor_util.as_shape(new int[0]);
                }
                else if (values is string[] str_values)
                {
                    tensor_proto.StringVal.AddRange(str_values.Select(x => Google.Protobuf.ByteString.CopyFromUtf8(x)));
                }
                else if (values is byte[] byte_values)
                {
                    tensor_proto.TensorContent = Google.Protobuf.ByteString.CopyFrom(byte_values);
                }

                return(tensor_proto);
            }

            var proto_values = nparray.ravel();

            switch (nparray.dtype.Name)
            {
            case "Bool":
            case "Boolean":
                tensor_proto.BoolVal.AddRange(proto_values.Data <bool>());
                break;

            case "Int32":
                tensor_proto.IntVal.AddRange(proto_values.Data <int>());
                break;

            case "Int64":
                tensor_proto.Int64Val.AddRange(proto_values.Data <long>());
                break;

            case "Single":
                tensor_proto.FloatVal.AddRange(proto_values.Data <float>());
                break;

            case "Double":
                tensor_proto.DoubleVal.AddRange(proto_values.Data <double>());
                break;

            /*case "String":
             *  tensor_proto.StringVal.AddRange(proto_values.Data<string>().Select(x => Google.Protobuf.ByteString.CopyFromUtf8(x.ToString())));
             *  break;*/
            default:
                throw new Exception("make_tensor_proto Not Implemented");
            }

            return(tensor_proto);
        }
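
A short sketch exercising this variant's explicit-dtype branch (tensor_util is again assumed to be the containing class; the values are illustrative):

        // A double scalar requested as TF_FLOAT takes the np_dt branch above and is
        // converted with Converts.ToSingle before the proto is built (FloatVal).
        TensorProto p1 = tensor_util.make_tensor_proto(2.5, dtype: TF_DataType.TF_FLOAT);

        // A bool scalar with no dtype takes the np_dt == null branch and ends up in BoolVal.
        TensorProto p2 = tensor_util.make_tensor_proto(true);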