Example #1
 public static NDArray Crop(NDArray bbox, (int, int, int, int)?crop_box = null, bool allow_outside_center = true) => throw new NotImplementedException();
Example #2
 /// <summary>
 /// Creates a `Dataset` with a single element, comprising the given tensors.
 /// </summary>
 /// <param name="tensors"></param>
 /// <returns></returns>
 public IDatasetV2 from_tensor(NDArray tensors)
 => new TensorDataset(tensors);
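A minimal usage sketch for the call above. The `datasets` receiver is hypothetical (the class that exposes from_tensor is not shown here), and np.array follows its use elsewhere in this listing; the whole NDArray becomes the dataset's single element.

 // Hypothetical receiver standing in for whatever object exposes from_tensor.
 var features = np.array(new float[] { 1f, 2f, 3f, 4f });
 IDatasetV2 single = datasets.from_tensor(features);   // one element holding the full tensor

Compare with from_tensor_slices in Example #27, which instead yields one element per slice along the first axis.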
Example #3
 public NDArray spatial_derivative(NDArray A, int axis = 0)
 {
     return((A.roll(-1, axis) - A.roll(1, axis)) / (grid_spacing * 2.0));
 }
 public (NDArray, NDArray) CalibrationCurve(NDArray y_true, NDArray y_prob, bool normalize = false, int n_bins = 5,
                                            string strategy = "uniform")
 {
     throw new NotImplementedException();
 }
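spatial_derivative above is a plain central difference: roll the array one step forward and one step back along the axis, subtract, and divide by 2 * grid_spacing. A standalone sketch of the same stencil on a double[], assuming roll wraps around at the boundaries the way NDArray.roll does:

 // Central difference with wrap-around, mirroring (A.roll(-1) - A.roll(1)) / (2 * dx).
 static double[] CentralDifference(double[] a, double dx)
 {
     int n = a.Length;
     var d = new double[n];
     for (int i = 0; i < n; i++)
     {
         double forward  = a[(i + 1) % n];      // roll(-1): value one step ahead
         double backward = a[(i - 1 + n) % n];  // roll(1): value one step behind
         d[i] = (forward - backward) / (2.0 * dx);
     }
     return d;
 }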
Example #5
 public static int check_and_adjust_axis(NDArray nd, int axis)
 {
     return(check_and_adjust_axis(nd.ndim, axis));
 }
Example #6
 public static NDArray ImResize(NDArray src, int w, int h, ImgInterp interp = ImgInterp.Bilinear)
 {
     return(nd.Cvimresize(src, w, h, (int)interp));
 }
Example #7
        public void assertAllClose(double value, NDArray array2, double eps = 1e-5)
        {
            var array1 = np.ones_like(array2) * value;

            Assert.IsTrue(np.allclose(array1, array2, rtol: eps));
        }
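The assertion above passes when every element of array2 is within a relative tolerance of value. A standalone version of that check; the atol term follows NumPy's usual allclose definition, which is an assumption about this library's np.allclose:

 // Elementwise |a - b| <= atol + rtol * |b|, the conventional allclose rule (assumed here).
 static bool AllClose(double[] a, double[] b, double rtol = 1e-5, double atol = 1e-8)
 {
     if (a.Length != b.Length) return false;
     for (int i = 0; i < a.Length; i++)
         if (Math.Abs(a[i] - b[i]) > atol + rtol * Math.Abs(b[i]))
             return false;
     return true;
 }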
Example #8
        /// <summary>
        /// Create a TensorProto.
        /// </summary>
        /// <param name="values"></param>
        /// <param name="dtype"></param>
        /// <param name="shape"></param>
        /// <param name="verify_shape"></param>
        /// <param name="allow_broadcast"></param>
        /// <returns></returns>
        public static TensorProto make_tensor_proto(object values, TF_DataType dtype = TF_DataType.DtInvalid, int[] shape = null, bool verify_shape = false, bool allow_broadcast = false)
        {
            if (allow_broadcast && verify_shape)
            {
                throw new ValueError("allow_broadcast and verify_shape are not both allowed.");
            }
            if (values is TensorProto tp)
            {
                return(tp);
            }

            bool is_quantized = new TF_DataType[]
            {
                TF_DataType.TF_QINT8, TF_DataType.TF_QUINT8, TF_DataType.TF_QINT16, TF_DataType.TF_QUINT16,
                TF_DataType.TF_QINT32
            }.Contains(dtype);

            // We first convert value to a numpy array or scalar.
            NDArray nparray = null;
            var     np_dt   = dtype.as_numpy_datatype();

            if (values is NDArray nd)
            {
                nparray = nd;
            }
            else
            {
                if (values == null)
                {
                    throw new ValueError("None values not supported.");
                }

                if (np_dt == null)
                {
                    switch (values)
                    {
                    case bool boolVal:
                        nparray = boolVal;
                        break;

                    case int intVal:
                        nparray = intVal;
                        break;

                    case long intVal:
                        nparray = intVal;
                        break;

                    case int[] intVals:
                        nparray = np.array(intVals);
                        break;

                    case float floatVal:
                        nparray = floatVal;
                        break;

                    case float[] floatVals:
                        nparray = floatVals;
                        break;

                    case double doubleVal:
                        nparray = doubleVal;
                        break;

                    case string strVal:
                        nparray = strVal;
                        break;

                    case string[] strVals:
                        nparray = strVals;
                        break;

                    case byte[] byteValues:
                        nparray = byteValues;
                        break;

                    default:
                        throw new NotImplementedException("make_tensor_proto Not Implemented");
                    }
                }
                else
                {
                    // convert data type
                    switch (np_dt.Name)
                    {
                    case "Int32":
                        if (values.GetType().IsArray)
                        {
                            nparray = np.array((int[])values, np_dt);
                        }
                        else
                        {
                            nparray = Convert.ToInt32(values);
                        }
                        break;

                    case "Single":
                        if (values.GetType().IsArray)
                        {
                            nparray = np.array((float[])values, np_dt);
                        }
                        else
                        {
                            nparray = Convert.ToSingle(values);
                        }
                        break;

                    case "Double":
                        if (values.GetType().IsArray)
                        {
                            nparray = np.array((double[])values, np_dt);
                        }
                        else
                        {
                            nparray = Convert.ToDouble(values);
                        }
                        break;

                    case "String":
                        if (values.GetType().IsArray)
                        {
                            nparray = np.array((string[])values, np_dt);
                        }
                        else
                        {
                            nparray = Convert.ToString(values);
                        }
                        break;

                    default:
                        throw new NotImplementedException("make_tensor_proto Not Implemented");
                    }
                }
            }

            var numpy_dtype = dtypes.as_dtype(nparray.dtype);

            if (numpy_dtype == TF_DataType.DtInvalid)
            {
                throw new TypeError($"Unrecognized data type: {nparray.dtype}");
            }

            // If dtype was specified and is a quantized type, we convert
            // numpy_dtype back into the quantized version.
            if (is_quantized)
            {
                numpy_dtype = dtype;
            }

            bool is_same_size = false;
            int  shape_size   = 0;

            // If shape is not given, get the shape from the numpy array.
            if (shape == null)
            {
                shape        = nparray.shape;
                is_same_size = true;
                shape_size   = nparray.size;
            }
            else
            {
                shape_size   = new TensorShape(shape).Size;
                is_same_size = shape_size == nparray.size;
            }

            var tensor_proto = new tensor_pb2.TensorProto
            {
                Dtype       = numpy_dtype.as_datatype_enum(),
                TensorShape = tensor_util.as_shape(shape)
            };

            if (is_same_size && _TENSOR_CONTENT_TYPES.Contains(numpy_dtype) && shape_size > 1)
            {
                byte[] bytes = nparray.ToByteArray();
                tensor_proto.TensorContent = Google.Protobuf.ByteString.CopyFrom(bytes.ToArray());
                return(tensor_proto);
            }

            if (numpy_dtype == TF_DataType.TF_STRING && !(values is NDArray))
            {
                if (values is string str)
                {
                    tensor_proto.StringVal.Add(Google.Protobuf.ByteString.CopyFromUtf8(str));
                }
                else if (values is string[] str_values)
                {
                    tensor_proto.StringVal.AddRange(str_values.Select(x => Google.Protobuf.ByteString.CopyFromUtf8(x)));
                }
                else if (values is byte[] byte_values)
                {
                    tensor_proto.TensorContent = Google.Protobuf.ByteString.CopyFrom(byte_values);
                }

                return(tensor_proto);
            }

            var proto_values = nparray.ravel();

            switch (nparray.dtype.Name)
            {
            case "Bool":
                tensor_proto.BoolVal.AddRange(proto_values.Data <bool>());
                break;

            case "Int32":
                tensor_proto.IntVal.AddRange(proto_values.Data <int>());
                break;

            case "Int64":
                tensor_proto.Int64Val.AddRange(proto_values.Data <long>());
                break;

            case "Single":
                tensor_proto.FloatVal.AddRange(proto_values.Data <float>());
                break;

            case "Double":
                tensor_proto.DoubleVal.AddRange(proto_values.Data <double>());
                break;

            case "String":
                tensor_proto.StringVal.AddRange(proto_values.Data <string>().Select(x => Google.Protobuf.ByteString.CopyFromUtf8(x.ToString())));
                break;

            default:
                throw new Exception("make_tensor_proto Not Implemented");
            }

            return(tensor_proto);
        }
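A usage sketch for the factory above, assuming the containing static class is in scope. With four floats and a 2x2 shape, shape_size equals nparray.size, so is_same_size stays true and (assuming float32 is among _TENSOR_CONTENT_TYPES) the data is packed into TensorContent rather than FloatVal:

 // Hedged sketch: exercises the explicit-dtype, explicit-shape path shown above.
 TensorProto proto = make_tensor_proto(new float[] { 1f, 2f, 3f, 4f },
                                       dtype: TF_DataType.TF_FLOAT,
                                       shape: new[] { 2, 2 });
 // proto.Dtype is the float32 enum value and proto.TensorContent holds the 16 raw bytes.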
Example #9
 public abstract NDArray create_state(int index, NDArray weight);
Example #10
 public NDArray AugmentationTransform(NDArray data)
 {
     throw new NotImplementedException();
 }
Example #11
 public NDArray PostProcessData(NDArray datum)
 {
     throw new NotImplementedException();
 }
Example #12
 public void CheckValidImage(NDArray data)
 {
     throw new NotImplementedException();
 }
 public LightingAug(float alphastd, NDArray eigval, NDArray eigvec)
 {
     Alphastd = alphastd;
     Eigval   = eigval;
     Eigvec   = eigvec;
 }
Example #14
 public void PrepareData()
 {
     mnist       = MnistDataSet.read_data_sets("mnist", one_hot: true, train_size: train_size, validation_size: validation_size, test_size: test_size);
     full_data_x = mnist.train.images;
 }
Example #15
 public List <NDArray> MakePrediction(NDArray X)
 {
     return(FeedForward(X / 255, forPrediction: true));
 }
Example #16
 public abstract void update(int index, NDArray weight, NDArray grad, NDArray state);
Example #17
        public virtual NDArray Multiply(NDArray x, NDArray y)
        {
            // The following code determines whether either operand is a scalar
            // and, from that, which operand's dtype and shape the result should take.
            int scalarNo = !(x.ndim == 0 || y.ndim == 0) ? 0 : -1;

            if (scalarNo == 0)
            {
                if (!Enumerable.SequenceEqual(x.shape, y.shape))
                {
                    throw new IncorrectShapeException();
                }
            }
            else
            {
                if (x.ndim == 0)
                {
                    scalarNo = 1;
                }
                else
                {
                    scalarNo = 2;
                }
            }

            NDArray result = null;

            switch (scalarNo)
            {
            case 1:
            {
                result = new NDArray(y.dtype, y.shape);
                break;
            }

            case 2:
            {
                result = new NDArray(x.dtype, x.shape);
                break;
            }

            default:
            {
                result = new NDArray(x.dtype, x.shape);
                break;
            }
            }

            var np1SysArr = x.Array;
            var np2SysArr = y.Array;
            var np3SysArr = result.Array;

            switch (np3SysArr)
            {
            case int[] resArr:
            {
                var np1Array = np1SysArr as int[];
                var np2Array = np2SysArr as int[];
                np1Array = (np1Array == null) ? x.CloneData <int>() : np1Array;
                np2Array = (np2Array == null) ? y.CloneData <int>() : np2Array;

                if (scalarNo == 0)
                {
                    Parallel.For(0, np3SysArr.Length, idx =>
                        {
                            resArr[idx] = np1Array[idx] * np2Array[idx];
                        });
                }
                else if (scalarNo == 1)
                {
                    var scalar = x.CloneData <int>()[0];
                    for (int idx = 0; idx < np3SysArr.Length; idx++)
                    {
                        resArr[idx] = scalar * np2Array[idx];
                    }
                }
                else if (scalarNo == 2)
                {
                    var scalar = y.CloneData <int>()[0];
                    for (int idx = 0; idx < np3SysArr.Length; idx++)
                    {
                        resArr[idx] = np1Array[idx] * scalar;
                    }
                }
                break;
            }

            case System.Int64[] resArr:
            {
                System.Int64[] np1Array = np1SysArr as System.Int64[];
                System.Int64[] np2Array = np2SysArr as System.Int64[];
                np1Array = (np1Array == null) ? x.CloneData <System.Int64>() : np1Array;
                np2Array = (np2Array == null) ? y.CloneData <System.Int64>() : np2Array;

                if (scalarNo == 0)
                {
                    for (int idx = 0; idx < np3SysArr.Length; idx++)
                    {
                        resArr[idx] = np1Array[idx] * np2Array[idx];
                    }
                }
                else if (scalarNo == 1)
                {
                    System.Int64 scalar = x.CloneData <System.Int64>()[0];
                    for (int idx = 0; idx < np3SysArr.Length; idx++)
                    {
                        resArr[idx] = scalar * np2Array[idx];
                    }
                }
                else if (scalarNo == 2)
                {
                    System.Int64 scalar = y.CloneData <System.Int64>()[0];
                    for (int idx = 0; idx < np3SysArr.Length; idx++)
                    {
                        resArr[idx] = np1Array[idx] * scalar;
                    }
                }
                break;
            }

            case float[] resArr:
            {
                var np1Array = np1SysArr as float[];
                var np2Array = np2SysArr as float[];
                np1Array = (np1Array == null) ? x.CloneData <float>() : np1Array;
                np2Array = (np2Array == null) ? y.CloneData <float>() : np2Array;

                if (scalarNo == 0)
                {
                    Parallel.For(0, np3SysArr.Length, idx =>
                        {
                            resArr[idx] = np1Array[idx] * np2Array[idx];
                        });
                }
                else if (scalarNo == 1)
                {
                    var scalar = x.CloneData <float>()[0];
                    Parallel.For(0, np3SysArr.Length, idx =>
                        {
                            resArr[idx] = scalar * np2Array[idx];
                        });
                }
                else if (scalarNo == 2)
                {
                    var scalar = y.CloneData <float>()[0];
                    Parallel.For(0, np3SysArr.Length, idx =>
                        {
                            resArr[idx] = np1Array[idx] * scalar;
                        });
                }
                break;
            }

            case System.Double[] resArr:
            {
                System.Double[] np1Array = np1SysArr as System.Double[];
                System.Double[] np2Array = np2SysArr as System.Double[];
                np1Array = (np1Array == null) ? x.CloneData <System.Double>() : np1Array;
                np2Array = (np2Array == null) ? y.CloneData <System.Double>() : np2Array;

                if (scalarNo == 0)
                {
                    for (int idx = 0; idx < np3SysArr.Length; idx++)
                    {
                        resArr[idx] = np1Array[idx] * np2Array[idx];
                    }
                }
                else if (scalarNo == 1)
                {
                    System.Double scalar = x.CloneData <System.Double>()[0];
                    for (int idx = 0; idx < np3SysArr.Length; idx++)
                    {
                        resArr[idx] = scalar * np2Array[idx];
                    }
                }
                else if (scalarNo == 2)
                {
                    System.Double scalar = y.CloneData <System.Double>()[0];
                    for (int idx = 0; idx < np3SysArr.Length; idx++)
                    {
                        resArr[idx] = np1Array[idx] * scalar;
                    }
                }
                break;
            }

            case System.Numerics.Complex[] resArr:
            {
                System.Numerics.Complex[] np1Array = np1SysArr as System.Numerics.Complex[];
                System.Numerics.Complex[] np2Array = np2SysArr as System.Numerics.Complex[];
                np1Array = (np1Array == null) ? x.CloneData <System.Numerics.Complex>() : np1Array;
                np2Array = (np2Array == null) ? y.CloneData <System.Numerics.Complex>() : np2Array;

                if (scalarNo == 0)
                {
                    for (int idx = 0; idx < np3SysArr.Length; idx++)
                    {
                        resArr[idx] = np1Array[idx] * np2Array[idx];
                    }
                }
                else if (scalarNo == 1)
                {
                    System.Numerics.Complex scalar = x.CloneData <System.Numerics.Complex>()[0];
                    for (int idx = 0; idx < np3SysArr.Length; idx++)
                    {
                        resArr[idx] = scalar * np2Array[idx];
                    }
                }
                else if (scalarNo == 2)
                {
                    System.Numerics.Complex scalar = y.CloneData <System.Numerics.Complex>()[0];
                    for (int idx = 0; idx < np3SysArr.Length; idx++)
                    {
                        resArr[idx] = np1Array[idx] * scalar;
                    }
                }
                break;
            }

            /*case System.Numerics.Quaternion[] resArr :
             *              {
             *                  System.Numerics.Quaternion[] np1Array = np1SysArr as System.Numerics.Quaternion[];
             *                  System.Numerics.Quaternion[] np2Array = np2SysArr as System.Numerics.Quaternion[];
             *                  np1Array = (np1Array == null) ? np1.Storage.CloneData<System.Numerics.Quaternion>() : np1Array;
             *                  np2Array = (np2Array == null) ? np2.Storage.CloneData<System.Numerics.Quaternion>() : np2Array;
             *
             *                  if (scalarNo == 0 )
             *                      for( int idx = 0; idx < np3SysArr.Length;idx++)
             *                          resArr[idx] = np1Array[idx] * np2Array[idx];
             *                  else if (scalarNo == 1 )
             *                  {
             *                      System.Numerics.Quaternion scalar = np1.Storage.CloneData<System.Numerics.Quaternion>()[0];
             *                      for( int idx = 0; idx < np3SysArr.Length;idx++)
             *                          resArr[idx] = scalar * np2Array[idx];
             *                  }
             *                  else if (scalarNo == 2 )
             *                  {
             *                      System.Numerics.Quaternion scalar = np2.Storage.CloneData<System.Numerics.Quaternion>()[0];
             *                      for( int idx = 0; idx < np3SysArr.Length;idx++)
             *                          resArr[idx] = np1Array[idx] * scalar;
             *                  }
             *                  break;
             *              }*/
            default:
            {
                throw new IncorrectTypeException();
            }
            }

            return(result);
        }
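The scalarNo bookkeeping above boils down to three cases: both operands full arrays (elementwise product over equal shapes), x a scalar, or y a scalar, with the non-scalar operand determining the result's shape. A standalone sketch of that dispatch for double[], where a length-1 array stands in for the ndim == 0 scalar case:

 // Same cases as scalarNo above: 0 = elementwise, 1 = x is the scalar, 2 = y is the scalar.
 static double[] Multiply(double[] x, double[] y)
 {
     if (x.Length == 1)                       // case 1: scale y by the single x value
         return Array.ConvertAll(y, v => x[0] * v);
     if (y.Length == 1)                       // case 2: scale x by the single y value
         return Array.ConvertAll(x, v => v * y[0]);
     if (x.Length != y.Length)                // case 0 requires matching shapes
         throw new ArgumentException("shape mismatch");
     var r = new double[x.Length];
     for (int i = 0; i < r.Length; i++)
         r[i] = x[i] * y[i];
     return r;
 }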
Example #18
 public EagerTensor(NDArray value, string device_name) : base(value)
 {
     NewEagerTensorHandle(_handle);
 }
Example #19
 public void assertAllClose(NDArray array1, NDArray array2, double eps = 1e-5)
 {
     Assert.IsTrue(np.allclose(array1, array2, rtol: eps));
 }
Example #20
 public override void Update(NDArray labels, NDArray preds)
 {
     throw new NotImplementedException();
 }
        public void Train(Session sess)
        {
            var graph = tf.Graph();

            // Input images
            Tensor X = graph.get_operation_by_name("Placeholder");   // tf.placeholder(tf.float32, shape: new TensorShape(-1, num_features));
            //  Labels (for assigning a label to a centroid and testing)
            Tensor Y = graph.get_operation_by_name("Placeholder_1"); // tf.placeholder(tf.float32, shape: new TensorShape(-1, num_classes));

            // K-Means Parameters
            //var kmeans = new KMeans(X, k, distance_metric: KMeans.COSINE_DISTANCE, use_mini_batch: true);

            // Build KMeans graph
            //var training_graph = kmeans.training_graph();

            var     init_vars    = tf.global_variables_initializer();
            Tensor  init_op      = graph.get_operation_by_name("cond/Merge");
            var     train_op     = graph.get_operation_by_name("group_deps");
            Tensor  avg_distance = graph.get_operation_by_name("Mean");
            Tensor  cluster_idx  = graph.get_operation_by_name("Squeeze_1");
            NDArray result       = null;

            sess.run(init_vars, new FeedItem(X, full_data_x));
            sess.run(init_op, new FeedItem(X, full_data_x));

            // Training
            var sw = new Stopwatch();

            foreach (var i in range(1, num_steps + 1))
            {
                sw.Restart();
                result = sess.run(new ITensorOrOperation[] { train_op, avg_distance, cluster_idx }, new FeedItem(X, full_data_x));
                sw.Stop();

                if (i % 4 == 0 || i == 1)
                {
                    print($"Step {i}, Avg Distance: {result[1]} Elapse: {sw.ElapsedMilliseconds}ms");
                }
            }

            var idx = result[2].Data <int>();

            // Assign a label to each centroid
            // Count the labels per centroid, attributing each training sample's label
            // to its closest centroid (given by 'idx')
            var counts = np.zeros((k, num_classes), np.float32);

            sw.Start();
            foreach (var i in range(idx.Length))
            {
                var x = mnist.Train.Labels[i];
                counts[idx[i]] += x;
            }

            sw.Stop();
            print($"Assign a label to each centroid took {sw.ElapsedMilliseconds}ms");

            // Assign the most frequent label to the centroid
            var labels_map_array = np.argmax(counts, 1);
            var labels_map       = tf.convert_to_tensor(labels_map_array);

            // Evaluation ops
            // Lookup: centroid_id -> label
            var cluster_label = tf.nn.embedding_lookup(labels_map, cluster_idx);

            // Compute accuracy
            var correct_prediction = tf.equal(cluster_label, tf.cast(tf.argmax(Y, 1), tf.int32));
            var cast        = tf.cast(correct_prediction, tf.float32);
            var accuracy_op = tf.reduce_mean(cast);

            // Test Model
            var(test_x, test_y) = (mnist.Test.Data, mnist.Test.Labels);
            result       = sess.run(accuracy_op, new FeedItem(X, test_x), new FeedItem(Y, test_y));
            accuray_test = result;
            print($"Test Accuracy: {accuray_test}");
        }
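The centroid-labelling pass above tallies, for each cluster index, the one-hot labels of the samples assigned to it, then argmaxes each row to pick the cluster's label. A standalone sketch of that tally with plain integer labels instead of one-hot rows:

 // counts[c, j] counts how many samples of class j fell into cluster c, mirroring
 // counts[idx[i]] += mnist.Train.Labels[i] above; the argmax per row gives labels_map.
 static int[] MapClustersToLabels(int[] clusterIdx, int[] labelIdx, int k, int numClasses)
 {
     var counts = new int[k, numClasses];
     for (int i = 0; i < clusterIdx.Length; i++)
         counts[clusterIdx[i], labelIdx[i]]++;

     var map = new int[k];
     for (int c = 0; c < k; c++)
         for (int j = 1; j < numClasses; j++)
             if (counts[c, j] > counts[c, map[c]])
                 map[c] = j;
     return map;
 }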
Example #22
        /// <summary>
        /// Create a TensorProto.
        /// </summary>
        /// <param name="values"></param>
        /// <param name="dtype"></param>
        /// <param name="shape"></param>
        /// <param name="verify_shape"></param>
        /// <param name="allow_broadcast"></param>
        /// <returns></returns>
        public static TensorProto make_tensor_proto(object values, TF_DataType dtype = TF_DataType.DtInvalid, int[] shape = null, bool verify_shape = false, bool allow_broadcast = false)
        {
            if (allow_broadcast && verify_shape)
            {
                throw new ValueError("allow_broadcast and verify_shape are not both allowed.");
            }
            if (values is TensorProto tp)
            {
                return(tp);
            }

            // We first convert value to a numpy array or scalar.
            NDArray nparray = null;
            var     np_dt   = dtype.as_numpy_dtype();

            if (values is NDArray nd)
            {
                nparray = nd;
            }
            else if (values is string str)
            {
                // scalar string
                nparray = convert_to_numpy_ndarray(values);
                shape   = new int[0];
            }
            else if (values is string[] strings)
            {
                nparray = convert_to_numpy_ndarray(values);
                shape   = new[] { strings.Length };
            }
            else
            {
                if (values == null)
                {
                    throw new ValueError("None values not supported.");
                }

                nparray = convert_to_numpy_ndarray(values);

                if (np_dt != null && np_dt != typeof(string))
                {
                    nparray = nparray.astype(np_dt);
                }
            }

            var numpy_dtype = nparray.dtype.as_dtype(dtype: dtype);

            if (numpy_dtype == TF_DataType.DtInvalid)
            {
                throw new TypeError($"Unrecognized data type: {nparray.dtype}");
            }

            // If dtype was specified and is a quantized type, we convert
            // numpy_dtype back into the quantized version.
            if (quantized_types.Contains(dtype))
            {
                numpy_dtype = dtype;
            }

            bool is_same_size = false;
            int  shape_size   = 0;

            // If shape is not given, get the shape from the numpy array.
            if (shape == null)
            {
                if (numpy_dtype == TF_DataType.TF_STRING)
                {
                    if (nparray.ndim == 0)
                    {
                        // scalar string
                        shape      = new int[0];
                        shape_size = 0;
                    }
                    else
                    {
                        throw new NotImplementedException($"Not implemented for {nparray.ndim} dims string array.");
                    }
                }
                else
                {
                    shape        = nparray.shape;
                    is_same_size = true;
                    shape_size   = nparray.size;
                }
            }
            else
            {
                shape_size   = new TensorShape(shape).size;
                is_same_size = shape_size == nparray.size;
            }

            var tensor_proto = new TensorProto
            {
                Dtype       = numpy_dtype.as_datatype_enum(),
                TensorShape = tensor_util.as_shape(shape)
            };

            if (is_same_size && _TENSOR_CONTENT_TYPES.Contains(numpy_dtype) && shape_size > 1)
            {
                byte[] bytes = nparray.ToByteArray();
                tensor_proto.TensorContent = Google.Protobuf.ByteString.CopyFrom(bytes.ToArray());
                return(tensor_proto);
            }

            if (numpy_dtype == TF_DataType.TF_STRING && !(values is NDArray))
            {
                if (values is string str)
                {
                    tensor_proto.StringVal.Add(Google.Protobuf.ByteString.CopyFromUtf8(str));
                    tensor_proto.TensorShape = tensor_util.as_shape(new int[0]);
                }
                else if (values is string[] str_values)
                {
                    tensor_proto.StringVal.AddRange(str_values.Select(x => Google.Protobuf.ByteString.CopyFromUtf8(x)));
                }
                else if (values is byte[] byte_values)
                {
                    tensor_proto.TensorContent = Google.Protobuf.ByteString.CopyFrom(byte_values);
                }

                return(tensor_proto);
            }

            var proto_values = nparray.ravel();

            switch (nparray.dtype.Name)
            {
            case "Bool":
            case "Boolean":
                tensor_proto.BoolVal.AddRange(proto_values.Data <bool>());
                break;

            case "Int32":
                tensor_proto.IntVal.AddRange(proto_values.Data <int>());
                break;

            case "Int64":
                tensor_proto.Int64Val.AddRange(proto_values.Data <long>());
                break;

            case "Single":
                tensor_proto.FloatVal.AddRange(proto_values.Data <float>());
                break;

            case "Double":
                tensor_proto.DoubleVal.AddRange(proto_values.Data <double>());
                break;

            /*case "String":
             *  tensor_proto.StringVal.AddRange(proto_values.Data<string>().Select(x => Google.Protobuf.ByteString.CopyFromUtf8(x.ToString())));
             *  break;*/
            default:
                throw new Exception("make_tensor_proto Not Implemented");
            }

            return(tensor_proto);
        }
 public (float, float) SigmoidCalibration(NDArray df, NDArray y, NDArray sample_weight = null)
 {
     throw new NotImplementedException();
 }
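In the Example #22 variant of make_tensor_proto, a scalar C# string takes a dedicated path: the shape is forced to rank 0 and the UTF-8 bytes land in StringVal instead of TensorContent. A usage sketch, assuming the containing static class is in scope and that passing the dtype explicitly resolves numpy_dtype to TF_STRING:

 // Scalar string path from the variant above.
 TensorProto p = make_tensor_proto("hello", dtype: TF_DataType.TF_STRING);
 // p.TensorShape has no dimensions and p.StringVal holds the UTF-8 bytes of "hello".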
Example #24
        public static NDArray convert_to_numpy_ndarray(object values)
        {
            NDArray nd;

            switch (values)
            {
            case NDArray val:
                nd = val;
                break;

            case TensorShape val:
                nd = val.dims;
                break;

            case bool boolVal:
                nd = boolVal;
                break;

            case int intVal:
                nd = intVal;
                break;

            case int[] intVals:
                nd = np.array(intVals);
                break;

            case int[,] intVals:
                nd = np.array(intVals);
                break;

            case long intVal:
                nd = intVal;
                break;

            case long[] intVals:
                nd = np.array(intVals);
                break;

            case long[,] intVals:
                nd = np.array(intVals);
                break;

            case float floatVal:
                nd = floatVal;
                break;

            case float[] floatVals:
                nd = floatVals;
                break;

            case float[,] floatVals:
                nd = np.array(floatVals);
                break;

            case double doubleVal:
                nd = doubleVal;
                break;

            case double[] doubleVals:
                nd = np.array(doubleVals);
                break;

            case double[,] doubleVals:
                nd = np.array(doubleVals);
                break;

            case string strVal:
                nd = new NDArray(Encoding.ASCII.GetBytes(strVal));
                break;

            case string[] strVals:
                nd = np.array(strVals);
                break;

            case byte[] byteValues:
                nd = byteValues;
                break;

            case byte[,] byteValues:
                nd = np.array(byteValues);
                break;

            default:
                throw new NotImplementedException($"convert_to_numpy_ndarray: Support for type {values.GetType()} Not Implemented");
            }

            return(nd);
        }
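A short usage sketch for the converter above; note from the switch that a scalar string is stored as its ASCII bytes rather than as a string-typed NDArray. The unqualified calls assume the containing static class is in scope:

 // Each call lands in the matching case of the switch above.
 NDArray fromInts   = convert_to_numpy_ndarray(new[] { 1, 2, 3 });   // int[]     -> np.array(intVals)
 NDArray fromMatrix = convert_to_numpy_ndarray(new double[2, 2]);    // double[,] -> np.array(doubleVals)
 NDArray fromString = convert_to_numpy_ndarray("abc");               // string    -> ASCII bytes in an NDArray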
Example #25
 public abstract NDArray Call(NDArray w);
Example #26
 public abstract NDArray Forward(NDArray preds, NDArray labels);
Example #27
 public IDatasetV2 from_tensor_slices(NDArray array)
 => new TensorSliceDataset(array);
Example #28
 public abstract NDArray Backward(NDArray preds, NDArray labels);
Example #29
 public NDArray d_dx(NDArray A)
 {
     return(this.spatial_derivative(A, 1));
 }
Example #30
 public static NDArray AffineTransform(NDArray pt, NDArray t) => throw new NotImplementedException();