private static NDArray _broadcast_to<T>(NDArray nd, Shape shape, bool subok, bool rdonly)
{
    var table = new T[shape.Dimensions[0], shape.Dimensions[1]];

    if (nd.shape[0] == 1)
    {
        // broadcast a (1, n) row across every row of the target shape
        for (int i = 0; i < shape.Dimensions[0]; i++)
        {
            for (int j = 0; j < shape.Dimensions[1]; j++)
            {
                table[i, j] = nd.Data<T>(0, j);
            }
        }
    }
    else if (nd.shape[1] == 1)
    {
        // broadcast an (m, 1) column across every column of the target shape
        for (int i = 0; i < shape.Dimensions[0]; i++)
        {
            for (int j = 0; j < shape.Dimensions[1]; j++)
            {
                table[i, j] = nd.Data<T>(i, 0);
            }
        }
    }

    return np.array<T>(table);
}
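// Hedged usage sketch of the broadcasting rule implemented above: a (1, n) row or an
// (m, 1) column is tiled to fill the requested 2-D shape. The np.broadcast_to-style
// public wrapper named below is an assumption, not confirmed by this file.
//
//     var row   = np.array(new int[,] { { 1, 2, 3 } });     // shape (1, 3)
//     var tiled = np.broadcast_to(row, new Shape(4, 3));    // the row repeated 4 times
//     // tiled[2, 1] == 2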
public Tensor tensor(NumSharp.NDArray data, dtype? dtype = null, device? device = null, bool? requires_grad = null, bool? pin_memory = null)
{
    // note: this implementation works only for device CPU
    // todo: implement for GPU
    var type = data.dtype.ToDtype();
    if (dtype != null && type != dtype)
    {
        throw new NotImplementedException("Type of the array is different from the specified dtype. Data conversion is not supported (yet).");
    }

    var tensor = torch.empty((Shape)data.shape, dtype: type, device: device, requires_grad: requires_grad, pin_memory: pin_memory);
    var storage = tensor.PyObject.storage();
    long ptr = storage.data_ptr();

    // copy the flat NDArray buffer directly into the tensor's storage
    switch (type)
    {
        case Torch.dtype.UInt8: Marshal.Copy(data.Data<byte>(), 0, new IntPtr(ptr), data.len); break;
        case Torch.dtype.Int32: Marshal.Copy(data.Data<int>(), 0, new IntPtr(ptr), data.len); break;
        case Torch.dtype.Int64: Marshal.Copy(data.Data<long>(), 0, new IntPtr(ptr), data.len); break;
        case Torch.dtype.Float32: Marshal.Copy(data.Data<float>(), 0, new IntPtr(ptr), data.len); break;
        case Torch.dtype.Float64: Marshal.Copy(data.Data<double>(), 0, new IntPtr(ptr), data.len); break;
    }

    return tensor;
}
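// Hedged usage sketch: converting a NumSharp buffer into a Torch tensor. The torch.tensor
// accessor path below is an assumption; only the CPU code path shown above is exercised.
//
//     var nd = np.array(new float[] { 1f, 2f, 3f });   // flat float buffer
//     Tensor t = torch.tensor(nd);                     // dtype inferred as Float32, data copied into storage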
/// <summary>
/// Draw samples from a uniform distribution.
/// Samples are uniformly distributed over the half-open interval [low, high) (includes low, but excludes high).
/// In other words, any value within the given interval is equally likely to be drawn by uniform.
/// </summary>
/// <param name="low">Lower boundary of the output interval. All values generated will be greater than or equal to low. The default value is 0.</param>
/// <param name="high">Upper boundary of the output interval. All values generated will be less than high. The default value is 1.0.</param>
/// <param name="size">Output shape. If the given shape is, e.g., (m, n, k), then m * n * k samples are drawn. If size is empty (default), a single value is returned.</param>
/// <returns>NDArray with values of type <see cref="double"/></returns>
public NDArray uniform(double low, double high, params int[] size)
{
    if (size == null || size.Length == 0)
    {
        // return a single sample with shape (1,)
        var ret = new NDArray<double>(new Shape(1));
        var data = new double[] { low + randomizer.NextDouble() * (high - low) };
        ret.ReplaceData(data);
        return ret;
    }

    var result = new NDArray<double>(size);
    double[] resultArray = result.Data<double>();

    // parallelism is avoided here so the result stays deterministic for a given seed
    double diff = high - low;
    for (int i = 0; i < result.size; ++i)
    {
        resultArray[i] = low + randomizer.NextDouble() * diff;
    }

    result.ReplaceData(resultArray); // in case the underlying storage is a view
    return result;
}
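// Hedged usage sketch (assuming this method is reachable through an np.random-style
// accessor mirroring NumPy; the accessor path is not confirmed by this file):
//
//     var samples = np.random.uniform(-1.0, 1.0, 2, 3);   // 2x3 doubles in [-1, 1)
//     var scalar  = np.random.uniform(0.0, 5.0);          // shape (1,) sample in [0, 5)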
public static NDArray prod(NDArray nd, int axis = -1, Type dtype = null)
{
    NDArray result = null;

    // the product of an empty array is 1, matching numpy's empty-product convention
    if (nd.size == 0)
    {
        return 1;
    }

    if (axis == -1)
    {
        switch (nd.dtype.Name)
        {
            case "Int32":
                {
                    int prod = 1;
                    for (int i = 0; i < nd.size; i++)
                    {
                        prod *= nd.Data<int>(i);
                    }
                    result = prod;
                }
                break;
            case "Int64":
                {
                    long prod = 1;
                    for (int i = 0; i < nd.size; i++)
                    {
                        prod *= nd.Data<long>(i);
                    }
                    result = prod;
                }
                break;
        }
    }
    else
    {
        throw new NotImplementedException($"np.prod axis {axis}");
    }

    return result;
}
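// Hedged usage sketch: flat product over all elements (axis = -1 is the only supported
// path above). The np.prod accessor is assumed to forward to this static method.
//
//     var a = np.array(new int[] { 2, 3, 4 });
//     var p = np.prod(a);    // 24 (Int32 path)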
/// <summary>
/// Random values in a given shape.
/// Create an array of the given shape and populate it with random samples from a uniform distribution over [0, 1).
/// </summary>
public NDArray rand(Shape shape)
{
    NDArray ndArray = new NDArray(typeof(double), shape);
    double[] numArray = ndArray.Data<double>();

    for (int index = 0; index < ndArray.size; ++index)
    {
        numArray[index] = randomizer.NextDouble();
    }

    ndArray.SetData<double[]>(numArray);
    return ndArray;
}
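// Hedged usage sketch (np.random-style accessor assumed):
//
//     var r = np.random.rand(new Shape(3, 3));   // 3x3 doubles drawn from [0, 1)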
private NDArray setValue1D<T>(NDArray indexes)
{
    // gather the elements at the given indexes from the flat buffer
    var buf = Data<T>();
    var idx = indexes.Data<int>();
    var values = new T[indexes.size];

    Parallel.For(0, indexes.size, (row) =>
    {
        values[row] = buf[idx[row]];
    });

    return new NDArray(values, indexes.size);
}
public static NDArray repeat(NDArray nd, int repeats, int axis = -1)
{
    int size = nd.size * repeats;

    // flat repeat: each element is written `repeats` times in a row into a 1-D result
    switch (nd.dtype.Name)
    {
        case "Int32":
            {
                var nd2 = new NDArray(new int[size], new Shape(size));
                var data = nd.Data<int>();
                for (int i = 0; i < nd.size; i++)
                {
                    for (int j = 0; j < repeats; j++)
                    {
                        nd2.itemset(i * repeats + j, data[i]);
                    }
                }
                return nd2;
            }
        case "Boolean":
            {
                var nd2 = new NDArray(new bool[size], new Shape(size));
                var data = nd.Data<bool>();
                for (int i = 0; i < nd.size; i++)
                {
                    for (int j = 0; j < repeats; j++)
                    {
                        nd2.itemset(i * repeats + j, data[i]);
                    }
                }
                return nd2;
            }
    }

    throw new NotImplementedException("np.repeat");
}
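// Hedged usage sketch: element-wise repetition into a flat array, matching np.repeat
// without an axis argument; the np.repeat accessor is assumed to forward here.
//
//     var a = np.array(new int[] { 1, 2 });
//     var r = np.repeat(a, 3);   // [1, 1, 1, 2, 2, 2], shape (6,)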
private NDArray setValue4D<T>(NDArray indexes)
{
    var nd = new NDArray(dtype, new Shape(indexes.size, shape[1], shape[2], shape[3]));

    /*switch (Type.GetTypeCode(dtype))
    {
        case TypeCode.Byte:
        {
            var buf = Data<byte>().AsSpan();
            var idx = indexes.Data<int>().AsSpan();
            var shapes = nd.shape.AsSpan();
            var data = nd.Data<byte>().AsSpan();

            Parallel.For(0, nd.shape[0], (item) =>
            {
                for (int row = 0; row < shapes[1]; row++)
                    for (int col = 0; col < shapes[2]; col++)
                        for (int channel = 0; channel < shapes[3]; channel++)
                            data[Storage.Shape.GetIndexInShape(item, row, col, channel)] = buf[Storage.Shape.GetIndexInShape(idx[item], row, col, channel)];
            });
        }
        break;
    }*/

    {
        var buf = Data<T>();
        var idx = indexes.Data<int>();
        var data = nd.Data<T>();

        // copy every selected 3-D slice (row, col, channel) into the output
        Parallel.For(0, nd.shape[0], (item) =>
        {
            for (int row = 0; row < nd.shape[1]; row++)
            {
                for (int col = 0; col < nd.shape[2]; col++)
                {
                    for (int channel = 0; channel < nd.shape[3]; channel++)
                    {
                        data[Storage.Shape.GetIndexInShape(item, row, col, channel)] = buf[Storage.Shape.GetIndexInShape(idx[item], row, col, channel)];
                    }
                }
            }
        });
    }

    return nd;
}
private NDArray setValue2D<T>(NDArray indexes)
{
    var buf = Data<T>();
    var idx = indexes.Data<int>();
    var nd = new NDArray(dtype, new Shape(indexes.size, shape[1]));

    Parallel.For(0, nd.shape[0], (row) =>
    {
        for (int col = 0; col < nd.shape[1]; col++)
        {
            nd.SetData(buf[Storage.Shape.GetIndexInShape(idx[row], col)], row, col);
        }
    });

    return nd;
}
private NDArray setValue2D<T>(NDArray indexes)
{
    var buf = Data<T>();
    var idx = indexes.Data<int>();
    var selectedValues = new NDArray(this.dtype, new Shape(indexes.size, shape[1]));

    Parallel.ForEach(Enumerable.Range(0, selectedValues.shape[0]), (row) =>
    {
        for (int col = 0; col < selectedValues.shape[1]; col++)
        {
            selectedValues[row, col] = buf[Storage.Shape.GetIndexInShape(idx[row], col)];
        }
    });

    return selectedValues;
}
private NDArray setValue3D<T>(NDArray indexes)
{
    var buf = Data<T>();
    var selectedValues = new NDArray(dtype, new Shape(indexes.size, shape[1], shape[2]));
    var idx = indexes.Data<int>();

    Parallel.ForEach(Enumerable.Range(0, selectedValues.shape[0]), (item) =>
    {
        for (int row = 0; row < selectedValues.shape[1]; row++)
        {
            for (int col = 0; col < selectedValues.shape[2]; col++)
            {
                selectedValues.SetData(buf[Storage.Shape.GetIndexInShape(idx[item], row, col)], item, row, col);
            }
        }
    });

    return selectedValues;
}
/// <summary>
/// Draw random samples from a normal (Gaussian) distribution.
/// </summary>
/// <param name="loc">Mean of the distribution</param>
/// <param name="scale">Standard deviation of the distribution</param>
/// <param name="dims">Shape of the output array</param>
/// <returns>NDArray of samples drawn from N(loc, scale^2)</returns>
public NDArray normal(double loc, double scale, params int[] dims)
{
    var array = new NDArray(typeof(double), new Shape(dims));
    double[] arr = array.Data<double>();

    for (int i = 0; i < array.size; i++)
    {
        // Box-Muller transform: turn two uniform(0,1] samples into one standard normal sample
        double u1 = 1.0 - randomizer.NextDouble();
        double u2 = 1.0 - randomizer.NextDouble();
        double randStdNormal = Math.Sqrt(-2.0 * Math.Log(u1)) * Math.Sin(2.0 * Math.PI * u2); // standard normal(0,1)
        arr[i] = loc + scale * randStdNormal; // scale and shift to normal(loc, scale^2)
    }

    array.ReplaceData(arr);
    return array;
}
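// Hedged usage sketch: Box-Muller sampling as implemented above (np.random-style accessor
// assumed). loc shifts the mean, scale sets the standard deviation.
//
//     var z = np.random.normal(0.0, 1.0, 1000);    // ~standard normal samples
//     var x = np.random.normal(10.0, 2.5, 4, 4);   // 4x4 samples with mean 10, std 2.5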
private NDArray setValue<T>(NDArray indexes)
{
    // output shape: (indexes.size, shape[1], shape[2], ...)
    Shape newShape = new int[] { indexes.size }.Concat(shape.Skip(1)).ToArray();

    var buf = Data<T>();
    var idx = indexes.Data<int>();
    var array = new T[newShape.Size];

    // number of elements in one slice along axis 0
    var indice = Shape.GetShape(newShape.Dimensions, axis: 0);
    var length = Shape.GetSize(indice);

    // copy each selected slice as one contiguous block
    for (var row = 0; row < newShape[0]; row++)
    {
        var d = buf.AsSpan(idx[row] * length, length);
        d.CopyTo(array.AsSpan(row * length));
    }

    var nd = new NDArray(array, newShape);
    return nd;
}
public NDArray Permutation(int max)
{
    var random = new Random();
    var np = new NDArray(typeof(int)).arange(max);
    int[] npData = np.Data<int>();

    // Fisher-Yates shuffle: swap each position with a random position at or after it,
    // so every permutation of 0..max-1 is equally likely
    for (int i = 0; i < max; i++)
    {
        var pos = random.Next(i, max);
        var tmp = npData[i];
        npData[i] = npData[pos];
        npData[pos] = tmp;
    }

    return np;
}
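// Hedged usage sketch: a random permutation of 0..max-1 built by shuffling an arange
// result in place. The np.random.permutation-style accessor name is an assumption.
//
//     var perm = np.random.permutation(5);   // e.g. [3, 0, 4, 1, 2]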
/// <summary>
/// Least squares method.
///
/// Determines the NDArray X which minimizes the least-squares error of the linear system A * X = B,
/// where A is this NDArray.
/// </summary>
/// <param name="nDArrayB">Right-hand side NDArray B</param>
/// <param name="rcon">Cut-off ratio for small singular values of A</param>
/// <returns>NDArray X</returns>
public NDArray lstqr(NDArray nDArrayB, double rcon = 0.0001)
{
    var A = (double[])Data<double>();
    var b = (double[])nDArrayB.Data<double>();

    int m = this.shape[0];
    int n = this.shape[1];
    int nrhs = (nDArrayB.ndim > 1) ? nDArrayB.shape[1] : 1;
    int lda = m;
    int ldb = m;
    int rank = 0;
    double[] work = new double[1];
    int lwork = -1;
    int info = 0;
    double[] singVal = new double[m];

    // first call is a workspace query (lwork = -1); the second call solves the system
    LAPACK.dgelss_(ref m, ref n, ref nrhs, A, ref lda, b, ref ldb, singVal, ref rcon, ref rank, work, ref lwork, ref info);
    lwork = (int)work[0];
    work = new double[lwork];
    LAPACK.dgelss_(ref m, ref n, ref nrhs, A, ref lda, b, ref ldb, singVal, ref rcon, ref rank, work, ref lwork, ref info);

    // dgelss overwrites b with the solution in column-major order; re-order it into a row-major (n, nrhs) array
    double[] sln = new double[n * nrhs];
    for (int idx = 0; idx < sln.Length; idx++)
    {
        sln[idx] = b[m * (idx % nrhs) + idx / nrhs];
    }

    var slnArr = new NDArray(typeof(double), new Shape(n, nrhs));
    slnArr.Storage.ReplaceData(sln);
    return slnArr;
}
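// Hedged usage sketch: solving an overdetermined system A * X = B in the least-squares
// sense via LAPACK dgelss. The construction of A and B below is illustrative only.
//
//     var A = np.array(new double[,] { { 1, 1 }, { 1, 2 }, { 1, 3 } });  // 3x2
//     var B = np.array(new double[] { 6, 0, 0 });                        // length 3
//     var X = A.lstqr(B);   // (2, 1) coefficients minimizing ||A*X - B||^2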
public NDArray this[NDArray<bool> booleanArray]
{
    get
    {
        if (!Enumerable.SequenceEqual(shape, booleanArray.shape))
        {
            throw new IncorrectShapeException();
        }

        var boolDotNetArray = booleanArray.Data<bool>();

        switch (dtype.Name)
        {
            case "Int32":
                {
                    var nd = new List<int>();
                    for (int idx = 0; idx < boolDotNetArray.Length; idx++)
                    {
                        if (boolDotNetArray[idx])
                        {
                            nd.Add(Data<int>(booleanArray.Storage.Shape.GetDimIndexOutShape(idx)));
                        }
                    }
                    return new NDArray(nd.ToArray(), nd.Count);
                }
            case "Double":
                {
                    var nd = new List<double>();
                    for (int idx = 0; idx < boolDotNetArray.Length; idx++)
                    {
                        if (boolDotNetArray[idx])
                        {
                            nd.Add(Data<double>(booleanArray.Storage.Shape.GetDimIndexOutShape(idx)));
                        }
                    }
                    return new NDArray(nd.ToArray(), nd.Count);
                }
        }

        throw new NotImplementedException($"boolean indexing for dtype {dtype.Name}");
    }
    set
    {
        if (!Enumerable.SequenceEqual(shape, booleanArray.shape))
        {
            throw new IncorrectShapeException();
        }

        // assign the (scalar) value to every position where the mask is true
        object scalarObj = value.Storage.GetData().GetValue(0);
        bool[] boolDotNetArray = booleanArray.Storage.GetData() as bool[];

        int elementsAmount = booleanArray.size;
        for (int idx = 0; idx < elementsAmount; idx++)
        {
            if (boolDotNetArray[idx])
            {
                int[] indexes = booleanArray.Storage.Shape.GetDimIndexOutShape(idx);
                Array.SetValue(scalarObj, Storage.Shape.GetIndexInShape(indexes));
            }
        }
    }
}
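// Hedged usage sketch of the boolean-mask indexer above (Int32/Double dtypes only). The
// comparison operator producing an NDArray<bool> mask is an assumption about the wider API.
//
//     var a = np.array(new int[] { 1, -2, 3, -4 });
//     var mask = a > 0;                       // NDArray<bool> mask (assumed operator)
//     var positives = a[mask];                // [1, 3]
//     a[mask] = np.array(new int[] { 0 });    // setter writes the scalar 0 wherever mask is true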