/// <summary>
/// Creates a torch tensor from a NumSharp <see cref="NumSharp.NDArray"/> by allocating
/// an uninitialized tensor of the same shape and copying the array's buffer into the
/// tensor's storage via its host data pointer.
/// note: this implementation works only for device CPU
/// todo: implement for GPU
/// </summary>
/// <param name="data">Source array; its contiguous buffer is copied into the tensor.</param>
/// <param name="dtype">Optional target dtype; must equal the array's dtype — conversion is not supported.</param>
/// <param name="device">Optional device, forwarded to torch.empty (only CPU actually works here).</param>
/// <param name="requires_grad">Optional autograd flag, forwarded to torch.empty.</param>
/// <param name="pin_memory">Optional pinned-memory flag, forwarded to torch.empty.</param>
/// <returns>A tensor with the same shape and contents as <paramref name="data"/>.</returns>
/// <exception cref="NotImplementedException">
/// If <paramref name="dtype"/> differs from the array's dtype, or the dtype has no copy path below.
/// </exception>
public Tensor tensor(NumSharp.NDArray data, dtype? dtype = null, device? device = null, bool? requires_grad = null, bool? pin_memory = null)
{
    var type = data.dtype.ToDtype();
    if (dtype != null && type != dtype)
    {
        throw new NotImplementedException("Type of the array is different from specified dtype. Data conversion is not supported (yet)");
    }

    var tensor = torch.empty((Shape)data.shape, dtype: type, device: device, requires_grad: requires_grad, pin_memory: pin_memory);
    var storage = tensor.PyObject.storage();
    long ptr = storage.data_ptr();
    switch (type)
    {
        case Torch.dtype.UInt8:
            Marshal.Copy(data.Data<byte>(), 0, new IntPtr(ptr), data.len);
            break;
        case Torch.dtype.Int32:
            Marshal.Copy(data.Data<int>(), 0, new IntPtr(ptr), data.len);
            break;
        case Torch.dtype.Int64:
            Marshal.Copy(data.Data<long>(), 0, new IntPtr(ptr), data.len);
            break;
        case Torch.dtype.Float32:
            Marshal.Copy(data.Data<float>(), 0, new IntPtr(ptr), data.len);
            break;
        case Torch.dtype.Float64:
            Marshal.Copy(data.Data<double>(), 0, new IntPtr(ptr), data.len);
            break;
        default:
            // FIX: previously an unsupported dtype fell through the switch and the
            // uninitialized tensor from torch.empty was returned with garbage contents.
            throw new NotImplementedException("Copying data of dtype " + type + " is not supported (yet)");
    }
    return tensor;
}
/// <summary>
/// Lazily yields each sub-array of <paramref name="array"/> taken along
/// <paramref name="axis"/>: element i is selected with `axis` leading full slices,
/// then index i, then an ellipsis covering any remaining trailing dimensions.
/// </summary>
/// <param name="array">Array to enumerate over.</param>
/// <param name="axis">Zero-based axis to iterate along; defaults to the first axis.</param>
internal static IEnumerable<object> EnumerateOverAxis(this NumSharp.NDArray array, int axis = 0)
{
    int count = array.Shape[axis];
    for (int i = 0; i < count; i++)
    {
        // Build the selector explicitly instead of Repeat/Concat:
        // [All, ..., All, i, Ellipsis] with `axis` leading All slices.
        var selector = new Slice[axis + 2];
        for (int d = 0; d < axis; d++)
        {
            selector[d] = All;
        }
        selector[axis] = i;
        selector[axis + 1] = Ellipsis;
        yield return array[selector];
    }
}
/// <summary>
/// Encodes an image with OpenCV and packs it behind its record header.
/// </summary>
/// <param name="header">Record header to pack in front of the encoded bytes.</param>
/// <param name="img">Image data; wrapped as an OpenCV Mat over its memory pointer (no copy).</param>
/// <param name="quality">JPEG quality, 0-100; only meaningful for .jpg/.jpeg output.</param>
/// <param name="img_fmt">Target encoding extension understood by OpenCV (e.g. ".jpg", ".png").</param>
/// <returns>The packed record bytes.</returns>
public static byte[] PackImg(IRHeader header, NumSharp.NDArray img, int quality = 95, string img_fmt = ".jpg")
{
    // FIX: removed the dead `encodeParams` local — it was assigned inside a jpeg-extension
    // check (followed by a stray empty statement) but never read, so the check had no
    // effect on the encode call at all.
    // NOTE(review): the JpegQuality parameter is passed for every format; OpenCV is
    // expected to ignore parameters irrelevant to the chosen encoder — confirm.
    var imageEncodingParam = new ImageEncodingParam(ImwriteFlags.JpegQuality, quality);
    Cv2.ImEncode(img_fmt, new Mat(img.GetMemPtr()), out var buf, imageEncodingParam);
    return Pack(header, buf);
}
}
/// <summary>
/// Replaces the points of series <paramref name="s"/> with the (x, y) pairs read
/// element-wise from the two arrays as single-precision floats.
/// </summary>
/// <param name="s">Chart series to update; its existing points are cleared first.</param>
/// <param name="x">X coordinates.</param>
/// <param name="y">Y coordinates; paired index-for-index with <paramref name="x"/>.</param>
public void UpdateGraph(Series s, NumSharp.NDArray x, NumSharp.NDArray y)
{
    s.Points.Clear();
    // FIX: iterate over the actual data length instead of a hard-coded 1000 points
    // (which threw for shorter arrays and truncated longer ones), and drop the
    // `i % 1 == 0` filter, which was always true (a leftover decimation stub).
    int count = Math.Min(x.size, y.size);
    for (int i = 0; i < count; i++)
    {
        s.Points.AddXY(x[i].GetSingle(), y[i].GetSingle());
    }
}
/// <summary>
/// Static convenience overload: forwards to the <see cref="PyTorch.Instance"/> singleton's
/// tensor factory, which builds a torch tensor from a NumSharp array.
/// </summary>
public static Tensor tensor(NumSharp.NDArray data, dtype? dtype = null, device? device = null, bool? requires_grad = null, bool? pin_memory = null)
{
    return PyTorch.Instance.tensor(data, dtype: dtype, device: device, requires_grad: requires_grad, pin_memory: pin_memory);
}
/// <summary>
/// Computes the minimum of the array's elements, either over the flattened array
/// (<paramref name="axis"/> is null — returns a single-element array) or along the
/// given axis (returns an array whose shape drops that axis).
/// Only Int32 and Single element types are implemented by the type dispatch below.
/// </summary>
/// <typeparam name="T">Element type of the array's backing storage.</typeparam>
/// <param name="axis">Axis to reduce over, or null to reduce over all elements.</param>
/// <returns>A new NDArray holding the minima.</returns>
/// <exception cref="Exception">If <paramref name="axis"/> is outside [0, ndim).</exception>
private NDArray AminImpl<T>(int? axis = null) where T : struct
{
    var res = new NDArray(dtype);
    if (axis == null)
    {
        res.Storage = res.TensorEngine.GetStorage(dtype);
        res.Storage.Allocate(new Shape(1));
        // FIX: this branch previously always read the buffer as int[] regardless of T,
        // which was wrong for Single arrays; dispatch on the element type exactly like
        // the axis branch below does.
        switch (typeof(T).Name)
        {
            case "Int32":
            {
                var npArr = this.Storage.GetData<int>();
                int min = npArr[0];
                for (int i = 1; i < npArr.Length; i++)
                {
                    min = Math.Min(min, npArr[i]);
                }
                res.Storage.ReplaceData(new int[1] { min });
            }
                break;
            case "Single":
            {
                var npArr = this.Storage.GetData<float>();
                float min = npArr[0];
                for (int i = 1; i < npArr.Length; i++)
                {
                    min = Math.Min(min, npArr[i]);
                }
                res.Storage.ReplaceData(new float[1] { min });
            }
                break;
        }
    }
    else
    {
        if (axis < 0 || axis >= this.ndim)
        {
            throw new Exception("Invalid input: axis");
        }

        // The axis splits the shape into cur (the reduced dimension) and post
        // (product of the dimensions after it); `size` is the element count of
        // the result. (An unused `prev` accumulator was removed.)
        int[] resShapes = new int[this.shape.Length - 1];
        int index = 0; // index for result shape set
        int cur = 1;
        int post = 1;
        int size = 1;  // total number of the elements for result
        for (int i = 0; i < this.shape.Length; i++)
        {
            if (i == axis)
            {
                cur = this.shape[i];
            }
            else
            {
                resShapes[index++] = this.shape[i];
                size *= this.shape[i];
                if (i > axis)
                {
                    post *= this.shape[i];
                }
            }
        }

        // Fill in data: for every (outer, post) position, walk `cur` elements
        // spaced by the axis stride and keep the minimum.
        index = 0; // index for result data set
        int sameSetOffset = this.Storage.Shape.Strides[axis.Value];
        int increments = cur * post;
        switch (typeof(T).Name)
        {
            case "Int32":
            {
                var resData = new int[size];
                var npArr = Data<int>();
                for (int i = 0; i < this.size; i += increments)
                {
                    for (int j = i; j < i + post; j++)
                    {
                        int start = j;
                        int min = npArr[start];
                        for (int k = 0; k < cur; k++)
                        {
                            min = Math.Min(min, npArr[start]);
                            start += sameSetOffset;
                        }
                        resData[index++] = min;
                    }
                }
                res.Storage.Allocate(new Shape(resShapes));
                res.Storage.ReplaceData(resData);
            }
                break;
            case "Single":
            {
                var resData = new float[size];
                var npArr = Data<float>();
                for (int i = 0; i < this.size; i += increments)
                {
                    for (int j = i; j < i + post; j++)
                    {
                        int start = j;
                        float min = npArr[start];
                        for (int k = 0; k < cur; k++)
                        {
                            min = Math.Min(min, npArr[start]);
                            start += sameSetOffset;
                        }
                        resData[index++] = min;
                    }
                }
                res.Storage.Allocate(new Shape(resShapes));
                res.Storage.ReplaceData(resData);
            }
                break;
        }
    }
    return res;
}
/// <summary>
/// Join a sequence of arrays along an existing axis.
/// </summary>
/// <param name="arrays">The arrays must have the same shape, except in the dimension corresponding to axis (the first, by default).</param>
/// <param name="axis">The axis along which the arrays will be joined. Negative values count from the end. Default is 0.</param>
/// <returns>The concatenated array.</returns>
/// <exception cref="ArgumentNullException">If <paramref name="arrays"/> is null.</exception>
/// <exception cref="ArgumentException">If <paramref name="arrays"/> is empty.</exception>
/// <exception cref="IncorrectShapeException">If the inputs' ranks differ, or any non-axis dimension differs.</exception>
/// <remarks>https://docs.scipy.org/doc/numpy/reference/generated/numpy.concatenate.html</remarks>
public static NDArray concatenate(NDArray[] arrays, int axis = 0)
{
    //What we do is we have the axis which is the only dimension that is allowed to be different
    //We need to perform a check if the dimensions actually match.
    //After we have the axis ax=1 where shape is (3,ax,3) - ax is the only dimension that can vary.
    //So if we got input of (3,5,3) and (3,1,3), we create a return shape of (3,6,3).
    //We perform the assignment by iterating a slice: (:,n,:) on src and dst where dst while n of dst grows as we iterate all arrays.
    if (arrays == null)
    {
        throw new ArgumentNullException(nameof(arrays));
    }

    if (arrays.Length == 0)
    {
        throw new ArgumentException("Value cannot be an empty collection.", nameof(arrays));
    }

    // A single input needs no copying — return it as-is.
    if (arrays.Length == 1)
    {
        return(arrays[0]);
    }

    var first = arrays[0];
    var firstShape = (int[])first.shape.Clone();
    while (axis < 0)
    {
        axis = first.ndim + axis; //translate negative axis
    }

    int i, j;
    int axisSize = 0; //accumulated shape[axis] size for return shape.
    NPTypeCode retType = first.GetTypeCode;
    foreach (var src in arrays)
    {
        //accumulate the concatenated axis
        var shape = src.shape;
        axisSize += shape[axis];

        if (ReferenceEquals(src, first))
        {
            continue;
        }

        var srcType = src.GetTypeCode;

        //resolve what the return type should be and should we perform casting.
        // NOTE(review): the result type is widened to the "largest" NPTypeCode seen,
        // using the enum's ordering via CompareTo — verify that ordering matches the
        // intended numeric promotion rules.
        if (first.GetTypeCode != srcType)
        {
            if (srcType.CompareTo(retType) == 1)
            {
                retType = srcType;
            }
        }

        if (shape.Length != first.ndim)
        {
            throw new IncorrectShapeException("all the input arrays must have same number of dimensions.");
        }

        //verify the shapes are equal
        for (j = 0; j < shape.Length; j++)
        {
            if (axis == j)
            {
                continue;
            }

            if (shape[j] != firstShape[j])
            {
                throw new IncorrectShapeException("all the input array dimensions except for the concatenation axis must match exactly.");
            }
        }
    }

    //prepare return shape: identical to the first array's shape except the
    //concatenation axis, which is the sum of all inputs' sizes along that axis.
    firstShape[axis] = axisSize;

    var retShape = new Shape(firstShape);
    var dst = new NDArray(retType, retShape);

    // Two slice accessors select one index along `axis` (all other axes full):
    // accessorSrc walks each source array, accessorDst walks the destination and
    // only ever advances, so consecutive sources land in consecutive dst positions.
    var accessorDst = new Slice[retShape.NDim];
    var accessorSrc = new Slice[retShape.NDim];

    for (i = 0; i < accessorDst.Length; i++)
    {
        accessorSrc[i] = accessorDst[i] = Slice.All;
    }

    accessorSrc[axis] = Slice.Index(0);
    accessorDst[axis] = Slice.Index(0);

    foreach (var src in arrays)
    {
        var len = src.shape[axis];
        for (i = 0; i < len; i++)
        {
            // Copy one slice of src into the current dst slot.
            var writeTo = dst[accessorDst];
            var writeFrom = src[accessorSrc];
            MultiIterator.Assign(writeTo.Storage, writeFrom.Storage);
            accessorSrc[axis]++;
            accessorDst[axis]++; //increment every step
        }

        accessorSrc[axis] = Slice.Index(0); //reset src
    }

    return(dst);
}