/*
 * Ravel
 *
 * Returns a contiguous, flattened (1-d) array containing the same data as 'a'.
 * When the data is already contiguous in the requested order this is a
 * zero-copy reshape to (-1,); otherwise the elements are copied via Flatten.
 *
 * flags - requested memory order; NPY_ANYORDER means "match the source".
 */
internal static NpyArray NpyArray_Ravel(NpyArray a, NPY_ORDER flags)
{
    NpyArray_Dims newdim = new NpyArray_Dims()
    {
        ptr = null,
        len = 1
    };
    // A single -1 dimension tells Newshape to infer the length (the full size).
    npy_intp[] val = new npy_intp[] { -1 };
    bool fortran = false;

    if (flags == NPY_ORDER.NPY_ANYORDER)
    {
        // ANYORDER: follow whatever layout the source already has.
        fortran = NpyArray_ISFORTRAN(a);
    }
    newdim.ptr = val;

    if (!fortran && NpyArray_ISCONTIGUOUS(a))
    {
        // C-contiguous and C order requested: reshape is a view, no copy.
        // NOTE(review): upstream numpy passes NPY_CORDER here instead of the
        // raw 'flags' (which may still be NPY_ANYORDER); Newshape resolves
        // ANYORDER itself, so this is presumed equivalent — confirm.
        return (NpyArray_Newshape(a, newdim, flags));
    }
    else if (fortran && NpyArray_ISFORTRAN(a))
    {
        // Fortran-contiguous and Fortran order wanted: also a zero-copy view.
        return (NpyArray_Newshape(a, newdim, NPY_ORDER.NPY_FORTRANORDER));
    }
    else
    {
        // Not contiguous in the requested order: must copy element-by-element.
        return (NpyArray_Flatten(a, flags));
    }
}
/// <summary>
/// Serializes the array's contents into a raw byte buffer.
/// </summary>
/// <param name="order">Memory order used when reading the elements out; ANYORDER follows the array's own layout.</param>
/// <returns>A newly allocated byte array of length Size * element size.</returns>
internal byte[] ToString(NPY_ORDER order = NPY_ORDER.NPY_ANYORDER)
{
    // Total byte count = element count * per-element size.
    long byteCount = Size * Dtype.ElementSize;
    var buffer = new byte[byteCount];
    NpyCoreApi.GetBytes(this, buffer, order);
    return buffer;
}
/// <summary>
/// Gives a new shape to an array without changing its data.
/// </summary>
/// <param name="a">array to reshape</param>
/// <param name="oshape">shape specification (tuple-like object) converted via ConvertTupleToShape</param>
/// <param name="order">memory order used to read/write the elements</param>
/// <returns>the reshaped array</returns>
public static ndarray reshape(this ndarray a, object oshape, NPY_ORDER order = NPY_ORDER.NPY_CORDER)
{
    shape converted = ConvertTupleToShape(oshape);
    if (converted == null)
    {
        // Conversion failed — the shape object was not tuple-like.
        throw new Exception("Unable to convert shape object");
    }
    return np.reshape(a, converted, order);
}
/*NUMPY_API
 * Allocate a new iterator for one array object.
 *
 * Convenience wrapper around NpyIter_AdvancedNew for the single-operand case:
 * the caller's combined flag word is split into the global iterator flags and
 * the per-operand flags before delegating.
 *
 * NOTE(review): the '&op', '&op_flags' and '&dtype' address-of expressions are
 * carried over from the original C source; presumably AdvancedNew expects
 * single-element arrays/refs in the C# port — confirm this compiles as intended.
 */
internal static NpyIter NpyIter_New(NpyArray op, ITERFLAGS flags, NPY_ORDER order, NPY_CASTING casting, NpyArray_Descr dtype)
{
    /* Split the flags into separate global and op flags */
    ITERFLAGS op_flags = flags & ITERFLAGS.NPY_ITER_PER_OP_FLAGS;
    flags &= ITERFLAGS.NPY_ITER_GLOBAL_FLAGS;

    // -1 oa_ndim and null axes/itershape: let the iterator use the operand's
    // own dimensionality.
    return (NpyIter_AdvancedNew(1, &op, flags, order, casting, &op_flags, &dtype, -1, null, null, 0));
}
/*
 * Copy an array.
 *
 * Allocates a fresh array with the same descr/shape as 'm1' and copies the
 * contents into it.  NPY_ANYORDER keeps the source's memory layout; otherwise
 * the layout is Fortran iff NPY_FORTRANORDER was requested.
 * Returns null (with the core error state set) on allocation or copy failure.
 */
internal static NpyArray NpyArray_NewCopy(NpyArray m1, NPY_ORDER order)
{
    // Resolve the requested order into a concrete layout choice.
    bool useFortranLayout =
        (order == NPY_ORDER.NPY_ANYORDER) ? NpyArray_ISFORTRAN(m1)
                                          : (order == NPY_ORDER.NPY_FORTRANORDER);

    // Alloc consumes a descr reference, so take one first.
    Npy_INCREF(m1.descr);
    NpyArray copy = NpyArray_Alloc(m1.descr, m1.nd, m1.dimensions, useFortranLayout, Npy_INTERFACE(m1));
    if (copy == null)
    {
        return null;
    }

    if (NpyArray_CopyInto(copy, m1) == -1)
    {
        // Copy failed: release the half-built result.
        Npy_DECREF(copy);
        return null;
    }
    return copy;
}
/*
 * Return a 1-d copy of 'a' with its elements visited in the requested order.
 *
 * order - element visitation order for the copy; NPY_ANYORDER means "match
 *         the source" (Fortran order when the source is Fortran-contiguous,
 *         C order otherwise).
 *
 * Returns null (with the core error state set) on allocation/copy failure.
 */
internal static NpyArray NpyArray_Flatten(NpyArray a, NPY_ORDER order)
{
    NpyArray ret;
    npy_intp[] size = new npy_intp[1];

    if (order == NPY_ORDER.NPY_ANYORDER)
    {
        // BUG FIX: the non-Fortran branch must resolve to NPY_CORDER, as in
        // upstream numpy's PyArray_Flatten.  The original left the order as
        // NPY_ANYORDER, handing _flat_copyinto an unresolved order value.
        order = NpyArray_ISFORTRAN(a) ? NPY_ORDER.NPY_FORTRANORDER : NPY_ORDER.NPY_CORDER;
    }

    // Alloc consumes a descr reference, so take one first.
    Npy_INCREF(a.descr);
    size[0] = NpyArray_SIZE(a);

    // The result itself is always a freshly-allocated C-layout 1-d array;
    // 'order' only controls the order in which source elements are copied in.
    ret = NpyArray_Alloc(a.descr, 1, size, false, Npy_INTERFACE(a));
    if (ret == null)
    {
        return (null);
    }

    if (_flat_copyinto(ret, a, order) < 0)
    {
        Npy_DECREF(ret);
        return (null);
    }
    return (ret);
}
/// <summary>
/// Returns a contiguous flattened (1-D) array; a view when possible, otherwise a copy.
/// </summary>
/// <param name="a">array to flatten</param>
/// <param name="order">memory order in which the elements are read</param>
public static ndarray Ravel(this ndarray a, NPY_ORDER order)
{
    return np.ravel(a, order);
}
/*
 * Resize (reallocate data). Only works if nothing else is referencing this
 * array and it is contiguous. If refcheck is 0, then the reference count is
 * not checked and assumed to be 1. You still must own this data and have no
 * weak-references and no base object.
 *
 * self     - array to resize in place.
 * newshape - requested dimensions (a 0 entry truncates the dim scan).
 * refcheck - when true, refuse to resize if anything else holds a reference.
 * fortran  - requested order (NOTE(review): this parameter is never read in
 *            the body; strides are rebuilt from self.flags — confirm intended).
 *
 * Returns 0 on success, -1 (with error state set) on failure.
 */
internal static int NpyArray_Resize(NpyArray self, NpyArray_Dims newshape, bool refcheck, NPY_ORDER fortran)
{
    npy_intp oldsize, newsize;
    int new_nd = newshape.len, k, elsize;
    int refcnt;
    npy_intp[] new_dimensions = newshape.ptr;
    npy_intp[] new_strides = new npy_intp[npy_defs.NPY_MAXDIMS];
    size_t sd;
    npy_intp[] dimptr;
    npy_intp[] strptr;
    npy_intp largest;

    // Resizing only makes sense for a single contiguous buffer.
    if (!NpyArray_ISONESEGMENT(self))
    {
        NpyErr_SetString(npyexc_type.NpyExc_ValueError, "resize only works on single-segment arrays");
        return (-1);
    }
    if (self.descr.elsize == 0)
    {
        NpyErr_SetString(npyexc_type.NpyExc_ValueError, "Bad data-type size.");
        return (-1);
    }

    // Compute the new total element count, guarding against overflow:
    // 'largest' is the biggest count that still fits in npy_intp bytes.
    newsize = 1;
    largest = npy_defs.NPY_MAX_INTP / self.descr.elsize;
    for (k = 0; k < new_nd; k++)
    {
        if (new_dimensions[k] == 0)
        {
            break;
        }
        if (new_dimensions[k] < 0)
        {
            NpyErr_SetString(npyexc_type.NpyExc_ValueError, "negative dimensions not allowed");
            return (-1);
        }
        newsize *= new_dimensions[k];
        if (newsize <= 0 || newsize > largest)
        {
            NpyErr_MEMORY();
            return (-1);
        }
    }

    oldsize = NpyArray_SIZE(self);
    if (oldsize != newsize)
    {
        // The buffer must be reallocated, which is only legal when we own it
        // exclusively.
        if (!((self.flags & NPYARRAYFLAGS.NPY_OWNDATA) != 0))
        {
            NpyErr_SetString(npyexc_type.NpyExc_ValueError, "cannot resize this array: it does not own its data");
            return (-1);
        }
        /* TODO: This isn't right for usage from C.  I think we
         * need to revisit the refcounts so we don't have counts
         * of 0. */
        if (refcheck)
        {
            refcnt = (int)self.nob_refcnt;
        }
        else
        {
            refcnt = 0;
        }
        if ((refcnt > 0) || (self.base_arr != null) || (null != self.base_obj))
        {
            NpyErr_SetString(npyexc_type.NpyExc_ValueError, "cannot resize an array references or is referenced\nby another array in this way. 
Use the resize function");
            return (-1);
        }

        // Byte size of the new buffer; keep at least one element's worth so
        // the allocation is never zero bytes.
        if (newsize == 0)
        {
            sd = (size_t)self.descr.elsize;
        }
        else
        {
            sd = (size_t)(newsize * self.descr.elsize);
        }
        /* Reallocate space if needed */
        VoidPtr new_data = NpyDataMem_RENEW(self.data, sd);
        if (new_data == null)
        {
            NpyErr_MEMORY();
            return (-1);
        }
        self.data = new_data;
    }

    if ((newsize > oldsize) && NpyArray_ISWRITEABLE(self))
    {
        /* Fill new memory with zeros */
        elsize = self.descr.elsize;
        memset(self.data + oldsize * elsize, 0, (newsize - oldsize) * elsize);
    }

    if (self.nd != new_nd)
    {
        /* Different number of dimensions. */
        self.nd = new_nd;
        /* Need new dimensions and strides arrays */
        dimptr = NpyDimMem_NEW(new_nd);
        strptr = NpyDimMem_NEW(new_nd);
        if (dimptr == null || strptr == null)
        {
            NpyErr_MEMORY();
            return (-1);
        }
        // NOTE(review): self.nd has already been set to new_nd above, so these
        // copies read new_nd entries from the OLD dimensions/strides arrays,
        // which may be shorter.  The values are overwritten by Array.Copy
        // below, but confirm the reads cannot go out of bounds.
        memcpy(dimptr, self.dimensions, self.nd * sizeof(npy_intp));
        memcpy(strptr, self.strides, self.nd * sizeof(npy_intp));
        self.dimensions = dimptr;
        self.strides = strptr;
    }

    /* make new_strides variable */
    // Rebuild strides for the new shape from the element size, preserving the
    // contiguity style recorded in self.flags.
    sd = (size_t)self.descr.elsize;
    NPYARRAYFLAGS flags = 0;
    sd = (size_t)npy_array_fill_strides(new_strides, new_dimensions, new_nd, sd, self.flags, ref flags);
    self.flags = flags;
    Array.Copy(new_dimensions, self.dimensions, new_nd);
    Array.Copy(new_strides, self.strides, new_nd);
    return (0);
}
/// <summary>
/// Thin pass-through to the core resize implementation (in-place reallocation).
/// </summary>
/// <returns>0 on success, -1 on failure with the core error state set.</returns>
internal static int NpyArray_Resize(NpyArray self, NpyArray_Dims newshape, bool refcheck, NPY_ORDER fortran)
{
    return numpyinternal.NpyArray_Resize(self, newshape, refcheck, fortran);
}
/*
 * Concatenates 'narrays' input arrays end-to-end into a single 1-D result,
 * flattening each input in the given 'order'.
 *
 * ret - optional caller-supplied 1-D output array; when null a new result is
 *       allocated using the combined dtype of the inputs.
 *
 * Returns the (new reference to the) result array, or null with the error
 * state set.
 */
private static NpyArray PyArray_ConcatenateFlattenedArrays(int narrays, NpyArray[] arrays, NPY_ORDER order, NpyArray ret)
{
    int iarrays;
    npy_intp shape = 0;            // running element total = length of the 1-D result
    NpyArray sliding_view = null;

    if (narrays <= 0)
    {
        NpyErr_SetString(npyexc_type.NpyExc_ValueError, "need at least one array to concatenate");
        return (null);
    }

    /*
     * Figure out the final concatenated shape starting from the first
     * array's shape.
     */
    for (iarrays = 0; iarrays < narrays; ++iarrays)
    {
        shape += NpyArray_SIZE(arrays[iarrays]);
        /* Check for overflow */
        if (shape < 0)
        {
            NpyErr_SetString(npyexc_type.NpyExc_ValueError, "total number of elements too large to concatenate");
            return (null);
        }
    }

    if (ret != null)
    {
        // Caller-supplied output: must be 1-D with exactly the right length.
        if (NpyArray_NDIM(ret) != 1)
        {
            NpyErr_SetString(npyexc_type.NpyExc_ValueError, "Output array must be 1D");
            return (null);
        }
        if (shape != NpyArray_SIZE(ret))
        {
            NpyErr_SetString(npyexc_type.NpyExc_ValueError, "Output array is the wrong size");
            return (null);
        }
        // Take our own reference; the caller's reference stays valid.
        Npy_INCREF(ret);
    }
    else
    {
        npy_intp stride;

        /* Get the priority subtype for the array */
        object subtype = NpyArray_GetSubType(narrays, arrays);
        // NOTE(review): 'subtype' is computed but never passed to
        // NpyArray_NewFromDescr below — confirm whether that is intentional.

        /* Get the resulting dtype from combining all the arrays */
        NpyArray_Descr dtype = NpyArray_ResultType(narrays, arrays, 0, null);
        if (dtype == null)
        {
            return (null);
        }

        stride = dtype.elsize;

        /* Allocate the array for the result. This steals the 'dtype' reference. */
        ret = NpyArray_NewFromDescr(dtype, 1, new npy_intp[] { shape }, new npy_intp[] { stride }, null, 0, false, null, null);
        if (ret == null)
        {
            return (null);
        }
    }

    /*
     * Create a view which slides through ret for assigning the
     * successive input arrays.
     */
    sliding_view = NpyArray_View(ret, null, PyArray_Type);
    if (sliding_view == null)
    {
        Npy_DECREF(ret);
        return (null);
    }
    for (iarrays = 0; iarrays < narrays; ++iarrays)
    {
        /* Adjust the window dimensions for this array */
        sliding_view.dimensions[0] = NpyArray_SIZE(arrays[iarrays]);

        /* Copy the data for this array */
        if (PyArray_CopyAsFlat(sliding_view, arrays[iarrays], order) < 0)
        {
            Npy_DECREF(sliding_view);
            Npy_DECREF(ret);
            return (null);
        }

        /* Slide to the start of the next window */
        sliding_view.data.data_offset += sliding_view.strides[0] * NpyArray_SIZE(arrays[iarrays]);
    }
    Npy_DECREF(sliding_view);
    return (ret);
}
/*
 * Returns a new array with the requested shape, sharing the data of 'self'
 * when the buffer can be reinterpreted in place, otherwise reshaping a copy.
 *
 * self    - source array.
 * newdims - requested dimensions; one entry may be -1 ("infer from size").
 * fortran - requested memory order; NPY_ANYORDER resolves to the source's
 *           own layout.
 *
 * Returns null (with the error state set) on failure.
 */
internal static NpyArray NpyArray_Newshape(NpyArray self, NpyArray_Dims newdims, NPY_ORDER fortran)
{
    int i;
    npy_intp[] dimensions = newdims.ptr;
    NpyArray ret;
    int n = newdims.len;
    bool same = true;
    bool incref = true;     // becomes false once 'self' is replaced by a copy we own
    npy_intp[] strides = null;
    npy_intp[] newstrides = new npy_intp[npy_defs.NPY_MAXDIMS];
    NPYARRAYFLAGS flags;

    if (newdims.len > npy_defs.NPY_MAXDIMS)
    {
        NpyErr_SetString(npyexc_type.NpyExc_ValueError,
            string.Format("Maximum number of dimensions is {0}", npy_defs.NPY_MAXDIMS.ToString()));
        return (null);
    }
    if (fortran == NPY_ORDER.NPY_ANYORDER)
    {
        // BUG FIX: a non-Fortran source must resolve to NPY_CORDER (as in
        // upstream numpy's PyArray_Newshape), not remain NPY_ANYORDER — the
        // 'fortran == NPY_CORDER' comparison below depends on this.
        fortran = NpyArray_ISFORTRAN(self) ? NPY_ORDER.NPY_FORTRANORDER : NPY_ORDER.NPY_CORDER;
    }

    /* Quick check to make sure anything actually needs to be done */
    if (n == self.nd)
    {
        same = true;
        i = 0;
        while (same && i < n)
        {
            if (NpyArray_DIM(self, i) != dimensions[i])
            {
                same = false;
            }
            i++;
        }
        if (same)
        {
            // Shape is unchanged: just return a view of the source.
            return (NpyArray_View(self, null, null));
        }
    }

    /*
     * Returns a pointer to an appropriate strides array
     * if all we are doing is inserting ones into the shape,
     * or removing ones from the shape
     * or doing a combination of the two
     * In this case we don't need to do anything but update strides and
     * dimensions.  So, we can handle non single-segment cases.
     */
    i = _check_ones(self, n, dimensions, newstrides);
    if (i == 0)
    {
        strides = newstrides;
    }
    flags = self.flags;

    if (strides == null)
    {
        /*
         * we are really re-shaping not just adding ones to the shape somewhere
         * fix any -1 dimensions and check new-dimensions against old size
         */
        if (_fix_unknown_dimension(newdims, NpyArray_SIZE(self)) < 0)
        {
            return (null);
        }
        /*
         * sometimes we have to create a new copy of the array
         * in order to get the right orientation and
         * because we can't just re-use the buffer with the
         * data in the order it is in.
         */
        if (!(NpyArray_ISONESEGMENT(self)) ||
            (((NpyArray_CHKFLAGS(self, NPYARRAYFLAGS.NPY_CONTIGUOUS) && fortran == NPY_ORDER.NPY_FORTRANORDER) ||
              (NpyArray_CHKFLAGS(self, NPYARRAYFLAGS.NPY_FORTRAN) && fortran == NPY_ORDER.NPY_CORDER)) && (self.nd > 1)))
        {
            // Try to express the reshape purely with new strides first.
            bool success = _attempt_nocopy_reshape(self, n, dimensions, newstrides, (fortran == NPY_ORDER.NPY_FORTRANORDER));
            if (success)
            {
                /* no need to copy the array after all */
                strides = newstrides;
                flags = self.flags;
            }
            else
            {
                // Fall back to reshaping a fresh copy in the requested order.
                NpyArray newArray;
                newArray = NpyArray_NewCopy(self, fortran);
                if (newArray == null)
                {
                    return (null);
                }
                incref = false;
                self = newArray;
                flags = self.flags;
            }
        }

        /* We always have to interpret the contiguous buffer correctly */

        /* Make sure the flags argument is set. */
        if (n > 1)
        {
            if (fortran == NPY_ORDER.NPY_FORTRANORDER)
            {
                flags &= ~NPYARRAYFLAGS.NPY_CONTIGUOUS;
                flags |= NPYARRAYFLAGS.NPY_FORTRAN;
            }
            else
            {
                flags &= ~NPYARRAYFLAGS.NPY_FORTRAN;
                flags |= NPYARRAYFLAGS.NPY_CONTIGUOUS;
            }
        }
    }
    else if (n > 0)
    {
        /*
         * replace any 0-valued strides with
         * appropriate value to preserve contiguousness
         */
        if (fortran == NPY_ORDER.NPY_FORTRANORDER)
        {
            // Fortran layout: strides grow from the first axis outward.
            if (strides[0] == 0)
            {
                strides[0] = (npy_intp)self.descr.elsize;
            }
            for (i = 1; i < n; i++)
            {
                if (strides[i] == 0)
                {
                    strides[i] = strides[i - 1] * dimensions[i - 1];
                }
            }
        }
        else
        {
            // C layout: strides grow from the last axis backward.
            if (strides[n - 1] == 0)
            {
                strides[n - 1] = (npy_intp)self.descr.elsize;
            }
            for (i = n - 2; i > -1; i--)
            {
                if (strides[i] == 0)
                {
                    strides[i] = strides[i + 1] * dimensions[i + 1];
                }
            }
        }
    }

    Npy_INCREF(self.descr);
    ret = NpyArray_NewFromDescr(self.descr, n, dimensions, strides, self.data, flags, false, null, Npy_INTERFACE(self));
    if (ret == null)
    {
        goto fail;
    }
    if (incref)
    {
        // The view borrows 'self' as its base, so it needs its own reference
        // (when 'self' is our private copy, ownership transfers instead).
        Npy_INCREF(self);
    }
    ret.SetBase(self);
    NpyArray_UpdateFlags(ret, NPYARRAYFLAGS.NPY_CONTIGUOUS | NPYARRAYFLAGS.NPY_FORTRAN);
    Debug.Assert(null == ret.base_arr || null == ret.base_obj);
    return (ret);

fail:
    if (!incref)
    {
        Npy_DECREF(self);
    }
    return (null);
}
/// <summary>
/// Thin pass-through to the core reshape implementation.
/// </summary>
/// <returns>a view or reshaped copy with the requested dimensions, or null on failure.</returns>
internal static NpyArray NpyArray_Newshape(NpyArray self, NpyArray_Dims newdims, NPY_ORDER fortran)
{
    return numpyinternal.NpyArray_Newshape(self, newdims, fortran);
}
// NOTE(review): this method is currently a NO-OP STUB — the entire body is the
// commented-out C/C++ original awaiting porting.  Callers get no effect from
// invoking it; the reference implementation is preserved below verbatim.
internal static void NpyArrayAccess_SetState(NpyArray self, int ndim, npy_intp[] dims, NPY_ORDER order, string srcPtr, int srcLen)
{
    //Debug.Assert(Validate(self));
    //Debug.Assert(null != dims);
    //Debug.Assert(0 <= ndim);

    //// Clear existing data and references. Typically these will be empty.
    //if (NpyArray_CHKFLAGS(self,NPYARRAYFLAGS.NPY_OWNDATA))
    //{
    //    if (null != NpyArray_BYTES(self))
    //    {
    //        NpyArray_free(NpyArray_BYTES(self));
    //    }
    //    self.flags &= ~NPYARRAYFLAGS.NPY_OWNDATA;
    //}

    //Npy_XDECREF(NpyArray_BASE_ARRAY(self));
    //NpyInterface_DECREF(NpyArray_BASE(self));
    //self.base_arr = null;
    //self.base_obj = null;

    //if (null != NpyArray_DIMS(self))
    //{
    //    NpyDimMem_FREE(self.dimensions);
    //    self.dimensions = null;
    //}

    //self.flags = NPYARRAYFLAGS.NPY_DEFAULT;
    //self.nd = ndim;
    //if (0 < ndim)
    //{
    //    self.dimensions = NpyDimMem_NEW(ndim);
    //    self.strides = NpyDimMem_NEW(ndim);
    //    memcpy(NpyArray_DIMS(self), dims, sizeof(npy_intp) * ndim);
    //    npy_array_fill_strides(NpyArray_STRIDES(self), dims, ndim,
    //                           NpyArray_ITEMSIZE(self), order, ref self.flags));
    //}

    //npy_intp bytes = NpyArray_ITEMSIZE(self) * NpyArray_SIZE(self);
    //NpyArray_BYTES(self) = (char*)NpyArray_malloc(bytes);
    //NpyArray_FLAGS(self) |= NPY_OWNDATA;
    //if (null != srcPtr)
    //{
    //    // This is unpleasantly inefficent. The input is a .NET string, which is 16-bit
    //    // unicode.  Thus the data is encoded into alternating bytes so we can't use memcpy.
    //    char* destPtr = NpyArray_BYTES(self);
    //    char* destEnd = destPtr + bytes;
    //    const wchar_t* srcEnd = srcPtr + srcLen;
    //    while (destPtr < destEnd && srcPtr < srcEnd) *(destPtr++) = (char)*(srcPtr++);
    //}
    //else
    //{
    //    memset(NpyArray_BYTES(self), 0, bytes);
    //}
}
/// <summary>
/// Gives a new shape to an array without changing its data.
/// </summary>
/// <param name="a">array to reshape</param>
/// <param name="newshape">target shape, already in <c>shape</c> form</param>
/// <param name="order">memory order used to read/write the elements</param>
public static ndarray reshape(this ndarray a, shape newshape, NPY_ORDER order = NPY_ORDER.NPY_CORDER)
{
    return np.reshape(a, newshape, order);
}
/// <summary>
/// Thin pass-through to the core ravel implementation (flatten to 1-D,
/// view when possible).
/// </summary>
internal static NpyArray NpyArray_Ravel(NpyArray a, NPY_ORDER order)
{
    return numpyinternal.NpyArray_Ravel(a, order);
}
/// <summary>
/// Return a 2-D array with ones on the diagonal and zeros elsewhere
/// </summary>
/// <param name="N">Number of rows in the output</param>
/// <param name="M">(optional) Number of columns in the output. If None, defaults to N</param>
/// <param name="k">(optional) Index of the diagonal: 0 (the default) refers to the main diagonal, a positive value refers to an upper diagonal, and a negative value to a lower diagonal.</param>
/// <param name="dtype">(optional) Data-type of the returned array</param>
/// <param name="order">(optional)</param>
/// <returns>An array where all elements are equal to zero, except for the k-th diagonal, whose values are equal to one</returns>
public static ndarray eye(int N, int? M = null, int k = 0, dtype dtype = null, NPY_ORDER order = NPY_ORDER.NPY_CORDER)
{
    /*
     * Return a 2-D array with ones on the diagonal and zeros elsewhere.
     *
     * Parameters
     * ----------
     * N : int
     *     Number of rows in the output.
     * M : int, optional
     *     Number of columns in the output. If None, defaults to `N`.
     * k : int, optional
     *     Index of the diagonal: 0 (the default) refers to the main diagonal,
     *     a positive value refers to an upper diagonal, and a negative value
     *     to a lower diagonal.
     * dtype : data-type, optional
     *     Data-type of the returned array.
     * order : {'C', 'F'}, optional
     *     Whether the output should be stored in row-major (C-style) or
     *     column-major (Fortran-style) order in memory.
     *
     *     .. versionadded:: 1.14.0
     *
     * Returns
     * -------
     * I : ndarray of shape (N,M)
     *     An array where all elements are equal to zero, except for the `k`-th
     *     diagonal, whose values are equal to one.
     *
     * See Also
     * --------
     * identity : (almost) equivalent function
     * diag : diagonal 2-D array from a 1-D array specified by the user.
     *
     * Examples
     * --------
     * >>> np.eye(2, dtype=int)
     * array([[1, 0],
     *        [0, 1]])
     * >>> np.eye(3, k=1)
     * array([[ 0.,  1.,  0.],
     *        [ 0.,  0.,  1.],
     *        [ 0.,  0.,  0.]])
     *
     */
    // This is a direct port of numpy's Python implementation of eye():
    //   if k >= M: return m
    //   i = k if k >= 0 else (-k) * M
    //   m[:M-k].flat[i::M+1] = 1
    int i;
    if (M == null)
    {
        // Columns default to the row count (square matrix).
        M = N;
    }
    ndarray m = zeros(new shape(N, (int)M), dtype: dtype, order: order);
    if (k >= M)
    {
        // Diagonal lies entirely outside the matrix: all zeros.
        return (m);
    }
    if (k >= 0)
    {
        // Upper diagonal starts k elements into the first row of the flat view.
        i = k;
    }
    else
    {
        // Lower diagonal starts (-k) rows down, i.e. (-k)*M flat elements in.
        i = (-k) * (int)M;
    }
    // Restrict to the first M-k rows, then step through the flat view with
    // stride M+1, which walks down the k-th diagonal.
    // NOTE(review): '.A(...)' presumably performs a string-based row slice
    // equivalent to m[:M-k] — confirm against the ndarray indexer.
    m.A(":" + (M - k).ToString()).Flat[i.ToString() + "::" + (M + 1).ToString()] = 1;
    return (m);
}
/// <summary>
/// Resizes the array in place to the given dimensions.
/// </summary>
/// <param name="a">array to resize</param>
/// <param name="newdims">target dimensions</param>
/// <param name="refcheck">when true, refuse to resize if other references exist</param>
/// <param name="order">memory order for the resized data</param>
public static void Resize(this ndarray a, npy_intp[] newdims, bool refcheck, NPY_ORDER order)
{
    np.resize(a, newdims, refcheck, order);
}
/// <summary>
/// Returns a 1-D copy of the array, with elements read in the given order.
/// </summary>
public static ndarray Flatten(this ndarray a, NPY_ORDER order)
{
    return NpyCoreApi.Flatten(a, order);
}
/// <summary>
/// Thin pass-through to the core flat-copy routine: copies 'src' into the
/// 1-D destination 'dst', visiting source elements in 'order'.
/// </summary>
/// <returns>0 on success, negative on failure.</returns>
internal static int _flat_copyinto(NpyArray dst, NpyArray src, NPY_ORDER order)
{
    return numpyinternal._flat_copyinto(dst, src, order);
}
/// <summary>
/// Thin pass-through to the core array-copy implementation.
/// </summary>
internal static NpyArray NpyArray_NewCopy(NpyArray m1, NPY_ORDER order)
{
    return numpyinternal.NpyArray_NewCopy(m1, order);
}
/// <summary>
/// Thin pass-through to the core SetState implementation.
/// </summary>
internal static void NpyArrayAccess_SetState(NpyArray self, int ndim, npy_intp[] dims, NPY_ORDER order, string srcPtr, int srcLen)
{
    numpyinternal.NpyArrayAccess_SetState(self, ndim, dims, order, srcPtr, srcLen);
}
/// <summary>
/// Thin pass-through to the core flatten implementation (1-D copy).
/// </summary>
internal static NpyArray NpyArray_Flatten(NpyArray a, NPY_ORDER order)
{
    return numpyinternal.NpyArray_Flatten(a, order);
}