/*
 * Ravel
 * Returns a contiguous, flattened (1-d) array. Uses a no-copy reshape when
 * the layout already permits it, otherwise falls back to a copying flatten.
 */
internal static NpyArray NpyArray_Ravel(NpyArray a, NPY_ORDER flags)
{
    // A single dimension of -1 tells Newshape to infer the total length.
    NpyArray_Dims flatShape = new NpyArray_Dims()
    {
        ptr = new npy_intp[] { -1 },
        len = 1
    };

    // NPY_ANYORDER means "follow the array's own memory layout".
    bool wantFortran = (flags == NPY_ORDER.NPY_ANYORDER) && NpyArray_ISFORTRAN(a);

    if (!wantFortran && NpyArray_ISCONTIGUOUS(a)) {
        // C-contiguous data can be viewed as 1-d without copying.
        return(NpyArray_Newshape(a, flatShape, flags));
    }
    if (wantFortran && NpyArray_ISFORTRAN(a)) {
        // Fortran-contiguous data flattens column-major without copying.
        return(NpyArray_Newshape(a, flatShape, NPY_ORDER.NPY_FORTRANORDER));
    }
    // Otherwise a copy is unavoidable.
    return(NpyArray_Flatten(a, flags));
}
/*
 * Replaces the strides of 'self' with 'newstrides' after validating that the
 * new strides keep every element inside the memory actually owned by the
 * array (or by its ultimate base array).
 *
 * self       - array whose strides are replaced in place
 * newstrides - one stride per dimension; len must equal NpyArray_NDIM(self)
 *
 * Returns 0 on success, -1 with an exception set on failure.
 */
internal static int NpyArray_SetStrides(NpyArray self, NpyArray_Dims newstrides)
{
    NpyArray newArray;
    npy_intp numbytes = 0, offset = 0;

    if (newstrides.len != NpyArray_NDIM(self)) {
        NpyErr_SetString(npyexc_type.NpyExc_ValueError, "strides must be same length as shape");
        return(-1);
    }

    /* Walk up the chain of base arrays to the array that owns the memory.
     * BUGFIX: start the walk at 'self' itself. The old code started at
     * NpyArray_BASE_ARRAY(self), which is null whenever 'self' owns its own
     * data, causing a null dereference on 'newArray.base_obj' below. */
    newArray = self;
    while (null != NpyArray_BASE_ARRAY(newArray)) {
        newArray = NpyArray_BASE_ARRAY(newArray);
    }
#if false
    /* TODO: Fix this so we can set strides on a buffer-backed array. */
    /* Get the available memory through the buffer interface on
     * new.base or if that fails from the current new
     * NOTE: PyObject_AsReadBuffer is never called during tests */
    if (newArray.base_obj != null && PyObject_AsReadBuffer(newArray.base_obj, (const void **)&buf, &buf_len) >= 0) {
        offset = NpyArray_BYTES(self) - buf;
        numbytes = buf_len - offset;
    }
#else
    if (newArray.base_obj != null) {
        NpyErr_SetString(npyexc_type.NpyExc_ValueError, "strides cannot be set on array created from a buffer.");
        return(-1);
    }
#endif
    else {
        NpyErr_Clear();
        /* Available bytes = total element count of the owning array times its
         * item size. */
        numbytes = NpyArray_MultiplyList(NpyArray_DIMS(newArray), NpyArray_NDIM(newArray));
        numbytes = (npy_intp)(numbytes * NpyArray_ITEMSIZE(newArray));
        // todo: Kevin - this calculation may not be correct
        offset = (npy_intp)(NpyArray_BYTES_Length(self) - NpyArray_BYTES_Length(newArray));
    }

    if (!NpyArray_CheckStrides(NpyArray_ITEMSIZE(self), NpyArray_NDIM(self), numbytes, offset, NpyArray_DIMS(self), newstrides.ptr)) {
        NpyErr_SetString(npyexc_type.NpyExc_ValueError, "strides is not compatible with available memory");
        return(-1);
    }

    memcpy(NpyArray_STRIDES(self), newstrides.ptr, sizeof(npy_intp) * newstrides.len);
    /* Recompute contiguity flags for the new stride layout. */
    NpyArray_UpdateFlags(self, NPYARRAYFLAGS.NPY_CONTIGUOUS | NPYARRAYFLAGS.NPY_FORTRAN);
    return(0);
}
/*
 * Resolves at most one "unknown" (-1) entry in a requested shape so that the
 * total element count equals 's_original'. The resolved value is written
 * back into newshape.ptr in place.
 *
 * newshape   - requested dimensions; negative entries mean "infer this one"
 * s_original - total number of elements in the array being reshaped
 *
 * Returns 0 on success, -1 with an exception set when more than one
 * dimension is unknown or the sizes cannot be made to match.
 */
static int _fix_unknown_dimension(NpyArray_Dims newshape, npy_intp s_original)
{
    npy_intp[] dimensions;
    int i_unknown;
    /* BUGFIX: accumulate the known-size product in npy_intp. It was an int,
     * and the (int) cast silently overflowed for shapes whose known product
     * exceeds 2^31-1 elements. */
    npy_intp s_known;
    int i, n;
    string msg = "total size of new array must be unchanged";

    dimensions = newshape.ptr;
    n = newshape.len;
    s_known = 1;
    i_unknown = -1;
    for (i = 0; i < n; i++) {
        if (dimensions[i] < 0) {
            if (i_unknown == -1) {
                i_unknown = i;
            } else {
                /* Second negative entry: the inference is ambiguous. */
                NpyErr_SetString(npyexc_type.NpyExc_ValueError, "can only specify one unknown dimension");
                return(-1);
            }
        } else {
            s_known = s_known * dimensions[i];
        }
    }

    if (i_unknown >= 0) {
        /* The unknown dimension must divide evenly into the total size;
         * a zero product can never be completed to a nonzero total. */
        if ((s_known == 0) || (s_original % s_known != 0)) {
            NpyErr_SetString(npyexc_type.NpyExc_ValueError, msg);
            return(-1);
        }
        dimensions[i_unknown] = (npy_intp)(s_original / s_known);
    } else {
        /* No unknown entry: the sizes must already match exactly. */
        if (s_original != s_known) {
            NpyErr_SetString(npyexc_type.NpyExc_ValueError, msg);
            return(-1);
        }
    }
    return(0);
}
/*
 * Assigns a new shape to 'self' in place. Only succeeds when the reshape can
 * be expressed as a view over the existing data buffer (i.e. Newshape did
 * not have to copy); otherwise the array is left unchanged.
 *
 * Returns 0 on success, -1 with an exception set on failure.
 */
internal static int NpyArray_SetShape(NpyArray self, NpyArray_Dims newdims)
{
    int nd;
    NpyArray ret;

    ret = NpyArray_Newshape(self, newdims, NPY_ORDER.NPY_CORDER);
    if (ret == null) {
        return(-1);
    }
    /* If Newshape had to copy, the data pointers differ and an in-place
     * shape assignment is not possible. */
    if (NpyArray_DATA(ret).datap != NpyArray_DATA(self).datap) {
        Npy_XDECREF(ret);
        NpyErr_SetString(npyexc_type.NpyExc_AttributeError, "incompatible shape for a non-contiguous array");
        return(-1);
    }

    /* Free old dimensions and strides */
    NpyDimMem_FREE(NpyArray_DIMS(self));
    nd = NpyArray_NDIM(ret);
    NpyArray_NDIM_Update(self, nd);
    if (nd > 0) {
        /* create new dimensions and strides */
        NpyArray_DIMS_Update(self, NpyDimMem_NEW(nd));
        if (NpyArray_DIMS(self) == null) {
            Npy_XDECREF(ret);
            NpyErr_MEMORY();
            return(-1);
        }
        NpyArray_STRIDES_Update(self, NpyDimMem_NEW(nd));
        /* BUGFIX: the strides allocation was previously used without a null
         * check, unlike the dims allocation right above. */
        if (NpyArray_STRIDES(self) == null) {
            Npy_XDECREF(ret);
            NpyErr_MEMORY();
            return(-1);
        }
        memcpy(NpyArray_DIMS(self), NpyArray_DIMS(ret), nd * sizeof(npy_intp));
        memcpy(NpyArray_STRIDES(self), NpyArray_STRIDES(ret), nd * sizeof(npy_intp));
    } else {
        /* 0-d array: no dims/strides storage at all. */
        NpyArray_DIMS_Update(self, null);
        NpyArray_STRIDES_Update(self, null);
    }
    Npy_XDECREF(ret);
    /* Recompute contiguity flags for the new shape. */
    NpyArray_UpdateFlags(self, NPYARRAYFLAGS.NPY_CONTIGUOUS | NPYARRAYFLAGS.NPY_FORTRAN);
    return(0);
}
/*
 * Reorders the axes of 'ret' (the result of a fancy-indexing get, or the
 * value being assigned on a set) so that the dimensions produced by the
 * broadcast index arrays land where the original index expression put them
 * instead of at the front. On failure 'ret' is replaced with null and the
 * error is left set; the caller must check.
 *
 * mit    - the map iterator describing the fancy-index operation
 * ret    - in/out: array to permute; replaced by the permuted view
 * getmap - true when permuting a get result, false for a set operand
 */
static void _swap_axes(NpyArrayMapIterObject mit, ref NpyArray ret, bool getmap)
{
    NpyArray _new;
    npy_intp n1, n2, n3, val, bnd;
    int i;
    NpyArray_Dims permute = new NpyArray_Dims();
    npy_intp[] d = new npy_intp[npy_defs.NPY_MAXDIMS];
    NpyArray arr;

    permute.ptr = d;
    permute.len = mit.nd;

    /*
     * arr might not have the right number of dimensions
     * and need to be reshaped first by pre-pending ones
     */
    arr = ret;
    if (arr.nd != mit.nd) {
        // Copy the existing dims into the tail of the new shape...
        for (i = 1; i <= arr.nd; i++) {
            permute.ptr[mit.nd - i] = arr.dimensions[arr.nd - i];
        }
        // ...and pad the front with 1s up to mit.nd dimensions.
        for (i = 0; i < mit.nd - arr.nd; i++) {
            permute.ptr[i] = 1;
        }
        _new = NpyArray_Newshape(arr, permute, NPY_ORDER.NPY_ANYORDER);
        Npy_DECREF(arr);
        ret = _new;    // may be null on failure; checked just below
        if (_new == null) {
            return;
        }
    }

    /*
     * Setting and getting need to have different permutations.
     * On the get we are permuting the returned object, but on
     * setting we are permuting the object-to-be-set.
     * The set permutation is the inverse of the get permutation.
     */

    /*
     * For getting the array the tuple for transpose is
     * (n1,...,n1+n2-1,0,...,n1-1,n1+n2,...,n3-1)
     * n1 is the number of dimensions of the broadcast index array
     * n2 is the number of dimensions skipped at the start
     * n3 is the number of dimensions of the result
     */

    /*
     * For setting the array the tuple for transpose is
     * (n2,...,n1+n2-1,0,...,n2-1,n1+n2,...n3-1)
     */
    n1 = mit.iters[0].nd_m1 + 1;
    n2 = mit.iteraxes[0];
    n3 = mit.nd;

    /* use n1 as the boundary if getting but n2 if setting */
    bnd = getmap ? n1 : n2;

    // Emit the rotated front segment [bnd, n1+n2)...
    val = bnd;
    i = 0;
    while (val < n1 + n2) {
        permute.ptr[i++] = val++;
    }
    // ...then the segment [0, bnd) that was rotated out...
    val = 0;
    while (val < bnd) {
        permute.ptr[i++] = val++;
    }
    // ...and finally the untouched trailing axes [n1+n2, n3).
    val = n1 + n2;
    while (val < n3) {
        permute.ptr[i++] = val++;
    }
    _new = NpyArray_Transpose(ret, permute);
    Npy_DECREF(ret);
    ret = _new;    // may be null on failure; caller must check
}
/*
 * Returns a new view of 'ap' with its axes permuted. When 'permute' is null
 * the axis order is fully reversed (the classic transpose); otherwise
 * permute.ptr[i] names the source axis for result axis i, with negative
 * values counting from the end.
 *
 * Returns null with an exception set when the permutation length does not
 * match the array rank, an axis is out of range, or an axis is repeated.
 */
internal static NpyArray NpyArray_Transpose(NpyArray ap, NpyArray_Dims permute)
{
    npy_intp[] axes;
    npy_intp axis;
    int i, n;
    npy_intp[] permutation = new npy_intp[npy_defs.NPY_MAXDIMS];
    npy_intp[] reverse_permutation = new npy_intp[npy_defs.NPY_MAXDIMS];
    NpyArray ret = null;

    if (permute == null) {
        /* Default: reverse the axis order. */
        n = ap.nd;
        for (i = 0; i < n; i++) {
            permutation[i] = n - 1 - i;
        }
    } else {
        n = permute.len;
        axes = permute.ptr;
        if (n != ap.nd) {
            NpyErr_SetString(npyexc_type.NpyExc_ValueError, "axes don't match array");
            return(null);
        }
        /* reverse_permutation doubles as a "seen" marker (-1 == unused) so
         * repeated axes can be detected. */
        for (i = 0; i < n; i++) {
            reverse_permutation[i] = -1;
        }
        for (i = 0; i < n; i++) {
            axis = axes[i];
            if (axis < 0) {
                axis = ap.nd + axis;    /* allow negative axis indices */
            }
            if (axis < 0 || axis >= ap.nd) {
                NpyErr_SetString(npyexc_type.NpyExc_ValueError, "invalid axis for this array");
                return(null);
            }
            if (reverse_permutation[axis] != -1) {
                NpyErr_SetString(npyexc_type.NpyExc_ValueError, "repeated axis in transpose");
                return(null);
            }
            reverse_permutation[axis] = i;
            permutation[i] = axis;
        }
        /* CLEANUP: a leftover empty loop ("for (i = 0; i < n; i++) { }")
         * was removed here; it had no effect. */
    }

    /*
     * this allocates memory for dimensions and strides (but fills them
     * incorrectly), sets up descr, and points data at ap.data.
     */
    Npy_INCREF(ap.descr);
    ret = NpyArray_NewView(ap.descr, n, ap.dimensions, null, ap, 0, false);
    if (ret == null) {
        return(null);
    }

    /* fix the dimensions and strides of the return-array */
    for (i = 0; i < n; i++) {
        ret.dimensions[i] = ap.dimensions[permutation[i]];
        ret.strides[i] = ap.strides[permutation[i]];
    }
    NpyArray_UpdateFlags(ret, NPYARRAYFLAGS.NPY_CONTIGUOUS | NPYARRAYFLAGS.NPY_FORTRAN);
    return(ret);
}
/*
 * Resize (reallocate data). Only works if nothing else is referencing this
 * array and it is contiguous. If refcheck is false, then the reference count
 * is not checked and assumed to be 1. You still must own this data and have
 * no weak-references and no base object.
 *
 * Returns 0 on success, -1 with an exception set on failure.
 */
internal static int NpyArray_Resize(NpyArray self, NpyArray_Dims newshape, bool refcheck, NPY_ORDER fortran)
{
    npy_intp oldsize, newsize;
    int new_nd = newshape.len, k, elsize;
    int refcnt;
    npy_intp[] new_dimensions = newshape.ptr;
    npy_intp[] new_strides = new npy_intp[npy_defs.NPY_MAXDIMS];
    size_t sd;
    npy_intp[] dimptr;
    npy_intp[] strptr;
    npy_intp largest;

    if (!NpyArray_ISONESEGMENT(self)) {
        NpyErr_SetString(npyexc_type.NpyExc_ValueError, "resize only works on single-segment arrays");
        return(-1);
    }
    if (self.descr.elsize == 0) {
        NpyErr_SetString(npyexc_type.NpyExc_ValueError, "Bad data-type size.");
        return(-1);
    }

    /* Compute the new total element count, guarding against overflow. */
    newsize = 1;
    largest = npy_defs.NPY_MAX_INTP / self.descr.elsize;
    for (k = 0; k < new_nd; k++) {
        if (new_dimensions[k] == 0) {
            break;    /* a zero dimension makes the whole size zero */
        }
        if (new_dimensions[k] < 0) {
            NpyErr_SetString(npyexc_type.NpyExc_ValueError, "negative dimensions not allowed");
            return(-1);
        }
        newsize *= new_dimensions[k];
        if (newsize <= 0 || newsize > largest) {
            NpyErr_MEMORY();
            return(-1);
        }
    }

    oldsize = NpyArray_SIZE(self);
    if (oldsize != newsize) {
        if (!((self.flags & NPYARRAYFLAGS.NPY_OWNDATA) != 0)) {
            NpyErr_SetString(npyexc_type.NpyExc_ValueError, "cannot resize this array: it does not own its data");
            return(-1);
        }
        /* TODO: This isn't right for usage from C. I think we
         * need to revisit the refcounts so we don't have counts
         * of 0. */
        if (refcheck) {
            refcnt = (int)self.nob_refcnt;
        } else {
            refcnt = 0;
        }
        if ((refcnt > 0) || (self.base_arr != null) || (null != self.base_obj)) {
            /* BUGFIX: this message was split by a raw line break inside the
             * string literal (a compile error) and was missing "that"; the
             * intended embedded newline is now an explicit \n escape. */
            NpyErr_SetString(npyexc_type.NpyExc_ValueError, "cannot resize an array that references or is referenced\nby another array in this way.  Use the resize function");
            return(-1);
        }

        if (newsize == 0) {
            /* keep a minimal allocation so data is never a zero-byte buffer */
            sd = (size_t)self.descr.elsize;
        } else {
            sd = (size_t)(newsize * self.descr.elsize);
        }
        /* Reallocate space if needed */
        VoidPtr new_data = NpyDataMem_RENEW(self.data, sd);
        if (new_data == null) {
            NpyErr_MEMORY();
            return(-1);
        }
        self.data = new_data;
    }

    if ((newsize > oldsize) && NpyArray_ISWRITEABLE(self)) {
        /* Fill new memory with zeros */
        elsize = self.descr.elsize;
        memset(self.data + oldsize * elsize, 0, (newsize - oldsize) * elsize);
    }

    if (self.nd != new_nd) {
        /* Different number of dimensions: need new dimensions and strides
         * arrays. */
        dimptr = NpyDimMem_NEW(new_nd);
        strptr = NpyDimMem_NEW(new_nd);
        if (dimptr == null || strptr == null) {
            NpyErr_MEMORY();
            return(-1);
        }
        /* BUGFIX: the old code assigned self.nd = new_nd *before* copying and
         * then copied self.nd (== new_nd) entries, over-reading the old
         * arrays whenever the rank grew. Copy only the overlapping prefix. */
        int ncopy = (self.nd < new_nd) ? self.nd : new_nd;
        memcpy(dimptr, self.dimensions, ncopy * sizeof(npy_intp));
        memcpy(strptr, self.strides, ncopy * sizeof(npy_intp));
        self.nd = new_nd;
        self.dimensions = dimptr;
        self.strides = strptr;
    }

    /* make new_strides variable */
    sd = (size_t)self.descr.elsize;
    NPYARRAYFLAGS flags = 0;
    sd = (size_t)npy_array_fill_strides(new_strides, new_dimensions, new_nd, sd, self.flags, ref flags);
    self.flags = flags;
    /* Install the final shape and freshly computed strides. */
    Array.Copy(new_dimensions, self.dimensions, new_nd);
    Array.Copy(new_strides, self.strides, new_nd);
    return(0);
}
/*
 * Returns a view of 'ap' with axes a1 and a2 exchanged. Negative axis
 * numbers count from the end. When the swap is a no-op (a1 == a2, or the
 * array has fewer than two dimensions) the array itself is returned with an
 * additional reference. Returns null with an exception set on a bad axis.
 */
internal static NpyArray NpyArray_SwapAxes(NpyArray ap, int a1, int a2)
{
    int ndim = ap.nd;

    // Trivial cases: nothing to swap.
    if (a1 == a2) {
        Npy_INCREF(ap);
        return(ap);
    }
    if (ndim <= 1) {
        Npy_INCREF(ap);
        return(ap);
    }

    // Normalize negative axis indices, then range-check both.
    if (a1 < 0) {
        a1 += ndim;
    }
    if (a2 < 0) {
        a2 += ndim;
    }
    if ((a1 < 0) || (a1 >= ndim)) {
        NpyErr_SetString(npyexc_type.NpyExc_ValueError, "bad axis1 argument to swapaxes");
        return(null);
    }
    if ((a2 < 0) || (a2 >= ndim)) {
        NpyErr_SetString(npyexc_type.NpyExc_ValueError, "bad axis2 argument to swapaxes");
        return(null);
    }

    // Build the identity permutation, then exchange the two entries.
    npy_intp[] axisOrder = new npy_intp[npy_defs.NPY_MAXDIMS];
    for (int k = 0; k < ndim; k++) {
        axisOrder[k] = (npy_intp)k;
    }
    axisOrder[a1] = (npy_intp)a2;
    axisOrder[a2] = (npy_intp)a1;

    NpyArray_Dims swapped = new NpyArray_Dims();
    swapped.ptr = axisOrder;
    swapped.len = ndim;
    return(NpyArray_Transpose(ap, swapped));
}
/*
 * Returns an array with the requested new shape, sharing the original data
 * buffer when possible and copying only when the memory layout forces it.
 * At most one entry of 'newdims' may be -1, in which case that dimension is
 * inferred from the total size. Returns null with an exception set on error.
 *
 * self    - array to reshape
 * newdims - requested shape (len <= NPY_MAXDIMS)
 * fortran - requested read order; NPY_ANYORDER follows self's own layout
 */
internal static NpyArray NpyArray_Newshape(NpyArray self, NpyArray_Dims newdims, NPY_ORDER fortran)
{
    int i;
    npy_intp[] dimensions = newdims.ptr;
    NpyArray ret;
    int n = newdims.len;
    bool same = true;
    bool incref = true;    // false once 'self' is replaced by a fresh copy
    npy_intp[] strides = null;
    npy_intp[] newstrides = new npy_intp[npy_defs.NPY_MAXDIMS];
    NPYARRAYFLAGS flags;

    if (newdims.len > npy_defs.NPY_MAXDIMS) {
        NpyErr_SetString(npyexc_type.NpyExc_ValueError, string.Format("Maximum number of dimensions is {0}", npy_defs.NPY_MAXDIMS.ToString()));
        return(null);
    }
    // NOTE(review): for a non-Fortran array ANYORDER stays ANYORDER here
    // rather than becoming CORDER as in numpy's C sources — confirm intended.
    if (fortran == NPY_ORDER.NPY_ANYORDER) {
        fortran = NpyArray_ISFORTRAN(self) ? NPY_ORDER.NPY_FORTRANORDER : NPY_ORDER.NPY_ANYORDER;
    }
    /* Quick check to make sure anything actually needs to be done */
    if (n == self.nd) {
        same = true;
        i = 0;
        while (same && i < n) {
            if (NpyArray_DIM(self, i) != dimensions[i]) {
                same = false;
            }
            i++;
        }
        if (same) {
            // Shape is unchanged: a plain view suffices.
            return(NpyArray_View(self, null, null));
        }
    }

    /*
     * Returns a pointer to an appropriate strides array
     * if all we are doing is inserting ones into the shape,
     * or removing ones from the shape
     * or doing a combination of the two
     * In this case we don't need to do anything but update strides and
     * dimensions.  So, we can handle non single-segment cases.
     */
    i = _check_ones(self, n, dimensions, newstrides);
    if (i == 0) {
        strides = newstrides;    // ones-only change: reuse computed strides
    }
    flags = self.flags;

    if (strides == null) {
        /*
         * we are really re-shaping not just adding ones to the shape somewhere
         * fix any -1 dimensions and check new-dimensions against old size
         */
        if (_fix_unknown_dimension(newdims, NpyArray_SIZE(self)) < 0) {
            return(null);
        }
        /*
         * sometimes we have to create a new copy of the array
         * in order to get the right orientation and
         * because we can't just re-use the buffer with the
         * data in the order it is in.
         */
        // Copy is needed when the data is not one segment, or when the read
        // order requested disagrees with the array's layout (for nd > 1).
        if (!(NpyArray_ISONESEGMENT(self)) || (((NpyArray_CHKFLAGS(self, NPYARRAYFLAGS.NPY_CONTIGUOUS) && fortran == NPY_ORDER.NPY_FORTRANORDER) || (NpyArray_CHKFLAGS(self, NPYARRAYFLAGS.NPY_FORTRAN) && fortran == NPY_ORDER.NPY_CORDER)) && (self.nd > 1))) {
            // First try to express the reshape with strides alone.
            bool success = _attempt_nocopy_reshape(self, n, dimensions, newstrides, (fortran == NPY_ORDER.NPY_FORTRANORDER));
            if (success) {
                /* no need to copy the array after all */
                strides = newstrides;
                flags = self.flags;
            } else {
                // Copy required: work from the copy and skip the later INCREF.
                NpyArray newArray;
                newArray = NpyArray_NewCopy(self, fortran);
                if (newArray == null) {
                    return(null);
                }
                incref = false;
                self = newArray;
                flags = self.flags;
            }
        }

        /* We always have to interpret the contiguous buffer correctly */

        /* Make sure the flags argument is set. */
        if (n > 1) {
            if (fortran == NPY_ORDER.NPY_FORTRANORDER) {
                flags &= ~NPYARRAYFLAGS.NPY_CONTIGUOUS;
                flags |= NPYARRAYFLAGS.NPY_FORTRAN;
            } else {
                flags &= ~NPYARRAYFLAGS.NPY_FORTRAN;
                flags |= NPYARRAYFLAGS.NPY_CONTIGUOUS;
            }
        }
    } else if (n > 0) {
        /*
         * replace any 0-valued strides with
         * appropriate value to preserve contiguousness
         */
        if (fortran == NPY_ORDER.NPY_FORTRANORDER) {
            // Fortran order: strides grow from the first axis outward.
            if (strides[0] == 0) {
                strides[0] = (npy_intp)self.descr.elsize;
            }
            for (i = 1; i < n; i++) {
                if (strides[i] == 0) {
                    strides[i] = strides[i - 1] * dimensions[i - 1];
                }
            }
        } else {
            // C order: strides grow from the last axis inward.
            if (strides[n - 1] == 0) {
                strides[n - 1] = (npy_intp)self.descr.elsize;
            }
            for (i = n - 2; i > -1; i--) {
                if (strides[i] == 0) {
                    strides[i] = strides[i + 1] * dimensions[i + 1];
                }
            }
        }
    }

    // Build the result as a view over self's data (descr ref is consumed).
    Npy_INCREF(self.descr);
    ret = NpyArray_NewFromDescr(self.descr, n, dimensions, strides, self.data, flags, false, null, Npy_INTERFACE(self));
    if (ret == null) {
        goto fail;
    }
    if (incref) {
        Npy_INCREF(self);    // a fresh copy already carries its reference
    }
    ret.SetBase(self);
    NpyArray_UpdateFlags(ret, NPYARRAYFLAGS.NPY_CONTIGUOUS | NPYARRAYFLAGS.NPY_FORTRAN);
    Debug.Assert(null == ret.base_arr || null == ret.base_obj);
    return(ret);

fail:
    if (!incref) {
        Npy_DECREF(self);    // drop the copy we created above
    }
    return(null);
}
/// <summary>
/// Public wrapper: delegates to numpyinternal.NpyArray_SetStrides, which
/// validates and installs the new strides in place. Returns 0 on success,
/// -1 with an exception set on failure.
/// </summary>
internal static int NpyArray_SetStrides(NpyArray self, NpyArray_Dims newstrides)
{
    return(numpyinternal.NpyArray_SetStrides(self, newstrides));
}
/// <summary>
/// Public wrapper: delegates to numpyinternal.NpyArray_SetShape, which
/// assigns a new shape in place when no copy is required. Returns 0 on
/// success, -1 with an exception set on failure.
/// </summary>
internal static int NpyArray_SetShape(NpyArray self, NpyArray_Dims newdims)
{
    return(numpyinternal.NpyArray_SetShape(self, newdims));
}
/// <summary>
/// Public wrapper: delegates to numpyinternal.NpyArray_Transpose. A null
/// 'permute' reverses the axis order. Returns a new view, or null with an
/// exception set on an invalid permutation.
/// </summary>
internal static NpyArray NpyArray_Transpose(NpyArray ap, NpyArray_Dims permute)
{
    return(numpyinternal.NpyArray_Transpose(ap, permute));
}
/// <summary>
/// Public wrapper: delegates to numpyinternal.NpyArray_Newshape, which
/// reshapes as a view when possible and copies otherwise. Returns the
/// reshaped array, or null with an exception set on failure.
/// </summary>
internal static NpyArray NpyArray_Newshape(NpyArray self, NpyArray_Dims newdims, NPY_ORDER fortran)
{
    return(numpyinternal.NpyArray_Newshape(self, newdims, fortran));
}
/// <summary>
/// Public wrapper: delegates to numpyinternal.NpyArray_Resize, which
/// reallocates the array's data in place. Returns 0 on success, -1 with an
/// exception set on failure.
/// </summary>
internal static int NpyArray_Resize(NpyArray self, NpyArray_Dims newshape, bool refcheck, NPY_ORDER fortran)
{
    return(numpyinternal.NpyArray_Resize(self, newshape, refcheck, fortran));
}