/*
 * Assigns a scalar value specified by 'src_dtype' and 'src_data'
 * to elements of 'dst'.
 *
 * dst: The destination array.
 * src_dtype: The data type of the source scalar.
 * src_data: The memory element of the source scalar.
 * wheremask: If non-NULL, a boolean mask specifying where to copy.
 * casting: An exception is raised if the assignment violates this
 *          casting rule.
 *
 * This function is implemented in array_assign_scalar.c.
 *
 * Returns 0 on success, -1 on failure.
 */
private static int NpyArray_AssignRawScalar(NpyArray dst, NpyArray_Descr src_dtype, VoidPtr src_data, NpyArray wheremask, NPY_CASTING casting)
{
    // NOTE(review): this is a stub — it performs no assignment and always
    // reports success (0). Callers such as NpyArray_AssignArray rely on it
    // for the 0-dim 'src' case, so until it is ported from
    // array_assign_scalar.c, scalar assignments silently do nothing.
    // TODO: confirm whether the real implementation lives elsewhere in the port.
    return(0);
}
/*
 * Copies 'src' into 'dst', optionally restricted by a boolean where-mask,
 * honoring the given casting rule. Consumes (XDECREFs) 'src' and the
 * locally created where-mask on every path, success or failure.
 */
internal static void NpyArray_CopyTo2(NpyArray dst, NpyArray src, NPY_CASTING casting, NpyArray wheremask_in)
{
    NpyArray wheremask = null;

    if (wheremask_in != null)
    {
        // Convert the caller's mask to a boolean array.
        NpyArray_Descr boolDescr = NpyArray_DescrFromType(NPY_TYPES.NPY_BOOL);
        if (boolDescr != null)
        {
            wheremask = NpyArray_FromArray(wheremask_in, boolDescr, NPYARRAYFLAGS.NPY_DEFAULT);
        }
        if (wheremask == null)
        {
            // Descriptor lookup or conversion failed: release references and bail.
            Npy_XDECREF(src);
            Npy_XDECREF(wheremask);
            return;
        }
    }

    // Success and failure require identical cleanup, so the result of the
    // assignment does not alter the path taken below.
    NpyArray_AssignArray(dst, src, wheremask, casting);

    Npy_XDECREF(src);
    Npy_XDECREF(wheremask);
    return;
}
/*
 * Element-wise copy of 'srcArray' into 'destArray', broadcasting both (and
 * the optional 'whereArray' mask) to the destination shape. Each element is
 * copied only where the mask evaluates to true; with no mask, every element
 * is copied. Serial (single-threaded) implementation.
 *
 * NOTE(review): a method with the same name and parameter list also appears
 * elsewhere in this source (parallel variant) — verify they live in
 * different classes or that only one is compiled.
 */
internal static void NpyArray_CopyTo(NpyArray destArray, NpyArray srcArray, NPY_CASTING casting, NpyArray whereArray)
{
    var totalElements = NpyArray_Size(destArray);
    NumericOperations ops = NumericOperations.GetOperations(null, srcArray, destArray, whereArray);

    // Broadcast every operand to the destination's shape.
    NpyArrayIterObject srcIter = NpyArray_BroadcastToShape(srcArray, destArray.dimensions, destArray.nd);
    NpyArrayIterObject destIter = NpyArray_BroadcastToShape(destArray, destArray.dimensions, destArray.nd);
    NpyArrayIterObject whereIter = (whereArray != null)
        ? NpyArray_BroadcastToShape(whereArray, destArray.dimensions, destArray.nd)
        : null;

    long processed = 0;
    bool copyThisElement = true; // stays true for the unmasked case

    while (processed < totalElements)
    {
        var value = ops.srcGetItem(srcIter.dataptr.data_offset - srcArray.data.data_offset, srcArray);

        if (whereIter != null)
        {
            copyThisElement = (bool)ops.operandGetItem(whereIter.dataptr.data_offset - whereArray.data.data_offset, whereArray);
        }

        if (copyThisElement)
        {
            ops.destSetItem(destIter.dataptr.data_offset - destArray.data.data_offset, value, destArray);
        }

        // Advance all iterators in lock-step.
        NpyArray_ITER_NEXT(srcIter);
        NpyArray_ITER_NEXT(destIter);
        if (whereIter != null)
        {
            NpyArray_ITER_NEXT(whereIter);
        }

        processed++;
    }

    return;
}
/*
 * Typestr converter: maps an NPY_CASTING rule to the quoted name used in
 * error messages (e.g. NPY_SAFE_CASTING -> "'safe'"). Unrecognized values
 * yield "<unknown>".
 */
static string npy_casting_to_string(NPY_CASTING casting)
{
    if (casting == NPY_CASTING.NPY_NO_CASTING)
        return "'no'";
    if (casting == NPY_CASTING.NPY_EQUIV_CASTING)
        return "'equiv'";
    if (casting == NPY_CASTING.NPY_SAFE_CASTING)
        return "'safe'";
    if (casting == NPY_CASTING.NPY_SAME_KIND_CASTING)
        return "'same_kind'";
    if (casting == NPY_CASTING.NPY_UNSAFE_CASTING)
        return "'unsafe'";
    return "<unknown>";
}
/*
 * Element-wise copy of 'srcArray' into 'destArray', broadcasting both (and
 * the optional 'whereArray' mask) to the destination shape, executed in
 * parallel. The broadcast iterators are split into independent sub-ranges
 * and each range is processed by one Parallel.For iteration.
 *
 * Fixes over the previous version:
 *  - The results of NpyArray_ITER_ParallelSplit were consumed via
 *    Count()/ElementAt(index) on IEnumerable, which re-enumerates the
 *    sequence on every call (O(n) per lookup, and a re-split hazard if the
 *    sequence is lazily produced). They are now materialized once into
 *    arrays before the parallel loop.
 *  - Removed the unused 'destSize' local.
 *  - Per-range mask presence is now tested via the local iterator instead
 *    of the shared outer one.
 */
internal static void NpyArray_CopyTo(NpyArray destArray, NpyArray srcArray, NPY_CASTING casting, NpyArray whereArray)
{
    NumericOperations operations = NumericOperations.GetOperations(null, srcArray, destArray, whereArray);

    // Broadcast every operand to the destination's shape.
    NpyArrayIterObject SrcIter = NpyArray_BroadcastToShape(srcArray, destArray.dimensions, destArray.nd);
    NpyArrayIterObject DestIter = NpyArray_BroadcastToShape(destArray, destArray.dimensions, destArray.nd);
    NpyArrayIterObject WhereIter = null;
    if (whereArray != null)
    {
        WhereIter = NpyArray_BroadcastToShape(whereArray, destArray.dimensions, destArray.nd);
    }

    // Materialize the parallel splits exactly once so each worker gets a
    // stable iterator instance and index lookups are O(1).
    NpyArrayIterObject[] srcParallelIters = NpyArray_ITER_ParallelSplit(SrcIter).ToArray();
    NpyArrayIterObject[] destParallelIters = NpyArray_ITER_ParallelSplit(DestIter).ToArray();
    NpyArrayIterObject[] whereParallelIters = null;
    if (WhereIter != null)
    {
        whereParallelIters = NpyArray_ITER_ParallelSplit(WhereIter).ToArray();
    }

    Parallel.For(0, destParallelIters.Length, index =>
    {
        NpyArrayIterObject ldestIter = destParallelIters[index];
        NpyArrayIterObject lsrcIter = srcParallelIters[index];
        NpyArrayIterObject lwhereIter = null;
        bool whereValue = true; // stays true for the unmasked case

        if (whereParallelIters != null)
        {
            lwhereIter = whereParallelIters[index];
        }

        while (ldestIter.index < ldestIter.size)
        {
            var srcValue = operations.srcGetItem(lsrcIter.dataptr.data_offset - srcArray.data.data_offset, srcArray);

            if (lwhereIter != null)
            {
                whereValue = (bool)operations.operandGetItem(lwhereIter.dataptr.data_offset - whereArray.data.data_offset, whereArray);
            }

            if (whereValue)
            {
                operations.destSetItem(ldestIter.dataptr.data_offset - destArray.data.data_offset, srcValue, destArray);
            }

            // Advance this worker's iterators in lock-step.
            NpyArray_ITER_PARALLEL_NEXT(ldestIter);
            NpyArray_ITER_PARALLEL_NEXT(lsrcIter);
            if (lwhereIter != null)
            {
                NpyArray_ITER_PARALLEL_NEXT(lwhereIter);
            }
        }
    });

    return;
}
/*NUMPY_API
 * Returns true if data of type 'from' may be cast to data of type
 * 'to' according to the rule 'casting'.
 */
internal static bool NpyArray_CanCastTypeTo(NpyArray_Descr from, NpyArray_Descr to, NPY_CASTING casting)
{
    // NOTE(review): the 'casting' rule is currently ignored — this delegates
    // to the unparameterized NpyArray_CanCastTo, so all rules behave the
    // same. Confirm whether per-rule checking is still to be ported.
    return(NpyArray_CanCastTo(from, to));
}
/*
 * Public entry point: copies 'src' into 'dst' (optionally masked by
 * 'wheremask_in') by delegating to the numpyinternal implementation.
 */
internal static void NpyArray_CopyTo(NpyArray dst, NpyArray src, NPY_CASTING casting, NpyArray wheremask_in)
{
    numpyinternal.NpyArray_CopyTo(dst, src, casting, wheremask_in);
}
/*
 * An array assignment function for copying arrays, broadcasting 'src' into
 * 'dst'. This function makes a temporary copy of 'src' if 'src' and
 * 'dst' overlap, to be able to handle views of the same data with
 * different strides.
 *
 * dst: The destination array.
 * src: The source array.
 * wheremask: If non-null, a boolean mask specifying where to copy.
 * casting: An exception is raised if the copy violates this
 *          casting rule.
 *
 * Returns 0 on success, -1 on failure.
 */
private static int NpyArray_AssignArray(NpyArray dst, NpyArray src, NpyArray wheremask, NPY_CASTING casting)
{
    bool copied_src = false;

    npy_intp[] src_strides = new npy_intp[npy_defs.NPY_MAXDIMS];

    /* Use array_assign_scalar if 'src' NDIM is 0 */
    if (NpyArray_NDIM(src) == 0)
    {
        return(NpyArray_AssignRawScalar(dst, NpyArray_DESCR(src), NpyArray_DATA(src), wheremask, casting));
    }

    /*
     * Performance fix for expressions like "a[1000:6000] += x".  In this
     * case, first an in-place add is done, followed by an assignment,
     * equivalently expressed like this:
     *
     *   tmp = a[1000:6000]   # Calls array_subscript in mapping.c
     *   np.add(tmp, x, tmp)
     *   a[1000:6000] = tmp   # Calls array_assign_subscript in mapping.c
     *
     * In the assignment the underlying data type, shape, strides, and
     * data pointers are identical, but src != dst because they are separately
     * generated slices.  By detecting this and skipping the redundant
     * copy of values to themselves, we potentially give a big speed boost.
     *
     * Note that we don't call EquivTypes, because usually the exact same
     * dtype object will appear, and we don't want to slow things down
     * with a complicated comparison.  The comparisons are ordered to
     * try and reject this with as little work as possible.
     */
    if (NpyArray_DATA(src) == NpyArray_DATA(dst) &&
        NpyArray_DESCR(src) == NpyArray_DESCR(dst) &&
        NpyArray_NDIM(src) == NpyArray_NDIM(dst) &&
        NpyArray_CompareLists(NpyArray_DIMS(src), NpyArray_DIMS(dst), NpyArray_NDIM(src)) &&
        NpyArray_CompareLists(NpyArray_STRIDES(src), NpyArray_STRIDES(dst), NpyArray_NDIM(src)))
    {
        /*printf("Redundant copy operation detected\n");*/
        return(0);
    }

    if (NpyArray_FailUnlessWriteable(dst, "assignment destination") < 0)
    {
        goto fail;
    }

    /* Check the casting rule */
    if (!NpyArray_CanCastTypeTo(NpyArray_DESCR(src), NpyArray_DESCR(dst), casting))
    {
        // NOTE(review): errmsg is built but never reported — no error-setting
        // call precedes the goto, so the caller only sees the -1 return.
        // Presumably an NpyErr_SetString-style call was dropped in porting;
        // confirm against the original C source.
        string errmsg = string.Format("Cannot cast scalar from {0} to {1} according to the rule {2}",
                                      src.GetType(), dst.GetType(), npy_casting_to_string(casting));
        goto fail;
    }

    /*
     * When ndim is 1 and the strides point in the same direction,
     * the lower-level inner loop handles copying
     * of overlapping data.  For bigger ndim and opposite-strided 1D
     * data, we make a temporary copy of 'src' if 'src' and 'dst' overlap.'
     */
    if (((NpyArray_NDIM(dst) == 1 && NpyArray_NDIM(src) >= 1 &&
          NpyArray_STRIDES(dst)[0] * NpyArray_STRIDES(src)[NpyArray_NDIM(src) - 1] < 0) ||
         NpyArray_NDIM(dst) > 1 || NpyArray_HASFIELDS(dst)) &&
        arrays_overlap(src, dst))
    {
        NpyArray tmp;

        /*
         * Allocate a temporary copy array.
         */
        tmp = NpyArray_NewLikeArray(dst, NPY_ORDER.NPY_KEEPORDER, null, false);
        if (tmp == null)
        {
            goto fail;
        }
        // Unsafe cast is fine here: the real rule was already validated above.
        if (NpyArray_AssignArray(tmp, src, null, NPY_CASTING.NPY_UNSAFE_CASTING) < 0)
        {
            Npy_DECREF(tmp);
            goto fail;
        }
        src = tmp;
        copied_src = true;  // remember to release the temporary on exit
    }

    /* Broadcast 'src' to 'dst' for raw iteration */
    if (NpyArray_NDIM(src) > NpyArray_NDIM(dst))
    {
        int ndim_tmp = NpyArray_NDIM(src);
        npy_intp[] src_shape_tmp = NpyArray_DIMS(src);
        npy_intp[] src_strides_tmp = NpyArray_STRIDES(src);
        /*
         * As a special case for backwards compatibility, strip
         * away unit dimensions from the left of 'src'
         */
        while (ndim_tmp > NpyArray_NDIM(dst) && src_shape_tmp[0] == 1)
        {
            --ndim_tmp;
            // Drop the leading element of both shape and strides arrays.
            var tmp = src_shape_tmp;
            src_shape_tmp = new npy_intp[src_shape_tmp.Length - 1];
            Array.Copy(tmp, 1, src_shape_tmp, 0, src_shape_tmp.Length);

            tmp = src_strides_tmp;
            src_strides_tmp = new npy_intp[src_strides_tmp.Length - 1];
            Array.Copy(tmp, 1, src_strides_tmp, 0, src_strides_tmp.Length);
        }

        if (broadcast_strides(NpyArray_NDIM(dst), NpyArray_DIMS(dst),
                              ndim_tmp, src_shape_tmp,
                              src_strides_tmp, "input array",
                              src_strides) < 0)
        {
            goto fail;
        }
    }
    else
    {
        if (broadcast_strides(NpyArray_NDIM(dst), NpyArray_DIMS(dst),
                              NpyArray_NDIM(src), NpyArray_DIMS(src),
                              NpyArray_STRIDES(src), "input array",
                              src_strides) < 0)
        {
            goto fail;
        }
    }

    /* optimization: scalar boolean mask */
    if (wheremask != null &&
        NpyArray_NDIM(wheremask) == 0 &&
        NpyArray_DESCR(wheremask).type_num == NPY_TYPES.NPY_BOOL)
    {
        bool[] values = NpyArray_DATA(wheremask).datap as bool[];
        bool value = values[0];
        if (value)
        {
            /* where=True is the same as no where at all */
            wheremask = null;
        }
        else
        {
            /* where=False copies nothing */
            return(0);
        }
    }

    if (wheremask == null)
    {
        /* A straightforward value assignment */
        /* Do the assignment with raw array iteration */
        if (raw_array_assign_array(NpyArray_NDIM(dst), NpyArray_DIMS(dst),
                                   NpyArray_DESCR(dst), NpyArray_DATA(dst), NpyArray_STRIDES(dst),
                                   NpyArray_DESCR(src), NpyArray_DATA(src), src_strides) < 0)
        {
            goto fail;
        }
    }
    else
    {
        npy_intp[] wheremask_strides = new npy_intp[npy_defs.NPY_MAXDIMS];

        /* Broadcast the wheremask to 'dst' for raw iteration */
        if (broadcast_strides(NpyArray_NDIM(dst), NpyArray_DIMS(dst),
                              NpyArray_NDIM(wheremask), NpyArray_DIMS(wheremask),
                              NpyArray_STRIDES(wheremask), "where mask",
                              wheremask_strides) < 0)
        {
            goto fail;
        }

        /* A straightforward where-masked assignment */
        /* Do the masked assignment with raw array iteration */
        if (raw_array_wheremasked_assign_array(
                NpyArray_NDIM(dst), NpyArray_DIMS(dst),
                NpyArray_DESCR(dst), NpyArray_DATA(dst), NpyArray_STRIDES(dst),
                NpyArray_DESCR(src), NpyArray_DATA(src), src_strides,
                NpyArray_DESCR(wheremask), NpyArray_DATA(wheremask), wheremask_strides) < 0)
        {
            goto fail;
        }
    }

    if (copied_src)
    {
        Npy_DECREF(src);
    }
    return(0);

fail:
    if (copied_src)
    {
        Npy_DECREF(src);
    }
    return(-1);
}
/*NUMPY_API
 * Allocate a new iterator for one array object.
 */
internal static NpyIter NpyIter_New(NpyArray op, ITERFLAGS flags, NPY_ORDER order, NPY_CASTING casting, NpyArray_Descr dtype)
{
    /* Split the flags into separate global and op flags */
    ITERFLAGS op_flags = flags & ITERFLAGS.NPY_ITER_PER_OP_FLAGS;
    flags &= ITERFLAGS.NPY_ITER_GLOBAL_FLAGS;

    // NOTE(review): the '&op', '&op_flags', '&dtype' address-of expressions
    // look like unported C — taking the address of managed locals is not
    // valid in safe C#. Presumably NpyIter_AdvancedNew should receive
    // single-element arrays (or ref parameters) here; confirm against the
    // callee's signature before compiling this path.
    return(NpyIter_AdvancedNew(1, &op, flags, order, casting, &op_flags, &dtype, -1, null, null, 0));
}