/// <summary>
/// Tests, without blocking, whether this non-blocking receive has completed.
/// </summary>
/// <returns>
/// The completed status (cached after the first successful test) if the receive
/// has finished; otherwise <c>null</c>.
/// </returns>
public override CompletedStatus Test()
{
    if (cachedStatus != null)
    {
        return(cachedStatus);
    }
    int flag;
    Unsafe.MPI_Status status;
    unsafe
    {
        // If completed, this will set request to null.
        int errorCode = Unsafe.MPI_Test(ref request, out flag, out status);
        if (errorCode != Unsafe.MPI_SUCCESS)
        {
            throw Environment.TranslateErrorIntoException(errorCode);
        }
    }
    if (flag == 0)
    {
        // Not yet complete.
        return(null);
    }
    // Hand the received value to the user-supplied callback, if any, before
    // Cleanup() runs (presumably Cleanup releases the pinned buffer — confirm).
    action?.Invoke((T)value);
    Cleanup();
    // Count is 1: this request receives exactly one value of type T.
    cachedStatus = new CompletedStatus(status, 1);
    return(cachedStatus);
}
/// <summary>
/// Non-blocking check for completion of this request.
/// </summary>
/// <returns>
/// The completion status (cached once the request finishes), or <c>null</c>
/// if the request is still pending.
/// </returns>
public override CompletedStatus Test()
{
    if (cachedStatus != null)
    {
        return cachedStatus;
    }

    int completed;
    Unsafe.MPI_Status mpiStatus;
    unsafe
    {
        int result = Unsafe.MPI_Test(ref request, out completed, out mpiStatus);
        if (result != Unsafe.MPI_SUCCESS)
        {
            throw Environment.TranslateErrorIntoException(result);
        }
    }

    if (completed == 0)
    {
        // Still in flight.
        return null;
    }

    Cleanup();
    cachedStatus = new CompletedStatus(mpiStatus, 1);
    return cachedStatus;
}
/// <summary>
/// Blocks until this receive request completes, then returns its status.
/// </summary>
/// <returns>The completed status, including the number of elements received.</returns>
public override CompletedStatus Wait()
{
    if (cachedStatus != null)
    {
        return(cachedStatus);
    }
    Unsafe.MPI_Status status;
    int count;
    int errorCode;
    unsafe
    {
        // Block until the underlying MPI request finishes.
        errorCode = Unsafe.MPI_Wait(ref request, out status);
        if (errorCode != Unsafe.MPI_SUCCESS)
        {
            throw Environment.TranslateErrorIntoException(errorCode);
        }
        // Determine how many elements of T actually arrived.
        errorCode = Unsafe.MPI_Get_count(ref status, FastDatatypeCache <T> .datatype, out count);
    }
    // Cleanup() runs before the Get_count error check so resources are
    // released even when Get_count failed.
    Cleanup();
    if (errorCode != Unsafe.MPI_SUCCESS)
    {
        throw Environment.TranslateErrorIntoException(errorCode);
    }
    cachedStatus = new CompletedStatus(status, count);
    return(cachedStatus);
}
/// <summary>
/// Cancels this request if it has not already completed, then releases resources.
/// </summary>
public override void Cancel()
{
    if (cachedStatus == null)
    {
        Unsafe.MPI_Status status;
        unsafe
        {
            int errorCode = Unsafe.MPI_Cancel(ref request);
            if (errorCode != Unsafe.MPI_SUCCESS)
            {
                throw Environment.TranslateErrorIntoException(errorCode);
            }
            int flag;
            // Test so MPI can finish (and deallocate) the cancelled request.
            errorCode = Unsafe.MPI_Test(ref request, out flag, out status);
            if (errorCode != Unsafe.MPI_SUCCESS)
            {
                throw Environment.TranslateErrorIntoException(errorCode);
            }
            if (flag != 0)
            {
                // The request completed (normally or via cancellation);
                // record a zero-count status.
                cachedStatus = new CompletedStatus(status, 0);
            }
        }
    }
    Cleanup();
}
/// <summary>
/// Create a user-defined MPI operation based on the given reduction operation.
/// </summary>
/// <param name="op">The reduction operation.</param>
public Operation(ReductionOperation <T> op)
{
    // Try to find the predefined MPI operation
    mpiOp = GetPredefinedOperation(op);
    if (mpiOp == Unsafe.MPI_OP_NULL)
    {
        // Since we could not find a predefined operation, wrap up the user's operation
        // in a delegate that matches the signature of MPI_User_function
        unsafe
        {
            // Create the MPI_Op from the wrapper delegate
            if (UseGeneratedUserOps)
            {
                wrapperDelegate = MakeMPIDelegate(op);
            }
            else
            {
                WrapReductionOperation wrapper = new WrapReductionOperation(op);
                wrapperDelegate = new MPIDelegate(wrapper.Apply);
            }
            // The delegate is kept in the wrapperDelegate field so the function
            // pointer handed to MPI stays valid for the lifetime of this object
            // (GetFunctionPointerForDelegate does not root the delegate).
            // The second argument (commute) is 0: the user operation is
            // registered as non-commutative.
            int errorCode = Unsafe.MPI_Op_create(Marshal.GetFunctionPointerForDelegate(wrapperDelegate), 0, out mpiOp);
            if (errorCode != Unsafe.MPI_SUCCESS)
            {
                throw Environment.TranslateErrorIntoException(errorCode);
            }
        }
    }
}
/// <summary>
/// Creates a new attribute that will be stored inside the low-level communicator.
/// </summary>
/// <param name="duplication">How this attribute will be duplicated.</param>
/// <param name="onHeap">Whether this attribute will be allocated on the heap.</param>
public StoredAttribute(AttributeDuplication duplication, bool onHeap) : base(duplication)
{
    this.onHeap = onHeap;
    unsafe
    {
        // Default: no copy on communicator duplication, no delete callback.
        Unsafe.MPI_Copy_function copyFn = Unsafe.MPI_NULL_COPY_FN;
        Unsafe.MPI_Delete_function deleteFn = Unsafe.MPI_NULL_DELETE_FN;
#if BROKEN_NULL_DELEGATE
        // Workaround for platforms where passing the null-function constants
        // fails: substitute managed no-op callbacks instead.
        copyFn = NullCopy;
        deleteFn = NullDelete;
#endif
        if (duplication != AttributeDuplication.None)
        {
            if (onHeap)
            {
                // Heap-stored attributes must be deep-copied on duplication and
                // their memory released when the attribute is deleted.
                copyFn = DeepCopy;
                deleteFn = DeleteAttributeMemory;
            }
            else
            {
                // Value fits in the attribute slot itself; a shallow copy suffices.
                copyFn = ShallowCopy;
            }
        }
        int errorCode = Unsafe.MPI_Keyval_create(copyFn, deleteFn, out keyval, new IntPtr());
        if (errorCode != Unsafe.MPI_SUCCESS)
        {
            throw Environment.TranslateErrorIntoException(errorCode);
        }
    }
}
/// <summary>
/// Creates a <see cref="CartesianCommunicator"/>.
/// </summary>
/// <param name="oldComm">
/// An existing Intracommunicator from which to create the new communicator (e.g. <see cref="MPI.Communicator.world"/>).
/// </param>
/// <param name="ndims">
/// The number of dimensions for the Cartesian grid.
/// </param>
/// <param name="dims">
/// An array of length <paramref name="ndims"/> indicating the size of the grid in each dimension.
/// </param>
/// <param name="periods">
/// A logical array of length <paramref name="ndims"/> indicating whether the grid is periodic in any given dimension.
/// </param>
/// <param name="reorder">
/// Logical indicating whether ranks may be reordered or not.
/// </param>
public CartesianCommunicator(Intracommunicator oldComm, int ndims, int[] dims, bool[] periods, bool reorder)
{
    // MPI expects C-style int flags, not bools.
    int reorder_int = Convert.ToInt32(reorder);
    int[] periods_int = BoolToInt(periods);
    unsafe
    {
        // Pin the managed arrays and the communicator handle for the native call.
        fixed(int *dimsPtr = dims, periodsPtr = periods_int)
        fixed(MPI_Comm * commPtr = &(this.comm))
        {
            int errorCode = Unsafe.MPI_Cart_create(oldComm.comm, ndims, dimsPtr, periodsPtr, reorder_int, commPtr);
            if (errorCode != Unsafe.MPI_SUCCESS)
            {
                throw Environment.TranslateErrorIntoException(errorCode);
            }
        }
    }
    // Cache the topology description on the managed side.
    NDims = ndims;
    Dims = dims;
    Periods = periods;
    Coords = GetCartesianCoordinates(Rank);
    AttachToComm();
}
/// <summary>
/// Checks, without blocking, whether this request has finished.
/// </summary>
/// <returns>The completion status if finished; otherwise <c>null</c>.</returns>
public override CompletedStatus Test()
{
    if (cachedStatus != null)
    {
        return cachedStatus;
    }

    Unsafe.MPI_Status mpiStatus;
    int completed;
    unsafe
    {
        int result = Unsafe.MPI_Test(ref request, out completed, out mpiStatus);
        if (result != Unsafe.MPI_SUCCESS)
        {
            throw Environment.TranslateErrorIntoException(result);
        }
        if (completed == 0)
        {
            // Still pending; nothing to clean up yet.
            return null;
        }
    }

    Cleanup();
    cachedStatus = new CompletedStatus(mpiStatus, count);
    return cachedStatus;
}
/// <summary>
/// Frees memory allocated via <see cref="AllocateMemory"/>.
/// </summary>
/// <param name="ptr">The pointer returned from <see cref="AllocateMemory"/>.</param>
/// <param name="fromMPI">Whether this memory came from MPI or from the unmanaged heap.</param>
private static void FreeMemory(IntPtr ptr, bool fromMPI)
{
    // Freeing a null pointer is a no-op. (IntPtr.Zero comparison replaces the
    // original "new IntPtr(null)" check, which needlessly required an unsafe block.)
    if (ptr == IntPtr.Zero)
    {
        return;
    }
    if (fromMPI)
    {
        // Only return memory to MPI while MPI is still alive; after finalization
        // MPI_Free_mem can no longer be called, so the memory is deliberately leaked.
        if (!Environment.Finalized)
        {
            unsafe
            {
                int errorCode = Unsafe.MPI_Free_mem(ptr);
                if (errorCode != Unsafe.MPI_SUCCESS)
                {
                    throw Environment.TranslateErrorIntoException(errorCode);
                }
            }
        }
    }
    else
    {
        // Memory came from the unmanaged heap (see AllocateMemory's fallback path).
        Marshal.FreeHGlobal(ptr);
    }
}
/// <summary>
/// Allocates unmanaged memory. The memory will either return memory allocated from
/// MPI or from the unmanaged heap, depending on <paramref name="fromMPI"/>. Memory
/// allocated in this way should be freed via <see cref="FreeMemory"/>.
/// </summary>
/// <param name="bytes">The number of bytes to allocate.</param>
/// <param name="fromMPI">
/// If true, this routine will first try to allocate the memory
/// via MPI's memory allocation routines (e.g. <see cref="Unsafe.MPI_Alloc_mem"/>),
/// then will fall back to C#'s unmanaged memory allocation routines.
/// This value will be updated to reflect where the memory actually came from.
/// </param>
/// <returns>A pointer to the newly-allocated memory.</returns>
private static IntPtr AllocateMemory(long bytes, ref bool fromMPI)
{
    if (fromMPI)
    {
        // Try allocating memory from MPI, directly.
        IntPtr ptr;
        unsafe
        {
            int errorCode = Unsafe.MPI_Alloc_mem((MPI_Aint)bytes, Unsafe.MPI_INFO_NULL, out ptr);
            if (errorCode == Unsafe.MPI_SUCCESS)
            {
                return(ptr);
            }
            else if (errorCode != Unsafe.MPI_ERR_NO_MEM)
            {
                // Any error other than "out of memory" is a genuine failure;
                // out-of-memory falls through to the heap path below.
                throw Environment.TranslateErrorIntoException(errorCode);
            }
        }
        // MPI doesn't have any more memory; fall back to the C# facilities for allocating
        // unmanaged memory
        fromMPI = false;
    }
    // Allocate memory from the unmanaged heap
    return(Marshal.AllocHGlobal(new IntPtr(bytes)));
}
/// <summary>
/// Cancels this request and waits for the cancellation to be processed,
/// caching a zero-count status for the cancelled operation.
/// </summary>
public override void Cancel()
{
    if (cachedStatus != null)
    {
        return;
    }

    unsafe
    {
        Unsafe.MPI_Status mpiStatus;

        int result = Unsafe.MPI_Cancel(ref request);
        if (result != Unsafe.MPI_SUCCESS)
        {
            throw Environment.TranslateErrorIntoException(result);
        }

        // Wait until MPI has fully processed the cancellation.
        result = Unsafe.MPI_Wait(ref request, out mpiStatus);
        if (result != Unsafe.MPI_SUCCESS)
        {
            throw Environment.TranslateErrorIntoException(result);
        }

        cachedStatus = new CompletedStatus(mpiStatus, 0);
    }
    Cleanup();
}
/// <summary>
/// Non-blocking completion test for a two-part (header + body) serialized message.
/// </summary>
/// <returns>The completed status, or <c>null</c> if any part is still pending.</returns>
public override CompletedStatus Test()
{
    if (cachedStatus != null)
    {
        return(cachedStatus);
    }
    Unsafe.MPI_Status status;
    unsafe
    {
        int flag;
        if (requests.header == Unsafe.MPI_REQUEST_NULL)
        {
            // Only the body transfer is still outstanding.
            // Test whether the request has completed
            int errorCode = Unsafe.MPI_Test(ref requests.body, out flag, out status);
            if (errorCode != Unsafe.MPI_SUCCESS)
            {
                throw Environment.TranslateErrorIntoException(errorCode);
            }
        }
        else if (requests.body == Unsafe.MPI_REQUEST_NULL)
        {
            // Only the header transfer is still outstanding.
            // Test whether the request has completed
            int errorCode = Unsafe.MPI_Test(ref requests.header, out flag, out status);
            if (errorCode != Unsafe.MPI_SUCCESS)
            {
                throw Environment.TranslateErrorIntoException(errorCode);
            }
        }
        else
        {
            // NOTE(review): this treats &requests.body as the start of a
            // 2-element MPI_Request array, which assumes body and header are
            // adjacent fields in the requests struct — confirm its layout.
            fixed(MPI_Request *requestPtr = &requests.body)
            {
                // Test whether both requests completed
                Unsafe.MPI_Status[] statuses = new Unsafe.MPI_Status[2];
                {
                    int errorCode = Unsafe.MPI_Testall(2, &requestPtr[0], out flag, statuses);
                    if (errorCode != Unsafe.MPI_SUCCESS)
                    {
                        throw Environment.TranslateErrorIntoException(errorCode);
                    }
                    status = statuses[0];
                }
            }
        }
        if (flag == 0)
        {
            // The communications have not yet completed. We're done here
            return(null);
        }
    }
    Cleanup();
    cachedStatus = new CompletedStatus(status, this.count);
    return(cachedStatus);
}
/// <summary>
/// Cancels both halves (header and body) of this two-part serialized-message
/// request. If either half is observed to complete during cancellation, a
/// zero-count status is cached. Errors from either cancellation are reported
/// only after resources have been released.
/// </summary>
public override void Cancel()
{
    if (cachedStatus != null)
    {
        return;
    }
    int errorCode1 = Unsafe.MPI_SUCCESS;
    int errorCode2 = Unsafe.MPI_SUCCESS;
    Unsafe.MPI_Status status = new Unsafe.MPI_Status();
    int flag = 0;
    unsafe
    {
        // Cancel both MPI requests
        if (requests.body != Unsafe.MPI_REQUEST_NULL)
        {
            errorCode1 = Unsafe.MPI_Cancel(ref requests.body);
            if (errorCode1 == Unsafe.MPI_SUCCESS)
            {
                // Test so MPI can finish and deallocate the cancelled request.
                errorCode1 = Unsafe.MPI_Test(ref requests.body, out flag, out status);
            }
        }
        if (requests.header != Unsafe.MPI_REQUEST_NULL)
        {
            errorCode2 = Unsafe.MPI_Cancel(ref requests.header);
            if (errorCode2 == Unsafe.MPI_SUCCESS)
            {
                int myFlag = 0;
                // BUG FIX: test the header request that was just cancelled;
                // the original code re-tested requests.body here.
                errorCode2 = Unsafe.MPI_Test(ref requests.header, out myFlag, out status);
                if (myFlag != 0 && flag == 0)
                {
                    flag = myFlag;
                }
            }
        }
    }
    // Release resources before reporting errors so cleanup always happens.
    Cleanup();
    if (errorCode1 != Unsafe.MPI_SUCCESS)
    {
        throw Environment.TranslateErrorIntoException(errorCode1);
    }
    if (errorCode2 != Unsafe.MPI_SUCCESS)
    {
        throw Environment.TranslateErrorIntoException(errorCode2);
    }
    if (flag != 0)
    {
        cachedStatus = new CompletedStatus(status, 0);
    }
}
/// <summary>
/// Stores a raw pointer-sized value for this attribute directly on a
/// low-level communicator.
/// </summary>
/// <param name="comm">The communicator to modify.</param>
/// <param name="value">The new value.</param>
internal void SetIntPtr(MPI_Comm comm, IntPtr value)
{
    unsafe
    {
        int result = Unsafe.MPI_Attr_put(comm, keyval, value);
        if (result != Unsafe.MPI_SUCCESS)
        {
            throw Environment.TranslateErrorIntoException(result);
        }
    }
}
/// <summary>
/// Translates the ranks of processes in this group to the ranks of the same processes within a different group.
/// </summary>
/// <param name="ranks">The rank values in this group that will be translated.</param>
/// <param name="other">The group whose ranks we are translating to.</param>
/// <returns>
/// An array whose i-th element is the rank in <paramref name="other"/> of the
/// process that has rank <paramref name="ranks"/>[i] in this group, or
/// <see cref="Group.NoProcess"/> when that process is not a member of
/// <paramref name="other"/>.
/// </returns>
public int[] TranslateRanks(int[] ranks, Group other)
{
    int[] translated = new int[ranks.Length];
    unsafe
    {
        int result = Unsafe.MPI_Group_translate_ranks(group, ranks.Length, ranks, other.group, translated);
        if (result != Unsafe.MPI_SUCCESS)
        {
            throw Environment.TranslateErrorIntoException(result);
        }
    }
    return translated;
}
/// <summary>
/// Blocks until both parts (header and body) of this serialized message complete.
/// </summary>
/// <returns>The completed status for this request.</returns>
public override CompletedStatus Wait()
{
    if (cachedStatus != null)
    {
        return(cachedStatus);
    }
    Unsafe.MPI_Status status;
    unsafe
    {
        if (requests.header == Unsafe.MPI_REQUEST_NULL)
        {
            // Only the body transfer is still outstanding.
            // Wait until the request completes
            int errorCode = Unsafe.MPI_Wait(ref requests.body, out status);
            if (errorCode != Unsafe.MPI_SUCCESS)
            {
                throw Environment.TranslateErrorIntoException(errorCode);
            }
        }
        else if (requests.body == Unsafe.MPI_REQUEST_NULL)
        {
            // Only the header transfer is still outstanding.
            // Wait until the request completes
            int errorCode = Unsafe.MPI_Wait(ref requests.header, out status);
            if (errorCode != Unsafe.MPI_SUCCESS)
            {
                throw Environment.TranslateErrorIntoException(errorCode);
            }
        }
        else
        {
            // Wait until both requests complete
            // NOTE(review): &requests.body is treated as a 2-element
            // MPI_Request array, which assumes body and header are adjacent
            // fields in the requests struct — confirm its layout.
            Unsafe.MPI_Status[] statuses = new Unsafe.MPI_Status[2];
            fixed(MPI_Request *requestsPtr = &requests.body)
            {
                int errorCode = Unsafe.MPI_Waitall(2, &requestsPtr[0], statuses);
                if (errorCode != Unsafe.MPI_SUCCESS)
                {
                    throw Environment.TranslateErrorIntoException(errorCode);
                }
                status = statuses[0];
            }
        }
    }
    Cleanup();
    cachedStatus = new CompletedStatus(status, this.count);
    return(cachedStatus);
}
/// <summary>
/// Compare two MPI groups.
/// </summary>
/// <param name="other">The group to compare this group against.</param>
/// <returns>
/// One of the following values:
/// <list>
/// <listheader>
/// <term>Value</term>
/// <description>Description</description>
/// </listheader>
/// <item>
/// <term><see cref="Comparison.Identical"/></term>
/// <description>The two <c>Group</c> objects represent the same group.</description>
/// </item>
/// <item>
/// <term><see cref="Comparison.Congruent"/></term>
/// <description>
/// The two <c>Group</c> objects contain the same processes with the same ranks,
/// but represent different groups.
/// </description>
/// </item>
/// <item>
/// <term><see cref="Comparison.Similar"/></term>
/// <description>
/// The two <c>Group</c> objects contain the same processes, but with different ranks.
/// </description>
/// </item>
/// <item>
/// <term><see cref="Comparison.Unequal"/></term>
/// <description>The two <c>Group</c> objects are different.</description>
/// </item>
/// </list>
/// </returns>
public Comparison Compare(Group other)
{
    int result;
    unsafe
    {
        int errorCode = Unsafe.MPI_Group_compare(group, other.group, out result);
        if (errorCode != Unsafe.MPI_SUCCESS)
        {
            throw Environment.TranslateErrorIntoException(errorCode);
        }
    }
    // Map the raw MPI comparison code onto the managed enumeration.
    return(Unsafe.ComparisonFromInt(result));
}
/// <summary>
/// Free the MPI operation that this object wraps, but only if it is not a
/// predefined MPI operation.
/// </summary>
public void Dispose()
{
    // A non-null wrapper delegate means the MPI_Op was created by us
    // (predefined operations have no wrapper and must not be freed).
    if (wrapperDelegate == null)
    {
        return;
    }

    unsafe
    {
        int result = Unsafe.MPI_Op_free(ref mpiOp);
        if (result != Unsafe.MPI_SUCCESS)
        {
            throw Environment.TranslateErrorIntoException(result);
        }
    }
}
/// <summary>
/// Suggests a size for each dimension for a Cartesian topology, given the number of nodes and dimensions.
/// </summary>
/// <param name="nnodes">
/// The number of nodes in the grid.
/// </param>
/// <param name="ndims">
/// The number of dimensions.
/// </param>
/// <param name="dims">
/// An array of size <paramref name="ndims"/> to store the suggested sizes. Nonzero entries will be
/// taken as given sizes.
/// </param>
public static void ComputeDimensions(int nnodes, int ndims, ref int[] dims)
{
    unsafe
    {
        // Pin the array so MPI can fill in the suggested sizes in place.
        fixed(int *sizes = dims)
        {
            int result = Unsafe.MPI_Dims_create(nnodes, ndims, sizes);
            if (result != Unsafe.MPI_SUCCESS)
            {
                throw Environment.TranslateErrorIntoException(result);
            }
        }
    }
}
/// <summary>
/// Finds the source and destination ranks necessary to shift data along the grid.
/// </summary>
/// <param name="direction">
/// The dimension in which to shift.
/// </param>
/// <param name="disp">
/// The distance along the grid to shift. Positive values indicate upward shifts,
/// negative values downward shifts.
/// </param>
/// <param name="rank_source">
/// Will contain the rank of the source process.
/// </param>
/// <param name="rank_dest">
/// Will contain the rank of the destination process.
/// </param>
public void Shift(int direction, int disp, out int rank_source, out int rank_dest)
{
    unsafe
    {
        // Pin the out parameters so the native call can write into them.
        fixed(int *srcPtr = &rank_source, destPtr = &rank_dest)
        {
            int result = Unsafe.MPI_Cart_shift(comm, direction, disp, srcPtr, destPtr);
            if (result != Unsafe.MPI_SUCCESS)
            {
                throw Environment.TranslateErrorIntoException(result);
            }
        }
    }
}
/// <summary>
/// Initiates a non-blocking receive of an array of value-type elements
/// directly into the caller-supplied array.
/// </summary>
/// <param name="comm">The low-level communicator to receive on.</param>
/// <param name="source">The rank of the sending process.</param>
/// <param name="tag">The message tag.</param>
/// <param name="array">The destination array; filled in when the receive completes.</param>
internal ValueArrayReceiveRequest(MPI_Comm comm, int source, int tag, T[] array)
{
    this.cachedStatus = null;
    this.array = array;
    // Pin the destination array so MPI can write directly into it.
    handle = GCHandle.Alloc(array, GCHandleType.Pinned);
    // Initiate the non-blocking receive into the pinned array
    int errorCode = Unsafe.MPI_Irecv(handle.AddrOfPinnedObject(), array.Length, FastDatatypeCache <T> .datatype, source, tag, comm, out request);
    if (errorCode != Unsafe.MPI_SUCCESS)
    {
        // The receive never started; unpin before propagating the error.
        handle.Free();
        throw Environment.TranslateErrorIntoException(errorCode);
    }
}
/// <summary>
/// Initiate an MPI non-blocking send operation managed by this Request object.
/// </summary>
/// <param name="comm">The communicator over which the initial message will be sent.</param>
/// <param name="dest">The destination rank for this message.</param>
/// <param name="tag">The message tag.</param>
/// <param name="buffer">The bytes that should be transmitted. Must not be modified by the caller.</param>
/// <param name="byteCount">The number of bytes from the beginning of buffer that should be transmitted.</param>
/// <param name="count">The number of serialized objects stored in <paramref name="buffer"/>.</param>
internal SerializedSendRequest(Communicator comm, int dest, int tag, byte[] buffer, int byteCount, int count)
{
    this.count = count;
    this.cachedStatus = null;
    // Create the message header containing the size of the serialized data
    this.headerObj = byteCount;
    // Pin down this object and initiate the send of the length
    headerHandle = GCHandle.Alloc(headerObj, GCHandleType.Pinned);
    int errorCode;
    unsafe
    {
        errorCode = Unsafe.MPI_Isend(headerHandle.AddrOfPinnedObject(), 1, FastDatatypeCache <int> .datatype, dest, tag, comm.comm, out requests.header);
    }
    if (errorCode != Unsafe.MPI_SUCCESS)
    {
        // Header send never started; unpin and propagate.
        headerHandle.Free();
        throw Environment.TranslateErrorIntoException(errorCode);
    }
    if (byteCount > 0)
    {
        // Pin the payload so MPI can read it while the send is in flight.
        this.bufferHandle = GCHandle.Alloc(buffer, GCHandleType.Pinned);
        // Initiate a send of the serialized data
        unsafe
        {
            errorCode = Unsafe.MPI_Isend(bufferHandle.AddrOfPinnedObject(), byteCount, Unsafe.MPI_BYTE, dest, tag, comm.comm, out requests.body);
        }
        if (errorCode != Unsafe.MPI_SUCCESS)
        {
            // Body send failed: roll back by cancelling the already-started
            // header send (best effort — its return code is deliberately
            // ignored) and unpinning both buffers.
            Unsafe.MPI_Cancel(ref requests.header);
            headerHandle.Free();
            bufferHandle.Free();
            throw Environment.TranslateErrorIntoException(errorCode);
        }
    }
    else
    {
        // Nothing to send beyond the header; mark the body slot as unused.
        requests.body = Unsafe.MPI_REQUEST_NULL;
    }
}
/// <summary>
/// Initiates a non-blocking receive of a single value of type T.
/// </summary>
/// <param name="comm">The low-level communicator to receive on.</param>
/// <param name="source">The rank of the sending process.</param>
/// <param name="tag">The message tag.</param>
/// <param name="action">Optional callback invoked with the received value on completion.</param>
internal ValueReceiveRequest(MPI_Comm comm, int source, int tag, Action <T> action = null)
{
    this.action = action;
    this.cachedStatus = null;
    this.value = default(T);
    // Pin the value so MPI can write the incoming datum directly into it.
    // NOTE(review): this pins the object currently referenced by "value";
    // presumably "value" holds a boxed T that remains the receive target
    // for the lifetime of the request — confirm against the field's type.
    handle = GCHandle.Alloc(value, GCHandleType.Pinned);
    unsafe
    {
        // Initiate the non-blocking receive into "value"
        int errorCode = Unsafe.MPI_Irecv(handle.AddrOfPinnedObject(), 1, FastDatatypeCache <T> .datatype, source, tag, comm, out request);
        if (errorCode != Unsafe.MPI_SUCCESS)
        {
            // The receive never started; unpin before propagating the error.
            handle.Free();
            throw Environment.TranslateErrorIntoException(errorCode);
        }
    }
}
/// <summary>
/// Returns a process' rank given its coordinates in the CartesianCommunicator's grid.
/// </summary>
/// <param name="coords">
/// An integer array specifying the processes' coordinates.
/// </param>
/// <returns>
/// The processes' rank in the communicator.
/// </returns>
public int GetCartesianRank(int[] coords)
{
    int rank;
    unsafe
    {
        // Pin the coordinate array for the native lookup.
        fixed(int *coordinates = coords)
        {
            int result = Unsafe.MPI_Cart_rank(comm, coordinates, &rank);
            if (result != Unsafe.MPI_SUCCESS)
            {
                throw Environment.TranslateErrorIntoException(result);
            }
        }
    }
    return rank;
}
/// <summary>
/// Free the low-level attribute key associated with this attribute.
/// </summary>
public override void Dispose()
{
    if (keyval == Unsafe.MPI_KEYVAL_INVALID)
    {
        // Already released; nothing to do.
        return;
    }

    // Only call into MPI while it is still alive; after finalization the
    // keyval cannot be freed through MPI.
    if (!Environment.Finalized)
    {
        unsafe
        {
            int result = Unsafe.MPI_Keyval_free(ref keyval);
            if (result != Unsafe.MPI_SUCCESS)
            {
                throw Environment.TranslateErrorIntoException(result);
            }
        }
    }
    keyval = Unsafe.MPI_KEYVAL_INVALID;
}
/// <summary>
/// Provides the coordinates in the communicator's grid of a process, given its rank.
/// </summary>
/// <param name="rank">
/// The processes' rank in the communicator.
/// </param>
/// <returns>
/// An array of ints giving the coordinates in each dimension.
/// </returns>
public int[] GetCartesianCoordinates(int rank)
{
    int dimensionCount = NDims;
    int[] result = new int[dimensionCount];
    unsafe
    {
        // Pin the result array so MPI can fill it in directly.
        fixed(int *resultPtr = result)
        {
            int errorCode = Unsafe.MPI_Cart_coords(comm, rank, dimensionCount, resultPtr);
            if (errorCode != Unsafe.MPI_SUCCESS)
            {
                throw Environment.TranslateErrorIntoException(errorCode);
            }
        }
    }
    return result;
}
/// <summary>
/// Construct a lower dimensional subgrid from an existing CartesianCommunicator.
/// </summary>
/// <param name="remain_dims">
/// Logical array with an entry for each dimension indicating whether a dimension
/// should be kept or dropped in the new communicator.
/// </param>
/// <returns>
/// The new lower-dimensional communicator.
/// </returns>
public CartesianCommunicator Subgrid(int[] remain_dims)
{
    MPI_Comm subComm;
    unsafe
    {
        // Pin the keep/drop flags for the native call.
        fixed(int *keepPtr = remain_dims)
        {
            int result = Unsafe.MPI_Cart_sub(comm, keepPtr, &subComm);
            if (result != Unsafe.MPI_SUCCESS)
            {
                throw Environment.TranslateErrorIntoException(result);
            }
        }
    }
    // Wrap the new low-level communicator in a managed object.
    return (CartesianCommunicator)Communicator.Adopt(subComm);
}
/// <summary>
/// Determine the number of elements transmitted by the communication
/// operation associated with this object.
/// </summary>
/// <param name="type">
/// The type of data that will be stored in the message.
/// </param>
/// <returns>
/// If the type of the data is a value type, returns the number
/// of elements in the message. Otherwise, returns <c>null</c>,
/// because the number of elements stored in the message won't
/// be known until the message is received.
/// </returns>
public int? Count(Type type)
{
    MPI_Datatype datatype = DatatypeCache.GetDatatype(type);
    if (datatype != Unsafe.MPI_DATATYPE_NULL)
    {
        // Value type with a known MPI datatype: ask MPI for the element count.
        int count;
        unsafe
        {
            int errorCode = Unsafe.MPI_Get_count(ref status, datatype, out count);
            if (errorCode != Unsafe.MPI_SUCCESS)
            {
                throw Environment.TranslateErrorIntoException(errorCode);
            }
        }
        return(count);
    }
    // Serialized (reference) type: the count is unknown until receipt.
    return(null);
}
/// <summary>
/// Returns a recommended configuration for a new Cartesian grid.
/// </summary>
/// <param name="oldcomm">
/// The existing communicator.
/// </param>
/// <param name="ndims">
/// The number of dimensions for the Cartesian grid.
/// </param>
/// <param name="dims">
/// An array of length <paramref name="ndims"/> indicating the size of the grid in each dimension.
/// </param>
/// <param name="periods">
/// A logical array of length <paramref name="ndims"/> indicating whether the grid is periodic in any given dimension.
/// </param>
/// <returns>
/// The new rank of the calling process.
/// </returns>
public static int Map(Intracommunicator oldcomm, int ndims, int[] dims, bool[] periods)
{
    // MPI expects C-style int flags, not bools.
    int[] periodFlags = BoolToInt(periods);
    int newRank;
    unsafe
    {
        fixed(int *dimSizes = dims, periodPtr = periodFlags)
        {
            int result = Unsafe.MPI_Cart_map(oldcomm.comm, ndims, dimSizes, periodPtr, out newRank);
            if (result != Unsafe.MPI_SUCCESS)
            {
                throw Environment.TranslateErrorIntoException(result);
            }
        }
    }
    return newRank;
}
/// <summary>
/// Non-blocking completion test; on completion, also queries the number of
/// elements received.
/// </summary>
/// <returns>The completed status, or <c>null</c> if the request is still pending.</returns>
public override CompletedStatus Test()
{
    if (cachedStatus != null)
    {
        return(cachedStatus);
    }
    int flag;
    Unsafe.MPI_Status status;
    int count = 0;
    int errorCode;
    unsafe
    {
        errorCode = Unsafe.MPI_Test(ref request, out flag, out status);
        if (errorCode != Unsafe.MPI_SUCCESS)
        {
            throw Environment.TranslateErrorIntoException(errorCode);
        }
        if (flag != 0)
        {
            // Completed: find out how many elements of T actually arrived.
            errorCode = Unsafe.MPI_Get_count(ref status, FastDatatypeCache <T> .datatype, out count);
        }
    }
    if (flag == 0)
    {
        return(null);
    }
    else
    {
        // Cleanup() runs before the Get_count error check so resources are
        // released even when Get_count failed.
        Cleanup();
        if (errorCode != Unsafe.MPI_SUCCESS)
        {
            throw Environment.TranslateErrorIntoException(errorCode);
        }
        cachedStatus = new CompletedStatus(status, count);
        return(cachedStatus);
    }
}