/// <summary>
/// initiates the send/receive - processes and returns immediately;
/// Every call of this method must be matched by a later call to <see cref="TransceiveFinish"/>;
/// </summary>
public void TransceiveStartImReturn() {
    ilPSP.MPICollectiveWatchDog.Watch(csMPI.Raw._COMM.WORLD);
    var Para = m_master.iParallel;
    int[] targets = Para.ProcessesToSendTo;
    int myRank;
    csMPI.Raw.Comm_Rank(csMPI.Raw._COMM.WORLD, out myRank);

    // reset request handles from any previous exchange
    Array.Clear(this.rqst, 0, this.rqst.Length);

    unsafe {
        // Sending ...
        // -----------

        // over all processes to which we have to send data to ...
        for (int iDest = 0; iDest < targets.Length; iDest++) {

            // destination processor and its send list
            int pDest = targets[iDest];
            int[] commList = Para.SendCommLists[pDest];

            // gather the entries of all cells in the send list into one
            // contiguous buffer
            double[] buffer = SendBuffers[iDest];
            int writePos = 0;
            foreach (int jCell in commList) {
                int N = m_map.GetLength(jCell);
                int i0 = m_map.LocalUniqueIndex(0, jCell, 0);
                for (int n = 0; n < N; n++) {
                    buffer[writePos] = this.m_vector[i0 + n];
                    writePos++;
                }
            }
            Debug.Assert(writePos == SendBuffers[iDest].Length);

            // pin the buffer so the GC cannot move it while MPI reads it,
            // then start the non-blocking synchronous send
            SendBufferPin[iDest] = GCHandle.Alloc(SendBuffers[iDest], GCHandleType.Pinned);
            csMPI.Raw.Issend(
                Marshal.UnsafeAddrOfPinnedArrayElement(SendBuffers[iDest], 0),
                SendBuffers[iDest].Length,
                csMPI.Raw._DATATYPE.DOUBLE,
                pDest,
                4442 + myRank,
                csMPI.Raw._COMM.WORLD,
                out rqst[iDest]);
        }
    }
}
/// <summary>
/// ctor.
/// </summary>
/// <param name="map">
/// mapping which defines the parallel partition; its local length must match
/// the length of <paramref name="vector"/>.
/// </param>
/// <param name="vector">
/// the locally stored part of the distributed vector to exchange.
/// </param>
public MPIexchange(MultigridMapping map, T vector) {
    // misc init
    // =========
    IGridData master = map.AggGrid;
    if (vector.Count != map.LocalLength) {
        throw new ArgumentException("wrong length of input vector.");
    }
    m_vector = vector;
    m_master = master;
    m_map = map;

    var Para = m_master.iParallel;
    var rvcProc = Para.ProcessesToReceiveFrom;
    var sndProc = Para.ProcessesToSendTo;
    // one MPI request per send plus one per receive, consumed by the
    // Transceive* methods
    rqst = new MPI_Request[sndProc.Length + rvcProc.Length];

    // allocate send buffers
    // =====================
    {
        SendBuffers = new double[sndProc.Length][];
        for (int i = 0; i < SendBuffers.Length; i++) {
            int p = sndProc[i];

            // compute length of send list
            int L = 0;
            foreach (int jCell in Para.SendCommLists[p]) {
                Debug.Assert(map.IsLocalBlock(jCell + map.FirstBlock));
                Debug.Assert(map.GetLength(jCell) == map.GetBlockLen(jCell + map.FirstBlock));
                L += map.GetLength(jCell);
            }

            // alloc send buffer
            SendBuffers[i] = new double[L];
        }
        SendBufferPin = new GCHandle[sndProc.Length];
    }

    // allocate receive buffers
    // ========================
    {
        int totL = 0;
        RcvBuffer = new double[rvcProc.Length][];
        for (int i = 0; i < RcvBuffer.Length; i++) {
            int p = rvcProc[i];

            // compute length of receive list; external cells for rank p are
            // stored in the contiguous index range [J0, JE)
            int L = 0;
            int J0 = Para.RcvCommListsInsertIndex[p];
            int JE = Para.RcvCommListsNoOfItems[p] + J0;
            for (int jCell = J0; jCell < JE; jCell++) {
                Debug.Assert(jCell >= map.LocalNoOfBlocks);
                L += map.GetLength(jCell);
            }
            totL += L;

            // alloc internal receive buffer
            RcvBuffer[i] = new double[L];
        }
        RcvBufferPin = new GCHandle[RcvBuffer.Length];
        // one flat storage for all external (ghost) entries
        m_Vector_Ext = new double[totL];
    }
}
/// <summary>
/// initiates the send/receive - processes and returns immediately;
/// Every call of this method must be matched by a later call to <see cref="TransceiveFinish"/>;
/// </summary>
public void TransceiveStartImReturn() {
    ilPSP.MPICollectiveWatchDog.Watch(csMPI.Raw._COMM.WORLD);
    var Para = m_master.iParallel;

    // Reverse direction of the standard exchange: data stored in external
    // cells is shipped back to the ranks it is normally received from,
    // hence the send targets are taken from ProcessesToReceiveFrom.
    int[] targets = Para.ProcessesToReceiveFrom;

    int myRank;
    csMPI.Raw.Comm_Rank(csMPI.Raw._COMM.WORLD, out myRank);

    // reset request handles from any previous exchange
    Array.Clear(this.rqst, 0, this.rqst.Length);

    unsafe {
        // Sending ...
        // -----------

        // over all processes to which we have to send data to ...
        for (int iDest = 0; iDest < targets.Length; iDest++) {

            // destination processor and its external-cell index range
            int pDest = targets[iDest];
            int J0 = Para.RcvCommListsInsertIndex[pDest];
            int JE = Para.RcvCommListsNoOfItems[pDest] + J0;

            // copy the external-cell entries into one contiguous send buffer;
            // indices into m_Vector_Ext are shifted by the local vector length
            double[] buffer = SendBuffers[iDest];
            int extOffset = m_map.LocalLength;
            int writePos = 0;
            for (int jCell = J0; jCell < JE; jCell++) {
                Debug.Assert(jCell >= m_master.iLogicalCells.NoOfLocalUpdatedCells);
                int N = m_map.GetLength(jCell);
                int i0 = m_map.LocalUniqueIndex(0, jCell, 0);
                for (int n = 0; n < N; n++) {
                    buffer[writePos] = this.m_Vector_Ext[i0 + n - extOffset];
                    writePos++;
                }
            }
            Debug.Assert(writePos == SendBuffers[iDest].Length);

            // pin the buffer so the GC cannot move it while MPI reads it,
            // then start the non-blocking synchronous send
            SendBufferPin[iDest] = GCHandle.Alloc(SendBuffers[iDest], GCHandleType.Pinned);
            csMPI.Raw.Issend(
                Marshal.UnsafeAddrOfPinnedArrayElement(SendBuffers[iDest], 0),
                SendBuffers[iDest].Length,
                csMPI.Raw._DATATYPE.DOUBLE,
                pDest,
                4442 + myRank,
                csMPI.Raw._COMM.WORLD,
                out rqst[iDest]);
        }
    }
}