Example #1
        /// <summary>
        /// ctor
        /// </summary>
        /// <param name="_MPI_comm">the MPI communicator for this messenger (see <see cref="MPI_comm"/>);</param>
        public SerialisationMessenger(MPI_Comm _MPI_comm)
        {
            MPICollectiveWatchDog.Watch(_MPI_comm);


            m_MPI_comm = _MPI_comm;
            csMPI.Raw.Comm_Rank(_MPI_comm, out m_Rank);
            csMPI.Raw.Comm_Size(_MPI_comm, out m_Size);

            m_MyCommPaths        = new byte[m_Size];
            m_AllCommPaths       = new byte[m_Size, m_Size];
            m_ReceiveObjectSizes = new int[m_Size];
            m_SendObjectSizes    = new int[m_Size];
            m_Requests           = new MPI_Request[m_Size * 4];
            m_TransmittCalled    = new BitArray(m_Size, false);

            unsafe {
                // reserve a tag range from the static counter; the value drawn on
                // rank 0 is broadcast below so that all ranks use the same offset
                int myTag = TagCnt;
                TagCnt += 13;

                if (m_Rank == 0)
                {
                    m_MyTagOffset = myTag;
                }

                csMPI.Raw.Bcast((IntPtr)(&myTag), 1, csMPI.Raw._DATATYPE.INT, 0, m_MPI_comm);

                if (m_Rank > 0)
                {
                    m_MyTagOffset = myTag;
                }
            }
        }
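For orientation, a minimal usage sketch based on the commented-out lines in Example #3 (construction on MPI_COMM_WORLD and the SetCommPathsAndCommit call are taken from there; the variable master and any subsequent transmit/receive calls are assumptions, since they are not part of these snippets):

        // hypothetical usage: create a messenger on the world communicator and declare
        // the set of ranks this process will send to, then commit the communication paths
        var sms = new SerialisationMessenger(csMPI.Raw._COMM.WORLD);
        sms.SetCommPathsAndCommit(master.iParallel.ProcessesToSendTo);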
Example #2
 /// <summary>
 /// Initialize a single request for a non-blocking send that has already been initiated.
 /// </summary>
 /// <param name="request">The request object.</param>
 /// <param name="count">The number of elements transmitted in this communication.</param>
 /// <param name="handle">A handle pointing to pinned memory that will be unpinned after
 /// this operation completes.</param>
 internal ValueTypeSendRequest(MPI_Request request, int count, GCHandle handle)
 {
     this.request      = request;
     this.count        = count;
     this.handle       = handle;
     this.cachedStatus = null;
 }
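The stored GCHandle is what keeps the managed buffer pinned while the native MPI library still reads from it. A minimal sketch of the surrounding pattern (standard .NET pinning via System.Runtime.InteropServices; the MPI send call itself is omitted, because the wrapper used here is not shown in these snippets):

     // pin a managed array so the GC cannot relocate it, obtain its raw address,
     // and keep the handle so the buffer can be unpinned after the request completes
     int[] data = { 1, 2, 3 };
     GCHandle h = GCHandle.Alloc(data, GCHandleType.Pinned);
     IntPtr buf = h.AddrOfPinnedObject();   // address handed to the non-blocking MPI send
     // ... start the send on 'buf', store (request, data.Length, h) in a ValueTypeSendRequest ...
     h.Free();                              // unpin only after MPI reports completion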
Example #3
        /// <summary>
        /// ctor.
        /// </summary>
        /// <param name="master"></param>
        /// <param name="vector">a vector of length J*<paramref name="ItemsPerCell"/>, where J is the number of local cells (including ghost)</param>
        /// <param name="ItemsPerCell">the number of items that should be transmitted/received per cell</param>
        public VectorTransceiver(IGridData master, T vector, int ItemsPerCell)
        {
            CheckItemTypeRecursive(typeof(V));

            int J = master.iLogicalCells.Count;

            if (vector.Count != ItemsPerCell * J)
            {
                throw new ArgumentException("wrong length of input vector.");
            }

            m_ItemsPerCell = ItemsPerCell;
            m_vector       = vector;
            m_master       = master;

            //sms = new SerialisationMessenger(csMPI.Raw._COMM.WORLD);

            //// set comm. paths
            //sms.SetCommPathsAndCommit(master.Parallel.ProcessesToSendTo);

            var Para    = m_master.iParallel;
            var sndProc = Para.ProcessesToSendTo;
            var rvcProc = Para.ProcessesToReceiveFrom;

            SendBuffers = new V[sndProc.Length][];
            for (int i = 0; i < SendBuffers.Length; i++)
            {
                int p = sndProc[i];
                SendBuffers[i] = new V[Para.SendCommLists[p].Length * m_ItemsPerCell];
            }
            SendBufferPin = new GCHandle[sndProc.Length];
            rqst          = new MPI_Request[sndProc.Length + rvcProc.Length];
            staTussies    = new MPI_Status[rqst.Length];
            if (!(typeof(T).IsArray))
            {
                RcvBuffer = new V[rvcProc.Length][];
                for (int i = 0; i < RcvBuffer.Length; i++)
                {
                    int p = rvcProc[i];
                    RcvBuffer[i] = new V[Para.RcvCommListsNoOfItems[p] * m_ItemsPerCell];
                }
                RcvBufferPin = new GCHandle[RcvBuffer.Length];
            }
        }
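A sketch of how the send buffers allocated above are typically filled before transmission, assuming the cell-major layout stated in the doc comment (J cells times ItemsPerCell items) and that T exposes an indexer (e.g. IList&lt;V&gt;); the loop is illustrative and not taken from the actual transceive code:

        // copy the items of every cell in the send list of process p into the
        // contiguous send buffer allocated for that process
        for (int i = 0; i < sndProc.Length; i++) {
            int[] commList = Para.SendCommLists[sndProc[i]];
            for (int k = 0; k < commList.Length; k++) {
                int jCell = commList[k];
                for (int n = 0; n < m_ItemsPerCell; n++)
                    SendBuffers[i][k * m_ItemsPerCell + n] = m_vector[jCell * m_ItemsPerCell + n];
            }
        }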
Example #4
        /// <summary>
        /// ctor
        /// </summary>
        /// <param name="comm"></param>
        //// <typeparam name="t">the primitive type of the message; must be a value-type;</typeparam>
        public Many2ManyMessenger(MPI.Wrappers.MPI_Comm comm)
        {
            m_Comm = comm;
            MPI.Wrappers.csMPI.Raw.Comm_Size(m_Comm, out size);
            MPI.Wrappers.csMPI.Raw.Comm_Rank(m_Comm, out MyRank);

            m_MyCommPaths  = new int[size];
            m_AllCommPaths = new int[size, size];

            m_ReceiveBuffers = new Buffer[size];
            m_SendBuffers    = new Buffer[size];

            m_Requests        = new MPI_Request[size * 2];
            m_ArrayOfStatuses = new MPI_Status[m_Requests.Length];


            // check item type
            m_ItemType = typeof(ItemType);
            CheckItemTypeRecursive(m_ItemType);
            m_ItemSize = Marshal.SizeOf(m_ItemType);
        }
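CheckItemTypeRecursive itself is not among these snippets; a minimal sketch of what such a check plausibly does (an assumption, not the actual implementation): it verifies that the item type and all of its fields are value types, so that Marshal.SizeOf and raw byte transfers are well defined. Requires System and System.Reflection.

        // hypothetical sketch of a recursive value-type check
        static void CheckItemTypeRecursive(Type t) {
            if (!t.IsValueType)
                throw new NotSupportedException("type '" + t.Name + "' contains reference-type data and cannot be sent as raw bytes.");
            if (t.IsPrimitive || t.IsEnum)
                return;   // primitives and enums are trivially transmittable
            foreach (FieldInfo f in t.GetFields(BindingFlags.Instance | BindingFlags.Public | BindingFlags.NonPublic))
                CheckItemTypeRecursive(f.FieldType);   // every field must itself pass the check
        }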
Example #5
        /// <summary>
        /// constructor
        /// </summary>
        /// <param name="TRXfields">
        /// fields which should be exchanged
        /// </param>
        public Transceiver(ICollection <DGField> TRXfields)
        {
            ilPSP.MPICollectiveWatchDog.Watch(csMPI.Raw._COMM.WORLD);

            csMPI.Raw.Comm_Rank(csMPI.Raw._COMM.WORLD, out m_MyRank);
            csMPI.Raw.Comm_Size(csMPI.Raw._COMM.WORLD, out m_Size);


            var grd = TRXfields.First().GridDat;

            foreach (var x in TRXfields)
            {
                if (!object.ReferenceEquals(x.GridDat, grd))
                {
                    throw new ArgumentException("all fields must be assigned to the same grid.");
                }
            }

            this.m_parallel = grd.iParallel;


            if (m_Size == 1)
            {
                return;              // nothing to communicate
            }
            // alloc some arrays
            // =================

            m_ReceiveBuffers   = new double[m_Size][];
            m_SendBuffers      = new double[m_Size][];
            m_ReceiveBufferPin = new GCHandle[m_parallel.ProcessesToReceiveFrom.Length];
            m_SendBufferPin    = new GCHandle[m_parallel.ProcessesToSendTo.Length];
            m_Requests         = new MPI_Request[m_parallel.ProcessesToSendTo.Length + m_parallel.ProcessesToReceiveFrom.Length];

            // collect fields
            // ==============

            //m_NumberOfItemsPerCell = 0;
            foreach (DGField f in TRXfields)
            {
                if (m_TRXFields.Contains(f))
                {
                    continue;
                }
                //throw new ArgumentException("each field can occur only once.");

                m_TRXFields.Add(f);
                //m_NumberOfItemsPerCell += f.NoOfCoordinatesPerCell;
            }

            //// compute offset indices
            //// ======================

            //m_Offset = new int[master.Size][];

            //for( int p = 0; p < master.Size; p++) {
            //    int[] commList = master.m_SendCommLists[p];

            //    if (commList != null) {
            //        m_Offset[p] = new int[m_TRXFields.Count];

            //        int offcur = 0;
            //        for (int i = 0; i < m_TRXFields.Count; i++) {
            //            m_Offset[p][i] = offcur;
            //            offcur += m_TRXFields[i].NoOfCoordinatesPerCell*commList.Length;
            //        }
            //    }
            //}
        }
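The commented-out block above indicates how the per-process buffer sizes are derived: each field contributes NoOfCoordinatesPerCell entries for every cell in the send list of the target process. As a compact sketch (the property and field names follow the code above; it is illustrative, not the actual allocation routine):

        // number of doubles to send to each target process: sum over all fields of
        // (coordinates per cell) x (cells in the send list for that process)
        foreach (int p in m_parallel.ProcessesToSendTo) {
            int[] commList = m_parallel.SendCommLists[p];
            int totalLen = 0;
            foreach (DGField f in m_TRXFields)
                totalLen += f.NoOfCoordinatesPerCell * commList.Length;
            m_SendBuffers[p] = new double[totalLen];
        }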
Example #6
        /// <summary>
        /// ctor.
        /// </summary>
        public MPIexchange(MultigridMapping map, T vector)
        {
            // misc init
            // =========

            IGridData master = map.AggGrid;
            int       J      = master.iLogicalCells.Count;

            if (vector.Count != map.LocalLength)
            {
                throw new ArgumentException("wrong length of input vector.");
            }


            m_vector = vector;
            m_master = master;
            m_map    = map;

            var Para    = m_master.iParallel;
            var rvcProc = Para.ProcessesToReceiveFrom;
            var sndProc = Para.ProcessesToSendTo;

            rqst = new MPI_Request[sndProc.Length + rvcProc.Length];

            // allocate send buffers
            // =====================
            {
                SendBuffers = new double[sndProc.Length][];
                for (int i = 0; i < SendBuffers.Length; i++)
                {
                    int p = sndProc[i];

                    // compute length of send list
                    int L = 0;
                    foreach (int jCell in Para.SendCommLists[p])
                    {
                        Debug.Assert(map.IsLocalBlock(jCell + map.FirstBlock));
                        Debug.Assert(map.GetLength(jCell) == map.GetBlockLen(jCell + map.FirstBlock));
                        L += map.GetLength(jCell);
                    }

                    // alloc send buffer
                    SendBuffers[i] = new double[L];
                }
                SendBufferPin = new GCHandle[sndProc.Length];
            }

            // allocate receive buffers
            // ========================

            {
                int totL = 0;
                RcvBuffer = new double[rvcProc.Length][];
                for (int i = 0; i < RcvBuffer.Length; i++)
                {
                    int p = rvcProc[i];

                    // compute length of receive list
                    int L  = 0;
                    int J0 = Para.RcvCommListsInsertIndex[p];
                    int JE = Para.RcvCommListsNoOfItems[p] + J0;
                    for (int jCell = J0; jCell < JE; jCell++)
                    {
                        Debug.Assert(jCell >= map.LocalNoOfBlocks);
                        L += map.GetLength(jCell);
                    }
                    totL += L;

                    // alloc internal receive buffer
                    RcvBuffer[i] = new double[L];
                }
                RcvBufferPin = new GCHandle[RcvBuffer.Length];

                m_Vector_Ext = new double[totL];
            }
        }
Example #7
        /// <summary>
        /// ctor.
        /// </summary>
        public MPIexchange(MultigridMapping map, T vector)
        {
            // misc init
            // =========

            IGridData master = map.AggGrid;
            int       J      = master.iLogicalCells.Count;

            if (vector.Count != map.LocalLength)
            {
                throw new ArgumentException("wrong length of input vector.");
            }


            m_vector = vector;
            m_master = master;
            m_map    = map;

            var Para    = m_master.iParallel;
            var rvcProc = Para.ProcessesToReceiveFrom;
            var sndProc = Para.ProcessesToSendTo;

            rqst = new MPI_Request[sndProc.Length + rvcProc.Length];



            // allocate send buffers
            // =====================
            {
                SendBuffers = new double[sndProc.Length][];
                for (int i = 0; i < SendBuffers.Length; i++)
                {
                    int p = sndProc[i];

                    // compute length of send list
                    int L = 0;
                    foreach (int jCell in Para.SendCommLists[p])
                    {
                        Debug.Assert(map.IsLocalBlock(jCell + map.FirstBlock));
                        Debug.Assert(map.GetLength(jCell) == map.GetBlockLen(jCell + map.FirstBlock));
                        L += map.GetLength(jCell);
                    }

                    // alloc send buffer
                    SendBuffers[i] = new double[L];
                }
                SendBufferPin = new GCHandle[sndProc.Length];
            }


            // allocate receive buffers
            // ========================

            {
                int totL = 0;
                RcvBuffer = new double[rvcProc.Length][];
                for (int i = 0; i < RcvBuffer.Length; i++)
                {
                    int p = rvcProc[i];

                    // compute length of receive list
                    int L  = 0;
                    int J0 = Para.RcvCommListsInsertIndex[p];
                    int JE = Para.RcvCommListsNoOfItems[p] + J0;
                    for (int jCell = J0; jCell < JE; jCell++)
                    {
                        Debug.Assert(jCell >= map.LocalNoOfBlocks);
                        L += map.GetLength(jCell);
                    }
                    totL += L;

                    // alloc internal receive buffer
                    RcvBuffer[i] = new double[L];
                }
                RcvBufferPin = new GCHandle[RcvBuffer.Length];

                m_Vector_Ext = new double[totL];
            }

#if DEBUG
            // verify send and receive lengths
            // ===============================
            {
                var RcvSizes = new Dictionary <int, int[]>();


                for (int i = 0; i < SendBuffers.Length; i++)
                {
                    //Console.WriteLine("P{0}: to proc {1}: {2} items ", map.MpiRank, sndProc[i], SendBuffers[i].Length);

                    int source = map.MpiRank;
                    int target = sndProc[i];
                    int L      = SendBuffers[i].Length;

                    RcvSizes.Add(target, new[] { source, L });
                }


                var _RcvSizes = ilPSP.Utils.SerialisationMessenger.ExchangeData(RcvSizes);


                foreach (var kv in _RcvSizes)
                {
                    int source = kv.Key;

                    int sourceR = kv.Value[0];
                    int L       = kv.Value[1];

                    int idxSrs = Array.IndexOf(rvcProc, source);
                    Debug.Assert(idxSrs >= 0);
                    int _L = RcvBuffer[idxSrs].Length;

                    Debug.Assert(source == sourceR);
                    Debug.Assert(L == _L, "mismatch in receive buffer size on multigrid level" + map.AggGrid.MgLevel);
                }

                csMPI.Raw.Barrier(map.MPI_Comm);
            }
#endif
        }
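The constructor only allocates; a sketch of how these buffers are typically posted to MPI afterwards, following the call pattern of Example #8 and the field names above (the tag value 4442 and the exact placement inside the transceive methods are assumptions):

        // post non-blocking receives and sends on the pinned buffers
        for (int i = 0; i < rvcProc.Length; i++) {
            RcvBufferPin[i] = GCHandle.Alloc(RcvBuffer[i], GCHandleType.Pinned);
            csMPI.Raw.Irecv(RcvBufferPin[i].AddrOfPinnedObject(), RcvBuffer[i].Length,
                csMPI.Raw._DATATYPE.DOUBLE, rvcProc[i], 4442, map.MPI_Comm, out rqst[sndProc.Length + i]);
        }
        for (int i = 0; i < sndProc.Length; i++) {
            // ... fill SendBuffers[i] from the local part of the vector ...
            SendBufferPin[i] = GCHandle.Alloc(SendBuffers[i], GCHandleType.Pinned);
            csMPI.Raw.Issend(SendBufferPin[i].AddrOfPinnedObject(), SendBuffers[i].Length,
                csMPI.Raw._DATATYPE.DOUBLE, sndProc[i], 4442, map.MPI_Comm, out rqst[i]);
        }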
Example #8
        public void AccumulateGather(MultidimensionalArray A)
        {
            if (A.Dimension != 1)
            {
                throw new ArgumentException("expecting a one-dimensional array.");
            }
            int K = m_Basis.NoOfLocalNodes;

            if (A.GetLength(0) != K)
            {
                throw new ArgumentException("array length must match the number of local nodes.");
            }

            // for accumulate-gather, the role of send and insert lists is reversed!
            int[][] SendLists       = m_Basis.MPI_InsertLists;
            int[][] InsertLists     = m_Basis.MPI_SendLists;
            var     _SendBuffers    = this.ReceiveBuffers;
            var     _ReceiveBuffers = this.SendBuffers;


            int NoOf_PsendTo = _SendBuffers.Length;    // Number of processes to send to
            int NoOf_PrvcFrm = _ReceiveBuffers.Length; // Number of processes to receive from

            Debug.Assert(NoOf_PsendTo == SendLists.Where(sl => sl != null).Count());
            Debug.Assert(NoOf_PrvcFrm == InsertLists.Where(sl => sl != null).Count());


            MPI_Request[] req = new MPI_Request[NoOf_PsendTo + NoOf_PrvcFrm];

            // set up non-blocking receive
            // ===========================

            for (int i = 0; i < NoOf_PrvcFrm; i++)
            {
                Tuple <int, IntPtr> kv = _ReceiveBuffers[i];
                int    Rank            = kv.Item1;
                IntPtr Buffer          = kv.Item2;

                csMPI.Raw.Irecv(Buffer, InsertLists[Rank].Length, csMPI.Raw._DATATYPE.DOUBLE, Rank, 2341, csMPI.Raw._COMM.WORLD, out req[NoOf_PsendTo + i]);
            }


            // initiate sending
            // ================

            for (int i = 0; i < NoOf_PsendTo; i++)
            {
                Tuple <int, IntPtr> kv = _SendBuffers[i];
                int    Rank            = kv.Item1;
                IntPtr Buffer          = kv.Item2;

                int[] SndList = SendLists[Rank];

                unsafe {
                    int     L  = SndList.Length;
                    double *p0 = (double *)Buffer;

                    for (int l = 0; l < L; l++)
                    {
                        *p0 = A[SndList[l]];
                        p0++;
                    }
                }

                csMPI.Raw.Issend(Buffer, SndList.Length, csMPI.Raw._DATATYPE.DOUBLE, Rank, 2341, csMPI.Raw._COMM.WORLD, out req[i]);
            }


            // wait for operations to complete
            // ===============================

            int ReqCount = req.Length;

            while (ReqCount > 0)
            {
                int        index;
                MPI_Status status;
                csMPI.Raw.Waitany(NoOf_PsendTo + NoOf_PrvcFrm, req, out index, out status);
                ReqCount--;

                if (index < NoOf_PsendTo)
                {
                    // send finished
                    continue;
                }

                // else: receive finished
                Tuple <int, IntPtr> kv = _ReceiveBuffers[index - NoOf_PsendTo];
                int[] InsList          = InsertLists[kv.Item1];

                unsafe {
                    int     L  = InsList.Length;
                    double *p0 = (double *)kv.Item2;

                    for (int l = 0; l < L; l++)
                    {
                        A[InsList[l]] += *p0;
                        p0++;
                    }
                }
            }
        }
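A brief usage sketch (hypothetical; it assumes a 1D MultidimensionalArray factory as in ilPSP and that A is indexed by the local node numbering of m_Basis):

        // one entry per local node; fill with this rank's partial values, then let
        // AccumulateGather add the values received from other ranks onto the entries
        // addressed by the (reversed) insert lists
        MultidimensionalArray A = MultidimensionalArray.Create(m_Basis.NoOfLocalNodes);
        // ... accumulate local contributions into A ...
        AccumulateGather(A);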
Example #9
 public static unsafe extern int MPI_Irecv(IntPtr buf, int count, MPI_Datatype datatype, int source, int tag, MPI_Comm comm, out MPI_Request request);
Example #10
 public static unsafe extern int MPI_Testsome(int incount, MPI_Request[] array_of_requests, 
                                              out int outcount, int[] array_of_indices, MPI_Status[] array_of_statuses);
Example #11
 public static unsafe extern int MPI_Testall(int count, MPI_Request* array_of_requests, out int flag, MPI_Status[] array_of_statuses);
Example #12
 public static unsafe extern int MPI_Waitall(int count, MPI_Request* array_of_requests, MPI_Status[] array_of_statuses);
Example #13
 public static unsafe extern int MPI_Testany(int count, MPI_Request[] array_of_requests, out int index, out int flag, out MPI_Status status);
Example #14
 public static unsafe extern int MPI_Request_free(ref MPI_Request request);
Example #15
 public static unsafe extern int MPI_Test(ref MPI_Request request, out int flag, out MPI_Status status);
Example #16
 public static unsafe extern int MPI_Wait(ref MPI_Request request, out MPI_Status status);
Example #17
 public static unsafe extern int MPI_Cancel(ref MPI_Request request);
Example #18
 public static unsafe extern int MPI_Isend(IntPtr buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm, out MPI_Request request);
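Examples #9 through #18 are the raw P/Invoke signatures; in the full source they presumably carry a DllImport attribute on a wrapper class. A minimal, self-contained sketch of such a binding (the library name "msmpi.dll" and the class name NativeMpi are assumptions; the actual library differs between MPI implementations and is often resolved at runtime). Requires System and System.Runtime.InteropServices.

 // hypothetical binding sketch -- MS-MPI naming shown; MPICH/Open MPI use different library names
 internal static class NativeMpi
 {
     [DllImport("msmpi.dll", EntryPoint = "MPI_Wait")]
     public static extern int MPI_Wait(ref MPI_Request request, out MPI_Status status);

     [DllImport("msmpi.dll", EntryPoint = "MPI_Isend")]
     public static unsafe extern int MPI_Isend(IntPtr buf, int count, MPI_Datatype datatype,
                                               int dest, int tag, MPI_Comm comm, out MPI_Request request);
 }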