Example No. 1
        /// <summary>
        /// ctor.
        /// </summary>
        /// <param name="localsize">
        /// number of entries that should be stored in this MPI process
        /// </param>
        /// <param name="c">
        /// MPI communicator, <see cref="MPI_Comm"/>.
        /// </param>
        /// <param name="_BlockSize">
        /// See <see cref="BlockSize"/>.
        /// </param>
        public Partitioning(int localsize, MPI.Wrappers.MPI_Comm c)
        {
            m_comm = c;
            MPICollectiveWatchDog.Watch(c);
            csMPI.Raw.Comm_Rank(c, out m_rank);
            csMPI.Raw.Comm_Size(c, out m_size);

            m_LocalLengths = new int[m_size];
            int[] ll = { localsize };

            unsafe
            {
                fixed(void *pSndBuf = ll, pRcvBuf = m_LocalLengths)
                {
                    csMPI.Raw.Allgather((IntPtr)pSndBuf, 4, csMPI.Raw._DATATYPE.BYTE,
                                        (IntPtr)pRcvBuf, 4, csMPI.Raw._DATATYPE.BYTE,
                                        m_comm);
                }
            }

            m_i0Offset    = new int[m_size + 1];
            m_i0Offset[0] = 0;
            for (int i = 1; i <= m_size; i++)
            {
                long _i0oL = (long)(m_i0Offset[i - 1]) + (long)(m_LocalLengths[i - 1]);
                if (_i0oL > int.MaxValue)
                {
                    throw new OverflowException("Partition exceeds the range of 32-bit integer.");
                }
                m_i0Offset[i] = m_i0Offset[i - 1] + m_LocalLengths[i - 1];
            }
        }
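A minimal usage sketch for this constructor (my own example, not from the library's sources): it assumes MPI has already been initialized and that the ilPSP wrapper namespaces are imported; the per-rank size is hypothetical.

        // every rank passes its own local length; the constructor gathers all
        // local lengths over the communicator and accumulates the global offsets
        csMPI.Raw.Comm_Rank(csMPI.Raw._COMM.WORLD, out int myRank);
        int localsize = 1000 + myRank;                                // varies per rank
        var part = new Partitioning(localsize, csMPI.Raw._COMM.WORLD);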
Example No. 2
        /// <summary>
        /// ctor.
        /// </summary>
        /// <param name="localsize">
        /// number of entries that should be stored in this MPI process
        /// </param>
        /// <param name="c">
        /// MPI communicator, <see cref="MPI_Comm"/>.
        /// </param>
        /// <param name="_BlockSize">
        /// See <see cref="BlockSize"/>.
        /// </param>
        public Partitioning(int localsize, MPI.Wrappers.MPI_Comm c)
        {
            m_comm = c;
            MPICollectiveWatchDog.Watch(c);
            csMPI.Raw.Comm_Rank(c, out m_rank);
            csMPI.Raw.Comm_Size(c, out m_size);

            m_LocalLengths = new int[m_size];
            int[] ll = { localsize };

            unsafe
            {
                fixed(void *pSndBuf = ll, pRcvBuf = m_LocalLengths)
                {
                    csMPI.Raw.Allgather((IntPtr)pSndBuf, 4, csMPI.Raw._DATATYPE.BYTE,
                                        (IntPtr)pRcvBuf, 4, csMPI.Raw._DATATYPE.BYTE,
                                        m_comm);
                }
            }

            m_i0Offset    = new int[m_size + 1];
            m_i0Offset[0] = 0;
            for (int i = 1; i <= m_size; i++)
            {
                m_i0Offset[i] = m_i0Offset[i - 1] + m_LocalLengths[i - 1];
            }
        }
Example No. 3
 /// <summary>
 /// returns the minimum of each entry of <paramref name="i"/> on all MPI-processes in the
 /// <paramref name="comm"/>--communicator.
 /// </summary>
 static public int[] MPIMin(this int[] i, MPI_Comm comm)
 {
     int[] R = new int[i.Length];
     unsafe
     {
         fixed(int *loc = i, glob = R)
         {
             csMPI.Raw.Allreduce(((IntPtr)(loc)), ((IntPtr)(glob)), i.Length, csMPI.Raw._DATATYPE.INT, csMPI.Raw._OP.MIN, comm);
         }
     }
     return(R);
 }
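A short usage sketch (assuming MPI is initialized and the wrapper namespaces are imported; the data is hypothetical). The call is collective, so every rank in the communicator must execute it with an array of the same length:

        MPI_Comm comm = csMPI.Raw._COMM.WORLD;
        csMPI.Raw.Comm_Rank(comm, out int rank);
        int[] local = { rank, 100 - rank };       // per-rank data
        int[] globalMin = local.MPIMin(comm);     // element-wise minimum over all ranks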
Example No. 4
 /// <summary>
 /// returns the minimum of each entry of <paramref name="i"/> on all MPI-processes in the
 /// <paramref name="comm"/>--communicator.
 /// </summary>
 static public double[] MPIMin(this double[] i, MPI_Comm comm)
 {
     double[] R = new double[i.Length];
     unsafe
     {
         fixed(double *loc = i, glob = R)
         {
             csMPI.Raw.Allreduce(((IntPtr)(loc)), ((IntPtr)(glob)), i.Length, csMPI.Raw._DATATYPE.DOUBLE, csMPI.Raw._OP.MIN, comm);
         }
     }
     return(R);
 }
Example No. 5
        /// <summary>
        /// Returns, for each entry, the sum of <paramref name="A"/> on all MPI-processes in the
        /// <paramref name="comm"/>--communicator.
        /// </summary>
        static public int[] MPISum(this int[] A, MPI_Comm comm)
        {
            int[] S = new int[A.Length];
            unsafe
            {
                fixed(int *pA = A, pS = S)
                {
                    csMPI.Raw.Allreduce(((IntPtr)(pA)), ((IntPtr)(pS)), A.Length, csMPI.Raw._DATATYPE.INT, csMPI.Raw._OP.SUM, comm);
                }

                return(S);
            }
        }
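Like the other reductions above, this is a collective call; a sketch under the same assumptions (hypothetical data):

        MPI_Comm comm = csMPI.Raw._COMM.WORLD;
        csMPI.Raw.Comm_Rank(comm, out int rank);
        int[] counts = { rank, 1 };               // per-rank contribution
        int[] totals = counts.MPISum(comm);       // totals[1] equals the number of ranks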
Example No. 6
        /// <summary>
        /// Returns, for each entry, the sum of <paramref name="A"/> on all MPI-processes in the
        /// <paramref name="comm"/>--communicator.
        /// </summary>
        static public double[] MPISum(this double[] A, MPI_Comm comm)
        {
            double[] S = new double[A.Length];
            unsafe
            {
                fixed(double *pA = A, pS = S)
                {
                    csMPI.Raw.Allreduce(((IntPtr)(pA)), ((IntPtr)(pS)), A.Length, csMPI.Raw._DATATYPE.DOUBLE, csMPI.Raw._OP.SUM, comm);
                }

                return(S);
            }
        }
Example No. 7
 /// <summary>
 ///
 /// </summary>
 public byte[] Comm_f2c(MPI_Comm F_Comm)
 {
     byte[] ret = new byte[this.GetSizeof_C_MPI_comm()];
     unsafe {
         byte *pComm = (byte *)&(F_Comm.m1);
         for (int i = 0; i < ret.Length; i++)
         {
             ret[i] = *pComm;
             pComm++;
         }
     }
     return(ret);
 }
Example No. 8
        /// <summary>
        /// Wrapper around <see cref="IMPIdriver.Scatterv(IntPtr, IntPtr, IntPtr, MPI_Datatype, IntPtr, int, MPI_Datatype, int, MPI_Comm)"/>.
        /// </summary>
        static public int[] MPIScatterv(this int[] send, int[] sendcounts, int root, MPI_Comm comm)
        {
            csMPI.Raw.Comm_Size(comm, out int size);
            csMPI.Raw.Comm_Rank(comm, out int rank);
            int[] result = new int[Math.Max(1, sendcounts[rank])];

            unsafe {
                int *displs = stackalloc int[size];
                for (int i = 1; i < size; i++)
                {
                    displs[i] = displs[i - 1] + sendcounts[i - 1];
                }
                if (rank == root)
                {
                    if (send.Length < displs[size - 1] + sendcounts[size - 1])
                    {
                        throw new ArgumentException("Mismatch between send counts and send buffer size.");
                    }
                }

                //if (send == null || send.Length == 0) {
                //    // Dummy to avoid null pointer exception
                //    send = new int[1];
                //}

                fixed(int *pSend = send, pSendcounts = &sendcounts[0], pResult = &result[0])
                {
                    csMPI.Raw.Scatterv(
                        (IntPtr)pSend,
                        (IntPtr)pSendcounts,
                        (IntPtr)displs,
                        csMPI.Raw._DATATYPE.INT,
                        (IntPtr)pResult,
                        sendcounts[rank],
                        csMPI.Raw._DATATYPE.INT,
                        root,
                        comm);
                }
            }

            if (result.Length != sendcounts[rank])
            {
                Debug.Assert(result.Length == 1);
                Debug.Assert(sendcounts[rank] == 0);
                Array.Resize(ref result, 0);
            }

            return(result);
        }
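A sketch of how this wrapper might be called (my own example; the data and counts are hypothetical). Only the root rank needs a populated send buffer, but all ranks must pass the same sendcounts:

        MPI_Comm comm = csMPI.Raw._COMM.WORLD;
        csMPI.Raw.Comm_Size(comm, out int size);
        csMPI.Raw.Comm_Rank(comm, out int rank);

        int root = 0;
        int[] sendcounts = new int[size];
        for (int i = 0; i < size; i++)
            sendcounts[i] = 2;                    // two items for every rank

        int[] send = rank == root ? new int[2 * size] : new int[0];
        int[] myPart = send.MPIScatterv(sendcounts, root, comm);   // myPart.Length == 2 on every rank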
Example No. 9
        /// <summary>
        /// returns the maximum of <paramref name="i"/> on all MPI-processes in the
        /// <paramref name="comm"/>--communicator.
        /// </summary>
        static public int MPIMax(this int i, MPI_Comm comm)
        {
            int loc = i;

            unsafe {
                int glob = int.MinValue;
                csMPI.Raw.Allreduce(
                    (IntPtr)(&loc),
                    (IntPtr)(&glob),
                    1,
                    csMPI.Raw._DATATYPE.INT,
                    csMPI.Raw._OP.MAX,
                    comm);
                return(glob);
            }
        }
Example No. 10
 /// <summary>
 /// returns true on every process for every entry of <paramref name="i"/> if they are equal at every process
 /// </summary>
 /// <param name="i"></param>
 /// <param name="comm"></param>
 /// <returns></returns>
 static public bool[] MPIEquals(this int[] i, MPI_Comm comm)
 {
     int[]  R     = new int[i.Length];
     bool[] check = new bool[i.Length];
     unsafe
     {
         fixed(int *loc = i, glob = R)
         {
             csMPI.Raw.Allreduce(((IntPtr)(loc)), ((IntPtr)(glob)), i.Length, csMPI.Raw._DATATYPE.INT, csMPI.Raw._OP.BXOR, comm);
         }
     }
     for (int k = 0; k < i.Length; k++)
     {
         check[k] = (R[k] == 0);
     }
     return(check);
 }
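Design note: the XOR-reduction makes identical entries cancel to zero, but a zero result is only a necessary, not a sufficient, condition for equality once more than two ranks participate (for example, 1 ^ 2 ^ 3 == 0), so in rare cases this check can report equality for entries that actually differ.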
Example No. 11
        /// <summary>
        /// MPI-process with rank <paramref name="root"/> gathers this int[] from all MPI-processes in the
        /// <paramref name="comm"/>-communicator with variable length. The lengths of the gathered segments are specified by <paramref name="recvcounts"/>.
        /// </summary>
        static public int[] MPIGatherv(this int[] send, int[] recvcounts, int root, MPI_Comm comm)
        {
            csMPI.Raw.Comm_Size(comm, out int size);
            csMPI.Raw.Comm_Rank(comm, out int rank);

            int rcs = (rank == root) ? recvcounts.Sum() : 0;

            int[] result = rank == root ? new int[Math.Max(1, rcs)] : null;


            unsafe {
                int *displs = stackalloc int[size];
                if (rank == root)
                {
                    for (int i = 1; i < size; i++)
                    {
                        displs[i] = displs[i - 1] + recvcounts[i - 1];
                    }
                }

                fixed(int *pSend = send, pRcvcounts = recvcounts, pResult = result)
                {
                    Debug.Assert((rank == root) != (pResult == null));

                    csMPI.Raw.Gatherv(
                        (IntPtr)pSend,
                        send.Length,
                        csMPI.Raw._DATATYPE.INT,
                        (IntPtr)pResult,
                        (IntPtr)pRcvcounts,
                        (IntPtr)displs,
                        csMPI.Raw._DATATYPE.INT,
                        root,
                        comm);
                }
            }

            if (result != null && result.Length > rcs)
            {
                Debug.Assert(rcs == 0);
                result = new int[0];
            }

            return(result);
        }
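A sketch of a matching call (hypothetical sizes; recvcounts only has to be meaningful on the root rank, but here every rank fills it identically):

        MPI_Comm comm = csMPI.Raw._COMM.WORLD;
        csMPI.Raw.Comm_Size(comm, out int size);
        csMPI.Raw.Comm_Rank(comm, out int rank);

        int root = 0;
        int[] mine = new int[rank + 1];           // rank r sends r+1 items
        for (int j = 0; j < mine.Length; j++)
            mine[j] = rank;

        int[] recvcounts = new int[size];
        for (int r = 0; r < size; r++)
            recvcounts[r] = r + 1;

        int[] all = mine.MPIGatherv(recvcounts, root, comm);   // non-null only on root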
Example No. 12
        /// <summary>
        /// Gathers all send Arrays on all MPI-processes, at which every jth block of data is from the jth process.
        /// </summary>
        static private int[] Int_MPIAllGatherv(this int[] send, int[] m_recvcounts, MPI_Comm comm)
        {
            csMPI.Raw.Comm_Size(comm, out int size);
            int rcs = m_recvcounts.Sum();

            if (rcs == 0)
            {
                return(new int[0]);
            }


            int[] result = new int[rcs];
            if (send.Length == 0)
            {
                send = new int[1];
            }

            unsafe {
                int *displs = stackalloc int[size];
                for (int i = 1; i < size; i++)
                {
                    displs[i] = displs[i - 1] + m_recvcounts[i - 1];
                }

                fixed(int *pResult = result, pSend = send)
                {
                    fixed(int *pRcvcounts = m_recvcounts)
                    {
                        csMPI.Raw.Allgatherv(
                            (IntPtr)pSend,
                            send.Length,
                            csMPI.Raw._DATATYPE.INT,
                            (IntPtr)pResult,
                            (IntPtr)pRcvcounts,
                            (IntPtr)displs,
                            csMPI.Raw._DATATYPE.INT,
                            comm);
                    }
                }
            }

            return(result);
        }
Example No. 13
        private void LoadSymb(string name, ref MPI_Comm sym)
        {
            string errstr;
            IntPtr addr = Utils.DynamicLibraries.LoadSymbol(m_conv.LibHandle, name, out errstr);

            if (addr == IntPtr.Zero)
            {
                throw new ApplicationException("OpenMPI error: unable to load symbol '" + name + "' from library '" + m_conv.CurrentLibraryName + "', Error string: >" + errstr + "<;");
            }

            sym.m1 = m_conv.MPI_Comm_c2f(addr);
            //Console.WriteLine("val of '" + name + "' is: " + addr + ", fortran value is " + sym.m1);
            IntPtr test = m_conv.MPI_Comm_f2c(sym);

            if (test != addr)
            {
                throw new ApplicationException("f**k");
            }
            //else
            //    Console.WriteLine("back passed.");
        }
Example No. 14
        /// <summary>
        /// ctor
        /// </summary>
        /// <param name="comm"></param>
        //// <typeparam name="t">the primitive type of the message; must be a value-type;</typeparam>
        public Many2ManyMessenger(MPI.Wrappers.MPI_Comm comm)
        {
            m_Comm = comm;
            MPI.Wrappers.csMPI.Raw.Comm_Size(m_Comm, out size);
            MPI.Wrappers.csMPI.Raw.Comm_Rank(m_Comm, out MyRank);

            m_MyCommPaths  = new int[size];
            m_AllCommPaths = new int[size, size];

            m_ReceiveBuffers = new Buffer[size];
            m_SendBuffers    = new Buffer[size];

            m_Requests        = new MPI_Request[size * 2];
            m_ArrayOfStatuses = new MPI_Status[m_Requests.Length];


            // check item type
            m_ItemType = typeof(ItemType);
            CheckItemTypeRecursive(m_ItemType);
            m_ItemSize = Marshal.SizeOf(m_ItemType);
        }
Example No. 15
        static public int[] MPIAllGather(this int i, MPI_Comm comm)
        {
            csMPI.Raw.Comm_Size(comm, out int size);

            int[] result = new int[size];
            unsafe {
                fixed(int *pResult = &result[0])
                {
                    csMPI.Raw.Allgather(
                        (IntPtr)(&i),
                        1,
                        csMPI.Raw._DATATYPE.INT,
                        (IntPtr)pResult,
                        1,
                        csMPI.Raw._DATATYPE.INT,
                        comm);
                }
            }

            return(result);
        }
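Usage sketch (assumptions as above): one value per rank is gathered on every rank, ordered by rank:

        MPI_Comm comm = csMPI.Raw._COMM.WORLD;
        csMPI.Raw.Comm_Rank(comm, out int rank);
        int[] allRanks = rank.MPIAllGather(comm);   // allRanks[r] == r on every rank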
Example No. 16
        static public double[] MPIAllGather(this double d, MPI_Comm comm)
        {
            csMPI.Raw.Comm_Size(comm, out int size);

            double[] result = new double[size];
            unsafe {
                fixed(double *pResult = &result[0])
                {
                    csMPI.Raw.Allgather(
                        (IntPtr)(&d),
                        1,
                        csMPI.Raw._DATATYPE.DOUBLE,
                        (IntPtr)pResult,
                        1,
                        csMPI.Raw._DATATYPE.DOUBLE,
                        comm);
                }
            }

            return(result);
        }
Example No. 17
        /// <summary>
        /// returns true on every process for every entry of <paramref name="i"/> if they are equal at every process
        /// </summary>
        /// <param name="i"></param>
        /// <param name="comm"></param>
        /// <returns></returns>
        static public bool[] MPIEquals(this double[] i, MPI_Comm comm)
        {
            // reinterpret the bits of each double, so that the bit-wise XOR below compares the exact representation
            ulong[] conv_loc = new ulong[i.Length];
            for (int iDbl = 0; iDbl < i.Length; iDbl++)
            {
                conv_loc[iDbl] = (ulong)BitConverter.DoubleToInt64Bits(i[iDbl]);
            }

            ulong[] R     = new ulong[i.Length];
            bool[]  check = new bool[i.Length];
            unsafe
            {
                fixed(ulong *loc = conv_loc, glob = R)
                {
                    csMPI.Raw.Allreduce(((IntPtr)(loc)), ((IntPtr)(glob)), i.Length, csMPI.Raw._DATATYPE.UNSIGNED_LONG_LONG, csMPI.Raw._OP.BXOR, comm);
                }
            }
            for (int k = 0; k < i.Length; k++)
            {
                check[k] = (R[k] == 0);
            }
            return(check);
        }
Example No. 18
        /// <summary>
        ///
        /// </summary>
        public MPI_Comm Comm_c2f(byte[] C_MPI_Comm)
        {
            if (C_MPI_Comm.Length != GetSizeof_C_MPI_comm())
            {
                throw new ArgumentException("wrong number of bytes; length of argument must match the value that is returend by 'GetSizeof_C_MPI_comm()';");
            }
            MPI_Comm F_Comm = default(MPI_Comm);

            unsafe
            {
                fixed(byte *pcComm = &C_MPI_Comm[0])
                {
                    byte *pComm = (byte *)&(F_Comm.m1);

                    for (int i = 0; i < C_MPI_Comm.Length; i++)
                    {
                        *pComm = pcComm[i];
                        pComm++;
                    }
                }
            }
            return(F_Comm);
        }
Example No. 19
        /// <summary>
        /// converts the MPI communicators in ilPSP (which are
        /// FORTRAN-communicators) into a C-Communicator
        /// </summary>
        /// <param name="EightByteComm">
        /// on exit, the value of the C - MPI communicator handle, if
        /// <see cref="IMPI_CommConstants.GetSizeof_C_MPI_comm"/> == 8,
        /// otherwise 0
        /// </param>
        /// <param name="FourByteComm">
        /// on exit, the value of the C - MPI communicator handle, if
        /// <see cref="IMPI_CommConstants.GetSizeof_C_MPI_comm"/> == 4,
        /// otherwise 0;
        /// </param>
        /// <param name="input">
        /// (FORTRAN) MPI communicator handle to convert
        /// </param>
        /// <returns>
        /// Equal to <see cref="IMPI_CommConstants.GetSizeof_C_MPI_comm"/>,
        /// either 4 or 8
        /// </returns>
        /// <remarks>
        /// The length of the C - MPI communicator handle depends on the used
        /// MPI implementation (see remarks at <see cref="MPI_Comm"/>) and is
        /// given by the return value of
        /// <see cref="IMPI_CommConstants.GetSizeof_C_MPI_comm"/>
        /// </remarks>
        public int MPI_Comm_f2c(MPI_Comm input, out uint FourByteComm, out ulong EightByteComm)
        {
            unsafe {
                byte[] C_Comm = _COMM.Comm_f2c(input);
                fixed(byte *pComm = &(C_Comm[0]))
                {
                    uint  _FourByteComm  = 0;
                    ulong _EightByteComm = 0;
                    byte *pDest;

                    switch (C_Comm.Length)
                    {
                    case 4:
                        pDest = (byte *)(&_FourByteComm);
                        break;

                    case 8:
                        pDest = (byte *)(&_EightByteComm);
                        break;

                    default:
                        throw new NotImplementedException(
                                  "unknown size of C MPI communicator: " + C_Comm.Length + "bytes; ");
                    }
                    for (int i = 0; i < C_Comm.Length; i++)
                    {
                        pDest[i] = pComm[i];
                    }

                    FourByteComm  = _FourByteComm;
                    EightByteComm = _EightByteComm;
                }

                return(C_Comm.Length);
            }
        }
Example No. 20
        static public int[] MPIScatterv(this int[] send, int[] sendcounts, int root, MPI_Comm comm)
        {
            csMPI.Raw.Comm_Size(comm, out int size);
            csMPI.Raw.Comm_Rank(comm, out int rank);
            int[] result = new int[sendcounts[rank]];

            unsafe {
                int *displs = stackalloc int[size];
                for (int i = 1; i < size; i++)
                {
                    displs[i] = displs[i - 1] + sendcounts[i - 1];
                }

                if (send == null || send.Length == 0)
                {
                    // Dummy to avoid null pointer exception
                    send = new int[1];
                }

                fixed(int *pSend = &send[0], pSendcounts = &sendcounts[0], pResult = &result[0])
                {
                    csMPI.Raw.Scatterv(
                        (IntPtr)pSend,
                        (IntPtr)pSendcounts,
                        (IntPtr)displs,
                        csMPI.Raw._DATATYPE.INT,
                        (IntPtr)pResult,
                        sendcounts[rank],
                        csMPI.Raw._DATATYPE.INT,
                        root,
                        comm);
                }
            }

            return(result);
        }
Example No. 21
        /// <summary>
        /// MPI - parallel scalar product
        /// </summary>
        static public double ddot <TX, TY>(int N, TX x, int incx, TY y, int incy, MPI.Wrappers.MPI_Comm comm)
            where TX : IList <double>
            where TY : IList <double>
        {
            if (incx * x.Count < N)
            {
                throw new ArgumentException("vector too short.", "x");
            }
            if (incy * y.Count < N)
            {
                throw new ArgumentException("vector too short.", "y");
            }

            double locRes = 0;

            double[] dx = x as double[];
            double[] dy = y as double[];
            if (dx != null && dy != null)
            {
                // use dense BLAS
                locRes = BLAS.ddot(N, dx, incx, dy, incy);
            }
            else
            {
                ISparseVector <double> spx = x as ISparseVector <double>;
                ISparseVector <double> spy = y as ISparseVector <double>;
                IList <double>         _y  = y;
                if (spy != null)
                {
                    if (spx == null || spy.Sparsity < spx.Sparsity)
                    {
                        // y is sparser than x, use y !
                        spx = spy;
                        spy = null;
                        _y  = x;
                        x   = default(TX);
                        y   = default(TY);
                        int buffy = incx;
                        incx = incy;
                        incy = buffy;
                    }
                }

                if (spx != null)
                {
                    // sparse implementation

                    foreach (var entry in spx.SparseStruct)
                    {
                        int m = entry.Key % incx;
                        if (m != 0)
                        {
                            // entry is skipped by x-increment
                            continue;
                        }

                        int n = entry.Key / incx;

                        locRes += entry.Value * _y[n * incy];
                    }
                }
                else
                {
                    // default IList<double> - implementation
                    for (int n = 0; n < N; n++)
                    {
                        locRes += x[n * incx] * y[n * incy];
                    }
                }
            }

            double globRes = double.NaN;

            unsafe {
                csMPI.Raw.Allreduce((IntPtr)(&locRes), (IntPtr)(&globRes), 1, csMPI.Raw._DATATYPE.DOUBLE, csMPI.Raw._OP.SUM, comm);
            }
            return(globRes);
        }
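A sketch of a call to this MPI-parallel dot product (my own example; the local vectors are hypothetical, and the qualification with the enclosing static class is omitted). Every rank contributes its local part, and all ranks receive the same global result:

        MPI_Comm comm = csMPI.Raw._COMM.WORLD;
        double[] x = { 1.0, 2.0, 3.0 };           // local part of x on this rank
        double[] y = { 4.0, 5.0, 6.0 };           // local part of y on this rank

        double dot = ddot(3, x, 1, y, 1, comm);   // global sum of the local dot products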
Example No. 22
        /// <summary>
        /// Wrapper around <see cref="IMPIdriver.Scatterv(IntPtr, IntPtr, IntPtr, MPI_Datatype, IntPtr, int, MPI_Datatype, int, MPI_Comm)"/>.
        /// </summary>
        static public double[] MPIScatterv(this double[] send, int[] sendcounts, int root, MPI_Comm comm)
        {
            csMPI.Raw.Comm_Size(comm, out int size);
            csMPI.Raw.Comm_Rank(comm, out int rank);
            double[] result = new double[sendcounts[rank]];

            unsafe
            {
                int *displs = stackalloc int[size];
                for (int i = 1; i < size; i++)
                {
                    displs[i] = displs[i - 1] + sendcounts[i - 1];
                    //sum += sendcounts[i];
                }
                if (rank == root)
                {
                    if (send.Length < displs[size - 1] + sendcounts[size - 1])
                    {
                        throw new ArgumentException("Mismatch between send counts and send buffer size.");
                    }
                }

                //if (send == null || send.Length == 0) {
                //    // Dummy to avoid null pointer exception
                //    send = new double[1];
                //}

                fixed(int *pSendcounts = &sendcounts[0])
                {
                    fixed(double *pSend = send, pResult = &result[0])
                    {
                        csMPI.Raw.Scatterv(
                            (IntPtr)pSend,
                            (IntPtr)pSendcounts,
                            (IntPtr)displs,
                            csMPI.Raw._DATATYPE.DOUBLE,
                            (IntPtr)pResult,
                            sendcounts[rank],
                            csMPI.Raw._DATATYPE.DOUBLE,
                            root,
                            comm);
                    }
                }
            }

            return(result);
        }
Example No. 23
        /// <summary>
        /// Gathers objects <paramref name="o"/> from each rank on rank <paramref name="root"/>.
        /// </summary>
        /// <param name="o"></param>
        /// <param name="root">
        /// MPI rank where the objects should be collected
        /// </param>
        /// <param name="comm"></param>
        /// <returns>
        /// - null on all ranks except <paramref name="root"/>
        /// - on rank <paramref name="root"/>, an array containing the objects from all ranks
        /// </returns>
        static public T[] MPIGatherO <T>(this T o, int root, MPI_Comm comm)
        {
            csMPI.Raw.Comm_Rank(comm, out int MyRank);
            csMPI.Raw.Comm_Size(comm, out int MpiSize);

            IFormatter _Formatter = new BinaryFormatter();

            // -----------------------------------------------------
            // 1st phase: serialize object and gather object size
            // -----------------------------------------------------

            byte[] buffer = null;
            int    Size;

            if (root != MyRank)
            {
                using (var ms = new MemoryStream()) {
                    _Formatter.Serialize(ms, o);
                    Size   = (int)ms.Position;
                    buffer = ms.GetBuffer();
                }
                if (Size <= 0)
                {
                    throw new IOException("Error serializing object for MPI broadcast - size is 0");
                }
                Array.Resize(ref buffer, Size);
            }
            else
            {
                buffer = new byte[0];
                Size   = 0;
            }

            int[] Sizes = Size.MPIGather(root, comm);

            // -----------------------------------------------------
            // 2nd phase: gather data
            // -----------------------------------------------------
            byte[] rcvBuffer = buffer.MPIGatherv(Sizes);

            // -----------------------------------------------------
            // 3rd phase: de-serialize
            // -----------------------------------------------------


            if (MyRank == root)
            {
                T[] ret = new T[MpiSize];
                using (var ms = new MemoryStream(rcvBuffer)) {
                    for (int r = 0; r < MpiSize; r++)
                    {
                        if (r == MyRank)
                        {
                            ret[r] = o;
                        }
                        else
                        {
                            ret[r] = (T)_Formatter.Deserialize(ms);
                        }
                    }
                }
                return(ret);
            }
            else
            {
                return(null);
            }
        }
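Usage sketch (the payload only has to be serializable; the string content is hypothetical):

        MPI_Comm comm = csMPI.Raw._COMM.WORLD;
        csMPI.Raw.Comm_Rank(comm, out int rank);

        string msg = "hello from rank " + rank;
        string[] all = msg.MPIGatherO(0, comm);   // all messages on rank 0, null on the other ranks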
Example No. 24
        /// <summary>
        /// Wrapper around <see cref="IMPIdriver.Gatherv(IntPtr, int, MPI_Datatype, IntPtr, IntPtr, IntPtr, MPI_Datatype, int, MPI_Comm)"/>
        /// </summary>
        static public double[] MPIGatherv(this double[] send, int[] recvcounts, int root, MPI_Comm comm)
        {
            csMPI.Raw.Comm_Size(comm, out int size);
            csMPI.Raw.Comm_Rank(comm, out int rank);

            double[] result = rank == root ? new double[recvcounts.Sum()] : null;

            unsafe {
                int *displs = stackalloc int[size];
                for (int i = 1; i < size; i++)
                {
                    displs[i] = displs[i - 1] + recvcounts[i - 1];
                }

                fixed(int *pRcvcounts = &recvcounts[0])
                {
                    fixed(double *pSend = &send[0], pResult = result)
                    {
                        Debug.Assert((rank == root) != (pResult == null));

                        csMPI.Raw.Gatherv(
                            (IntPtr)pSend,
                            send.Length,
                            csMPI.Raw._DATATYPE.DOUBLE,
                            (IntPtr)pResult,
                            (IntPtr)pRcvcounts,
                            (IntPtr)displs,
                            csMPI.Raw._DATATYPE.DOUBLE,
                            root,
                            comm);
                    }
                }
            }


            return(result);
        }
Example No. 25
        /// <summary>
        /// MPI-process with rank <paramref name="root"/> gathers this ulong[] from all MPI-processes in the
        /// <paramref name="comm"/>-communicator with variable length. The length of the gathered ulong[] is specified by <paramref name="recvcount"/>.
        /// </summary>
        /// <param name="recvcount">
        /// Length of the receive buffer
        /// </param>
        static public ulong[] MPIGatherv(this ulong[] send, int[] recvcount, int root, MPI_Comm comm)
        {
            csMPI.Raw.Comm_Size(comm, out int size);
            ulong[] result = new ulong[recvcount.Sum()];

            unsafe {
                int *displs = stackalloc int[size];
                for (int i = 1; i < size; i++)
                {
                    displs[i] = displs[i - 1] + recvcount[i - 1];
                }

                //LONG_LONG for long of 64 bits in size
                fixed(ulong *pSend = &send[0], pResult = &result[0])
                {
                    fixed(int *pRcvcounts = &recvcount[0])
                    {
                        csMPI.Raw.Gatherv(
                            (IntPtr)pSend,
                            send.Length,
                            csMPI.Raw._DATATYPE.LONG_LONG,
                            (IntPtr)pResult,
                            (IntPtr)pRcvcounts,
                            (IntPtr)displs,
                            csMPI.Raw._DATATYPE.LONG_LONG,
                            root,
                            comm);
                    }
                }
            }

            return(result);
        }
Example No. 26
        /// <summary>
        /// Gathers all send Arrays on all MPI-processes, at which every jth block of data is from the jth process.
        /// </summary>
        static private ulong[] Long_MPIAllGatherv(this ulong[] send, int[] m_recvcounts, MPI_Comm comm)
        {
            csMPI.Raw.Comm_Size(comm, out int size);
            ulong[] result = new ulong[m_recvcounts.Sum()];

            unsafe {
                int *displs = stackalloc int[size];
                for (int i = 1; i < size; i++)
                {
                    displs[i] = displs[i - 1] + m_recvcounts[i - 1];
                }

                fixed(ulong *pResult = &result[0], pSend = &send[0])
                {
                    fixed(int *pRcvcounts = &m_recvcounts[0])
                    {
                        csMPI.Raw.Allgatherv(
                            (IntPtr)pSend,
                            send.Length,
                            csMPI.Raw._DATATYPE.UNSIGNED_LONG_LONG,
                            (IntPtr)pResult,
                            (IntPtr)pRcvcounts,
                            (IntPtr)displs,
                            csMPI.Raw._DATATYPE.UNSIGNED_LONG_LONG,
                            comm);
                    }
                }
            }

            return(result);
        }
Example No. 27
        /// <summary>
        /// broadcasts an object to all other processes in the
        /// MPI communicator <paramref name="comm"/>.
        /// </summary>
        /// <param name="o">
        /// an arbitrary, serializable object;
        /// ignored on all processes which are not <paramref name="root"/>
        /// </param>
        /// <param name="root">
        /// rank of the sender process
        /// </param>
        /// <param name="comm">MPI communicator</param>
        /// <returns>
        /// on the sender process, the input <paramref name="o"/> is returned;
        /// </returns>
        /// <typeparam name="T">
        /// type of the object to transmit
        /// </typeparam>
        static public T MPIBroadcast <T>(this T o, int root, MPI_Comm comm)
        {
            int MyRank;

            csMPI.Raw.Comm_Rank(comm, out MyRank);

            IFormatter _Formatter = new BinaryFormatter();

            // -----------------------------------------------------
            // 1st phase: serialize object and broadcast object size
            // -----------------------------------------------------

            byte[] buffer = null;
            int    Size   = -1;

            if (root == MyRank)
            {
                using (var ms = new MemoryStream()) {
                    _Formatter.Serialize(ms, o);
                    Size   = (int)ms.Position;
                    buffer = ms.GetBuffer();
                }
                if (Size <= 0)
                {
                    throw new IOException("Error serializing object for MPI broadcast - size is 0");
                }
                Array.Resize(ref buffer, Size);
            }

            unsafe {
                csMPI.Raw.Bcast((IntPtr)(&Size), 4, csMPI.Raw._DATATYPE.BYTE, root, comm);
            }

            // ---------------------------
            // 2nd phase: broadcast object
            // ---------------------------
            Debug.Assert(Size > 0);
            if (buffer == null)
            {
                buffer = new byte[Size];
            }

            unsafe
            {
                fixed(byte *pBuffer = buffer)
                {
                    csMPI.Raw.Bcast((IntPtr)pBuffer, Size, csMPI.Raw._DATATYPE.BYTE, root, comm);
                }
            }

            if (MyRank == root)
            {
                return(o);
            }
            else
            {
                T r;
                using (var ms = new MemoryStream(buffer)) {
                    r = (T)_Formatter.Deserialize(ms);
                    return(r);
                }
            }
        }
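Usage sketch (hypothetical payload): on the root rank the argument is returned unchanged, on all other ranks the argument is ignored and the deserialized copy from the root is returned:

        MPI_Comm comm = csMPI.Raw._COMM.WORLD;
        csMPI.Raw.Comm_Rank(comm, out int rank);

        int[] data = rank == 0 ? new int[] { 1, 2, 3 } : null;
        data = data.MPIBroadcast(0, comm);        // now { 1, 2, 3 } on every rank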
Example No. 28
        /// <summary>
        /// MPI-process with rank <paramref name="root"/> gathers this ulong[] from all MPI-processes in the
        /// <paramref name="comm"/>-communicator with variable length. The length of the gathered ulong[] is specified by <paramref name="recvcount"/>.
        /// </summary>
        /// <param name="recvcount">
        /// number of items to receive from each sender
        /// </param>
        /// <param name="send">
        /// data to send
        /// </param>
        /// <param name="comm"></param>
        /// <param name="root">rank of receiver process</param>
        static public ulong[] MPIGatherv(this ulong[] send, int[] recvcount, int root, MPI_Comm comm)
        {
            csMPI.Raw.Comm_Size(comm, out int size);
            csMPI.Raw.Comm_Rank(comm, out int rank);

            int rcs = (rank == root) ? recvcount.Sum() : 0;

            ulong[] result = rank == root ? new ulong[Math.Max(1, rcs)] : null;

            unsafe {
                int *displs = stackalloc int[size];
                if (rank == root)
                    for (int i = 1; i < size; i++)
                    {
                        displs[i] = displs[i - 1] + recvcount[i - 1];
                    }
                //LONG_LONG for long of 64 bits in size
                fixed(ulong *pSend = send, pResult = result)
                {
                    fixed(int *pRcvcounts = recvcount)
                    {
                        csMPI.Raw.Gatherv(
                            (IntPtr)pSend,
                            send.Length,
                            csMPI.Raw._DATATYPE.LONG_LONG,
                            (IntPtr)pResult,
                            (IntPtr)pRcvcounts,
                            (IntPtr)displs,
                            csMPI.Raw._DATATYPE.LONG_LONG,
                            root,
                            comm);
                    }
                }
            }

            if (result != null && result.Length > rcs)
            {
                Debug.Assert(rcs == 0);
                result = new ulong[0];
            }

            return(result);
        }
Example No. 29
        /// <summary>
        /// Wrapper around <see cref="IMPIdriver.Gatherv"/>
        /// </summary>
        static public byte[] MPIGatherv(this byte[] send, int[] recvcounts, int root, MPI_Comm comm)
        {
            csMPI.Raw.Comm_Size(comm, out int size);
            csMPI.Raw.Comm_Rank(comm, out int rank);

            int outsize;

            byte[] result;
            if (rank == root)
            {
                outsize = recvcounts.Sum();
                result  = new byte[Math.Max(outsize, 1)]; // this is only because a 0-length array maps to unsafe null
            }
            else
            {
                result  = null;
                outsize = 0;
            }

            unsafe {
                int *displs = stackalloc int[size];
                if (rank == root)
                    for (int i = 1; i < size; i++)
                    {
                        displs[i] = displs[i - 1] + recvcounts[i - 1];
                    }

                fixed(int *pRcvcounts = recvcounts)
                {
                    int lsend = send.Length;

                    if (lsend <= 0)
                    {
                        // dummy, so that pinning does not yield a null send pointer
                        send = new byte[1];
                    }

                    fixed(byte *pSend = send, pResult = result)
                    {
                        Debug.Assert((rank == root) != (pResult == null));

                        csMPI.Raw.Gatherv(
                            (IntPtr)pSend,
                            lsend,
                            csMPI.Raw._DATATYPE.BYTE,
                            (IntPtr)pResult,
                            (IntPtr)pRcvcounts,
                            (IntPtr)displs,
                            csMPI.Raw._DATATYPE.BYTE,
                            root,
                            comm);
                    }
                }
            }

            if (outsize > 0)
            {
                Debug.Assert(outsize == result.Length);
                return(result);
            }
            else
            {
                return(new byte[0]);
            }
        }
Example No. 30
        /// <summary>
        /// if <paramref name="e"/> is unequal to null on any of the calling
        /// MPI processes, this method broadcasts it to all other processes;
        /// </summary>
        /// <param name="e">
        /// an exception, or null
        /// </param>
        /// <param name="comm">
        /// %
        /// </param>
        /// <remarks>
        /// The following code may cause a deadlock:
        /// <code>
        /// try {
        ///     // some code that may cause an exception on some MPI process,
        ///     // but not on all of them
        /// } catch(Exception e) {
        ///     // some statement that has an effect on control flow, e.g. 'continue;' or
        ///     return false;
        /// }
        /// // some collective call, e.g.
        /// csMPI.Raw.Allreduce( ... )
        /// </code>
        /// The collective call <c>csMPI.Raw.Allreduce( ... )</c> is never executed on some
        /// MPI processes, therefore the application will hang in a deadlock.
        /// <code>
        /// try {
        ///     Exception e = null;
        ///     try {
        ///         // some code that may cause an exception on some MPI process,
        ///         // but not on all of them
        ///     } catch(Exception ee) {
        ///         e = ee;
        ///     }
        ///     ExceptionBcast(e,csMPI.Raw._COMM.WORLD);
        /// } catch(Exception e2) {
        ///     // some statement that has an effect on control flow, e.g. 'continue;' or
        ///     return false;
        /// }
        /// // some collective call, e.g.
        /// csMPI.Raw.Allreduce( ... )
        /// </code>
        /// Note that, if <paramref name="e"/> is not serializable,
        /// only the message of <paramref name="e"/> will be broadcast and
        /// wrapped into a new exception.
        /// </remarks>
        static public void ExceptionBcast(this Exception e, MPI_Comm comm)
        {
            int ExcSrc = int.MaxValue;

            if (e != null)
            {
                csMPI.Raw.Comm_Rank(comm, out ExcSrc);
            }

            unsafe {
                int res = 0;
                csMPI.Raw.Allreduce((IntPtr)(&ExcSrc), (IntPtr)(&res), 1, csMPI.Raw._DATATYPE.INT, csMPI.Raw._OP.MIN, comm);
                ExcSrc = res;
            }


            if (ExcSrc < int.MaxValue)
            {
                // an exception occurred on some process

                int myRank;
                csMPI.Raw.Comm_Rank(comm, out myRank);

                object reduced;

                if (myRank == ExcSrc)
                {
                    // sender branch


                    if (e.GetType().GetCustomAttributes(typeof(SerializableAttribute), false).Length > 0)
                    {
                        // exception is serializable -> bcast exception itself
                        reduced = MPIBroadcast <object>(e, ExcSrc, comm);
                    }
                    else
                    {
                        // bcast exception message
                        reduced = MPIBroadcast <object>(e.GetType().Name + ": '" + e.Message + "'", ExcSrc, comm);
                    }
                }
                else
                {
                    // receiver branch

                    reduced = MPIBroadcast <object>(null, ExcSrc, comm);
                }

                if (reduced is string)
                {
                    throw new ApplicationException("On MPI Process #" + myRank + ": " + ((string)reduced));
                }
                else if (reduced is Exception)
                {
                    throw ((Exception)reduced);
                }
                else
                {
                    throw new ApplicationException("should never occur");
                }
            }
        }