コード例 #1 (Code example #1)
        /// <summary>
        /// Device-side sum of the first <paramref name="N"/> elements of <paramref name="a"/>,
        /// accumulated atomically into <paramref name="total"/>[0].
        /// Each thread forms a private partial sum over a grid-stride loop, the block folds
        /// its partials in shared memory, and thread 0 of each block publishes one atomic add.
        /// NOTE(review): callers presumably zero total[0] before the launch — confirm.
        /// </summary>
        /// <param name="a">Input values (device-resident array).</param>
        /// <param name="N">Number of leading elements of <paramref name="a"/> to reduce.</param>
        /// <param name="total">Single-element accumulator; total[0] receives the grand sum.</param>
        public static void Total(FloatResidentArray a, int N, float[] total)
        {
            // One shared-memory slot per thread for the block-local tree reduction.
            var cache = new SharedMemoryAllocator <float>().allocate(blockDim.x);

            int   tid        = threadIdx.x + blockDim.x * blockIdx.x;
            int   cacheIndex = threadIdx.x;
            float sum        = 0f;

            // Grid-stride loop: correct for any grid size relative to N.
            while (tid < N)
            {
                sum  = sum + a[tid];
                tid += blockDim.x * gridDim.x;
            }
            cache[cacheIndex] = sum;
            CUDAIntrinsics.__syncthreads();

            // Block-level tree reduction by ceil-halving. Unlike the classic
            // power-of-two halving (i = blockDim.x / 2; ...; i >>= 1), this is
            // also correct when blockDim.x is NOT a power of two: when the
            // active count is odd the middle element simply survives to the
            // next round instead of being dropped.
            int active = blockDim.x;
            while (active > 1)
            {
                int half = (active + 1) >> 1; // ceil(active / 2); uniform across the block
                if (cacheIndex < active - half)
                {
                    cache[cacheIndex] = cache[cacheIndex] + cache[cacheIndex + half];
                }
                // Barrier sits outside the divergent branch; 'active'/'half'
                // depend only on blockDim.x, so every thread reaches it the
                // same number of times.
                CUDAIntrinsics.__syncthreads();
                active = half;
            }

            // Exactly one atomic add per block keeps contention low.
            if (cacheIndex == 0)
            {
                AtomicExpr.apply(ref total[0], cache[0], (x, y) => x + y);
            }
        }
コード例 #2 (Code example #2)
        /// <summary>
        /// Device-side dot product of the first <paramref name="N"/> elements of
        /// <paramref name="r1"/> and <paramref name="r2"/>, accumulated atomically into
        /// <paramref name="result"/>[0]. Per-thread partials over a grid-stride loop,
        /// block-level shared-memory reduction, one atomic add per block.
        /// NOTE(review): callers presumably zero result[0] before the launch — confirm.
        /// </summary>
        /// <param name="result">Single-element accumulator; result[0] receives the dot product.</param>
        /// <param name="r1">First input vector (device-resident array).</param>
        /// <param name="r2">Second input vector (device-resident array).</param>
        /// <param name="N">Number of leading elements to multiply-accumulate.</param>
        private static void ScalarProd(float[] result, FloatResidentArray r1, FloatResidentArray r2, int N)
        {
            // One shared-memory slot per thread for the block-local tree reduction.
            var cache      = new SharedMemoryAllocator <float>().allocate(blockDim.x);
            int tid        = threadIdx.x + blockDim.x * blockIdx.x;
            int cacheIndex = threadIdx.x;

            float tmp = 0.0F;

            // Grid-stride loop: correct for any grid size relative to N.
            while (tid < N)
            {
                tmp += r1[tid] * r2[tid];
                tid += blockDim.x * gridDim.x;
            }

            cache[cacheIndex] = tmp;

            CUDAIntrinsics.__syncthreads();

            // Ceil-halving tree reduction: correct for any blockDim.x, not
            // just powers of two (the classic i >>= 1 loop loses the middle
            // element whenever the active count turns odd).
            int active = blockDim.x;
            while (active > 1)
            {
                int half = (active + 1) >> 1; // ceil(active / 2); uniform across the block

                if (cacheIndex < active - half)
                {
                    cache[cacheIndex] += cache[cacheIndex + half];
                }

                // Barrier outside the divergent branch; loop bounds are block-uniform.
                CUDAIntrinsics.__syncthreads();
                active = half;
            }

            // Exactly one atomic add per block keeps contention low.
            if (cacheIndex == 0)
            {
                AtomicExpr.apply(ref result[0], cache[0], (x, y) => x + y);
            }
        }
コード例 #3 (Code example #3)
        /// <summary>
        /// Generic device-side reduction of the first <paramref name="N"/> elements of
        /// <paramref name="input"/> under <paramref name="reductor"/>, folded atomically
        /// into <paramref name="result"/>[0]. Per-thread partials (seeded with
        /// <paramref name="neutral"/>) over a grid-stride loop, block-level shared-memory
        /// reduction, one atomic apply per block.
        /// The reductor must be associative (any tree reduction requires this); the
        /// combination order is unspecified, so non-commutative reductors are unsupported.
        /// NOTE(review): callers presumably seed result[0] with the neutral element — confirm.
        /// </summary>
        /// <param name="result">Single-element accumulator; result[0] receives the reduction.</param>
        /// <param name="input">Input values.</param>
        /// <param name="N">Number of leading elements to reduce.</param>
        /// <param name="neutral">Identity element of <paramref name="reductor"/> (e.g. 0 for +, 1 for *).</param>
        /// <param name="reductor">Associative binary combiner.</param>
        public static void InnerReduce(float[] result, float[] input, int N, float neutral, Func <float, float, float> reductor)
        {
            // One shared-memory slot per thread for the block-local tree reduction.
            var cache      = new SharedMemoryAllocator <float>().allocate(blockDim.x);
            int tid        = threadIdx.x + blockDim.x * blockIdx.x;
            int cacheIndex = threadIdx.x;

            // Seeding with the neutral element keeps threads with no assigned
            // elements (tid >= N) from perturbing the result.
            float tmp = neutral;

            // Grid-stride loop: correct for any grid size relative to N.
            while (tid < N)
            {
                tmp  = reductor(tmp, input[tid]);
                tid += blockDim.x * gridDim.x;
            }

            cache[cacheIndex] = tmp;

            CUDAIntrinsics.__syncthreads();

            // Ceil-halving tree reduction: correct for any blockDim.x, not
            // just powers of two (the classic i >>= 1 loop loses the middle
            // element whenever the active count turns odd).
            int active = blockDim.x;
            while (active > 1)
            {
                int half = (active + 1) >> 1; // ceil(active / 2); uniform across the block

                if (cacheIndex < active - half)
                {
                    cache[cacheIndex] = reductor(cache[cacheIndex], cache[cacheIndex + half]);
                }

                // Barrier outside the divergent branch; loop bounds are block-uniform.
                CUDAIntrinsics.__syncthreads();
                active = half;
            }

            // Exactly one atomic apply per block keeps contention low.
            if (cacheIndex == 0)
            {
                AtomicExpr.apply(ref result[0], cache[0], reductor);
            }
        }