Code example #1
        internal AsynchronousChannel(int index, int capacity, int chunkSize, CancellationToken cancellationToken, IntValueEvent consumerEvent)
        {
            if (chunkSize == 0)
            {
                chunkSize = Scheduling.GetDefaultChunkSize<T>();
            }

            Debug.Assert(chunkSize > 0, "chunk size must be greater than 0");
            Debug.Assert(capacity > 1, "this impl doesn't support capacity of 1 or 0");

            // Initialize a buffer with enough space to hold 'capacity' elements.
            // We need one extra unused element as a sentinel to detect a full buffer,
            // thus we add one to the capacity requested.
            _index  = index;
            _buffer = new T[capacity + 1][];
            _producerBufferIndex = 0;
            _consumerBufferIndex = 0;

            _producerEvent      = new ManualResetEventSlim();
            _consumerEvent      = consumerEvent;
            _chunkSize          = chunkSize;
            _producerChunk      = new T[chunkSize];
            _producerChunkIndex = 0;
            _cancellationToken  = cancellationToken;
        }
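
The comment in example #1 about the extra unused element is the classic circular-buffer sentinel trick: because one slot is always left empty, the producer and consumer indexes alone distinguish a full buffer from an empty one. A minimal sketch of that test, using the fields initialized above (these helper properties are illustrative, not actual AsynchronousChannel members):

        // Empty: the producer and consumer point at the same slot.
        private bool IsEmpty => _producerBufferIndex == _consumerBufferIndex;

        // Full: advancing the producer by one slot (with wrap-around) would collide with
        // the consumer. The sentinel slot is why the constructor allocates capacity + 1.
        private bool IsFull => (_producerBufferIndex + 1) % _buffer.Length == _consumerBufferIndex;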
Code example #2
 internal Mutables()
 {
     m_nextChunkMaxSize  = 1;                                           // We start the chunk size at 1 and grow it later.
     m_chunkBuffer       = new T[Scheduling.GetDefaultChunkSize<T>()];  // Pre-allocate the array at the maximum size.
     m_currentChunkSize  = 0;                                           // The chunk begins life empty.
     m_currentChunkIndex = -1;
     m_chunkBaseIndex    = 0;
     m_chunkCounter      = 0;
 }
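
The comments in example #2 describe a grow-as-you-go policy: the chunk buffer is allocated once at the maximum size returned by Scheduling.GetDefaultChunkSize<T>(), but the first chunks are intentionally tiny so a consumer that takes only a few elements does not wait for a full chunk to be produced. A hedged sketch of how the growth step might look (the doubling policy and the method name are assumptions, not the actual PLINQ logic):

     // Illustrative only: grow the next chunk geometrically, capped at the
     // pre-allocated buffer length. The real growth schedule may differ.
     private void GrowNextChunkMaxSize()
     {
         if (m_nextChunkMaxSize < m_chunkBuffer.Length)
         {
             m_nextChunkMaxSize = Math.Min(m_nextChunkMaxSize * 2, m_chunkBuffer.Length);
         }
     }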
Code example #3
 internal Mutables()
 {
     this.m_nextChunkMaxSize  = 1;
     this.m_chunkBuffer       = new T[Scheduling.GetDefaultChunkSize<T>()];
     this.m_currentChunkSize  = 0;
     this.m_currentChunkIndex = -1;
     this.m_chunkBaseIndex    = 0;
     this.m_chunkCounter      = 0;
 }
Code example #4
        private void InitializePartitions(IEnumerable<T> source, int partitionCount, bool useStriping)
        {
            ParallelEnumerableWrapper<T> wrapper = source as ParallelEnumerableWrapper<T>;

            if (wrapper != null)
            {
                source = wrapper.WrappedEnumerable;
            }
            IList<T> data = source as IList<T>;

            if (data != null)
            {
                QueryOperatorEnumerator<T, int>[] enumeratorArray = new QueryOperatorEnumerator<T, int>[partitionCount];
                int count        = data.Count;
                T[] localArray   = source as T[];
                int maxChunkSize = -1;
                if (useStriping)
                {
                    maxChunkSize = Scheduling.GetDefaultChunkSize<T>();
                    if (maxChunkSize < 1)
                    {
                        maxChunkSize = 1;
                    }
                }
                for (int i = 0; i < partitionCount; i++)
                {
                    if (localArray != null)
                    {
                        if (useStriping)
                        {
                            enumeratorArray[i] = new ArrayIndexRangeEnumerator<T>(localArray, partitionCount, i, maxChunkSize);
                        }
                        else
                        {
                            enumeratorArray[i] = new ArrayContiguousIndexRangeEnumerator<T>(localArray, partitionCount, i);
                        }
                    }
                    else if (useStriping)
                    {
                        enumeratorArray[i] = new ListIndexRangeEnumerator<T>(data, partitionCount, i, maxChunkSize);
                    }
                    else
                    {
                        enumeratorArray[i] = new ListContiguousIndexRangeEnumerator<T>(data, partitionCount, i);
                    }
                }
                base.m_partitions = enumeratorArray;
            }
            else
            {
                base.m_partitions = PartitionedDataSource<T>.MakePartitions(source.GetEnumerator(), partitionCount);
            }
        }
Code example #5
 internal AsynchronousChannel(int capacity, int chunkSize, CancellationToken cancellationToken)
 {
     if (chunkSize == 0)
     {
         chunkSize = Scheduling.GetDefaultChunkSize<T>();
     }
     this.m_buffer = new T[capacity + 1][];
     this.m_producerBufferIndex = 0;
     this.m_consumerBufferIndex = 0;
     this.m_producerEvent       = new ManualResetEventSlim();
     this.m_consumerEvent       = new ManualResetEventSlim();
     this.m_chunkSize           = chunkSize;
     this.m_producerChunk       = new T[chunkSize];
     this.m_producerChunkIndex  = 0;
     this.m_cancellationToken   = cancellationToken;
 }
Code example #6
        //---------------------------------------------------------------------------------------
        // This method just creates the individual partitions given a data source.
        //
        // Notes:
        //     We check whether the data source is an IList<T> and, if so, we can partition
        //     "in place" by calculating a set of indexes. Otherwise, we return an enumerator that
        //     performs partitioning lazily. Depending on which case it is, the enumerator may
        //     contain synchronization (i.e. the latter case), meaning callers may occasionally
        //     block when enumerating it.
        //

        private void InitializePartitions(IEnumerable<T> source, int partitionCount, bool useStriping)
        {
            Contract.Assert(source != null);
            Contract.Assert(partitionCount > 0);

            // If this is a wrapper, grab the internal wrapped data source so we can uncover its real type.
            ParallelEnumerableWrapper<T> wrapper = source as ParallelEnumerableWrapper<T>;

            if (wrapper != null)
            {
                source = wrapper.WrappedEnumerable;
                Contract.Assert(source != null);
            }

            // Check whether we have an indexable data source.
            IList<T> sourceAsList = source as IList<T>;

            if (sourceAsList != null)
            {
                QueryOperatorEnumerator<T, int>[] partitions = new QueryOperatorEnumerator<T, int>[partitionCount];
                int listCount = sourceAsList.Count;

                // We use this below to specialize enumerators when possible.
                T[] sourceAsArray = source as T[];

                // If range partitioning is used, chunk size will be unlimited, i.e. -1.
                int maxChunkSize = -1;

                if (useStriping)
                {
                    maxChunkSize = Scheduling.GetDefaultChunkSize<T>();

                    // The minimum chunk size is 1.
                    if (maxChunkSize < 1)
                    {
                        maxChunkSize = 1;
                    }
                }

                // Calculate indexes and construct enumerators that walk a subset of the input.
                for (int i = 0; i < partitionCount; i++)
                {
                    if (sourceAsArray != null)
                    {
                        // If the source is an array, we can use a fast path below to index using
                        // 'ldelem' instructions rather than making interface method calls.
                        if (useStriping)
                        {
                            partitions[i] = new ArrayIndexRangeEnumerator(sourceAsArray, partitionCount, i, maxChunkSize);
                        }
                        else
                        {
                            partitions[i] = new ArrayContiguousIndexRangeEnumerator(sourceAsArray, partitionCount, i);
                        }
                        TraceHelpers.TraceInfo("ContigousRangePartitionExchangeStream::MakePartitions - (array) #{0} {1}", i, maxChunkSize);
                    }
                    else
                    {
                        // Create a general purpose list enumerator object.
                        if (useStriping)
                        {
                            partitions[i] = new ListIndexRangeEnumerator(sourceAsList, partitionCount, i, maxChunkSize);
                        }
                        else
                        {
                            partitions[i] = new ListContiguousIndexRangeEnumerator(sourceAsList, partitionCount, i);
                        }
                        TraceHelpers.TraceInfo("ContigousRangePartitionExchangeStream::MakePartitions - (list) #{0} {1})", i, maxChunkSize);
                    }
                }

                Contract.Assert(partitions.Length == partitionCount);
                m_partitions = partitions;
            }
            else
            {
                // We couldn't use an in-place partition. Shucks. Defer to the other overload which
                // accepts an enumerator as input instead.
                m_partitions = MakePartitions(source.GetEnumerator(), partitionCount);
            }
        }
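
Both InitializePartitions variants (examples #4 and #6) pick between two index layouts: striping, where each partition repeatedly takes a chunk of at most maxChunkSize elements and then skips past the other partitions' chunks, and contiguous ranges, where each partition owns one solid slice of the list. A rough element-level sketch of the difference, assuming an IList<T> source (the real enumerators above stripe by chunks and yield (element, index) pairs, so this is only an approximation):

        // Striped: partition i visits elements i, i + partitionCount, i + 2 * partitionCount, ...
        static IEnumerable<T> StripedPartition<T>(IList<T> data, int partitionCount, int partitionIndex)
        {
            for (int j = partitionIndex; j < data.Count; j += partitionCount)
                yield return data[j];
        }

        // Contiguous: partition i owns one solid slice of roughly Count / partitionCount elements.
        static IEnumerable<T> ContiguousPartition<T>(IList<T> data, int partitionCount, int partitionIndex)
        {
            int stride = (data.Count + partitionCount - 1) / partitionCount;
            int start  = partitionIndex * stride;
            int end    = Math.Min(start + stride, data.Count);
            for (int j = start; j < end; j++)
                yield return data[j];
        }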