Example #1
        /// <summary>
        /// Checks whether an ordered pipelining merge pipelines the results
        /// instead of running in a stop-and-go fashion.
        /// </summary>
        private static bool OrderedPipeliningTest2(bool buffered)
        {
            TestHarness.TestLog("OrderedPipeliningTest2: buffered={0}", buffered);
            ParallelMergeOptions merge = buffered ? ParallelMergeOptions.AutoBuffered : ParallelMergeOptions.NotBuffered;

            IEnumerable <int> src = Enumerable.Range(0, int.MaxValue)
                                    .Select(x => { if (x == 1000000)
                                                   {
                                                       throw new Exception();
                                                   }
                                                   return(x); });


            try
            {
                int expect = 0;
                int got    = Enumerable.First(src.AsParallel().AsOrdered().WithMergeOptions(merge).Select(x => x));
                if (got != expect)
                {
                    TestHarness.TestLog("> FAILED: Expected {0}, got {1}.", expect, got);
                    return(false);
                }
            }
            catch (Exception e)
            {
                TestHarness.TestLog("> FAILED: Caught an exception: {0}.", e.GetType());
                return(false);
            }

            return(true);
        }
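A minimal standalone sketch of the behavior this test verifies (the class and Main method below are illustrative, not part of the test suite): with a pipelining merge, First() returns long before the effectively unbounded source is exhausted.

        using System;
        using System.Linq;

        class PipeliningDemo
        {
            static void Main()
            {
                // An effectively unbounded source; a stop-and-go (fully buffered) merge could
                // never finish buffering it before yielding anything to the consumer.
                var source = Enumerable.Range(0, int.MaxValue);

                int first = source.AsParallel()
                                  .AsOrdered()
                                  .WithMergeOptions(ParallelMergeOptions.NotBuffered)
                                  .Select(x => x)
                                  .First();

                Console.WriteLine(first);   // Expected: 0
            }
        }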
Example #2
        /// <summary>
        /// Checks whether an ordered pipelining merge pipelines the results
        /// instead of running in a stop-and-go fashion.
        /// </summary>
        private static void OrderedPipeliningTest2(bool buffered)
        {
            ParallelMergeOptions merge = buffered ? ParallelMergeOptions.AutoBuffered : ParallelMergeOptions.NotBuffered;

            IEnumerable <int> src = Enumerable.Range(0, int.MaxValue)
                                    .Select(x => { if (x == 100000000)
                                                   {
                                                       throw new Exception();
                                                   }
                                                   return(x); });

            try
            {
                int expect = 0;
                int got    = Enumerable.First(src.AsParallel().AsOrdered().WithMergeOptions(merge).Select(x => x));
                if (got != expect)
                {
                    Assert.True(false, string.Format("OrderedPipeliningTest2: buffered={0}  > FAILED: Expected {1}, got {2}.", buffered, expect, got));
                }
            }
            catch (Exception e)
            {
                Assert.True(false, string.Format("OrderedPipeliningTest2: buffered={0}:  > FAILED.  Caught an exception - {1}", buffered, e));
            }
        }
Example #3
 internal QuerySettings(System.Threading.Tasks.TaskScheduler taskScheduler, int? degreeOfParallelism, CancellationToken externalCancellationToken, ParallelExecutionMode? executionMode, ParallelMergeOptions? mergeOptions)
 {
     this.m_taskScheduler = taskScheduler;
     this.m_degreeOfParallelism = degreeOfParallelism;
     this.m_cancellationState = new System.Linq.Parallel.CancellationState(externalCancellationToken);
     this.m_executionMode = executionMode;
     this.m_mergeOptions = mergeOptions;
     this.m_queryId = -1;
 }
Example #4
 internal PartitionedStreamMerger(bool forEffectMerge, ParallelMergeOptions mergeOptions, TaskScheduler taskScheduler, bool outputOrdered, CancellationState cancellationState, int queryId)
 {
     this.m_forEffectMerge    = forEffectMerge;
     this.m_mergeOptions      = mergeOptions;
     this.m_isOrdered         = outputOrdered;
     this.m_taskScheduler     = taskScheduler;
     this.m_cancellationState = cancellationState;
     this.m_queryId           = queryId;
 }
Example #5
 public AutoMapperProfiles()
 {
     CreateMap <AppUser, MemberDto>()
     .ForMember(dest => dest.PhotoUrl, opt => opt.MapFrom(src =>
                                                          src.Photos.FirstOrDefault(x => x.IsMain).Url))
      .ForMember(dest => dest.Age, opt => opt.MapFrom(src => src.DateOfBirth.CalculateAge()));
     CreateMap <Photo, PhotoDto>();
     CreateMap <MemberUpdateDto, AppUser>();
     CreateMap <RegisterDto, AppUser>();
 }
Example #6
        /// <summary>
        /// Checks whether an ordered pipelining merge produces the correct output.
        /// </summary>
        private static void OrderedPipeliningTest1(int dataSize, bool buffered)
        {
            ParallelMergeOptions merge = buffered ? ParallelMergeOptions.FullyBuffered : ParallelMergeOptions.NotBuffered;

            IEnumerable <int> src = Enumerable.Range(0, dataSize);

            if (!Enumerable.SequenceEqual(src.AsParallel().AsOrdered().WithMergeOptions(merge).Select(x => x), src))
            {
                Assert.True(false, string.Format("OrderedPipeliningTest1: dataSize={0}, buffered={1}:  > FAILED: Incorrect output.", dataSize, buffered));
            }
        }
Example #7
        public static IParallelObservable <TSource> WithMergeOptions <TSource>(
            this IParallelObservable <TSource> source,
            ParallelMergeOptions mergeOptions
            )
        {
            ParallelBaseSubject <TSource> parallelsubject = (ParallelBaseSubject <TSource>)source;

            parallelsubject.ParallelMergeOptions_ = mergeOptions;
            parallelsubject.BuildScheduler();
            return(parallelsubject);
        }
Example #8
        //-----------------------------------------------------------------------------------
        // Constructs a new settings structure.
        //
        internal QuerySettings(TaskScheduler taskScheduler, int? degreeOfParallelism,
            CancellationToken externalCancellationToken, ParallelExecutionMode? executionMode,
            ParallelMergeOptions? mergeOptions)
        {
            _taskScheduler = taskScheduler;
            _degreeOfParallelism = degreeOfParallelism;
            _cancellationState = new CancellationState(externalCancellationToken);
            _executionMode = executionMode;
            _mergeOptions = mergeOptions;
            _queryId = -1;

            Contract.Assert(_cancellationState != null);
        }
Example #9
        /// <summary>
        /// Checks whether an ordered pipelining merge produces the correct output.
        /// </summary>
        private static bool OrderedPipeliningTest1(int dataSize, bool buffered)
        {
            TestHarness.TestLog("OrderedPipeliningTest1: dataSize={0}, buffered={1}", dataSize, buffered);
            ParallelMergeOptions merge = buffered ? ParallelMergeOptions.FullyBuffered : ParallelMergeOptions.NotBuffered;

            IEnumerable <int> src = Enumerable.Range(0, dataSize);

            if (!Enumerable.SequenceEqual(src.AsParallel().AsOrdered().WithMergeOptions(merge).Select(x => x), src))
            {
                TestHarness.TestLog("> FAILED: Incorrect output.");
                return(false);
            }

            return(true);
        }
Example #10
        //-----------------------------------------------------------------------------------
        // Creates and executes a new merge executor object.
        //
        // Arguments:
        //     partitions   - the partitions whose data will be merged into one stream
        //     ignoreOutput - if true, we are enumerating "for effect", and we won't actually
        //                    generate data in the output stream
        //     pipeline     - whether to use a pipelined merge or not.
        //     isOrdered    - whether to perform an ordering merge.
        //

        internal static MergeExecutor <TInputOutput> Execute <TKey>(
            PartitionedStream <TInputOutput, TKey> partitions, bool ignoreOutput, ParallelMergeOptions options, TaskScheduler taskScheduler, bool isOrdered,
            CancellationState cancellationState, int queryId)
        {
            Debug.Assert(partitions != null);
            Debug.Assert(partitions.PartitionCount > 0);
            Debug.Assert(!ignoreOutput || options == ParallelMergeOptions.FullyBuffered, "Pipelining with no output is not supported.");

            MergeExecutor <TInputOutput> mergeExecutor = new MergeExecutor <TInputOutput>();

            if (isOrdered && !ignoreOutput)
            {
                if (options != ParallelMergeOptions.FullyBuffered && !partitions.OrdinalIndexState.IsWorseThan(OrdinalIndexState.Increasing))
                {
                    Debug.Assert(options == ParallelMergeOptions.NotBuffered || options == ParallelMergeOptions.AutoBuffered);
                    bool autoBuffered = (options == ParallelMergeOptions.AutoBuffered);

                    if (partitions.PartitionCount > 1)
                    {
                        Debug.Assert(!ParallelEnumerable.SinglePartitionMode);
                        // We use a pipelining ordered merge
                        mergeExecutor._mergeHelper = new OrderPreservingPipeliningMergeHelper <TInputOutput, TKey>(
                            partitions, taskScheduler, cancellationState, autoBuffered, queryId, partitions.KeyComparer);
                    }
                    else
                    {
                        // When DOP=1, the default merge simply returns the single producer enumerator to the consumer. This way, ordering
                        // does not add any extra overhead, and no producer task needs to be scheduled.
                        mergeExecutor._mergeHelper = new DefaultMergeHelper <TInputOutput, TKey>(
                            partitions, false, options, taskScheduler, cancellationState, queryId);
                    }
                }
                else
                {
                    // We use a stop-and-go ordered merge helper
                    mergeExecutor._mergeHelper = new OrderPreservingMergeHelper <TInputOutput, TKey>(partitions, taskScheduler, cancellationState, queryId);
                }
            }
            else
            {
                // We use a default - unordered - merge helper.
                mergeExecutor._mergeHelper = new DefaultMergeHelper <TInputOutput, TKey>(partitions, ignoreOutput, options, taskScheduler, cancellationState, queryId);
            }

            mergeExecutor.Execute();
            return(mergeExecutor);
        }
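From the consumer's point of view, the choice made in Execute above is observable as the time until the first merged element arrives. A small sketch comparing the pipelining and fully buffered paths (the 1 ms per-element delay and the class below are illustrative assumptions, not part of the runtime code):

        using System;
        using System.Diagnostics;
        using System.Linq;
        using System.Threading;

        class MergeLatencyDemo
        {
            static TimeSpan TimeToFirstElement(ParallelMergeOptions options)
            {
                var sw = Stopwatch.StartNew();
                foreach (int x in ParallelEnumerable.Range(0, 1000)
                                                    .AsOrdered()
                                                    .WithMergeOptions(options)
                                                    .Select(i => { Thread.Sleep(1); return i; }))
                {
                    sw.Stop();
                    break;   // stop timing as soon as the first merged element is observed
                }
                return sw.Elapsed;
            }

            static void Main()
            {
                // NotBuffered selects the pipelining ordered merge, so the first element
                // arrives after only a small prefix has been produced.
                Console.WriteLine("NotBuffered:   {0}", TimeToFirstElement(ParallelMergeOptions.NotBuffered));

                // FullyBuffered selects the stop-and-go ordered merge, so the first element
                // only arrives after all 1000 results have been buffered.
                Console.WriteLine("FullyBuffered: {0}", TimeToFirstElement(ParallelMergeOptions.FullyBuffered));
            }
        }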
Example #11
		public QueryOptions (ParallelMergeOptions? options,
		                     ParallelExecutionMode? mode,
		                     CancellationToken token,
		                     bool useStrip,
		                     bool? behindOrderGuard,
		                     int partitionCount,
		                     CancellationToken implementerToken)
		{
			Options = options;
			Mode = mode;
			Token = token;
			UseStrip = useStrip;
			BehindOrderGuard = behindOrderGuard;
			PartitionCount = partitionCount;
			PartitionerSettings = null;
			ImplementerToken = implementerToken;
		}
Example #12
        /// <summary>
        /// Verifies that a pipelining merge does not create any helper tasks in the DOP=1 case.
        /// </summary>
        private static bool SequentialPipeliningTest(int inputSize, bool buffered, bool ordered)
        {
            TestHarness.TestLog("SequentialPipeliningTest: inputSize={0}, buffered={1}, ordered={2}", inputSize, buffered, ordered);
            ParallelMergeOptions merge = buffered ? ParallelMergeOptions.AutoBuffered : ParallelMergeOptions.NotBuffered;

            bool success          = true;
            int  consumerThreadId = System.Threading.Thread.CurrentThread.ManagedThreadId;

            System.Linq.ParallelQuery <int> src =
                System.Linq.ParallelEnumerable.Range(0, inputSize);
            if (ordered)
            {
                src = src.AsOrdered();
            }

            src =
                src.WithMergeOptions(merge)
                .WithDegreeOfParallelism(1)
                .Select(
                    x =>
            {
                if (System.Threading.Thread.CurrentThread.ManagedThreadId != consumerThreadId)
                {
                    success = false;
                }
                return(x);
            });

            foreach (var x in src)
            {
            }

            if (!success)
            {
                TestHarness.TestLog("> The producer task executed on a wrong thread.");
                return(false);
            }

            return(true);
        }
Example #13
 // FailingMergeData has enumerables that throw errors when attempting to perform the nth enumeration.
 // This test checks whether the query runs in a pipelined or buffered fashion.
 public static void Merge_Ordered_Pipelining(Labeled <ParallelQuery <int> > labeled, int count, ParallelMergeOptions options)
 {
     Assert.Equal(0, labeled.Item.WithDegreeOfParallelism(count - 1).WithMergeOptions(options).First());
 }
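The FailingMergeData member referenced above is not shown in this listing, so the sketch below only assumes its general shape: a source that throws once the nth element is pulled. If the merge pipelines, First() completes before the failing element is ever enumerated.

        using System;
        using System.Collections.Generic;
        using System.Linq;

        static class FailingSourceSketch
        {
            // Hypothetical stand-in for FailingMergeData: yields 0, 1, 2, ... but throws
            // as soon as the element at position 'failAt' is requested.
            static IEnumerable<int> ThrowingAt(int failAt)
            {
                for (int i = 0; ; i++)
                {
                    if (i == failAt)
                        throw new InvalidOperationException("deliberate failure at element " + i);
                    yield return i;
                }
            }

            static void Main()
            {
                int first = ThrowingAt(failAt: 1000000)
                                .AsParallel()
                                .AsOrdered()
                                .WithMergeOptions(ParallelMergeOptions.NotBuffered)
                                .First();

                Console.WriteLine(first);   // Expected: 0 -- the failing element is never reached
            }
        }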
Example #14
 public static void Merge_Ordered_Longrunning(Labeled <ParallelQuery <int> > labeled, int count, ParallelMergeOptions options)
 {
     Merge_Ordered(labeled, count, options);
 }
Example #15
        public static void Merge_Ordered(Labeled <ParallelQuery <int> > labeled, int count, ParallelMergeOptions options)
        {
            int seen = 0;

            foreach (int i in labeled.Item.WithMergeOptions(options).Select(i => i))
            {
                Assert.Equal(seen++, i);
            }
        }
Example #16
 public static ParallelQuery <TSource> WithMergeOptions <TSource>(this ParallelQuery <TSource> source, ParallelMergeOptions mergeOptions)
 {
     throw new NotImplementedException();
 }
Example #17
        internal static AsynchronousChannel <TInputOutput>[] MakeAsynchronousChannels(int partitionCount, ParallelMergeOptions options, CancellationToken cancellationToken)
        {
            AsynchronousChannel <TInputOutput>[] channelArray = new AsynchronousChannel <TInputOutput> [partitionCount];
            int chunkSize = 0;

            if (options == ParallelMergeOptions.NotBuffered)
            {
                chunkSize = 1;
            }
            for (int i = 0; i < channelArray.Length; i++)
            {
                channelArray[i] = new AsynchronousChannel <TInputOutput>(chunkSize, cancellationToken);
            }
            return(channelArray);
        }
Example #18
 public static void Merge_Ordered(Labeled<ParallelQuery<int>> labeled, int count, ParallelMergeOptions options)
 {
     int seen = 0;
     foreach (int i in labeled.Item.WithMergeOptions(options).Select(i => i))
     {
         Assert.Equal(seen++, i);
     }
 }
Example #19
 internal ParallelMergeOptionsNode(ParallelMergeOptions opts, QueryBaseNode <T> parent)
     : base(parent)
 {
     this.opts = opts;
 }
Example #20
        //-----------------------------------------------------------------------------------
        // This internal helper method is used to generate a set of asynchronous channels.
        // The algorithm used by each channel contains the necessary synchronization to
        // ensure it is suitable for pipelined consumption.
        //
        // Arguments:
        //     partitionCount - the number of partitions for which to create new channels.
        //
        // Return Value:
        //     An array of asynchronous channels, one for each partition.
        //

        internal static AsynchronousChannel<TInputOutput>[] MakeAsynchronousChannels(int partitionCount, ParallelMergeOptions options, IntValueEvent consumerEvent, CancellationToken cancellationToken)
        {
            AsynchronousChannel<TInputOutput>[] channels = new AsynchronousChannel<TInputOutput>[partitionCount];

            Debug.Assert(options == ParallelMergeOptions.NotBuffered || options == ParallelMergeOptions.AutoBuffered);
            TraceHelpers.TraceInfo("MergeExecutor::MakeChannels: setting up {0} async channels in prep for pipeline", partitionCount);

            // If we are pipelining, we need a channel that contains the necessary synchronization
            // in it. We choose a bounded/blocking channel data structure: bounded so that we can
            // limit the amount of memory overhead used by the query by putting a cap on the
            // buffer size into which producers place data, and blocking so that the consumer can
            // wait for additional data to arrive in the case that it's found to be empty.

            int chunkSize = 0; // 0 means automatic chunk size
            if (options == ParallelMergeOptions.NotBuffered)
            {
                chunkSize = 1;
            }

            for (int i = 0; i < channels.Length; i++)
            {
                channels[i] = new AsynchronousChannel<TInputOutput>(i, chunkSize, cancellationToken, consumerEvent);
            }

            return channels;
        }
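The bounded/blocking channel described in the comment above is an internal type; the sketch below only illustrates the same idea with the public BlockingCollection<T>, it is not the actual AsynchronousChannel implementation.

        using System.Collections.Concurrent;
        using System.Threading.Tasks;

        class BoundedChannelSketch
        {
            static void Main()
            {
                // Bounded: the capacity caps how far producers can run ahead of the consumer,
                // limiting memory overhead. Blocking: the consumer waits when the buffer is
                // empty, producers wait when it is full.
                var channel = new BlockingCollection<int>(boundedCapacity: 512);

                var producer = Task.Run(() =>
                {
                    for (int i = 0; i < 10000; i++)
                    {
                        channel.Add(i);        // blocks once the consumer falls 512 items behind
                    }
                    channel.CompleteAdding();  // no more data will arrive
                });

                foreach (int item in channel.GetConsumingEnumerable())
                {
                    // Items are consumed as they are produced -- a pipelined merge.
                }

                producer.Wait();
            }
        }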
Example #21
        /// <summary>
        /// Creates the merge expression.
        /// </summary>
        /// <param name="ParentExp">The parent expression to wrap with the WithMergeOptions call.</param>
        /// <param name="option">The merge option to apply.</param>
        /// <returns>A <see cref="MethodCallExpression"/> that applies the merge option to the parent expression.</returns>
        private MethodCallExpression DataItems_WithMergeOptions(MethodCallExpression ParentExp, ParallelMergeOptions option)
        {
            //get dataitems type
            var dictionaryType = DataItems.GetType();

            //get the generic arguments for the type
            var genericArguments = dictionaryType.GetGenericArguments();
            var keyType          = genericArguments[0];
            var elementType      = genericArguments[1];

            //get the method info
            var mi = typeof(ParallelEnumerable).GetMethods().Where(x => x.Name == "WithMergeOptions" && x.IsGenericMethod && x.GetParameters().Length == 2).FirstOrDefault();

            //create the generic method info
            var gen_mi = mi.MakeGenericMethod(elementType);

            //create the constant expression
            var param = Expression.Constant(option, option.GetType());

            //create the call
            var call = Expression.Call
            (
                method: gen_mi,
                arg0: ParentExp,
                arg1: param
            );

            return(call);
        }
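A standalone sketch of how an expression built this way can be compiled and invoked (the surrounding class and its DataItems field are not shown above, so this version targets a plain ParallelQuery<int> instead):

        using System;
        using System.Linq;
        using System.Linq.Expressions;

        class WithMergeOptionsExpressionSketch
        {
            static void Main()
            {
                // Locate the open generic WithMergeOptions<TSource>(source, mergeOptions) method.
                var mi = typeof(ParallelEnumerable).GetMethods()
                             .First(m => m.Name == "WithMergeOptions" && m.IsGenericMethod && m.GetParameters().Length == 2);
                var gen_mi = mi.MakeGenericMethod(typeof(int));

                // Build: source => ParallelEnumerable.WithMergeOptions(source, ParallelMergeOptions.NotBuffered)
                var sourceParam = Expression.Parameter(typeof(ParallelQuery<int>), "source");
                var optionConst = Expression.Constant(ParallelMergeOptions.NotBuffered, typeof(ParallelMergeOptions));
                var call        = Expression.Call(gen_mi, sourceParam, optionConst);

                var apply = Expression.Lambda<Func<ParallelQuery<int>, ParallelQuery<int>>>(call, sourceParam).Compile();

                // Invoke the compiled expression against a real parallel query.
                var query = apply(ParallelEnumerable.Range(0, 10).AsOrdered());
                Console.WriteLine(string.Join(",", query));
            }
        }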
Example #22
 // FailingMergeData has enumerables that throw errors when attempting to perform the nth enumeration.
 // This test checks whether the query runs in a pipelined or buffered fashion.
 public static void Merge_Ordered_Pipelining(Labeled<ParallelQuery<int>> labeled, int count, ParallelMergeOptions options)
 {
     Assert.Equal(0, labeled.Item.WithDegreeOfParallelism(count - 1).WithMergeOptions(options).First());
 }
Example #23
        public static ParallelQuery <TSource> WithMergeOptions <TSource>(ParallelQuery <TSource> source, ParallelMergeOptions mergeOptions)
        {
            Contract.Ensures(Contract.Result <System.Linq.ParallelQuery <TSource> >() != null);

            return(default(ParallelQuery <TSource>));
        }
Example #24
 public static void WithMergeOptions_Multiple(ParallelMergeOptions first, ParallelMergeOptions second)
 {
     Assert.Throws<InvalidOperationException>(() => ParallelEnumerable.Range(0, 1).WithMergeOptions(first).WithMergeOptions(second));
 }
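The test above pins down that WithMergeOptions may be specified at most once per query; a small usage sketch of the correct, single application:

        using System;
        using System.Linq;

        static class SingleMergeOptionSketch
        {
            static void Main()
            {
                int[] squares = ParallelEnumerable.Range(0, 100)
                                                  .WithMergeOptions(ParallelMergeOptions.AutoBuffered)   // set exactly once
                                                  .Select(x => x * x)
                                                  .ToArray();

                // Appending a second WithMergeOptions call to the same query would throw
                // InvalidOperationException, which is what WithMergeOptions_Multiple asserts.
                Console.WriteLine(squares.Length);
            }
        }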
Example #25
        // This test checks whether the query runs in a pipelined or buffered fashion.
        public static void Merge_Ordered_Pipelining_Select(Labeled <ParallelQuery <int> > labeled, int count, ParallelMergeOptions options)
        {
            int             countdown = count;
            Func <int, int> down      = i =>
            {
                if (Interlocked.Decrement(ref countdown) == 0)
                {
                    throw new DeliberateTestException();
                }
                return(i);
            };

            Assert.Equal(0, labeled.Item.WithDegreeOfParallelism(count - 1).WithMergeOptions(options).Select(down).First());
        }
Example #26
 public static void WithMergeOptions_Multiple(ParallelMergeOptions first, ParallelMergeOptions second)
 {
     Assert.Throws <InvalidOperationException>(() => ParallelEnumerable.Range(0, 1).WithMergeOptions(first).WithMergeOptions(second));
 }
Example #27
        /// <summary>
        /// Creates the merge expression.
        /// </summary>
        /// <param name="ParentExp">The parent expression to wrap with the WithMergeOptions call.</param>
        /// <param name="option">The merge option to apply.</param>
        /// <returns>A <see cref="MethodCallExpression"/> that applies the merge option to the parent expression.</returns>
        private MethodCallExpression DataItems_WithMergeOptions(MethodCallExpression ParentExp, ParallelMergeOptions option)
        {
            //get dataitems type
            var dictionaryType = DataItems.GetType();

            //get the generic arguments for the type
            var genericArguments = dictionaryType.GetGenericArguments();
            var keyType = genericArguments[0];
            var elementType = genericArguments[1];

            //get the method info
            var mi = typeof(ParallelEnumerable).GetMethods().Where(x => x.Name == "WithMergeOptions" && x.IsGenericMethod && x.GetParameters().Length == 2).FirstOrDefault();

            //create the generic method info
            var gen_mi = mi.MakeGenericMethod(elementType);

            //create the constant expression
            var param = Expression.Constant(option, option.GetType());
            
            //create the call
            var call = Expression.Call
            (
                method: gen_mi,
                arg0: ParentExp,
                arg1: param
            );
            return call;
        }
Example #28
        private bool m_ignoreOutput;                                       // Whether we're enumerating "for effect".

        //-----------------------------------------------------------------------------------
        // Instantiates a new merge helper.
        //
        // Arguments:
        //     partitions   - the source partitions from which to consume data.
        //     ignoreOutput - whether we're enumerating "for effect" or for output.
        //     pipeline     - whether to use a pipelined merge.
        //

        internal DefaultMergeHelper(PartitionedStream <TInputOutput, TIgnoreKey> partitions, bool ignoreOutput, ParallelMergeOptions options,
                                    TaskScheduler taskScheduler, CancellationState cancellationState, int queryId)
        {
            Contract.Assert(partitions != null);

            m_taskGroupState = new QueryTaskGroupState(cancellationState, queryId);
            m_partitions     = partitions;
            m_taskScheduler  = taskScheduler;
            m_ignoreOutput   = ignoreOutput;
            IntValueEvent consumerEvent = new IntValueEvent();

            TraceHelpers.TraceInfo("DefaultMergeHelper::.ctor(..): creating a default merge helper");

            // If output won't be ignored, we need to manufacture a set of channels for the consumer.
            // Otherwise, when the merge is executed, we'll just invoke the activities themselves.
            if (!ignoreOutput)
            {
                // Create the asynchronous or synchronous channels, based on whether we're pipelining.
                if (options != ParallelMergeOptions.FullyBuffered)
                {
                    if (partitions.PartitionCount > 1)
                    {
                        m_asyncChannels =
                            MergeExecutor <TInputOutput> .MakeAsynchronousChannels(partitions.PartitionCount, options, consumerEvent, cancellationState.MergedCancellationToken);

                        m_channelEnumerator = new AsynchronousChannelMergeEnumerator <TInputOutput>(m_taskGroupState, m_asyncChannels, consumerEvent);
                    }
                    else
                    {
                        // If there is only one partition, we don't need to create channels. The only producer enumerator
                        // will be used as the result enumerator.
                        m_channelEnumerator = ExceptionAggregator.WrapQueryEnumerator(partitions[0], m_taskGroupState.CancellationState).GetEnumerator();
                    }
                }
                else
                {
                    m_syncChannels =
                        MergeExecutor <TInputOutput> .MakeSynchronousChannels(partitions.PartitionCount);

                    m_channelEnumerator = new SynchronousChannelMergeEnumerator <TInputOutput>(m_taskGroupState, m_syncChannels);
                }

                Contract.Assert(m_asyncChannels == null || m_asyncChannels.Length == partitions.PartitionCount);
                Contract.Assert(m_syncChannels == null || m_syncChannels.Length == partitions.PartitionCount);
                Contract.Assert(m_channelEnumerator != null, "enumerator can't be null if we're not ignoring output");
            }
        }
Example #29
        internal DefaultMergeHelper(PartitionedStream <TInputOutput, TIgnoreKey> partitions, bool ignoreOutput, ParallelMergeOptions options, TaskScheduler taskScheduler, CancellationState cancellationState, int queryId)
        {
            this.m_taskGroupState = new QueryTaskGroupState(cancellationState, queryId);
            this.m_partitions     = partitions;
            this.m_taskScheduler  = taskScheduler;
            this.m_ignoreOutput   = ignoreOutput;
            if (!ignoreOutput)
            {
                if (options != ParallelMergeOptions.FullyBuffered)
                {
                    if (partitions.PartitionCount > 1)
                    {
                        this.m_asyncChannels = MergeExecutor <TInputOutput> .MakeAsynchronousChannels(partitions.PartitionCount, options, cancellationState.MergedCancellationToken);

                        this.m_channelEnumerator = new AsynchronousChannelMergeEnumerator <TInputOutput>(this.m_taskGroupState, this.m_asyncChannels);
                    }
                    else
                    {
                        this.m_channelEnumerator = ExceptionAggregator.WrapQueryEnumerator <TInputOutput, TIgnoreKey>(partitions[0], this.m_taskGroupState.CancellationState).GetEnumerator();
                    }
                }
                else
                {
                    this.m_syncChannels = MergeExecutor <TInputOutput> .MakeSynchronousChannels(partitions.PartitionCount);

                    this.m_channelEnumerator = new SynchronousChannelMergeEnumerator <TInputOutput>(this.m_taskGroupState, this.m_syncChannels);
                }
            }
        }
Example #30
 public static void Merge_Ordered_Longrunning(Labeled<ParallelQuery<int>> labeled, int count, ParallelMergeOptions options)
 {
     Merge_Ordered(labeled, count, options);
 }
Example #31
        internal static MergeExecutor <TInputOutput> Execute <TKey>(PartitionedStream <TInputOutput, TKey> partitions, bool ignoreOutput, ParallelMergeOptions options, TaskScheduler taskScheduler, bool isOrdered, CancellationState cancellationState, int queryId)
        {
            MergeExecutor <TInputOutput> executor = new MergeExecutor <TInputOutput>();

            if (isOrdered && !ignoreOutput)
            {
                if ((options != ParallelMergeOptions.FullyBuffered) && !partitions.OrdinalIndexState.IsWorseThan(OrdinalIndexState.Increasing))
                {
                    bool autoBuffered = options == ParallelMergeOptions.AutoBuffered;
                    if (partitions.PartitionCount > 1)
                    {
                        executor.m_mergeHelper = new OrderPreservingPipeliningMergeHelper <TInputOutput>((PartitionedStream <TInputOutput, int>)partitions, taskScheduler, cancellationState, autoBuffered, queryId);
                    }
                    else
                    {
                        executor.m_mergeHelper = new DefaultMergeHelper <TInputOutput, TKey>(partitions, false, options, taskScheduler, cancellationState, queryId);
                    }
                }
                else
                {
                    executor.m_mergeHelper = new OrderPreservingMergeHelper <TInputOutput, TKey>(partitions, taskScheduler, cancellationState, queryId);
                }
            }
            else
            {
                executor.m_mergeHelper = new DefaultMergeHelper <TInputOutput, TKey>(partitions, ignoreOutput, options, taskScheduler, cancellationState, queryId);
            }
            executor.Execute();
            return(executor);
        }
Example #32
 // This test checks whether the query runs in a pipelined or buffered fashion.
 public static void Merge_Ordered_Pipelining_Select(Labeled<ParallelQuery<int>> labeled, int count, ParallelMergeOptions options)
 {
     int countdown = count;
     Func<int, int> down = i =>
     {
         if (Interlocked.Decrement(ref countdown) == 0) throw new DeliberateTestException();
         return i;
     };
     Assert.Equal(0, labeled.Item.WithDegreeOfParallelism(count - 1).WithMergeOptions(options).Select(down).First());
 }
Example #33
 public static ParallelQuery <TSource> WithMergeOptions <TSource>(this ParallelQuery <TSource> source, ParallelMergeOptions mergeOptions);