public void Run2()
{
    var nums = Enumerable.Range(0, 10000000).ToArray();

    // Enable load balancing, which tells PLINQ to use chunk (dynamic) partitioning.
    Partitioner<int> customPartitioner = Partitioner.Create(nums, true);

    long startTime = DateTime.Now.Ticks; // 1 tick = 100 nanoseconds
    var q = (from x in customPartitioner.AsParallel()
             select x * Math.PI).ToArray();
    long endTime = DateTime.Now.Ticks; // 1 tick = 100 nanoseconds
    Console.WriteLine("Time1 : " + (endTime - startTime).ToString());

    // From what I understand, PLINQ chooses range or chunk partitioning depending on whether
    // the source sequence is an IList or not. If it is an IList, the bounds are known and
    // elements can be accessed by index, so PLINQ chooses range partitioning to divide the
    // list evenly between threads. For instance, if you have 1000 items in your list and you
    // use 4 threads, each thread will have 250 items to process. On the other hand, if the
    // source sequence is not an IList, PLINQ can't use range partitioning because it doesn't
    // know what the ranges would be, so it uses chunk partitioning instead.
    // In your case, if you have an IList and you want to force chunk partitioning, you
    // can just make it look like a simple IEnumerable: instead of writing this:
}
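The comment above breaks off before its example. As an illustration only (not the original author's code), one way to make an IList "look like a simple IEnumerable" is to wrap it in an identity Select, so PLINQ no longer sees IList<int> and falls back to chunk partitioning:

// A minimal sketch, assuming the same kind of integer array as above.
var nums = Enumerable.Range(0, 10000000).ToArray();           // IList<int>: range partitioning by default
IEnumerable<int> hidden = nums.Select(x => x);                 // iterator: no longer an IList<int>
var chunked = (from x in hidden.AsParallel()
               select x * Math.PI).ToArray();                  // now processed with chunk partitioning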
public static int MaxPlinqPartitioned(this int[] source)
{
    // Create a load-balancing partitioner. Or specify false for static partitioning.
    Partitioner<int> customPartitioner = Partitioner.Create(source, true);
    return customPartitioner.AsParallel().Max();
}
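A possible call site for the extension above (the containing static class and namespace are assumed to be in scope):

// Hypothetical usage of the MaxPlinqPartitioned extension defined above.
int[] data = Enumerable.Range(0, 1000000).ToArray();
Console.WriteLine(data.MaxPlinqPartitioned()); // prints 999999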
internal static void ChunkPartitioningForPartitioner()
{
    const int ChunkRepeatCount = 3;
    Partitioner<int> partitioner = Partitioner.Create(
        Enumerable.Range(0, (1 + 2) * ChunkRepeatCount * Environment.ProcessorCount + 4));
    // Visualize, ComputingWorkload, and WriteLines are helper methods defined elsewhere in the sample.
    partitioner.AsParallel()
        .Visualize(ParallelEnumerable.Select, value => value + ComputingWorkload())
        .WriteLines();
}
private static void ParseLogsParallel(
    Partitioner<string> logsPartitioner,
    out ConcurrentBag<string> stackTraces,
    out long workingTime)
{
    var parallelTimer = Stopwatch.StartNew();
    var parallelStackTraces = new ConcurrentBag<string>();

    // ProcessLine is defined elsewhere in the sample; results are collected in a thread-safe ConcurrentBag.
    logsPartitioner.AsParallel()
        .ForAll(line => ProcessLine(line, ref parallelStackTraces));

    stackTraces = parallelStackTraces;
    workingTime = parallelTimer.ElapsedMilliseconds;
}
static void ParallelLoopsWithPartitioner()
{
    //<snippet02>
    // Static partitioning requires an indexable source. Load balancing
    // can use any IEnumerable.
    var nums = Enumerable.Range(0, 100000000).ToArray();

    // Create a load-balancing partitioner. Or specify false for static partitioning.
    Partitioner<int> customPartitioner = Partitioner.Create(nums, true);

    // The partitioner is the query's data source.
    var q = from x in customPartitioner.AsParallel()
            select x * Math.PI;
    q.ForAll((x) => { ProcessData(x); });
    //</snippet02>

    Stopwatch sw = Stopwatch.StartNew();
    // Must be a load-balancing partitioner for this simple data source.
    // Partitioner<int> p = Partitioner.Create(nums, true);
    Parallel.ForEach(customPartitioner, (x) =>
    {
        double d = (double)x * Math.PI;
    });
    Console.WriteLine("elapsed for Parallel.ForEach: {0}", sw.ElapsedMilliseconds);

    sw = Stopwatch.StartNew();
    customPartitioner = Partitioner.Create(nums, true);
    var q7 = from x in customPartitioner.AsParallel()
             select x; // * Math.PI;
    q7.ForAll((x) => { double d = (double)x * Math.PI; });
    Console.WriteLine("elapsed for PLINQ with load-balancing: {0}", sw.ElapsedMilliseconds);

    // Console.WriteLine("Clearing memory cache");
    // for (int i = 0; i < arr.Length; i++)
    //     arr[i] = Math.PI;

    sw = Stopwatch.StartNew();
    customPartitioner = Partitioner.Create(nums, false);
    var q2 = from x in customPartitioner.AsParallel()
             select x; // * Math.PI;
    q2.ForAll((x) => { double d = (double)x * Math.PI; });
    Console.WriteLine("elapsed for PLINQ without load-balancing: {0}", sw.ElapsedMilliseconds);
}
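For contrast with the load-balancing runs above, here is a minimal sketch (assuming the same nums array) of static range partitioning with Parallel.ForEach: Partitioner.Create(0, nums.Length) hands each worker a contiguous index range, so a cheap loop body like this one pays one delegate call per range instead of one per element.

// Range partitioning: each Tuple<int, int> is a contiguous [fromInclusive, toExclusive) slice of the indices.
Parallel.ForEach(Partitioner.Create(0, nums.Length), range =>
{
    for (int i = range.Item1; i < range.Item2; i++)
    {
        double d = nums[i] * Math.PI;
    }
});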