Example #1
        /// <inheritdoc />
        public override Possible<ISourceFile>[] ParseAndBindSpecs(SpecWithOwningModule[] specs)
        {
            // It is very important to dispose the cancellation registration for the parse/bind case as well.
            cancellationTokenChain.Dispose();

            // Not using a queue here for now.
            var result = ParallelAlgorithms.ParallelSelect(
                specs,
                spec =>
            {
                // Parsing and binding the given spec.
                return
                (TryParseSpec(spec).GetAwaiter().GetResult()
                 .Then(ps =>
                {
                    BindSourceFile(new ParsedSpecWithOwningModule(parsedFile: ps, owningModule: spec.OwningModule));

                    return ps.BindDiagnostics.Count == 0 ? new Possible<ISourceFile>(ps) : new BindingFailure(spec.OwningModule.Descriptor, ps);
                }));
            },
                DegreeOfParallelism,
                CancellationToken);

            return(result.ToArray());
        }
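ParallelSelect maps each input to an output slot under a bounded degree of parallelism, which is what lets the parse/bind results above come back as an ordered array. A minimal sketch of that shape, an order-preserving parallel map with an illustrative signature (not BuildXL's actual implementation):

        public static TResult[] ParallelSelectSketch<T, TResult>(
            IReadOnlyList<T> items,
            Func<T, TResult> selector,
            int degreeOfParallelism,
            CancellationToken token = default)
        {
            // Each index writes only its own slot, so no locking is needed and
            // the output order matches the input order.
            var results = new TResult[items.Count];
            Parallel.For(
                0,
                items.Count,
                new ParallelOptions { MaxDegreeOfParallelism = degreeOfParallelism, CancellationToken = token },
                i => results[i] = selector(items[i]));
            return results;
        }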
Example #2
        public void Test_ParallelAlgorithms_SpeculativeInvoke_Invoke()
        {
            // Run two tasks in parallel; return the result of whichever delegate completes first.
            var result = ParallelAlgorithms.SpeculativeInvoke(() => 1, () => { Thread.Sleep(2000); return(3); });

            Assert.AreEqual(1, result);
        }
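SpeculativeInvoke races several delegates and returns the first result, which is why the test expects 1 rather than 3. A sketch of those semantics with first-writer-wins publication (illustrative only, not the library's implementation):

        public static T SpeculativeInvokeSketch<T>(params Func<T>[] functions)
        {
            object result = null;
            using (var cts = new CancellationTokenSource())
            {
                var options = new ParallelOptions { CancellationToken = cts.Token };
                try
                {
                    Parallel.ForEach(functions, options, f =>
                    {
                        // The first finisher publishes its (boxed) result and cancels the rest.
                        Interlocked.CompareExchange(ref result, f(), null);
                        cts.Cancel();
                    });
                }
                catch (OperationCanceledException)
                {
                    // Expected: losing delegates observe the cancelled token.
                }
            }
            // Note: delegates that already started still run to completion first.
            return (T)result;
        }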
Example #3
    private static int ParallelEditDistance(string s1, string s2)
    {
        int[,] dist = new int[s1.Length + 1, s2.Length + 1];
        for (int i = 0; i <= s1.Length; i++)
        {
            dist[i, 0] = i;
        }
        for (int j = 0; j <= s2.Length; j++)
        {
            dist[0, j] = j;
        }
        int numBlocks = Environment.ProcessorCount * 4;

        ParallelAlgorithms.Wavefront(
            s1.Length, s2.Length,
            numBlocks, numBlocks,
            (start_i, end_i, start_j, end_j) =>
        {
            for (int i = start_i + 1; i <= end_i; i++)
            {
                for (int j = start_j + 1; j <= end_j; j++)
                {
                    dist[i, j] = (s1[i - 1] == s2[j - 1]) ?
                                 dist[i - 1, j - 1] :
                                 1 + Math.Min(dist[i - 1, j],
                                              Math.Min(dist[i, j - 1],
                                                       dist[i - 1, j - 1]));
                }
            }
        });

        return(dist[s1.Length, s2.Length]);
    }
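Wavefront schedules the blocks in anti-diagonal order, so every block sees its left, upper, and upper-left neighbors already computed, exactly the dependency pattern of this recurrence. For cross-checking the parallel result on small inputs, a plain sequential form of the same recurrence (a hypothetical test helper):

    private static int SequentialEditDistance(string s1, string s2)
    {
        int[,] dist = new int[s1.Length + 1, s2.Length + 1];
        for (int i = 0; i <= s1.Length; i++) dist[i, 0] = i;
        for (int j = 0; j <= s2.Length; j++) dist[0, j] = j;

        for (int i = 1; i <= s1.Length; i++)
        {
            for (int j = 1; j <= s2.Length; j++)
            {
                // Same recurrence as above: a match is free, otherwise 1 + best neighbor.
                dist[i, j] = (s1[i - 1] == s2[j - 1])
                    ? dist[i - 1, j - 1]
                    : 1 + Math.Min(dist[i - 1, j], Math.Min(dist[i, j - 1], dist[i - 1, j - 1]));
            }
        }
        return dist[s1.Length, s2.Length];
    }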
Example #4
        public void Test_ParallelAlgorithms_SpeculativeFor_ArrayForEach()
        {
            var list = new int[] { 100, 4, 5, 56, 23, 2, 1, 0, -99, 456, 234, 11, 9999, 44, 2 };
            // Internally runs a parallel loop, using CAS to replace the value at a position in the array.
            var result = ParallelAlgorithms.SpeculativeForEach(list, e => e);

            Assert.IsTrue(true);
        }
Example #5
        public void Test_ParallelAlgorithms_Scan_Task_ScanInPlace()
        {
            var list = "a bb a bb cc Beim ersten Aufruf von FailMethod von FailMethod".Split(new char[] { ' ' }).ToList();

            ParallelAlgorithms.ScanInPlace(list.ToArray(), (e1, e2) => e1 + e2);

            Assert.IsTrue(true);
        }
Example #6
        public void Test_ParallelAlgorithms_ForRange_Add()
        {
            var start      = 0;
            var loopResult = ParallelAlgorithms
                             // e1 and e2 are the bounds of the range partition; e3 is the thread-local value seeded from start.
                             .ForRange<int>(0, 5, () => start, (e1, e2, stat, e3) => e3 = e1 + e2, e => { start = e; });

            Assert.AreEqual(start, start);
        }
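Note that the assertion above compares start with itself, so the test only verifies that ForRange completes. ForRange-style helpers are commonly thin wrappers over Parallel.ForEach with Partitioner.Create; here is a range-partitioned aggregation with an observable result, sketched with those built-ins:

        int total = 0;
        Parallel.ForEach(
            Partitioner.Create(0, 1000),                 // split [0, 1000) into contiguous ranges
            () => 0,                                     // per-task local seed
            (range, state, local) =>
            {
                for (int i = range.Item1; i < range.Item2; i++)
                {
                    local += i;                          // aggregate locally, no contention
                }
                return local;
            },
            local => Interlocked.Add(ref total, local)); // merge each task's local exactly once
        // total == 499500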
Example #7
        public void Test_ParallelAlgorithms_WhileNotEmpty_While()
        {
            var list = new int[] { 100, 4, 5, 56, 23, 2, 1, 0, -99, 456, 234, 11, 9999, 44, 2 };

            // To investigate: each callback re-adds an item (e2(0)), so it is unclear how this loop ever terminates.
            ParallelAlgorithms.WhileNotEmpty(list, (e1, e2) => e2(0));

            Assert.IsTrue(true);
        }
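WhileNotEmpty processes the current batch in parallel and treats anything the callbacks add as the next batch, stopping once a round adds nothing (see Examples #18, #19, and #22 for realistic uses). A minimal sketch of those semantics, illustrative only:

        public static void WhileNotEmptySketch<T>(IEnumerable<T> initial, Action<T, Action<T>> body)
        {
            var current = new ConcurrentQueue<T>(initial);
            while (!current.IsEmpty)
            {
                // Items added during this round are deferred to the next round.
                var next = new ConcurrentQueue<T>();
                Parallel.ForEach(current, item => body(item, next.Enqueue));
                current = next;
            }
        }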
Example #8
        public void Test_ParallelAlgorithms_Sort_ArraySort()
        {
            var list = new int[] { 100, 4, 5, 56, 23, 2, 1, 0, -99, 456, 234, 11, 9999, 44, 2 };

            ParallelAlgorithms.Sort(list);

            Assert.AreEqual(-99, list[0]);
            Assert.AreEqual(9999, list[list.Length - 1]);
        }
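A common way to build an in-place parallel sort is a recursive quicksort that forks the two partitions and falls back to Array.Sort below a threshold. A sketch under those assumptions (not necessarily this library's algorithm):

        static void ParallelQuickSort<T>(T[] a, int lo, int hi, IComparer<T> cmp)
        {
            const int Threshold = 2048;                  // below this, sequential sort wins
            if (hi - lo <= Threshold)
            {
                Array.Sort(a, lo, hi - lo, cmp);
                return;
            }

            // Lomuto partition around the last element of [lo, hi).
            T pivot = a[hi - 1];
            int store = lo;
            for (int i = lo; i < hi - 1; i++)
            {
                if (cmp.Compare(a[i], pivot) < 0)
                {
                    (a[i], a[store]) = (a[store], a[i]);
                    store++;
                }
            }
            (a[store], a[hi - 1]) = (a[hi - 1], a[store]);

            // The two partitions are disjoint, so they can be sorted concurrently.
            Parallel.Invoke(
                () => ParallelQuickSort(a, lo, store, cmp),
                () => ParallelQuickSort(a, store + 1, hi, cmp));
        }
        // Usage: ParallelQuickSort(list, 0, list.Length, Comparer<int>.Default);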
Example #9
        public void Test_ParallelAlgorithms_Scan_Task_Scan()
        {
            var list = "a bb a bb cc Beim ersten Aufruf von FailMethod von FailMethod".Split(new char[] { ' ' }).ToList();
            // Prefix-scan the elements of the collection.
            var result = ParallelAlgorithms.Scan(list, (e1, e2) => e1 + e2);

            Assert.AreEqual("a", result[0]);
            Assert.AreEqual("abb", result[1]);
        }
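The assertions pin down the contract: an inclusive prefix scan, where result[i] is the left-to-right fold of the first i + 1 elements. A sequential reference implementation for comparison (hypothetical helper):

        static T[] InclusiveScan<T>(IReadOnlyList<T> input, Func<T, T, T> combine)
        {
            var result = new T[input.Count];
            for (int i = 0; i < input.Count; i++)
            {
                result[i] = i == 0 ? input[0] : combine(result[i - 1], input[i]);
            }
            return result;
        }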
Example #10
        public List <bool> HashRecLookup(HashRec[] hashArr)
        {
            int Count = hashArr.Length;
            var rv    = new List <bool>(Count);

            ParallelAlgorithms.Sort <HashRec>(hashArr, 0, Count, GetICompareer <HashRec>(SortByDBSizeMask));

            using (var fs = new FileStream(DBFile, FileMode.Open, FileAccess.ReadWrite, FileShare.ReadWrite, DB_READ_SIZE))
            {
                // we need 2 pages now since we're block-reading and we might pick a hash whose scan
                // starts at the very end of a page
                byte[] buff = new byte[DB_READ_SIZE];
                byte[] zero = new byte[HASH_REC_BYTES];
                int    i = 0, firstIndex = 0;

                do
                {
                    var Index = hashArr[i].Index;
                    // convert Index to PageIndex
                    var DBPage = (ulong)((Index & SortMask) & ~DB_PAGE_MASK);

                    // find block offset for this hash
                    fs.Seek((long)DBPage, SeekOrigin.Begin);
                    fs.Read(buff, 0, DB_READ_SIZE);

                    do
                    {
                        // re-read Index since we could be on the inner loop
                        Index = hashArr[i].Index;
                        // Index inside of a page
                        var PageIndex = Index & DB_PAGE_MASK;

                        // Hash to populate the DB with
                        var toRead = BitConverter.GetBytes(hashArr[i].CompressedHash);

                        // do we already have this hash from disk?
                        firstIndex = buff.SearchBytes(toRead, (int)PageIndex, toRead.Length);
                        if (firstIndex >= 0)
                        {
                            rv.Add(true);
                        }
                        else
                        {
                            rv.Add(false);
                        }

                        i++;

                        // continue to next entry if it's in the same block
                    } while (i < Count && (((hashArr[i].Index & SortMask) & ~DB_PAGE_MASK) == DBPage));
                } while (i < Count);
            }
            return(rv);
        }
Example #11
        public Task PutSameContentManyTimesTest(bool useRedundantPutFileShortcut)
        {
            var context = new Context(Logger);

            ContentStoreSettings = new ContentStoreSettings()
            {
                UseRedundantPutFileShortcut = useRedundantPutFileShortcut
            };

            return(TestStore(context, Clock, async store =>
            {
                byte[] bytes = ThreadSafeRandom.GetBytes(ValueSize);
                ContentHash contentHash = bytes.CalculateHash(ContentHashType);

                // Verify content doesn't exist yet in store
                Assert.False(await store.ContainsAsync(context, contentHash, null));

                using (var tempDirectory = new DisposableDirectory(FileSystem))
                {
                    ContentHash hashFromPut;
                    using (var pinContext = store.CreatePinContext())
                    {
                        var concurrency = 24;
                        var iterations = 100;

                        var items = Enumerable.Range(0, concurrency).Select(i =>
                        {
                            AbsolutePath pathToContent = tempDirectory.Path / $"tempContent{i}.txt";
                            FileSystem.WriteAllBytes(pathToContent, bytes);
                            return (pathToContent, iterations);
                        }).ToArray();

                        await ParallelAlgorithms.WhenDoneAsync(24, CancellationToken.None, async(scheduleItem, item) =>
                        {
                            // Put the content into the store w/ hard link
                            var r = await store.PutFileAsync(
                                context, item.pathToContent, FileRealizationMode.Any, ContentHashType, new PinRequest(pinContext));
                            hashFromPut = r.ContentHash;
                            Clock.Increment();
                            Assert.True(pinContext.Contains(hashFromPut));

                            if (item.iterations != 0)
                            {
                                scheduleItem((item.pathToContent, item.iterations - 1));
                            }
                        },
                                                               items);
                    }
                }
            }));
        }
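WhenDoneAsync is the workhorse in several of these examples: it runs up to N asynchronous workers over a work list, hands each callback a scheduleItem delegate for enqueuing follow-up work (the decrementing iterations above), and completes when everything drains. A simplified sketch of that contract; this is illustrative, not BuildXL's implementation, and it omits the bookkeeping needed to keep workers alive while peers may still schedule items:

        public static Task WhenDoneAsyncSketch<T>(
            int degreeOfParallelism,
            CancellationToken token,
            Func<Action<T>, T, Task> action,
            params T[] items)
        {
            var queue = new ConcurrentQueue<T>(items);
            var workers = Enumerable.Range(0, degreeOfParallelism).Select(_ => Task.Run(async () =>
            {
                // Simplification: a worker exits when it sees an empty queue,
                // even though another worker might still add items.
                while (!token.IsCancellationRequested && queue.TryDequeue(out var item))
                {
                    await action(queue.Enqueue, item);
                }
            }));
            return Task.WhenAll(workers);
        }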
Example #12
        private IReadOnlyCollection <FileModuleLiteral> ConvertWorkspaceInParallel(Workspace workspace, AbsolutePath configPath)
        {
            var package       = CreateDummyPackageFromPath(configPath);
            var parserContext = CreateParserContext(resolver: null, package: package, origin: null);

            // Need to use ConfigurationModule and not a set of source specs,
            // because we are converting the configuration, which is not a source spec.
            Contract.Assert(workspace.ConfigurationModule != null);
            var specs = workspace.ConfigurationModule.Specs.ToList();

            return(ParallelAlgorithms.ParallelSelect(
                       specs,
                       kvp => ConvertAndRegisterSourceFile(parserContext, workspace, sourceFile: kvp.Value, path: kvp.Key, isConfig: kvp.Key == configPath),
                       DegreeOfParallelism));
        }
Example #13
        public void Test_ParallelAlgorithms_Map_ParallelAlgorithms_Reduce_ArrayMaxOrMinValue_MR()
        {
            // MapReduce: find the maximum value in the array.
            var listmaxValueV3 = new List <int> {
                1, 2, 3, 4, 5, 6, 76, 87, 9, 10, 0, 98, 23, 5, 12, 3453, 34, 56, 6, 23, 1, 34, 89
            };
            var inputMaxList3 = ParallelAlgorithms.Map(listmaxValueV3, e => new List <int> {
                e
            });
            var maxValue3 = ParallelAlgorithms.Reduce(0, inputMaxList3.Length, new ParallelOptions {
                MaxDegreeOfParallelism = Environment.ProcessorCount
            }, /* parameter e is an index into the collection */ e => listmaxValueV3[e], 0, (e, e1) => { return Math.Max(e1, e); });

            Assert.AreEqual(3453, maxValue3);
        }
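For comparison, the same max-reduction with built-in PLINQ; Aggregate with a seed and an associative, commutative combiner is the idiomatic equivalent (seeding with 0 is safe here because all values are non-negative):

            var maxViaPlinq = listmaxValueV3.AsParallel().Aggregate(0, (acc, e) => Math.Max(acc, e));
            Assert.AreEqual(3453, maxViaPlinq);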
Example #14
        private static void SortAndFilterItems(ParallelState state, out HashSet <object>[] uniqueFilterItems, out List <object> items)
        {
            int filtersCount = state.ValueProvider.GetFiltersCount();

            uniqueFilterItems = new HashSet <object> [filtersCount];
            for (int i = 0; i < filtersCount; i++)
            {
                uniqueFilterItems[i] = new HashSet <object>();
            }


            if (filtersCount > 0)
            {
                items = new List <object>();

                foreach (var item in state.DataView.InternalList)
                {
                    object[] filterItems = state.ValueProvider.GetFilterItems(item);
                    for (int i = 0; i < uniqueFilterItems.Length; i++)
                    {
                        uniqueFilterItems[i].Add(filterItems[i]);
                    }

                    bool passesFilter = state.ValueProvider.PassesFilter(filterItems);
                    if (passesFilter)
                    {
                        items.Add(item);
                    }
                }
            }
            else
            {
                items = new List <object>(state.DataView.InternalList);
            }

            state.Items = items;

            var sortComparer = state.ValueProvider.GetSortComparer();

            if (sortComparer != null)
            {
                ParallelAlgorithms.Sort(items, sortComparer);
            }
        }
Example #15
        public async Task TestParallelAlgorithmsCancellationTokenAsync()
        {
            // cancel after 2 seconds
            var cts = new CancellationTokenSource();

            cts.CancelAfter(TimeSpan.FromSeconds(2));

            // run something that never ends in parallel
            await ParallelAlgorithms.WhenDoneAsync(
                degreeOfParallelism : 20,
                cts.Token,
                action : (scheduleItem, item) =>
            {
                // keep rescheduling the same item forever
                scheduleItem(item);
                return(Task.Delay(TimeSpan.FromMilliseconds(10)));
            },
                items : Enumerable.Range(0, 1000));

            XAssert.IsTrue(cts.IsCancellationRequested);
        }
Example #16
        /// <summary>
        /// Starts by parsing <paramref name="configPath"/>, recursively continuing to parse any files imported via an 'importFile' call.
        ///
        /// Any errors are logged to <see cref="Logger"/>.
        ///
        /// Returns a map of parsed files; the result is never null, but in case of an error the content may be unspecified.
        /// </summary>
        private async Task <IReadOnlyDictionary <AbsolutePath, ISourceFile> > ParseConfigFileAsync(AbsolutePath configPath)
        {
            // Set of specs being processed or queued for processing
            var queuedSpecs = new ConcurrentDictionary <AbsolutePath, Unit>()
            {
                { configPath, Unit.Void }
            };

            // Set of parsed files
            var result = new ConcurrentDictionary <AbsolutePath, ISourceFile>();
            await ParallelAlgorithms.WhenDoneAsync(
                DegreeOfParallelism,
                Context.CancellationToken,
                async (addItem, path) =>
            {
                // TODO: File bug to ensure we fail on errors.
                var parseResult = await ParseFileAndDiscoverImportsAsync(path);

                var numberOfProcessedConfigs = FrontEndStatistics.ConfigurationProcessing.Increment();

                NotifyProgress(numberOfProcessedConfigs);

                result[path] = parseResult.Source;

                if (parseResult.Imports?.Count > 0)
                {
                    foreach (var dependency in parseResult.Imports)
                    {
                        // Add the dependency for parsing only if the dependency was not processed or scheduled for processing.
                        if (queuedSpecs.TryAdd(dependency, Unit.Void))
                        {
                            addItem(dependency);
                        }
                    }
                }
            },
                configPath);

            return(result.ToDictionary(kvp => kvp.Key, kvp => kvp.Value));
        }
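The queuedSpecs.TryAdd guard is what keeps this traversal from parsing a file twice: ConcurrentDictionary.TryAdd returns true exactly once per key, even under concurrent callers. A minimal illustration of the idiom:

        var seen = new ConcurrentDictionary<string, byte>();
        Parallel.ForEach(new[] { "a.dsc", "a.dsc", "b.dsc" }, path =>
        {
            if (seen.TryAdd(path, 0))
            {
                Console.WriteLine($"schedule {path}"); // each distinct path is scheduled once
            }
        });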
Example #17
 private async Task UploadFilesAsync(OperationContext context, List <AbsolutePath> files, ConcurrentDictionary <string, string> newCheckpointInfo, string incrementalCheckpointsPrefix)
 {
     if (_configuration.IncrementalCheckpointDegreeOfParallelism <= 1)
     {
         foreach (var file in files)
         {
             await UploadOrTouchFileAsync(context, file, newCheckpointInfo, incrementalCheckpointsPrefix);
         }
     }
     else
     {
         await ParallelAlgorithms.WhenDoneAsync(
             _configuration.IncrementalCheckpointDegreeOfParallelism,
             context.Token,
             action : async(addItem, file) =>
         {
             // Intentionally using async/await to generate a state machine that will have the current method name in it (to simplify postmortem).
             await UploadOrTouchFileAsync(context, file, newCheckpointInfo, incrementalCheckpointsPrefix);
         },
             items : files.ToArray());
     }
 }
Example #18
        /// <summary>
        /// Recursively goes down each directory and collects project files. The search stops in directories that contain
        /// a project configuration or a configuration file.
        /// </summary>
        private static void CollectAllPathsToProjectsRecursively(IFileSystem fileSystem, AbsolutePath pathToPackageDirectory,
                                                                 List <AbsolutePath> projects)
        {
            var pathTable = fileSystem.GetPathTable();

            Action <AbsolutePath, Action <AbsolutePath> > collectPackages = (directory, adder) =>
            {
                if (!IsWellKnownConfigurationFileExists(directory, pathTable, fileSystem))
                {
                    CollectAllPathsToProjects(fileSystem, directory, projects);
                    var subDirectories = fileSystem.EnumerateDirectories(directory);
                    foreach (var subDirectory in subDirectories)
                    {
                        adder(subDirectory);
                    }
                }
            };

            ParallelAlgorithms.WhileNotEmpty(
                fileSystem.EnumerateDirectories(pathToPackageDirectory),
                collectPackages);
        }
Example #19
        private void ParallelVisitNode(INode node, DiagnosticContext context)
        {
            ParallelAlgorithms.WhileNotEmpty(new[] { node }, (item, adder) =>
            {
                // Only non-injected nodes are checked by the linter.
                if (item.IsInjectedForDScript())
                {
                    return;
                }

                Handle(item, context);
                using (var list = NodeWalker.GetChildrenFast(item))
                {
                    foreach (var child in list.Instance)
                    {
                        foreach (var e in child)
                        {
                            adder(e);
                        }
                    }
                }
            });
        }
Example #20
 private async Task RestoreFilesAsync(OperationContext context, AbsolutePath checkpointTargetDirectory, Dictionary <string, string> newCheckpointInfo)
 {
     if (_configuration.IncrementalCheckpointDegreeOfParallelism <= 1)
     {
         foreach (var(key, value) in newCheckpointInfo)
         {
             await RestoreFileAsync(context, checkpointTargetDirectory, key, value).ThrowIfFailure();
         }
     }
     else
     {
         await ParallelAlgorithms.WhenDoneAsync(
             _configuration.IncrementalCheckpointDegreeOfParallelism,
             context.Token,
             action : async(addItem, kvp) =>
         {
             var key   = kvp.Key;
             var value = kvp.Value;
             await RestoreFileAsync(context, checkpointTargetDirectory, key, value).ThrowIfFailure();
         },
             items : newCheckpointInfo.ToArray());
     }
 }
Example #21
 public void Test_ParallelAlgorithms_SpeculativeFor_ArrayFor()
 {
      // Internally runs a parallel loop with CAS.
     var result = ParallelAlgorithms.SpeculativeFor(0, 5, e => e);
 }
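SpeculativeFor races the body across an index range and yields the first result to complete. A sketch of that first-result-wins shape, assuming a non-empty range (illustrative only):

        public static TResult SpeculativeForSketch<TResult>(int fromInclusive, int toExclusive, Func<int, TResult> body)
        {
            object result = null;
            Parallel.For(fromInclusive, toExclusive, (i, state) =>
            {
                // The first completed iteration publishes its (boxed) result; the rest stop early.
                Interlocked.CompareExchange(ref result, body(i), null);
                state.Stop();
            });
            return (TResult)result;
        }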
Example #22
        /// <summary>
        /// Gets nodes to schedule.
        /// </summary>
        /// <param name="scheduleDependents">If true, then include all transitive dependents of the explicitly scheduled nodes.</param>
        /// <param name="explicitlyScheduledNodes">Explicitly scheduled nodes.</param>
        /// <param name="forceSkipDepsMode">If not disabled, then skip dependencies. This corresponds to "dirty" build.</param>
        /// <param name="scheduleMetaPips">If true, metapips will be scheduled</param>
        /// <returns>Nodes to schedule.</returns>
        public GetScheduledNodesResult GetNodesToSchedule(
            bool scheduleDependents,
            IEnumerable<NodeId> explicitlyScheduledNodes,
            ForceSkipDependenciesMode forceSkipDepsMode,
            bool scheduleMetaPips)
        {
            int explicitlySelectedNodeCount;
            int explicitlySelectedProcessCount;
            int dirtyNodeCount;
            int dirtyProcessCount;
            int nonMaterializedNodeCount;
            int nonMaterializedProcessCount;
            int processesInBuildCone = 0;

            HashSet<NodeId> nodesToSchedule;
            VisitationTracker transitiveDependencyNodeFilter;

            using (m_counters.StartStopwatch(PipExecutorCounter.BuildSetCalculatorComputeBuildCone))
            {
                var visitedNodes = new VisitationTracker(m_graph);
                nodesToSchedule = new HashSet<NodeId>(explicitlyScheduledNodes);
                explicitlySelectedNodeCount = nodesToSchedule.Count;
                explicitlySelectedProcessCount = nodesToSchedule.Count(IsProcess);

                // 1. Calculate dirty nodes.
                // The filter-passing set may include nodes which are dirty/clean and schedulable/not-schedulable (w.r.t. state).
                // We want stats on dirty vs. not-dirty, and want to drop anything not schedulable.
                // This step also marks dirty non-materialized nodes.
                CalculateDirtyNodes(
                    nodesToSchedule,
                    out dirtyNodeCount,
                    out dirtyProcessCount,
                    out nonMaterializedNodeCount,
                    out nonMaterializedProcessCount);

                if (dirtyNodeCount == 0)
                {
                    int duration = (int) m_counters.GetElapsedTime(PipExecutorCounter.BuildSetCalculatorComputeBuildCone).TotalMilliseconds;

                    // Build cone is the same as the explicitly selected processes.
                    Logger.Log.BuildSetCalculatorProcessStats(
                        m_loggingContext,
                        m_graph.Nodes.Count(IsProcess),
                        explicitlySelectedProcessCount,
                        explicitlySelectedProcessCount,
                        explicitlySelectedProcessCount,
                        0,
                        duration);
                    Logger.Log.BuildSetCalculatorStats(
                        m_loggingContext,
                        0,
                        0,
                        explicitlySelectedNodeCount,
                        explicitlySelectedProcessCount,
                        nonMaterializedNodeCount,
                        nonMaterializedProcessCount,
                        duration,
                        0,
                        0,
                        0,
                        0);
                    return GetScheduledNodesResult.CreateForNoOperationBuild(explicitlySelectedProcessCount);
                }

                // 2. Add transitive dependents of explicitly scheduled nodes (if requested).
                if (scheduleDependents)
                {
                    m_visitor.VisitTransitiveDependents(
                        nodesToSchedule,
                        visitedNodes,
                        node =>
                        {
                            // Don't schedule dependents that are meta pips. These may artificially connect unrequested
                            // pips since we will later schedule their dependencies. For example, this would cause
                            // everything referenced by a spec file pip to be scheduled as a single unit.
                            PipType pipType = GetPipType(node);
                            if (!pipType.IsMetaPip())
                            {
                                nodesToSchedule.Add(node);

                                if (pipType == PipType.Process)
                                {
                                    ++processesInBuildCone;
                                }

                                return true;
                            }

                            return false;
                        });
                }

                // At this point nodesToSchedule contains
                // (1) all nodes that are explicitly scheduled (explicitlyScheduledNodes), and
                // (2) if scheduleDependents is true, all dependents of (1) transitively.
                transitiveDependencyNodeFilter = visitedNodes;

                // 3. Collect/visit transitive dependencies, but don't put it in nodesToSchedule.
                transitiveDependencyNodeFilter.UnsafeReset();

                // The code below essentially does m_visitor.VisitTransitiveDependencies(nodesToSchedule, transitiveDependencyNodeFilter, node => true), but in parallel.
                foreach (var nodeId in nodesToSchedule)
                {
                    if (transitiveDependencyNodeFilter.MarkVisited(nodeId))
                    {
                        if (IsProcess(nodeId))
                        {
                            ++processesInBuildCone;
                        }
                    }
                }

                ParallelAlgorithms.WhileNotEmpty(
                    nodesToSchedule,
                    (node, add) =>
                    {
                        foreach (Edge inEdge in m_graph.GetIncomingEdges(node))
                        {
                            if (visitedNodes.MarkVisited(inEdge.OtherNode))
                            {
                                add(inEdge.OtherNode);
                                if (IsProcess(inEdge.OtherNode))
                                {
                                    Interlocked.Increment(ref processesInBuildCone);
                                }
                            }
                        }
                    });

                // At this point nodesToSchedule hasn't changed from step 2.
                // But now transitiveDependencyNodeFilter has already marked all nodes in nodesToSchedule, plus
                // their dependencies transitively.
            }

            IEnumerable<NodeId> scheduledNodes;
            var mustExecute = new HashSet<NodeId>();
            var stats = new BuildSetCalculatorStats();
            var metaPipCount = 0;

            using (m_counters.StartStopwatch(PipExecutorCounter.BuildSetCalculatorGetNodesToSchedule))
            {
                scheduledNodes = GetNodesToSchedule(
                    nodesToSchedule,
                    transitiveDependencyNodeFilter,
                    forceSkipDepsMode,
                    scheduleMetaPips,
                    mustExecute,
                    stats,
                    ref metaPipCount);
            }

            int buildConeDuration = (int) m_counters.GetElapsedTime(PipExecutorCounter.BuildSetCalculatorComputeBuildCone).TotalMilliseconds;
            int getScheduledNodesDuration = (int) m_counters.GetElapsedTime(PipExecutorCounter.BuildSetCalculatorGetNodesToSchedule).TotalMilliseconds;
            int scheduledProcessCount = scheduledNodes.Count(IsProcess);

            Logger.Log.BuildSetCalculatorProcessStats(
                m_loggingContext,
                m_graph.Nodes.Count(IsProcess),
                explicitlySelectedProcessCount,
                processesInBuildCone,
                (processesInBuildCone - scheduledProcessCount) + stats.CleanMaterializedProcessFrontierCount,
                scheduledProcessCount,
                buildConeDuration + getScheduledNodesDuration);

            Logger.Log.BuildSetCalculatorStats(
                m_loggingContext,
                dirtyNodeCount,
                dirtyProcessCount,
                explicitlySelectedNodeCount,
                explicitlySelectedProcessCount,
                nonMaterializedNodeCount,
                nonMaterializedProcessCount,
                buildConeDuration,
                scheduledNodes.Count(),
                scheduledProcessCount,
                metaPipCount,
                getScheduledNodesDuration);

            int incrementalSchedulingCacheHits = forceSkipDepsMode == ForceSkipDependenciesMode.Disabled
                ? (processesInBuildCone - scheduledProcessCount + stats.CleanMaterializedProcessFrontierCount)
                : 0;

            return new GetScheduledNodesResult(
                scheduledNodes: scheduledNodes,
                mustExecuteNodes: mustExecute,
                incrementalSchedulingCacheHitProcesses: incrementalSchedulingCacheHits,
                cleanMaterializedProcessFrontierCount: forceSkipDepsMode == ForceSkipDependenciesMode.Disabled ? stats.CleanMaterializedProcessFrontierCount : 0);
        }
Example #23
        void DumpBufToDisk(ParallelOptions po)
        {
            Stopwatch sw;
            long      TotalDBWrites  = 0;
            long      TotalRequested = 0;
            long      DBPage         = 0;

            SortMask = HDB.DBEntriesMask << HASH_SHIFT;
            do
            {
                var hashArrTpl = ReadyQueue.Take(po.CancellationToken);
                var hashArr    = hashArrTpl.Item2;
                var Count      = hashArrTpl.Item1;

                ParallelAlgorithms.Sort <HashRec>(hashArr, 0, Count, GetICompareer <HashRec>(SortByDBSizeMask));
                TotalRequested += Count;

                if (Vtero.VerboseLevel >= 1)
                {
                    WriteColor(ConsoleColor.Cyan, $"Hash entries to store: {Count:N0}");
                }

                using (var fs = new FileStream(DBFile, FileMode.Open, FileAccess.ReadWrite, FileShare.ReadWrite, DB_READ_SIZE))
                {
                    // we need 2 pages now since we're block-reading and we might pick a hash whose scan
                    // starts at the very end of a page
                    byte[] buff = new byte[DB_READ_SIZE];
                    byte[] zero = new byte[HASH_REC_BYTES];
                    int    i = 0, firstIndex = 0, zeroIndex = 0;
                    bool   WriteBack = false;

                    sw = Stopwatch.StartNew();
                    do
                    {
                        var Index = hashArr[i].Index;
                        // convert Index to PageIndex
                        DBPage = (long)((Index & SortMask) & ~DB_PAGE_MASK);

                        // find block offset for this hash
                        fs.Seek(DBPage, SeekOrigin.Begin);
                        fs.Read(buff, 0, DB_READ_SIZE);
                        WriteBack = false;
                        if (po.CancellationToken.IsCancellationRequested)
                        {
                            return;
                        }
                        po.CancellationToken.ThrowIfCancellationRequested();

                        do
                        {
                            // skip duplicates
                            if (i + 1 < Count &&
                                hashArr[i].Index == hashArr[i + 1].Index)
                            //&& UnsafeHelp.UnsafeCompare(hashArr[i].HashData, hashArr[i + 1].HashData))
                            {
                                i++;
                                continue;
                            }

                            if (i < Count)
                            {
                                // re-read Index since we could be on the inner loop
                                Index = hashArr[i].Index;
                                // Index inside of a page
                                var PageIndex = (int)(Index & DB_PAGE_MASK);

                                // Hash to populate the DB with
                                var toWrite = BitConverter.GetBytes(hashArr[i].CompressedHash);

                                // do we already have this hash from disk?
                                firstIndex = buff.SearchBytes(toWrite, PageIndex, HASH_REC_BYTES);
                                if (firstIndex < 0)
                                {
                                    zeroIndex = buff.SearchBytes(zero, PageIndex, HASH_REC_BYTES);
                                    if (zeroIndex >= 0)
                                    {
                                        // we want the modified buffer to get written back
                                        WriteBack = true;

                                        // we requested this to be pre-gen'd for us
                                        toWrite = hashArr[i].Serialized;

                                        // update buff with new hash entry for write back
                                        //Array.Copy(toWrite, 0, buff, zeroIndex, toWrite.Length);
                                        for (int j = zeroIndex, k = 0; j < zeroIndex + toWrite.Length; j++, k++)
                                        {
                                            buff[j] = toWrite[k];
                                        }

                                        TotalDBWrites++;

                                        // set to the original index, shifted down since we're bit-aligned
                                        HDB.SetIdxBit(Index);
                                    }
                                    else if (zeroIndex < 0)
                                    {
                                        var strerr = $"HASH TABLE SATURATED!!! ({DBPage:X}:{PageIndex:X}) YOU NEED TO MAKE THE DB LARGER!!";
                                        WriteColor(ConsoleColor.Red, strerr);
                                        source.Cancel();
                                    }
                                }
                            }
                            i++;

                            if (i % 100000 == 0 && sw.Elapsed.TotalSeconds > 0)
                            {
                                WriteColor(ConsoleColor.Cyan, $"DB commit entries: {i:N0} - per second {(i / sw.Elapsed.TotalSeconds):N0}");
                            }

                            // continue to next entry if it's in the same block
                        } while (i < Count && (((hashArr[i].Index & SortMask) & ~DB_PAGE_MASK) == (ulong)DBPage));

                        if (WriteBack)
                        {
                            if (po.CancellationToken.IsCancellationRequested)
                            {
                                return;
                            }
                            // reset seek position
                            fs.Seek(DBPage, SeekOrigin.Begin);
                            // only write back 1 page if we can help it
                            fs.Write(buff, 0, DB_READ_SIZE);
                        }
                    } while (i < Count);

                    WriteColor(ConsoleColor.Cyan, $"DB entries: {i:N0} - per second {(i / sw.Elapsed.TotalSeconds):N0}");
                    //aPool.Return(hashArr);
                }
            } while (!DoneHashLoad || ReadyQueue.Count() > 0);

            WriteColor(ConsoleColor.Cyan, $"Finished DB write {TotalDBWrites:N0} NEW entries. Requsted {TotalRequested:N0} (reduced count reflects de-duplication). Task time: {sw.Elapsed}");
        }
Example #24
        private static void SortAndFilterItems(ParallelState state, out HashSet <object>[] uniqueFilterItems, out List <object> items)
        {
            int filtersCount = state.ValueProvider.GetFiltersCount();

            uniqueFilterItems = new HashSet <object> [filtersCount];
            for (int i = 0; i < filtersCount; i++)
            {
                uniqueFilterItems[i] = new HashSet <object>();
            }

            if (filtersCount > 0)
            {
                items = new List <object>();

                if (state.DataView.SourceGroups.Count > 0)
                {
                    foreach (var group in state.DataView.SourceGroups)
                    {
                        List <object> filteredGroupedItems = new List <object>();

                        for (int g = group.Item2; g < group.Item3; g++)
                        {
                            object[] filterItems = state.ValueProvider.GetFilterItems(state.DataView.InternalList[g]);
                            for (int i = 0; i < uniqueFilterItems.Length; i++)
                            {
                                uniqueFilterItems[i].Add(filterItems[i]);
                            }

                            bool passesFilter = state.ValueProvider.PassesFilter(filterItems);
                            if (passesFilter)
                            {
                                filteredGroupedItems.Add(state.DataView.InternalList[g]);
                            }
                        }

                        var sortComparer = state.ValueProvider.GetSortComparer();
                        if (sortComparer != null)
                        {
                            ParallelAlgorithms.Sort(filteredGroupedItems, sortComparer);
                        }

                        items.AddRange(filteredGroupedItems);

                        state.SourceGroups.Add(new Tuple <object, int>(group.Item1, items.Count));
                    }
                }
                else
                {
                    foreach (var item in state.DataView.InternalList)
                    {
                        object[] filterItems = state.ValueProvider.GetFilterItems(item);
                        for (int i = 0; i < uniqueFilterItems.Length; i++)
                        {
                            uniqueFilterItems[i].Add(filterItems[i]);
                        }

                        bool passesFilter = state.ValueProvider.PassesFilter(filterItems);
                        if (passesFilter)
                        {
                            items.Add(item);
                        }
                    }

                    var sortComparer = state.ValueProvider.GetSortComparer();
                    if (sortComparer != null)
                    {
                        ParallelAlgorithms.Sort(items, sortComparer);
                    }
                }
            }
            else
            {
                items = new List <object>();

                if (state.DataView.SourceGroups.Count > 0)
                {
                    List <object> groupedItems = new List <object>();

                    foreach (var group in state.DataView.SourceGroups)
                    {
                        groupedItems = new List <object>();
                        for (int g = group.Item2; g < group.Item3; g++)
                        {
                            groupedItems.Add(state.DataView.InternalList[g]);
                        }

                        var sortComparer = state.ValueProvider.GetSortComparer();
                        if (sortComparer != null)
                        {
                            ParallelAlgorithms.Sort(groupedItems, sortComparer);
                        }
                        items.AddRange(groupedItems);


                        state.SourceGroups.Add(new Tuple <object, int>(group.Item1, items.Count));
                    }
                }
                else
                {
                    items = new List <object>(state.DataView.InternalList);

                    var sortComparer = state.ValueProvider.GetSortComparer();
                    if (sortComparer != null)
                    {
                        ParallelAlgorithms.Sort(items, sortComparer);
                    }
                }
            }

            state.Items = items;
        }
Example #25
 public void Test_ParallelAlgorithms_For_Add()
 {
     ParallelAlgorithms.For(0, 1000, e => e++);
     Assert.IsTrue(true);
 }
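The body above increments its own copy of the loop index, so this test only verifies that For completes. A variant whose effect is observable, assuming the same For(fromInclusive, toExclusive, body) shape:

     int[] squares = new int[1000];
     ParallelAlgorithms.For(0, 1000, i => squares[i] = i * i); // each index writes only its own slot
     Assert.AreEqual(81, squares[9]);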