// DistinctOps: parallel evaluation of a distinct() operation, choosing a strategy
// from the upstream pipeline's flags:
//  - DISTINCT already known: no-op, just evaluate the upstream pipeline into a Node.
//  - ORDERED: delegate to reduce(), which accumulates into a LinkedHashSet so the
//    encounter order of first occurrences is preserved.
//  - Otherwise: collect keys into a ConcurrentDictionary, with a separate
//    AtomicBoolean tracking whether a null element was seen, because the map
//    cannot hold null values (see the comment in the code).
// NOTE(review): this line appears TRUNCATED — the method body ends mid-way through
// the construction of forEachOp (", false);" with no closing braces for the else
// branch or the method). Recover the remainder from the original source before
// making any code change here.
internal override Node <T> opEvaluateParallel <P_IN>(PipelineHelper <T> helper, Spliterator <P_IN> spliterator, IntFunction <T[]> generator) { if (StreamOpFlag.DISTINCT.isKnown(helper.StreamAndOpFlags)) { // No-op return(helper.Evaluate(spliterator, false, generator)); } else if (StreamOpFlag.ORDERED.isKnown(helper.StreamAndOpFlags)) { return(reduce(helper, spliterator)); } else { // Holder of null state since ConcurrentHashMap does not support null values AtomicBoolean seenNull = new AtomicBoolean(false); ConcurrentDictionary <T, Boolean> map = new ConcurrentDictionary <T, Boolean>(); //JAVA TO C# CONVERTER TODO TASK: Java lambdas satisfy functional interfaces, while .NET lambdas satisfy delegates - change the appropriate interface to a delegate: TerminalOp <T, Void> forEachOp = ForEachOps.makeRef(Stream_Fields.t => { if (Stream_Fields.t == null) { seenNull.Set(Stream_Fields.True); } else { map.GetOrAdd(Stream_Fields.t, true); } }, false);
/// <summary>
/// Parallel evaluation of a skip/limit (slice) operation, producing a Node.
/// Strategy selection, cheapest first:
///  1. SIZED + SUBSIZED source: build a slice spliterator directly over the
///     source spliterator (shape-matched, most efficient).
///  2. Unordered pipeline: wrap the pipeline spliterator in an unordered
///     skip/limit spliterator and collect with it.
///  3. Ordered, unsized: fall back to the decomposing SliceTask.
/// </summary>
/// <param name="helper">pipeline helper describing the upstream stages</param>
/// <param name="spliterator">source spliterator</param>
/// <param name="generator">array factory for the output element type</param>
/// <returns>a Node holding the sliced elements</returns>
internal override Node <T> opEvaluateParallel <P_IN>(PipelineHelper <T> helper, Spliterator <P_IN> spliterator, IntFunction <T[]> generator)
{
    long size = helper.ExactOutputSizeIfKnown(spliterator);
    if (size > 0 && spliterator.hasCharacteristics(java.util.Spliterator_Fields.SUBSIZED))
    {
        // Because the pipeline is SIZED the slice spliterator can be created
        // from the source; this requires matching the shape of the source and
        // is potentially more efficient than creating the slice spliterator
        // from the pipeline-wrapping spliterator.
        Spliterator <P_IN> s = SliceSpliterator(helper.SourceShape, spliterator, Skip, Limit);
        return Nodes.Collect(helper, s, Stream_Fields.True, generator);
    }
    else if (!StreamOpFlag.ORDERED.isKnown(helper.StreamAndOpFlags))
    {
        Spliterator <T> s = unorderedSkipLimitSpliterator(helper.WrapSpliterator(spliterator), Skip, Limit, size);
        // Collect using this pipeline, which is empty and therefore can be
        // used with the pipeline-wrapping spliterator.
        // Note that we cannot create a slice spliterator from the source
        // spliterator if the pipeline is not SIZED.
        return Nodes.Collect(this, s, Stream_Fields.True, generator);
    }
    else
    {
        // FIX: C# has no diamond operator on object creation; the converter's
        // "new SliceTask <>(...)" does not compile. Type arguments made
        // explicit (<P_IN, T> mirrors the JDK SliceTask<P_IN, P_OUT>).
        return (new SliceTask <P_IN, T>(this, helper, spliterator, generator, Skip, Limit)).invoke();
    }
}
/// <summary>
/// Lazy parallel evaluation of a skip/limit (slice) operation, producing a
/// Spliterator over the sliced elements. Uses the same strategy selection as
/// opEvaluateParallel: direct slice spliterator when SIZED + SUBSIZED,
/// unordered skip/limit spliterator when the pipeline is not ORDERED,
/// otherwise a full SliceTask evaluation whose result Node is re-exposed as
/// a spliterator.
/// </summary>
/// <param name="helper">pipeline helper describing the upstream stages</param>
/// <param name="spliterator">source spliterator</param>
/// <returns>a spliterator over the sliced elements</returns>
internal override Spliterator <T> opEvaluateParallelLazy <P_IN>(PipelineHelper <T> helper, Spliterator <P_IN> spliterator)
{
    long size = helper.ExactOutputSizeIfKnown(spliterator);
    if (size > 0 && spliterator.hasCharacteristics(java.util.Spliterator_Fields.SUBSIZED))
    {
        // FIX: diamond operator removed — C# requires explicit type arguments.
        return new StreamSpliterators.SliceSpliterator.OfRef <T>(helper.WrapSpliterator(spliterator), Skip, CalcSliceFence(Skip, Limit));
    }
    else if (!StreamOpFlag.ORDERED.isKnown(helper.StreamAndOpFlags))
    {
        return unorderedSkipLimitSpliterator(helper.WrapSpliterator(spliterator), Skip, Limit, size);
    }
    else
    {
        // @@@ OOMEs will occur for LongStream.longs().filter(i -> true).limit(n)
        //     regardless of the value of n.
        //     Need to adjust the target size of splitting for the SliceTask
        //     from say (size / k) to say min(size / k, 1 << 14); this will
        //     limit the size of the buffers created at the leaf nodes.
        //     Cancellation will be more aggressive, cancelling later tasks if
        //     the target slice size has been reached from a given task;
        //     cancellation should also clear local results if any.
        // FIX: diamond operator removed here as well (<P_IN, T> mirrors the
        // JDK SliceTask<P_IN, P_OUT>).
        return (new SliceTask <P_IN, T>(this, helper, spliterator, CastingArray(), Skip, Limit)).invoke().spliterator();
    }
}
/// <summary>
/// Evaluates distinct() by a parallel reduction into a LinkedHashSet.
/// If the stream is SORTED then it should also be ORDERED, so accumulating
/// into a LinkedHashSet also preserves the sort order (insertion order of
/// first occurrences).
/// </summary>
/// <param name="helper">pipeline helper describing the upstream stages</param>
/// <param name="spliterator">source spliterator</param>
/// <returns>a Node over the distinct elements in encounter order</returns>
internal virtual Node <T> reduce <P_IN>(PipelineHelper <T> helper, Spliterator <P_IN> spliterator)
{
    // FIX: Java method references (LinkedHashSet::new, ::add, ::addAll) are
    // not valid C#; replaced with equivalent lambdas for the seed supplier,
    // accumulator and combiner. NOTE(review): assumes MakeRef takes
    // (supplier, accumulator, combiner) delegates as in the JDK
    // ReduceOps.makeRef — confirm the delegate signatures.
    TerminalOp <T, LinkedHashSet <T> > reduceOp = ReduceOps.MakeRef <T, LinkedHashSet <T> >(
        () => new LinkedHashSet <T>(),
        (set, t) => set.Add(t),
        (left, right) => left.AddAll(right));
    return Nodes.Node(reduceOp.evaluateParallel(helper, spliterator));
}
/// <summary>
/// Parallel evaluation of a forEach terminal operation. When the operation
/// is Ordered, uses ForEachOrderedTask so elements are presented in
/// encounter order; otherwise uses the unordered ForEachTask with this sink
/// wrapped for the pipeline.
/// </summary>
/// <param name="helper">pipeline helper describing the upstream stages</param>
/// <param name="spliterator">source spliterator</param>
/// <returns>always null (forEach produces no result)</returns>
public override Void evaluateParallel <S>(PipelineHelper <T> helper, Spliterator <S> spliterator)
{
    if (Ordered)
    {
        // FIX: C# has no diamond operator; explicit type arguments added
        // (<S, T> mirrors the JDK ForEachOrderedTask<S, T>).
        (new ForEachOrderedTask <S, T>(helper, spliterator, this)).Invoke();
    }
    else
    {
        (new ForEachTask <S, T>(helper, spliterator, helper.WrapSink(this))).Invoke();
    }
    return null;
}
/// <summary>
/// Sequential evaluation of a forEach terminal operation: copies the
/// traversed elements into this sink via the pipeline helper and returns
/// the sink's result via get().
/// </summary>
/// <param name="helper">pipeline helper describing the upstream stages</param>
/// <param name="spliterator">source spliterator</param>
/// <returns>the value supplied by the sink's get() (null for forEach)</returns>
public override Void evaluateSequential <S>(PipelineHelper <T> helper, Spliterator <S> spliterator)
{
    var sink = helper.WrapAndCopyInto(this, spliterator);
    return sink.get();
}