/*
 * Copyright (c) 2012, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.  Oracle designates this
 * particular file as subject to the "Classpath" exception as provided
 * by Oracle in the LICENSE file that accompanied this code.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */
package com.landawn.abacus.util.stream;

import java.util.Collection;
import java.util.List;
import java.util.Map;
import java.util.Random;
import java.util.Set;

import com.landawn.abacus.util.ImmutableIterator;
import com.landawn.abacus.util.IntList;
import com.landawn.abacus.util.LongMultiset;
import com.landawn.abacus.util.Multiset;
import com.landawn.abacus.util.Nth;
import com.landawn.abacus.util.Optional;
import com.landawn.abacus.util.Percentage;
import com.landawn.abacus.util.Try;
import com.landawn.abacus.util.function.BiPredicate;
import com.landawn.abacus.util.function.Consumer;
import com.landawn.abacus.util.function.Function;
import com.landawn.abacus.util.function.Supplier;

/**
 * Note: It's copied from OpenJDK at: http://hg.openjdk.java.net/jdk8u/hs-dev/jdk
 *
 * <p>Base interface for streams, which are sequences of elements supporting
 * sequential and parallel aggregate operations. The following example
 * illustrates an aggregate operation using the stream types {@link Stream}
 * and {@link IntStream}, computing the sum of the weights of the red widgets:
 *
 * <pre>{@code
 *     int sum = widgets.stream()
 *                      .filter(w -> w.getColor() == RED)
 *                      .mapToInt(w -> w.getWeight())
 *                      .sum();
 * }</pre>
 *
 * <p>See the class documentation for {@link Stream} and the package documentation
 * for java.util.stream for additional specification of streams, stream operations,
 * stream pipelines, and parallelism, which governs the behavior of all stream types.
 *
 * @param <T> the type of the stream elements
 * @param <A> the type of array
 * @param <P> the type of predicate
 * @param <C> the type of consumer
 * @param <PL> the type of PrimitiveList/List
 * @param <OT> the type of Optional
 * @param <IT> the type of Indexed
 * @param <S> the type of the stream implementing {@code BaseStream}
 * @since 1.8
 * @see Stream
 * @see IntStream
 * @see LongStream
 * @see DoubleStream
 * @see java.util.stream
 */
public interface BaseStream<T, A, P, C, PL, OT, IT, S extends BaseStream<T, A, P, C, PL, OT, IT, S>> extends AutoCloseable {

    /**
     * Returns a stream consisting of the elements of this stream that match
     * the given predicate.
     *
     * <p>This is an intermediate operation.
     *
     * @param predicate a non-interfering, stateless predicate to apply to each
     *        element to determine if it should be included
     * @return the new stream
     */
    S filter(P predicate);

    /**
     * Keeps the elements until the given predicate returns false. The stream should be
     * sorted, which means that if x is the first element for which predicate.test(x)
     * returns false, then predicate.test(y) should also return false for any element y after x.
     *
     * <p>In parallel streams, the elements after the first element for which the predicate
     * returns false may be tested by the predicate too.
     *
     * @param predicate
     * @return
     */
    S takeWhile(P predicate);

    /**
     * Removes the elements until the given predicate returns false. The stream should be
     * sorted, which means that if x is the first element for which predicate.test(x)
     * returns true, then predicate.test(y) should also return true for any element y after x.
     *
     * <p>In parallel streams, the elements after the first element for which the predicate
     * returns false may be tested by the predicate too.
     *
     * @param predicate
     * @return
     */
    S dropWhile(P predicate);

    /**
     * Returns a stream consisting of the remaining elements of this stream
     * after removing and consuming the first {@code n} elements of the stream.
     * If this stream contains fewer than {@code n} elements then an
     * empty stream will be returned.
     *
     * @param n
     * @param consumer
     * @return
     */
    S remove(long n, C consumer);

    S removeIf(P predicate);

    S removeIf(P predicate, C consumer);

    /**
     * Returns a stream consisting of the remaining elements of this stream
     * after removing and consuming elements until the specified predicate returns false.
     * If there are no more elements then an empty stream will be returned.
     *
     * @param predicate
     * @param consumer
     * @return
     */
    S removeWhile(P predicate, C consumer);

    /**
     * Returns a Stream of consecutive sub-sequences of the elements, each of the
     * same size (the final sequence may be smaller).
     *
     * @param size
     * @return
     */
    Stream split(int size);
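
    /*
     * Illustrative sketch (added for clarity; not part of the original source): how
     * takeWhile/dropWhile behave on a stream that satisfies the "sorted" precondition
     * described above. Stream.of(...) and println() are used exactly as in the other
     * examples in this file; the results in the trailing comments follow from the
     * documented semantics.
     *
     *     Stream.of(1, 2, 3, 4, 5).takeWhile(i -> i < 3).println(); // [1, 2]
     *     Stream.of(1, 2, 3, 4, 5).dropWhile(i -> i < 3).println(); // [3, 4, 5]
     */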
    /**
     * Returns a Stream of consecutive sub-sequences of the elements (as lists),
     * each of the same size (the final sequence may be smaller).
     *
     * <p>This method only runs sequentially, even in a parallel stream.
     *
     * @param size
     * @return
     */
    public abstract Stream splitToList(int size);

    /**
     * Split the stream by the specified predicate.
     *
     * <p>This stream should be sorted by the value which is used to verify the border.
     * <p>This method only runs sequentially, even in a parallel stream.
     *
     * @param predicate
     * @return
     */
    Stream split(final P predicate);

    /**
     * Split the stream by the specified predicate.
     *
     * <p>This stream should be sorted by the value which is used to verify the border.
     * <p>This method only runs sequentially, even in a parallel stream.
     *
     * @param predicate
     * @return
     */
    Stream splitToList(final P predicate);

    /**
     * Split the stream by the specified predicate.
     *
     * <pre>{@code
     * // split the number sequence by window 5.
     * Stream.of(1, 2, 3, 5, 7, 9, 10, 11, 19).splitToList(MutableInt.of(5), (e, b) -> e <= b.intValue(), b -> b.addAndGet(5)).forEach(N::println);
     * }</pre>
     *
     * <p>This stream should be sorted by the value which is used to verify the border.
     * <p>This method only runs sequentially, even in a parallel stream.
     *
     * @param seed
     * @param predicate
     * @param seedUpdate
     * @return
     */
    Stream split(final U seed, final BiPredicate predicate, final Consumer seedUpdate);

    /**
     * Split the stream by the specified predicate.
     *
     * <pre>{@code
     * // split the number sequence by window 5.
     * Stream.of(1, 2, 3, 5, 7, 9, 10, 11, 19).splitToList(MutableInt.of(5), (e, b) -> e <= b.intValue(), b -> b.addAndGet(5)).forEach(N::println);
     * }</pre>
     *
     * <p>This stream should be sorted by the value which is used to verify the border.
     * <p>This method only runs sequentially, even in a parallel stream.
     *
     * @param seed
     * @param predicate
     * @param seedUpdate
     * @return
     */
    Stream splitToList(final U seed, final BiPredicate predicate, final Consumer seedUpdate);

    /**
     * Split the stream into two pieces at the specified position.
     *
     * @param where
     * @return
     */
    Stream splitAt(int where);

    /**
     * Split the stream into two pieces at the point where the predicate first returns {@code false}.
     *
     * <pre>{@code
     * Stream.of(1, 3, 2, 4, 2, 5).splitBy(i -> i <= 3).forEach(s -> s.println()); // [1, 3, 2], [4, 2, 5]
     * }</pre>
     *
     * @param where
     * @return
     */
    Stream splitBy(P where);

    /**
     * @param windowSize
     * @return
     * @see #sliding(int, int)
     */
    Stream sliding(int windowSize);

    /**
     * @param windowSize
     * @return
     * @see #sliding(int, int)
     */
    Stream slidingToList(int windowSize);
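
    /*
     * Illustrative sketch (added for clarity; not part of the original source): size-based
     * splitting versus position-based splitting, using Stream.of(...) and println() as in the
     * other examples in this file. The results in the trailing comments follow from the
     * documented semantics (the last chunk may be smaller); treating 'where' as the index at
     * which the second piece starts is an assumption.
     *
     *     Stream.of(1, 2, 3, 4, 5, 6, 7, 8).split(3).forEach(s -> s.println()); // [1, 2, 3], [4, 5, 6], [7, 8]
     *     Stream.of(1, 2, 3, 4, 5).splitAt(2).forEach(s -> s.println());        // [1, 2], [3, 4, 5]
     */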
    /**
     * <pre>{@code
     * Stream.of(1, 2, 3, 4, 5, 6, 7, 8).sliding(3, 1).forEach(Stream::println)
     * // output:
     * // [1, 2, 3]
     * // [2, 3, 4]
     * // [3, 4, 5]
     * // [4, 5, 6]
     * // [5, 6, 7]
     * // [6, 7, 8]
     *
     * Stream.of(1, 2, 3, 4, 5, 6, 7, 8).sliding(3, 3).forEach(Stream::println)
     * // output:
     * // [1, 2, 3]
     * // [4, 5, 6]
     * // [7, 8]
     *
     * Stream.of(1, 2, 3, 4, 5, 6, 7, 8).sliding(3, 5).forEach(Stream::println)
     * // output:
     * // [1, 2, 3]
     * // [6, 7, 8]
     * }</pre>
     *
     * <p>This method only runs sequentially, even in a parallel stream.
     *
     * @param windowSize
     * @param increment
     * @return
     */
    Stream sliding(int windowSize, int increment);

    /**
     * @param windowSize
     * @param increment
     * @return
     * @see #sliding(int, int)
     */
    Stream slidingToList(int windowSize, int increment);
    /**
     * This method only runs sequentially, even in a parallel stream.
     *
     * @param c
     * @return
     * @see IntList#intersection(IntList)
     */
    S intersection(Collection c);

    /**
     * This method only runs sequentially, even in a parallel stream.
     *
     * @param c
     * @return
     * @see IntList#difference(IntList)
     */
    S difference(Collection c);

    /**
     * This method only runs sequentially, even in a parallel stream.
     *
     * @param c
     * @return
     * @see IntList#symmetricDifference(IntList)
     */
    S symmetricDifference(Collection c);

    /**
     * All elements will be loaded into memory and sorted if not yet.
     *
     * @return
     */
    Optional<Map<Percentage, T>> percentiles();

    /**
     * This method only runs sequentially, even in a parallel stream, and all elements will be loaded into memory.
     *
     * @return
     */
    S reversed();

    /**
     * This method only runs sequentially, even in a parallel stream, and all elements will be loaded into memory.
     *
     * @return
     */
    S shuffled();

    /**
     * This method only runs sequentially, even in a parallel stream, and all elements will be loaded into memory.
     *
     * @return
     */
    S shuffled(Random rnd);

    /**
     * This method only runs sequentially, even in a parallel stream, and all elements will be loaded into memory.
     *
     * @return
     */
    S rotated(int distance);
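
    /*
     * Illustrative sketch (added for clarity; not part of the original source): the set-like
     * operations above are occurrence-based, as described for IntList#intersection and friends.
     * The results shown are what those semantics suggest for this sample input; java.util.Arrays
     * is used here only to build the argument Collection.
     *
     *     Stream.of(1, 2, 2, 3).intersection(Arrays.asList(2, 3, 4)).println();        // [2, 3]
     *     Stream.of(1, 2, 2, 3).difference(Arrays.asList(2, 3, 4)).println();          // [1, 2]
     *     Stream.of(1, 2, 2, 3).symmetricDifference(Arrays.asList(2, 3, 4)).println(); // [1, 2, 4]
     */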

    /**
     * Returns a stream consisting of the distinct elements of this stream.
     *
     * <p>This is a stateful intermediate operation.
     *
     * @return the new stream
     */
    S distinct();

    /**
     * Returns a stream consisting of the elements of this stream in sorted order.
     *
     * <p>All elements will be loaded into memory.
     *
     * @return the new stream
     */
    S sorted();

    S reverseSorted();

    S append(S s);

    S prepend(S stream);

    /**
     * This method only runs sequentially, even in a parallel stream, and all elements will be loaded into memory.
     *
     * @return
     */
    S cached();

    /**
     * This method only runs sequentially, even in a parallel stream.
     *
     * @return
     */
    Stream indexed();

    String join(CharSequence delimiter);

    String join(final CharSequence delimiter, final CharSequence prefix, final CharSequence suffix);
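
    /*
     * Illustrative sketch (added for clarity; not part of the original source): join
     * concatenates the elements' string forms with the given delimiter (and optional
     * prefix/suffix), and indexed() pairs each element with its position (the exact
     * string form of the indexed elements is not asserted here).
     *
     *     Stream.of("a", "b", "c").join(", ", "[", "]"); // "[a, b, c]"
     *     Stream.of("a", "b", "c").indexed().println();
     */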

    /**
     * Returns a stream consisting of the remaining elements of this stream
     * after discarding the first {@code n} elements of the stream.
     * If this stream contains fewer than {@code n} elements then an
     * empty stream will be returned.
     *
     * <p>This is a stateful intermediate operation.
     *
     * @apiNote
     * While {@code skip()} is generally a cheap operation on sequential
     * stream pipelines, it can be quite expensive on ordered parallel pipelines,
     * especially for large values of {@code n}, since {@code skip(n)}
     * is constrained to skip not just any n elements, but the
     * first n elements in the encounter order. Using an unordered
     * stream source or removing the ordering constraint with {@link #unordered()}
     * may result in significant speedups of {@code skip()} in parallel pipelines,
     * if the semantics of your situation permit. If consistency with encounter
     * order is required, and you are experiencing poor performance or memory
     * utilization with {@code skip()} in parallel pipelines, switching to
     * sequential execution with {@link #sequential()} may improve performance.
     *
     * @param n the number of leading elements to skip
     * @return the new stream
     * @throws IllegalArgumentException if {@code n} is negative
     */
    S skip(long n);

    /**
     * Returns a stream consisting of the elements of this stream, truncated
     * to be no longer than {@code maxSize} in length.
     *
     * <p>This is a short-circuiting stateful intermediate operation.
     *
     * @apiNote
     * While {@code limit()} is generally a cheap operation on sequential
     * stream pipelines, it can be quite expensive on ordered parallel pipelines,
     * especially for large values of {@code maxSize}, since {@code limit(n)}
     * is constrained to return not just any n elements, but the
     * first n elements in the encounter order. Using an unordered
     * stream source or removing the ordering constraint with {@link #unordered()}
     * may result in significant speedups of {@code limit()} in parallel pipelines,
     * if the semantics of your situation permit. If consistency with encounter
     * order is required, and you are experiencing poor performance or memory
     * utilization with {@code limit()} in parallel pipelines, switching to
     * sequential execution with {@link #sequential()} may improve performance.
     *
     * @param maxSize the number of elements the stream should be limited to
     * @return the new stream
     * @throws IllegalArgumentException if {@code maxSize} is negative
     */
    S limit(long maxSize);

    S step(long step);

    /**
     * Returns the count of elements in this stream. This is a special case of
     * a reduction and is equivalent to:
     * <pre>{@code
     *     return mapToLong(e -> 1L).sum();
     * }</pre>
     *
     * <p>This is a terminal operation.
     *
     * @return the count of elements in this stream
     */
    long count();
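
    /*
     * Illustrative sketch (added for clarity; not part of the original source): skip/limit
     * select a contiguous range, while step(n) keeps every n-th element starting from the
     * first one (that reading of step is an assumption; it is not spelled out above).
     *
     *     Stream.of(1, 2, 3, 4, 5, 6, 7, 8).skip(2).limit(4).println(); // [3, 4, 5, 6]
     *     Stream.of(1, 2, 3, 4, 5, 6, 7, 8).step(3).println();          // [1, 4, 7]
     */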

    /**
     * Returns a stream consisting of the elements of this stream, additionally
     * performing the provided action on each element as elements are consumed
     * from the resulting stream.
     *
     * <p>This is an intermediate operation.
     *
     * <p>For parallel stream pipelines, the action may be called at
     * whatever time and in whatever thread the element is made available by the
     * upstream operation. If the action modifies shared state,
     * it is responsible for providing the required synchronization.
     *
     * @apiNote This method exists mainly to support debugging, where you want
     * to see the elements as they flow past a certain point in a pipeline:
     * <pre>{@code
     *     Stream.of("one", "two", "three", "four")
     *         .filter(e -> e.length() > 3)
     *         .peek(e -> System.out.println("Filtered value: " + e))
     *         .map(String::toUpperCase)
     *         .peek(e -> System.out.println("Mapped value: " + e))
     *         .collect(Collectors.toList());
     * }</pre>
     *
     * @param action a non-interfering action to perform on the elements as
     *        they are consumed from the stream
     * @return this stream or a new stream with the same elements
     */
    S peek(C action);

    /**
     * Same as {@code peek}.
     *
     * @param action
     * @return
     * @see #peek(Object)
     */
    S carry(C action);

//    /**
//     * Performs an action for each element of this stream.
//     *
//     * <p>This is a terminal operation.
//     *
//     * <p>The behavior of this operation is explicitly nondeterministic.
//     * For parallel stream pipelines, this operation does not
//     * guarantee to respect the encounter order of the stream, as doing so
//     * would sacrifice the benefit of parallelism. For any given element, the
//     * action may be performed at whatever time and in whatever thread the
//     * library chooses. If the action accesses shared state, it is
//     * responsible for providing the required synchronization.
//     *
//     * @param action a non-interfering action to perform on the elements
//     */
//    void forEach(C action);

//    /**
//     * Returns whether any elements of this stream match the provided
//     * predicate. May not evaluate the predicate on all elements if not
//     * necessary for determining the result. If the stream is empty then
//     * {@code false} is returned and the predicate is not evaluated.
//     *
//     * <p>This is a short-circuiting terminal operation.
//     *
//     * @apiNote
//     * This method evaluates the existential quantification of the
//     * predicate over the elements of the stream (for some x P(x)).
//     *
//     * @param predicate a non-interfering, stateless predicate to apply to
//     *        elements of this stream
//     * @return {@code true} if any elements of the stream match the provided
//     *         predicate, otherwise {@code false}
//     */
//    boolean anyMatch(P predicate);

//    /**
//     * Returns whether all elements of this stream match the provided predicate.
//     * May not evaluate the predicate on all elements if not necessary for
//     * determining the result. If the stream is empty then {@code true} is
//     * returned and the predicate is not evaluated.
//     *
//     * <p>This is a short-circuiting terminal operation.
//     *
//     * @apiNote
//     * This method evaluates the universal quantification of the
//     * predicate over the elements of the stream (for all x P(x)). If the
//     * stream is empty, the quantification is said to be vacuously
//     * satisfied and is always {@code true} (regardless of P(x)).
//     *
//     * @param predicate a non-interfering, stateless predicate to apply to
//     *        elements of this stream
//     * @return {@code true} if either all elements of the stream match the
//     *         provided predicate or the stream is empty, otherwise {@code false}
//     */
//    boolean allMatch(P predicate);

//    /**
//     * Returns whether no elements of this stream match the provided predicate.
//     * May not evaluate the predicate on all elements if not necessary for
//     * determining the result. If the stream is empty then {@code true} is
//     * returned and the predicate is not evaluated.
//     *
//     * <p>This is a short-circuiting terminal operation.
//     *
//     * @apiNote
//     * This method evaluates the universal quantification of the
//     * negated predicate over the elements of the stream (for all x ~P(x)). If
//     * the stream is empty, the quantification is said to be vacuously satisfied
//     * and is always {@code true}, regardless of P(x).
//     *
//     * @param predicate a non-interfering, stateless predicate to apply to
//     *        elements of this stream
//     * @return {@code true} if either no elements of the stream match the
//     *         provided predicate or the stream is empty, otherwise {@code false}
//     */
//    boolean noneMatch(P predicate);

//    /**
//     * Returns an {@link Optional} describing the first element of this stream,
//     * or an empty {@code Optional} if the stream is empty. If the stream has
//     * no encounter order, then any element may be returned.
//     *
//     * <p>This is a short-circuiting terminal operation.
//     *
//     * @return an {@code Optional} describing the first element of this stream,
//     *         or an empty {@code Optional} if the stream is empty
//     */
//    OT findFirst(P predicate);

//    /**
//     * Sometimes, stream.reverse().findFirst(predicate) has better performance
//     * than stream.findLast(predicate).
//     *
//     * @param predicate
//     * @return
//     */
//    OT findLast(P predicate);

//    /**
//     * Returns the first element which is tested by the specified predicateForFirst,
//     * or the last element which is tested by the specified predicateForLast if the
//     * first element is not found, or an empty Optional if neither is found.
//     *
//     * <p>This method only runs sequentially, even in a parallel stream.
//     *
//     * @param predicateForFirst
//     * @param predicateForLast
//     * @return
//     */
//    OT findFirstOrLast(P predicateForFirst, P predicateForLast);

//    /**
//     * Returns an {@link Optional} describing some element of the stream, or an
//     * empty {@code Optional} if the stream is empty.
//     *
//     * <p>This is a short-circuiting terminal operation.
//     *
//     * <p>The behavior of this operation is explicitly nondeterministic; it is
//     * free to select any element in the stream. This is to allow for maximal
//     * performance in parallel operations; the cost is that multiple invocations
//     * on the same source may not return the same result. (If a stable result
//     * is desired, use {@link #findFirst()} instead.)
//     *
//     * @return an {@code Optional} describing some element of this stream, or an
//     *         empty {@code Optional} if the stream is empty
//     * @see #findFirst()
//     */
//    OT findAny(P predicate);

    OT first();

    OT last();

    /**
     * Returns an array containing the elements of this stream.
     *
     * <p>This is a terminal operation.
     *
     * @return an array containing the elements of this stream
     */
    A toArray();

    List toList();

    R toList(Supplier supplier);

    Set toSet();

    R toSet(Supplier supplier);

    Multiset toMultiset();

    Multiset toMultiset(Supplier supplier);

    LongMultiset toLongMultiset();

    LongMultiset toLongMultiset(Supplier supplier);

    /**
     * Returns an iterator for the elements of this stream.
     *
     * @return the element iterator for this stream
     */
    ImmutableIterator iterator();

    void println();

    /**
     * Returns whether this stream, if a terminal operation were to be executed,
     * would execute in parallel. Calling this method after invoking a
     * terminal stream operation method may yield unpredictable results.
     *
     * @return {@code true} if this stream would execute in parallel if executed
     */
    boolean isParallel();

    /**
     * Returns an equivalent stream that is sequential. May return
     * itself, either because the stream was already sequential, or because
     * the underlying stream state was modified to be sequential.
     *
     * @return a sequential stream
     */
    S sequential();

    /**
     * Returns an equivalent stream that is parallel. May return
     * itself if the stream was already parallel. Any parallel stream should be
     * closed with try-catch, or {@code tried()} should be called before the last step.
     *
     * @return a parallel stream
     * @see #parallel(int, Splitor)
     */
    S parallel();

    /**
     * Returns an equivalent stream that is parallel. May return
     * itself if the stream was already parallel with the same maxThreadNum as the specified one.
     *
     * @param maxThreadNum
     * @return
     * @see #parallel(int, Splitor)
     */
    S parallel(int maxThreadNum);

    /**
     * Returns an equivalent stream that is parallel. May return
     * itself if the stream was already parallel with the same splitor as the specified one.
     *
     * @param splitor
     * @return
     * @see #parallel(int, Splitor)
     */
    S parallel(Splitor splitor);
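
    /*
     * Illustrative sketch (added for clarity; not part of the original source): collecting a
     * stream into the container types above. The element types of the returned collections are
     * assumed to match the stream's element type; set ordering is not guaranteed, and
     * first()/last() return empty results on an empty stream.
     *
     *     Stream.of(1, 2, 2, 3).toList();     // [1, 2, 2, 3]
     *     Stream.of(1, 2, 2, 3).toSet();      // {1, 2, 3} (order not guaranteed)
     *     Stream.of(1, 2, 2, 3).toMultiset(); // counts: 1 -> 1, 2 -> 2, 3 -> 1
     *     Stream.of(1, 2, 3).first();         // Optional of 1
     *     Stream.of(1, 2, 3).last();          // Optional of 3
     */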

    /**
     * Returns an equivalent stream that is parallel. May return itself if the stream
     * was already parallel with the same maxThreadNum and splitor as the specified ones.
     *
     * <p>When to use parallel Streams?
     * <pre>{@code
     * Profiler.run(1, 1, 3, "sequential", () -> Stream.of(list).operation(F)...).printResult();
     * Profiler.run(1, 1, 3, "parallel", () -> Stream.of(list).parallel().operation(F)...).printResult();
     * }</pre>
     *
     * Here is a sample performance test on a machine with CPU Intel i7-3520M 4-cores 2.9 GHz, JDK 1.8.0_101, Windows 7:
     *
     * <pre>{@code
     * 
     * public void test_perf() {
     *     final String[] strs = new String[10_000];
     *     N.fill(strs, N.uuid());
     * 
     *     final int m = 1;
     *     final Function<String, Long> mapper = str -> {
     *         long result = 0;
     *         for (int i = 0; i < m; i++) {
     *             result += sum(str.toCharArray()) + 1;
     *         }
     *         return result;
     *     };
     * 
     *     final MutableLong sum = MutableLong.of(0);
     * 
     *     for (int i = 0, len = strs.length; i < len; i++) {
     *         sum.add(mapper.apply(strs[i]));
     *     }
     * 
     *     final int threadNum = 1, loopNum = 100, roundNum = 3;
     * 
     *     Profiler.run(threadNum, loopNum, roundNum, "For Loop", () -> {
     *         long result = 0;
     *         for (int i = 0, len = strs.length; i < len; i++) {
     *             result += mapper.apply(strs[i]);
     *         }
     *         assertEquals(sum.longValue(), result);
     *     }).printResult();
     * 
     *     Profiler.run(threadNum, loopNum, roundNum, "JDK Sequential",
     *             () -> assertEquals(sum.longValue(), java.util.stream.Stream.of(strs).map(mapper).mapToLong(e -> e).sum())).printResult();
     * 
     *     Profiler.run(threadNum, loopNum, roundNum, "JDK Parallel",
     *             () -> assertEquals(sum.longValue(), java.util.stream.Stream.of(strs).parallel().map(mapper).mapToLong(e -> e).sum())).printResult();
     * 
     *     Profiler.run(threadNum, loopNum, roundNum, "Abacus Sequential",
     *             () -> assertEquals(sum.longValue(), Stream.of(strs).map(mapper).mapToLong(e -> e).sum().longValue())).printResult();
     * 
     *     Profiler.run(threadNum, loopNum, roundNum, "Abacus Parallel",
     *             () -> assertEquals(sum.longValue(), Stream.of(strs).parallel().map(mapper).mapToLong(e -> e).sum().longValue())).printResult();
     * }
     * 
     * 
     * }</pre>
     *
     * And the test result (the unit is milliseconds; N, the number of elements, is 10_000;
     * Q, the cost per element of F, the per-element function (usually a lambda), here the
     * mapper, is calculated as: value of 'For Loop' / N (10_000)):
     *
     * <table border="1">
     * <tr><td></td><td>m = 1</td><td>m = 10</td><td>m = 50</td><td>m = 100</td><td>m = 500</td><td>m = 1000</td></tr>
     * <tr><td>Q</td><td>0.00002</td><td>0.0002</td><td>0.001</td><td>0.002</td><td>0.01</td><td>0.02</td></tr>
     * <tr><td>For Loop</td><td>0.23</td><td>2.3</td><td>11</td><td>22</td><td>110</td><td>219</td></tr>
     * <tr><td>JDK Sequential</td><td>0.28</td><td>2.3</td><td>11</td><td>22</td><td>114</td><td>212</td></tr>
     * <tr><td>JDK Parallel</td><td>0.22</td><td>1.3</td><td>6</td><td>12</td><td>66</td><td>122</td></tr>
     * <tr><td>Abacus Sequential</td><td>0.3</td><td>2</td><td>11</td><td>22</td><td>112</td><td>212</td></tr>
     * <tr><td>Abacus Parallel</td><td>11</td><td>11</td><td>11</td><td>16</td><td>77</td><td>128</td></tr>
     * </table>
     *
     * Comparison:
     * <ul>
     * <li>Again, do NOT and should NOT use parallel Streams if you don't have any performance
     *     problem with sequential Streams, because using parallel Streams has extra cost.</li>
     * <li>Again, consider using parallel Streams only when N (the number of elements) * Q (the
     *     cost per element of F, the per-element function, usually a lambda) is big enough.</li>
     * <li>The implementation of parallel Streams in Abacus is more than 10 times slower than
     *     parallel Streams in JDK when Q is tiny (here: less than 0.0002 milliseconds by the test):
     *     <ul>
     *     <li>The implementation of parallel Streams in JDK 8 can still beat the sequential/for-loop
     *         version when Q is tiny (here: 0.00002 milliseconds by the test). That's amazing,
     *         considering the extra cost brought by parallel computation. It's well done.</li>
     *     <li>The implementation of parallel Streams in Abacus is pretty simple and straightforward.
     *         The extra cost (starting threads/synchronization/queue...) brought by parallel Streams
     *         in Abacus is too big for a tiny Q (here: less than 0.001 milliseconds by the test).
     *         But it starts to be faster than sequential Streams when Q is big enough (here: 0.001
     *         milliseconds by the test) and starts to catch up with the parallel Streams in JDK when
     *         Q is bigger (here: 0.01 milliseconds by the test).</li>
     *     <li>Consider using the parallel Streams in Abacus when Q is big enough, especially when IO
     *         is involved in F, because one IO operation (e.g. a DB/web service request, reading/writing
     *         a file...) usually takes 1 to 1000 milliseconds, or even longer. With the parallel Stream
     *         APIs in Abacus it's very simple to specify the max thread number, and it's sometimes much
     *         faster to execute IO/network requests with a few more threads. It's fair to say that the
     *         parallel Streams in Abacus are highly efficient, and may be as fast as or faster than the
     *         parallel Streams in JDK when Q is big enough, except when F is a heavily CPU-bound
     *         operation. Most of the time, when Q is big enough to consider using a parallel Stream,
     *         it is because IO/network is involved in F.</li>
     *     </ul></li>
     * <li>JDK 7 is supported by the Streams in Abacus. It's perfect to work with retrolambda on Android.</li>
     * <li>All primitive types are supported by the Stream APIs in Abacus except boolean.</li>
     * </ul>

     * <p>A bit more about Lambdas/Stream APIs: you may have heard that Lambdas/Stream APIs are
     * 5 times slower than imperative programming. That's true when Q and F are VERY, VERY tiny,
     * like f = (int a, int b) -> a + b. But if we look into the samples in the article and think
     * about it, it takes less than 1 millisecond to get the max value in 100k numbers. There is a
     * potential performance issue only if the "get the max value in 100K numbers" call happens
     * many, many times in your API or in a single request. Otherwise, the difference between 0.1
     * and 0.5 milliseconds can be totally ignored. Usually we only meet performance issues when
     * Q and F are big enough; however, the performance of Lambdas/Stream APIs is close to the for
     * loop when Q and F are big enough. In either scenario, we don't need to and should not be
     * concerned about the performance of Lambdas/Stream APIs.
     *
     * <p>Although a stream may be a parallel Stream, that doesn't mean all the methods are executed
     * in parallel, because for some methods the sequential way is as fast as, or even faster than,
     * the parallel way, or it is pretty difficult, if not impossible, to implement the method with
     * a parallel approach. Here are the methods which are executed sequentially even in parallel Streams:
     *
     * <p>splitXXX/splitAt/splitBy/slidingXXX/collapse, distinct, reverse, rotate, shuffle, indexed,
     * cached, top, kthLargest, count, toArray, toList, toSet, toMultiset, toLongMultiset,
     * intersection(Collection c), difference(Collection c), symmetricDifference(Collection c),
     * forEach(identity, accumulator, predicate), findFirstOrLast, findFirstAndLast
     *
     * @param maxThreadNum Default value is the number of CPU cores. Steps/operations will be
     *        executed sequentially if maxThreadNum is 1.
     * @param splitor The target array is split by ranges for multiple threads if splitor is
     *        splitor.ARRAY and the target stream is composed of an array. It looks like:
     *
     * <pre>{@code

     * for (int i = 0; i < maxThreadNum; i++) {
     *     final int sliceIndex = i;
     * 
     *     futureList.add(asyncExecutor.execute(new Runnable() {
     *         public void run() {
     *             int cursor = fromIndex + sliceIndex * sliceSize;
     *             final int to = toIndex - cursor > sliceSize ? cursor + sliceSize : toIndex;
     *             while (cursor < to) {
     *                 action.accept(elements[cursor++]);
     *             }
     *        }
     *    }));
     * }
     * }</pre>
     *
     * Otherwise, each thread will get the elements from the target array/iterator in the stream
     * one by one, with the target array/iterator synchronized. It looks like:
     *
     * <pre>{@code
     * for (int i = 0; i < maxThreadNum; i++) {
     *     futureList.add(asyncExecutor.execute(new Runnable() {
     *         public void run() {
     *             T next = null;
     * 
     *             while (true) {
     *                 synchronized (elements) {
     *                     if (cursor.intValue() < toIndex) {
     *                         next = elements[cursor.getAndIncrement()];
     *                     } else {
     *                         break;
     *                     }
     *                 }
     * 
     *                 action.accept(next);
     *             }
     *         }
     *     }));
     * }
     * }</pre>
     *
     * Use splitor.ARRAY only when F (the per-element function, usually a lambda) is very tiny and
     * the cost of synchronization on the target array/iterator is too big for it. For an F involving
     * IO or taking a long time to complete, choose splitor.ITERATOR. The default value is splitor.ITERATOR.
     *
     * @return
     * @see Nth
     * @see com.landawn.abacus.util.Profiler#run(int, int, int, String, Runnable)
     * @see Understanding Parallel Stream Performance in Java SE 8
     * @see When to use parallel Streams
     */
    S parallel(int maxThreadNum, Splitor splitor);

    /**
     * Return the underlying maxThreadNum if the stream is parallel, otherwise 1 is returned.
     *
     * @return
     */
    int maxThreadNum();

    /**
     * Returns a parallel stream with the specified maxThreadNum. Or return
     * itself, either because the stream was already parallel with the same maxThreadNum, or because
     * it's a sequential stream.
     *
     * @param maxThreadNum
     * @return
     */
    S maxThreadNum(int maxThreadNum);

    /**
     * Return the underlying splitor if the stream is parallel, otherwise the default value splitor.ITERATOR is returned.
     *
     * @return
     */
    Splitor splitor();

    /**
     * Returns a parallel stream with the specified splitor. Or return
     * itself, either because the stream was already parallel with the same splitor, or because
     * it's a sequential stream.
     *
     * @param splitor
     * @return
     */
    S splitor(Splitor splitor);

    R __(Function transfer);

//    /**
//     * Short-cut for s.parallel().__(op).sequential().
//     *
//     * @param op
//     * @return
//     */
//    @SuppressWarnings("rawtypes")
//    SS p_s(Function op);

//    /**
//     * Short-cut for s.parallel(maxThreadNum).__(op).sequential().
//     *
//     * @param maxThreadNum
//     * @param op
//     * @return
//     */
//    @SuppressWarnings("rawtypes")
//    SS p_s(int maxThreadNum, Function op);

    Try tried();
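
    /*
     * Illustrative sketch (added for clarity; not part of the original source): bounding the
     * thread count for an IO-bound step and making sure the parallel stream gets closed, as the
     * parallel() documentation above asks for. The urls collection and the fetch(...) helper are
     * hypothetical placeholders; only parallel(int), map and close/try-with-resources come from
     * the documented API (BaseStream extends AutoCloseable).
     *
     *     try (Stream<String> s = Stream.of(urls).parallel(8)) {
     *         s.map(u -> fetch(u)).println();
     *     }
     */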

    /**
     * Returns an equivalent stream with an additional close handler. Close
     * handlers are run when the {@link #close()} method
     * is called on the stream, and are executed in the order they were
     * added. All close handlers are run, even if earlier close handlers throw
     * exceptions. If any close handler throws an exception, the first
     * exception thrown will be relayed to the caller of {@code close()}, with
     * any remaining exceptions added to that exception as suppressed exceptions
     * (unless one of the remaining exceptions is the same exception as the
     * first exception, since an exception cannot suppress itself). May
     * return itself.
     *
     * <p>This is an intermediate operation.
     *
     * @param closeHandler A task to execute when the stream is closed
     * @return a stream with a handler that is run if the stream is closed
     */
    S onClose(Runnable closeHandler);

    /**
     * Closes this stream, causing all close handlers for this stream pipeline
     * to be called.
     *
     * @see AutoCloseable#close()
     */
    @Override
    void close();

    public static enum Splitor {
        ARRAY, ITERATOR;
    }
}




