Merge pull request #19148 from akka/wip-19145-fix-FlowGroupBySpec-RK

#19145 fix FlowGroupBySpec
This commit is contained in:
Roland Kuhn 2015-12-13 15:19:57 +01:00
commit faf352a189
6 changed files with 63 additions and 17 deletions

View file

@@ -27,17 +27,34 @@ class SubFlow[-In, +Out, +Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flow[In, Out, Mat]#Repr, scaladsl.Sink[In, Mat]])
/**
 * Converts this Java DSL element to its Scala DSL counterpart by returning
 * the wrapped `scaladsl.SubFlow` delegate unchanged.
 *
 * NOTE(review): the `@uncheckedVariance` annotation suppresses the variance
 * check on the returned type — presumably sound here because the delegate is
 * only read, but confirm against the scaladsl definition.
 */
def asScala: scaladsl.SubFlow[Out, Mat, scaladsl.Flow[In, Out, Mat]#Repr, scaladsl.Sink[In, Mat]] @uncheckedVariance = delegate
/**
 * Flatten the sub-flows back into the super-flow by performing a merge
 * without parallelism limit (i.e. having an unbounded number of sub-flows
 * active concurrently).
 *
 * This is identical in effect to `mergeSubstreamsWithParallelism(Integer.MAX_VALUE)`.
 */
def mergeSubstreams(): Flow[In, Out, Mat] =
new Flow(delegate.mergeSubstreams)
/**
 * Flatten the sub-flows back into the super-flow by performing a merge,
 * capping how many substreams run at once. At most `parallelism` substreams
 * are executed at any given time; substreams beyond that limit are not yet
 * materialized, so back-pressure is exerted at the operator that creates
 * the substreams once the limit is reached.
 *
 * @param parallelism maximum number of substreams materialized and
 *                    executed concurrently
 */
def mergeSubstreamsWithParallelism(parallelism: Int): Flow[In, Out, Mat] = {
  // Delegate to the Scala DSL implementation, then re-wrap in the Java DSL Flow.
  val merged = delegate.mergeSubstreamsWithParallelism(parallelism)
  new Flow(merged)
}
/**
 * Flatten the sub-flows back into the super-flow by concatenating them.
 * This is usually a bad idea when combined with `groupBy` since it can
 * easily lead to deadlock — the concatenation does not consume from the second
 * substream until the first has finished and the `groupBy` stage will get
 * back-pressure from the second stream.
 *
 * This is identical in effect to `mergeSubstreamsWithParallelism(1)`.
 */
def concatSubstreams(): Flow[In, Out, Mat] =
new Flow(delegate.concatSubstreams)