/**
 * Copyright (C) 2014 Typesafe Inc. <http://www.typesafe.com>
 */
2014-04-01 16:16:26 +02:00
package akka.stream.scaladsl
2014-04-01 15:19:42 +02:00
2015-01-28 14:19:50 +01:00
import akka.stream.impl.Stages. { MaterializingStageFactory , StageModule }
import akka.stream.impl.StreamLayout. { EmptyModule , Module }
import akka.stream._
2014-12-01 20:07:55 +02:00
import akka.stream.scaladsl.OperationAttributes._
2014-10-27 14:35:41 +01:00
import akka.util.Collections.EmptyImmutableSeq
2015-01-28 14:19:50 +01:00
import org.reactivestreams.Processor
import scala.annotation.unchecked.uncheckedVariance
2014-04-01 15:19:42 +02:00
import scala.collection.immutable
2014-10-27 14:35:41 +01:00
import scala.concurrent.duration. { Duration , FiniteDuration }
2014-04-01 15:19:42 +02:00
import scala.concurrent.Future
2014-10-27 14:35:41 +01:00
import scala.language.higherKinds
2014-11-12 10:43:39 +01:00
import akka.stream.stage._
2015-01-28 14:19:50 +01:00
import akka.stream.impl. { Stages , StreamLayout , FlowModule }
2014-04-01 15:19:42 +02:00
/**
 * A `Flow` is a set of stream processing steps that has one open input and one open output.
 */
final class Flow[-In, +Out, +Mat](private[stream] override val module: Module)
  extends FlowOps[Out, Mat] with Graph[FlowShape[In, Out], Mat] {

  /** The shape of a flow: exactly one inlet and one outlet. */
  override val shape: FlowShape[In, Out] = module.shape.asInstanceOf[FlowShape[In, Out]]

  // Appending steps keeps the input type; the output and materialized types change.
  override type Repr[+O, +M] = Flow[In @uncheckedVariance, O, M]

  /** True when this flow is the no-op identity stage; used to short-circuit composition below. */
  private[stream] def isIdentity: Boolean = this.module.isInstanceOf[Stages.Identity]

  /**
   * Transform this [[Flow]] by appending the given processing steps.
   * The materialized value of this flow is kept (left).
   */
  def via[T, Mat2](flow: Flow[Out, T, Mat2]): Flow[In, T, Mat] = viaMat(flow)(Keep.left)

  /**
   * Transform this [[Flow]] by appending the given processing steps,
   * combining the two materialized values with `combine`.
   */
  def viaMat[T, Mat2, Mat3](flow: Flow[Out, T, Mat2])(combine: (Mat, Mat2) ⇒ Mat3): Flow[In, T, Mat3] =
    if (this.isIdentity) flow.asInstanceOf[Flow[In, T, Mat3]]
    else {
      // Copy the appended module so the argument flow stays reusable elsewhere.
      val appended = flow.module.carbonCopy
      new Flow(
        module
          .growConnect(appended, shape.outlet, appended.shape.inlets.head, combine)
          .replaceShape(FlowShape(shape.inlet, appended.shape.outlets.head)))
    }

  /**
   * Connect this [[Flow]] to a [[Sink]], concatenating the processing steps of both.
   * The materialized value of this flow is kept (left).
   */
  def to[Mat2](sink: Sink[Out, Mat2]): Sink[In, Mat] = toMat(sink)(Keep.left)

  /**
   * Connect this [[Flow]] to a [[Sink]], concatenating the processing steps of both,
   * combining the two materialized values with `combine`.
   */
  def toMat[Mat2, Mat3](sink: Sink[Out, Mat2])(combine: (Mat, Mat2) ⇒ Mat3): Sink[In, Mat3] =
    if (isIdentity) sink.asInstanceOf[Sink[In, Mat3]]
    else {
      val appended = sink.module.carbonCopy
      new Sink(
        module
          .growConnect(appended, shape.outlet, appended.shape.inlets.head, combine)
          .replaceShape(SinkShape(shape.inlet)))
    }

  /**
   * Transform the materialized value of this Flow, leaving all other properties as they were.
   */
  def mapMaterialized[Mat2](f: Mat ⇒ Mat2): Repr[Out, Mat2] =
    new Flow(module.transformMaterializedValue(f.asInstanceOf[Any ⇒ Any]))

  /**
   * Join this [[Flow]] to another [[Flow]] by cross connecting the inputs and outputs,
   * creating a [[RunnableFlow]]; materialized values are combined with `combine`.
   */
  def joinMat[Mat2, Mat3](flow: Flow[Out, In, Mat2])(combine: (Mat, Mat2) ⇒ Mat3): RunnableFlow[Mat3] = {
    val other = flow.module.carbonCopy
    RunnableFlow(
      module
        .grow(other, combine)
        .connect(shape.outlet, other.shape.inlets.head)
        .connect(other.shape.outlets.head, shape.inlet))
  }

  /**
   * Join this [[Flow]] to another [[Flow]] by cross connecting the inputs and outputs,
   * creating a [[RunnableFlow]]; both materialized values are kept as a tuple.
   */
  def join[Mat2](flow: Flow[Out, In, Mat2]): RunnableFlow[(Mat, Mat2)] = joinMat(flow)(Keep.both)

  /**
   * Concatenate the given [[Source]] after this [[Flow]]: elements of this flow are
   * emitted first, then the elements of `source`. Both materialized values are kept.
   */
  def concat[Out2 >: Out, Mat2](source: Source[Out2, Mat2]): Flow[In, Out2, (Mat, Mat2)] =
    this.viaMat(Flow(source) { implicit builder ⇒ s ⇒
      import FlowGraph.Implicits._
      val junction = builder.add(Concat[Out2]())
      // The appended source feeds the second input port of the Concat junction.
      s.outlet ~> junction.in(1)
      (junction.in(0), junction.out)
    })(Keep.both)

  /** INTERNAL API */
  override private[stream] def andThen[U](op: StageModule): Repr[U, Mat] = {
    // No need to copy here, op is a fresh instance.
    if (this.isIdentity) new Flow(op).asInstanceOf[Repr[U, Mat]]
    else new Flow(module.growConnect(op, shape.outlet, op.inPort).replaceShape(FlowShape(shape.inlet, op.outPort)))
  }

  /** INTERNAL API: append a stage whose materialized value replaces the current one. */
  private[stream] def andThenMat[U, Mat2](op: MaterializingStageFactory): Repr[U, Mat2] = {
    if (this.isIdentity) new Flow(op).asInstanceOf[Repr[U, Mat2]]
    else new Flow(module.growConnect(op, shape.outlet, op.inPort, Keep.right).replaceShape(FlowShape(shape.inlet, op.outPort)))
  }

  /** INTERNAL API: append a stage backed by a raw Reactive Streams [[Processor]]. */
  private[stream] def andThenMat[U, Mat2, O >: Out](processorFactory: () ⇒ (Processor[O, U], Mat2)): Repr[U, Mat2] = {
    val op = Stages.DirectProcessor(processorFactory.asInstanceOf[() ⇒ (Processor[Any, Any], Any)])
    if (this.isIdentity) new Flow(op).asInstanceOf[Repr[U, Mat2]]
    else new Flow[In, U, Mat2](module.growConnect(op, shape.outlet, op.inPort, Keep.right).replaceShape(FlowShape(shape.inlet, op.outPort)))
  }

  /** Replace the attributes of this flow; the module is wrapped so they apply as a unit. */
  override def withAttributes(attr: OperationAttributes): Repr[Out, Mat] = {
    require(this.module ne EmptyModule, "Cannot set the attributes of empty flow")
    new Flow(module.withAttributes(attr).wrap())
  }

  /**
   * Connect the `Source` to this `Flow` and then connect it to the `Sink` and run it. The returned tuple contains
   * the materialized values of the `Source` and `Sink`, e.g. the `Subscriber` of a [[SubscriberSource]] and
   * the `Publisher` of a [[PublisherSink]].
   */
  def runWith[Mat1, Mat2](source: Source[In, Mat1], sink: Sink[Out, Mat2])(implicit materializer: FlowMaterializer): (Mat1, Mat2) =
    source.via(this).toMat(sink)(Keep.both).run()

  /**
   * Applies the given [[OperationAttributes]] to the steps built by `section`,
   * combining the materialized values with `combine`.
   */
  def section[O, O2 >: Out, Mat2, Mat3](attributes: OperationAttributes, combine: (Mat, Mat2) ⇒ Mat3)(section: Flow[O2, O2, Unit] ⇒ Flow[O2, O, Mat2]): Flow[In, O, Mat3] = {
    // Build the sub-flow from an identity flow, copy it and attach the attributes as a unit.
    val subFlow = section(Flow[O2]).module.carbonCopy.withAttributes(attributes).wrap()
    if (this.isIdentity) new Flow(subFlow).asInstanceOf[Flow[In, O, Mat3]]
    else new Flow(
      module
        .growConnect(subFlow, shape.outlet, subFlow.shape.inlets.head, combine)
        .replaceShape(FlowShape(shape.inlet, subFlow.shape.outlets.head)))
  }

  /**
   * Applies given [[OperationAttributes]] to a given section.
   */
  def section[O, O2 >: Out, Mat2](attributes: OperationAttributes)(section: Flow[O2, O2, Unit] ⇒ Flow[O2, O, Mat2]): Flow[In, O, Mat2] =
    this.section[O, O2, Mat2, Mat2](attributes, Keep.right)(section)
}
2014-04-01 15:19:42 +02:00
2015-01-28 14:19:50 +01:00
object Flow extends FlowApply {

  /** Creates a fresh pair of ports for a flow shape with the given name. */
  private def shape[I, O](name: String): FlowShape[I, O] =
    FlowShape(new Inlet(name + ".in"), new Outlet(name + ".out"))

  /**
   * Helper to create `Flow` without a [[Source]] or a [[Sink]].
   * Example usage: `Flow[Int]`
   */
  def apply[T]: Flow[T, T, Unit] =
    new Flow[Any, Any, Any](Stages.Identity()).asInstanceOf[Flow[T, T, Unit]]

  /**
   * A graph with the shape of a flow logically is a flow, this method makes
   * it so also in type.
   */
  def wrap[I, O, M](g: Graph[FlowShape[I, O], M]): Flow[I, O, M] = new Flow(g.module)
}
/**
 * Flow with attached input and output, can be executed.
 */
case class RunnableFlow[+Mat](private[stream] val module: StreamLayout.Module) extends Graph[ClosedShape, Mat] {
  // A runnable flow must have no unconnected inlets or outlets left.
  assert(module.isRunnable)

  /** A runnable flow is a fully closed graph. */
  def shape: ClosedShape = ClosedShape

  /**
   * Transform only the materialized value of this RunnableFlow, leaving all other properties as they were.
   */
  def mapMaterialized[Mat2](f: Mat ⇒ Mat2): RunnableFlow[Mat2] =
    copy(module.transformMaterializedValue(f.asInstanceOf[Any ⇒ Any]))

  /**
   * Run this flow and return the materialized instance from the flow.
   */
  def run()(implicit materializer: FlowMaterializer): Mat = materializer.materialize(this)
}
/**
 * Scala API: Operations offered by Sources and Flows with a free output side: the DSL flows left-to-right only.
 */
trait FlowOps[+Out, +Mat] {
  import akka.stream.impl.Stages._
  import FlowOps._

  // The concrete representation returned by the combinators (e.g. Flow or Source).
  type Repr[+O, +M] <: FlowOps[O, M]

  /**
   * Transform this stream by applying the given function to each of the elements
   * as they pass through this processing step.
   */
  def map[T](f: Out ⇒ T): Repr[T, Mat] = andThen(Map(f.asInstanceOf[Any ⇒ Any]))

  /**
   * Transform each input element into a sequence of output elements that is
   * then flattened into the output stream.
   */
  def mapConcat[T](f: Out ⇒ immutable.Seq[T]): Repr[T, Mat] = andThen(MapConcat(f.asInstanceOf[Any ⇒ immutable.Seq[Any]]))

  /**
   * Transform this stream by applying the given function to each of the elements
   * as they pass through this processing step. The function returns a `Future` and the
   * value of that future will be emitted downstream. As many futures as requested elements by
   * downstream may run in parallel and may complete in any order, but the elements that
   * are emitted downstream are in the same order as received from upstream.
   *
   * If the function `f` throws an exception or if the `Future` is completed
   * with failure and the supervision decision is [[akka.stream.Supervision.Stop]]
   * the stream will be completed with failure.
   *
   * If the function `f` throws an exception or if the `Future` is completed
   * with failure and the supervision decision is [[akka.stream.Supervision.Resume]] or
   * [[akka.stream.Supervision.Restart]] the element is dropped and the stream continues.
   *
   * @see [[#mapAsyncUnordered]]
   */
  def mapAsync[T](f: Out ⇒ Future[T]): Repr[T, Mat] =
    andThen(MapAsync(f.asInstanceOf[Any ⇒ Future[Any]]))

  /**
   * Transform this stream by applying the given function to each of the elements
   * as they pass through this processing step. The function returns a `Future` and the
   * value of that future will be emitted downstream. As many futures as requested elements by
   * downstream may run in parallel and each processed element will be emitted downstream
   * as soon as it is ready, i.e. it is possible that the elements are not emitted downstream
   * in the same order as received from upstream.
   *
   * If the function `f` throws an exception or if the `Future` is completed
   * with failure and the supervision decision is [[akka.stream.Supervision.Stop]]
   * the stream will be completed with failure.
   *
   * If the function `f` throws an exception or if the `Future` is completed
   * with failure and the supervision decision is [[akka.stream.Supervision.Resume]] or
   * [[akka.stream.Supervision.Restart]] the element is dropped and the stream continues.
   *
   * @see [[#mapAsync]]
   */
  def mapAsyncUnordered[T](f: Out ⇒ Future[T]): Repr[T, Mat] =
    andThen(MapAsyncUnordered(f.asInstanceOf[Any ⇒ Future[Any]]))

  /**
   * Only pass on those elements that satisfy the given predicate.
   */
  def filter(p: Out ⇒ Boolean): Repr[Out, Mat] = andThen(Filter(p.asInstanceOf[Any ⇒ Boolean]))

  /**
   * Transform this stream by applying the given partial function to each of the elements
   * on which the function is defined as they pass through this processing step.
   * Non-matching elements are filtered out.
   */
  def collect[T](pf: PartialFunction[Out, T]): Repr[T, Mat] = andThen(Collect(pf.asInstanceOf[PartialFunction[Any, Any]]))

  /**
   * Chunk up this stream into groups of the given size, with the last group
   * possibly smaller than requested due to end-of-stream.
   *
   * `n` must be positive, otherwise IllegalArgumentException is thrown.
   */
  def grouped(n: Int): Repr[immutable.Seq[Out], Mat] = andThen(Grouped(n))

  /**
   * Similar to `fold` but is not a terminal operation,
   * emits its current value which starts at `zero` and then
   * applies the current and next value to the given function `f`,
   * emitting the next current value.
   *
   * If the function `f` throws an exception and the supervision decision is
   * [[akka.stream.Supervision.Restart]] the current value starts at `zero` again
   * and the stream will continue.
   */
  def scan[T](zero: T)(f: (T, Out) ⇒ T): Repr[T, Mat] = andThen(Scan(zero, f.asInstanceOf[(Any, Any) ⇒ Any]))

  /**
   * Chunk up this stream into groups of elements received within a time window,
   * or limited by the given number of elements, whatever happens first.
   * Empty groups will not be emitted if no elements are received from upstream.
   * The last group before end-of-stream will contain the buffered elements
   * since the previously emitted group.
   *
   * `n` must be positive, and `d` must be greater than 0 seconds, otherwise
   * IllegalArgumentException is thrown.
   */
  def groupedWithin(n: Int, d: FiniteDuration): Repr[Out, Mat]#Repr[immutable.Seq[Out], Mat] = {
    require(n > 0, "n must be greater than 0")
    require(d > Duration.Zero, "d must be greater than 0 seconds")
    withAttributes(name("groupedWithin")).timerTransform(() ⇒ new TimerTransformer[Out, immutable.Seq[Out]] {
      schedulePeriodically(GroupedWithinTimerKey, d)
      var buf: Vector[Out] = Vector.empty

      def onNext(in: Out) = {
        buf :+= in
        if (buf.size == n) {
          // Group is full: start a new time window and emit immediately.
          schedulePeriodically(GroupedWithinTimerKey, d)
          emitGroup()
        } else Nil
      }

      // Flush any buffered elements as a final group when the stream ends.
      override def onTermination(e: Option[Throwable]) = if (buf.isEmpty) Nil else List(buf)

      def onTimer(timerKey: Any) = emitGroup()

      // Emits the buffered group (if any) and resets the buffer.
      private def emitGroup(): immutable.Seq[immutable.Seq[Out]] =
        if (buf.isEmpty) EmptyImmutableSeq
        else {
          val group = buf
          buf = Vector.empty
          List(group)
        }
    })
  }

  /**
   * Discard the given number of elements at the beginning of the stream.
   * No elements will be dropped if `n` is zero or negative.
   */
  def drop(n: Long): Repr[Out, Mat] = andThen(Drop(n))

  /**
   * Discard the elements received within the given duration at beginning of the stream.
   */
  def dropWithin(d: FiniteDuration): Repr[Out, Mat]#Repr[Out, Mat] =
    withAttributes(name("dropWithin")).timerTransform(() ⇒ new TimerTransformer[Out, Out] {
      scheduleOnce(DropWithinTimerKey, d)

      // Swallow every element until the timer fires, then switch to pass-through.
      var delegate: TransformerLike[Out, Out] =
        new TransformerLike[Out, Out] {
          def onNext(in: Out) = Nil
        }

      def onNext(in: Out) = delegate.onNext(in)
      def onTimer(timerKey: Any) = {
        delegate = FlowOps.identityTransformer[Out]
        Nil
      }
    })

  /**
   * Terminate processing (and cancel the upstream publisher) after the given
   * number of elements. Due to input buffering some elements may have been
   * requested from upstream publishers that will then not be processed downstream
   * of this step.
   *
   * The stream will be completed without producing any elements if `n` is zero
   * or negative.
   */
  def take(n: Long): Repr[Out, Mat] = andThen(Take(n))

  /**
   * Terminate processing (and cancel the upstream publisher) after the given
   * duration. Due to input buffering some elements may have been
   * requested from upstream publishers that will then not be processed downstream
   * of this step.
   *
   * Note that this can be combined with [[#take]] to limit the number of elements
   * within the duration.
   */
  def takeWithin(d: FiniteDuration): Repr[Out, Mat]#Repr[Out, Mat] =
    withAttributes(name("takeWithin")).timerTransform(() ⇒ new TimerTransformer[Out, Out] {
      scheduleOnce(TakeWithinTimerKey, d)

      // Pass elements through until the timer fires, then switch to the completed transformer.
      var delegate: TransformerLike[Out, Out] = FlowOps.identityTransformer[Out]

      override def onNext(in: Out) = delegate.onNext(in)
      override def isComplete = delegate.isComplete
      override def onTimer(timerKey: Any) = {
        delegate = FlowOps.completedTransformer[Out]
        Nil
      }
    })

  /**
   * Allows a faster upstream to progress independently of a slower subscriber by conflating elements into a summary
   * until the subscriber is ready to accept them. For example a conflate step might average incoming numbers if the
   * upstream publisher is faster.
   *
   * This element only rolls up elements if the upstream is faster, but if the downstream is faster it will not
   * duplicate elements.
   *
   * @param seed Provides the first state for a conflated value using the first unconsumed element as a start
   * @param aggregate Takes the currently aggregated value and the current pending element to produce a new aggregate
   */
  def conflate[S](seed: Out ⇒ S)(aggregate: (S, Out) ⇒ S): Repr[S, Mat] =
    andThen(Conflate(seed.asInstanceOf[Any ⇒ Any], aggregate.asInstanceOf[(Any, Any) ⇒ Any]))

  /**
   * Allows a faster downstream to progress independently of a slower publisher by extrapolating elements from an older
   * element until new element comes from the upstream. For example an expand step might repeat the last element for
   * the subscriber until it receives an update from upstream.
   *
   * This element will never "drop" upstream elements as all elements go through at least one extrapolation step.
   * This means that if the upstream is actually faster than the downstream it will be backpressured by the downstream
   * subscriber.
   *
   * Expand does not support [[akka.stream.Supervision.Restart]] and [[akka.stream.Supervision.Resume]].
   * Exceptions from the `seed` or `extrapolate` functions will complete the stream with failure.
   *
   * @param seed Provides the first state for extrapolation using the first unconsumed element
   * @param extrapolate Takes the current extrapolation state to produce an output element and the next extrapolation
   *   state.
   */
  def expand[S, U](seed: Out ⇒ S)(extrapolate: S ⇒ (U, S)): Repr[U, Mat] =
    andThen(Expand(seed.asInstanceOf[Any ⇒ Any], extrapolate.asInstanceOf[Any ⇒ (Any, Any)]))

  /**
   * Adds a fixed size buffer in the flow that allows to store elements from a faster upstream until it becomes full.
   * Depending on the defined [[akka.stream.OverflowStrategy]] it might drop elements or backpressure the upstream if
   * there is no space available.
   *
   * @param size The size of the buffer in element count
   * @param overflowStrategy Strategy that is used when incoming elements cannot fit inside the buffer
   */
  def buffer(size: Int, overflowStrategy: OverflowStrategy): Repr[Out, Mat] =
    andThen(Buffer(size, overflowStrategy))

  /**
   * Generic transformation of a stream with a custom processing [[akka.stream.stage.Stage]].
   * This operator makes it possible to extend the `Flow` API when there is no specialized
   * operator that performs the transformation.
   */
  def transform[T](mkStage: () ⇒ Stage[Out, T]): Repr[T, Mat] =
    andThen(StageFactory(mkStage))

  /** INTERNAL API: like [[#transform]] but the factory also produces the materialized value. */
  private[akka] def transformMaterializing[T, M](mkStageAndMaterialized: () ⇒ (Stage[Out, T], M)): Repr[T, M] =
    andThenMat(MaterializingStageFactory(mkStageAndMaterialized))

  /**
   * Takes up to `n` elements from the stream and returns a pair containing a strict sequence of the taken elements
   * and a stream representing the remaining elements. If `n` is zero or negative, then this will return a pair
   * of an empty collection and a stream containing the whole upstream unchanged.
   */
  def prefixAndTail[U >: Out](n: Int): Repr[(immutable.Seq[Out], Source[U, Unit]), Mat] =
    andThen(PrefixAndTail(n))

  /**
   * This operation demultiplexes the incoming stream into separate output
   * streams, one for each element key. The key is computed for each element
   * using the given function. When a new key is encountered for the first time
   * it is emitted to the downstream subscriber together with a fresh
   * flow that will eventually produce all the elements of the substream
   * for that key. Not consuming the elements from the created streams will
   * stop this processor from processing more elements, therefore you must take
   * care to unblock (or cancel) all of the produced streams even if you want
   * to consume only one of them.
   *
   * If the group by function `f` throws an exception and the supervision decision
   * is [[akka.stream.Supervision.Stop]] the stream and substreams will be completed
   * with failure.
   *
   * If the group by function `f` throws an exception and the supervision decision
   * is [[akka.stream.Supervision.Resume]] or [[akka.stream.Supervision.Restart]]
   * the element is dropped and the stream and substreams continue.
   */
  def groupBy[K, U >: Out](f: Out ⇒ K): Repr[(K, Source[U, Unit]), Mat] =
    andThen(GroupBy(f.asInstanceOf[Any ⇒ Any]))

  /**
   * This operation applies the given predicate to all incoming elements and
   * emits them to a stream of output streams, always beginning a new one with
   * the current element if the given predicate returns true for it. This means
   * that for the following series of predicate values, three substreams will
   * be produced with lengths 1, 2, and 3:
   *
   * {{{
   * false,             // element goes into first substream
   * true, false,       // elements go into second substream
   * true, false, false // elements go into third substream
   * }}}
   *
   * If the split predicate `p` throws an exception and the supervision decision
   * is [[akka.stream.Supervision.Stop]] the stream and substreams will be completed
   * with failure.
   *
   * If the split predicate `p` throws an exception and the supervision decision
   * is [[akka.stream.Supervision.Resume]] or [[akka.stream.Supervision.Restart]]
   * the element is dropped and the stream and substreams continue.
   */
  def splitWhen[U >: Out](p: Out ⇒ Boolean): Repr[Source[U, Unit], Mat] =
    andThen(SplitWhen(p.asInstanceOf[Any ⇒ Boolean]))

  /**
   * Transforms a stream of streams into a contiguous stream of elements using the provided flattening strategy.
   * This operation can be used on a stream of element type [[akka.stream.scaladsl.Source]].
   */
  def flatten[U](strategy: akka.stream.FlattenStrategy[Out, U]): Repr[U, Mat] = strategy match {
    case _: FlattenStrategy.Concat[Out] ⇒ andThen(ConcatAll())
    case _ ⇒
      throw new IllegalArgumentException(s"Unsupported flattening strategy [${strategy.getClass.getName}]")
  }

  /**
   * INTERNAL API - meant for removal / rewrite. See https://github.com/akka/akka/issues/16393
   *
   * Transformation of a stream, with additional support for scheduled events.
   *
   * For each element the [[akka.stream.TransformerLike#onNext]]
   * function is invoked, expecting a (possibly empty) sequence of output elements
   * to be produced.
   * After handing off the elements produced from one input element to the downstream
   * subscribers, the [[akka.stream.TransformerLike#isComplete]] predicate determines whether to end
   * stream processing at this point; in that case the upstream subscription is
   * canceled. Before signaling normal completion to the downstream subscribers,
   * the [[akka.stream.TransformerLike#onTermination]] function is invoked to produce a (possibly empty)
   * sequence of elements in response to the end-of-stream event.
   *
   * [[akka.stream.TransformerLike#onError]] is called when failure is signaled from upstream.
   *
   * After normal completion or failure the [[akka.stream.TransformerLike#cleanup]] function is called.
   *
   * It is possible to keep state in the concrete [[akka.stream.Transformer]] instance with
   * ordinary instance variables. The [[akka.stream.Transformer]] is executed by an actor and
   * therefore you do not have to add any additional thread safety or memory
   * visibility constructs to access the state from the callback methods.
   *
   * Note that you can use [[#transform]] if you just need to transform elements and time plays no role in the
   * transformation.
   */
  private[akka] def timerTransform[U](mkStage: () ⇒ TimerTransformer[Out, U]): Repr[U, Mat] =
    andThen(TimerTransform(mkStage.asInstanceOf[() ⇒ TimerTransformer[Any, Any]]))

  /** Change the attributes of this stage / flow; implemented by the concrete subtype. */
  def withAttributes(attr: OperationAttributes): Repr[Out, Mat]

  /** INTERNAL API */
  private[scaladsl] def andThen[U](op: StageModule): Repr[U, Mat]
  private[scaladsl] def andThenMat[U, Mat2](op: MaterializingStageFactory): Repr[U, Mat2]
}
/**
 * INTERNAL API
 */
private[stream] object FlowOps {
  // Timer keys for the *Within stages; private objects so the keys cannot collide.
  private case object TakeWithinTimerKey
  private case object DropWithinTimerKey
  private case object GroupedWithinTimerKey

  /** Swallows every element and reports the stream as complete. */
  private[this] final case object CompletedTransformer extends TransformerLike[Any, Any] {
    override def onNext(elem: Any) = Nil
    override def isComplete = true
  }

  /** Passes every element through unchanged. */
  private[this] final case object IdentityTransformer extends TransformerLike[Any, Any] {
    override def onNext(elem: Any) = List(elem)
  }

  /** Shared completed transformer, cast to the requested element type (safe: it never touches elements). */
  def completedTransformer[T]: TransformerLike[T, T] = CompletedTransformer.asInstanceOf[TransformerLike[T, T]]

  /** Shared identity transformer, cast to the requested element type (safe: it only forwards elements). */
  def identityTransformer[T]: TransformerLike[T, T] = IdentityTransformer.asInstanceOf[TransformerLike[T, T]]
}