/**
 * Copyright (C) 2014-2017 Lightbend Inc. <http://www.lightbend.com>
 */
package akka.stream.javadsl

import java.util
import java.util.Optional

import akka.util.ConstantFun
import akka.{ Done, NotUsed }
import akka.actor.{ ActorRef, Cancellable, Props }
import akka.event.LoggingAdapter
import akka.japi.{ Pair, Util, function }
import akka.stream._
import akka.stream.impl.{ LinearTraversalBuilder, SourceQueueAdapter, StreamLayout }
import org.reactivestreams.{ Publisher, Subscriber }

import scala.annotation.unchecked.uncheckedVariance
import scala.collection.JavaConverters._
import scala.collection.immutable
import scala.collection.immutable.Range.Inclusive
import scala.concurrent.duration.FiniteDuration
import scala.concurrent.{ Future, Promise }
import scala.compat.java8.OptionConverters._

import java.util.concurrent.CompletionStage
import java.util.concurrent.CompletableFuture

import scala.compat.java8.FutureConverters._

/** Java API */
object Source {

  private[this] val _empty = new Source[Any, NotUsed](scaladsl.Source.empty)

  /**
   * Create a `Source` with no elements, i.e. an empty stream that is completed immediately
   * for every connected `Sink`.
   */
  def empty[O](): Source[O, NotUsed] = _empty.asInstanceOf[Source[O, NotUsed]]

  /**
   * Create a `Source` which materializes a [[java.util.concurrent.CompletableFuture]] which controls what element
   * will be emitted by the Source.
   * If the materialized promise is completed with a filled Optional, that value will be produced downstream,
   * followed by completion.
   * If the materialized promise is completed with an empty Optional, no value will be produced downstream and completion will
   * be signalled immediately.
   * If the materialized promise is completed with a failure, then the returned source will terminate with that error.
   * If the downstream of this source cancels before the promise has been completed, then the promise will be completed
   * with an empty Optional.
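   *
   * A minimal usage sketch (assumes a `materializer` is in scope; names are illustrative):
   * {{{
   * Source<String, CompletableFuture<Optional<String>>> source = Source.maybe();
   * CompletableFuture<Optional<String>> promise =
   *   source.to(Sink.foreach(System.out::println)).run(materializer);
   * promise.complete(Optional.of("hello")); // emits "hello", then completes the stream
   * }}}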
   */
  def maybe[T]: Source[T, CompletableFuture[Optional[T]]] = {
    new Source(scaladsl.Source.maybe[T].mapMaterializedValue { scalaOptionPromise: Promise[Option[T]] ⇒
      val javaOptionPromise = new CompletableFuture[Optional[T]]()
      scalaOptionPromise.completeWith(
        javaOptionPromise.toScala
          .map(_.asScala)(akka.dispatch.ExecutionContexts.sameThreadExecutionContext))
      javaOptionPromise
    })
  }

  /**
   * Helper to create [[Source]] from `Publisher`.
   *
   * Construct a transformation starting with given publisher. The transformation steps
   * are executed by a series of [[org.reactivestreams.Processor]] instances
   * that mediate the flow of elements downstream and the propagation of
   * back-pressure upstream.
   */
  def fromPublisher[O](publisher: Publisher[O]): javadsl.Source[O, NotUsed] =
    new Source(scaladsl.Source.fromPublisher(publisher))

  /**
   * Helper to create [[Source]] from `Iterator`.
   * Example usage:
   *
   * {{{
   * List<Integer> data = new ArrayList<Integer>();
   * data.add(1);
   * data.add(2);
   * data.add(3);
   * Source.fromIterator(() -> data.iterator());
   * }}}
   *
   * Start a new `Source` from the given Iterator. The produced stream of elements
   * will continue until the iterator runs empty or fails during evaluation of
   * the `next()` method. Elements are pulled out of the iterator
   * in accordance with the demand coming from the downstream transformation
   * steps.
   */
  def fromIterator[O](f: function.Creator[java.util.Iterator[O]]): javadsl.Source[O, NotUsed] =
    new Source(scaladsl.Source.fromIterator(() ⇒ f.create().asScala))

  /**
   * Helper to create 'cycled' [[Source]] from iterator provider.
   * Example usage:
   *
   * {{{
   * Source.cycle(() -> Arrays.asList(1, 2, 3).iterator());
   * }}}
   *
   * Start a new 'cycled' `Source` from the given elements. The produced stream of elements
   * will continue infinitely by repeating the sequence of elements provided by the function parameter.
   */
  def cycle[O](f: function.Creator[java.util.Iterator[O]]): javadsl.Source[O, NotUsed] =
    new Source(scaladsl.Source.cycle(() ⇒ f.create().asScala))

  /**
   * Helper to create [[Source]] from `Iterable`.
   * Example usage:
   * {{{
   * List<Integer> data = new ArrayList<Integer>();
   * data.add(1);
   * data.add(2);
   * data.add(3);
   * Source.from(data);
   * }}}
   *
   * Starts a new `Source` from the given `Iterable`. This is like starting from an
   * Iterator, but every Subscriber directly attached to the Publisher of this
   * stream will see an individual flow of elements (always starting from the
   * beginning) regardless of when they subscribed.
   *
   * Make sure that the `Iterable` is immutable or at least not modified after
   * being used as a `Source`. Otherwise the stream may fail with
   * `ConcurrentModificationException` or other more subtle errors may occur.
   */
  def from[O](iterable: java.lang.Iterable[O]): javadsl.Source[O, NotUsed] = {
    // this adapter is not immutable if the underlying java.lang.Iterable is modified
    // but there is not anything we can do to prevent that from happening.
    // ConcurrentModificationException will be thrown in some cases.
    val scalaIterable = new immutable.Iterable[O] {
      import collection.JavaConverters._
      override def iterator: Iterator[O] = iterable.iterator().asScala
    }
    new Source(scaladsl.Source(scalaIterable))
  }

  /**
   * Creates [[Source]] that represents integer values in range ''[start;end]'', with step equal to 1.
   * It allows creating a `Source` out of a range as simply as in Scala: `Source(1 to N)`
   *
   * Uses [[scala.collection.immutable.Range.inclusive(Int, Int)]] internally
   *
   * @see [[scala.collection.immutable.Range.inclusive(Int, Int)]]
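   *
   * For example:
   * {{{
   * Source.range(1, 10); // emits 1, 2, ..., 10
   * }}}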
   */
  def range(start: Int, end: Int): javadsl.Source[Integer, NotUsed] = range(start, end, 1)

  /**
   * Creates [[Source]] that represents integer values in range ''[start;end]'', with the given step.
   * It allows creating a `Source` out of a range as simply as in Scala: `Source(1 to N)`
   *
   * Uses [[scala.collection.immutable.Range.inclusive(Int, Int, Int)]] internally
   *
   * @see [[scala.collection.immutable.Range.inclusive(Int, Int, Int)]]
   */
  def range(start: Int, end: Int, step: Int): javadsl.Source[Integer, NotUsed] =
    fromIterator[Integer](new function.Creator[util.Iterator[Integer]]() {
      def create(): util.Iterator[Integer] =
        Range.inclusive(start, end, step).iterator.asJava.asInstanceOf[util.Iterator[Integer]]
    })

  /**
   * Start a new `Source` from the given `Future`. The stream will consist of
   * one element when the `Future` is completed with a successful value, which
   * may happen before or after materializing the `Flow`.
   * The stream terminates with a failure if the `Future` is completed with a failure.
   */
  def fromFuture[O](future: Future[O]): javadsl.Source[O, NotUsed] =
    new Source(scaladsl.Source.fromFuture(future))

  /**
   * Starts a new `Source` from the given `CompletionStage`. The stream will consist of
   * one element when the `CompletionStage` is completed with a successful value, which
   * may happen before or after materializing the `Flow`.
   * The stream terminates with a failure if the `CompletionStage` is completed with a failure.
   */
  def fromCompletionStage[O](future: CompletionStage[O]): javadsl.Source[O, NotUsed] =
    new Source(scaladsl.Source.fromCompletionStage(future))

  /**
   * Streams the elements of the given future source once it successfully completes.
   * If the future fails the stream is failed.
   */
  def fromFutureSource[T, M](future: Future[_ <: Graph[SourceShape[T], M]]): javadsl.Source[T, Future[M]] =
    new Source(scaladsl.Source.fromFutureSource(future))

  /**
   * Streams the elements of an asynchronous source once its given `completion` stage completes.
   * If the `completion` fails the stream is failed with that exception.
   */
  def fromSourceCompletionStage[T, M](completion: CompletionStage[_ <: Graph[SourceShape[T], M]]): javadsl.Source[T, CompletionStage[M]] =
    new Source(scaladsl.Source.fromSourceCompletionStage(completion))

  /**
   * Elements are emitted periodically with the specified interval.
   * The tick element will be delivered to downstream consumers that have requested any elements.
   * If a consumer has not requested any elements at the point in time when the tick
   * element is produced it will not receive that tick element later. It will
   * receive new tick elements as soon as it has requested more elements.
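   *
   * A minimal usage sketch (assumes `scala.concurrent.duration.Duration` and
   * `java.util.concurrent.TimeUnit` are imported):
   * {{{
   * Source.tick(
   *   Duration.create(0, TimeUnit.SECONDS), // initial delay
   *   Duration.create(1, TimeUnit.SECONDS), // interval
   *   "tick");                              // element emitted on each tick
   * }}}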
   */
  def tick[O](initialDelay: FiniteDuration, interval: FiniteDuration, tick: O): javadsl.Source[O, Cancellable] =
    new Source(scaladsl.Source.tick(initialDelay, interval, tick))

  /**
   * Create a `Source` with one element.
   * Every connected `Sink` of this stream will see an individual stream consisting of one element.
   */
  def single[T](element: T): Source[T, NotUsed] =
    new Source(scaladsl.Source.single(element))

  /**
   * Create a `Source` that will continually emit the given element.
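   *
   * For example:
   * {{{
   * Source.repeat("hello").take(3); // emits "hello" three times, then completes
   * }}}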
   */
  def repeat[T](element: T): Source[T, NotUsed] =
    new Source(scaladsl.Source.repeat(element))

  /**
   * Create a `Source` that will unfold a value of type `S` into
   * a pair of the next state `S` and output elements of type `E`.
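   *
   * For example, a countdown sketch (emits 5, 4, 3, 2, 1, then completes):
   * {{{
   * Source.unfold(5, n ->
   *   n == 0 ? Optional.empty() : Optional.of(Pair.create(n - 1, n)));
   * }}}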
   */
  def unfold[S, E](s: S, f: function.Function[S, Optional[Pair[S, E]]]): Source[E, NotUsed] =
    new Source(scaladsl.Source.unfold(s)((s: S) ⇒ f.apply(s).asScala.map(_.toScala)))

  /**
   * Same as [[unfold]], but uses an async function to generate the next state-element tuple.
   */
  def unfoldAsync[S, E](s: S, f: function.Function[S, CompletionStage[Optional[Pair[S, E]]]]): Source[E, NotUsed] =
    new Source(
      scaladsl.Source.unfoldAsync(s)(
        (s: S) ⇒ f.apply(s).toScala.map(_.asScala.map(_.toScala))(akka.dispatch.ExecutionContexts.sameThreadExecutionContext)))
2014-10-20 14:09:24 +02:00
/* *
2015-01-30 10:30:56 +01:00
* Create a `Source` that immediately ends the stream with the `cause` failure to every connected `Sink` .
2014-10-20 14:09:24 +02:00
*/
2016-01-20 10:00:37 +02:00
def failed [ T ] ( cause : Throwable ) : Source [ T , NotUsed ] =
2014-10-27 14:35:41 +01:00
new Source ( scaladsl . Source . failed ( cause ) )

  /**
   * Creates a `Source` that is not materialized until there is downstream demand. When the source gets materialized,
   * the materialized future is completed with its value. If downstream cancels or fails without any demand, the
   * `create` factory is never called and the materialized `CompletionStage` is failed.
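   *
   * A minimal usage sketch:
   * {{{
   * Source<Integer, CompletionStage<NotUsed>> source =
   *   Source.lazily(() -> Source.single(1)); // factory runs only on first demand
   * }}}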
   */
  def lazily[T, M](create: function.Creator[Source[T, M]]): Source[T, CompletionStage[M]] =
    scaladsl.Source.lazily[T, M](() ⇒ create.create().asScala).mapMaterializedValue(_.toJava).asJava

  /**
   * Creates a `Source` that is materialized as a [[org.reactivestreams.Subscriber]]
   */
  def asSubscriber[T](): Source[T, Subscriber[T]] =
    new Source(scaladsl.Source.asSubscriber)

  /**
   * Creates a `Source` that is materialized to an [[akka.actor.ActorRef]] which points to an Actor
   * created according to the passed in [[akka.actor.Props]]. Actor created by the `props` should
   * be [[akka.stream.actor.ActorPublisher]].
   *
   * @deprecated Use `akka.stream.stage.GraphStage` and `fromGraph` instead, it allows for all operations an Actor would and is more type-safe as well as guaranteed to be ReactiveStreams compliant.
   */
  @deprecated("Use `akka.stream.stage.GraphStage` and `fromGraph` instead, it allows for all operations an Actor would and is more type-safe as well as guaranteed to be ReactiveStreams compliant.", since = "2.5.0")
  def actorPublisher[T](props: Props): Source[T, ActorRef] =
    new Source(scaladsl.Source.actorPublisher(props))

  /**
   * Creates a `Source` that is materialized as an [[akka.actor.ActorRef]].
   * Messages sent to this actor will be emitted to the stream if there is demand from downstream,
   * otherwise they will be buffered until request for demand is received.
   *
   * Depending on the defined [[akka.stream.OverflowStrategy]] it might drop elements if
   * there is no space available in the buffer.
   *
   * The strategy [[akka.stream.OverflowStrategy.backpressure]] is not supported, and an
   * IllegalArgument("Backpressure overflowStrategy not supported") will be thrown if it is passed as argument.
   *
   * The buffer can be disabled by using `bufferSize` of 0 and then received messages are dropped if there is no demand
   * from downstream. When `bufferSize` is 0 the `overflowStrategy` does not matter. An async boundary is added after
   * this Source; as such, it is never safe to assume the downstream will always generate demand.
   *
   * The stream can be completed successfully by sending the actor reference a [[akka.actor.Status.Success]]
   * (whose content will be ignored) in which case already buffered elements will be signaled before signaling
   * completion, or by sending [[akka.actor.PoisonPill]] in which case completion will be signaled immediately.
   *
   * The stream can be completed with failure by sending a [[akka.actor.Status.Failure]] to the
   * actor reference. In case the Actor is still draining its internal buffer (after having received
   * a [[akka.actor.Status.Success]]) before signaling completion and it receives a [[akka.actor.Status.Failure]],
   * the failure will be signaled downstream immediately (instead of the completion signal).
   *
   * The actor will be stopped when the stream is completed, failed or canceled from downstream,
   * i.e. you can watch it to get notified when that happens.
   *
   * See also [[akka.stream.javadsl.Source.queue]].
   *
   * @param bufferSize The size of the buffer in element count
   * @param overflowStrategy Strategy that is used when incoming elements cannot fit inside the buffer
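   *
   * A minimal usage sketch (assumes a `materializer` is in scope):
   * {{{
   * ActorRef ref = Source.<String>actorRef(8, OverflowStrategy.dropHead())
   *   .to(Sink.foreach(System.out::println))
   *   .run(materializer);
   * ref.tell("hello", ActorRef.noSender());
   * ref.tell(new akka.actor.Status.Success("done"), ActorRef.noSender()); // completes the stream
   * }}}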
   */
  def actorRef[T](bufferSize: Int, overflowStrategy: OverflowStrategy): Source[T, ActorRef] =
    new Source(scaladsl.Source.actorRef(bufferSize, overflowStrategy))

  /**
   * A graph with the shape of a source logically is a source, this method makes
   * it so also in type.
   */
  def fromGraph[T, M](g: Graph[SourceShape[T], M]): Source[T, M] =
    g match {
      case s: Source[T, M]                 ⇒ s
      case s if s eq scaladsl.Source.empty ⇒ empty().asInstanceOf[Source[T, M]]
      case other                           ⇒ new Source(scaladsl.Source.fromGraph(other))
    }

  /**
   * Combines several sources with fan-in strategy like `Merge` or `Concat` and returns `Source`.
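   *
   * A minimal usage sketch merging two sources (assumes `Merge` and `java.util.Collections` are imported):
   * {{{
   * Source<Integer, NotUsed> merged = Source.combine(
   *   Source.single(1),
   *   Source.single(2),
   *   Collections.emptyList(),             // no additional sources
   *   width -> Merge.<Integer>create(width));
   * }}}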
   */
  def combine[T, U](first: Source[T, _ <: Any], second: Source[T, _ <: Any], rest: java.util.List[Source[T, _ <: Any]],
                    strategy: function.Function[java.lang.Integer, _ <: Graph[UniformFanInShape[T, U], NotUsed]]): Source[U, NotUsed] = {
    val seq = if (rest != null) Util.immutableSeq(rest).map(_.asScala) else immutable.Seq()
    new Source(scaladsl.Source.combine(first.asScala, second.asScala, seq: _*)(num ⇒ strategy.apply(num)))
  }

  /**
   * Combines two sources with fan-in strategy like `Merge` or `Concat` and returns `Source` with a materialized value.
   */
  def combineMat[T, U, M1, M2, M](first: Source[T, M1], second: Source[T, M2],
                                  strategy: function.Function[java.lang.Integer, _ <: Graph[UniformFanInShape[T, U], NotUsed]],
                                  combine: function.Function2[M1, M2, M]): Source[U, M] = {
    new Source(scaladsl.Source.combineMat(first.asScala, second.asScala)(num ⇒ strategy.apply(num))(combinerToScala(combine)))
  }

  /**
   * Combine the elements of multiple streams into a stream of lists.
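   *
   * A minimal usage sketch (emits the list [1, 2], then completes):
   * {{{
   * Source<List<Integer>, NotUsed> zipped =
   *   Source.zipN(Arrays.asList(Source.single(1), Source.single(2)));
   * }}}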
   */
  def zipN[T](sources: java.util.List[Source[T, _ <: Any]]): Source[java.util.List[T], NotUsed] = {
    val seq = if (sources != null) Util.immutableSeq(sources).map(_.asScala) else immutable.Seq()
    new Source(scaladsl.Source.zipN(seq).map(_.asJava))
  }

  /**
   * Combine the elements of multiple streams into a stream of lists using a combiner function.
   */
  def zipWithN[T, O](zipper: function.Function[java.util.List[T], O], sources: java.util.List[Source[T, _ <: Any]]): Source[O, NotUsed] = {
    val seq = if (sources != null) Util.immutableSeq(sources).map(_.asScala) else immutable.Seq()
    new Source(scaladsl.Source.zipWithN[T, O](seq ⇒ zipper.apply(seq.asJava))(seq))
  }

  /**
   * Creates a `Source` that is materialized as an [[akka.stream.javadsl.SourceQueue]].
   * You can push elements to the queue and they will be emitted to the stream if there is demand from downstream,
   * otherwise they will be buffered until request for demand is received. Elements in the buffer will be discarded
   * if downstream is terminated.
   *
   * Depending on the defined [[akka.stream.OverflowStrategy]] it might drop elements if
   * there is no space available in the buffer.
   *
   * Acknowledgement mechanism is available.
   * [[akka.stream.javadsl.SourceQueue.offer]] returns `CompletionStage<QueueOfferResult>` which completes with
   * `QueueOfferResult.enqueued` if element was added to buffer or sent downstream. It completes with
   * `QueueOfferResult.dropped` if element was dropped. Can also complete with `QueueOfferResult.Failure` -
   * when stream failed or `QueueOfferResult.QueueClosed` when downstream is completed.
   *
   * The strategy [[akka.stream.OverflowStrategy.backpressure]] will not complete last `offer():CompletionStage`
   * call when buffer is full.
   *
   * You can watch accessibility of stream with [[akka.stream.javadsl.SourceQueue.watchCompletion]].
   * It returns a future that completes with success when the stream is completed, or fails when the stream is failed.
   *
   * The buffer can be disabled by using `bufferSize` of 0 and then received message will wait
   * for downstream demand unless there is another message waiting for downstream demand; in that case
   * offer result will be completed according to the overflow strategy.
   *
   * The SourceQueue that this source is materialized to is for single-thread usage only.
   *
   * @param bufferSize size of buffer in element count
   * @param overflowStrategy Strategy that is used when incoming elements cannot fit inside the buffer
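   *
   * A minimal usage sketch (assumes a `materializer` is in scope):
   * {{{
   * SourceQueueWithComplete<Integer> queue =
   *   Source.<Integer>queue(16, OverflowStrategy.backpressure())
   *     .to(Sink.foreach(System.out::println))
   *     .run(materializer);
   * queue.offer(1); // CompletionStage<QueueOfferResult>
   * queue.complete();
   * }}}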
   */
  def queue[T](bufferSize: Int, overflowStrategy: OverflowStrategy): Source[T, SourceQueueWithComplete[T]] =
    new Source(scaladsl.Source.queue[T](bufferSize, overflowStrategy).mapMaterializedValue(new SourceQueueAdapter(_)))

  /**
   * Start a new `Source` from some resource which can be opened, read and closed.
   * Interaction with resource happens in a blocking way.
   *
   * Example:
   * {{{
   * Source.unfoldResource(
   *   () -> new BufferedReader(new FileReader("...")),
   *   reader -> Optional.ofNullable(reader.readLine()),
   *   reader -> reader.close())
   * }}}
   *
   * You can use the supervision strategy to handle exceptions for `read` function. All exceptions thrown by `create`
   * or `close` will fail the stream.
   *
   * `Restart` supervision strategy will close and create blocking IO again. Default strategy is `Stop` which means
   * that stream will be terminated on error in `read` function by default.
   *
   * You can configure the default dispatcher for this Source by changing the `akka.stream.blocking-io-dispatcher` or
   * set it for a given Source by using [[ActorAttributes]].
   *
   * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute.
   *
   * @param create - function that is called on stream start and creates/opens resource.
   * @param read - function that reads data from opened resource. It is called each time backpressure signal
   *               is received. Stream calls close and completes when `read` returns an empty Optional.
   * @param close - function that closes resource
   */
  def unfoldResource[T, S](
    create: function.Creator[S],
    read:   function.Function[S, Optional[T]],
    close:  function.Procedure[S]): javadsl.Source[T, NotUsed] =
    new Source(scaladsl.Source.unfoldResource[T, S](
      create.create _,
      (s: S) ⇒ read.apply(s).asScala, close.apply))

  /**
   * Start a new `Source` from some resource which can be opened, read and closed.
   * It's similar to `unfoldResource` but takes functions that return `CompletionStage` instead of plain values.
   *
   * You can use the supervision strategy to handle exceptions for `read` function or failures of produced `Futures`.
   * All exceptions thrown by `create` or `close` as well as fails of returned futures will fail the stream.
   *
   * `Restart` supervision strategy will close and create resource. Default strategy is `Stop` which means
   * that stream will be terminated on error in `read` function (or future) by default.
   *
   * You can configure the default dispatcher for this Source by changing the `akka.stream.blocking-io-dispatcher` or
   * set it for a given Source by using [[ActorAttributes]].
   *
   * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute.
   *
   * @param create - function that is called on stream start and creates/opens resource.
   * @param read - function that reads data from opened resource. It is called each time backpressure signal
   *               is received. Stream calls close and completes when the `CompletionStage` from the read function
   *               completes with an empty Optional.
   * @param close - function that closes resource
   */
  def unfoldResourceAsync[T, S](
    create: function.Creator[CompletionStage[S]],
    read:   function.Function[S, CompletionStage[Optional[T]]],
    close:  function.Function[S, CompletionStage[Done]]): javadsl.Source[T, NotUsed] =
    new Source(scaladsl.Source.unfoldResourceAsync[T, S](
      () ⇒ create.create().toScala,
      (s: S) ⇒ read.apply(s).toScala.map(_.asScala)(akka.dispatch.ExecutionContexts.sameThreadExecutionContext),
      (s: S) ⇒ close.apply(s).toScala))
}

/**
 * Java API
 *
 * A `Source` is a set of stream processing steps that has one open output and an attached input.
 * Can be used as a `Publisher`.
 */
final class Source[+Out, +Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[SourceShape[Out], Mat] {

  import scala.collection.JavaConverters._

  override def shape: SourceShape[Out] = delegate.shape

  override def traversalBuilder: LinearTraversalBuilder = delegate.traversalBuilder

  override def toString: String = delegate.toString

  /**
   * Converts this Java DSL element to its Scala DSL counterpart.
   */
  def asScala: scaladsl.Source[Out, Mat] = delegate

  /**
   * Transform only the materialized value of this Source, leaving all other properties as they were.
   */
  def mapMaterializedValue[Mat2](f: function.Function[Mat, Mat2]): Source[Out, Mat2] =
    new Source(delegate.mapMaterializedValue(f.apply _))

  /**
   * Transform this [[Source]] by appending the given processing stages.
   * {{{
   *     +----------------------------+
   *     |      Resulting Source      |
   *     |                            |
   *     |  +------+        +------+  |
   *     |  |      |        |      |  |
   *     |  | this | ~Out~> | flow | ~~> T
   *     |  |      |        |      |  |
   *     |  +------+        +------+  |
   *     +----------------------------+
   * }}}
   * The materialized value of the combined [[Flow]] will be the materialized
   * value of the current flow (ignoring the other Flow’s value), use
   * `viaMat` if a different strategy is needed.
   */
  def via[T, M](flow: Graph[FlowShape[Out, T], M]): javadsl.Source[T, Mat] =
    new Source(delegate.via(flow))

  /**
   * Transform this [[Source]] by appending the given processing stages.
   * {{{
   *     +----------------------------+
   *     |      Resulting Source      |
   *     |                            |
   *     |  +------+        +------+  |
   *     |  |      |        |      |  |
   *     |  | this | ~Out~> | flow | ~~> T
   *     |  |      |        |      |  |
   *     |  +------+        +------+  |
   *     +----------------------------+
   * }}}
   * The `combine` function is used to compose the materialized values of this flow and that
   * flow into the materialized value of the resulting Flow.
   *
   * It is recommended to use the internally optimized `Keep.left` and `Keep.right` combiners
   * where appropriate instead of manually writing functions that pass through one of the values.
   */
  def viaMat[T, M, M2](flow: Graph[FlowShape[Out, T], M], combine: function.Function2[Mat, M, M2]): javadsl.Source[T, M2] =
    new Source(delegate.viaMat(flow)(combinerToScala(combine)))

  /**
   * Connect this [[Source]] to a [[Sink]], concatenating the processing steps of both.
   * {{{
   *     +----------------------------+
   *     |  Resulting RunnableGraph   |
   *     |                            |
   *     |  +------+        +------+  |
   *     |  |      |        |      |  |
   *     |  | this | ~Out~> | sink |  |
   *     |  |      |        |      |  |
   *     |  +------+        +------+  |
   *     +----------------------------+
   * }}}
   * The materialized value of the combined [[Sink]] will be the materialized
   * value of the current flow (ignoring the given Sink’s value), use
   * `toMat` if a different strategy is needed.
   */
  def to[M](sink: Graph[SinkShape[Out], M]): javadsl.RunnableGraph[Mat] =
    RunnableGraph.fromGraph(delegate.to(sink))

  /**
   * Connect this [[Source]] to a [[Sink]], concatenating the processing steps of both.
   * {{{
   *     +----------------------------+
   *     |  Resulting RunnableGraph   |
   *     |                            |
   *     |  +------+        +------+  |
   *     |  |      |        |      |  |
   *     |  | this | ~Out~> | sink |  |
   *     |  |      |        |      |  |
   *     |  +------+        +------+  |
   *     +----------------------------+
   * }}}
   * The `combine` function is used to compose the materialized values of this flow and that
   * Sink into the materialized value of the resulting Sink.
   *
   * It is recommended to use the internally optimized `Keep.left` and `Keep.right` combiners
   * where appropriate instead of manually writing functions that pass through one of the values.
   */
  def toMat[M, M2](sink: Graph[SinkShape[Out], M], combine: function.Function2[Mat, M, M2]): javadsl.RunnableGraph[M2] =
    RunnableGraph.fromGraph(delegate.toMat(sink)(combinerToScala(combine)))

  /**
   * Connect this `Source` to a `Sink` and run it. The returned value is the materialized value
   * of the `Sink`, e.g. the `Publisher` of a `Sink.asPublisher`.
   */
  def runWith[M](sink: Graph[SinkShape[Out], M], materializer: Materializer): M =
    delegate.runWith(sink)(materializer)

  /**
   * Shortcut for running this `Source` with a fold function.
   * The given function is invoked for every received element, giving it its previous
   * output (or the given `zero` value) and the element as input.
   * The returned [[java.util.concurrent.CompletionStage]] will be completed with value of the final
   * function evaluation when the input stream ends, or completed with `Failure`
   * if a failure is signaled in the stream.
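   *
   * For example, summing a stream of integers (assumes a `materializer` is in scope):
   * {{{
   * CompletionStage<Integer> sum =
   *   Source.range(1, 10).runFold(0, (acc, n) -> acc + n, materializer);
   * }}}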
   */
  def runFold[U](zero: U, f: function.Function2[U, Out, U], materializer: Materializer): CompletionStage[U] =
    runWith(Sink.fold(zero, f), materializer)

  /**
   * Shortcut for running this `Source` with an asynchronous fold function.
   * The given function is invoked for every received element, giving it its previous
   * output (or the given `zero` value) and the element as input.
   * The returned [[java.util.concurrent.CompletionStage]] will be completed with value of the final
   * function evaluation when the input stream ends, or completed with `Failure`
   * if a failure is signaled in the stream.
   */
  def runFoldAsync[U](zero: U, f: function.Function2[U, Out, CompletionStage[U]], materializer: Materializer): CompletionStage[U] =
    runWith(Sink.foldAsync(zero, f), materializer)

  /**
   * Shortcut for running this `Source` with a reduce function.
   * The given function is invoked for every received element, giving it its previous
   * output (from the second element onwards) and the element as input.
   * The returned [[java.util.concurrent.CompletionStage]] will be completed with value of the final
   * function evaluation when the input stream ends, or completed with `Failure`
   * if a failure is signaled in the stream.
   *
   * If the stream is empty (i.e. completes before signalling any elements),
   * the reduce stage will fail its downstream with a [[NoSuchElementException]],
   * which is semantically in-line with what Scala's standard library collections
   * do in such situations.
   */
  def runReduce[U >: Out](f: function.Function2[U, U, U], materializer: Materializer): CompletionStage[U] =
    runWith(Sink.reduce(f), materializer)

  /**
   * Concatenate this [[Source]] with the given one, meaning that once current
   * is exhausted and all result elements have been generated,
   * the given source elements will be produced.
   *
   * Note that given [[Source]] is materialized together with this Flow and just kept
   * from producing elements by asserting back-pressure until its time comes.
   *
   * If this [[Source]] gets upstream error - no elements from the given [[Source]] will be pulled.
   *
   * '''Emits when''' element is available from current source or from the given [[Source]] when current is completed
   *
   * '''Backpressures when''' downstream backpressures
   *
   * '''Completes when''' given [[Source]] completes
   *
   * '''Cancels when''' downstream cancels
   */
  def concat[T >: Out, M](that: Graph[SourceShape[T], M]): javadsl.Source[T, Mat] =
    new Source(delegate.concat(that))

  /**
   * Concatenate this [[Source]] with the given one, meaning that once current
   * is exhausted and all result elements have been generated,
   * the given source elements will be produced.
   *
   * Note that given [[Source]] is materialized together with this Flow and just kept
   * from producing elements by asserting back-pressure until its time comes.
   *
   * If this [[Source]] gets upstream error - no elements from the given [[Source]] will be pulled.
   *
   * It is recommended to use the internally optimized `Keep.left` and `Keep.right` combiners
   * where appropriate instead of manually writing functions that pass through one of the values.
   *
   * @see [[#concat]].
   */
  def concatMat[T >: Out, M, M2](
    that: Graph[SourceShape[T], M],
    matF: function.Function2[Mat, M, M2]): javadsl.Source[T, M2] =
    new Source(delegate.concatMat(that)(combinerToScala(matF)))

  /**
   * Prepend the given [[Source]] to this one, meaning that once the given source
   * is exhausted and all result elements have been generated, the current source's
   * elements will be produced.
   *
   * Note that the current [[Source]] is materialized together with this Flow and just kept
   * from producing elements by asserting back-pressure until its time comes.
   *
   * If the given [[Source]] gets upstream error - no elements from this [[Source]] will be pulled.
   *
   * '''Emits when''' element is available from current source or from the given [[Source]] when current is completed
   *
   * '''Backpressures when''' downstream backpressures
   *
   * '''Completes when''' given [[Source]] completes
   *
   * '''Cancels when''' downstream cancels
   */
  def prepend[T >: Out, M](that: Graph[SourceShape[T], M]): javadsl.Source[T, Mat] =
    new Source(delegate.prepend(that))

  /**
   * Prepend the given [[Source]] to this one, meaning that once the given source
   * is exhausted and all result elements have been generated, the current source's
   * elements will be produced.
   *
   * Note that the current [[Source]] is materialized together with this Flow and just kept
   * from producing elements by asserting back-pressure until its time comes.
   *
   * If the given [[Source]] gets upstream error - no elements from this [[Source]] will be pulled.
   *
   * It is recommended to use the internally optimized `Keep.left` and `Keep.right` combiners
   * where appropriate instead of manually writing functions that pass through one of the values.
   *
   * @see [[#prepend]].
   */
  def prependMat[T >: Out, M, M2](
    that: Graph[SourceShape[T], M],
    matF: function.Function2[Mat, M, M2]): javadsl.Source[T, M2] =
    new Source(delegate.prependMat(that)(combinerToScala(matF)))

  /**
   * Provides a secondary source that will be consumed if this source completes without any
   * elements passing by. As soon as the first element comes through this stream, the alternative
   * will be cancelled.
   *
   * Note that this Flow will be materialized together with the [[Source]] and just kept
   * from producing elements by asserting back-pressure until its time comes or it gets
   * cancelled.
   *
   * On errors the stage is failed regardless of source of the error.
   *
   * '''Emits when''' element is available from first stream or first stream closed without emitting any elements and an element
   * is available from the second stream
   *
   * '''Backpressures when''' downstream backpressures
   *
   * '''Completes when''' the primary stream completes after emitting at least one element, when the primary stream completes
   * without emitting and the secondary stream already has completed or when the secondary stream completes
   *
   * '''Cancels when''' downstream cancels and additionally the alternative is cancelled as soon as an element passes
   * by from this stream.
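   *
   * For example (emits 42, since the primary source is empty):
   * {{{
   * Source.<Integer>empty().orElse(Source.single(42));
   * }}}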
   */
  def orElse[T >: Out, M](secondary: Graph[SourceShape[T], M]): javadsl.Source[T, Mat] =
    new Source(delegate.orElse(secondary))

  /**
   * Provides a secondary source that will be consumed if this source completes without any
   * elements passing by. As soon as the first element comes through this stream, the alternative
   * will be cancelled.
   *
   * It is recommended to use the internally optimized `Keep.left` and `Keep.right` combiners
   * where appropriate instead of manually writing functions that pass through one of the values.
   *
   * @see [[#orElse]]
   */
  def orElseMat[T >: Out, M, M2](secondary: Graph[SourceShape[T], M], matF: function.Function2[Mat, M, M2]): javadsl.Source[T, M2] =
    new Source(delegate.orElseMat(secondary)(combinerToScala(matF)))

  /**
   * Attaches the given [[Sink]] to this [[Flow]], meaning that elements that pass
   * through will also be sent to the [[Sink]].
   *
   * '''Emits when''' element is available and demand exists both from the Sink and the downstream.
   *
   * '''Backpressures when''' downstream or Sink backpressures
   *
   * '''Completes when''' upstream completes
   *
   * '''Cancels when''' downstream cancels
   */
  def alsoTo(that: Graph[SinkShape[Out], _]): javadsl.Source[Out, Mat] =
    new Source(delegate.alsoTo(that))

  /**
   * Attaches the given [[Sink]] to this [[Flow]], meaning that elements that pass
   * through will also be sent to the [[Sink]].
   *
   * It is recommended to use the internally optimized `Keep.left` and `Keep.right` combiners
   * where appropriate instead of manually writing functions that pass through one of the values.
   *
   * @see [[#alsoTo]]
   */
  def alsoToMat[M2, M3](
    that: Graph[SinkShape[Out], M2],
    matF: function.Function2[Mat, M2, M3]): javadsl.Source[Out, M3] =
    new Source(delegate.alsoToMat(that)(combinerToScala(matF)))

  /**
   * Interleave is a deterministic merge of the given [[Source]] with elements of this [[Source]].
   * It first emits `segmentSize` number of elements from this flow to downstream, then the same amount
   * for the `that` source, then repeats the process.
   *
   * Example:
   * {{{
   * Source.from(Arrays.asList(1, 2, 3)).interleave(Source.from(Arrays.asList(4, 5, 6, 7)), 2)
   * // 1, 2, 4, 5, 3, 6, 7
   * }}}
   *
   * After one of the sources completes, the remaining elements will be emitted from the other one.
   *
   * If one of sources gets upstream error - stream completes with failure.
   *
   * '''Emits when''' element is available from the currently consumed upstream
   *
   * '''Backpressures when''' downstream backpressures. Signal to current
   * upstream, switch to next upstream when received `segmentSize` elements
   *
   * '''Completes when''' this [[Source]] and given one completes
   *
   * '''Cancels when''' downstream cancels
   */
  def interleave[T >: Out](that: Graph[SourceShape[T], _], segmentSize: Int): javadsl.Source[T, Mat] =
    new Source(delegate.interleave(that, segmentSize))

  /**
   * Interleave is a deterministic merge of the given [[Source]] with elements of this [[Source]].
   * It first emits `segmentSize` number of elements from this flow to downstream, then the same amount
   * for the `that` source, then repeats the process.
   *
   * After one of the sources completes, the remaining elements will be emitted from the other one.
   *
   * If one of sources gets upstream error - stream completes with failure.
   *
   * It is recommended to use the internally optimized `Keep.left` and `Keep.right` combiners
   * where appropriate instead of manually writing functions that pass through one of the values.
   *
   * @see [[#interleave]].
   */
  def interleaveMat[T >: Out, M, M2](that: Graph[SourceShape[T], M], segmentSize: Int,
                                     matF: function.Function2[Mat, M, M2]): javadsl.Source[T, M2] =
    new Source(delegate.interleaveMat(that, segmentSize)(combinerToScala(matF)))

  /**
   * Merge the given [[Source]] to the current one, taking elements as they arrive from input streams,
   * picking randomly when several elements ready.
   *
   * '''Emits when''' one of the inputs has an element available
   *
   * '''Backpressures when''' downstream backpressures
   *
   * '''Completes when''' all upstreams complete
   *
   * '''Cancels when''' downstream cancels
   */
  def merge[T >: Out](that: Graph[SourceShape[T], _]): javadsl.Source[T, Mat] =
    new Source(delegate.merge(that))

  /**
   * Merge the given [[Source]] to the current one, taking elements as they arrive from input streams,
   * picking randomly when several elements ready.
   *
   * It is recommended to use the internally optimized `Keep.left` and `Keep.right` combiners
   * where appropriate instead of manually writing functions that pass through one of the values.
   *
   * @see [[#merge]].
   */
  def mergeMat[T >: Out, M, M2](
    that: Graph[SourceShape[T], M],
    matF: function.Function2[Mat, M, M2]): javadsl.Source[T, M2] =
    new Source(delegate.mergeMat(that)(combinerToScala(matF)))

  /**
   * Merge the given [[Source]] to this [[Source]], taking elements as they arrive from input streams,
   * picking always the smallest of the available elements (waiting for one element from each side
   * to be available). This means that possible contiguity of the input streams is not exploited to avoid
   * waiting for elements, this merge will block when one of the inputs does not have more elements (and
   * does not complete).
   *
   * '''Emits when''' all of the inputs have an element available
   *
   * '''Backpressures when''' downstream backpressures
   *
   * '''Completes when''' all upstreams complete
   *
   * '''Cancels when''' downstream cancels
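   *
   * For example (emits 1, 2, 3, 4, 5, 6):
   * {{{
   * Source.from(Arrays.asList(1, 3, 5))
   *   .mergeSorted(Source.from(Arrays.asList(2, 4, 6)), Comparator.<Integer>naturalOrder());
   * }}}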
   */
  def mergeSorted[U >: Out, M](that: Graph[SourceShape[U], M], comp: util.Comparator[U]): javadsl.Source[U, Mat] =
    new Source(delegate.mergeSorted(that)(Ordering.comparatorToOrdering(comp)))

  /**
   * Merge the given [[Source]] to this [[Source]], taking elements as they arrive from input streams,
   * picking always the smallest of the available elements (waiting for one element from each side
   * to be available). This means that possible contiguity of the input streams is not exploited to avoid
   * waiting for elements, this merge will block when one of the inputs does not have more elements (and
   * does not complete).
   *
   * It is recommended to use the internally optimized `Keep.left` and `Keep.right` combiners
   * where appropriate instead of manually writing functions that pass through one of the values.
   *
   * @see [[#mergeSorted]].
   */
  def mergeSortedMat[U >: Out, Mat2, Mat3](that: Graph[SourceShape[U], Mat2], comp: util.Comparator[U],
                                           matF: function.Function2[Mat, Mat2, Mat3]): javadsl.Source[U, Mat3] =
    new Source(delegate.mergeSortedMat(that)(combinerToScala(matF))(Ordering.comparatorToOrdering(comp)))

  /**
   * Combine the elements of current [[Source]] and the given one into a stream of tuples.
   *
   * '''Emits when''' all of the inputs have an element available
   *
   * '''Backpressures when''' downstream backpressures
   *
   * '''Completes when''' any upstream completes
   *
   * '''Cancels when''' downstream cancels
   */
  def zip[T](that: Graph[SourceShape[T], _]): javadsl.Source[Out @uncheckedVariance Pair T, Mat] =
    zipMat(that, Keep.left)

  /**
   * Combine the elements of current [[Source]] and the given one into a stream of tuples.
   *
   * It is recommended to use the internally optimized `Keep.left` and `Keep.right` combiners
   * where appropriate instead of manually writing functions that pass through one of the values.
   *
   * @see [[#zip]].
   */
  def zipMat[T, M, M2](
    that: Graph[SourceShape[T], M],
    matF: function.Function2[Mat, M, M2]): javadsl.Source[Out @uncheckedVariance Pair T, M2] =
    this.viaMat(Flow.create[Out].zipMat(that, Keep.right[NotUsed, M]), matF)

  /**
   * Put together the elements of current [[Source]] and the given one
   * into a stream of combined elements using a combiner function.
   *
   * '''Emits when''' all of the inputs have an element available
   *
   * '''Backpressures when''' downstream backpressures
   *
   * '''Completes when''' any upstream completes
   *
   * '''Cancels when''' downstream cancels
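   *
   * For example (emits "a1", then completes):
   * {{{
   * Source.single("a").zipWith(Source.single(1), (s, n) -> s + n);
   * }}}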
   */
  def zipWith[Out2, Out3](
    that: Graph[SourceShape[Out2], _],
    combine: function.Function2[Out, Out2, Out3]): javadsl.Source[Out3, Mat] =
    new Source(delegate.zipWith[Out2, Out3](that)(combinerToScala(combine)))

  /**
   * Put together the elements of current [[Source]] and the given one
   * into a stream of combined elements using a combiner function.
   *
   * It is recommended to use the internally optimized `Keep.left` and `Keep.right` combiners
   * where appropriate instead of manually writing functions that pass through one of the values.
   *
   * @see [[#zipWith]].
   */
  def zipWithMat[Out2, Out3, M, M2](
    that: Graph[SourceShape[Out2], M],
    combine: function.Function2[Out, Out2, Out3],
    matF: function.Function2[Mat, M, M2]): javadsl.Source[Out3, M2] =
    new Source(delegate.zipWithMat[Out2, Out3, M, M2](that)(combinerToScala(combine))(combinerToScala(matF)))

  /**
   * Combine the elements of current [[Source]] into a stream of tuples consisting
   * of all elements paired with their index. Indices start at 0.
   *
   * '''Emits when''' upstream emits an element and is paired with its index
   *
   * '''Backpressures when''' downstream backpressures
   *
   * '''Completes when''' upstream completes
   *
   * '''Cancels when''' downstream cancels
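   *
   * For example (emits Pair("a", 0L), then Pair("b", 1L)):
   * {{{
   * Source.from(Arrays.asList("a", "b")).zipWithIndex();
   * }}}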
   */
  def zipWithIndex: javadsl.Source[Pair[Out @uncheckedVariance, Long], Mat] =
    new Source(delegate.zipWithIndex.map { case (elem, index) ⇒ Pair(elem, index) })

  /**
   * Shortcut for running this `Source` with a foreach procedure. The given procedure is invoked
   * for each received element.
   * The returned [[java.util.concurrent.CompletionStage]] will be completed normally when reaching the
   * normal end of the stream, or completed exceptionally if a failure is signaled in
   * the stream.
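   *
   * For example (assumes a `materializer` is in scope):
   * {{{
   * CompletionStage<Done> done =
   *   Source.range(1, 3).runForeach(System.out::println, materializer);
   * }}}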
   */
  def runForeach(f: function.Procedure[Out], materializer: Materializer): CompletionStage[Done] =
    runWith(Sink.foreach(f), materializer)

  // COMMON OPS //

  /**
   * Transform this stream by applying the given function to each of the elements
   * as they pass through this processing step.
   *
   * '''Emits when''' the mapping function returns an element
   *
   * '''Backpressures when''' downstream backpressures
   *
   * '''Completes when''' upstream completes
   *
   * '''Cancels when''' downstream cancels
   */
  def map[T](f: function.Function[Out, T]): javadsl.Source[T, Mat] =
    new Source(delegate.map(f.apply))

  /**
   * Recover allows to send a last element on failure and gracefully complete the stream.
   * Since the underlying failure signal onError arrives out-of-band, it might jump over existing elements.
   * This stage can recover the failure signal, but not the skipped elements, which will be dropped.
   *
   * Throwing an exception inside `recover` _will_ be logged on ERROR level automatically.
   *
   * '''Emits when''' element is available from the upstream or upstream is failed and pf returns an element
   *
   * '''Backpressures when''' downstream backpressures
   *
   * '''Completes when''' upstream completes or upstream failed with exception pf can handle
   *
   * '''Cancels when''' downstream cancels
   */
  @deprecated("Use recoverWithRetries instead.", "2.4.4")
  def recover[T >: Out](pf: PartialFunction[Throwable, T]): javadsl.Source[T, Mat] =
    new Source(delegate.recover(pf))

  /**
   * While similar to [[recover]] this stage can be used to transform an error signal to a different one *without* logging
   * it as an error in the process. So in that sense it is NOT exactly equivalent to `recover(t => throw t2)` since recover
   * would log the `t2` error.
   *
   * Since the underlying failure signal onError arrives out-of-band, it might jump over existing elements.
   * This stage can recover the failure signal, but not the skipped elements, which will be dropped.
   *
   * Similarly to [[recover]], throwing an exception inside `mapError` _will_ be logged.
   *
   * '''Emits when''' element is available from the upstream or upstream is failed and pf returns an element
   *
   * '''Backpressures when''' downstream backpressures
   *
   * '''Completes when''' upstream completes or upstream failed with exception pf can handle
   *
   * '''Cancels when''' downstream cancels
   */
  def mapError(pf: PartialFunction[Throwable, Throwable]): javadsl.Source[Out, Mat] =
    new Source(delegate.mapError(pf))

  /**
   * RecoverWith allows to switch to alternative Source on flow failure. It will stay in effect after
   * a failure has been recovered so that each time there is a failure it is fed into the `pf` and a new
   * Source may be materialized.
   *
   * Since the underlying failure signal onError arrives out-of-band, it might jump over existing elements.
   * This stage can recover the failure signal, but not the skipped elements, which will be dropped.
   *
   * Throwing an exception inside `recoverWith` _will_ be logged on ERROR level automatically.
   *
   * '''Emits when''' element is available from the upstream or upstream is failed and element is available
   * from alternative Source
   *
   * '''Backpressures when''' downstream backpressures
   *
   * '''Completes when''' upstream completes or upstream failed with exception pf can handle
   *
   * '''Cancels when''' downstream cancels
   */
  def recoverWith[T >: Out](pf: PartialFunction[Throwable, _ <: Graph[SourceShape[T], NotUsed]]): Source[T, Mat @uncheckedVariance] =
    new Source(delegate.recoverWith(pf))

  /**
   * RecoverWithRetries allows switching to an alternative Source on flow failure. It will stay in effect after
   * a failure has been recovered up to `attempts` number of times so that each time there is a failure
   * it is fed into the `pf` and a new Source may be materialized. Note that if you pass in 0, this won't
   * attempt to recover at all.
   *
   * A negative `attempts` number is interpreted as "infinite", which results in the exact same behavior as `recoverWith`.
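   *
   * A minimal sketch of a fallback source, built with [[akka.japi.pf.PFBuilder]]
   * (the sources and exception type here are illustrative):
   *
   * {{{
   * Source.from(Arrays.asList(1, 2, 3))
   *   .map(i -> { if (i == 2) throw new RuntimeException("boom"); else return i; })
   *   .recoverWithRetries(1, new PFBuilder<Throwable, Source<Integer, NotUsed>>()
   *     .match(RuntimeException.class, ex -> Source.single(-1))
   *     .build()); // recovers with -1 after the failure
   * }}}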
   *
   * Since the underlying failure signal onError arrives out-of-band, it might jump over existing elements.
   * This stage can recover the failure signal, but not the skipped elements, which will be dropped.
   *
   * Throwing an exception inside `recoverWithRetries` _will_ be logged on ERROR level automatically.
   *
   * '''Emits when''' element is available from the upstream or upstream is failed and element is available
   * from alternative Source
   *
   * '''Backpressures when''' downstream backpressures
   *
   * '''Completes when''' upstream completes or upstream failed with exception pf can handle
   *
   * '''Cancels when''' downstream cancels
   */
  def recoverWithRetries[T >: Out](attempts: Int, pf: PartialFunction[Throwable, _ <: Graph[SourceShape[T], NotUsed]]): Source[T, Mat @uncheckedVariance] =
    new Source(delegate.recoverWithRetries(attempts, pf))

  /**
   * Transform each input element into an `Iterable` of output elements that is
   * then flattened into the output stream.
   *
   * Make sure that the `Iterable` is immutable or at least not modified after
   * being used as an output sequence. Otherwise the stream may fail with
   * `ConcurrentModificationException` or other more subtle errors may occur.
   *
   * The returned `Iterable` MUST NOT contain `null` values,
   * as they are illegal as stream elements - according to the Reactive Streams specification.
   *
   * '''Emits when''' the mapping function returns an element or there are still remaining elements
   * from the previously calculated collection
   *
   * '''Backpressures when''' downstream backpressures or there are still remaining elements from the
   * previously calculated collection
   *
   * '''Completes when''' upstream completes and all remaining elements have been emitted
   *
   * '''Cancels when''' downstream cancels
   */
  def mapConcat[T](f: function.Function[Out, _ <: java.lang.Iterable[T]]): javadsl.Source[T, Mat] =
    new Source(delegate.mapConcat(elem ⇒ Util.immutableSeq(f.apply(elem))))

  /**
   * Transform each input element into an `Iterable` of output elements that is
   * then flattened into the output stream. The transformation is meant to be stateful,
   * which is enabled by creating the transformation function anew for every materialization —
   * the returned function will typically close over mutable objects to store state between
   * invocations. For the stateless variant see [[#mapConcat]].
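   *
   * A minimal sketch that pairs each element with a running index; the state
   * lives in the array, so each materialization starts fresh (assumes
   * `java.util.Collections` is imported):
   *
   * {{{
   * source.statefulMapConcat(() -> {
   *   int[] index = {0}; // created anew per materialization
   *   return elem -> Collections.singletonList(elem + "-" + index[0]++);
   * });
   * }}}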
   *
   * Make sure that the `Iterable` is immutable or at least not modified after
   * being used as an output sequence. Otherwise the stream may fail with
   * `ConcurrentModificationException` or other more subtle errors may occur.
   *
   * The returned `Iterable` MUST NOT contain `null` values,
   * as they are illegal as stream elements - according to the Reactive Streams specification.
   *
   * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute.
   *
   * '''Emits when''' the mapping function returns an element or there are still remaining elements
   * from the previously calculated collection
   *
   * '''Backpressures when''' downstream backpressures or there are still remaining elements from the
   * previously calculated collection
   *
   * '''Completes when''' upstream completes and all remaining elements have been emitted
   *
   * '''Cancels when''' downstream cancels
   */
  def statefulMapConcat[T](f: function.Creator[function.Function[Out, java.lang.Iterable[T]]]): javadsl.Source[T, Mat] =
    new Source(delegate.statefulMapConcat { () ⇒
      val fun = f.create()
      elem ⇒ Util.immutableSeq(fun(elem))
    })

  /**
   * Transform this stream by applying the given function to each of the elements
   * as they pass through this processing step. The function returns a `CompletionStage` and the
   * value of that future will be emitted downstream. The number of CompletionStages
   * that shall run in parallel is given as the first argument to `mapAsync`.
   * These CompletionStages may complete in any order, but the elements that
   * are emitted downstream are in the same order as received from upstream.
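   *
   * A minimal sketch (the `lookup` call is a stand-in for any asynchronous
   * operation that yields a `CompletionStage`):
   *
   * {{{
   * source.mapAsync(4, id ->
   *   CompletableFuture.supplyAsync(() -> lookup(id))); // up to 4 in flight
   * }}}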
   *
   * If the function `f` throws an exception or if the `CompletionStage` is completed
   * with failure and the supervision decision is [[akka.stream.Supervision#stop]]
   * the stream will be completed with failure.
   *
   * If the function `f` throws an exception or if the `CompletionStage` is completed
   * with failure and the supervision decision is [[akka.stream.Supervision#resume]] or
   * [[akka.stream.Supervision#restart]] the element is dropped and the stream continues.
   *
   * The function `f` is always invoked on the elements in the order they arrive.
   *
   * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute.
   *
   * '''Emits when''' the CompletionStage returned by the provided function finishes for the next element in sequence
   *
   * '''Backpressures when''' the number of CompletionStages reaches the configured parallelism and the downstream
   * backpressures or the first CompletionStage is not completed
   *
   * '''Completes when''' upstream completes and all CompletionStages have been completed and all elements have been emitted
   *
   * '''Cancels when''' downstream cancels
   *
   * @see [[#mapAsyncUnordered]]
   */
  def mapAsync[T](parallelism: Int, f: function.Function[Out, CompletionStage[T]]): javadsl.Source[T, Mat] =
    new Source(delegate.mapAsync(parallelism)(x ⇒ f(x).toScala))

  /**
   * Transform this stream by applying the given function to each of the elements
   * as they pass through this processing step. The function returns a `CompletionStage` and the
   * value of that future will be emitted downstream. The number of CompletionStages
   * that shall run in parallel is given as the first argument to `mapAsyncUnordered`.
   * Each processed element will be emitted downstream as soon as it is ready, i.e. it is possible
   * that the elements are not emitted downstream in the same order as received from upstream.
   *
   * If the function `f` throws an exception or if the `CompletionStage` is completed
   * with failure and the supervision decision is [[akka.stream.Supervision#stop]]
   * the stream will be completed with failure.
   *
   * If the function `f` throws an exception or if the `CompletionStage` is completed
   * with failure and the supervision decision is [[akka.stream.Supervision#resume]] or
   * [[akka.stream.Supervision#restart]] the element is dropped and the stream continues.
   *
   * The function `f` is always invoked on the elements in the order they arrive (even though the result of the CompletionStages
   * returned by `f` might be emitted in a different order).
   *
   * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute.
   *
   * '''Emits when''' any of the CompletionStages returned by the provided function complete
   *
   * '''Backpressures when''' the number of CompletionStages reaches the configured parallelism and the downstream backpressures
   *
   * '''Completes when''' upstream completes and all CompletionStages have been completed and all elements have been emitted
   *
   * '''Cancels when''' downstream cancels
   *
   * @see [[#mapAsync]]
   */
  def mapAsyncUnordered[T](parallelism: Int, f: function.Function[Out, CompletionStage[T]]): javadsl.Source[T, Mat] =
    new Source(delegate.mapAsyncUnordered(parallelism)(x ⇒ f(x).toScala))

  /**
   * Only pass on those elements that satisfy the given predicate.
   *
   * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute.
   *
   * '''Emits when''' the given predicate returns true for the element
   *
   * '''Backpressures when''' the given predicate returns true for the element and downstream backpressures
   *
   * '''Completes when''' upstream completes
   *
   * '''Cancels when''' downstream cancels
   */
  def filter(p: function.Predicate[Out]): javadsl.Source[Out, Mat] =
    new Source(delegate.filter(p.test))

  /**
   * Only pass on those elements that do NOT satisfy the given predicate.
   *
   * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute.
   *
   * '''Emits when''' the given predicate returns false for the element
   *
   * '''Backpressures when''' the given predicate returns false for the element and downstream backpressures
   *
   * '''Completes when''' upstream completes
   *
   * '''Cancels when''' downstream cancels
   */
  def filterNot(p: function.Predicate[Out]): javadsl.Source[Out, Mat] =
    new Source(delegate.filterNot(p.test))

  /**
   * Transform this stream by applying the given partial function to each of the elements
   * on which the function is defined as they pass through this processing step.
   * Non-matching elements are filtered out.
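   *
   * A minimal sketch, using [[akka.japi.pf.PFBuilder]] to build the partial
   * function from Java (the element types here are illustrative):
   *
   * {{{
   * source.collect(new PFBuilder<Object, String>()
   *   .match(String.class, s -> s.toUpperCase())
   *   .build()); // keeps only Strings, upper-cased
   * }}}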
   *
   * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute.
   *
   * '''Emits when''' the provided partial function is defined for the element
   *
   * '''Backpressures when''' the partial function is defined for the element and downstream backpressures
   *
   * '''Completes when''' upstream completes
   *
   * '''Cancels when''' downstream cancels
   */
  def collect[T](pf: PartialFunction[Out, T]): javadsl.Source[T, Mat] =
    new Source(delegate.collect(pf))

  /**
   * Chunk up this stream into groups of the given size, with the last group
   * possibly smaller than requested due to end-of-stream.
   *
   * `n` must be positive, otherwise IllegalArgumentException is thrown.
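   *
   * For example (a minimal sketch):
   *
   * {{{
   * Source.range(1, 7).grouped(3); // emits [1, 2, 3], [4, 5, 6], [7]
   * }}}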
   *
   * '''Emits when''' the specified number of elements has been accumulated or upstream completed
   *
   * '''Backpressures when''' a group has been assembled and downstream backpressures
   *
   * '''Completes when''' upstream completes
   *
   * '''Cancels when''' downstream cancels
   */
  def grouped(n: Int): javadsl.Source[java.util.List[Out @uncheckedVariance], Mat] =
    new Source(delegate.grouped(n).map(_.asJava))
2014-10-03 17:33:14 +02:00
2015-11-19 00:11:07 +08:00
/* *
* Ensure stream boundedness by limiting the number of elements from upstream .
* If the number of incoming elements exceeds max , it will signal
* upstream failure `StreamLimitException` downstream .
*
* Due to input buffering some elements may have been
* requested from upstream publishers that will then not be processed downstream
* of this step .
*
* The stream will be completed without producing any elements if `n` is zero
* or negative .
*
* ''' Emits when ''' the specified number of elements to take has not yet been reached
*
* ''' Backpressures when ''' downstream backpressures
*
* ''' Completes when ''' the defined number of elements has been taken or upstream completes
*
* ''' Cancels when ''' the defined number of elements has been taken or downstream cancels
*
* See also [ [ Flow . take ] ] , [ [ Flow . takeWithin ] ] , [ [ Flow . takeWhile ] ]
*/
def limit ( n : Int ) : javadsl.Source [ Out , Mat ] = new Source ( delegate . limit ( n ) )

  /**
   * Ensure stream boundedness by evaluating the cost of incoming elements
   * using a cost function. Exactly how many elements will be allowed to travel downstream depends on the
   * evaluated cost of each element. If the accumulated cost exceeds max, it will signal
   * upstream failure `StreamLimitException` downstream.
   *
   * Due to input buffering some elements may have been
   * requested from upstream publishers that will then not be processed downstream
   * of this step.
   *
   * The stream will be completed without producing any elements if `n` is zero
   * or negative.
   *
   * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute.
   *
   * '''Emits when''' the specified number of elements to take has not yet been reached
   *
   * '''Backpressures when''' downstream backpressures
   *
   * '''Completes when''' the defined number of elements has been taken or upstream completes
   *
   * '''Cancels when''' the defined number of elements has been taken or downstream cancels
   *
   * See also [[Flow.take]], [[Flow.takeWithin]], [[Flow.takeWhile]]
   */
  def limitWeighted(n: Long)(costFn: function.Function[Out, java.lang.Long]): javadsl.Source[Out, Mat] = {
    new Source(delegate.limitWeighted(n)(costFn.apply))
  }

  /**
   * Apply a sliding window over the stream and return the windows as groups of elements, with the last group
   * possibly smaller than requested due to end-of-stream.
   *
   * `n` must be positive, otherwise IllegalArgumentException is thrown.
   * `step` must be positive, otherwise IllegalArgumentException is thrown.
   *
   * '''Emits when''' enough elements have been collected within the window or upstream completed
   *
   * '''Backpressures when''' a window has been assembled and downstream backpressures
   *
   * '''Completes when''' upstream completes
   *
   * '''Cancels when''' downstream cancels
   */
  def sliding(n: Int, step: Int): javadsl.Source[java.util.List[Out @uncheckedVariance], Mat] =
    new Source(delegate.sliding(n, step).map(_.asJava))

  /**
   * Similar to `fold` but is not a terminal operation,
   * emits its current value which starts at `zero` and then
   * applies the current and next value to the given function `f`,
   * emitting the next current value.
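   *
   * For example, a running sum (a minimal sketch; note that `zero` itself is
   * emitted first):
   *
   * {{{
   * Source.range(1, 3).scan(0, (acc, x) -> acc + x); // emits 0, 1, 3, 6
   * }}}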
   *
   * If the function `f` throws an exception and the supervision decision is
   * [[akka.stream.Supervision#restart]], the current value starts at `zero` again
   * and the stream will continue.
   *
   * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute.
   *
   * '''Emits when''' the function scanning the element returns a new element
   *
   * '''Backpressures when''' downstream backpressures
   *
   * '''Completes when''' upstream completes
   *
   * '''Cancels when''' downstream cancels
   */
  def scan[T](zero: T)(f: function.Function2[T, Out, T]): javadsl.Source[T, Mat] =
    new Source(delegate.scan(zero)(f.apply))

  /**
   * Similar to `scan` but with an asynchronous function,
   * emits its current value which starts at `zero` and then
   * applies the current and next value to the given function `f`,
   * emitting a `Future` that resolves to the next current value.
   *
   * If the function `f` throws an exception and the supervision decision is
   * [[akka.stream.Supervision.Restart]], the current value starts at `zero` again
   * and the stream will continue.
   *
   * If the function `f` throws an exception and the supervision decision is
   * [[akka.stream.Supervision.Resume]], the current value starts at the previous
   * current value, or zero when it doesn't have one, and the stream will continue.
   *
   * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute.
   *
   * '''Emits when''' the future returned by `f` completes
   *
   * '''Backpressures when''' downstream backpressures
   *
   * '''Completes when''' upstream completes and the last future returned by `f` completes
   *
   * '''Cancels when''' downstream cancels
   *
   * See also [[FlowOps.scan]]
   */
  def scanAsync[T](zero: T)(f: function.Function2[T, Out, CompletionStage[T]]): javadsl.Source[T, Mat] =
    new Source(delegate.scanAsync(zero) { (out, in) ⇒ f(out, in).toScala })

  /**
   * Similar to `scan` but only emits its result when the upstream completes,
   * after which it also completes. Applies the given function `f` towards its current and next value,
   * yielding the next current value.
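   *
   * For example, summing a finite stream into a single element (a minimal sketch):
   *
   * {{{
   * Source.range(1, 10).fold(0, (acc, x) -> acc + x); // emits 55
   * }}}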
   *
   * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute.
   *
   * If the function `f` throws an exception and the supervision decision is
   * [[akka.stream.Supervision#restart]], the current value starts at `zero` again
   * and the stream will continue.
   *
   * '''Emits when''' upstream completes
   *
   * '''Backpressures when''' downstream backpressures
   *
   * '''Completes when''' upstream completes
   *
   * '''Cancels when''' downstream cancels
   */
  def fold[T](zero: T)(f: function.Function2[T, Out, T]): javadsl.Source[T, Mat] =
    new Source(delegate.fold(zero)(f.apply))

  /**
   * Similar to `fold` but with an asynchronous function.
   * Applies the given function towards its current and next value,
   * yielding the next current value.
   *
   * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute.
   *
   * If the function `f` returns a failure and the supervision decision is
   * [[akka.stream.Supervision.Restart]], the current value starts at `zero` again
   * and the stream will continue.
   *
   * '''Emits when''' upstream completes
   *
   * '''Backpressures when''' downstream backpressures
   *
   * '''Completes when''' upstream completes
   *
   * '''Cancels when''' downstream cancels
   */
  def foldAsync[T](zero: T)(f: function.Function2[T, Out, CompletionStage[T]]): javadsl.Source[T, Mat] =
    new Source(delegate.foldAsync(zero) { (out, in) ⇒ f(out, in).toScala })

  /**
   * Similar to `fold` but uses first element as zero element.
   * Applies the given function towards its current and next value,
   * yielding the next current value.
   *
   * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute.
   *
   * '''Emits when''' upstream completes
   *
   * '''Backpressures when''' downstream backpressures
   *
   * '''Completes when''' upstream completes
   *
   * '''Cancels when''' downstream cancels
   */
  def reduce(f: function.Function2[Out, Out, Out @uncheckedVariance]): javadsl.Source[Out, Mat] =
    new Source(delegate.reduce(f.apply))

  /**
   * Intersperses stream with provided element, similar to how [[scala.collection.immutable.List.mkString]]
   * injects a separator between a List's elements.
   *
   * Additionally can inject start and end marker elements to stream.
   *
   * Examples:
   *
   * {{{
   * Source<Integer, ?> nums = Source.from(Arrays.asList(1, 2, 3));
   * nums.intersperse(",");            //   1 , 2 , 3
   * nums.intersperse("[", ",", "]");  // [ 1 , 2 , 3 ]
   * }}}
   *
   * In case you want to only prepend or only append an element (yet still use the `intersperse` feature
   * to inject a separator between elements), you may want to use the following pattern instead of the 3-argument
   * version of intersperse (see [[Source.concat]] for semantics details):
   *
   * {{{
   * Source.single(">> ").concat(list.intersperse(","))
   * list.intersperse(",").concat(Source.single("END"))
   * }}}
   *
   * '''Emits when''' upstream emits (or before with the `start` element if provided)
   *
   * '''Backpressures when''' downstream backpressures
   *
   * '''Completes when''' upstream completes
   *
   * '''Cancels when''' downstream cancels
   */
  def intersperse[T >: Out](start: T, inject: T, end: T): javadsl.Source[T, Mat] =
    new Source(delegate.intersperse(start, inject, end))

  /**
   * Intersperses stream with provided element, similar to how [[scala.collection.immutable.List.mkString]]
   * injects a separator between a List's elements.
   *
   * Additionally can inject start and end marker elements to stream.
   *
   * Examples:
   *
   * {{{
   * Source<Integer, ?> nums = Source.from(Arrays.asList(1, 2, 3));
   * nums.intersperse(",");            //   1 , 2 , 3
   * nums.intersperse("[", ",", "]");  // [ 1 , 2 , 3 ]
   * }}}
   *
   * '''Emits when''' upstream emits (or before with the `start` element if provided)
   *
   * '''Backpressures when''' downstream backpressures
   *
   * '''Completes when''' upstream completes
   *
   * '''Cancels when''' downstream cancels
   */
  def intersperse[T >: Out](inject: T): javadsl.Source[T, Mat] =
    new Source(delegate.intersperse(inject))

  /**
   * Chunk up this stream into groups of elements received within a time window,
   * or limited by the given number of elements, whatever happens first.
   * Empty groups will not be emitted if no elements are received from upstream.
   * The last group before end-of-stream will contain the buffered elements
   * since the previously emitted group.
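   *
   * For example, batching up to 100 elements per second (a minimal sketch;
   * assumes `scala.concurrent.duration.Duration` and
   * `java.util.concurrent.TimeUnit` are imported):
   *
   * {{{
   * source.groupedWithin(100, Duration.create(1, TimeUnit.SECONDS));
   * }}}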
   *
   * '''Emits when''' the configured time elapses since the last group has been emitted or `n` elements is buffered
   *
   * '''Backpressures when''' downstream backpressures, and there are `n+1` buffered elements
   *
   * '''Completes when''' upstream completes (emits last group)
   *
   * '''Cancels when''' downstream completes
   *
   * `n` must be positive, and `d` must be greater than 0 seconds, otherwise
   * IllegalArgumentException is thrown.
   */
  def groupedWithin(n: Int, d: FiniteDuration): javadsl.Source[java.util.List[Out @uncheckedVariance], Mat] =
    new Source(delegate.groupedWithin(n, d).map(_.asJava)) // TODO optimize to one step
2014-10-03 17:33:14 +02:00
2017-04-28 16:07:06 +03:00
/* *
* Chunk up this stream into groups of elements received within a time window ,
* or limited by the weight of the elements , whatever happens first .
* Empty groups will not be emitted if no elements are received from upstream .
* The last group before end - of - stream will contain the buffered elements
* since the previously emitted group .
*
* ''' Emits when ''' the configured time elapses since the last group has been emitted or weight limit reached
*
* ''' Backpressures when ''' downstream backpressures , and buffered group ( + pending element ) weighs more than `maxWeight`
*
* ''' Completes when ''' upstream completes ( emits last group )
*
* ''' Cancels when ''' downstream completes
*
* `maxWeight` must be positive , and `d` must be greater than 0 seconds , otherwise
* IllegalArgumentException is thrown .
*/
2017-07-03 14:39:00 +02:00
def groupedWeightedWithin ( maxWeight : Long , costFn : function.Function [ Out , java . lang . Long ] , d : FiniteDuration ) : javadsl.Source [ java . util . List [ Out @ uncheckedVariance ] , Mat ] =
2017-04-28 16:07:06 +03:00
new Source ( delegate . groupedWeightedWithin ( maxWeight , d ) ( costFn . apply ) . map ( _ . asJava ) )

  /**
   * Shifts elements emission in time by a specified amount. It allows storing elements
   * in an internal buffer while waiting for the next element to be emitted. Depending on the defined
   * [[akka.stream.DelayOverflowStrategy]] it might drop elements or backpressure the upstream if
   * there is no space available in the buffer.
   *
   * Delay precision is 10 ms to avoid unnecessary timer scheduling cycles.
   *
   * Internal buffer has default capacity 16. You can set buffer size by calling `withAttributes(inputBuffer)`
   *
   * '''Emits when''' there is a pending element in the buffer and configured time for this element elapsed
   *  * EmitEarly - strategy does not wait to emit element if buffer is full
   *
   * '''Backpressures when''' depending on OverflowStrategy
   *  * Backpressure - backpressures when buffer is full
   *  * DropHead, DropTail, DropBuffer - never backpressures
   *  * Fail - fails the stream if buffer gets full
   *
   * '''Completes when''' upstream completes and buffered elements have been drained
   *
   * '''Cancels when''' downstream cancels
   *
   * @param of time to shift all messages
   * @param strategy Strategy that is used when incoming elements cannot fit inside the buffer
   */
  def delay(of: FiniteDuration, strategy: DelayOverflowStrategy): Source[Out, Mat] =
    new Source(delegate.delay(of, strategy))

  /**
   * Discard the given number of elements at the beginning of the stream.
   * No elements will be dropped if `n` is zero or negative.
   *
   * '''Emits when''' the specified number of elements has been dropped already
   *
   * '''Backpressures when''' the specified number of elements has been dropped and downstream backpressures
   *
   * '''Completes when''' upstream completes
   *
   * '''Cancels when''' downstream cancels
   */
  def drop(n: Long): javadsl.Source[Out, Mat] =
    new Source(delegate.drop(n))

  /**
   * Discard the elements received within the given duration at the beginning of the stream.
   *
   * '''Emits when''' the specified time elapsed and a new upstream element arrives
   *
   * '''Backpressures when''' downstream backpressures
   *
   * '''Completes when''' upstream completes
   *
   * '''Cancels when''' downstream cancels
   */
  def dropWithin(d: FiniteDuration): javadsl.Source[Out, Mat] =
    new Source(delegate.dropWithin(d))

  /**
   * Terminate processing (and cancel the upstream publisher) after predicate
   * returns false for the first time. Due to input buffering some elements may have been
   * requested from upstream publishers that will then not be processed downstream
   * of this step.
   *
   * The stream will be completed without producing any elements if predicate is false for
   * the first stream element.
   *
   * '''Emits when''' the predicate is true
   *
   * '''Backpressures when''' downstream backpressures
   *
   * '''Completes when''' predicate returned false or upstream completes
   *
   * '''Cancels when''' predicate returned false or downstream cancels
   */
  def takeWhile(p: function.Predicate[Out]): javadsl.Source[Out, Mat] =
    new Source(delegate.takeWhile(p.test))

  /**
   * Discard elements at the beginning of the stream while predicate is true.
   * No elements will be dropped after the predicate has returned false for the first time.
   *
   * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute.
   *
   * '''Emits when''' predicate returned false and for all following stream elements
   *
   * '''Backpressures when''' predicate returned false and downstream backpressures
   *
   * '''Completes when''' upstream completes
   *
   * '''Cancels when''' downstream cancels
   *
   * @param p predicate is evaluated for each new element until first time returns false
   */
  def dropWhile(p: function.Predicate[Out]): javadsl.Source[Out, Mat] =
    new Source(delegate.dropWhile(p.test))

  /**
   * Terminate processing (and cancel the upstream publisher) after the given
   * number of elements. Due to input buffering some elements may have been
   * requested from upstream publishers that will then not be processed downstream
   * of this step.
   *
   * The stream will be completed without producing any elements if `n` is zero
   * or negative.
   *
   * '''Emits when''' the specified number of elements to take has not yet been reached
   *
   * '''Backpressures when''' downstream backpressures
   *
   * '''Completes when''' the defined number of elements has been taken or upstream completes
   *
   * '''Cancels when''' the defined number of elements has been taken or downstream cancels
   */
  def take(n: Long): javadsl.Source[Out, Mat] =
    new Source(delegate.take(n))

  /**
   * Terminate processing (and cancel the upstream publisher) after the given
   * duration. Due to input buffering some elements may have been
   * requested from upstream publishers that will then not be processed downstream
   * of this step.
   *
   * Note that this can be combined with [[#take]] to limit the number of elements
   * within the duration.
   *
   * '''Emits when''' an upstream element arrives
   *
   * '''Backpressures when''' downstream backpressures
   *
   * '''Completes when''' upstream completes or timer fires
   *
   * '''Cancels when''' downstream cancels or timer fires
   */
  def takeWithin(d: FiniteDuration): javadsl.Source[Out, Mat] =
    new Source(delegate.takeWithin(d))

  /**
   * Allows a faster upstream to progress independently of a slower subscriber by conflating elements into a summary
   * until the subscriber is ready to accept them. For example a conflate step might average incoming numbers if the
   * upstream publisher is faster.
   *
   * This version of conflate allows deriving a seed from the first element and changing the aggregated type to be
   * different than the input type. See [[Flow.conflate]] for a simpler version that does not change types.
   *
   * This element only rolls up elements if the upstream is faster, but if the downstream is faster it will not
   * duplicate elements.
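   *
   * For example, summing bursts of numbers into a running total while the
   * downstream is busy (a minimal sketch, assuming a stream of Integers):
   *
   * {{{
   * source.conflateWithSeed(x -> x, (acc, x) -> acc + x);
   * }}}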
   *
   * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute.
   *
   * '''Emits when''' downstream stops backpressuring and there is a conflated element available
   *
   * '''Backpressures when''' never
   *
   * '''Completes when''' upstream completes
   *
   * '''Cancels when''' downstream cancels
   *
   * See also [[Source.conflate]], [[Source.batch]], [[Source.batchWeighted]]
   *
   * @param seed Provides the first state for a conflated value using the first unconsumed element as a start
   * @param aggregate Takes the currently aggregated value and the current pending element to produce a new aggregate
   */
  def conflateWithSeed[S](seed: function.Function[Out, S], aggregate: function.Function2[S, Out, S]): javadsl.Source[S, Mat] =
    new Source(delegate.conflateWithSeed(seed.apply)(aggregate.apply))

  /**
   * Allows a faster upstream to progress independently of a slower subscriber by conflating elements into a summary
   * until the subscriber is ready to accept them. For example a conflate step might average incoming numbers if the
   * upstream publisher is faster.
   *
   * This version of conflate does not change the output type of the stream. See [[Source.conflateWithSeed]] for a
   * more flexible version that can take a seed function and transform elements while rolling up.
   *
   * This element only rolls up elements if the upstream is faster, but if the downstream is faster it will not
   * duplicate elements.
   *
   * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute.
   *
   * '''Emits when''' downstream stops backpressuring and there is a conflated element available
   *
   * '''Backpressures when''' never
   *
   * '''Completes when''' upstream completes
   *
   * '''Cancels when''' downstream cancels
   *
   * See also [[Source.conflateWithSeed]], [[Source.batch]], [[Source.batchWeighted]]
   *
   * @param aggregate Takes the currently aggregated value and the current pending element to produce a new aggregate
   */
  def conflate[O2 >: Out](aggregate: function.Function2[O2, O2, O2]): javadsl.Source[O2, Mat] =
    new Source(delegate.conflate(aggregate.apply))

  /**
   * Allows a faster upstream to progress independently of a slower subscriber by aggregating elements into batches
   * until the subscriber is ready to accept them. For example a batch step might store received elements in
   * an array up to the allowed max limit if the upstream publisher is faster.
   *
   * This element only rolls up elements if the upstream is faster, but if the downstream is faster it will not
   * duplicate elements.
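   *
   * For example, collecting fast-arriving elements into lists of at most 10
   * while the downstream is busy (a minimal sketch; assumes `java.util`
   * imports):
   *
   * {{{
   * source.batch(10,
   *   first -> new ArrayList<>(Collections.singletonList(first)),
   *   (list, elem) -> { list.add(elem); return list; });
   * }}}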
   *
   * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute.
   *
   * '''Emits when''' downstream stops backpressuring and there is an aggregated element available
   *
   * '''Backpressures when''' there are `max` batched elements and 1 pending element and downstream backpressures
   *
   * '''Completes when''' upstream completes and there is no batched/pending element waiting
   *
   * '''Cancels when''' downstream cancels
   *
   * See also [[Source.conflate]], [[Source.batchWeighted]]
   *
   * @param max maximum number of elements to batch before backpressuring upstream (must be positive non-zero)
   * @param seed Provides the first state for a batched value using the first unconsumed element as a start
   * @param aggregate Takes the currently batched value and the current pending element to produce a new aggregate
   */
  def batch[S](max: Long, seed: function.Function[Out, S], aggregate: function.Function2[S, Out, S]): javadsl.Source[S, Mat] =
    new Source(delegate.batch(max, seed.apply)(aggregate.apply))

  /**
   * Allows a faster upstream to progress independently of a slower subscriber by aggregating elements into batches
   * until the subscriber is ready to accept them. For example a batch step might concatenate `ByteString`
   * elements up to the allowed max limit if the upstream publisher is faster.
   *
   * This element only rolls up elements if the upstream is faster, but if the downstream is faster it will not
   * duplicate elements.
   *
   * Batching will apply for all elements, even if a single element cost is greater than the total allowed limit.
   * In this case, previous batched elements will be emitted, then the "heavy" element will be emitted (after
   * being applied with the `seed` function) without batching further elements with it, and then the rest of the
   * incoming elements are batched.
   *
   * '''Emits when''' downstream stops backpressuring and there is a batched element available
   *
   * '''Backpressures when''' there are `max` weighted batched elements + 1 pending element and downstream backpressures
   *
   * '''Completes when''' upstream completes and there is no batched/pending element waiting
   *
   * '''Cancels when''' downstream cancels
   *
   * See also [[Source.conflate]], [[Source.batch]]
   *
   * @param max maximum weight of elements to batch before backpressuring upstream (must be positive non-zero)
   * @param costFn a function to compute a single element weight
   * @param seed Provides the first state for a batched value using the first unconsumed element as a start
   * @param aggregate Takes the currently batched value and the current pending element to produce a new batch
   */
  def batchWeighted[S](max: Long, costFn: function.Function[Out, java.lang.Long], seed: function.Function[Out, S], aggregate: function.Function2[S, Out, S]): javadsl.Source[S, Mat] =
    new Source(delegate.batchWeighted(max, costFn.apply, seed.apply)(aggregate.apply))

  /**
   * Allows a faster downstream to progress independently of a slower publisher by extrapolating elements from an older
   * element until new element comes from the upstream. For example an expand step might repeat the last element for
   * the subscriber until it receives an update from upstream.
   *
   * This element will never "drop" upstream elements as all elements go through at least one extrapolation step.
   * This means that if the upstream is actually faster than the downstream it will be backpressured by the downstream
   * subscriber.
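   *
   * For example, repeating the latest element indefinitely for a faster
   * downstream (a minimal sketch; assumes `java.util.stream.Stream` is
   * imported):
   *
   * {{{
   * source.expand(in -> Stream.iterate(in, i -> i).iterator());
   * }}}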
   *
   * Expand does not support [[akka.stream.Supervision#restart]] and [[akka.stream.Supervision#resume]].
   * Exceptions from the `seed` or `extrapolate` functions will complete the stream with failure.
   *
   * '''Emits when''' downstream stops backpressuring
   *
   * '''Backpressures when''' downstream backpressures or iterator runs empty
   *
   * '''Completes when''' upstream completes
   *
   * '''Cancels when''' downstream cancels
   *
   * @param extrapolate Takes the current extrapolation state to produce an output element and the next extrapolation
   *                    state.
   */
  def expand[U](extrapolate: function.Function[Out, java.util.Iterator[U]]): javadsl.Source[U, Mat] =
    new Source(delegate.expand(in ⇒ extrapolate(in).asScala))

  /**
   * Adds a fixed size buffer in the flow that allows storing elements from a faster upstream until it becomes full.
   * Depending on the defined [[akka.stream.OverflowStrategy]] it might drop elements or backpressure the upstream if
   * there is no space available.
   *
   * '''Emits when''' downstream stops backpressuring and there is a pending element in the buffer
   *
   * '''Backpressures when''' downstream backpressures or depending on OverflowStrategy:
   * <ul>
   *   <li>Backpressure - backpressures when buffer is full</li>
   *   <li>DropHead, DropTail, DropBuffer - never backpressures</li>
   *   <li>Fail - fails the stream if buffer gets full</li>
   * </ul>
   *
   * '''Completes when''' upstream completes and buffered elements have been drained
   *
   * '''Cancels when''' downstream cancels
   *
   * @param size The size of the buffer in element count
   * @param overflowStrategy Strategy that is used when incoming elements cannot fit inside the buffer
   */
  def buffer(size: Int, overflowStrategy: OverflowStrategy): javadsl.Source[Out, Mat] =
    new Source(delegate.buffer(size, overflowStrategy))

  /**
   * Takes up to `n` elements from the stream (less than `n` if the upstream completes before emitting `n` elements)
   * and returns a pair containing a strict sequence of the taken elements
   * and a stream representing the remaining elements. If `n` is zero or negative, then this will return a pair
   * of an empty collection and a stream containing the whole upstream unchanged.
   *
   * In case of an upstream error, depending on the current state
   *  - the master stream signals the error if less than `n` elements have been seen, and therefore the substream
   *    has not yet been emitted
   *  - the tail substream signals the error after the prefix and tail has been emitted by the main stream
   *    (at that point the main stream has already completed)
   *
   * '''Emits when''' the configured number of prefix elements are available. Emits this prefix, and the rest
   * as a substream
   *
   * '''Backpressures when''' downstream backpressures or substream backpressures
   *
   * '''Completes when''' prefix elements have been consumed and substream has been consumed
   *
   * '''Cancels when''' downstream cancels or substream cancels
   */
  def prefixAndTail(n: Int): javadsl.Source[akka.japi.Pair[java.util.List[Out @uncheckedVariance], javadsl.Source[Out @uncheckedVariance, NotUsed]], Mat] =
    new Source(delegate.prefixAndTail(n).map { case (taken, tail) ⇒ akka.japi.Pair(taken.asJava, tail.asJava) })

  /**
   * This operation demultiplexes the incoming stream into separate output
   * streams, one for each element key. The key is computed for each element
   * using the given function. When a new key is encountered for the first time
   * a new substream is opened and subsequently fed with all elements belonging to
   * that key.
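   *
   * For example, splitting numbers into even/odd substreams, transforming
   * each, and merging them back (a minimal sketch):
   *
   * {{{
   * Source.range(1, 10)
   *   .groupBy(2, i -> i % 2)  // at most 2 substreams: even and odd
   *   .map(i -> i * 10)        // applied to every substream
   *   .mergeSubstreams();      // flatten back into a single Source
   * }}}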
   *
   * The object returned from this method is not a normal [[Flow]],
   * it is a [[SubSource]]. This means that after this combinator all transformations
   * are applied to all encountered substreams in the same fashion. Substream mode
   * is exited either by closing the substream (i.e. connecting it to a [[Sink]])
   * or by merging the substreams back together; see the `to` and `mergeBack` methods
   * on [[SubSource]] for more information.
   *
   * It is important to note that the substreams also propagate back-pressure as
   * any other stream, which means that blocking one substream will block the `groupBy`
   * operator itself — and thereby all substreams — once all internal or
   * explicit buffers are filled.
   *
   * If the group by function `f` throws an exception and the supervision decision
   * is [[akka.stream.Supervision#stop]] the stream and substreams will be completed
   * with failure.
   *
   * If the group by function `f` throws an exception and the supervision decision
   * is [[akka.stream.Supervision#resume]] or [[akka.stream.Supervision#restart]]
   * the element is dropped and the stream and substreams continue.
   *
   * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute.
   *
   * '''Emits when''' an element for which the grouping function returns a group that has not yet been created.
   * Emits the new group
   *
   * '''Backpressures when''' there is an element pending for a group whose substream backpressures
   *
   * '''Completes when''' upstream completes
   *
   * '''Cancels when''' downstream cancels and all substreams cancel
   *
   * @param maxSubstreams configures the maximum number of substreams (keys)
   *                      that are supported; if more distinct keys are encountered then the stream fails
   */
  def groupBy[K](maxSubstreams: Int, f: function.Function[Out, K]): SubSource[Out @uncheckedVariance, Mat] =
    new SubSource(delegate.groupBy(maxSubstreams, f.apply))

  /**
   * This operation applies the given predicate to all incoming elements and
   * emits them to a stream of output streams, always beginning a new one with
   * the current element if the given predicate returns true for it. This means
   * that for the following series of predicate values, three substreams will
   * be produced with lengths 1, 2, and 3:
   *
   * {{{
   * false,              // element goes into first substream
   * true, false,        // elements go into second substream
   * true, false, false  // elements go into third substream
   * }}}
   *
   * In case the *first* element of the stream matches the predicate, the first
   * substream emitted by splitWhen will start from that element. For example:
   *
   * {{{
   * true, false, false  // first substream starts from the split-by element
   * true, false         // subsequent substreams operate the same way
   * }}}
   *
   * The object returned from this method is not a normal [[Flow]],
   * it is a [[SubSource]]. This means that after this combinator all transformations
   * are applied to all encountered substreams in the same fashion. Substream mode
   * is exited either by closing the substream (i.e. connecting it to a [[Sink]])
   * or by merging the substreams back together; see the `to` and `mergeBack` methods
   * on [[SubSource]] for more information.
   *
   * It is important to note that the substreams also propagate back-pressure as
   * any other stream, which means that blocking one substream will block the `splitWhen`
   * operator itself — and thereby all substreams — once all internal or
   * explicit buffers are filled.
   *
   * If the split predicate `p` throws an exception and the supervision decision
   * is [[akka.stream.Supervision.Stop]] the stream and substreams will be completed
   * with failure.
   *
   * If the split predicate `p` throws an exception and the supervision decision
   * is [[akka.stream.Supervision.Resume]] or [[akka.stream.Supervision.Restart]]
   * the element is dropped and the stream and substreams continue.
   *
   * '''Emits when''' an element for which the provided predicate is true, opening and emitting a new substream for subsequent elements
   *
   * '''Backpressures when''' there is an element pending for the next substream, but the previous is not fully consumed yet, or the substream backpressures
   *
   * '''Completes when''' upstream completes
   *
   * '''Cancels when''' downstream cancels and substreams cancel
   *
   * See also [[Source.splitAfter]].
   */
  def splitWhen(p: function.Predicate[Out]): SubSource[Out, Mat] =
    new SubSource(delegate.splitWhen(p.test))

  /**
   * This operation applies the given predicate to all incoming elements and
   * emits them to a stream of output streams. It *ends* the current substream when the
   * predicate is true. This means that for the following series of predicate values,
   * three substreams will be produced with lengths 2, 2, and 3:
   *
   * {{{
   * false, true,        // elements go into first substream
   * false, true,        // elements go into second substream
   * false, false, true  // elements go into third substream
   * }}}
   *
   * The object returned from this method is not a normal [[Flow]],
   * it is a [[SubSource]]. This means that after this combinator all transformations
   * are applied to all encountered substreams in the same fashion. Substream mode
   * is exited either by closing the substream (i.e. connecting it to a [[Sink]])
   * or by merging the substreams back together; see the `to` and `mergeBack` methods
   * on [[SubSource]] for more information.
   *
   * It is important to note that the substreams also propagate back-pressure as
   * any other stream, which means that blocking one substream will block the `splitAfter`
   * operator itself — and thereby all substreams — once all internal or
   * explicit buffers are filled.
   *
   * If the split predicate `p` throws an exception and the supervision decision
   * is [[akka.stream.Supervision.Stop]] the stream and substreams will be completed
   * with failure.
   *
   * If the split predicate `p` throws an exception and the supervision decision
   * is [[akka.stream.Supervision.Resume]] or [[akka.stream.Supervision.Restart]]
   * the element is dropped and the stream and substreams continue.
   *
   * '''Emits when''' an element passes through. When the provided predicate is true it emits the element
   * and opens a new substream for subsequent elements
   *
   * '''Backpressures when''' there is an element pending for the next substream, but the previous
   * is not fully consumed yet, or the substream backpressures
   *
   * '''Completes when''' upstream completes
   *
   * '''Cancels when''' downstream cancels and substreams cancel
   *
   * See also [[Source.splitWhen]].
   */
  def splitAfter[U >: Out](p: function.Predicate[Out]): SubSource[Out, Mat] =
    new SubSource(delegate.splitAfter(p.test))

  /**
   * Transform each input element into a `Source` of output elements that is
   * then flattened into the output stream by concatenation,
   * fully consuming one Source after the other.
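   *
   * For example, expanding each number `n` into the sequence `1..n`, in order
   * (a minimal sketch):
   *
   * {{{
   * Source.from(Arrays.asList(2, 3))
   *   .flatMapConcat(n -> Source.range(1, n)); // emits 1, 2, 1, 2, 3
   * }}}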
   *
   * '''Emits when''' a currently consumed substream has an element available
   *
   * '''Backpressures when''' downstream backpressures
   *
   * '''Completes when''' upstream completes and all consumed substreams complete
   *
   * '''Cancels when''' downstream cancels
   */
  def flatMapConcat[T, M](f: function.Function[Out, _ <: Graph[SourceShape[T], M]]): Source[T, Mat] =
    new Source(delegate.flatMapConcat[T, M](x ⇒ f(x)))

  /**
   * Transform each input element into a `Source` of output elements that is
   * then flattened into the output stream by merging, where at most `breadth`
   * substreams are being consumed at any given time.
   *
   * '''Emits when''' a currently consumed substream has an element available
   *
   * '''Backpressures when''' downstream backpressures
   *
   * '''Completes when''' upstream completes and all consumed substreams complete
   *
   * '''Cancels when''' downstream cancels
   */
  def flatMapMerge[T, M](breadth: Int, f: function.Function[Out, _ <: Graph[SourceShape[T], M]]): Source[T, Mat] =
    new Source(delegate.flatMapMerge(breadth, o ⇒ f(o)))

  /**
   * If the first element has not passed through this stage before the provided timeout, the stream is failed
   * with a [[java.util.concurrent.TimeoutException]].
   *
   * '''Emits when''' upstream emits an element
   *
   * '''Backpressures when''' downstream backpressures
   *
   * '''Completes when''' upstream completes or fails if timeout elapses before first element arrives
   *
   * '''Cancels when''' downstream cancels
   */
  def initialTimeout(timeout: FiniteDuration): javadsl.Source[Out, Mat] =
    new Source(delegate.initialTimeout(timeout))

  /**
   * If the completion of the stream does not happen within the provided timeout, the stream is failed
   * with a [[java.util.concurrent.TimeoutException]].
   *
   * '''Emits when''' upstream emits an element
   *
   * '''Backpressures when''' downstream backpressures
   *
   * '''Completes when''' upstream completes or fails if timeout elapses before upstream completes
   *
   * '''Cancels when''' downstream cancels
   */
  def completionTimeout(timeout: FiniteDuration): javadsl.Source[Out, Mat] =
    new Source(delegate.completionTimeout(timeout))

  /**
   * If the time between two processed elements exceeds the provided timeout, the stream is failed
   * with a [[java.util.concurrent.TimeoutException]]. The timeout is checked periodically,
   * so the resolution of the check is one period (equal to the timeout value).
   *
   * '''Emits when''' upstream emits an element
   *
   * '''Backpressures when''' downstream backpressures
   *
   * '''Completes when''' upstream completes or fails if timeout elapses between two emitted elements
   *
   * '''Cancels when''' downstream cancels
   */
  def idleTimeout(timeout: FiniteDuration): javadsl.Source[Out, Mat] =
    new Source(delegate.idleTimeout(timeout))

  /**
   * If the time between the emission of an element and the following downstream demand exceeds the provided timeout,
   * the stream is failed with a [[java.util.concurrent.TimeoutException]]. The timeout is checked periodically,
   * so the resolution of the check is one period (equal to the timeout value).
   *
   * '''Emits when''' upstream emits an element
   *
   * '''Backpressures when''' downstream backpressures
   *
   * '''Completes when''' upstream completes or fails if timeout elapses between element emission and downstream demand.
   *
   * '''Cancels when''' downstream cancels
   */
  def backpressureTimeout(timeout: FiniteDuration): javadsl.Source[Out, Mat] =
    new Source(delegate.backpressureTimeout(timeout))

  /**
   * Injects additional elements if upstream does not emit for a configured amount of time. In other words, this
   * stage attempts to maintain a base rate of emitted elements towards the downstream.
   *
   * If the downstream backpressures then no element is injected until downstream demand arrives. Injected elements
   * do not accumulate during this period.
   *
   * Upstream elements are always preferred over injected elements.
   *
   * '''Emits when''' upstream emits an element or if the upstream was idle for the configured period
   *
   * '''Backpressures when''' downstream backpressures
   *
   * '''Completes when''' upstream completes
   *
   * '''Cancels when''' downstream cancels
   */
  def keepAlive[U >: Out](maxIdle: FiniteDuration, injectedElem: function.Creator[U]): javadsl.Source[U, Mat] =
    new Source(delegate.keepAlive(maxIdle, () ⇒ injectedElem.create()))
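
  // Illustrative only (Java): emit an assumed `HEARTBEAT` sentinel whenever
  // upstream stays silent for five seconds.
  //
  //   source.keepAlive(Duration.create(5, TimeUnit.SECONDS), () -> HEARTBEAT)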

  /**
   * Sends elements downstream with speed limited to `elements/per`. In other words, this stage sets the maximum rate
   * for emitting messages. This combinator works for streams where all elements have the same cost or length.
   *
   * Throttle implements the token bucket model. There is a bucket with a given token capacity (burst size or maximumBurst).
   * Tokens drop into the bucket at a given rate and can be saved for later use up to the bucket capacity
   * to allow some burstiness. Whenever the stream wants to send an element, it takes as many
   * tokens from the bucket as the number of elements. If there are not enough tokens, the throttle waits until the
   * bucket accumulates enough. The bucket is full when the stream is materialized and started.
   *
   * Parameter `mode` manages behaviour when upstream is faster than throttle rate:
   *  - [[akka.stream.ThrottleMode.Shaping]] makes pauses before emitting messages to meet throttle rate
   *  - [[akka.stream.ThrottleMode.Enforcing]] fails with exception when upstream is faster than throttle rate
   *
   * It is recommended to use non-zero burst sizes as they improve both performance and throttling precision by allowing
   * the implementation to avoid using the scheduler when input rates fall below the enforced limit and to reduce
   * most of the inaccuracy caused by the scheduler resolution (which is in the range of milliseconds).
   *
   * The throttler always enforces the rate limit, but in certain cases (mostly due to limited scheduler resolution) it
   * enforces a tighter bound than what was prescribed. This can also be mitigated by increasing the burst size.
   *
   * '''Emits when''' upstream emits an element and the configured time per element has elapsed
   *
   * '''Backpressures when''' downstream backpressures or the incoming rate is higher than the speed limit
   *
   * '''Completes when''' upstream completes
   *
   * '''Cancels when''' downstream cancels
   */
  def throttle(elements: Int, per: FiniteDuration, maximumBurst: Int,
               mode: ThrottleMode): javadsl.Source[Out, Mat] =
    new Source(delegate.throttle(elements, per, maximumBurst, mode))
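
  // Illustrative only (Java): cap the stream at roughly ten elements per second
  // with a burst allowance of 15; `ThrottleMode.shaping()` delays rather than fails.
  //
  //   source.throttle(10, Duration.create(1, TimeUnit.SECONDS), 15, ThrottleMode.shaping())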

  /**
   * Sends elements downstream with speed limited to `cost/per`. The cost is
   * calculated for each element individually by calling the `costCalculation` function.
   * This combinator works for streams where elements have different cost (length),
   * for example streams of `ByteString`.
   *
   * Throttle implements the token bucket model. There is a bucket with a given token capacity (burst size or maximumBurst).
   * Tokens drop into the bucket at a given rate and can be saved for later use up to the bucket capacity
   * to allow some burstiness. Whenever the stream wants to send an element, it takes as many
   * tokens from the bucket as the element costs. If there are not enough tokens, the throttle waits until the
   * bucket accumulates enough. Elements that cost more than the allowed burst will be delayed proportionally
   * to their cost minus available tokens, meeting the target rate.
   *
   * Parameter `mode` manages behaviour when upstream is faster than throttle rate:
   *  - [[akka.stream.ThrottleMode.Shaping]] makes pauses before emitting messages to meet throttle rate
   *  - [[akka.stream.ThrottleMode.Enforcing]] fails with exception when upstream is faster than throttle rate. Enforcing
   *    cannot emit elements that cost more than the maximumBurst
   *
   * '''Emits when''' upstream emits an element and the configured time per element has elapsed
   *
   * '''Backpressures when''' downstream backpressures or the incoming rate is higher than the speed limit
   *
   * '''Completes when''' upstream completes
   *
   * '''Cancels when''' downstream cancels
   */
  def throttle(cost: Int, per: FiniteDuration, maximumBurst: Int,
               costCalculation: function.Function[Out, Integer], mode: ThrottleMode): javadsl.Source[Out, Mat] =
    new Source(delegate.throttle(cost, per, maximumBurst, costCalculation.apply _, mode))
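
  // Illustrative only (Java): limit an assumed `byteStringSource` to about
  // 1024 bytes per second, using each element's size as its cost.
  //
  //   byteStringSource.throttle(1024, Duration.create(1, TimeUnit.SECONDS), 2048,
  //       bytes -> bytes.size(), ThrottleMode.enforcing())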

  /**
   * Detaches upstream demand from downstream demand without detaching the
   * stream rates; in other words it acts like a buffer of size 1.
   *
   * '''Emits when''' upstream emits an element
   *
   * '''Backpressures when''' downstream backpressures
   *
   * '''Completes when''' upstream completes
   *
   * '''Cancels when''' downstream cancels
   */
  def detach: javadsl.Source[Out, Mat] = new Source(delegate.detach)

  /**
   * Materializes to a `CompletionStage[Done]` that completes when the stream terminates.
   * It completes with success when a completion message is received from upstream or a cancellation
   * arrives from downstream. It fails with the same error when an error message is received from
   * upstream.
   */
  def watchTermination[M]()(matF: function.Function2[Mat, CompletionStage[Done], M]): javadsl.Source[Out, M] =
    new Source(delegate.watchTermination()((left, right) ⇒ matF(left, right.toJava)))
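
  // A minimal sketch (Java): keep the termination signal as the materialized
  // value and react once the stream finishes; `mat` is an assumed materializer.
  //
  //   CompletionStage<Done> done =
  //       source.watchTermination(Keep.right()).to(Sink.ignore()).run(mat);
  //   done.whenComplete((d, failure) ->
  //       System.out.println(failure == null ? "completed" : "failed: " + failure));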

  /**
   * Materializes to a `FlowMonitor[Out]` that allows monitoring of the current flow. All events are propagated
   * by the monitor unchanged. Note that the monitor inserts a memory barrier every time it processes an
   * event, and may therefore affect performance.
   * The `combine` function is used to combine the `FlowMonitor` with this flow's materialized value.
   */
  def monitor[M]()(combine: function.Function2[Mat, FlowMonitor[Out], M]): javadsl.Source[Out, M] =
    new Source(delegate.monitor()(combinerToScala(combine)))
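
  // A hedged sketch (Java): materialize the monitor and poll its most recent
  // event; `intSource` and `mat` are assumed.
  //
  //   FlowMonitor<Integer> monitor =
  //       intSource.monitor(Keep.right()).to(Sink.ignore()).run(mat);
  //   // monitor.state() reports Initialized, Received(elem), Failed or Finished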

  /**
   * Delays the initial element by the specified duration.
   *
   * '''Emits when''' upstream emits an element if the initial delay has already elapsed
   *
   * '''Backpressures when''' downstream backpressures or the initial delay has not yet elapsed
   *
   * '''Completes when''' upstream completes
   *
   * '''Cancels when''' downstream cancels
   */
  def initialDelay(delay: FiniteDuration): javadsl.Source[Out, Mat] =
    new Source(delegate.initialDelay(delay))

  /**
   * Change the attributes of this [[Source]] to the given ones and seal the list
   * of attributes. This means that further calls will not be able to remove these
   * attributes, but instead add new ones. Note that this
   * operation has no effect on an empty Source (because the attributes apply
   * only to the contained processing stages).
   */
  override def withAttributes(attr: Attributes): javadsl.Source[Out, Mat] =
    new Source(delegate.withAttributes(attr))

  /**
   * Add the given attributes to this Source. Further calls to `withAttributes`
   * will not remove these attributes. Note that this
   * operation has no effect on an empty Source (because the attributes apply
   * only to the contained processing stages).
   */
  override def addAttributes(attr: Attributes): javadsl.Source[Out, Mat] =
    new Source(delegate.addAttributes(attr))
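
  // Illustrative only (Java): name the source and run its stages on a custom
  // dispatcher; "my-dispatcher" is an assumed configuration path.
  //
  //   source.withAttributes(
  //       Attributes.name("mySource").and(ActorAttributes.dispatcher("my-dispatcher")))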

  /**
   * Add a `name` attribute to this Source.
   */
  override def named(name: String): javadsl.Source[Out, Mat] =
    new Source(delegate.named(name))

  /**
   * Put an asynchronous boundary around this `Source`.
   */
  override def async: javadsl.Source[Out, Mat] =
    new Source(delegate.async)
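
  // Illustrative only (Java): without the boundary both maps fuse into a single
  // actor; with it, the assumed functions `f1` and `f2` run pipelined.
  //
  //   source.map(f1).async().map(f2)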

  /**
   * Logs elements flowing through the stream as well as completion and erroring.
   *
   * By default element and completion signals are logged on debug level, and errors are logged on error level.
   * This can be adjusted according to your needs by providing a custom [[Attributes.LogLevels]] attribute on the given Flow.
   *
   * The `extract` function will be applied to each element before logging, so it is possible to log only those fields
   * of a complex object flowing through this element.
   *
   * Uses the given [[LoggingAdapter]] for logging.
   *
   * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute.
   *
   * '''Emits when''' the mapping function returns an element
   *
   * '''Backpressures when''' downstream backpressures
   *
   * '''Completes when''' upstream completes
   *
   * '''Cancels when''' downstream cancels
   */
  def log(name: String, extract: function.Function[Out, Any], log: LoggingAdapter): javadsl.Source[Out, Mat] =
    new Source(delegate.log(name, e ⇒ extract.apply(e))(log))
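
  // A hedged sketch (Java): log each element under the name "nums" with the
  // system's logger, raising elements to info level; `system` is an assumed
  // ActorSystem.
  //
  //   source
  //       .log("nums", elem -> "elem: " + elem, system.log())
  //       .withAttributes(Attributes.createLogLevels(
  //           Logging.InfoLevel(), Logging.DebugLevel(), Logging.ErrorLevel()))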

  /**
   * Logs elements flowing through the stream as well as completion and erroring.
   *
   * By default element and completion signals are logged on debug level, and errors are logged on error level.
   * This can be adjusted according to your needs by providing a custom [[Attributes.LogLevels]] attribute on the given Flow.
   *
   * The `extract` function will be applied to each element before logging, so it is possible to log only those fields
   * of a complex object flowing through this element.
   *
   * Uses an internally created [[LoggingAdapter]] which uses `akka.stream.Log` as its source (use this class to configure slf4j loggers).
   *
   * '''Emits when''' the mapping function returns an element
   *
   * '''Backpressures when''' downstream backpressures
   *
   * '''Completes when''' upstream completes
   *
   * '''Cancels when''' downstream cancels
   */
  def log(name: String, extract: function.Function[Out, Any]): javadsl.Source[Out, Mat] =
    this.log(name, extract, null)

  /**
   * Logs elements flowing through the stream as well as completion and erroring.
   *
   * By default element and completion signals are logged on debug level, and errors are logged on error level.
   * This can be adjusted according to your needs by providing a custom [[Attributes.LogLevels]] attribute on the given Flow.
   *
   * Uses the given [[LoggingAdapter]] for logging.
   *
   * '''Emits when''' the mapping function returns an element
   *
   * '''Backpressures when''' downstream backpressures
   *
   * '''Completes when''' upstream completes
   *
   * '''Cancels when''' downstream cancels
   */
  def log(name: String, log: LoggingAdapter): javadsl.Source[Out, Mat] =
    this.log(name, ConstantFun.javaIdentityFunction[Out], log)

  /**
   * Logs elements flowing through the stream as well as completion and erroring.
   *
   * By default element and completion signals are logged on debug level, and errors are logged on error level.
   * This can be adjusted according to your needs by providing a custom [[Attributes.LogLevels]] attribute on the given Flow.
   *
   * Uses an internally created [[LoggingAdapter]] which uses `akka.stream.Log` as its source (use this class to configure slf4j loggers).
   *
   * '''Emits when''' the mapping function returns an element
   *
   * '''Backpressures when''' downstream backpressures
   *
   * '''Completes when''' upstream completes
   *
   * '''Cancels when''' downstream cancels
   */
  def log(name: String): javadsl.Source[Out, Mat] =
    this.log(name, ConstantFun.javaIdentityFunction[Out], null)
}