Replace graph with operator in Scaladoc/Javadoc

parent 57615f1e88
commit 60eee84345

64 changed files with 394 additions and 394 deletions
@@ -218,7 +218,7 @@ object Patterns {
  * final Future<Object> f = Patterns.ask(worker, request, timeout);
  * // apply some transformation (i.e. enrich with request info)
  * final Future<Object> transformed = f.map(new akka.japi.Function<Object, Object>() { ... });
- * // send it on to the next stage
+ * // send it on to the next operator
  * Patterns.pipe(transformed, context).to(nextActor);
  * }}}
  */
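The Javadoc above walks through ask, a transformation, and pipe on the Java API. A rough Scala-side sketch of the same flow follows; the actor and message names are made up for illustration and are not part of this commit.

import akka.actor.{ Actor, ActorSystem, Props }
import akka.pattern.{ ask, pipe }
import akka.util.Timeout
import scala.concurrent.duration._

object AskPipeExample extends App {
  implicit val system: ActorSystem = ActorSystem("ask-pipe")
  import system.dispatcher                     // ExecutionContext for map and pipeTo
  implicit val timeout: Timeout = 3.seconds

  val worker = system.actorOf(Props(new Actor {
    def receive = { case s: String => sender() ! s.toUpperCase }
  }))
  val nextActor = system.actorOf(Props(new Actor {
    def receive = { case s => println(s"got $s"); context.system.terminate() }
  }))

  val f = worker ? "request"                                   // Future[Any]
  val transformed = f.mapTo[String].map(r => s"enriched: $r")  // apply some transformation
  transformed.pipeTo(nextActor)                                // send it on to the next actor
}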
@@ -334,7 +334,7 @@ object PatternsCS {
  *
  * @param actor the actor to be asked
  * @param messageFactory function taking an actor ref and returning the message to be sent
- * @param timeout the timeout for the response before failing the returned completion stage
+ * @param timeout the timeout for the response before failing the returned completion operator
  */
 def askWithReplyTo(actor: ActorRef, messageFactory: japi.function.Function[ActorRef, Any], timeout: Timeout): CompletionStage[AnyRef] =
   extended.ask(actor, messageFactory.apply _)(timeout).toJava.asInstanceOf[CompletionStage[AnyRef]]
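askWithReplyTo builds the outgoing message from the reply-to reference that the ask pattern creates internally, and the implementation above delegates to `extended.ask`. A hedged Scala sketch of the same idea through `akka.pattern.extended.ask` follows; the `Request` type is illustrative and the exact import should be checked against your Akka version.

import akka.actor.{ Actor, ActorRef, ActorSystem, Props }
import akka.pattern.extended.ask   // ask variant whose message factory receives the reply-to ref
import akka.util.Timeout
import scala.concurrent.duration._

final case class Request(replyTo: ActorRef)

object AskWithReplyToExample extends App {
  implicit val system: ActorSystem = ActorSystem("ask-reply-to")
  import system.dispatcher
  implicit val timeout: Timeout = 3.seconds

  val worker = system.actorOf(Props(new Actor {
    def receive = { case Request(replyTo) => replyTo ! "done" }
  }))

  // the factory receives the temporary ask actor, mirroring askWithReplyTo above
  ask(worker, (replyTo: ActorRef) => Request(replyTo)).foreach { reply =>
    println(reply)
    system.terminate()
  }
}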
@@ -380,7 +380,7 @@ object PatternsCS {
  *
  * @param actor the actor to be asked
  * @param messageFactory function taking an actor ref to reply to and returning the message to be sent
- * @param timeoutMillis the timeout for the response before failing the returned completion stage
+ * @param timeoutMillis the timeout for the response before failing the returned completion operator
  */
 def askWithReplyTo(actor: ActorRef, messageFactory: japi.function.Function[ActorRef, Any], timeoutMillis: Long): CompletionStage[AnyRef] =
   askWithReplyTo(actor, messageFactory, Timeout(timeoutMillis.millis))

@@ -468,7 +468,7 @@ object PatternsCS {
  * final CompletionStage<Object> f = PatternsCS.ask(worker, request, timeout);
  * // apply some transformation (i.e. enrich with request info)
  * final CompletionStage<Object> transformed = f.thenApply(result -> { ... });
- * // send it on to the next stage
+ * // send it on to the next operator
  * PatternsCS.pipe(transformed, context).to(nextActor);
  * }}}
  */
@@ -568,7 +568,7 @@ object PatternsCS {
  * Returns an internally retrying [[java.util.concurrent.CompletionStage]]
  * The first attempt will be made immediately, and each subsequent attempt will be made after 'delay'.
  * A scheduler (eg context.system.scheduler) must be provided to delay each retry
- * If attempts are exhausted the returned completion stage is simply the result of invoking attempt.
+ * If attempts are exhausted the returned completion operator is simply the result of invoking attempt.
  * Note that the attempt function will be invoked on the given execution context for subsequent tries
  * and therefore must be thread safe (not touch unsafe mutable state).
  */
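The retry helper described above has a Scala counterpart in `akka.pattern.retry`. A minimal sketch follows, assuming the `attempts`/`delay` parameter names of the 2.5-era API; the failing-then-succeeding function is made up for illustration.

import akka.actor.{ ActorSystem, Scheduler }
import akka.pattern.retry
import scala.concurrent.Future
import scala.concurrent.duration._

object RetryExample extends App {
  implicit val system: ActorSystem = ActorSystem("retry")
  import system.dispatcher
  implicit val scheduler: Scheduler = system.scheduler   // needed to delay each retry

  @volatile var calls = 0
  def attempt(): Future[Int] = {
    calls += 1
    if (calls < 3) Future.failed(new RuntimeException(s"failed attempt $calls"))
    else Future.successful(calls)
  }

  // first attempt is immediate, later attempts are delayed by 200 millis, at most 5 attempts
  retry(() => attempt(), attempts = 5, delay = 200.millis).foreach { n =>
    println(s"succeeded after $n attempts")
    system.terminate()
  }
}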
@@ -13,7 +13,7 @@ import akka.stream.stage.OutHandler

 /**
  * Emits integers from 1 to the given `elementCount`. The `java.lang.Integer`
- * objects are allocated in the constructor of the stage, so it should be created
+ * objects are allocated in the constructor of the operator, so it should be created
  * before the benchmark is started.
  */
 class BenchTestSource(elementCount: Int) extends GraphStage[SourceShape[java.lang.Integer]] {

@@ -61,7 +61,7 @@ import akka.util.WildcardIndex

 /**
  * INTERNAL API
- * Inbound API that is used by the stream stages.
+ * Inbound API that is used by the stream operators.
  * Separate trait to facilitate testing without real transport.
  */
 private[remote] trait InboundContext {

@@ -71,7 +71,7 @@ private[remote] trait InboundContext {
 def localAddress: UniqueAddress

 /**
- * An inbound stage can send control message, e.g. a reply, to the origin
+ * An inbound operator can send control message, e.g. a reply, to the origin
  * address with this method. It will be sent over the control sub-channel.
  */
 def sendControl(to: Address, message: ControlMessage): Unit

@@ -188,7 +188,7 @@ private[remote] final class AssociationState(

 /**
  * INTERNAL API
- * Outbound association API that is used by the stream stages.
+ * Outbound association API that is used by the stream operators.
  * Separate trait to facilitate testing without real transport.
  */
 private[remote] trait OutboundContext {

@@ -207,7 +207,7 @@ private[remote] trait OutboundContext {
 def quarantine(reason: String): Unit

 /**
- * An inbound stage can send control message, e.g. a HandshakeReq, to the remote
+ * An inbound operator can send control message, e.g. a HandshakeReq, to the remote
  * address of this association. It will be sent over the control sub-channel.
  */
 def sendControl(message: ControlMessage): Unit

@@ -218,7 +218,7 @@ private[remote] trait OutboundContext {
 def isOrdinaryMessageStreamActive(): Boolean

 /**
- * An outbound stage can listen to control messages
+ * An outbound operator can listen to control messages
  * via this observer subject.
  */
 def controlSubject: ControlMessageSubject

@@ -321,7 +321,7 @@ private[remote] abstract class ArteryTransport(_system: ExtendedActorSystem, _pr

 /**
  * Compression tables must be created once, such that inbound lane restarts don't cause dropping of the tables.
- * However are the InboundCompressions are owned by the Decoder stage, and any call into them must be looped through the Decoder!
+ * However are the InboundCompressions are owned by the Decoder operator, and any call into them must be looped through the Decoder!
  *
  * Use `inboundCompressionAccess` (provided by the materialized `Decoder`) to call into the compression infrastructure.
  */

@@ -207,15 +207,15 @@ private[remote] object Decoder {

 private object Tick

-/** Materialized value of [[Encoder]] which allows safely calling into the stage to interfact with compression tables. */
+/** Materialized value of [[Encoder]] which allows safely calling into the operator to interfact with compression tables. */
 private[remote] trait InboundCompressionAccess {
 def confirmActorRefCompressionAdvertisementAck(ack: ActorRefCompressionAdvertisementAck): Future[Done]
 def confirmClassManifestCompressionAdvertisementAck(ack: ClassManifestCompressionAdvertisementAck): Future[Done]
 def closeCompressionFor(originUid: Long): Future[Done]

-/** For testing purposes, usually triggered by timer from within Decoder stage. */
+/** For testing purposes, usually triggered by timer from within Decoder operator. */
 def runNextActorRefAdvertisement(): Unit
-/** For testing purposes, usually triggered by timer from within Decoder stage. */
+/** For testing purposes, usually triggered by timer from within Decoder operator. */
 def runNextClassManifestAdvertisement(): Unit
 /** For testing purposes */
 def currentCompressionOriginUids: Future[Set[Long]]

@@ -642,7 +642,7 @@ private[remote] class Deserializer(
 /**
  * INTERNAL API: The HandshakeReq message must be passed in each inbound lane to
  * ensure that it arrives before any application message. Otherwise there is a risk
- * that an application message arrives in the InboundHandshake stage before the
+ * that an application message arrives in the InboundHandshake operator before the
  * handshake is completed and then it would be dropped.
  */
 private[remote] class DuplicateHandshakeReq(

@@ -20,7 +20,7 @@ import scala.util.control.NonFatal
  * Multiple instruments are automatically handled, however they MUST NOT overlap in their idenfitiers.
  *
  * Instances of `RemoteInstrument` are created from configuration. A new instance of RemoteInstrument
- * will be created for each encoder and decoder. It's only called from the stage, so if it doesn't
+ * will be created for each encoder and decoder. It's only called from the operator, so if it doesn't
  * delegate to any shared instance it doesn't have to be thread-safe.
  */
 abstract class RemoteInstrument {
@@ -45,14 +45,14 @@ import akka.util.OptionVal

 /**
  * Sent when an incarnation of an Association is quarantined. Consumed by the
- * SystemMessageDelivery stage on the sending side, i.e. not sent to remote system.
- * The SystemMessageDelivery stage will clear the sequence number and other state associated
- * with that incarnation.
+ * SystemMessageDelivery operator on the sending side, i.e. not sent to remote system.
+ * The SystemMessageDelivery operator will clear the sequence number and other state associated
+ * operator
  *
  * The incarnation counter is bumped when the handshake is completed, so a new incarnation
  * corresponds to a new UID of the remote system.
  *
- * The SystemMessageDelivery stage also detects that the incarnation has changed when sending or resending
+ * The SystemMessageDelivery operator also detects that the incarnation has changed when sending or resending
  * system messages.
  */
 final case class ClearSystemMessageDelivery(incarnation: Int)
@@ -25,7 +25,7 @@ object TestManagementCommands {

 /**
  * INTERNAL API: Thread safe mutable state that is shared among
- * the test stages.
+ * the test operators.
  */
 private[remote] class SharedTestState {

@@ -49,7 +49,7 @@ private[remote] trait InboundCompressions {
  * INTERNAL API
  *
  * One per incoming Aeron stream, actual compression tables are kept per-originUid and created on demand.
- * All access is via the Decoder stage.
+ * All access is via the Decoder operator.
  */
 private[remote] final class InboundCompressionsImpl(
   system: ActorSystem,

@@ -25,7 +25,7 @@ object GraphStageMessages {
 case object DownstreamFinish extends StageMessage with NoSerializationVerificationNeeded

 /**
- * Sent to the probe when the stage callback threw an exception
+ * Sent to the probe when the operator callback threw an exception
  * @param operation The operation that failed
  */
 case class StageFailure(operation: StageMessage, exception: Throwable)

@@ -35,7 +35,7 @@ object TestSinkStage {

 /**
  * Creates a sink out of the `stageUnderTest` that will inform the `probe`
- * of graph stage events and callbacks by sending it the various messages found under
+ * of operator events and callbacks by sending it the various messages found under
  * [[GraphStageMessages]].
  *
  * This allows for creation of a "normal" stream ending with the sink while still being

@@ -100,7 +100,7 @@ object TestSourceStage {

 /**
  * Creates a source out of the `stageUnderTest` that will inform the `probe`
- * of graph stage events and callbacks by sending it the various messages found under
+ * of operator events and callbacks by sending it the various messages found under
  * [[GraphStageMessages]].
  *
  * This allows for creation of a "normal" stream starting with the source while still being
@@ -17,27 +17,27 @@ import scala.collection.{ Map ⇒ SMap }
 object GraphInterpreterSpecKit {

 /**
- * Create logics and enumerate stages and ports
+ * Create logics and enumerate operators and ports
  *
- * @param stages Stages to "materialize" into graph stage logic instances
- * @param upstreams Upstream boundary logics that are already instances of graph stage logic and should be
- * part of the graph, is placed before the rest of the stages
- * @param downstreams Downstream boundary logics, is placed after the other stages
- * @param attributes Optional set of attributes to pass to the stages when creating the logics
+ * @param operators Operators to "materialize" into operator logic instances
+ * @param upstreams Upstream boundary logics that are already instances of operator logic and should be
+ * part of the graph, is placed before the rest of the operators
+ * @param downstreams Downstream boundary logics, is placed after the other operators
+ * @param attributes Optional set of attributes to pass to the operators when creating the logics
  * @return Created logics and the maps of all inlets respective outlets to those logics
  */
 private[stream] def createLogics(
-  stages: Array[GraphStageWithMaterializedValue[_ <: Shape, _]],
+  operators: Array[GraphStageWithMaterializedValue[_ <: Shape, _]],
   upstreams: Array[UpstreamBoundaryStageLogic[_]],
   downstreams: Array[DownstreamBoundaryStageLogic[_]],
   attributes: Array[Attributes] = Array.empty): (Array[GraphStageLogic], SMap[Inlet[_], GraphStageLogic], SMap[Outlet[_], GraphStageLogic]) = {
-  if (attributes.nonEmpty && attributes.length != stages.length)
+  if (attributes.nonEmpty && attributes.length != operators.length)
     throw new IllegalArgumentException("Attributes must be either empty or one per stage")

   var inOwners = SMap.empty[Inlet[_], GraphStageLogic]
   var outOwners = SMap.empty[Outlet[_], GraphStageLogic]

-  val logics = new Array[GraphStageLogic](upstreams.length + stages.length + downstreams.length)
+  val logics = new Array[GraphStageLogic](upstreams.length + operators.length + downstreams.length)
   var idx = 0

   while (idx < upstreams.length) {

@@ -50,8 +50,8 @@ object GraphInterpreterSpecKit {
 }

 var stageIdx = 0
-while (stageIdx < stages.length) {
-  val stage = stages(stageIdx)
+while (stageIdx < operators.length) {
+  val stage = operators(stageIdx)
   setPortIds(stage.shape)

   val stageAttributes =

@@ -215,7 +215,7 @@ trait GraphInterpreterSpecKit extends StreamSpec {
 override def toString = "Downstream"
 }

-class AssemblyBuilder(stages: Seq[GraphStageWithMaterializedValue[_ <: Shape, _]]) {
+class AssemblyBuilder(operators: Seq[GraphStageWithMaterializedValue[_ <: Shape, _]]) {
   private var upstreams = Vector.empty[UpstreamBoundaryStageLogic[_]]
   private var downstreams = Vector.empty[DownstreamBoundaryStageLogic[_]]
   private var connectedPorts = Vector.empty[(Outlet[_], Inlet[_])]

@@ -238,7 +238,7 @@ trait GraphInterpreterSpecKit extends StreamSpec {
 }

 def init(): Unit = {
-  val (logics, inOwners, outOwners) = createLogics(stages.toArray, upstreams.toArray, downstreams.toArray)
+  val (logics, inOwners, outOwners) = createLogics(operators.toArray, upstreams.toArray, downstreams.toArray)
   val conns = createConnections(logics, connectedPorts, inOwners, outOwners)

   manualInit(logics.toArray, conns)

@@ -257,8 +257,8 @@ trait GraphInterpreterSpecKit extends StreamSpec {
   _interpreter.init(null)
 }

-def builder(stages: GraphStageWithMaterializedValue[_ <: Shape, _]*): AssemblyBuilder =
-  new AssemblyBuilder(stages.toVector)
+def builder(operators: GraphStageWithMaterializedValue[_ <: Shape, _]*): AssemblyBuilder =
+  new AssemblyBuilder(operators.toVector)

 }

@@ -57,7 +57,7 @@ object TlsSpec {
 def initSslContext(): SSLContext = initWithTrust("/truststore")

 /**
- * This is a stage that fires a TimeoutException failure 2 seconds after it was started,
+ * This is an operator that fires a TimeoutException failure 2 seconds after it was started,
  * independent of the traffic going through. The purpose is to include the last seen
  * element in the exception message to help in figuring out what went wrong.
  */
@@ -37,7 +37,7 @@ object ActorFlow {
  * still be in the mailbox, so defaulting to sending the second one a bit earlier than when first ask has replied maintains
  * a slightly healthier throughput.
  *
- * The stage fails with an [[akka.stream.WatchedActorTerminatedException]] if the target actor is terminated,
+ * The operator fails with an [[akka.stream.WatchedActorTerminatedException]] if the target actor is terminated,
  * or with an [[java.util.concurrent.TimeoutException]] in case the ask exceeds the timeout passed in.
  *
  * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute.
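ActorFlow.ask, as documented here, asks a typed actor per element and emits the reply downstream. A hedged Scala sketch against the 2.5-era akka-stream-typed API follows; the message types are illustrative, and on older Akka versions an explicit materializer may be needed instead of relying on the implicit typed system.

import akka.actor.typed.{ ActorRef, ActorSystem }
import akka.actor.typed.scaladsl.Behaviors
import akka.stream.scaladsl.{ Sink, Source }
import akka.stream.typed.scaladsl.ActorFlow
import akka.util.Timeout
import scala.concurrent.duration._

final case class Asking(s: String, replyTo: ActorRef[Reply])
final case class Reply(msg: String)

object ActorFlowAskExample extends App {
  // the guardian behavior answers every Asking with a Reply
  val replier = Behaviors.receiveMessage[Asking] { asking =>
    asking.replyTo ! Reply(asking.s + "!!!")
    Behaviors.same
  }
  implicit val system: ActorSystem[Asking] = ActorSystem(replier, "ask-flow")
  implicit val timeout: Timeout = 1.second

  Source(List("a", "b", "c"))
    .via(ActorFlow.ask(system)((el: String, replyTo: ActorRef[Reply]) => Asking(el, replyTo)))
    .runWith(Sink.foreach(println))
}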
@@ -74,7 +74,7 @@ object ActorFlow {
  *
  * otherwise `Nothing` will be assumed, which is most likely not what you want.
  *
- * The stage fails with an [[akka.stream.WatchedActorTerminatedException]] if the target actor is terminated,
+ * The operator fails with an [[akka.stream.WatchedActorTerminatedException]] if the target actor is terminated,
  * or with an [[java.util.concurrent.TimeoutException]] in case the ask exceeds the timeout passed in.
  *
  * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute.
@@ -27,7 +27,7 @@ object ActorSink {
  * i.e. if the actor is not consuming the messages fast enough the mailbox
  * of the actor will grow. For potentially slow consumer actors it is recommended
  * to use a bounded mailbox with zero `mailbox-push-timeout-time` or use a rate
- * limiting stage in front of this `Sink`.
+ * limiting operator in front of this `Sink`.
  */
 def actorRef[T](ref: ActorRef[T], onCompleteMessage: T, onFailureMessage: akka.japi.function.Function[Throwable, T]): Sink[T, NotUsed] =
   typed.scaladsl.ActorSink.actorRef(ref, onCompleteMessage, onFailureMessage.apply).asJava
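For context, a hedged Scala sketch of the scaladsl ActorSink.actorRef that the javadsl above delegates to; the protocol and behavior are made up for illustration, and as with ActorFlow older Akka versions may need an explicit materializer.

import akka.actor.typed.ActorSystem
import akka.actor.typed.scaladsl.Behaviors
import akka.stream.scaladsl.Source
import akka.stream.typed.scaladsl.ActorSink

sealed trait Protocol
final case class Message(s: String) extends Protocol
case object Complete extends Protocol
final case class Fail(ex: Throwable) extends Protocol

object ActorSinkExample extends App {
  val printer = Behaviors.receiveMessage[Protocol] { msg =>
    println(msg)
    Behaviors.same
  }
  implicit val system: ActorSystem[Protocol] = ActorSystem(printer, "actor-sink")

  // elements go straight to the actor's mailbox: no backpressure, so pair a slow
  // consumer with a rate limiting operator or a bounded mailbox as noted above
  Source(List(Message("a"), Message("b"), Message("c")))
    .runWith(ActorSink.actorRef[Protocol](ref = system, onCompleteMessage = Complete, onFailureMessage = Fail.apply))
}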
@@ -41,7 +41,7 @@ object ActorFlow {
  * still be in the mailbox, so defaulting to sending the second one a bit earlier than when first ask has replied maintains
  * a slightly healthier throughput.
  *
- * The stage fails with an [[akka.stream.WatchedActorTerminatedException]] if the target actor is terminated,
+ * The operator fails with an [[akka.stream.WatchedActorTerminatedException]] if the target actor is terminated,
  * or with an [[java.util.concurrent.TimeoutException]] in case the ask exceeds the timeout passed in.
  *
  * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute.

@@ -79,7 +79,7 @@ object ActorFlow {
  *
  * otherwise `Nothing` will be assumed, which is most likely not what you want.
  *
- * The stage fails with an [[akka.stream.WatchedActorTerminatedException]] if the target actor is terminated,
+ * The operator fails with an [[akka.stream.WatchedActorTerminatedException]] if the target actor is terminated,
  * or with an [[java.util.concurrent.TimeoutException]] in case the ask exceeds the timeout passed in.
  *
  * Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute.

@@ -129,7 +129,7 @@ public final class JavaFlowSupport {
  * A `Sink` that materializes into a {@link java.util.concurrent.Flow.Publisher}.
  * <p>
  * If {@code fanout} is {@code WITH_FANOUT}, the materialized {@code Publisher} will support multiple {@code Subscriber}s and
- * the size of the {@code inputBuffer} configured for this stage becomes the maximum number of elements that
+ * the size of the {@code inputBuffer} configured for this operator becomes the maximum number of elements that
  * the fastest {@link java.util.concurrent.Flow.Subscriber} can be ahead of the slowest one before slowing
  * the processing down due to back pressure.
  * <p>

@@ -100,7 +100,7 @@ object JavaFlowSupport {
  * A `Sink` that materializes into a [[java.util.concurrent.Flow.Publisher]].
  *
  * If `fanout` is `WITH_FANOUT`, the materialized `Publisher` will support multiple `Subscriber`s and
- * the size of the `inputBuffer` configured for this stage becomes the maximum number of elements that
+ * the size of the `inputBuffer` configured for this operator becomes the maximum number of elements that
  * the fastest [[java.util.concurrent.Flow.Subscriber]] can be ahead of the slowest one before slowing
  * the processing down due to back pressure.
  *
@@ -182,8 +182,8 @@ abstract class ActorMaterializer extends Materializer with MaterializerLoggingPr
 def settings: ActorMaterializerSettings

 /**
- * Shuts down this materializer and all the stages that have been materialized through this materializer. After
- * having shut down, this materializer cannot be used again. Any attempt to materialize stages after having
+ * Shuts down this materializer and all the operators that have been materialized through this materializer. After
+ * having shut down, this materializer cannot be used again. Any attempt to materialize operators after having
  * shut down will result in an IllegalStateException being thrown at materialization time.
  */
 def shutdown(): Unit
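A minimal sketch of that lifecycle, with illustrative names: shutting the materializer down abruptly terminates any stream still running through it, and later materializations fail with IllegalStateException.

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{ Sink, Source }
import scala.concurrent.Await
import scala.concurrent.duration._

object MaterializerShutdownExample extends App {
  implicit val system: ActorSystem = ActorSystem("mat-shutdown")
  implicit val materializer: ActorMaterializer = ActorMaterializer()

  // run a short stream to completion through this materializer
  Await.result(Source(1 to 3).runWith(Sink.foreach(println)), 3.seconds)

  materializer.shutdown()

  // any further materialization now fails at run() time with IllegalStateException
  // Source.single(1).runWith(Sink.ignore)

  system.terminate()
}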
@@ -230,7 +230,7 @@ final case class AbruptTerminationException(actor: ActorRef)
 extends RuntimeException(s"Processor actor [$actor] terminated abruptly") with NoStackTrace

 /**
- * Signal that the stage was abruptly terminated, usually seen as a call to `postStop` of the `GraphStageLogic` without
+ * Signal that the operator was abruptly terminated, usually seen as a call to `postStop` of the `GraphStageLogic` without
  * any of the handler callbacks seeing completion or failure from upstream or cancellation from downstream. This can happen when
  * the actor running the graph is killed, which happens when the materializer or actor system is terminated.
  */
@@ -478,8 +478,8 @@ final class ActorMaterializerSettings @InternalApi private (
  * overridden for specific flows of the stream operations with
  * [[akka.stream.Attributes#supervisionStrategy]].
  *
- * Note that supervision in streams are implemented on a per stage basis and is not supported
- * by every stage.
+ * Note that supervision in streams are implemented on a per operator basis and is not supported
+ * by every operator.
  */
 def withSupervisionStrategy(decider: Supervision.Decider): ActorMaterializerSettings = {
   if (decider eq this.supervisionDecider) this

@@ -491,8 +491,8 @@ final class ActorMaterializerSettings @InternalApi private (
  * overridden for specific flows of the stream operations with
  * [[akka.stream.Attributes#supervisionStrategy]].
  *
- * Note that supervision in streams are implemented on a per stage basis and is not supported
- * by every stage.
+ * Note that supervision in streams are implemented on a per operator basis and is not supported
+ * by every operator.
  */
 def withSupervisionStrategy(decider: function.Function[Throwable, Supervision.Directive]): ActorMaterializerSettings = {
   import Supervision._
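A sketch of setting a default supervision strategy on the materializer settings; the decider and stream are illustrative. The default applies to every operator that supports supervision unless overridden per operator via attributes.

import akka.actor.ActorSystem
import akka.stream.{ ActorMaterializer, ActorMaterializerSettings, Supervision }
import akka.stream.scaladsl.{ Sink, Source }

object SettingsSupervisionExample extends App {
  implicit val system: ActorSystem = ActorSystem("settings-supervision")

  // drop the failing element and keep going on ArithmeticException, stop otherwise
  val decider: Supervision.Decider = {
    case _: ArithmeticException => Supervision.Resume
    case _                      => Supervision.Stop
  }

  val settings = ActorMaterializerSettings(system).withSupervisionStrategy(decider)
  implicit val materializer: ActorMaterializer = ActorMaterializer(settings)

  // 100 / 0 throws, that element is dropped, the rest of the stream continues
  Source(0 to 3).map(n => 100 / n).runWith(Sink.foreach(println))
}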
@@ -30,7 +30,7 @@ import scala.concurrent.duration.FiniteDuration
  * The ``attributeList`` is ordered with the most specific attribute first, least specific last.
  * Note that the order was the opposite in Akka 2.4.x.
  *
- * Stages should in general not access the `attributeList` but instead use `get` to get the expected
+ * Operators should in general not access the `attributeList` but instead use `get` to get the expected
  * value of an attribute.
  */
 final case class Attributes(attributeList: List[Attributes.Attribute] = Nil) {

@@ -54,10 +54,10 @@ final case class Attributes(attributeList: List[Attributes.Attribute] = Nil) {
  * Java API: Get the most specific attribute value for a given Attribute type or subclass thereof.
  * If no such attribute exists, return a `default` value.
  *
- * The most specific value is the value that was added closest to the graph or stage itself or if
+ * The most specific value is the value that was added closest to the graph or operator itself or if
  * the same attribute was added multiple times to the same graph, the last to be added.
  *
- * This is the expected way for stages to access attributes.
+ * This is the expected way for operators to access attributes.
  */
 def getAttribute[T <: Attribute](c: Class[T], default: T): T =
   getAttribute(c).orElse(default)

@@ -65,10 +65,10 @@ final case class Attributes(attributeList: List[Attributes.Attribute] = Nil) {
 /**
  * Java API: Get the most specific attribute value for a given Attribute type or subclass thereof.
  *
- * The most specific value is the value that was added closest to the graph or stage itself or if
+ * The most specific value is the value that was added closest to the graph or operator itself or if
  * the same attribute was added multiple times to the same graph, the last to be added.
  *
- * This is the expected way for stages to access attributes.
+ * This is the expected way for operators to access attributes.
  */
 def getAttribute[T <: Attribute](c: Class[T]): Optional[T] =
   attributeList.collectFirst { case attr if c.isInstance(attr) ⇒ c.cast(attr) }.asJava

@@ -77,10 +77,10 @@ final case class Attributes(attributeList: List[Attributes.Attribute] = Nil) {
  * Scala API: Get the most specific attribute value for a given Attribute type or subclass thereof or
  * if no such attribute exists, return a default value.
  *
- * The most specific value is the value that was added closest to the graph or stage itself or if
+ * The most specific value is the value that was added closest to the graph or operator itself or if
  * the same attribute was added multiple times to the same graph, the last to be added.
  *
- * This is the expected way for stages to access attributes.
+ * This is the expected way for operators to access attributes.
  */
 def get[T <: Attribute: ClassTag](default: T): T =
   get[T] match {

@@ -91,10 +91,10 @@ final case class Attributes(attributeList: List[Attributes.Attribute] = Nil) {
 /**
  * Scala API: Get the most specific attribute value for a given Attribute type or subclass thereof.
  *
- * The most specific value is the value that was added closest to the graph or stage itself or if
+ * The most specific value is the value that was added closest to the graph or operator itself or if
  * the same attribute was added multiple times to the same graph, the last to be added.
  *
- * This is the expected way for stages to access attributes.
+ * This is the expected way for operators to access attributes.
  *
  * @see [[Attributes#get()]] For providing a default value if the attribute was not set
  */
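A small sketch of the `get` accessors described above, using the built-in Name attribute. This is a value-level example; inside a GraphStage the same calls are typically made on the inherited attributes.

import akka.stream.Attributes
import akka.stream.Attributes.Name

object AttributesGetExample extends App {
  val attributes = Attributes.name("myOperator")

  // `get` returns the most specific value of the requested attribute type, if any
  println(attributes.get[Name])                    // Some(Name(myOperator))
  // with a default, the fallback is only used when the attribute is absent
  println(attributes.get[Name](Name("fallback")))  // Name(myOperator)
  println(attributes.contains(Name("myOperator"))) // true
}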
@@ -187,7 +187,7 @@ final case class Attributes(attributeList: List[Attributes.Attribute] = Nil) {
 /**
  * Test whether the given attribute is contained within this attributes list.
  *
- * Note that stages in general should not inspect the whole hierarchy but instead use
+ * Note that operators in general should not inspect the whole hierarchy but instead use
  * `get` to get the most specific attribute value.
  */
 def contains(attr: Attribute): Boolean = attributeList.contains(attr)

@@ -198,7 +198,7 @@ final case class Attributes(attributeList: List[Attributes.Attribute] = Nil) {
  * The list is ordered with the most specific attribute first, least specific last.
  * Note that the order was the opposite in Akka 2.4.x.
  *
- * Note that stages in general should not inspect the whole hierarchy but instead use
+ * Note that operators in general should not inspect the whole hierarchy but instead use
  * `get` to get the most specific attribute value.
  */
 def getAttributeList(): java.util.List[Attribute] = {

@@ -213,7 +213,7 @@ final case class Attributes(attributeList: List[Attributes.Attribute] = Nil) {
  * The list is ordered with the most specific attribute first, least specific last.
  * Note that the order was the opposite in Akka 2.4.x.
  *
- * Note that stages in general should not inspect the whole hierarchy but instead use
+ * Note that operators in general should not inspect the whole hierarchy but instead use
  * `get` to get the most specific attribute value.
  */
 def getAttributeList[T <: Attribute](c: Class[T]): java.util.List[T] =

@@ -230,7 +230,7 @@ final case class Attributes(attributeList: List[Attributes.Attribute] = Nil) {
 /**
  * Scala API: Get all attributes of a given type (or subtypes thereof).
  *
- * Note that stages in general should not inspect the whole hierarchy but instead use
+ * Note that operators in general should not inspect the whole hierarchy but instead use
  * `get` to get the most specific attribute value.
  *
  * The list is ordered with the most specific attribute first, least specific last.
@@ -328,7 +328,7 @@ object Attributes {
 /**
  * Java API
  *
- * Configures `log()` stage log-levels to be used when logging.
+ * Configures `log()` operator log-levels to be used when logging.
  * Logging a certain operation can be completely disabled by using [[LogLevels.Off]].
  *
  * Passing in null as any of the arguments sets the level to its default value, which is:

@@ -341,7 +341,7 @@ object Attributes {
   onFailure = Option(onFailure).getOrElse(Logging.ErrorLevel))

 /**
- * Configures `log()` stage log-levels to be used when logging.
+ * Configures `log()` operator log-levels to be used when logging.
  * Logging a certain operation can be completely disabled by using [[LogLevels.Off]].
  *
  * See [[Attributes.createLogLevels]] for Java API
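A sketch of attaching those log levels to a `log()` operator; the stream contents and chosen levels are illustrative.

import akka.actor.ActorSystem
import akka.event.Logging
import akka.stream.{ ActorMaterializer, Attributes }
import akka.stream.scaladsl.{ Sink, Source }

object LogLevelsExample extends App {
  implicit val system: ActorSystem = ActorSystem("log-levels")
  implicit val materializer: ActorMaterializer = ActorMaterializer()

  Source(1 to 3)
    .log("my-operator")
    .withAttributes(Attributes.logLevels(
      onElement = Logging.InfoLevel,   // each element at info
      onFinish = Logging.InfoLevel,    // stream completion at info
      onFailure = Logging.ErrorLevel)) // failures at error
    .runWith(Sink.ignore)
}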
@@ -402,7 +402,7 @@ object ActorAttributes {
 /**
  * Scala API: Decides how exceptions from user are to be handled.
  *
- * Stages supporting supervision strategies explicitly document that they do so. If a stage does not document
+ * Operators supporting supervision strategies explicitly document that they do so. If a operator does not document
  * support for these, it should be assumed it does not support supervision.
  */
 def supervisionStrategy(decider: Supervision.Decider): Attributes =

@@ -411,7 +411,7 @@ object ActorAttributes {
 /**
  * Java API: Decides how exceptions from application code are to be handled.
  *
- * Stages supporting supervision strategies explicitly document that they do so. If a stage does not document
+ * Operators supporting supervision strategies explicitly document that they do so. If a operator does not document
  * support for these, it should be assumed it does not support supervision.
  */
 def withSupervisionStrategy(decider: function.Function[Throwable, Supervision.Directive]): Attributes =
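A sketch of the per-operator form, overriding supervision on a single operator through attributes rather than through the materializer settings shown earlier; the decider and stream are illustrative.

import akka.actor.ActorSystem
import akka.stream.{ ActorAttributes, ActorMaterializer, Supervision }
import akka.stream.scaladsl.{ Sink, Source }

object PerOperatorSupervisionExample extends App {
  implicit val system: ActorSystem = ActorSystem("per-operator-supervision")
  implicit val materializer: ActorMaterializer = ActorMaterializer()

  val decider: Supervision.Decider = {
    case _: ArithmeticException => Supervision.Resume // drop the element, keep the stream running
    case _                      => Supervision.Stop
  }

  Source(-2 to 2)
    .map(n => 10 / n) // throws for n == 0
    .withAttributes(ActorAttributes.supervisionStrategy(decider))
    .runWith(Sink.foreach(println))
}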
@@ -420,7 +420,7 @@ object ActorAttributes {
 /**
  * Java API
  *
- * Configures `log()` stage log-levels to be used when logging.
+ * Configures `log()` operator log-levels to be used when logging.
  * Logging a certain operation can be completely disabled by using [[LogLevels.Off]].
  *
  * Passing in null as any of the arguments sets the level to its default value, which is:

@@ -433,7 +433,7 @@ object ActorAttributes {
   onFailure = Option(onFailure).getOrElse(Logging.ErrorLevel))

 /**
- * Configures `log()` stage log-levels to be used when logging.
+ * Configures `log()` operator log-levels to be used when logging.
  * Logging a certain operation can be completely disabled by using [[LogLevels.Off]].
  *
  * See [[Attributes.createLogLevels]] for Java API
@@ -184,7 +184,7 @@ private[stream] final class TerminationSignal {

 /**
  * A [[UniqueKillSwitch]] is always a result of a materialization (unlike [[SharedKillSwitch]] which is constructed
- * before any materialization) and it always controls that graph and stage which yielded the materialized value.
+ * before any materialization) and it always controls that graph and operator which yielded the materialized value.
  *
  * After calling [[UniqueKillSwitch#shutdown()]] the running instance of the [[Graph]] of [[FlowShape]] that materialized to the
  * [[UniqueKillSwitch]] will complete its downstream and cancel its upstream (unless if finished or failed already in which
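A sketch of materializing a UniqueKillSwitch with KillSwitches.single and shutting the stream down from the outside; the tick source and timing are illustrative.

import akka.actor.ActorSystem
import akka.stream.{ ActorMaterializer, KillSwitches, UniqueKillSwitch }
import akka.stream.scaladsl.{ Keep, Sink, Source }
import scala.concurrent.duration._

object KillSwitchExample extends App {
  implicit val system: ActorSystem = ActorSystem("kill-switch")
  implicit val materializer: ActorMaterializer = ActorMaterializer()
  import system.dispatcher

  // materializing KillSwitches.single yields the UniqueKillSwitch that controls exactly this stream
  val (killSwitch: UniqueKillSwitch, done) =
    Source.tick(0.millis, 100.millis, "tick")
      .viaMat(KillSwitches.single)(Keep.right)
      .toMat(Sink.foreach(println))(Keep.both)
      .run()

  system.scheduler.scheduleOnce(1.second) {
    killSwitch.shutdown() // completes downstream and cancels upstream
  }
  done.foreach(_ => system.terminate())
}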
@@ -56,12 +56,12 @@ abstract class Materializer {
  * can be used by parts of the flow to submit processing jobs for execution,
  * run Future callbacks, etc.
  *
- * Note that this is not necessarily the same execution context the stream stage itself is running on.
+ * Note that this is not necessarily the same execution context the stream operator itself is running on.
  */
 implicit def executionContext: ExecutionContextExecutor

 /**
- * Interface for stages that need timer services for their functionality. Schedules a
+ * Interface for operators that need timer services for their functionality. Schedules a
  * single task with the given delay.
  *
  * @return A [[akka.actor.Cancellable]] that allows cancelling the timer. Cancelling is best effort, if the event

@@ -70,7 +70,7 @@ abstract class Materializer {
 def scheduleOnce(delay: FiniteDuration, task: Runnable): Cancellable

 /**
- * Interface for stages that need timer services for their functionality. Schedules a
+ * Interface for operators that need timer services for their functionality. Schedules a
  * repeated task with the given interval between invocations.
  *
  * @return A [[akka.actor.Cancellable]] that allows cancelling the timer. Cancelling is best effort, if the event
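A sketch of the one-off timer service on the materializer; the delay and task are illustrative, and the returned Cancellable cancels on a best-effort basis as the Scaladoc above notes.

import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import scala.concurrent.duration._

object MaterializerTimerExample extends App {
  implicit val system: ActorSystem = ActorSystem("mat-timer")
  val materializer = ActorMaterializer()

  // schedule a one-off task; keep the Cancellable if you may need to cancel it
  val cancellable = materializer.scheduleOnce(500.millis, new Runnable {
    def run(): Unit = {
      println("timer fired")
      system.terminate()
    }
  })
  // cancellable.cancel() would try to stop the task before it fires (best effort)
}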
@@ -8,7 +8,7 @@ import akka.event.LoggingAdapter

 /**
  * SPI intended only to be extended by custom [[Materializer]] implementations,
- * that also want to provide stages they materialize with specialized [[akka.event.LoggingAdapter]] instances.
+ * that also want to provide operators they materialize with specialized [[akka.event.LoggingAdapter]] instances.
  */
 trait MaterializerLoggingProvider { this: Materializer ⇒

@@ -8,7 +8,7 @@ import OverflowStrategies._
 import akka.annotation.DoNotInherit

 /**
- * Represents a strategy that decides how to deal with a buffer of time based stage
+ * Represents a strategy that decides how to deal with a buffer of time based operator
  * that is full but is about to receive a new element.
  */
 @DoNotInherit

@@ -136,7 +136,7 @@ case object IgnoreBoth extends IgnoreBoth
 object TLSProtocol {

 /**
- * This is the supertype of all messages that the SslTls stage emits on the
+ * This is the supertype of all messages that the SslTls operator emits on the
  * plaintext side.
  */
 sealed trait SslTlsInbound

@@ -166,7 +166,7 @@ object TLSProtocol {
 final case class SessionBytes(session: SSLSession, bytes: ByteString) extends SslTlsInbound with scaladsl.ScalaSessionAPI

 /**
- * This is the supertype of all messages that the SslTls stage accepts on its
+ * This is the supertype of all messages that the SslTls operator accepts on its
  * plaintext side.
  */
 sealed trait SslTlsOutbound
@@ -34,18 +34,18 @@ object Supervision {
 def resume = Resume

 /**
- * Scala API: The element is dropped and the stream continues after restarting the stage
+ * Scala API: The element is dropped and the stream continues after restarting the operator
  * if application code for processing an element throws an exception.
- * Restarting a stage means that any accumulated state is cleared. This is typically
- * performed by creating a new instance of the stage.
+ * Restarting an operator means that any accumulated state is cleared. This is typically
+ * performed by creating a new instance of the operator.
  */
 case object Restart extends Directive

 /**
- * Java API: The element is dropped and the stream continues after restarting the stage
+ * Java API: The element is dropped and the stream continues after restarting the operator
  * if application code for processing an element throws an exception.
- * Restarting a stage means that any accumulated state is cleared. This is typically
- * performed by creating a new instance of the stage.
+ * Restarting an operator means that any accumulated state is cleared. This is typically
+ * performed by creating a new instance of the operator.
  */
 def restart = Restart

@@ -7,7 +7,7 @@ package akka.stream
import akka.actor.ActorRef

/**
- * Used as failure exception by an `ask` stage if the target actor terminates.
+ * Used as failure exception by an `ask` operator if the target actor terminates.
* See `Flow.ask` and `Flow.watch`.
*/
final class WatchedActorTerminatedException(val watchingStageName: String, val ref: ActorRef)

@@ -89,7 +89,7 @@ import scala.concurrent.{ Await, ExecutionContextExecutor }
}

/**
- * This materializer replaces the default phase with one that will fuse stages into an existing interpreter (via `registerShell`),
+ * This materializer replaces the default phase with one that will fuse operators into an existing interpreter (via `registerShell`),
* rather than start a new actor for each of them.
*
* The default phases are left in-tact since we still respect `.async` and other tags that were marked within a sub-fused graph.

@@ -17,12 +17,12 @@ import scala.concurrent.duration.{ Duration, FiniteDuration }
/**
* INTERNAL API
*
- * Various stages for controlling timeouts on IO related streams (although not necessarily).
+ * Various operators for controlling timeouts on IO related streams (although not necessarily).
*
- * The common theme among the processing stages here that
+ * The common theme among the processing operators here that
* - they wait for certain event or events to happen
* - they have a timer that may fire before these events
- * - if the timer fires before the event happens, these stages all fail the stream
+ * - if the timer fires before the event happens, these operators all fail the stream
* - otherwise, these streams do not interfere with the element flow, ordinary completion or failure
*/
@InternalApi private[akka] object Timers {

@@ -455,7 +455,7 @@ import scala.util.control.NonFatal
/**
* @param promise Will be completed upon processing the event, or failed if processing the event throws
- * if the event isn't ever processed the promise (the stage stops) is failed elsewhere
+ * if the event isn't ever processed the promise (the operator stops) is failed elsewhere
*/
final case class AsyncInput(
shell: GraphInterpreterShell,

@@ -72,8 +72,8 @@ import akka.stream.Attributes.LogLevels
* between an output and input ports.
*
* @param id Identifier of the connection.
- * @param inOwner The stage logic that corresponds to the input side of the connection.
+ * @param inOwner The operator logic that corresponds to the input side of the connection.
- * @param outOwner The stage logic that corresponds to the output side of the connection.
+ * @param outOwner The operator logic that corresponds to the output side of the connection.
* @param inHandler The handler that contains the callback for input events.
* @param outHandler The handler that contains the callback for output events.
*/

@@ -128,7 +128,7 @@ import akka.stream.Attributes.LogLevels
*
* The [[execute()]] method of the interpreter accepts an upper bound on the events it will process. After this limit
* is reached or there are no more pending events to be processed, the call returns. It is possible to inspect
- * if there are unprocessed events left via the [[isSuspended]] method. [[isCompleted]] returns true once all stages
+ * if there are unprocessed events left via the [[isSuspended]] method. [[isCompleted]] returns true once all operators
* reported completion inside the interpreter.
*
* The internal architecture of the interpreter is based on the usage of arrays and optimized for reducing allocations

@@ -176,7 +176,7 @@ import akka.stream.Attributes.LogLevels
* is a failure
*
* Sending an event is usually the following sequence:
- * - An action is requested by a stage logic (push, pull, complete, etc.)
+ * - An action is requested by an operator logic (push, pull, complete, etc.)
* - the state machine in portStates is transitioned from a ready state to a pending event
* - the affected Connection is enqueued
*

@@ -184,7 +184,7 @@ import akka.stream.Attributes.LogLevels
* - the Connection to be processed is dequeued
* - the type of the event is determined from the bits set on portStates
* - the state machine in portStates is transitioned to a ready state
- * - using the inHandlers/outHandlers table the corresponding callback is called on the stage logic.
+ * - using the inHandlers/outHandlers table the corresponding callback is called on the operator logic.
*
* Because of the FIFO construction of the queue the interpreter is fair, i.e. a pending event is always executed
* after a bounded number of other events. This property, together with suspendability means that even infinite cycles can

@@ -273,14 +273,14 @@ import akka.stream.Attributes.LogLevels
def isSuspended: Boolean = queueHead != queueTail

/**
- * Returns true if there are no more running stages and pending events.
+ * Returns true if there are no more running operators and pending events.
*/
def isCompleted: Boolean = runningStages == 0 && !isSuspended

/**
- * Initializes the states of all the stage logics by calling preStart().
+ * Initializes the states of all the operator logics by calling preStart().
* The passed-in materializer is intended to be a SubFusingActorMaterializer
- * that avoids creating new Actors when stages materialize sub-flows. If no
+ * that avoids creating new Actors when operators materialize sub-flows. If no
* such materializer is available, passing in `null` will reuse the normal
* materializer for the GraphInterpreter—fusing is only an optimization.
*/

@@ -304,7 +304,7 @@ import akka.stream.Attributes.LogLevels
}

/**
- * Finalizes the state of all stages by calling postStop() (if necessary).
+ * Finalizes the state of all operators by calling postStop() (if necessary).
*/
def finish(): Unit = {
var i = 0

@@ -446,12 +446,12 @@ import scala.concurrent.{ Future, Promise }
/**
* INTERNAL API.
*
- * Fusing graphs that have cycles involving FanIn stages might lead to deadlocks if
+ * Fusing graphs that have cycles involving FanIn operators might lead to deadlocks if
* demand is not carefully managed.
*
- * This means that FanIn stages need to early pull every relevant input on startup.
+ * This means that FanIn operators need to early pull every relevant input on startup.
- * This can either be implemented inside the stage itself, or this method can be used,
+ * This can either be implemented inside the operator itself, or this method can be used,
- * which adds a detacher stage to every input.
+ * which adds a detacher operator to every input.
*/
@InternalApi private[stream] def withDetachedInputs[T](stage: GraphStage[UniformFanInShape[T, T]]) =
GraphDSL.create() { implicit builder ⇒

@@ -280,7 +280,7 @@ private[stream] object Collect {
/**
* Maps error with the provided function if it is defined for an error or, otherwise, passes it on unchanged.
*
- * While similar to [[Recover]] this stage can be used to transform an error signal to a different one *without* logging
+ * While similar to [[Recover]] this operator can be used to transform an error signal to a different one *without* logging
* it as an error in the process. So in that sense it is NOT exactly equivalent to `recover(t => throw t2)` since recover
* would log the `t2` error.
*/

@@ -11,9 +11,9 @@ package akka.stream
* for composing streams. These DSLs are a thin wrappers around the internal [[akka.stream.impl.TraversalBuilder]]
* builder classes. There are Java alternatives of these DSLs in [[javadsl]] which basically wrap their scala
* counterpart, delegating method calls.
- * * The [[akka.stream.stage.GraphStage]] API is the user facing API for creating new stream processing stages. These
+ * * The [[akka.stream.stage.GraphStage]] API is the user facing API for creating new stream operators. These
* classes are used by the [[akka.stream.impl.fusing.GraphInterpreter]] which executes islands (subgraphs) of these
- * stages
+ * operators
* * The high level DSLs use the [[akka.stream.impl.TraversalBuilder]] classes to build instances of
* [[akka.stream.impl.Traversal]] which are the representation of a materializable stream description. These builders
* are immutable and safely shareable. Unlike the top-level DSLs, these are untyped, i.e. elements are treated as

@@ -22,7 +22,7 @@ package akka.stream
* can be materialized. The builders exists solely for the purpose of producing a traversal in the end.
* * The [[akka.stream.impl.PhasedFusingActorMaterializer]] is the class that is responsible for traversing and
* interpreting a [[akka.stream.impl.Traversal]]. It delegates the actual task of creating executable entities
- * and Publishers/Producers to [[akka.stream.impl.PhaseIsland]]s which are plugins that understand atomic stages
+ * and Publishers/Producers to [[akka.stream.impl.PhaseIsland]]s which are plugins that understand atomic operators
* in the graph and able to turn them into executable entities.
* * The [[akka.stream.impl.fusing.GraphInterpreter]] and its actor backed wrapper [[akka.stream.impl.fusing.ActorGraphInterpreter]]
* are used to execute synchronous islands (subgraphs) of [[akka.stream.stage.GraphStage]]s.

@@ -45,7 +45,7 @@ package akka.stream
* * materialization should not pay the price of island tracking if there is only a single island
* * assume that the number of islands is low in general
* * avoid "copiedModule" i.e. wrappers that exist solely for the purpose of establishing new port identities
- * for stages that are used multiple times in the same graph.
+ * for operators that are used multiple times in the same graph.
* * Avoid hashmaps and prefer direct array lookup wherever possible
*
* Semantically, a traversal is a list of commands that the materializer must execute to turn the description to a

@@ -71,10 +71,10 @@ package akka.stream
* the result back on the top of the stack
* * [[akka.stream.impl.Compose]] take the top two values of the stack, invoke the provided function with these
* values as arguments, then put the calculated value on the top of the stack
- * * Materialized values of atomic stages when visiting a [[akka.stream.impl.MaterializeAtomic]] must be
+ * * Materialized values of atomic operators when visiting a [[akka.stream.impl.MaterializeAtomic]] must be
* pushed to the stack automatically. There are no explicit PUSH commands for this
* * Attributes calculation. These also are a stack language, although much simpler than the materialized value
- * commands. For any materialized stage, the top of the attributes stack should be provided as the current
+ * commands. For any materialized operator, the top of the attributes stack should be provided as the current
* effective attributes.
* * [[akka.stream.impl.PushAttributes]] combines the attributes on the top of the stack with the given ones and
* puts the result on the attributes stack

@@ -86,7 +86,7 @@ package akka.stream
* as exiting a "hole" means returning to the parent, enclosing island and continuing where left.
* * [[akka.stream.impl.EnterIsland]] instructs the materializer that the following commands will belong to
* the materialization of a new island (a subgraph). The [[akka.stream.impl.IslandTag]] signals to
- * the materializer which [[akka.stream.impl.PhaseIsland]] should be used to turn stages of this island into
+ * the materializer which [[akka.stream.impl.PhaseIsland]] should be used to turn operators of this island into
* executable entities.
* * [[akka.stream.impl.ExitIsland]] instructs the materializer that the current island is done and the parent
* island is now the active one again.

@@ -101,7 +101,7 @@ package akka.stream
*
* As a mental model, the wiring part of the Traversal (i.e. excluding the stack based sub-commands tracking
* materialized values, attributes, islands, i.e. things that don't contribute to the wiring structure of the graph)
- * translates everything to a single, global, contiguous Array. Every input and output port of each stage is mapped
+ * translates everything to a single, global, contiguous Array. Every input and output port of each operator is mapped
* to exactly one slot of this "mental array". Input and output ports that are considered wired together simply map
* to the same slot. (In practice, these slots might not be mapped to an actual global array, but multiple local arrays
* using some translation logic, but we will explain this later)

@@ -109,15 +109,15 @@ package akka.stream
* Input ports are mapped simply to contiguous numbers in the order they are visited. Take for example a simple
* traversal:
*
- * Stage1[in1, in2, out] - Stage2[out] - Stage3[in]
+ * Operator1[in1, in2, out] - Operator2[out] - Operator3[in]
*
* This results in the following slot assignments:
*
- * * Stage1.in1 -> 0
+ * * Operator1.in1 -> 0
- * * Stage1.in2 -> 1
+ * * Operator1.in2 -> 1
- * * Stage3.in -> 2
+ * * Operator3.in -> 2
*
- * The materializer simply visits Stage1, Stage2, Stage3 in order, visiting the input ports of each stage in order.
+ * The materializer simply visits Stage1, Stage2, Stage3 in order, visiting the input ports of each operator in order.
* It then simply assigns numbers from a counter that is incremented after visiting an input port.
* (Please note that all [[akka.stream.impl.StreamLayout.AtomicModule]]s maintain a stable order of their ports, so
* this global ordering is well defined)

@@ -189,7 +189,7 @@ package akka.stream
* builders are their approach to port mapping.
*
* The simpler case is the [[akka.stream.impl.LinearTraversalBuilder]]. This builder only allows building linear
- * chains of stages, hence, it can only have at most one [[OutPort]] and [[InPort]] unwired. Since there is no
+ * chains of operators, hence, it can only have at most one [[OutPort]] and [[InPort]] unwired. Since there is no
* possible ambiguity between these two port types, there is no need for port mapping for these. Conversely,
* for those internal ports that are already wired, there is no need for port mapping as their relative wiring
* is not ambiguous (see previous section). As a result, the [[akka.stream.impl.LinearTraversalBuilder]] does not

@@ -25,7 +25,7 @@ private[stream] final case class SinkRefImpl[In](initialPartnerRef: ActorRef) ex
}

/**
- * INTERNAL API: Actual stage implementation backing [[SinkRef]]s.
+ * INTERNAL API: Actual operator implementation backing [[SinkRef]]s.
*
* If initialPartnerRef is set, then the remote side is already set up. If it is none, then we are the side creating
* the ref.

@@ -25,7 +25,7 @@ private[stream] final case class SourceRefImpl[T](initialPartnerRef: ActorRef) e
}

/**
- * INTERNAL API: Actual stage implementation backing [[SourceRef]]s.
+ * INTERNAL API: Actual operator implementation backing [[SourceRef]]s.
*
* If initialPartnerRef is set, then the remote side is already set up.
* If it is none, then we are the side creating the ref.

@@ -78,7 +78,7 @@ object BidiFlow {
/**
* Create a BidiFlow where the top and bottom flows are just one simple mapping
- * stage each, expressed by the two functions.
+ * operator each, expressed by the two functions.
*/
def fromFunctions[I1, O1, I2, O2](top: function.Function[I1, O1], bottom: function.Function[I2, O2]): BidiFlow[I1, O1, I2, O2, NotUsed] =
new BidiFlow(scaladsl.BidiFlow.fromFunctions(top.apply _, bottom.apply _))

@@ -87,9 +87,9 @@ object BidiFlow {
* If the time between two processed elements *in any direction* exceed the provided timeout, the stream is failed
* with a [[java.util.concurrent.TimeoutException]].
*
- * There is a difference between this stage and having two idleTimeout Flows assembled into a BidiStage.
+ * There is a difference between this operator and having two idleTimeout Flows assembled into a BidiStage.
- * If the timeout is configured to be 1 seconds, then this stage will not fail even though there are elements flowing
+ * If the timeout is configured to be 1 seconds, then this operator will not fail even though there are elements flowing
- * every second in one direction, but no elements are flowing in the other direction. I.e. this stage considers
+ * every second in one direction, but no elements are flowing in the other direction. I.e. this operator considers
* the *joint* frequencies of the elements in both directions.
*/
@Deprecated

@@ -101,9 +101,9 @@ object BidiFlow {
* If the time between two processed elements *in any direction* exceed the provided timeout, the stream is failed
* with a [[java.util.concurrent.TimeoutException]].
*
- * There is a difference between this stage and having two idleTimeout Flows assembled into a BidiStage.
+ * There is a difference between this operator and having two idleTimeout Flows assembled into a BidiStage.
- * If the timeout is configured to be 1 seconds, then this stage will not fail even though there are elements flowing
+ * If the timeout is configured to be 1 seconds, then this operator will not fail even though there are elements flowing
- * every second in one direction, but no elements are flowing in the other direction. I.e. this stage considers
+ * every second in one direction, but no elements are flowing in the other direction. I.e. this operator considers
* the *joint* frequencies of the elements in both directions.
*/
def bidirectionalIdleTimeout[I, O](timeout: java.time.Duration): BidiFlow[I, I, O, O, NotUsed] = {
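As a minimal sketch of how such a bidirectional idle guard is typically used (Scala DSL equivalent; `connectionFlow` is a hypothetical placeholder, not part of the commit):

{{{
import scala.concurrent.duration._
import akka.NotUsed
import akka.stream.scaladsl.{ BidiFlow, Flow }
import akka.util.ByteString

// Fails the stream with a TimeoutException when *neither* direction has seen
// an element for 3 seconds; traffic in one direction alone keeps it alive.
val idleGuard: BidiFlow[ByteString, ByteString, ByteString, ByteString, NotUsed] =
  BidiFlow.bidirectionalIdleTimeout[ByteString, ByteString](3.seconds)

// Join the guard around a hypothetical transport flow, e.g. a TCP connection flow.
def guarded(connectionFlow: Flow[ByteString, ByteString, NotUsed]): Flow[ByteString, ByteString, NotUsed] =
  idleGuard.join(connectionFlow)
}}}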
@@ -221,7 +221,7 @@ final class BidiFlow[I1, O1, I2, O2, Mat](delegate: scaladsl.BidiFlow[I1, O1, I2
* of attributes. This means that further calls will not be able to remove these
* attributes, but instead add new ones. Note that this
* operation has no effect on an empty Flow (because the attributes apply
- * only to the contained processing stages).
+ * only to the contained processing operators).
*/
override def withAttributes(attr: Attributes): BidiFlow[I1, O1, I2, O2, Mat] =
new BidiFlow(delegate.withAttributes(attr))

@@ -230,7 +230,7 @@ final class BidiFlow[I1, O1, I2, O2, Mat](delegate: scaladsl.BidiFlow[I1, O1, I2
* Add the given attributes to this Source. Further calls to `withAttributes`
* will not remove these attributes. Note that this
* operation has no effect on an empty Flow (because the attributes apply
- * only to the contained processing stages).
+ * only to the contained processing operators).
*/
override def addAttributes(attr: Attributes): BidiFlow[I1, O1, I2, O2, Mat] =
new BidiFlow(delegate.addAttributes(attr))

@@ -6,12 +6,12 @@ package akka.stream.javadsl
/**
* Allows coupling termination (cancellation, completion, erroring) of Sinks and Sources while creating a Flow them them.
- * Similar to `Flow.fromSinkAndSource` however that API does not connect the completion signals of the wrapped stages.
+ * Similar to `Flow.fromSinkAndSource` however that API does not connect the completion signals of the wrapped operators.
*/
object CoupledTerminationFlow {

/**
- * Similar to [[Flow.fromSinkAndSource]] however couples the termination of these two stages.
+ * Similar to [[Flow.fromSinkAndSource]] however couples the termination of these two operators.
*
* E.g. if the emitted [[Flow]] gets a cancellation, the [[Source]] of course is cancelled,
* however the Sink will also be completed. The table below illustrates the effects in detail:

@@ -117,7 +117,7 @@ object Flow {
/**
* Allows coupling termination (cancellation, completion, erroring) of Sinks and Sources while creating a Flow from them.
- * Similar to [[Flow.fromSinkAndSource]] however couples the termination of these two stages.
+ * Similar to [[Flow.fromSinkAndSource]] however couples the termination of these two operators.
*
* The resulting flow can be visualized as:
* {{{

@@ -180,7 +180,7 @@ object Flow {
/**
* Allows coupling termination (cancellation, completion, erroring) of Sinks and Sources while creating a Flow from them.
- * Similar to [[Flow.fromSinkAndSource]] however couples the termination of these two stages.
+ * Similar to [[Flow.fromSinkAndSource]] however couples the termination of these two operators.
*
* The resulting flow can be visualized as:
* {{{

@@ -492,11 +492,11 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
* This operation is useful for inspecting the passed through element, usually by means of side-effecting
* operations (such as `println`, or emitting metrics), for each element without having to modify it.
*
- * For logging signals (elements, completion, error) consider using the [[log]] stage instead,
+ * For logging signals (elements, completion, error) consider using the [[log]] operator instead,
* along with appropriate `ActorAttributes.logLevels`.
*
* '''Emits when''' upstream emits an element; the same element will be passed to the attached function,
- * as well as to the downstream stage
+ * as well as to the downstream operator
*
* '''Backpressures when''' downstream backpressures
*
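For reference, a minimal sketch of the logging alternative mentioned above (Scala DSL; the stream name and levels are illustrative, and the levels are set here via `Attributes.logLevels`):

{{{
import akka.event.Logging
import akka.stream.Attributes
import akka.stream.scaladsl.Flow

// Log each element at DEBUG, completion at INFO and failures at ERROR,
// instead of side-effecting with println for each element.
val loggedInts = Flow[Int]
  .log("my-ints")
  .withAttributes(Attributes.logLevels(
    onElement = Logging.DebugLevel,
    onFinish  = Logging.InfoLevel,
    onFailure = Logging.ErrorLevel))
}}}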
@@ -637,13 +637,13 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
* The `mapTo` class parameter is used to cast the incoming responses to the expected response type.
*
* Similar to the plain ask pattern, the target actor is allowed to reply with `akka.util.Status`.
- * An `akka.util.Status#Failure` will cause the stage to fail with the cause carried in the `Failure` message.
+ * An `akka.util.Status#Failure` will cause the operator to fail with the cause carried in the `Failure` message.
*
* Defaults to parallelism of 2 messages in flight, since while one ask message may be being worked on, the second one
* still be in the mailbox, so defaulting to sending the second one a bit earlier than when first ask has replied maintains
* a slightly healthier throughput.
*
- * The stage fails with an [[akka.stream.WatchedActorTerminatedException]] if the target actor is terminated.
+ * The operator fails with an [[akka.stream.WatchedActorTerminatedException]] if the target actor is terminated.
*
* Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute.
*

@@ -667,13 +667,13 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
* The `mapTo` class parameter is used to cast the incoming responses to the expected response type.
*
* Similar to the plain ask pattern, the target actor is allowed to reply with `akka.util.Status`.
- * An `akka.util.Status#Failure` will cause the stage to fail with the cause carried in the `Failure` message.
+ * An `akka.util.Status#Failure` will cause the operator to fail with the cause carried in the `Failure` message.
*
* Parallelism limits the number of how many asks can be "in flight" at the same time.
- * Please note that the elements emitted by this stage are in-order with regards to the asks being issued
+ * Please note that the elements emitted by this operator are in-order with regards to the asks being issued
* (i.e. same behaviour as mapAsync).
*
- * The stage fails with an [[akka.stream.WatchedActorTerminatedException]] if the target actor is terminated.
+ * The operator fails with an [[akka.stream.WatchedActorTerminatedException]] if the target actor is terminated.
*
* Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute.
*
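A minimal Scala DSL sketch of the ask operator described above (the worker actor and the Request/Response protocol are hypothetical, not part of the commit):

{{{
import akka.NotUsed
import akka.actor.ActorRef
import akka.stream.scaladsl.Flow
import akka.util.Timeout

final case class Request(payload: String)
final case class Response(payload: String)

// Each element is sent to `worker` via ask; the reply is cast to Response.
// The stream fails with WatchedActorTerminatedException if `worker` stops.
def enrich(worker: ActorRef)(implicit timeout: Timeout): Flow[Request, Response, NotUsed] =
  Flow[Request].ask[Response](parallelism = 4)(worker)
}}}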
@@ -691,7 +691,7 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
new Flow(delegate.ask[S](parallelism)(ref)(timeout, ClassTag(mapTo)))

/**
- * The stage fails with an [[akka.stream.WatchedActorTerminatedException]] if the target actor is terminated.
+ * The operator fails with an [[akka.stream.WatchedActorTerminatedException]] if the target actor is terminated.
*
* '''Emits when''' upstream emits
*

@@ -968,7 +968,7 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
* yielding the next current value.
*
* If the stream is empty (i.e. completes before signalling any elements),
- * the reduce stage will fail its downstream with a [[NoSuchElementException]],
+ * the reduce operator will fail its downstream with a [[NoSuchElementException]],
* which is semantically in-line with that Scala's standard library collections
* do in such situations.
*

@@ -1298,7 +1298,7 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
/**
* Recover allows to send last element on failure and gracefully complete the stream
* Since the underlying failure signal onError arrives out-of-band, it might jump over existing elements.
- * This stage can recover the failure signal, but not the skipped elements, which will be dropped.
+ * This operator can recover the failure signal, but not the skipped elements, which will be dropped.
*
* Throwing an exception inside `recover` _will_ be logged on ERROR level automatically.
*

@@ -1316,7 +1316,7 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
/**
* Recover allows to send last element on failure and gracefully complete the stream
* Since the underlying failure signal onError arrives out-of-band, it might jump over existing elements.
- * This stage can recover the failure signal, but not the skipped elements, which will be dropped.
+ * This operator can recover the failure signal, but not the skipped elements, which will be dropped.
*
* Throwing an exception inside `recover` _will_ be logged on ERROR level automatically.
*

@@ -1334,12 +1334,12 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
}

/**
- * While similar to [[recover]] this stage can be used to transform an error signal to a different one *without* logging
+ * While similar to [[recover]] this operator can be used to transform an error signal to a different one *without* logging
* it as an error in the process. So in that sense it is NOT exactly equivalent to `recover(t => throw t2)` since recover
* would log the `t2` error.
*
* Since the underlying failure signal onError arrives out-of-band, it might jump over existing elements.
- * This stage can recover the failure signal, but not the skipped elements, which will be dropped.
+ * This operator can recover the failure signal, but not the skipped elements, which will be dropped.
*
* Similarily to [[recover]] throwing an exception inside `mapError` _will_ be logged.
*
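A small sketch of the `mapError` behaviour described above, translating a low-level failure into a domain-specific one without the ERROR logging that `recover(t => throw t2)` would produce (the exception types are illustrative):

{{{
import akka.stream.scaladsl.Flow

// 100 / 0 fails the stream with ArithmeticException; mapError rewrites the
// failure signal itself, it does not resurrect elements skipped by onError.
val divide = Flow[Int]
  .map(100 / _)
  .mapError { case _: ArithmeticException => new IllegalStateException("division by zero in input") }
}}}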
@@ -1361,7 +1361,7 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
* Source may be materialized.
*
* Since the underlying failure signal onError arrives out-of-band, it might jump over existing elements.
- * This stage can recover the failure signal, but not the skipped elements, which will be dropped.
+ * This operator can recover the failure signal, but not the skipped elements, which will be dropped.
*
* Throwing an exception inside `recoverWith` _will_ be logged on ERROR level automatically.
*

@@ -1384,7 +1384,7 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
* Source may be materialized.
*
* Since the underlying failure signal onError arrives out-of-band, it might jump over existing elements.
- * This stage can recover the failure signal, but not the skipped elements, which will be dropped.
+ * This operator can recover the failure signal, but not the skipped elements, which will be dropped.
*
* Throwing an exception inside `recoverWith` _will_ be logged on ERROR level automatically.
*

@@ -1412,7 +1412,7 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
* A negative `attempts` number is interpreted as "infinite", which results in the exact same behavior as `recoverWith`.
*
* Since the underlying failure signal onError arrives out-of-band, it might jump over existing elements.
- * This stage can recover the failure signal, but not the skipped elements, which will be dropped.
+ * This operator can recover the failure signal, but not the skipped elements, which will be dropped.
*
* Throwing an exception inside `recoverWithRetries` _will_ be logged on ERROR level automatically.
*

@@ -1440,7 +1440,7 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
* A negative `attempts` number is interpreted as "infinite", which results in the exact same behavior as `recoverWith`.
*
* Since the underlying failure signal onError arrives out-of-band, it might jump over existing elements.
- * This stage can recover the failure signal, but not the skipped elements, which will be dropped.
+ * This operator can recover the failure signal, but not the skipped elements, which will be dropped.
*
* Throwing an exception inside `recoverWithRetries` _will_ be logged on ERROR level automatically.
*
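A hedged sketch of `recoverWithRetries` as documented above, switching to a fallback source at most once (the fallback value is illustrative):

{{{
import akka.stream.scaladsl.{ Flow, Source }

// On ArithmeticException, switch (at most once) to a one-element fallback source
// and complete; with attempts = -1 this behaves exactly like recoverWith.
val resilient = Flow[Int]
  .map(100 / _)
  .recoverWithRetries(attempts = 1, { case _: ArithmeticException => Source.single(-1) })
}}}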
@@ -1786,7 +1786,7 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
* a new substream is opened and subsequently fed with all elements belonging to
* that key.
*
- * WARNING: If `allowClosedSubstreamRecreation` is set to `false` (default behavior) the stage
+ * WARNING: If `allowClosedSubstreamRecreation` is set to `false` (default behavior) the operator
* keeps track of all keys of streams that have already been closed. If you expect an infinite
* number of keys this can cause memory issues. Elements belonging to those keys are drained
* directly and not send to the substream.

@@ -1842,7 +1842,7 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
* a new substream is opened and subsequently fed with all elements belonging to
* that key.
*
- * WARNING: The stage keeps track of all keys of streams that have already been closed.
+ * WARNING: The operator keeps track of all keys of streams that have already been closed.
* If you expect an infinite number of keys this can cause memory issues. Elements belonging
* to those keys are drained directly and not send to the substream.
*
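A minimal sketch of the `groupBy` usage this warning is about, with a deliberately bounded key space (the modulo key and the per-key sum are illustrative):

{{{
import akka.stream.scaladsl.{ Sink, Source }

// At most 10 distinct keys (n % 10), so the closed-substream bookkeeping stays bounded.
val perKeySums =
  Source(1 to 100)
    .groupBy(maxSubstreams = 10, _ % 10) // one substream per key
    .fold(0)(_ + _)                      // sum each substream
    .mergeSubstreams                     // merge the per-key results back
    .to(Sink.foreach(println))
}}}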
@@ -2095,7 +2095,7 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
* from producing elements by asserting back-pressure until its time comes or it gets
* cancelled.
*
- * On errors the stage is failed regardless of source of the error.
+ * On errors the operator is failed regardless of source of the error.
*
* '''Emits when''' element is available from first stream or first stream closed without emitting any elements and an element
* is available from the second stream

@@ -2249,7 +2249,7 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
* then repeat process.
*
* If eagerClose is false and one of the upstreams complete the elements from the other upstream will continue passing
- * through the interleave stage. If eagerClose is true and one of the upstream complete interleave will cancel the
+ * through the interleave operator. If eagerClose is true and one of the upstream complete interleave will cancel the
* other upstream and complete itself.
*
* If this [[Flow]] or [[Source]] gets upstream error - stream completes with failure.

@@ -2290,7 +2290,7 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
* then repeat process.
*
* If eagerClose is false and one of the upstreams complete the elements from the other upstream will continue passing
- * through the interleave stage. If eagerClose is true and one of the upstream complete interleave will cancel the
+ * through the interleave operator. If eagerClose is true and one of the upstream complete interleave will cancel the
* other upstream and complete itself.
*
* If this [[Flow]] or [[Source]] gets upstream error - stream completes with failure.

@@ -2480,7 +2480,7 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
new Flow(delegate.zipWithIndex.map { case (elem, index) ⇒ Pair[Out, java.lang.Long](elem, index) })

/**
- * If the first element has not passed through this stage before the provided timeout, the stream is failed
+ * If the first element has not passed through this operator before the provided timeout, the stream is failed
* with a [[java.util.concurrent.TimeoutException]].
*
* '''Emits when''' upstream emits an element

@@ -2497,7 +2497,7 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
new Flow(delegate.initialTimeout(timeout))

/**
- * If the first element has not passed through this stage before the provided timeout, the stream is failed
+ * If the first element has not passed through this operator before the provided timeout, the stream is failed
* with a [[java.util.concurrent.TimeoutException]].
*
* '''Emits when''' upstream emits an element

@@ -2613,7 +2613,7 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
/**
* Injects additional elements if upstream does not emit for a configured amount of time. In other words, this
- * stage attempts to maintains a base rate of emitted elements towards the downstream.
+ * operator attempts to maintains a base rate of emitted elements towards the downstream.
*
* If the downstream backpressures then no element is injected until downstream demand arrives. Injected elements
* do not accumulate during this period.

@@ -2635,7 +2635,7 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
/**
* Injects additional elements if upstream does not emit for a configured amount of time. In other words, this
- * stage attempts to maintains a base rate of emitted elements towards the downstream.
+ * operator attempts to maintains a base rate of emitted elements towards the downstream.
*
* If the downstream backpressures then no element is injected until downstream demand arrives. Injected elements
* do not accumulate during this period.
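A short sketch of the keep-alive behaviour described above, in the Scala DSL (the heartbeat payload and the 5-second idle period are illustrative):

{{{
import scala.concurrent.duration._
import akka.stream.scaladsl.Flow
import akka.util.ByteString

// If upstream stays silent for 5 seconds, inject a heartbeat frame; injected
// elements are not queued up while downstream is backpressuring.
val withHeartbeat = Flow[ByteString].keepAlive(5.seconds, () => ByteString("HEARTBEAT\n"))
}}}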
@@ -2654,7 +2654,7 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
keepAlive(maxIdle.asScala, injectedElem)

/**
- * Sends elements downstream with speed limited to `elements/per`. In other words, this stage set the maximum rate
+ * Sends elements downstream with speed limited to `elements/per`. In other words, this operator set the maximum rate
* for emitting messages. This operator works for streams where all elements have the same cost or length.
*
* Throttle implements the token bucket model. There is a bucket with a given token capacity (burst size).

@@ -2687,7 +2687,7 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
new Flow(delegate.throttle(elements, per.asScala))

/**
- * Sends elements downstream with speed limited to `elements/per`. In other words, this stage set the maximum rate
+ * Sends elements downstream with speed limited to `elements/per`. In other words, this operator set the maximum rate
* for emitting messages. This operator works for streams where all elements have the same cost or length.
*
* Throttle implements the token bucket model. There is a bucket with a given token capacity (burst size or maximumBurst).

@@ -2729,7 +2729,7 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
new Flow(delegate.throttle(elements, per, maximumBurst, mode))

/**
- * Sends elements downstream with speed limited to `elements/per`. In other words, this stage set the maximum rate
+ * Sends elements downstream with speed limited to `elements/per`. In other words, this operator set the maximum rate
* for emitting messages. This operator works for streams where all elements have the same cost or length.
*
* Throttle implements the token bucket model. There is a bucket with a given token capacity (burst size or maximumBurst).
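A hedged token-bucket sketch of the throttle operator in the Scala DSL (the rate and burst size are illustrative):

{{{
import scala.concurrent.duration._
import akka.stream.ThrottleMode
import akka.stream.scaladsl.Source

// At most 10 elements per second on average, with bursts of up to 20 tokens;
// Shaping delays elements instead of failing the stream when the bucket is empty.
val shaped = Source(1 to 1000).throttle(10, 1.second, 20, ThrottleMode.Shaping)
}}}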
@ -3022,7 +3022,7 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
|
||||||
* set directly on the individual graphs of the composite.
|
* set directly on the individual graphs of the composite.
|
||||||
*
|
*
|
||||||
* Note that this operation has no effect on an empty Flow (because the attributes apply
|
* Note that this operation has no effect on an empty Flow (because the attributes apply
|
||||||
* only to the contained processing stages).
|
* only to the contained processing operators).
|
||||||
*/
|
*/
|
||||||
override def withAttributes(attr: Attributes): javadsl.Flow[In, Out, Mat] =
|
override def withAttributes(attr: Attributes): javadsl.Flow[In, Out, Mat] =
|
||||||
new Flow(delegate.withAttributes(attr))
|
new Flow(delegate.withAttributes(attr))
|
||||||
|
|
|
||||||
|
|
@ -110,7 +110,7 @@ object Framing {
|
||||||
* @param computeFrameSize This function can be supplied if frame size is varied or needs to be computed in a special fashion.
|
* @param computeFrameSize This function can be supplied if frame size is varied or needs to be computed in a special fashion.
|
||||||
* For example, frame can have a shape like this: `[offset bytes][body size bytes][body bytes][footer bytes]`.
|
* For example, frame can have a shape like this: `[offset bytes][body size bytes][body bytes][footer bytes]`.
|
||||||
* Then computeFrameSize can be used to compute the frame size: `(offset bytes, computed size) => (actual frame size)`.
|
* Then computeFrameSize can be used to compute the frame size: `(offset bytes, computed size) => (actual frame size)`.
|
||||||
* ''Actual frame size'' must be equal or bigger than sum of `fieldOffset` and `fieldLength`, the stage fails otherwise.
|
* ''Actual frame size'' must be equal or bigger than sum of `fieldOffset` and `fieldLength`, the operator fails otherwise.
|
||||||
*
|
*
|
||||||
*/
|
*/
|
||||||
def lengthField(
|
def lengthField(
|
||||||
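For the common fixed-layout case, a hedged sketch of length-field framing (the field sizes and limit are made-up values): a 2-byte big-endian length field at offset 0, frames capped at 1024 bytes. The overload documented above additionally takes the `(offset bytes, computed size) => actual frame size` function.

{{{
Flow<ByteString, ByteString, NotUsed> frames =
  Framing.lengthField(2, 0, 1024, ByteOrder.BIG_ENDIAN);
}}}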
|
|
|
||||||
|
|
@ -34,29 +34,29 @@ import scala.collection.parallel.immutable
|
||||||
object Merge {
|
object Merge {
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Create a new `Merge` stage with the specified output type.
|
* Create a new `Merge` operator with the specified output type.
|
||||||
*/
|
*/
|
||||||
def create[T](inputPorts: Int): Graph[UniformFanInShape[T, T], NotUsed] =
|
def create[T](inputPorts: Int): Graph[UniformFanInShape[T, T], NotUsed] =
|
||||||
scaladsl.Merge(inputPorts)
|
scaladsl.Merge(inputPorts)
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Create a new `Merge` stage with the specified output type.
|
* Create a new `Merge` operator with the specified output type.
|
||||||
*/
|
*/
|
||||||
def create[T](clazz: Class[T], inputPorts: Int): Graph[UniformFanInShape[T, T], NotUsed] = create(inputPorts)
|
def create[T](clazz: Class[T], inputPorts: Int): Graph[UniformFanInShape[T, T], NotUsed] = create(inputPorts)
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Create a new `Merge` stage with the specified output type.
|
* Create a new `Merge` operator with the specified output type.
|
||||||
*
|
*
|
||||||
* @param eagerComplete set to true in order to make this stage eagerly
|
* @param eagerComplete set to true in order to make this operator eagerly
|
||||||
* finish as soon as one of its inputs completes
|
* finish as soon as one of its inputs completes
|
||||||
*/
|
*/
|
||||||
def create[T](inputPorts: Int, eagerComplete: Boolean): Graph[UniformFanInShape[T, T], NotUsed] =
|
def create[T](inputPorts: Int, eagerComplete: Boolean): Graph[UniformFanInShape[T, T], NotUsed] =
|
||||||
scaladsl.Merge(inputPorts, eagerComplete = eagerComplete)
|
scaladsl.Merge(inputPorts, eagerComplete = eagerComplete)
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Create a new `Merge` stage with the specified output type.
|
* Create a new `Merge` operator with the specified output type.
|
||||||
*
|
*
|
||||||
* @param eagerComplete set to true in order to make this stage eagerly
|
* @param eagerComplete set to true in order to make this operator eagerly
|
||||||
* finish as soon as one of its inputs completes
|
* finish as soon as one of its inputs completes
|
||||||
*/
|
*/
|
||||||
def create[T](clazz: Class[T], inputPorts: Int, eagerComplete: Boolean): Graph[UniformFanInShape[T, T], NotUsed] =
|
def create[T](clazz: Class[T], inputPorts: Int, eagerComplete: Boolean): Graph[UniformFanInShape[T, T], NotUsed] =
|
||||||
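A hedged sketch of using the `Merge` junction from Java via `Source.combine` (element values are illustrative; `mat` is an assumed materializer):

{{{
Source<Integer, NotUsed> merged =
  Source.combine(
    Source.range(1, 5),
    Source.range(6, 10),
    new ArrayList<>(),                      // no further sources
    width -> Merge.<Integer>create(width)); // fan-in strategy
merged.runForeach(System.out::println, mat);
}}}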
|
|
@ -78,29 +78,29 @@ object Merge {
|
||||||
*/
|
*/
|
||||||
object MergePreferred {
|
object MergePreferred {
|
||||||
/**
|
/**
|
||||||
* Create a new `MergePreferred` stage with the specified output type.
|
* Create a new `MergePreferred` operator with the specified output type.
|
||||||
*/
|
*/
|
||||||
def create[T](secondaryPorts: Int): Graph[scaladsl.MergePreferred.MergePreferredShape[T], NotUsed] =
|
def create[T](secondaryPorts: Int): Graph[scaladsl.MergePreferred.MergePreferredShape[T], NotUsed] =
|
||||||
scaladsl.MergePreferred(secondaryPorts)
|
scaladsl.MergePreferred(secondaryPorts)
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Create a new `MergePreferred` stage with the specified output type.
|
* Create a new `MergePreferred` operator with the specified output type.
|
||||||
*/
|
*/
|
||||||
def create[T](clazz: Class[T], secondaryPorts: Int): Graph[scaladsl.MergePreferred.MergePreferredShape[T], NotUsed] = create(secondaryPorts)
|
def create[T](clazz: Class[T], secondaryPorts: Int): Graph[scaladsl.MergePreferred.MergePreferredShape[T], NotUsed] = create(secondaryPorts)
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Create a new `MergePreferred` stage with the specified output type.
|
* Create a new `MergePreferred` operator with the specified output type.
|
||||||
*
|
*
|
||||||
* @param eagerComplete set to true in order to make this stage eagerly
|
* @param eagerComplete set to true in order to make this operator eagerly
|
||||||
* finish as soon as one of its inputs completes
|
* finish as soon as one of its inputs completes
|
||||||
*/
|
*/
|
||||||
def create[T](secondaryPorts: Int, eagerComplete: Boolean): Graph[scaladsl.MergePreferred.MergePreferredShape[T], NotUsed] =
|
def create[T](secondaryPorts: Int, eagerComplete: Boolean): Graph[scaladsl.MergePreferred.MergePreferredShape[T], NotUsed] =
|
||||||
scaladsl.MergePreferred(secondaryPorts, eagerComplete = eagerComplete)
|
scaladsl.MergePreferred(secondaryPorts, eagerComplete = eagerComplete)
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Create a new `MergePreferred` stage with the specified output type.
|
* Create a new `MergePreferred` operator with the specified output type.
|
||||||
*
|
*
|
||||||
* @param eagerComplete set to true in order to make this stage eagerly
|
* @param eagerComplete set to true in order to make this operator eagerly
|
||||||
* finish as soon as one of its inputs completes
|
* finish as soon as one of its inputs completes
|
||||||
*/
|
*/
|
||||||
def create[T](clazz: Class[T], secondaryPorts: Int, eagerComplete: Boolean): Graph[scaladsl.MergePreferred.MergePreferredShape[T], NotUsed] =
|
def create[T](clazz: Class[T], secondaryPorts: Int, eagerComplete: Boolean): Graph[scaladsl.MergePreferred.MergePreferredShape[T], NotUsed] =
|
||||||
|
|
@ -127,30 +127,30 @@ object MergePreferred {
|
||||||
*/
|
*/
|
||||||
object MergePrioritized {
|
object MergePrioritized {
|
||||||
/**
|
/**
|
||||||
* Create a new `MergePrioritized` stage with the specified output type.
|
* Create a new `MergePrioritized` operator with the specified output type.
|
||||||
*/
|
*/
|
||||||
def create[T](priorities: Array[Int]): Graph[UniformFanInShape[T, T], NotUsed] =
|
def create[T](priorities: Array[Int]): Graph[UniformFanInShape[T, T], NotUsed] =
|
||||||
scaladsl.MergePrioritized(priorities)
|
scaladsl.MergePrioritized(priorities)
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Create a new `MergePrioritized` stage with the specified output type.
|
* Create a new `MergePrioritized` operator with the specified output type.
|
||||||
*/
|
*/
|
||||||
def create[T](clazz: Class[T], priorities: Array[Int]): Graph[UniformFanInShape[T, T], NotUsed] =
|
def create[T](clazz: Class[T], priorities: Array[Int]): Graph[UniformFanInShape[T, T], NotUsed] =
|
||||||
create(priorities)
|
create(priorities)
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Create a new `MergePrioritized` stage with the specified output type.
|
* Create a new `MergePrioritized` operator with the specified output type.
|
||||||
*
|
*
|
||||||
* @param eagerComplete set to true in order to make this stage eagerly
|
* @param eagerComplete set to true in order to make this operator eagerly
|
||||||
* finish as soon as one of its inputs completes
|
* finish as soon as one of its inputs completes
|
||||||
*/
|
*/
|
||||||
def create[T](priorities: Array[Int], eagerComplete: Boolean): Graph[UniformFanInShape[T, T], NotUsed] =
|
def create[T](priorities: Array[Int], eagerComplete: Boolean): Graph[UniformFanInShape[T, T], NotUsed] =
|
||||||
scaladsl.MergePrioritized(priorities, eagerComplete = eagerComplete)
|
scaladsl.MergePrioritized(priorities, eagerComplete = eagerComplete)
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Create a new `MergePrioritized` stage with the specified output type.
|
* Create a new `MergePrioritized` operator with the specified output type.
|
||||||
*
|
*
|
||||||
* @param eagerComplete set to true in order to make this stage eagerly
|
* @param eagerComplete set to true in order to make this operator eagerly
|
||||||
* finish as soon as one of its inputs completes
|
* finish as soon as one of its inputs completes
|
||||||
*/
|
*/
|
||||||
def create[T](clazz: Class[T], priorities: Array[Int], eagerComplete: Boolean): Graph[UniformFanInShape[T, T], NotUsed] =
|
def create[T](clazz: Class[T], priorities: Array[Int], eagerComplete: Boolean): Graph[UniformFanInShape[T, T], NotUsed] =
|
||||||
|
|
@ -174,7 +174,7 @@ object MergePrioritized {
|
||||||
*/
|
*/
|
||||||
object Broadcast {
|
object Broadcast {
|
||||||
/**
|
/**
|
||||||
* Create a new `Broadcast` stage with the specified input type.
|
* Create a new `Broadcast` operator with the specified input type.
|
||||||
*
|
*
|
||||||
* @param outputCount number of output ports
|
* @param outputCount number of output ports
|
||||||
* @param eagerCancel if true, broadcast cancels upstream if any of its downstreams cancel.
|
* @param eagerCancel if true, broadcast cancels upstream if any of its downstreams cancel.
|
||||||
|
|
@ -183,14 +183,14 @@ object Broadcast {
|
||||||
scaladsl.Broadcast(outputCount, eagerCancel = eagerCancel)
|
scaladsl.Broadcast(outputCount, eagerCancel = eagerCancel)
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Create a new `Broadcast` stage with the specified input type.
|
* Create a new `Broadcast` operator with the specified input type.
|
||||||
*
|
*
|
||||||
* @param outputCount number of output ports
|
* @param outputCount number of output ports
|
||||||
*/
|
*/
|
||||||
def create[T](outputCount: Int): Graph[UniformFanOutShape[T, T], NotUsed] = create(outputCount, eagerCancel = false)
|
def create[T](outputCount: Int): Graph[UniformFanOutShape[T, T], NotUsed] = create(outputCount, eagerCancel = false)
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Create a new `Broadcast` stage with the specified input type.
|
* Create a new `Broadcast` operator with the specified input type.
|
||||||
*/
|
*/
|
||||||
def create[T](clazz: Class[T], outputCount: Int): Graph[UniformFanOutShape[T, T], NotUsed] = create(outputCount)
|
def create[T](clazz: Class[T], outputCount: Int): Graph[UniformFanOutShape[T, T], NotUsed] = create(outputCount)
|
||||||
|
|
||||||
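A minimal GraphDSL sketch that fans one source out to two sinks through a `Broadcast` (the sinks and element values are assumptions for illustration; `mat` is an assumed materializer):

{{{
RunnableGraph.fromGraph(GraphDSL.create(builder -> {
  final Outlet<Integer> src = builder.add(Source.range(1, 3)).out();
  final UniformFanOutShape<Integer, Integer> bcast = builder.add(Broadcast.<Integer>create(2));
  final Inlet<Integer> out1 = builder.add(Sink.<Integer>foreach(i -> System.out.println("first: " + i))).in();
  final Inlet<Integer> out2 = builder.add(Sink.<Integer>foreach(i -> System.out.println("second: " + i))).in();
  builder.from(src).viaFanOut(bcast);
  builder.from(bcast.out(0)).to(out1);
  builder.from(bcast.out(1)).to(out2);
  return ClosedShape.getInstance();
})).run(mat);
}}}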
|
|
@ -211,7 +211,7 @@ object Broadcast {
|
||||||
*/
|
*/
|
||||||
object Partition {
|
object Partition {
|
||||||
/**
|
/**
|
||||||
* Create a new `Partition` stage with the specified input type, `eagerCancel` is `false`.
|
* Create a new `Partition` operator with the specified input type, `eagerCancel` is `false`.
|
||||||
*
|
*
|
||||||
* @param outputCount number of output ports
|
* @param outputCount number of output ports
|
||||||
* @param partitioner function deciding which output each element will be targeted
|
* @param partitioner function deciding which output each element will be targeted
|
||||||
|
|
@ -220,17 +220,17 @@ object Partition {
|
||||||
new scaladsl.Partition(outputCount, partitioner.apply)
|
new scaladsl.Partition(outputCount, partitioner.apply)
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Create a new `Partition` stage with the specified input type.
|
* Create a new `Partition` operator with the specified input type.
|
||||||
*
|
*
|
||||||
* @param outputCount number of output ports
|
* @param outputCount number of output ports
|
||||||
* @param partitioner function deciding which output each element will be targeted
|
* @param partitioner function deciding which output each element will be targeted
|
||||||
* @param eagerCancel this stage cancels, when any (true) or all (false) of the downstreams cancel
|
* @param eagerCancel this operator cancels, when any (true) or all (false) of the downstreams cancel
|
||||||
*/
|
*/
|
||||||
def create[T](outputCount: Int, partitioner: function.Function[T, Integer], eagerCancel: Boolean): Graph[UniformFanOutShape[T, T], NotUsed] =
|
def create[T](outputCount: Int, partitioner: function.Function[T, Integer], eagerCancel: Boolean): Graph[UniformFanOutShape[T, T], NotUsed] =
|
||||||
new scaladsl.Partition(outputCount, partitioner.apply, eagerCancel)
|
new scaladsl.Partition(outputCount, partitioner.apply, eagerCancel)
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Create a new `Partition` stage with the specified input type, `eagerCancel` is `false`.
|
* Create a new `Partition` operator with the specified input type, `eagerCancel` is `false`.
|
||||||
*
|
*
|
||||||
* @param clazz a type hint for this method
|
* @param clazz a type hint for this method
|
||||||
* @param outputCount number of output ports
|
* @param outputCount number of output ports
|
||||||
|
|
@ -240,12 +240,12 @@ object Partition {
|
||||||
new scaladsl.Partition(outputCount, partitioner.apply)
|
new scaladsl.Partition(outputCount, partitioner.apply)
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Create a new `Partition` stage with the specified input type.
|
* Create a new `Partition` operator with the specified input type.
|
||||||
*
|
*
|
||||||
* @param clazz a type hint for this method
|
* @param clazz a type hint for this method
|
||||||
* @param outputCount number of output ports
|
* @param outputCount number of output ports
|
||||||
* @param partitioner function deciding which output each element will be targeted
|
* @param partitioner function deciding which output each element will be targeted
|
||||||
* @param eagerCancel this stage cancels, when any (true) or all (false) of the downstreams cancel
|
* @param eagerCancel this operator cancels, when any (true) or all (false) of the downstreams cancel
|
||||||
*/
|
*/
|
||||||
def create[T](clazz: Class[T], outputCount: Int, partitioner: function.Function[T, Integer], eagerCancel: Boolean): Graph[UniformFanOutShape[T, T], NotUsed] =
|
def create[T](clazz: Class[T], outputCount: Int, partitioner: function.Function[T, Integer], eagerCancel: Boolean): Graph[UniformFanOutShape[T, T], NotUsed] =
|
||||||
new scaladsl.Partition(outputCount, partitioner.apply, eagerCancel)
|
new scaladsl.Partition(outputCount, partitioner.apply, eagerCancel)
|
||||||
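A hedged sketch of constructing a `Partition` junction that routes even numbers to output 0 and odd numbers to output 1 (the classifier is illustrative; wiring it into a graph follows the same GraphDSL pattern as the `Broadcast` sketch earlier, using `viaFanOut`):

{{{
Graph<UniformFanOutShape<Integer, Integer>, NotUsed> evenOdd =
  Partition.create(Integer.class, 2, n -> (n % 2 == 0) ? 0 : 1);
}}}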
|
|
@ -267,7 +267,7 @@ object Partition {
|
||||||
*/
|
*/
|
||||||
object Balance {
|
object Balance {
|
||||||
/**
|
/**
|
||||||
* Create a new `Balance` stage with the specified input type, `eagerCancel` is `false`.
|
* Create a new `Balance` operator with the specified input type, `eagerCancel` is `false`.
|
||||||
*
|
*
|
||||||
* @param outputCount number of output ports
|
* @param outputCount number of output ports
|
||||||
* @param waitForAllDownstreams if `true` it will not start emitting
|
* @param waitForAllDownstreams if `true` it will not start emitting
|
||||||
|
|
@ -277,7 +277,7 @@ object Balance {
|
||||||
scaladsl.Balance(outputCount, waitForAllDownstreams)
|
scaladsl.Balance(outputCount, waitForAllDownstreams)
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Create a new `Balance` stage with the specified input type.
|
* Create a new `Balance` operator with the specified input type.
|
||||||
*
|
*
|
||||||
* @param outputCount number of output ports
|
* @param outputCount number of output ports
|
||||||
* @param waitForAllDownstreams if `true` it will not start emitting elements to downstream outputs until all of them have requested at least one element
|
* @param waitForAllDownstreams if `true` it will not start emitting elements to downstream outputs until all of them have requested at least one element
|
||||||
|
|
@ -287,7 +287,7 @@ object Balance {
|
||||||
new scaladsl.Balance(outputCount, waitForAllDownstreams, eagerCancel)
|
new scaladsl.Balance(outputCount, waitForAllDownstreams, eagerCancel)
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Create a new `Balance` stage with the specified input type, both `waitForAllDownstreams` and `eagerCancel` are `false`.
|
* Create a new `Balance` operator with the specified input type, both `waitForAllDownstreams` and `eagerCancel` are `false`.
|
||||||
*
|
*
|
||||||
* @param outputCount number of output ports
|
* @param outputCount number of output ports
|
||||||
*/
|
*/
|
||||||
|
|
@ -295,7 +295,7 @@ object Balance {
|
||||||
create(outputCount, waitForAllDownstreams = false)
|
create(outputCount, waitForAllDownstreams = false)
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Create a new `Balance` stage with the specified input type, both `waitForAllDownstreams` and `eagerCancel` are `false`.
|
* Create a new `Balance` operator with the specified input type, both `waitForAllDownstreams` and `eagerCancel` are `false`.
|
||||||
*
|
*
|
||||||
* @param clazz a type hint for this method
|
* @param clazz a type hint for this method
|
||||||
* @param outputCount number of output ports
|
* @param outputCount number of output ports
|
||||||
|
|
@ -304,7 +304,7 @@ object Balance {
|
||||||
create(outputCount)
|
create(outputCount)
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Create a new `Balance` stage with the specified input type, `eagerCancel` is `false`.
|
* Create a new `Balance` operator with the specified input type, `eagerCancel` is `false`.
|
||||||
*
|
*
|
||||||
* @param clazz a type hint for this method
|
* @param clazz a type hint for this method
|
||||||
* @param outputCount number of output ports
|
* @param outputCount number of output ports
|
||||||
|
|
@ -314,7 +314,7 @@ object Balance {
|
||||||
create(outputCount, waitForAllDownstreams)
|
create(outputCount, waitForAllDownstreams)
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Create a new `Balance` stage with the specified input type.
|
* Create a new `Balance` operator with the specified input type.
|
||||||
*
|
*
|
||||||
* @param clazz a type hint for this method
|
* @param clazz a type hint for this method
|
||||||
* @param outputCount number of output ports
|
* @param outputCount number of output ports
|
||||||
|
|
@ -343,7 +343,7 @@ object Zip {
|
||||||
import akka.japi.Pair
|
import akka.japi.Pair
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Create a new `Zip` stage with the specified input types and zipping-function
|
* Create a new `Zip` operator with the specified input types and zipping-function
|
||||||
* which creates `akka.japi.Pair`s.
|
* which creates `akka.japi.Pair`s.
|
||||||
*/
|
*/
|
||||||
def create[A, B]: Graph[FanInShape2[A, B, A Pair B], NotUsed] =
|
def create[A, B]: Graph[FanInShape2[A, B, A Pair B], NotUsed] =
|
||||||
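The `zip` operator on `Source`/`Flow` is built on this junction; a small hedged sketch (the values and the assumed materializer `mat` are illustrative):

{{{
Source.range(1, 3)
  .zip(Source.from(Arrays.asList("a", "b", "c")))
  .runForeach(pair -> System.out.println(pair.first() + " -> " + pair.second()), mat);
}}}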
|
|
@ -408,13 +408,13 @@ object ZipWithN {
|
||||||
object Unzip {
|
object Unzip {
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Creates a new `Unzip` stage with the specified output types.
|
* Creates a new `Unzip` operator with the specified output types.
|
||||||
*/
|
*/
|
||||||
def create[A, B](): Graph[FanOutShape2[A Pair B, A, B], NotUsed] =
|
def create[A, B](): Graph[FanOutShape2[A Pair B, A, B], NotUsed] =
|
||||||
UnzipWith.create(ConstantFun.javaIdentityFunction[Pair[A, B]])
|
UnzipWith.create(ConstantFun.javaIdentityFunction[Pair[A, B]])
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Creates a new `Unzip` stage with the specified output types.
|
* Creates a new `Unzip` operator with the specified output types.
|
||||||
*/
|
*/
|
||||||
def create[A, B](left: Class[A], right: Class[B]): Graph[FanOutShape2[A Pair B, A, B], NotUsed] = create[A, B]()
|
def create[A, B](left: Class[A], right: Class[B]): Graph[FanOutShape2[A Pair B, A, B], NotUsed] = create[A, B]()
|
||||||
|
|
||||||
|
|
@ -435,17 +435,17 @@ object Unzip {
|
||||||
*/
|
*/
|
||||||
object Concat {
|
object Concat {
|
||||||
/**
|
/**
|
||||||
* Create a new anonymous `Concat` stage with the specified input types.
|
* Create a new anonymous `Concat` operator with the specified input types.
|
||||||
*/
|
*/
|
||||||
def create[T](): Graph[UniformFanInShape[T, T], NotUsed] = scaladsl.Concat[T]()
|
def create[T](): Graph[UniformFanInShape[T, T], NotUsed] = scaladsl.Concat[T]()
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Create a new anonymous `Concat` stage with the specified input types.
|
* Create a new anonymous `Concat` operator with the specified input types.
|
||||||
*/
|
*/
|
||||||
def create[T](inputCount: Int): Graph[UniformFanInShape[T, T], NotUsed] = scaladsl.Concat[T](inputCount)
|
def create[T](inputCount: Int): Graph[UniformFanInShape[T, T], NotUsed] = scaladsl.Concat[T](inputCount)
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Create a new anonymous `Concat` stage with the specified input types.
|
* Create a new anonymous `Concat` operator with the specified input types.
|
||||||
*/
|
*/
|
||||||
def create[T](clazz: Class[T]): Graph[UniformFanInShape[T, T], NotUsed] = create()
|
def create[T](clazz: Class[T]): Graph[UniformFanInShape[T, T], NotUsed] = create()
|
||||||
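The `concat` operator on `Source`/`Flow` uses this junction under the hood; a tiny hedged sketch (assuming a materializer `mat`):

{{{
Source.range(1, 3)
  .concat(Source.range(4, 6))   // emits 1,2,3 and then 4,5,6
  .runForeach(System.out::println, mat);
}}}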
|
|
||||||
|
|
@ -506,7 +506,7 @@ object GraphDSL extends GraphCreate {
|
||||||
* It is possible to call this method multiple times to get multiple [[Outlet]] instances if necessary. All of
|
* It is possible to call this method multiple times to get multiple [[Outlet]] instances if necessary. All of
|
||||||
* the outlets will emit the materialized value.
|
* the outlets will emit the materialized value.
|
||||||
*
|
*
|
||||||
* Be careful to not to feed the result of this outlet to a stage that produces the materialized value itself (for
|
* Be careful not to feed the result of this outlet to an operator that produces the materialized value itself (for
|
||||||
* example to a [[Sink#fold]] that contributes to the materialized value) since that might lead to an unresolvable
|
* example to a [[Sink#fold]] that contributes to the materialized value) since that might lead to an unresolvable
|
||||||
* dependency cycle.
|
* dependency cycle.
|
||||||
*
|
*
|
||||||
|
|
|
||||||
|
|
@ -131,7 +131,7 @@ object PartitionHub {
|
||||||
* identifier for the given element. The function will never be called when there are no active consumers,
|
* identifier for the given element. The function will never be called when there are no active consumers,
|
||||||
* i.e. there is always at least one element in the array of identifiers.
|
* i.e. there is always at least one element in the array of identifiers.
|
||||||
* @param startAfterNrOfConsumers Elements are buffered until this number of consumers have been connected.
|
* @param startAfterNrOfConsumers Elements are buffered until this number of consumers have been connected.
|
||||||
* This is only used initially when the stage is starting up, i.e. it is not honored when consumers have
|
* This is only used initially when the operator is starting up, i.e. it is not honored when consumers have
|
||||||
* been removed (canceled).
|
* been removed (canceled).
|
||||||
* @param bufferSize Total number of elements that can be buffered. If this buffer is full, the producer
|
* @param bufferSize Total number of elements that can be buffered. If this buffer is full, the producer
|
||||||
* is backpressured.
|
* is backpressured.
|
||||||
|
|
@ -175,7 +175,7 @@ object PartitionHub {
|
||||||
* and less than number of consumers. E.g. `(size, elem) -> Math.abs(elem.hashCode()) % size`. It's also
|
* and less than number of consumers. E.g. `(size, elem) -> Math.abs(elem.hashCode()) % size`. It's also
|
||||||
* possible to use `-1` to drop the element.
|
* possible to use `-1` to drop the element.
|
||||||
* @param startAfterNrOfConsumers Elements are buffered until this number of consumers have been connected.
|
* @param startAfterNrOfConsumers Elements are buffered until this number of consumers have been connected.
|
||||||
* This is only used initially when the stage is starting up, i.e. it is not honored when consumers have
|
* This is only used initially when the operator is starting up, i.e. it is not honored when consumers have
|
||||||
* been removed (canceled).
|
* been removed (canceled).
|
||||||
* @param bufferSize Total number of elements that can be buffered. If this buffer is full, the producer
|
* @param bufferSize Total number of elements that can be buffered. If this buffer is full, the producer
|
||||||
* is backpressured.
|
* is backpressured.
|
||||||
|
|
|
||||||
|
|
@ -7,13 +7,13 @@ package akka.stream.javadsl
|
||||||
import akka.NotUsed
|
import akka.NotUsed
|
||||||
import akka.util.ByteString
|
import akka.util.ByteString
|
||||||
|
|
||||||
/** Provides JSON framing stages that can separate valid JSON objects from incoming [[akka.util.ByteString]] objects. */
|
/** Provides JSON framing operators that can separate valid JSON objects from incoming [[akka.util.ByteString]] objects. */
|
||||||
object JsonFraming {
|
object JsonFraming {
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Returns a Flow that implements a "brace counting" based framing stage for emitting valid JSON chunks.
|
* Returns a Flow that implements a "brace counting" based framing operator for emitting valid JSON chunks.
|
||||||
*
|
*
|
||||||
* Typical examples of data that one may want to frame using this stage include:
|
* Typical examples of data that one may want to frame using this operator include:
|
||||||
*
|
*
|
||||||
* **Very large arrays**:
|
* **Very large arrays**:
|
||||||
* {{{
|
* {{{
|
||||||
|
|
|
||||||
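A hedged sketch of brace-counting JSON framing (the payload and the 1024-byte object limit are made up; `mat` is an assumed materializer):

{{{
Source.single(ByteString.fromString("{\"a\":1}{\"a\":2}"))
  .via(JsonFraming.objectScanner(1024))   // split into individual JSON objects
  .map(bytes -> bytes.utf8String())
  .runForeach(System.out::println, mat);
}}}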
|
|
@ -33,7 +33,7 @@ trait SourceQueue[T] {
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Method returns a [[CompletionStage]] that will be completed if the stream completes,
|
* Method returns a [[CompletionStage]] that will be completed if the stream completes,
|
||||||
* or will be failed when the stage faces an internal failure.
|
* or will be failed when the operator faces an internal failure.
|
||||||
*/
|
*/
|
||||||
def watchCompletion(): CompletionStage[Done]
|
def watchCompletion(): CompletionStage[Done]
|
||||||
}
|
}
|
||||||
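A hedged sketch of observing completion of a queue-backed source (buffer size, overflow strategy and the assumed materializer `mat` are arbitrary choices for illustration):

{{{
SourceQueueWithComplete<String> queue =
  Source.<String>queue(16, OverflowStrategy.backpressure())
    .to(Sink.<String>foreach(System.out::println))
    .run(mat);

queue.offer("hello");
queue.complete();
queue.watchCompletion().thenRun(() -> System.out.println("stream finished"));
}}}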
|
|
@ -56,7 +56,7 @@ trait SourceQueueWithComplete[T] extends SourceQueue[T] {
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Method returns a [[Future]] that will be completed if the stream completes,
|
* Method returns a [[Future]] that will be completed if the stream completes,
|
||||||
* or will be failed when the stage faces an internal failure or the the [[SourceQueueWithComplete.fail]] method is invoked.
|
* or will be failed when the operator faces an internal failure or the [[SourceQueueWithComplete.fail]] method is invoked.
|
||||||
*/
|
*/
|
||||||
override def watchCompletion(): CompletionStage[Done]
|
override def watchCompletion(): CompletionStage[Done]
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@ -51,7 +51,7 @@ object Sink {
|
||||||
* if there is a failure signaled in the stream.
|
* if there is a failure signaled in the stream.
|
||||||
*
|
*
|
||||||
* If the stream is empty (i.e. completes before signalling any elements),
|
* If the stream is empty (i.e. completes before signalling any elements),
|
||||||
* the reduce stage will fail its downstream with a [[NoSuchElementException]],
|
* the reduce operator will fail its downstream with a [[NoSuchElementException]],
|
||||||
* which is semantically in line with what Scala's standard library collections
|
* which is semantically in line with what Scala's standard library collections
|
||||||
* do in such situations.
|
* do in such situations.
|
||||||
*/
|
*/
|
||||||
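A hedged sketch of the reduce sink summing integers; as noted above, it fails with a NoSuchElementException on an empty stream (`mat` is an assumed materializer):

{{{
CompletionStage<Integer> sum =
  Source.range(1, 10)
    .runWith(Sink.<Integer>reduce((a, b) -> a + b), mat);

sum.thenAccept(total -> System.out.println("sum: " + total));
}}}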
|
|
@ -80,7 +80,7 @@ object Sink {
|
||||||
* A `Sink` that materializes into a [[org.reactivestreams.Publisher]].
|
* A `Sink` that materializes into a [[org.reactivestreams.Publisher]].
|
||||||
*
|
*
|
||||||
* If `fanout` is `true`, the materialized `Publisher` will support multiple `Subscriber`s and
|
* If `fanout` is `true`, the materialized `Publisher` will support multiple `Subscriber`s and
|
||||||
* the size of the `inputBuffer` configured for this stage becomes the maximum number of elements that
|
* the size of the `inputBuffer` configured for this operator becomes the maximum number of elements that
|
||||||
* the fastest [[org.reactivestreams.Subscriber]] can be ahead of the slowest one before slowing
|
* the fastest [[org.reactivestreams.Subscriber]] can be ahead of the slowest one before slowing
|
||||||
* the processing down due to back pressure.
|
* the processing down due to back pressure.
|
||||||
*
|
*
|
||||||
|
|
@ -191,7 +191,7 @@ object Sink {
|
||||||
* i.e. if the actor is not consuming the messages fast enough the mailbox
|
* i.e. if the actor is not consuming the messages fast enough the mailbox
|
||||||
* of the actor will grow. For potentially slow consumer actors it is recommended
|
* of the actor will grow. For potentially slow consumer actors it is recommended
|
||||||
* to use a bounded mailbox with zero `mailbox-push-timeout-time` or use a rate
|
* to use a bounded mailbox with zero `mailbox-push-timeout-time` or use a rate
|
||||||
* limiting stage in front of this `Sink`.
|
* limiting operator in front of this `Sink`.
|
||||||
*
|
*
|
||||||
*/
|
*/
|
||||||
def actorRef[In](ref: ActorRef, onCompleteMessage: Any): Sink[In, NotUsed] =
|
def actorRef[In](ref: ActorRef, onCompleteMessage: Any): Sink[In, NotUsed] =
|
||||||
|
|
|
||||||
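A hedged sketch of sending stream elements to an actor; `workerRef` and the `"done"` completion message are assumptions for illustration, and `mat` is an assumed materializer:

{{{
// workerRef is an already-created ActorRef; "done" is sent when the stream completes
Source.range(1, 100)
  .runWith(Sink.actorRef(workerRef, "done"), mat);
}}}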
|
|
@ -527,7 +527,7 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Transform this [[Source]] by appending the given processing stages.
|
* Transform this [[Source]] by appending the given processing operators.
|
||||||
* {{{
|
* {{{
|
||||||
* +----------------------------+
|
* +----------------------------+
|
||||||
* | Resulting Source |
|
* | Resulting Source |
|
||||||
|
|
@ -547,7 +547,7 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
|
||||||
new Source(delegate.via(flow))
|
new Source(delegate.via(flow))
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Transform this [[Source]] by appending the given processing stages.
|
* Transform this [[Source]] by appending the given processing operators.
|
||||||
* {{{
|
* {{{
|
||||||
* +----------------------------+
|
* +----------------------------+
|
||||||
* | Resulting Source |
|
* | Resulting Source |
|
||||||
|
|
@ -647,7 +647,7 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
|
||||||
* if there is a failure signaled in the stream.
|
* if there is a failure signaled in the stream.
|
||||||
*
|
*
|
||||||
* If the stream is empty (i.e. completes before signalling any elements),
|
* If the stream is empty (i.e. completes before signalling any elements),
|
||||||
* the reduce stage will fail its downstream with a [[NoSuchElementException]],
|
* the reduce operator will fail its downstream with a [[NoSuchElementException]],
|
||||||
* which is semantically in line with what Scala's standard library collections
|
* which is semantically in line with what Scala's standard library collections
|
||||||
* do in such situations.
|
* do in such situations.
|
||||||
*/
|
*/
|
||||||
|
|
@ -745,7 +745,7 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
|
||||||
* from producing elements by asserting back-pressure until its time comes or it gets
|
* from producing elements by asserting back-pressure until its time comes or it gets
|
||||||
* cancelled.
|
* cancelled.
|
||||||
*
|
*
|
||||||
* On errors the stage is failed regardless of source of the error.
|
* On errors the operator is failed regardless of source of the error.
|
||||||
*
|
*
|
||||||
* '''Emits when''' element is available from first stream or first stream closed without emitting any elements and an element
|
* '''Emits when''' element is available from first stream or first stream closed without emitting any elements and an element
|
||||||
* is available from the second stream
|
* is available from the second stream
|
||||||
|
|
@ -1081,7 +1081,7 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
|
||||||
* This operation is useful for inspecting the passed through element, usually by means of side-effecting
|
* This operation is useful for inspecting the passed through element, usually by means of side-effecting
|
||||||
* operations (such as `println`, or emitting metrics), for each element without having to modify it.
|
* operations (such as `println`, or emitting metrics), for each element without having to modify it.
|
||||||
*
|
*
|
||||||
* For logging signals (elements, completion, error) consider using the [[log]] stage instead,
|
* For logging signals (elements, completion, error) consider using the [[log]] operator instead,
|
||||||
* along with appropriate `ActorAttributes.createLogLevels`.
|
* along with appropriate `ActorAttributes.createLogLevels`.
|
||||||
*
|
*
|
||||||
* '''Emits when''' upstream emits an element
|
* '''Emits when''' upstream emits an element
|
||||||
|
|
@ -1098,7 +1098,7 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
|
||||||
/**
|
/**
|
||||||
* Recover allows to send last element on failure and gracefully complete the stream
|
* Recover allows to send last element on failure and gracefully complete the stream
|
||||||
* Since the underlying failure signal onError arrives out-of-band, it might jump over existing elements.
|
* Since the underlying failure signal onError arrives out-of-band, it might jump over existing elements.
|
||||||
* This stage can recover the failure signal, but not the skipped elements, which will be dropped.
|
* This operator can recover the failure signal, but not the skipped elements, which will be dropped.
|
||||||
*
|
*
|
||||||
* Throwing an exception inside `recover` _will_ be logged on ERROR level automatically.
|
* Throwing an exception inside `recover` _will_ be logged on ERROR level automatically.
|
||||||
*
|
*
|
||||||
|
|
@ -1115,12 +1115,12 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
|
||||||
new Source(delegate.recover(pf))
|
new Source(delegate.recover(pf))
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* While similar to [[recover]] this stage can be used to transform an error signal to a different one *without* logging
|
* While similar to [[recover]] this operator can be used to transform an error signal to a different one *without* logging
|
||||||
* it as an error in the process. So in that sense it is NOT exactly equivalent to `recover(t => throw t2)` since recover
|
* it as an error in the process. So in that sense it is NOT exactly equivalent to `recover(t => throw t2)` since recover
|
||||||
* would log the `t2` error.
|
* would log the `t2` error.
|
||||||
*
|
*
|
||||||
* Since the underlying failure signal onError arrives out-of-band, it might jump over existing elements.
|
* Since the underlying failure signal onError arrives out-of-band, it might jump over existing elements.
|
||||||
* This stage can recover the failure signal, but not the skipped elements, which will be dropped.
|
* This operator can recover the failure signal, but not the skipped elements, which will be dropped.
|
||||||
*
|
*
|
||||||
* Similarly to [[recover]], throwing an exception inside `mapError` _will_ be logged.
|
* Similarly to [[recover]], throwing an exception inside `mapError` _will_ be logged.
|
||||||
*
|
*
|
||||||
|
|
@ -1142,7 +1142,7 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
|
||||||
* Source may be materialized.
|
* Source may be materialized.
|
||||||
*
|
*
|
||||||
* Since the underlying failure signal onError arrives out-of-band, it might jump over existing elements.
|
* Since the underlying failure signal onError arrives out-of-band, it might jump over existing elements.
|
||||||
* This stage can recover the failure signal, but not the skipped elements, which will be dropped.
|
* This operator can recover the failure signal, but not the skipped elements, which will be dropped.
|
||||||
*
|
*
|
||||||
* Throwing an exception inside `recoverWith` _will_ be logged on ERROR level automatically.
|
* Throwing an exception inside `recoverWith` _will_ be logged on ERROR level automatically.
|
||||||
*
|
*
|
||||||
|
|
@ -1168,7 +1168,7 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
|
||||||
* A negative `attempts` number is interpreted as "infinite", which results in the exact same behavior as `recoverWith`.
|
* A negative `attempts` number is interpreted as "infinite", which results in the exact same behavior as `recoverWith`.
|
||||||
*
|
*
|
||||||
* Since the underlying failure signal onError arrives out-of-band, it might jump over existing elements.
|
* Since the underlying failure signal onError arrives out-of-band, it might jump over existing elements.
|
||||||
* This stage can recover the failure signal, but not the skipped elements, which will be dropped.
|
* This operator can recover the failure signal, but not the skipped elements, which will be dropped.
|
||||||
*
|
*
|
||||||
* Throwing an exception inside `recoverWithRetries` _will_ be logged on ERROR level automatically.
|
* Throwing an exception inside `recoverWithRetries` _will_ be logged on ERROR level automatically.
|
||||||
*
|
*
|
||||||
|
|
@ -1316,13 +1316,13 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
|
||||||
* The `mapTo` class parameter is used to cast the incoming responses to the expected response type.
|
* The `mapTo` class parameter is used to cast the incoming responses to the expected response type.
|
||||||
*
|
*
|
||||||
* Similar to the plain ask pattern, the target actor is allowed to reply with `akka.util.Status`.
|
* Similar to the plain ask pattern, the target actor is allowed to reply with `akka.util.Status`.
|
||||||
* An `akka.util.Status#Failure` will cause the stage to fail with the cause carried in the `Failure` message.
|
* An `akka.util.Status#Failure` will cause the operator to fail with the cause carried in the `Failure` message.
|
||||||
*
|
*
|
||||||
* Defaults to parallelism of 2 messages in flight, since while one ask message may be being worked on, the second one
|
* Defaults to parallelism of 2 messages in flight, since while one ask message may be being worked on, the second one
|
||||||
* may still be in the mailbox, so defaulting to sending the second one a bit earlier than when the first ask has replied maintains
|
* may still be in the mailbox, so defaulting to sending the second one a bit earlier than when the first ask has replied maintains
|
||||||
* a slightly healthier throughput.
|
* a slightly healthier throughput.
|
||||||
*
|
*
|
||||||
* The stage fails with an [[akka.stream.WatchedActorTerminatedException]] if the target actor is terminated.
|
* The operator fails with an [[akka.stream.WatchedActorTerminatedException]] if the target actor is terminated.
|
||||||
*
|
*
|
||||||
* Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute.
|
* Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute.
|
||||||
*
|
*
|
||||||
|
|
@ -1346,13 +1346,13 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
|
||||||
* The `mapTo` class parameter is used to cast the incoming responses to the expected response type.
|
* The `mapTo` class parameter is used to cast the incoming responses to the expected response type.
|
||||||
*
|
*
|
||||||
* Similar to the plain ask pattern, the target actor is allowed to reply with `akka.util.Status`.
|
* Similar to the plain ask pattern, the target actor is allowed to reply with `akka.util.Status`.
|
||||||
* An `akka.util.Status#Failure` will cause the stage to fail with the cause carried in the `Failure` message.
|
* An `akka.util.Status#Failure` will cause the operator to fail with the cause carried in the `Failure` message.
|
||||||
*
|
*
|
||||||
* Parallelism limits how many asks can be "in flight" at the same time.
|
* Parallelism limits how many asks can be "in flight" at the same time.
|
||||||
* Please note that the elements emitted by this stage are in-order with regards to the asks being issued
|
* Please note that the elements emitted by this operator are in-order with regards to the asks being issued
|
||||||
* (i.e. same behaviour as mapAsync).
|
* (i.e. same behaviour as mapAsync).
|
||||||
*
|
*
|
||||||
* The stage fails with an [[akka.stream.WatchedActorTerminatedException]] if the target actor is terminated.
|
* The operator fails with an [[akka.stream.WatchedActorTerminatedException]] if the target actor is terminated.
|
||||||
*
|
*
|
||||||
* Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute.
|
* Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute.
|
||||||
*
|
*
|
||||||
|
|
@ -1370,7 +1370,7 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
|
||||||
new Source(delegate.ask[S](parallelism)(ref)(timeout, ClassTag(mapTo)))
|
new Source(delegate.ask[S](parallelism)(ref)(timeout, ClassTag(mapTo)))
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* The stage fails with an [[akka.stream.WatchedActorTerminatedException]] if the target actor is terminated.
|
* The operator fails with an [[akka.stream.WatchedActorTerminatedException]] if the target actor is terminated.
|
||||||
*
|
*
|
||||||
* '''Emits when''' upstream emits
|
* '''Emits when''' upstream emits
|
||||||
*
|
*
|
||||||
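A hedged sketch of the `ask` operator; `workerRef`, the reply type and the timeout are assumptions, and `mat` is an assumed materializer:

{{{
// each element is sent to workerRef with the ask pattern; replies are cast to String
Source.range(1, 10)
  .ask(2, workerRef, String.class, new Timeout(2, TimeUnit.SECONDS))
  .runForeach(System.out::println, mat);
}}}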
|
|
@ -2426,7 +2426,7 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
|
||||||
new Source(delegate.flatMapMerge(breadth, o ⇒ f(o)))
|
new Source(delegate.flatMapMerge(breadth, o ⇒ f(o)))
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* If the first element has not passed through this stage before the provided timeout, the stream is failed
|
* If the first element has not passed through this operator before the provided timeout, the stream is failed
|
||||||
* with a [[java.util.concurrent.TimeoutException]].
|
* with a [[java.util.concurrent.TimeoutException]].
|
||||||
*
|
*
|
||||||
* '''Emits when''' upstream emits an element
|
* '''Emits when''' upstream emits an element
|
||||||
|
|
@ -2443,7 +2443,7 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
|
||||||
new Source(delegate.initialTimeout(timeout))
|
new Source(delegate.initialTimeout(timeout))
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* If the first element has not passed through this stage before the provided timeout, the stream is failed
|
* If the first element has not passed through this operator before the provided timeout, the stream is failed
|
||||||
* with a [[java.util.concurrent.TimeoutException]].
|
* with a [[java.util.concurrent.TimeoutException]].
|
||||||
*
|
*
|
||||||
* '''Emits when''' upstream emits an element
|
* '''Emits when''' upstream emits an element
|
||||||
|
|
@ -2559,7 +2559,7 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Injects additional elements if upstream does not emit for a configured amount of time. In other words, this
|
* Injects additional elements if upstream does not emit for a configured amount of time. In other words, this
|
||||||
* stage attempts to maintains a base rate of emitted elements towards the downstream.
|
* operator attempts to maintain a base rate of emitted elements towards the downstream.
|
||||||
*
|
*
|
||||||
* If the downstream backpressures then no element is injected until downstream demand arrives. Injected elements
|
* If the downstream backpressures then no element is injected until downstream demand arrives. Injected elements
|
||||||
* do not accumulate during this period.
|
* do not accumulate during this period.
|
||||||
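A hedged sketch of injecting heartbeat elements while the upstream is idle (the tick source, element values, interval and assumed materializer `mat` are illustrative):

{{{
// the real source emits every 5 seconds; "keepalive" is injected after 1 second of silence
Source.tick(FiniteDuration.create(0, TimeUnit.SECONDS), FiniteDuration.create(5, TimeUnit.SECONDS), "real-element")
  .keepAlive(FiniteDuration.create(1, TimeUnit.SECONDS), () -> "keepalive")
  .runForeach(System.out::println, mat);
}}}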
|
|
@ -2581,7 +2581,7 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Injects additional elements if upstream does not emit for a configured amount of time. In other words, this
|
* Injects additional elements if upstream does not emit for a configured amount of time. In other words, this
|
||||||
* stage attempts to maintains a base rate of emitted elements towards the downstream.
|
* operator attempts to maintain a base rate of emitted elements towards the downstream.
|
||||||
*
|
*
|
||||||
* If the downstream backpressures then no element is injected until downstream demand arrives. Injected elements
|
* If the downstream backpressures then no element is injected until downstream demand arrives. Injected elements
|
||||||
* do not accumulate during this period.
|
* do not accumulate during this period.
|
||||||
|
|
@ -2600,7 +2600,7 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
|
||||||
keepAlive(maxIdle.asScala, injectedElem)
|
keepAlive(maxIdle.asScala, injectedElem)
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Sends elements downstream with speed limited to `elements/per`. In other words, this stage set the maximum rate
|
* Sends elements downstream with speed limited to `elements/per`. In other words, this operator sets the maximum rate
|
||||||
* for emitting messages. This operator works for streams where all elements have the same cost or length.
|
* for emitting messages. This operator works for streams where all elements have the same cost or length.
|
||||||
*
|
*
|
||||||
* Throttle implements the token bucket model. There is a bucket with a given token capacity (burst size).
|
* Throttle implements the token bucket model. There is a bucket with a given token capacity (burst size).
|
||||||
|
|
@ -2633,7 +2633,7 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
|
||||||
new Source(delegate.throttle(elements, per.asScala))
|
new Source(delegate.throttle(elements, per.asScala))
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Sends elements downstream with speed limited to `elements/per`. In other words, this stage set the maximum rate
|
* Sends elements downstream with speed limited to `elements/per`. In other words, this operator sets the maximum rate
|
||||||
* for emitting messages. This operator works for streams where all elements have the same cost or length.
|
* for emitting messages. This operator works for streams where all elements have the same cost or length.
|
||||||
*
|
*
|
||||||
* Throttle implements the token bucket model. There is a bucket with a given token capacity (burst size or maximumBurst).
|
* Throttle implements the token bucket model. There is a bucket with a given token capacity (burst size or maximumBurst).
|
||||||
|
|
@ -2675,7 +2675,7 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
|
||||||
new Source(delegate.throttle(elements, per, maximumBurst, mode))
|
new Source(delegate.throttle(elements, per, maximumBurst, mode))
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Sends elements downstream with speed limited to `elements/per`. In other words, this stage set the maximum rate
|
* Sends elements downstream with speed limited to `elements/per`. In other words, this operator sets the maximum rate
|
||||||
* for emitting messages. This operator works for streams where all elements have the same cost or length.
|
* for emitting messages. This operator works for streams where all elements have the same cost or length.
|
||||||
*
|
*
|
||||||
* Throttle implements the token bucket model. There is a bucket with a given token capacity (burst size or maximumBurst).
|
* Throttle implements the token bucket model. There is a bucket with a given token capacity (burst size or maximumBurst).
|
||||||
|
|
|
||||||
|
|
@ -69,7 +69,7 @@ class SubFlow[In, Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flow[I
|
||||||
* Flatten the sub-flows back into the super-flow by concatenating them.
|
* Flatten the sub-flows back into the super-flow by concatenating them.
|
||||||
* This is usually a bad idea when combined with `groupBy` since it can
|
* This is usually a bad idea when combined with `groupBy` since it can
|
||||||
* easily lead to deadlock—the concatenation does not consume from the second
|
* easily lead to deadlock—the concatenation does not consume from the second
|
||||||
* substream until the first has finished and the `groupBy` stage will get
|
* substream until the first has finished and the `groupBy` operator will get
|
||||||
* back-pressure from the second stream.
|
* back-pressure from the second stream.
|
||||||
*
|
*
|
||||||
* This is identical in effect to `mergeSubstreamsWithParallelism(1)`.
|
* This is identical in effect to `mergeSubstreamsWithParallelism(1)`.
|
||||||
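A hedged sketch contrasting with the deadlock warning above: after `groupBy` it is usually safer to merge the substreams back rather than concatenate them (the grouping key and assumed materializer `mat` are illustrative; shown on a `Source` for brevity):

{{{
Source.range(1, 10)
  .groupBy(2, i -> i % 2)   // two substreams: even and odd keys
  .map(i -> i * 10)         // runs independently per substream
  .mergeSubstreams()        // safe with groupBy; concatSubstreams() could deadlock here
  .runForeach(System.out::println, mat);
}}}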
|
|
@ -147,11 +147,11 @@ class SubFlow[In, Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flow[I
|
||||||
* This operation is useful for inspecting the passed through element, usually by means of side-effecting
|
* This operation is useful for inspecting the passed through element, usually by means of side-effecting
|
||||||
* operations (such as `println`, or emitting metrics), for each element without having to modify it.
|
* operations (such as `println`, or emitting metrics), for each element without having to modify it.
|
||||||
*
|
*
|
||||||
* For logging signals (elements, completion, error) consider using the [[log]] stage instead,
|
* For logging signals (elements, completion, error) consider using the [[log]] operator instead,
|
||||||
* along with appropriate `ActorAttributes.logLevels`.
|
* along with appropriate `ActorAttributes.logLevels`.
|
||||||
*
|
*
|
||||||
* '''Emits when''' upstream emits an element; the same element will be passed to the attached function,
|
* '''Emits when''' upstream emits an element; the same element will be passed to the attached function,
|
||||||
* as well as to the downstream stage
|
* as well as to the downstream operator
|
||||||
*
|
*
|
||||||
* '''Backpressures when''' downstream backpressures
|
* '''Backpressures when''' downstream backpressures
|
||||||
*
|
*
|
||||||
|
|
@ -869,7 +869,7 @@ class SubFlow[In, Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flow[I
|
||||||
/**
|
/**
|
||||||
* Recover allows to send last element on failure and gracefully complete the stream
|
* Recover allows to send last element on failure and gracefully complete the stream
|
||||||
* Since the underlying failure signal onError arrives out-of-band, it might jump over existing elements.
|
* Since the underlying failure signal onError arrives out-of-band, it might jump over existing elements.
|
||||||
* This stage can recover the failure signal, but not the skipped elements, which will be dropped.
|
* This operator can recover the failure signal, but not the skipped elements, which will be dropped.
|
||||||
*
|
*
|
||||||
* Throwing an exception inside `recover` _will_ be logged on ERROR level automatically.
|
* Throwing an exception inside `recover` _will_ be logged on ERROR level automatically.
|
||||||
*
|
*
|
||||||
|
|
@ -891,7 +891,7 @@ class SubFlow[In, Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flow[I
|
||||||
* Source may be materialized.
|
* Source may be materialized.
|
||||||
*
|
*
|
||||||
* Since the underlying failure signal onError arrives out-of-band, it might jump over existing elements.
|
* Since the underlying failure signal onError arrives out-of-band, it might jump over existing elements.
|
||||||
* This stage can recover the failure signal, but not the skipped elements, which will be dropped.
|
* This operator can recover the failure signal, but not the skipped elements, which will be dropped.
|
||||||
*
|
*
|
||||||
* Throwing an exception inside ``recoverWith`` _will_ be logged on ERROR level automatically.
|
* Throwing an exception inside ``recoverWith`` _will_ be logged on ERROR level automatically.
|
||||||
*
|
*
|
||||||
|
|
@ -918,7 +918,7 @@ class SubFlow[In, Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flow[I
|
||||||
* A negative `attempts` number is interpreted as "infinite", which results in the exact same behavior as `recoverWith`.
|
* A negative `attempts` number is interpreted as "infinite", which results in the exact same behavior as `recoverWith`.
|
||||||
*
|
*
|
||||||
* Since the underlying failure signal onError arrives out-of-band, it might jump over existing elements.
|
* Since the underlying failure signal onError arrives out-of-band, it might jump over existing elements.
|
||||||
* This stage can recover the failure signal, but not the skipped elements, which will be dropped.
|
* This operator can recover the failure signal, but not the skipped elements, which will be dropped.
|
||||||
*
|
*
|
||||||
* Throwing an exception inside `recoverWithRetries` _will_ be logged on ERROR level automatically.
|
* Throwing an exception inside `recoverWithRetries` _will_ be logged on ERROR level automatically.
|
||||||
*
|
*
|
||||||
|
|
@ -936,12 +936,12 @@ class SubFlow[In, Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flow[I
|
||||||
new SubFlow(delegate.recoverWithRetries(attempts, pf))
|
new SubFlow(delegate.recoverWithRetries(attempts, pf))
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* While similar to [[recover]] this stage can be used to transform an error signal to a different one *without* logging
|
* While similar to [[recover]] this operator can be used to transform an error signal to a different one *without* logging
|
||||||
* it as an error in the process. So in that sense it is NOT exactly equivalent to `recover(t => throw t2)` since recover
|
* it as an error in the process. So in that sense it is NOT exactly equivalent to `recover(t => throw t2)` since recover
|
||||||
* would log the `t2` error.
|
* would log the `t2` error.
|
||||||
*
|
*
|
||||||
* Since the underlying failure signal onError arrives out-of-band, it might jump over existing elements.
|
* Since the underlying failure signal onError arrives out-of-band, it might jump over existing elements.
|
||||||
* This stage can recover the failure signal, but not the skipped elements, which will be dropped.
|
* This operator can recover the failure signal, but not the skipped elements, which will be dropped.
|
||||||
*
|
*
|
||||||
* Similarly to [[recover]], throwing an exception inside `mapError` _will_ be logged.
|
* Similarly to [[recover]], throwing an exception inside `mapError` _will_ be logged.
|
||||||
*
|
*
|
||||||
|
|
@ -1353,7 +1353,7 @@ class SubFlow[In, Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flow[I
|
||||||
* from producing elements by asserting back-pressure until its time comes or it gets
|
* from producing elements by asserting back-pressure until its time comes or it gets
|
||||||
* cancelled.
|
* cancelled.
|
||||||
*
|
*
|
||||||
* On errors the stage is failed regardless of source of the error.
|
* On errors the operator is failed regardless of source of the error.
|
||||||
*
|
*
|
||||||
* '''Emits when''' element is available from first stream or first stream closed without emitting any elements and an element
|
* '''Emits when''' element is available from first stream or first stream closed without emitting any elements and an element
|
||||||
* is available from the second stream
|
* is available from the second stream
|
||||||
|
|
@ -1522,7 +1522,7 @@ class SubFlow[In, Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flow[I
|
||||||
new SubFlow(delegate.zipWithIndex.map { case (elem, index) ⇒ akka.japi.Pair[Out, java.lang.Long](elem, index) })
|
new SubFlow(delegate.zipWithIndex.map { case (elem, index) ⇒ akka.japi.Pair[Out, java.lang.Long](elem, index) })
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* If the first element has not passed through this stage before the provided timeout, the stream is failed
|
* If the first element has not passed through this operator before the provided timeout, the stream is failed
|
||||||
* with a [[java.util.concurrent.TimeoutException]].
|
* with a [[java.util.concurrent.TimeoutException]].
|
||||||
*
|
*
|
||||||
* '''Emits when''' upstream emits an element
|
* '''Emits when''' upstream emits an element
|
||||||
|
|
@ -1539,7 +1539,7 @@ class SubFlow[In, Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flow[I
|
||||||
new SubFlow(delegate.initialTimeout(timeout))
|
new SubFlow(delegate.initialTimeout(timeout))
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* If the first element has not passed through this stage before the provided timeout, the stream is failed
|
* If the first element has not passed through this operator before the provided timeout, the stream is failed
|
||||||
* with a [[java.util.concurrent.TimeoutException]].
|
* with a [[java.util.concurrent.TimeoutException]].
|
||||||
*
|
*
|
||||||
* '''Emits when''' upstream emits an element
|
* '''Emits when''' upstream emits an element
|
||||||
|
|
@ -1655,7 +1655,7 @@ class SubFlow[In, Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flow[I
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Injects additional elements if upstream does not emit for a configured amount of time. In other words, this
|
* Injects additional elements if upstream does not emit for a configured amount of time. In other words, this
|
||||||
* stage attempts to maintains a base rate of emitted elements towards the downstream.
|
* operator attempts to maintain a base rate of emitted elements towards the downstream.
|
||||||
*
|
*
|
||||||
* If the downstream backpressures then no element is injected until downstream demand arrives. Injected elements
|
* If the downstream backpressures then no element is injected until downstream demand arrives. Injected elements
|
||||||
* do not accumulate during this period.
|
* do not accumulate during this period.
|
||||||
|
|
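A minimal sketch of the `keepAlive` behaviour documented here (element values are illustrative): heartbeats are injected only while the upstream stays silent and downstream demand is available.
{{{
import scala.concurrent.duration._
import akka.stream.scaladsl.Source

// upstream emits every 10s; a "heartbeat" is injected after 3s of silence
val withHeartbeats = Source.tick(0.seconds, 10.seconds, "data")
  .keepAlive(3.seconds, () => "heartbeat")
}}}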
@ -1677,7 +1677,7 @@ class SubFlow[In, Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flow[I
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Injects additional elements if upstream does not emit for a configured amount of time. In other words, this
|
* Injects additional elements if upstream does not emit for a configured amount of time. In other words, this
|
||||||
* stage attempts to maintains a base rate of emitted elements towards the downstream.
|
* operator attempts to maintain a base rate of emitted elements towards the downstream.
|
||||||
*
|
*
|
||||||
* If the downstream backpressures then no element is injected until downstream demand arrives. Injected elements
|
* If the downstream backpressures then no element is injected until downstream demand arrives. Injected elements
|
||||||
* do not accumulate during this period.
|
* do not accumulate during this period.
|
||||||
|
|
@ -1696,7 +1696,7 @@ class SubFlow[In, Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flow[I
|
||||||
keepAlive(maxIdle.asScala, injectedElem)
|
keepAlive(maxIdle.asScala, injectedElem)
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Sends elements downstream with speed limited to `elements/per`. In other words, this stage set the maximum rate
|
* Sends elements downstream with speed limited to `elements/per`. In other words, this operator sets the maximum rate
|
||||||
* for emitting messages. This operator works for streams where all elements have the same cost or length.
|
* for emitting messages. This operator works for streams where all elements have the same cost or length.
|
||||||
*
|
*
|
||||||
* Throttle implements the token bucket model. There is a bucket with a given token capacity (burst size).
|
* Throttle implements the token bucket model. There is a bucket with a given token capacity (burst size).
|
||||||
|
|
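A sketch of the token-bucket throttling described above, using the overload with an explicit burst size (the numbers are arbitrary):
{{{
import scala.concurrent.duration._
import akka.stream.ThrottleMode
import akka.stream.scaladsl.Source

// at most 10 elements per second, allowing bursts of up to 20 after idle periods
val limited = Source(1 to 1000).throttle(10, 1.second, 20, ThrottleMode.Shaping)
}}}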
@ -1729,7 +1729,7 @@ class SubFlow[In, Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flow[I
|
||||||
new SubFlow(delegate.throttle(elements, per.asScala))
|
new SubFlow(delegate.throttle(elements, per.asScala))
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Sends elements downstream with speed limited to `elements/per`. In other words, this stage set the maximum rate
|
* Sends elements downstream with speed limited to `elements/per`. In other words, this operator sets the maximum rate
|
||||||
* for emitting messages. This operator works for streams where all elements have the same cost or length.
|
* for emitting messages. This operator works for streams where all elements have the same cost or length.
|
||||||
*
|
*
|
||||||
* Throttle implements the token bucket model. There is a bucket with a given token capacity (burst size or maximumBurst).
|
* Throttle implements the token bucket model. There is a bucket with a given token capacity (burst size or maximumBurst).
|
||||||
|
|
@ -1771,7 +1771,7 @@ class SubFlow[In, Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flow[I
|
||||||
new SubFlow(delegate.throttle(elements, per, maximumBurst, mode))
|
new SubFlow(delegate.throttle(elements, per, maximumBurst, mode))
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Sends elements downstream with speed limited to `elements/per`. In other words, this stage set the maximum rate
|
* Sends elements downstream with speed limited to `elements/per`. In other words, this operator sets the maximum rate
|
||||||
* for emitting messages. This operator works for streams where all elements have the same cost or length.
|
* for emitting messages. This operator works for streams where all elements have the same cost or length.
|
||||||
*
|
*
|
||||||
* Throttle implements the token bucket model. There is a bucket with a given token capacity (burst size or maximumBurst).
|
* Throttle implements the token bucket model. There is a bucket with a given token capacity (burst size or maximumBurst).
|
||||||
|
|
@ -2045,7 +2045,7 @@ class SubFlow[In, Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flow[I
|
||||||
* of attributes. This means that further calls will not be able to remove these
|
* of attributes. This means that further calls will not be able to remove these
|
||||||
* attributes, but instead add new ones. Note that this
|
* attributes, but instead add new ones. Note that this
|
||||||
* operation has no effect on an empty Flow (because the attributes apply
|
* operation has no effect on an empty Flow (because the attributes apply
|
||||||
* only to the contained processing stages).
|
* only to the contained processing operators).
|
||||||
*/
|
*/
|
||||||
def withAttributes(attr: Attributes): SubFlow[In, Out, Mat] =
|
def withAttributes(attr: Attributes): SubFlow[In, Out, Mat] =
|
||||||
new SubFlow(delegate.withAttributes(attr))
|
new SubFlow(delegate.withAttributes(attr))
|
||||||
|
|
@ -2054,7 +2054,7 @@ class SubFlow[In, Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flow[I
|
||||||
* Add the given attributes to this Source. Further calls to `withAttributes`
|
* Add the given attributes to this Source. Further calls to `withAttributes`
|
||||||
* will not remove these attributes. Note that this
|
* will not remove these attributes. Note that this
|
||||||
* operation has no effect on an empty Flow (because the attributes apply
|
* operation has no effect on an empty Flow (because the attributes apply
|
||||||
* only to the contained processing stages).
|
* only to the contained processing operators).
|
||||||
*/
|
*/
|
||||||
def addAttributes(attr: Attributes): SubFlow[In, Out, Mat] =
|
def addAttributes(attr: Attributes): SubFlow[In, Out, Mat] =
|
||||||
new SubFlow(delegate.addAttributes(attr))
|
new SubFlow(delegate.addAttributes(attr))
|
||||||
|
|
|
||||||
|
|
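A short sketch of how the attribute methods documented in these hunks are typically combined (the dispatcher name is a made-up config path): the attributes apply to the operators already contained in the flow at this point.
{{{
import akka.stream.{ ActorAttributes, Attributes }
import akka.stream.scaladsl.Flow

// name the section and run the already-contained operators on a dedicated dispatcher
val tuned = Flow[Int]
  .map(_ * 2)
  .withAttributes(Attributes.name("doubler"))
  .addAttributes(ActorAttributes.dispatcher("my-blocking-dispatcher"))
}}}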
@ -64,7 +64,7 @@ class SubSource[Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source[O
|
||||||
* Flatten the sub-flows back into the super-source by concatenating them.
|
* Flatten the sub-flows back into the super-source by concatenating them.
|
||||||
* This is usually a bad idea when combined with `groupBy` since it can
|
* This is usually a bad idea when combined with `groupBy` since it can
|
||||||
* easily lead to deadlock—the concatenation does not consume from the second
|
* easily lead to deadlock—the concatenation does not consume from the second
|
||||||
* substream until the first has finished and the `groupBy` stage will get
|
* substream until the first has finished and the `groupBy` operator will get
|
||||||
* back-pressure from the second stream.
|
* back-pressure from the second stream.
|
||||||
*
|
*
|
||||||
* This is identical in effect to `mergeSubstreamsWithParallelism(1)`.
|
* This is identical in effect to `mergeSubstreamsWithParallelism(1)`.
|
||||||
|
|
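To make the deadlock remark above concrete, a hedged sketch that flattens `groupBy` substreams with `mergeSubstreams`, which does not wait for earlier substreams to finish the way `concatSubstreams` does:
{{{
import akka.stream.scaladsl.Source

val perKeyCounts = Source(List("a", "b", "a", "c"))
  .groupBy(16, identity)                           // maxSubstreams = 16
  .map(_ -> 1)
  .reduce((l, r) => (l._1, l._2 + r._2))
  .mergeSubstreams
}}}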
@ -137,11 +137,11 @@ class SubSource[Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source[O
|
||||||
* This operation is useful for inspecting the passed through element, usually by means of side-effecting
|
* This operation is useful for inspecting the passed through element, usually by means of side-effecting
|
||||||
* operations (such as `println`, or emitting metrics), for each element without having to modify it.
|
* operations (such as `println`, or emitting metrics), for each element without having to modify it.
|
||||||
*
|
*
|
||||||
* For logging signals (elements, completion, error) consider using the [[log]] stage instead,
|
* For logging signals (elements, completion, error) consider using the [[log]] operator instead,
|
||||||
* along with appropriate `ActorAttributes.logLevels`.
|
* along with appropriate `ActorAttributes.logLevels`.
|
||||||
*
|
*
|
||||||
* '''Emits when''' upstream emits an element; the same element will be passed to the attached function,
|
* '''Emits when''' upstream emits an element; the same element will be passed to the attached function,
|
||||||
* as well as to the downstream stage
|
* as well as to the downstream operator
|
||||||
*
|
*
|
||||||
* '''Backpressures when''' downstream backpressures
|
* '''Backpressures when''' downstream backpressures
|
||||||
*
|
*
|
||||||
|
|
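A hedged sketch of the inspection operator described here together with the suggested `log` alternative (the label and log level are arbitrary choices):
{{{
import akka.event.Logging
import akka.stream.ActorAttributes
import akka.stream.scaladsl.Source

val observed = Source(1 to 5)
  .wireTap(elem => println(s"saw $elem"))  // side-effecting peek at each element
  .log("after-wiretap")                    // logs elements, completion and errors
  .withAttributes(ActorAttributes.logLevels(onElement = Logging.InfoLevel))
}}}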
@ -857,7 +857,7 @@ class SubSource[Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source[O
|
||||||
/**
|
/**
|
||||||
* Recover allows sending a last element on failure and gracefully completing the stream.
|
* Recover allows sending a last element on failure and gracefully completing the stream.
|
||||||
* Since the underlying failure signal onError arrives out-of-band, it might jump over existing elements.
|
* Since the underlying failure signal onError arrives out-of-band, it might jump over existing elements.
|
||||||
* This stage can recover the failure signal, but not the skipped elements, which will be dropped.
|
* This operator can recover the failure signal, but not the skipped elements, which will be dropped.
|
||||||
*
|
*
|
||||||
* '''Emits when''' element is available from the upstream or upstream is failed and pf returns an element
|
* '''Emits when''' element is available from the upstream or upstream is failed and pf returns an element
|
||||||
*
|
*
|
||||||
|
|
@ -877,7 +877,7 @@ class SubSource[Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source[O
|
||||||
* Source may be materialized.
|
* Source may be materialized.
|
||||||
*
|
*
|
||||||
* Since the underlying failure signal onError arrives out-of-band, it might jump over existing elements.
|
* Since the underlying failure signal onError arrives out-of-band, it might jump over existing elements.
|
||||||
* This stage can recover the failure signal, but not the skipped elements, which will be dropped.
|
* This operator can recover the failure signal, but not the skipped elements, which will be dropped.
|
||||||
*
|
*
|
||||||
* '''Emits when''' element is available from the upstream or upstream is failed and element is available
|
* '''Emits when''' element is available from the upstream or upstream is failed and element is available
|
||||||
* from alternative Source
|
* from alternative Source
|
||||||
|
|
@ -902,7 +902,7 @@ class SubSource[Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source[O
|
||||||
* A negative `attempts` number is interpreted as "infinite", which results in the exact same behavior as `recoverWith`.
|
* A negative `attempts` number is interpreted as "infinite", which results in the exact same behavior as `recoverWith`.
|
||||||
*
|
*
|
||||||
* Since the underlying failure signal onError arrives out-of-band, it might jump over existing elements.
|
* Since the underlying failure signal onError arrives out-of-band, it might jump over existing elements.
|
||||||
* This stage can recover the failure signal, but not the skipped elements, which will be dropped.
|
* This operator can recover the failure signal, but not the skipped elements, which will be dropped.
|
||||||
*
|
*
|
||||||
* '''Emits when''' element is available from the upstream or upstream is failed and element is available
|
* '''Emits when''' element is available from the upstream or upstream is failed and element is available
|
||||||
* from alternative Source
|
* from alternative Source
|
||||||
|
|
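A small sketch contrasting the recovery operators from these hunks, with a deliberately failing upstream (names and values are illustrative):
{{{
import akka.stream.scaladsl.Source

val flaky = Source(1 to 3).map(i => if (i == 3) throw new RuntimeException("boom") else i)

// emits 1, 2, then switches to the fallback source at most once
val healed = flaky.recoverWithRetries(1, { case _: RuntimeException => Source(List(100, 200)) })

// emits 1, 2, then a single final element and completes
val lastResort = flaky.recover { case _: RuntimeException => -1 }
}}}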
@ -918,12 +918,12 @@ class SubSource[Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source[O
|
||||||
new SubSource(delegate.recoverWithRetries(attempts, pf))
|
new SubSource(delegate.recoverWithRetries(attempts, pf))
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* While similar to [[recover]] this stage can be used to transform an error signal to a different one *without* logging
|
* While similar to [[recover]] this operator can be used to transform an error signal to a different one *without* logging
|
||||||
* it as an error in the process. So in that sense it is NOT exactly equivalent to `recover(t => throw t2)` since recover
|
* it as an error in the process. So in that sense it is NOT exactly equivalent to `recover(t => throw t2)` since recover
|
||||||
* would log the `t2` error.
|
* would log the `t2` error.
|
||||||
*
|
*
|
||||||
* Since the underlying failure signal onError arrives out-of-band, it might jump over existing elements.
|
* Since the underlying failure signal onError arrives out-of-band, it might jump over existing elements.
|
||||||
* This stage can recover the failure signal, but not the skipped elements, which will be dropped.
|
* This operator can recover the failure signal, but not the skipped elements, which will be dropped.
|
||||||
*
|
*
|
||||||
* Similarly to [[recover]], throwing an exception inside `mapError` _will_ be logged.
|
* Similarly to [[recover]], throwing an exception inside `mapError` _will_ be logged.
|
||||||
*
|
*
|
||||||
|
|
@ -1334,7 +1334,7 @@ class SubSource[Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source[O
|
||||||
* from producing elements by asserting back-pressure until its time comes or it gets
|
* from producing elements by asserting back-pressure until its time comes or it gets
|
||||||
* cancelled.
|
* cancelled.
|
||||||
*
|
*
|
||||||
* On errors the stage is failed regardless of source of the error.
|
* On errors the operator is failed regardless of source of the error.
|
||||||
*
|
*
|
||||||
* '''Emits when''' element is available from first stream or first stream closed without emitting any elements and an element
|
* '''Emits when''' element is available from first stream or first stream closed without emitting any elements and an element
|
||||||
* is available from the second stream
|
* is available from the second stream
|
||||||
|
|
@ -1504,7 +1504,7 @@ class SubSource[Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source[O
|
||||||
new SubSource(delegate.zipWithIndex.map { case (elem, index) ⇒ akka.japi.Pair(elem, index) })
|
new SubSource(delegate.zipWithIndex.map { case (elem, index) ⇒ akka.japi.Pair(elem, index) })
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* If the first element has not passed through this stage before the provided timeout, the stream is failed
|
* If the first element has not passed through this operator before the provided timeout, the stream is failed
|
||||||
* with a [[java.util.concurrent.TimeoutException]].
|
* with a [[java.util.concurrent.TimeoutException]].
|
||||||
*
|
*
|
||||||
* '''Emits when''' upstream emits an element
|
* '''Emits when''' upstream emits an element
|
||||||
|
|
@ -1521,7 +1521,7 @@ class SubSource[Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source[O
|
||||||
new SubSource(delegate.initialTimeout(timeout))
|
new SubSource(delegate.initialTimeout(timeout))
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* If the first element has not passed through this stage before the provided timeout, the stream is failed
|
* If the first element has not passed through this operator before the provided timeout, the stream is failed
|
||||||
* with a [[java.util.concurrent.TimeoutException]].
|
* with a [[java.util.concurrent.TimeoutException]].
|
||||||
*
|
*
|
||||||
* '''Emits when''' upstream emits an element
|
* '''Emits when''' upstream emits an element
|
||||||
|
|
@ -1637,7 +1637,7 @@ class SubSource[Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source[O
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Injects additional elements if upstream does not emit for a configured amount of time. In other words, this
|
* Injects additional elements if upstream does not emit for a configured amount of time. In other words, this
|
||||||
* stage attempts to maintains a base rate of emitted elements towards the downstream.
|
* operator attempts to maintain a base rate of emitted elements towards the downstream.
|
||||||
*
|
*
|
||||||
* If the downstream backpressures then no element is injected until downstream demand arrives. Injected elements
|
* If the downstream backpressures then no element is injected until downstream demand arrives. Injected elements
|
||||||
* do not accumulate during this period.
|
* do not accumulate during this period.
|
||||||
|
|
@ -1659,7 +1659,7 @@ class SubSource[Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source[O
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Injects additional elements if upstream does not emit for a configured amount of time. In other words, this
|
* Injects additional elements if upstream does not emit for a configured amount of time. In other words, this
|
||||||
* stage attempts to maintains a base rate of emitted elements towards the downstream.
|
* operator attempts to maintain a base rate of emitted elements towards the downstream.
|
||||||
*
|
*
|
||||||
* If the downstream backpressures then no element is injected until downstream demand arrives. Injected elements
|
* If the downstream backpressures then no element is injected until downstream demand arrives. Injected elements
|
||||||
* do not accumulate during this period.
|
* do not accumulate during this period.
|
||||||
|
|
@ -1678,7 +1678,7 @@ class SubSource[Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source[O
|
||||||
keepAlive(maxIdle.asScala, injectedElem)
|
keepAlive(maxIdle.asScala, injectedElem)
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Sends elements downstream with speed limited to `elements/per`. In other words, this stage set the maximum rate
|
* Sends elements downstream with speed limited to `elements/per`. In other words, this operator sets the maximum rate
|
||||||
* for emitting messages. This operator works for streams where all elements have the same cost or length.
|
* for emitting messages. This operator works for streams where all elements have the same cost or length.
|
||||||
*
|
*
|
||||||
* Throttle implements the token bucket model. There is a bucket with a given token capacity (burst size).
|
* Throttle implements the token bucket model. There is a bucket with a given token capacity (burst size).
|
||||||
|
|
@ -1711,7 +1711,7 @@ class SubSource[Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source[O
|
||||||
new SubSource(delegate.throttle(elements, per.asScala))
|
new SubSource(delegate.throttle(elements, per.asScala))
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Sends elements downstream with speed limited to `elements/per`. In other words, this stage set the maximum rate
|
* Sends elements downstream with speed limited to `elements/per`. In other words, this operator sets the maximum rate
|
||||||
* for emitting messages. This operator works for streams where all elements have the same cost or length.
|
* for emitting messages. This operator works for streams where all elements have the same cost or length.
|
||||||
*
|
*
|
||||||
* Throttle implements the token bucket model. There is a bucket with a given token capacity (burst size or maximumBurst).
|
* Throttle implements the token bucket model. There is a bucket with a given token capacity (burst size or maximumBurst).
|
||||||
|
|
@ -1753,7 +1753,7 @@ class SubSource[Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source[O
|
||||||
new SubSource(delegate.throttle(elements, per, maximumBurst, mode))
|
new SubSource(delegate.throttle(elements, per, maximumBurst, mode))
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Sends elements downstream with speed limited to `elements/per`. In other words, this stage set the maximum rate
|
* Sends elements downstream with speed limited to `elements/per`. In other words, this operator sets the maximum rate
|
||||||
* for emitting messages. This operator works for streams where all elements have the same cost or length.
|
* for emitting messages. This operator works for streams where all elements have the same cost or length.
|
||||||
*
|
*
|
||||||
* Throttle implements the token bucket model. There is a bucket with a given token capacity (burst size or maximumBurst).
|
* Throttle implements the token bucket model. There is a bucket with a given token capacity (burst size or maximumBurst).
|
||||||
|
|
@ -2027,7 +2027,7 @@ class SubSource[Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source[O
|
||||||
* of attributes. This means that further calls will not be able to remove these
|
* of attributes. This means that further calls will not be able to remove these
|
||||||
* attributes, but instead add new ones. Note that this
|
* attributes, but instead add new ones. Note that this
|
||||||
* operation has no effect on an empty Flow (because the attributes apply
|
* operation has no effect on an empty Flow (because the attributes apply
|
||||||
* only to the contained processing stages).
|
* only to the contained processing operators).
|
||||||
*/
|
*/
|
||||||
def withAttributes(attr: Attributes): SubSource[Out, Mat] =
|
def withAttributes(attr: Attributes): SubSource[Out, Mat] =
|
||||||
new SubSource(delegate.withAttributes(attr))
|
new SubSource(delegate.withAttributes(attr))
|
||||||
|
|
@ -2036,7 +2036,7 @@ class SubSource[Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source[O
|
||||||
* Add the given attributes to this Source. Further calls to `withAttributes`
|
* Add the given attributes to this Source. Further calls to `withAttributes`
|
||||||
* will not remove these attributes. Note that this
|
* will not remove these attributes. Note that this
|
||||||
* operation has no effect on an empty Flow (because the attributes apply
|
* operation has no effect on an empty Flow (because the attributes apply
|
||||||
* only to the contained processing stages).
|
* only to the contained processing operators).
|
||||||
*/
|
*/
|
||||||
def addAttributes(attr: Attributes): SubSource[Out, Mat] =
|
def addAttributes(attr: Attributes): SubSource[Out, Mat] =
|
||||||
new SubSource(delegate.addAttributes(attr))
|
new SubSource(delegate.addAttributes(attr))
|
||||||
|
|
|
||||||
|
|
@ -40,17 +40,17 @@ import scala.util.Try
|
||||||
* The TLS specification does not permit half-closing of the user data session
|
* The TLS specification does not permit half-closing of the user data session
|
||||||
* that it transports—to be precise a half-close will always promptly lead to a
|
* that it transports—to be precise a half-close will always promptly lead to a
|
||||||
* full close. This means that canceling the plaintext output or completing the
|
* full close. This means that canceling the plaintext output or completing the
|
||||||
* plaintext input of the SslTls stage will lead to full termination of the
|
* plaintext input of the SslTls operator will lead to full termination of the
|
||||||
* secure connection without regard to whether bytes are remaining to be sent or
|
* secure connection without regard to whether bytes are remaining to be sent or
|
||||||
* received, respectively. Especially for a client the common idiom of attaching
|
* received, respectively. Especially for a client the common idiom of attaching
|
||||||
* a finite Source to the plaintext input and transforming the plaintext response
|
* a finite Source to the plaintext input and transforming the plaintext response
|
||||||
* bytes coming out will not work out of the box due to early termination of the
|
* bytes coming out will not work out of the box due to early termination of the
|
||||||
* connection. For this reason there is a parameter that determines whether the
|
* connection. For this reason there is a parameter that determines whether the
|
||||||
* SslTls stage shall ignore completion and/or cancellation events, and the
|
* SslTls operator shall ignore completion and/or cancellation events, and the
|
||||||
* default is to ignore completion (in view of the client–server scenario). In
|
* default is to ignore completion (in view of the client–server scenario). In
|
||||||
* order to terminate the connection the client will then need to cancel the
|
* order to terminate the connection the client will then need to cancel the
|
||||||
* plaintext output as soon as all expected bytes have been received. When
|
* plaintext output as soon as all expected bytes have been received. When
|
||||||
* ignoring both types of events the stage will shut down once both events have
|
* ignoring both types of events the operator will shut down once both events have
|
||||||
* been received. See also [[TLSClosing]].
|
* been received. See also [[TLSClosing]].
|
||||||
*/
|
*/
|
||||||
object TLS {
|
object TLS {
|
||||||
|
|
|
||||||
|
|
@ -52,7 +52,7 @@ object Tcp extends ExtensionId[Tcp] with ExtensionIdProvider {
|
||||||
def unbind(): CompletionStage[Unit] = delegate.unbind().toJava
|
def unbind(): CompletionStage[Unit] = delegate.unbind().toJava
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @return A completion stage that is completed when manually unbound, or failed if the server fails
|
* @return A `CompletionStage` that is completed when manually unbound, or failed if the server fails
|
||||||
*/
|
*/
|
||||||
def whenUnbound(): CompletionStage[Done] = delegate.whenUnbound.toJava
|
def whenUnbound(): CompletionStage[Done] = delegate.whenUnbound.toJava
|
||||||
}
|
}
|
||||||
|
|
@ -165,7 +165,7 @@ class Tcp(system: ExtendedActorSystem) extends akka.actor.Extension {
|
||||||
*
|
*
|
||||||
* Note that the ByteString chunk boundaries are not retained across the network,
|
* Note that the ByteString chunk boundaries are not retained across the network,
|
||||||
* to achieve application level chunks you have to introduce explicit framing in your streams,
|
* to achieve application level chunks you have to introduce explicit framing in your streams,
|
||||||
* for example using the [[Framing]] stages.
|
* for example using the [[Framing]] operators.
|
||||||
*
|
*
|
||||||
* @param remoteAddress The remote address to connect to
|
* @param remoteAddress The remote address to connect to
|
||||||
* @param localAddress Optional local address for the connection
|
* @param localAddress Optional local address for the connection
|
||||||
|
|
@ -196,7 +196,7 @@ class Tcp(system: ExtendedActorSystem) extends akka.actor.Extension {
|
||||||
*
|
*
|
||||||
* Note that the ByteString chunk boundaries are not retained across the network,
|
* Note that the ByteString chunk boundaries are not retained across the network,
|
||||||
* to achieve application level chunks you have to introduce explicit framing in your streams,
|
* to achieve application level chunks you have to introduce explicit framing in your streams,
|
||||||
* for example using the [[Framing]] stages.
|
* for example using the [[Framing]] operators.
|
||||||
*/
|
*/
|
||||||
def outgoingConnection(host: String, port: Int): Flow[ByteString, ByteString, CompletionStage[OutgoingConnection]] =
|
def outgoingConnection(host: String, port: Int): Flow[ByteString, ByteString, CompletionStage[OutgoingConnection]] =
|
||||||
Flow.fromGraph(delegate.outgoingConnection(new InetSocketAddress(host, port))
|
Flow.fromGraph(delegate.outgoingConnection(new InetSocketAddress(host, port))
|
||||||
|
|
|
||||||
|
|
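To tie the framing remark to the connection API, a hedged sketch in the Scala DSL (host, port and delimiter are assumptions; an `ActorSystem` is assumed to be available):
{{{
import akka.actor.ActorSystem
import akka.util.ByteString
import akka.stream.scaladsl.{ Framing, Source, Tcp }

implicit val system: ActorSystem = ActorSystem("tcp-example")

val connection = Tcp(system).outgoingConnection("127.0.0.1", 8888)
val responseLines = Source.single(ByteString("hello\n"))
  .via(connection)
  .via(Framing.delimiter(ByteString("\n"), maximumFrameLength = 1024, allowTruncation = true))
  .map(_.utf8String)
}}}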
@ -158,7 +158,7 @@ final class BidiFlow[-I1, +O1, -I2, +O2, +Mat](
|
||||||
* of attributes. This means that further calls will not be able to remove these
|
* of attributes. This means that further calls will not be able to remove these
|
||||||
* attributes, but instead add new ones. Note that this
|
* attributes, but instead add new ones. Note that this
|
||||||
* operation has no effect on an empty Flow (because the attributes apply
|
* operation has no effect on an empty Flow (because the attributes apply
|
||||||
* only to the contained processing stages).
|
* only to the contained processing operators).
|
||||||
*/
|
*/
|
||||||
override def withAttributes(attr: Attributes): BidiFlow[I1, O1, I2, O2, Mat] =
|
override def withAttributes(attr: Attributes): BidiFlow[I1, O1, I2, O2, Mat] =
|
||||||
new BidiFlow(
|
new BidiFlow(
|
||||||
|
|
@ -170,7 +170,7 @@ final class BidiFlow[-I1, +O1, -I2, +O2, +Mat](
|
||||||
* Add the given attributes to this Source. Further calls to `withAttributes`
|
* Add the given attributes to this Source. Further calls to `withAttributes`
|
||||||
* will not remove these attributes. Note that this
|
* will not remove these attributes. Note that this
|
||||||
* operation has no effect on an empty Flow (because the attributes apply
|
* operation has no effect on an empty Flow (because the attributes apply
|
||||||
* only to the contained processing stages).
|
* only to the contained processing operators).
|
||||||
*/
|
*/
|
||||||
override def addAttributes(attr: Attributes): BidiFlow[I1, O1, I2, O2, Mat] =
|
override def addAttributes(attr: Attributes): BidiFlow[I1, O1, I2, O2, Mat] =
|
||||||
withAttributes(traversalBuilder.attributes and attr)
|
withAttributes(traversalBuilder.attributes and attr)
|
||||||
|
|
@ -285,7 +285,7 @@ object BidiFlow {
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Create a BidiFlow where the top and bottom flows are just one simple mapping
|
* Create a BidiFlow where the top and bottom flows are just one simple mapping
|
||||||
* stage each, expressed by the two functions.
|
* operator each, expressed by the two functions.
|
||||||
*/
|
*/
|
||||||
def fromFunctions[I1, O1, I2, O2](outbound: I1 ⇒ O1, inbound: I2 ⇒ O2): BidiFlow[I1, O1, I2, O2, NotUsed] =
|
def fromFunctions[I1, O1, I2, O2](outbound: I1 ⇒ O1, inbound: I2 ⇒ O2): BidiFlow[I1, O1, I2, O2, NotUsed] =
|
||||||
fromFlows(Flow[I1].map(outbound), Flow[I2].map(inbound))
|
fromFlows(Flow[I1].map(outbound), Flow[I2].map(inbound))
|
||||||
|
|
@ -294,9 +294,9 @@ object BidiFlow {
|
||||||
* If the time between two processed elements *in any direction* exceeds the provided timeout, the stream is failed
|
* If the time between two processed elements *in any direction* exceeds the provided timeout, the stream is failed
|
||||||
* with a [[scala.concurrent.TimeoutException]].
|
* with a [[scala.concurrent.TimeoutException]].
|
||||||
*
|
*
|
||||||
* There is a difference between this stage and having two idleTimeout Flows assembled into a BidiStage.
|
* There is a difference between this operator and having two idleTimeout Flows assembled into a BidiStage.
|
||||||
* If the timeout is configured to be 1 seconds, then this stage will not fail even though there are elements flowing
|
* If the timeout is configured to be 1 seconds, then this operator will not fail even though there are elements flowing
|
||||||
* every second in one direction, but no elements are flowing in the other direction. I.e. this stage considers
|
* every second in one direction, but no elements are flowing in the other direction. I.e. this operator considers
|
||||||
* the *joint* frequencies of the elements in both directions.
|
* the *joint* frequencies of the elements in both directions.
|
||||||
*/
|
*/
|
||||||
def bidirectionalIdleTimeout[I, O](timeout: FiniteDuration): BidiFlow[I, I, O, O, NotUsed] =
|
def bidirectionalIdleTimeout[I, O](timeout: FiniteDuration): BidiFlow[I, I, O, O, NotUsed] =
|
||||||
|
|
|
||||||
|
|
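A compact sketch combining the two methods documented here, `fromFunctions` and `bidirectionalIdleTimeout` (the codec and the timeout are illustrative):
{{{
import scala.concurrent.duration._
import akka.util.ByteString
import akka.stream.scaladsl.BidiFlow

// a trivial String <-> ByteString codec stacked on a joint idle timeout for both directions
val codec       = BidiFlow.fromFunctions((s: String) => ByteString(s), (b: ByteString) => b.utf8String)
val withTimeout = codec.atop(BidiFlow.bidirectionalIdleTimeout[ByteString, ByteString](5.seconds))
}}}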
@ -9,12 +9,12 @@ import akka.stream.stage.{ GraphStage, GraphStageLogic, InHandler, OutHandler }
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Allows coupling termination (cancellation, completion, erroring) of Sinks and Sources while creating a Flow from them.
|
* Allows coupling termination (cancellation, completion, erroring) of Sinks and Sources while creating a Flow from them.
|
||||||
* Similar to `Flow.fromSinkAndSource` however that API does not connect the completion signals of the wrapped stages.
|
* Similar to `Flow.fromSinkAndSource` however that API does not connect the completion signals of the wrapped operators.
|
||||||
*/
|
*/
|
||||||
object CoupledTerminationFlow {
|
object CoupledTerminationFlow {
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Similar to [[Flow.fromSinkAndSource]] however couples the termination of these two stages.
|
* Similar to [[Flow.fromSinkAndSource]] however couples the termination of these two operators.
|
||||||
*
|
*
|
||||||
* E.g. if the emitted [[Flow]] gets a cancellation, the [[Source]] of course is cancelled,
|
* E.g. if the emitted [[Flow]] gets a cancellation, the [[Source]] of course is cancelled,
|
||||||
* however the Sink will also be completed. The table below illustrates the effects in detail:
|
* however the Sink will also be completed. The table below illustrates the effects in detail:
|
||||||
|
|
|
||||||
|
|
@ -239,7 +239,7 @@ final class Flow[-In, +Out, +Mat](
|
||||||
* set directly on the individual graphs of the composite.
|
* set directly on the individual graphs of the composite.
|
||||||
*
|
*
|
||||||
* Note that this operation has no effect on an empty Flow (because the attributes apply
|
* Note that this operation has no effect on an empty Flow (because the attributes apply
|
||||||
* only to the contained processing stages).
|
* only to the contained processing operators).
|
||||||
*/
|
*/
|
||||||
override def withAttributes(attr: Attributes): Repr[Out] =
|
override def withAttributes(attr: Attributes): Repr[Out] =
|
||||||
new Flow(
|
new Flow(
|
||||||
|
|
@ -354,8 +354,8 @@ object Flow {
|
||||||
case f: Flow[I, O, M] ⇒ f
|
case f: Flow[I, O, M] ⇒ f
|
||||||
case f: javadsl.Flow[I, O, M] ⇒ f.asScala
|
case f: javadsl.Flow[I, O, M] ⇒ f.asScala
|
||||||
case g: GraphStageWithMaterializedValue[FlowShape[I, O], M] ⇒
|
case g: GraphStageWithMaterializedValue[FlowShape[I, O], M] ⇒
|
||||||
// move these from the stage itself to make the returned source
|
// move these from the operator itself to make the returned source
|
||||||
// behave as it is the stage with regards to attributes
|
// behave as if it were the operator with regards to attributes
|
||||||
val attrs = g.traversalBuilder.attributes
|
val attrs = g.traversalBuilder.attributes
|
||||||
val noAttrStage = g.withAttributes(Attributes.none)
|
val noAttrStage = g.withAttributes(Attributes.none)
|
||||||
new Flow(
|
new Flow(
|
||||||
|
|
@ -425,7 +425,7 @@ object Flow {
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Allows coupling termination (cancellation, completion, erroring) of Sinks and Sources while creating a Flow from them.
|
* Allows coupling termination (cancellation, completion, erroring) of Sinks and Sources while creating a Flow from them.
|
||||||
* Similar to [[Flow.fromSinkAndSource]] however couples the termination of these two stages.
|
* Similar to [[Flow.fromSinkAndSource]] however couples the termination of these two operators.
|
||||||
*
|
*
|
||||||
* The resulting flow can be visualized as:
|
* The resulting flow can be visualized as:
|
||||||
* {{{
|
* {{{
|
||||||
|
|
@ -488,7 +488,7 @@ object Flow {
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Allows coupling termination (cancellation, completion, erroring) of Sinks and Sources while creating a Flow from them.
|
* Allows coupling termination (cancellation, completion, erroring) of Sinks and Sources while creating a Flow from them.
|
||||||
* Similar to [[Flow.fromSinkAndSource]] however couples the termination of these two stages.
|
* Similar to [[Flow.fromSinkAndSource]] however couples the termination of these two operators.
|
||||||
*
|
*
|
||||||
* The resulting flow can be visualized as:
|
* The resulting flow can be visualized as:
|
||||||
* {{{
|
* {{{
|
||||||
|
|
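A minimal sketch of the coupled variant described in these hunks; unlike `Flow.fromSinkAndSource`, terminating one side here also terminates the other (the sink and source are placeholders):
{{{
import akka.NotUsed
import akka.util.ByteString
import akka.stream.scaladsl.{ Flow, Sink, Source }

val handler: Flow[ByteString, ByteString, NotUsed] =
  Flow.fromSinkAndSourceCoupled(Sink.ignore, Source.single(ByteString("hello")))
}}}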
@ -660,7 +660,7 @@ trait FlowOps[+Out, +Mat] {
|
||||||
/**
|
/**
|
||||||
* Recover allows sending a last element on failure and gracefully completing the stream.
|
* Recover allows sending a last element on failure and gracefully completing the stream.
|
||||||
* Since the underlying failure signal onError arrives out-of-band, it might jump over existing elements.
|
* Since the underlying failure signal onError arrives out-of-band, it might jump over existing elements.
|
||||||
* This stage can recover the failure signal, but not the skipped elements, which will be dropped.
|
* This operator can recover the failure signal, but not the skipped elements, which will be dropped.
|
||||||
*
|
*
|
||||||
* Throwing an exception inside `recover` _will_ be logged on ERROR level automatically.
|
* Throwing an exception inside `recover` _will_ be logged on ERROR level automatically.
|
||||||
*
|
*
|
||||||
|
|
@ -681,7 +681,7 @@ trait FlowOps[+Out, +Mat] {
|
||||||
* Source may be materialized.
|
* Source may be materialized.
|
||||||
*
|
*
|
||||||
* Since the underlying failure signal onError arrives out-of-band, it might jump over existing elements.
|
* Since the underlying failure signal onError arrives out-of-band, it might jump over existing elements.
|
||||||
* This stage can recover the failure signal, but not the skipped elements, which will be dropped.
|
* This operator can recover the failure signal, but not the skipped elements, which will be dropped.
|
||||||
*
|
*
|
||||||
* Throwing an exception inside `recoverWith` _will_ be logged on ERROR level automatically.
|
* Throwing an exception inside `recoverWith` _will_ be logged on ERROR level automatically.
|
||||||
*
|
*
|
||||||
|
|
@ -708,7 +708,7 @@ trait FlowOps[+Out, +Mat] {
|
||||||
* A negative `attempts` number is interpreted as "infinite", which results in the exact same behavior as `recoverWith`.
|
* A negative `attempts` number is interpreted as "infinite", which results in the exact same behavior as `recoverWith`.
|
||||||
*
|
*
|
||||||
* Since the underlying failure signal onError arrives out-of-band, it might jump over existing elements.
|
* Since the underlying failure signal onError arrives out-of-band, it might jump over existing elements.
|
||||||
* This stage can recover the failure signal, but not the skipped elements, which will be dropped.
|
* This operator can recover the failure signal, but not the skipped elements, which will be dropped.
|
||||||
*
|
*
|
||||||
* Throwing an exception inside `recoverWithRetries` _will_ be logged on ERROR level automatically.
|
* Throwing an exception inside `recoverWithRetries` _will_ be logged on ERROR level automatically.
|
||||||
*
|
*
|
||||||
|
|
@ -729,12 +729,12 @@ trait FlowOps[+Out, +Mat] {
|
||||||
via(new RecoverWith(attempts, pf))
|
via(new RecoverWith(attempts, pf))
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* While similar to [[recover]] this stage can be used to transform an error signal to a different one *without* logging
|
* While similar to [[recover]] this operator can be used to transform an error signal to a different one *without* logging
|
||||||
* it as an error in the process. So in that sense it is NOT exactly equivalent to `recover(t => throw t2)` since recover
|
* it as an error in the process. So in that sense it is NOT exactly equivalent to `recover(t => throw t2)` since recover
|
||||||
* would log the `t2` error.
|
* would log the `t2` error.
|
||||||
*
|
*
|
||||||
* Since the underlying failure signal onError arrives out-of-band, it might jump over existing elements.
|
* Since the underlying failure signal onError arrives out-of-band, it might jump over existing elements.
|
||||||
* This stage can recover the failure signal, but not the skipped elements, which will be dropped.
|
* This operator can recover the failure signal, but not the skipped elements, which will be dropped.
|
||||||
*
|
*
|
||||||
* Similarly to [[recover]], throwing an exception inside `mapError` _will_ be logged.
|
* Similarly to [[recover]], throwing an exception inside `mapError` _will_ be logged.
|
||||||
*
|
*
|
||||||
|
|
@ -775,11 +775,11 @@ trait FlowOps[+Out, +Mat] {
|
||||||
* This operation is useful for inspecting the passed through element, usually by means of side-effecting
|
* This operation is useful for inspecting the passed through element, usually by means of side-effecting
|
||||||
* operations (such as `println`, or emitting metrics), for each element without having to modify it.
|
* operations (such as `println`, or emitting metrics), for each element without having to modify it.
|
||||||
*
|
*
|
||||||
* For logging signals (elements, completion, error) consider using the [[log]] stage instead,
|
* For logging signals (elements, completion, error) consider using the [[log]] operator instead,
|
||||||
* along with appropriate `ActorAttributes.logLevels`.
|
* along with appropriate `ActorAttributes.logLevels`.
|
||||||
*
|
*
|
||||||
* '''Emits when''' upstream emits an element; the same element will be passed to the attached function,
|
* '''Emits when''' upstream emits an element; the same element will be passed to the attached function,
|
||||||
* as well as to the downstream stage
|
* as well as to the downstream operator
|
||||||
*
|
*
|
||||||
* '''Backpressures when''' downstream backpressures
|
* '''Backpressures when''' downstream backpressures
|
||||||
*
|
*
|
||||||
|
|
@ -921,9 +921,9 @@ trait FlowOps[+Out, +Mat] {
|
||||||
* a slightly healthier throughput.
|
* a slightly healthier throughput.
|
||||||
*
|
*
|
||||||
* Similar to the plain ask pattern, the target actor is allowed to reply with `akka.actor.Status`.
|
* Similar to the plain ask pattern, the target actor is allowed to reply with `akka.actor.Status`.
|
||||||
* An `akka.util.Status#Failure` will cause the stage to fail with the cause carried in the `Failure` message.
|
* An `akka.actor.Status#Failure` will cause the operator to fail with the cause carried in the `Failure` message.
|
||||||
*
|
*
|
||||||
* The stage fails with an [[akka.stream.WatchedActorTerminatedException]] if the target actor is terminated.
|
* The operator fails with an [[akka.stream.WatchedActorTerminatedException]] if the target actor is terminated.
|
||||||
*
|
*
|
||||||
* Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute.
|
* Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute.
|
||||||
*
|
*
|
||||||
|
|
@ -937,7 +937,7 @@ trait FlowOps[+Out, +Mat] {
|
||||||
*
|
*
|
||||||
* '''Cancels when''' downstream cancels
|
* '''Cancels when''' downstream cancels
|
||||||
*/
|
*/
|
||||||
@implicitNotFound("Missing an implicit akka.util.Timeout for the ask() stage")
|
@implicitNotFound("Missing an implicit akka.util.Timeout for the ask() operator")
|
||||||
def ask[S](ref: ActorRef)(implicit timeout: Timeout, tag: ClassTag[S]): Repr[S] =
|
def ask[S](ref: ActorRef)(implicit timeout: Timeout, tag: ClassTag[S]): Repr[S] =
|
||||||
ask(2)(ref)(timeout, tag)
|
ask(2)(ref)(timeout, tag)
|
||||||
|
|
||||||
|
|
@ -954,10 +954,10 @@ trait FlowOps[+Out, +Mat] {
|
||||||
* otherwise `Nothing` will be assumed, which is most likely not what you want.
|
* otherwise `Nothing` will be assumed, which is most likely not what you want.
|
||||||
*
|
*
|
||||||
* Parallelism limits how many asks can be "in flight" at the same time.
|
* Parallelism limits how many asks can be "in flight" at the same time.
|
||||||
* Please note that the elements emitted by this stage are in-order with regards to the asks being issued
|
* Please note that the elements emitted by this operator are in-order with regards to the asks being issued
|
||||||
* (i.e. same behaviour as mapAsync).
|
* (i.e. same behaviour as mapAsync).
|
||||||
*
|
*
|
||||||
* The stage fails with an [[akka.stream.WatchedActorTerminatedException]] if the target actor is terminated,
|
* The operator fails with an [[akka.stream.WatchedActorTerminatedException]] if the target actor is terminated,
|
||||||
* or with an [[java.util.concurrent.TimeoutException]] in case the ask exceeds the timeout passed in.
|
* or with an [[java.util.concurrent.TimeoutException]] in case the ask exceeds the timeout passed in.
|
||||||
*
|
*
|
||||||
* Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute.
|
* Adheres to the [[ActorAttributes.SupervisionStrategy]] attribute.
|
||||||
|
|
@ -972,7 +972,7 @@ trait FlowOps[+Out, +Mat] {
|
||||||
*
|
*
|
||||||
* '''Cancels when''' downstream cancels
|
* '''Cancels when''' downstream cancels
|
||||||
*/
|
*/
|
||||||
@implicitNotFound("Missing an implicit akka.util.Timeout for the ask() stage")
|
@implicitNotFound("Missing an implicit akka.util.Timeout for the ask() operator")
|
||||||
def ask[S](parallelism: Int)(ref: ActorRef)(implicit timeout: Timeout, tag: ClassTag[S]): Repr[S] = {
|
def ask[S](parallelism: Int)(ref: ActorRef)(implicit timeout: Timeout, tag: ClassTag[S]): Repr[S] = {
|
||||||
val askFlow = Flow[Out]
|
val askFlow = Flow[Out]
|
||||||
.watch(ref)
|
.watch(ref)
|
||||||
|
|
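A hedged sketch of the stream `ask` described above (`worker`, the message types and the timeout are assumptions): replies are emitted in the order the asks were issued.
{{{
import scala.concurrent.duration._
import akka.actor.ActorRef
import akka.util.Timeout
import akka.stream.scaladsl.Source

case class Request(payload: String)
case class Reply(payload: String)

implicit val timeout: Timeout = 3.seconds

def askedReplies(worker: ActorRef) =
  Source(List(Request("a"), Request("b"))).ask[Reply](parallelism = 4)(worker)
}}}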
@ -991,7 +991,7 @@ trait FlowOps[+Out, +Mat] {
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* The stage fails with an [[akka.stream.WatchedActorTerminatedException]] if the target actor is terminated.
|
* The operator fails with an [[akka.stream.WatchedActorTerminatedException]] if the target actor is terminated.
|
||||||
*
|
*
|
||||||
* '''Emits when''' upstream emits
|
* '''Emits when''' upstream emits
|
||||||
*
|
*
|
||||||
|
|
@ -1319,7 +1319,7 @@ trait FlowOps[+Out, +Mat] {
|
||||||
* yielding the next current value.
|
* yielding the next current value.
|
||||||
*
|
*
|
||||||
* If the stream is empty (i.e. completes before signalling any elements),
|
* If the stream is empty (i.e. completes before signalling any elements),
|
||||||
* the reduce stage will fail its downstream with a [[NoSuchElementException]],
|
* the reduce operator will fail its downstream with a [[NoSuchElementException]],
|
||||||
* which is semantically in line with what Scala's standard library collections
|
* which is semantically in line with what Scala's standard library collections
|
||||||
* do in such situations.
|
* do in such situations.
|
||||||
*
|
*
|
||||||
|
|
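A short sketch of the empty-stream behaviour noted above; `fold` with a zero element is the usual alternative when an empty upstream must not fail:
{{{
import akka.stream.scaladsl.Source

val sum     = Source(1 to 4).reduce(_ + _)        // emits 10
val safeSum = Source.empty[Int].fold(0)(_ + _)    // emits 0 instead of failing with NoSuchElementException
}}}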
@ -1762,7 +1762,7 @@ trait FlowOps[+Out, +Mat] {
|
||||||
* a new substream is opened and subsequently fed with all elements belonging to
|
* a new substream is opened and subsequently fed with all elements belonging to
|
||||||
* that key.
|
* that key.
|
||||||
*
|
*
|
||||||
* WARNING: If `allowClosedSubstreamRecreation` is set to `false` (default behavior) the stage
|
* WARNING: If `allowClosedSubstreamRecreation` is set to `false` (default behavior) the operator
|
||||||
* keeps track of all keys of streams that have already been closed. If you expect an infinite
|
* keeps track of all keys of streams that have already been closed. If you expect an infinite
|
||||||
* number of keys this can cause memory issues. Elements belonging to those keys are drained
|
* number of keys this can cause memory issues. Elements belonging to those keys are drained
|
||||||
* directly and not sent to the substream.
|
* directly and not sent to the substream.
|
||||||
|
|
@ -1830,7 +1830,7 @@ trait FlowOps[+Out, +Mat] {
|
||||||
* a new substream is opened and subsequently fed with all elements belonging to
|
* a new substream is opened and subsequently fed with all elements belonging to
|
||||||
* that key.
|
* that key.
|
||||||
*
|
*
|
||||||
* WARNING: The stage keeps track of all keys of streams that have already been closed.
|
* WARNING: The operator keeps track of all keys of streams that have already been closed.
|
||||||
* If you expect an infinite number of keys this can cause memory issues. Elements belonging
|
* If you expect an infinite number of keys this can cause memory issues. Elements belonging
|
||||||
* to those keys are drained directly and not sent to the substream.
|
* to those keys are drained directly and not sent to the substream.
|
||||||
*
|
*
|
||||||
|
|
@ -2016,7 +2016,7 @@ trait FlowOps[+Out, +Mat] {
|
||||||
def flatMapMerge[T, M](breadth: Int, f: Out ⇒ Graph[SourceShape[T], M]): Repr[T] = map(f).via(new FlattenMerge[T, M](breadth))
|
def flatMapMerge[T, M](breadth: Int, f: Out ⇒ Graph[SourceShape[T], M]): Repr[T] = map(f).via(new FlattenMerge[T, M](breadth))
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* If the first element has not passed through this stage before the provided timeout, the stream is failed
|
* If the first element has not passed through this operator before the provided timeout, the stream is failed
|
||||||
* with a [[scala.concurrent.TimeoutException]].
|
* with a [[scala.concurrent.TimeoutException]].
|
||||||
*
|
*
|
||||||
* '''Emits when''' upstream emits an element
|
* '''Emits when''' upstream emits an element
|
||||||
|
|
@ -2075,7 +2075,7 @@ trait FlowOps[+Out, +Mat] {
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Injects additional elements if upstream does not emit for a configured amount of time. In other words, this
|
* Injects additional elements if upstream does not emit for a configured amount of time. In other words, this
|
||||||
* stage attempts to maintains a base rate of emitted elements towards the downstream.
|
* operator attempts to maintain a base rate of emitted elements towards the downstream.
|
||||||
*
|
*
|
||||||
* If the downstream backpressures then no element is injected until downstream demand arrives. Injected elements
|
* If the downstream backpressures then no element is injected until downstream demand arrives. Injected elements
|
||||||
* do not accumulate during this period.
|
* do not accumulate during this period.
|
||||||
|
|
@ -2094,7 +2094,7 @@ trait FlowOps[+Out, +Mat] {
|
||||||
via(new Timers.IdleInject[Out, U](maxIdle, injectedElem))
|
via(new Timers.IdleInject[Out, U](maxIdle, injectedElem))
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Sends elements downstream with speed limited to `elements/per`. In other words, this stage set the maximum rate
|
* Sends elements downstream with speed limited to `elements/per`. In other words, this operator sets the maximum rate
|
||||||
* for emitting messages. This operator works for streams where all elements have the same cost or length.
|
* for emitting messages. This operator works for streams where all elements have the same cost or length.
|
||||||
*
|
*
|
||||||
* Throttle implements the token bucket model. There is a bucket with a given token capacity (burst size).
|
* Throttle implements the token bucket model. There is a bucket with a given token capacity (burst size).
|
||||||
|
|
@ -2126,7 +2126,7 @@ trait FlowOps[+Out, +Mat] {
|
||||||
throttle(elements, per, maximumBurst = Throttle.AutomaticMaximumBurst, ConstantFun.oneInt, ThrottleMode.Shaping)
|
throttle(elements, per, maximumBurst = Throttle.AutomaticMaximumBurst, ConstantFun.oneInt, ThrottleMode.Shaping)
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Sends elements downstream with speed limited to `elements/per`. In other words, this stage set the maximum rate
|
* Sends elements downstream with speed limited to `elements/per`. In other words, this operator sets the maximum rate
|
||||||
* for emitting messages. This operator works for streams where all elements have the same cost or length.
|
* for emitting messages. This operator works for streams where all elements have the same cost or length.
|
||||||
*
|
*
|
||||||
* Throttle implements the token bucket model. There is a bucket with a given token capacity (burst size or maximumBurst).
|
* Throttle implements the token bucket model. There is a bucket with a given token capacity (burst size or maximumBurst).
|
||||||
|
|
@ -2421,7 +2421,7 @@ trait FlowOps[+Out, +Mat] {
|
||||||
* source, then repeat process.
|
* source, then repeat process.
|
||||||
*
|
*
|
||||||
* If eagerClose is false and one of the upstreams completes, the elements from the other upstream will continue passing
|
* If eagerClose is false and one of the upstreams completes, the elements from the other upstream will continue passing
|
||||||
* through the interleave stage. If eagerClose is true and one of the upstream complete interleave will cancel the
|
* through the interleave operator. If eagerClose is true and one of the upstreams completes, interleave will cancel the
|
||||||
* other upstream and complete itself.
|
* other upstream and complete itself.
|
||||||
*
|
*
|
||||||
* If it gets an error from one of the upstreams, the stream completes with failure.
|
* If it gets an error from one of the upstreams, the stream completes with failure.
|
||||||
|
|
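A minimal sketch of `interleave` with the default `eagerClose = false` behaviour (sources and segment size are arbitrary): two elements are taken from each source in turn, and once the shorter source completes the remaining elements of the other keep flowing.
{{{
import akka.stream.scaladsl.Source

val mixed = Source(1 to 6).interleave(Source(100 to 102), segmentSize = 2)
}}}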
@ -2560,7 +2560,7 @@ trait FlowOps[+Out, +Mat] {
|
||||||
* from producing elements by asserting back-pressure until its time comes or it gets
|
* from producing elements by asserting back-pressure until its time comes or it gets
|
||||||
* cancelled.
|
* cancelled.
|
||||||
*
|
*
|
||||||
* On errors the stage is failed regardless of source of the error.
|
* On errors the operator is failed regardless of source of the error.
|
||||||
*
|
*
|
||||||
* '''Emits when''' element is available from first stream or first stream closed without emitting any elements and an element
|
* '''Emits when''' element is available from first stream or first stream closed without emitting any elements and an element
|
||||||
* is available from the second stream
|
* is available from the second stream
|
||||||
|
|
@ -2823,7 +2823,7 @@ trait FlowOpsMat[+Out, +Mat] extends FlowOps[Out, Mat] {
|
||||||
* then repeat process.
|
* then repeat process.
|
||||||
*
|
*
|
||||||
* If eagerClose is false and one of the upstreams completes, the elements from the other upstream will continue passing
|
* If eagerClose is false and one of the upstreams completes, the elements from the other upstream will continue passing
|
||||||
* through the interleave stage. If eagerClose is true and one of the upstream complete interleave will cancel the
|
* through the interleave operator. If eagerClose is true and one of the upstreams completes, interleave will cancel the
|
||||||
* other upstream and complete itself.
|
* other upstream and complete itself.
|
||||||
*
|
*
|
||||||
* If it gets an error from one of the upstreams, the stream completes with failure.
|
* If it gets an error from one of the upstreams, the stream completes with failure.
|
||||||
|
|
@ -2896,7 +2896,7 @@ trait FlowOpsMat[+Out, +Mat] extends FlowOps[Out, Mat] {
|
||||||
* from producing elements by asserting back-pressure until its time comes or it gets
|
* from producing elements by asserting back-pressure until its time comes or it gets
|
||||||
* cancelled.
|
* cancelled.
|
||||||
*
|
*
|
||||||
* On errors the stage is failed regardless of source of the error.
|
* On errors the operator is failed regardless of source of the error.
|
||||||
*
|
*
|
||||||
* '''Emits when''' element is available from first stream or first stream closed without emitting any elements and an element
|
* '''Emits when''' element is available from first stream or first stream closed without emitting any elements and an element
|
||||||
* is available from the second stream
|
* is available from the second stream
|
||||||
|
|
|
||||||
|
|
@ -76,7 +76,7 @@ object Framing {
|
||||||
* @param computeFrameSize This function can be supplied if frame size is varied or needs to be computed in a special fashion.
|
* @param computeFrameSize This function can be supplied if frame size is varied or needs to be computed in a special fashion.
|
||||||
* For example, frame can have a shape like this: `[offset bytes][body size bytes][body bytes][footer bytes]`.
|
* For example, frame can have a shape like this: `[offset bytes][body size bytes][body bytes][footer bytes]`.
|
||||||
* Then computeFrameSize can be used to compute the frame size: `(offset bytes, computed size) => (actual frame size)`.
|
* Then computeFrameSize can be used to compute the frame size: `(offset bytes, computed size) => (actual frame size)`.
|
||||||
* ''Actual frame size'' must be equal or bigger than sum of `fieldOffset` and `fieldLength`, the stage fails otherwise.
|
* ''Actual frame size'' must be equal to or bigger than the sum of `fieldOffset` and `fieldLength`, otherwise the operator fails.
|
||||||
*
|
*
|
||||||
*/
|
*/
|
||||||
def lengthField(
|
def lengthField(
|
||||||
|
|
|
||||||
|
|
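A hedged sketch of the simple `lengthField` overload for frames shaped `[2-byte length][body]` (field sizes and the frame limit are assumptions):
{{{
import java.nio.ByteOrder
import akka.stream.scaladsl.Framing

val deframer = Framing.lengthField(
  fieldLength = 2,
  fieldOffset = 0,
  maximumFrameLength = 65536,
  byteOrder = ByteOrder.BIG_ENDIAN)
}}}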
@ -716,7 +716,7 @@ object Partition {
|
||||||
case class PartitionOutOfBoundsException(msg: String) extends IndexOutOfBoundsException(msg) with NoStackTrace
|
case class PartitionOutOfBoundsException(msg: String) extends IndexOutOfBoundsException(msg) with NoStackTrace
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Create a new `Partition` stage with the specified input type. This method sets `eagerCancel` to `false`.
|
* Create a new `Partition` operator with the specified input type. This method sets `eagerCancel` to `false`.
|
||||||
* To specify a different value for the `eagerCancel` parameter, instantiate Partition using the constructor.
|
* To specify a different value for the `eagerCancel` parameter, instantiate Partition using the constructor.
|
||||||
*
|
*
|
||||||
* If `eagerCancel` is true, partition cancels upstream if any of its downstreams cancel, if false, when all have cancelled.
|
* If `eagerCancel` is true, partition cancels upstream if any of its downstreams cancel, if false, when all have cancelled.
|
||||||
|
|
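A compact sketch of the `Partition` factory documented here, routing even and odd numbers to different sinks inside a `GraphDSL` (the partitioning function is illustrative):
{{{
import akka.stream.ClosedShape
import akka.stream.scaladsl.{ GraphDSL, Partition, RunnableGraph, Sink, Source }

val routed = RunnableGraph.fromGraph(GraphDSL.create() { implicit b =>
  import GraphDSL.Implicits._
  // two output ports; eagerCancel defaults to false with this factory
  val partition = b.add(Partition[Int](2, i => if (i % 2 == 0) 0 else 1))
  Source(1 to 10) ~> partition.in
  partition.out(0) ~> Sink.foreach[Int](i => println(s"even: $i"))
  partition.out(1) ~> Sink.foreach[Int](i => println(s"odd: $i"))
  ClosedShape
})
}}}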
@ -1196,9 +1196,9 @@ object OrElse {
|
||||||
* Takes two streams and passes the first through, the secondary stream is only passed
|
* Takes two streams and passes the first through, the secondary stream is only passed
|
||||||
* through if the primary stream completes without passing any elements through. When
|
* through if the primary stream completes without passing any elements through. When
|
||||||
* the first element is passed through from the primary the secondary is cancelled.
|
* the first element is passed through from the primary the secondary is cancelled.
|
||||||
* Both incoming streams are materialized when the stage is materialized.
|
* Both incoming streams are materialized when the operator is materialized.
|
||||||
*
|
*
|
||||||
* On errors the stage is failed regardless of source of the error.
|
 * On errors the operator is failed regardless of the source of the error.
|
||||||
*
|
*
|
||||||
* '''Emits when''' element is available from primary stream or the primary stream closed without emitting any elements and an element
|
* '''Emits when''' element is available from primary stream or the primary stream closed without emitting any elements and an element
|
||||||
* is available from the secondary stream
|
* is available from the secondary stream
|
||||||
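A small sketch of the behaviour described above, using the `orElse` operator:

  import akka.stream.scaladsl.Source

  // The secondary source is only used because the primary completes without emitting anything.
  val primary   = Source.empty[Int]
  val secondary = Source(List(1, 2, 3))
  val combined  = primary.orElse(secondary) // emits 1, 2, 3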
|
|
@ -1365,7 +1365,7 @@ object GraphDSL extends GraphApply {
|
||||||
* It is possible to call this method multiple times to get multiple [[Outlet]] instances if necessary. All of
|
* It is possible to call this method multiple times to get multiple [[Outlet]] instances if necessary. All of
|
||||||
* the outlets will emit the materialized value.
|
* the outlets will emit the materialized value.
|
||||||
*
|
*
|
||||||
* Be careful to not to feed the result of this outlet to a stage that produces the materialized value itself (for
|
 * Be careful not to feed the result of this outlet to an operator that produces the materialized value itself (for
|
||||||
* example to a [[Sink#fold]] that contributes to the materialized value) since that might lead to an unresolvable
|
* example to a [[Sink#fold]] that contributes to the materialized value) since that might lead to an unresolvable
|
||||||
* dependency cycle.
|
* dependency cycle.
|
||||||
*
|
*
|
||||||
|
|
|
||||||
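For context, a sketch of the safe variant of the pattern warned about above: the fold's materialized Future is exposed downstream without being fed back into the fold itself. The `foldFlow` name is illustrative:

  import akka.stream.FlowShape
  import akka.stream.scaladsl.{ Flow, GraphDSL, Sink }
  import scala.concurrent.Future

  val foldFlow: Flow[Int, Int, Future[Int]] =
    Flow.fromGraph(GraphDSL.create(Sink.fold[Int, Int](0)(_ + _)) { implicit b => fold =>
      import GraphDSL.Implicits._
      // The materialized-value outlet feeds a different operator than the fold that produces it.
      FlowShape(fold.in, b.materializedValue.mapAsync(1)(identity).outlet)
    })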
|
|
@ -759,7 +759,7 @@ object PartitionHub {
|
||||||
* identifier for the given element. The function will never be called when there are no active consumers,
|
* identifier for the given element. The function will never be called when there are no active consumers,
|
||||||
* i.e. there is always at least one element in the array of identifiers.
|
* i.e. there is always at least one element in the array of identifiers.
|
||||||
* @param startAfterNrOfConsumers Elements are buffered until this number of consumers have been connected.
|
* @param startAfterNrOfConsumers Elements are buffered until this number of consumers have been connected.
|
||||||
* This is only used initially when the stage is starting up, i.e. it is not honored when consumers have
|
* This is only used initially when the operator is starting up, i.e. it is not honored when consumers have
|
||||||
* been removed (canceled).
|
* been removed (canceled).
|
||||||
* @param bufferSize Total number of elements that can be buffered. If this buffer is full, the producer
|
* @param bufferSize Total number of elements that can be buffered. If this buffer is full, the producer
|
||||||
* is backpressured.
|
* is backpressured.
|
||||||
|
|
@ -792,7 +792,7 @@ object PartitionHub {
|
||||||
* and less than number of consumers. E.g. `(size, elem) => math.abs(elem.hashCode) % size`. It's also
|
* and less than number of consumers. E.g. `(size, elem) => math.abs(elem.hashCode) % size`. It's also
|
||||||
* possible to use `-1` to drop the element.
|
* possible to use `-1` to drop the element.
|
||||||
* @param startAfterNrOfConsumers Elements are buffered until this number of consumers have been connected.
|
* @param startAfterNrOfConsumers Elements are buffered until this number of consumers have been connected.
|
||||||
* This is only used initially when the stage is starting up, i.e. it is not honored when consumers have
|
* This is only used initially when the operator is starting up, i.e. it is not honored when consumers have
|
||||||
* been removed (canceled).
|
* been removed (canceled).
|
||||||
* @param bufferSize Total number of elements that can be buffered. If this buffer is full, the producer
|
* @param bufferSize Total number of elements that can be buffered. If this buffer is full, the producer
|
||||||
* is backpressured.
|
* is backpressured.
|
||||||
|
|
|
||||||
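A sketch of the `sink` variant documented above; the partitioner, consumer count, buffer size and the ActorMaterializer setup are illustrative choices:

  import akka.actor.ActorSystem
  import akka.stream.ActorMaterializer
  import akka.stream.scaladsl.{ PartitionHub, Source }

  implicit val system = ActorSystem("sketch")
  implicit val materializer = ActorMaterializer()

  // Elements are buffered until two consumers have attached, then routed by hash.
  val fromProducer = Source(1 to 100).runWith(
    PartitionHub.sink[Int](
      (size, elem) => math.abs(elem.hashCode) % size,
      startAfterNrOfConsumers = 2,
      bufferSize = 256))

  // Each run of `fromProducer` attaches one more consumer to the hub.
  fromProducer.runForeach(n => println(s"consumer A: $n"))
  fromProducer.runForeach(n => println(s"consumer B: $n"))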
|
|
@ -13,14 +13,14 @@ import akka.util.ByteString
|
||||||
|
|
||||||
import scala.util.control.NonFatal
|
import scala.util.control.NonFatal
|
||||||
|
|
||||||
/** Provides JSON framing stages that can separate valid JSON objects from incoming [[ByteString]] objects. */
|
/** Provides JSON framing operators that can separate valid JSON objects from incoming [[ByteString]] objects. */
|
||||||
object JsonFraming {
|
object JsonFraming {
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Returns a Flow that implements a "brace counting" based framing stage for emitting valid JSON chunks.
|
* Returns a Flow that implements a "brace counting" based framing operator for emitting valid JSON chunks.
|
||||||
* It scans the incoming data stream for valid JSON objects and returns chunks of ByteStrings containing only those valid chunks.
|
* It scans the incoming data stream for valid JSON objects and returns chunks of ByteStrings containing only those valid chunks.
|
||||||
*
|
*
|
||||||
* Typical examples of data that one may want to frame using this stage include:
|
* Typical examples of data that one may want to frame using this operator include:
|
||||||
*
|
*
|
||||||
* **Very large arrays**:
|
* **Very large arrays**:
|
||||||
* {{{
|
* {{{
|
||||||
|
|
|
||||||
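A minimal sketch of the operator described above; the length limit is an illustrative choice:

  import akka.NotUsed
  import akka.stream.scaladsl.{ Flow, JsonFraming }
  import akka.util.ByteString

  // Splits incoming bytes into one ByteString per complete JSON object, up to 8 KiB each.
  val jsonFrames: Flow[ByteString, ByteString, NotUsed] =
    JsonFraming.objectScanner(maximumObjectLength = 8 * 1024)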
|
|
@ -31,7 +31,7 @@ trait SourceQueue[T] {
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Method returns a [[Future]] that will be completed if the stream completes,
|
* Method returns a [[Future]] that will be completed if the stream completes,
|
||||||
* or will be failed when the stage faces an internal failure.
|
* or will be failed when the operator faces an internal failure.
|
||||||
*/
|
*/
|
||||||
def watchCompletion(): Future[Done]
|
def watchCompletion(): Future[Done]
|
||||||
}
|
}
|
||||||
|
|
@ -54,7 +54,7 @@ trait SourceQueueWithComplete[T] extends SourceQueue[T] {
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Method returns a [[Future]] that will be completed if the stream completes,
|
* Method returns a [[Future]] that will be completed if the stream completes,
|
||||||
* or will be failed when the stage faces an internal failure or the the [[SourceQueueWithComplete.fail]] method is invoked.
|
 * or will be failed when the operator faces an internal failure or the [[SourceQueueWithComplete.fail]] method is invoked.
|
||||||
*/
|
*/
|
||||||
def watchCompletion(): Future[Done]
|
def watchCompletion(): Future[Done]
|
||||||
}
|
}
|
||||||
|
|
|
||||||
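For context, a sketch of observing completion of a queue-backed source; the buffer size, overflow strategy and materializer setup are illustrative:

  import akka.Done
  import akka.actor.ActorSystem
  import akka.stream.{ ActorMaterializer, OverflowStrategy }
  import akka.stream.scaladsl.{ Keep, Sink, Source }
  import scala.concurrent.Future

  implicit val system = ActorSystem("sketch")
  implicit val materializer = ActorMaterializer()
  import system.dispatcher

  val (queue, sinkDone) = Source.queue[Int](16, OverflowStrategy.backpressure)
    .toMat(Sink.foreach[Int](n => println(n)))(Keep.both)
    .run()

  queue.offer(1)
  queue.complete()

  // Completes when the stream completes, fails on an internal failure (or on `fail`).
  val completion: Future[Done] = queue.watchCompletion()
  completion.onComplete(result => println(s"stream terminated: $result"))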
|
|
@ -442,7 +442,7 @@ private abstract class RestartWithBackoffLogic[S <: Shape](
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @param out The permanent outlet
|
* @param out The permanent outlet
|
||||||
* @return A sub sink inlet that's sink is attached to the wrapped stage
|
 * @return A sub sink inlet whose sink is attached to the wrapped operator
|
||||||
*/
|
*/
|
||||||
protected final def createSubInlet[T](out: Outlet[T]): SubSinkInlet[T] = {
|
protected final def createSubInlet[T](out: Outlet[T]): SubSinkInlet[T] = {
|
||||||
val sinkIn = new SubSinkInlet[T](s"RestartWithBackoff$name.subIn")
|
val sinkIn = new SubSinkInlet[T](s"RestartWithBackoff$name.subIn")
|
||||||
|
|
@ -482,7 +482,7 @@ private abstract class RestartWithBackoffLogic[S <: Shape](
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* @param in The permanent inlet for this stage
|
* @param in The permanent inlet for this operator
|
||||||
* @return Temporary SubSourceOutlet for this "restart"
|
* @return Temporary SubSourceOutlet for this "restart"
|
||||||
*/
|
*/
|
||||||
protected final def createSubOutlet[T](in: Inlet[T]): SubSourceOutlet[T] = {
|
protected final def createSubOutlet[T](in: Inlet[T]): SubSourceOutlet[T] = {
|
||||||
|
|
|
||||||
|
|
@ -232,7 +232,7 @@ object Sink {
|
||||||
* A `Sink` that materializes into a [[org.reactivestreams.Publisher]].
|
* A `Sink` that materializes into a [[org.reactivestreams.Publisher]].
|
||||||
*
|
*
|
||||||
* If `fanout` is `true`, the materialized `Publisher` will support multiple `Subscriber`s and
|
* If `fanout` is `true`, the materialized `Publisher` will support multiple `Subscriber`s and
|
||||||
* the size of the `inputBuffer` configured for this stage becomes the maximum number of elements that
|
* the size of the `inputBuffer` configured for this operator becomes the maximum number of elements that
|
||||||
* the fastest [[org.reactivestreams.Subscriber]] can be ahead of the slowest one before slowing
|
* the fastest [[org.reactivestreams.Subscriber]] can be ahead of the slowest one before slowing
|
||||||
* the processing down due to back pressure.
|
* the processing down due to back pressure.
|
||||||
*
|
*
|
||||||
|
|
@ -325,7 +325,7 @@ object Sink {
|
||||||
* if there is a failure signaled in the stream.
|
* if there is a failure signaled in the stream.
|
||||||
*
|
*
|
||||||
* If the stream is empty (i.e. completes before signalling any elements),
|
* If the stream is empty (i.e. completes before signalling any elements),
|
||||||
* the reduce stage will fail its downstream with a [[NoSuchElementException]],
|
* the reduce operator will fail its downstream with a [[NoSuchElementException]],
|
||||||
 * which is semantically in line with what Scala's standard library collections
|
 * which is semantically in line with what Scala's standard library collections
|
||||||
* do in such situations.
|
* do in such situations.
|
||||||
*
|
*
|
||||||
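A small sketch of the behaviour described above; note how the empty-stream case surfaces:

  import akka.stream.scaladsl.{ Sink, Source }
  import scala.concurrent.Future

  val sum: Sink[Int, Future[Int]] = Sink.reduce[Int](_ + _)

  // Source(1 to 4).runWith(sum) would complete with 10, while
  // Source.empty[Int].runWith(sum) would fail with a NoSuchElementException.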
|
|
@ -396,7 +396,7 @@ object Sink {
|
||||||
* i.e. if the actor is not consuming the messages fast enough the mailbox
|
* i.e. if the actor is not consuming the messages fast enough the mailbox
|
||||||
* of the actor will grow. For potentially slow consumer actors it is recommended
|
* of the actor will grow. For potentially slow consumer actors it is recommended
|
||||||
* to use a bounded mailbox with zero `mailbox-push-timeout-time` or use a rate
|
* to use a bounded mailbox with zero `mailbox-push-timeout-time` or use a rate
|
||||||
* limiting stage in front of this `Sink`.
|
* limiting operator in front of this `Sink`.
|
||||||
*/
|
*/
|
||||||
@InternalApi private[akka] def actorRef[T](ref: ActorRef, onCompleteMessage: Any, onFailureMessage: Throwable ⇒ Any): Sink[T, NotUsed] =
|
@InternalApi private[akka] def actorRef[T](ref: ActorRef, onCompleteMessage: Any, onFailureMessage: Throwable ⇒ Any): Sink[T, NotUsed] =
|
||||||
fromGraph(new ActorRefSink(ref, onCompleteMessage, onFailureMessage,
|
fromGraph(new ActorRefSink(ref, onCompleteMessage, onFailureMessage,
|
||||||
|
|
@ -415,7 +415,7 @@ object Sink {
|
||||||
* i.e. if the actor is not consuming the messages fast enough the mailbox
|
* i.e. if the actor is not consuming the messages fast enough the mailbox
|
||||||
* of the actor will grow. For potentially slow consumer actors it is recommended
|
* of the actor will grow. For potentially slow consumer actors it is recommended
|
||||||
* to use a bounded mailbox with zero `mailbox-push-timeout-time` or use a rate
|
* to use a bounded mailbox with zero `mailbox-push-timeout-time` or use a rate
|
||||||
* limiting stage in front of this `Sink`.
|
* limiting operator in front of this `Sink`.
|
||||||
*/
|
*/
|
||||||
def actorRef[T](ref: ActorRef, onCompleteMessage: Any): Sink[T, NotUsed] =
|
def actorRef[T](ref: ActorRef, onCompleteMessage: Any): Sink[T, NotUsed] =
|
||||||
fromGraph(new ActorRefSink(ref, onCompleteMessage, t ⇒ Status.Failure(t),
|
fromGraph(new ActorRefSink(ref, onCompleteMessage, t ⇒ Status.Failure(t),
|
||||||
|
|
|
||||||
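A sketch of wiring a stream to an actor as described above; `receiver` and the completion message are placeholders, and a `throttle` stands in for the suggested rate limiting in front of the sink:

  import akka.NotUsed
  import akka.actor.ActorRef
  import akka.stream.ThrottleMode
  import akka.stream.scaladsl.{ RunnableGraph, Sink, Source }
  import scala.concurrent.duration._

  def streamTo(receiver: ActorRef): RunnableGraph[NotUsed] =
    Source(1 to 100)
      .throttle(10, 1.second, 10, ThrottleMode.shaping) // keep a slow consumer's mailbox bounded
      .to(Sink.actorRef[Int](receiver, onCompleteMessage = "done"))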
|
|
@ -132,7 +132,7 @@ final class Source[+Out, +Mat](
|
||||||
* if there is a failure signaled in the stream.
|
* if there is a failure signaled in the stream.
|
||||||
*
|
*
|
||||||
* If the stream is empty (i.e. completes before signalling any elements),
|
* If the stream is empty (i.e. completes before signalling any elements),
|
||||||
* the reduce stage will fail its downstream with a [[NoSuchElementException]],
|
* the reduce operator will fail its downstream with a [[NoSuchElementException]],
|
||||||
 * which is semantically in line with what Scala's standard library collections
|
 * which is semantically in line with what Scala's standard library collections
|
||||||
* do in such situations.
|
* do in such situations.
|
||||||
*/
|
*/
|
||||||
|
|
@ -321,7 +321,7 @@ object Source {
|
||||||
def fromFutureSource[T, M](future: Future[Graph[SourceShape[T], M]]): Source[T, Future[M]] = fromGraph(new FutureFlattenSource(future))
|
def fromFutureSource[T, M](future: Future[Graph[SourceShape[T], M]]): Source[T, Future[M]] = fromGraph(new FutureFlattenSource(future))
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Streams the elements of an asynchronous source once its given `completion` stage completes.
|
 * Streams the elements of an asynchronous source once the given [[CompletionStage]] completes.
|
||||||
* If the [[CompletionStage]] fails the stream is failed with the exception from the future.
|
* If the [[CompletionStage]] fails the stream is failed with the exception from the future.
|
||||||
* If downstream cancels before the stream completes the materialized `Future` will be failed
|
* If downstream cancels before the stream completes the materialized `Future` will be failed
|
||||||
* with a [[StreamDetachedException]]
|
* with a [[StreamDetachedException]]
|
||||||
|
|
|
||||||
|
|
@ -50,7 +50,7 @@ trait SubFlow[+Out, +Mat, +F[+_], C] extends FlowOps[Out, Mat] {
|
||||||
* Flatten the sub-flows back into the super-flow by concatenating them.
|
* Flatten the sub-flows back into the super-flow by concatenating them.
|
||||||
* This is usually a bad idea when combined with `groupBy` since it can
|
* This is usually a bad idea when combined with `groupBy` since it can
|
||||||
* easily lead to deadlock—the concatenation does not consume from the second
|
* easily lead to deadlock—the concatenation does not consume from the second
|
||||||
* substream until the first has finished and the `groupBy` stage will get
|
* substream until the first has finished and the `groupBy` operator will get
|
||||||
* back-pressure from the second stream.
|
* back-pressure from the second stream.
|
||||||
*
|
*
|
||||||
* This is identical in effect to `mergeSubstreamsWithParallelism(1)`.
|
* This is identical in effect to `mergeSubstreamsWithParallelism(1)`.
|
||||||
|
|
|
||||||
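A sketch contrasting the safe and the risky combination described above; `mergeSubstreams` is shown as the usual alternative when substreams stay open concurrently:

  import akka.stream.scaladsl.Source

  // Safe with groupBy: substreams are merged, so no group can starve another.
  val merged = Source(1 to 10)
    .groupBy(2, _ % 2)
    .map(_ * 10)
    .mergeSubstreams

  // concatSubstreams suits sequential substreams better, e.g. those produced by splitWhen.
  val concatenated = Source(1 to 10)
    .splitWhen(_ % 5 == 0)
    .map(_ * 10)
    .concatSubstreams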
|
|
@ -40,17 +40,17 @@ import scala.util.{ Failure, Success, Try }
|
||||||
* The TLS specification does not permit half-closing of the user data session
|
* The TLS specification does not permit half-closing of the user data session
|
||||||
* that it transports—to be precise a half-close will always promptly lead to a
|
* that it transports—to be precise a half-close will always promptly lead to a
|
||||||
* full close. This means that canceling the plaintext output or completing the
|
* full close. This means that canceling the plaintext output or completing the
|
||||||
* plaintext input of the SslTls stage will lead to full termination of the
|
* plaintext input of the SslTls operator will lead to full termination of the
|
||||||
* secure connection without regard to whether bytes are remaining to be sent or
|
* secure connection without regard to whether bytes are remaining to be sent or
|
||||||
* received, respectively. Especially for a client the common idiom of attaching
|
* received, respectively. Especially for a client the common idiom of attaching
|
||||||
* a finite Source to the plaintext input and transforming the plaintext response
|
* a finite Source to the plaintext input and transforming the plaintext response
|
||||||
* bytes coming out will not work out of the box due to early termination of the
|
* bytes coming out will not work out of the box due to early termination of the
|
||||||
* connection. For this reason there is a parameter that determines whether the
|
* connection. For this reason there is a parameter that determines whether the
|
||||||
* SslTls stage shall ignore completion and/or cancellation events, and the
|
* SslTls operator shall ignore completion and/or cancellation events, and the
|
||||||
* default is to ignore completion (in view of the client–server scenario). In
|
* default is to ignore completion (in view of the client–server scenario). In
|
||||||
* order to terminate the connection the client will then need to cancel the
|
* order to terminate the connection the client will then need to cancel the
|
||||||
* plaintext output as soon as all expected bytes have been received. When
|
* plaintext output as soon as all expected bytes have been received. When
|
||||||
* ignoring both types of events the stage will shut down once both events have
|
* ignoring both types of events the operator will shut down once both events have
|
||||||
* been received. See also [[TLSClosing]].
|
* been received. See also [[TLSClosing]].
|
||||||
*/
|
*/
|
||||||
object TLS {
|
object TLS {
|
||||||
|
|
|
||||||
|
|
@ -173,7 +173,7 @@ final class Tcp(system: ExtendedActorSystem) extends akka.actor.Extension {
|
||||||
*
|
*
|
||||||
* Note that the ByteString chunk boundaries are not retained across the network,
|
* Note that the ByteString chunk boundaries are not retained across the network,
|
||||||
* to achieve application level chunks you have to introduce explicit framing in your streams,
|
* to achieve application level chunks you have to introduce explicit framing in your streams,
|
||||||
* for example using the [[Framing]] stages.
|
* for example using the [[Framing]] operators.
|
||||||
*
|
*
|
||||||
* @param remoteAddress The remote address to connect to
|
* @param remoteAddress The remote address to connect to
|
||||||
* @param localAddress Optional local address for the connection
|
* @param localAddress Optional local address for the connection
|
||||||
|
|
@ -218,7 +218,7 @@ final class Tcp(system: ExtendedActorSystem) extends akka.actor.Extension {
|
||||||
*
|
*
|
||||||
* Note that the ByteString chunk boundaries are not retained across the network,
|
* Note that the ByteString chunk boundaries are not retained across the network,
|
||||||
* to achieve application level chunks you have to introduce explicit framing in your streams,
|
* to achieve application level chunks you have to introduce explicit framing in your streams,
|
||||||
* for example using the [[Framing]] stages.
|
* for example using the [[Framing]] operators.
|
||||||
*/
|
*/
|
||||||
def outgoingConnection(host: String, port: Int): Flow[ByteString, ByteString, Future[OutgoingConnection]] =
|
def outgoingConnection(host: String, port: Int): Flow[ByteString, ByteString, Future[OutgoingConnection]] =
|
||||||
outgoingConnection(InetSocketAddress.createUnresolved(host, port))
|
outgoingConnection(InetSocketAddress.createUnresolved(host, port))
|
||||||
|
|
|
||||||
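A sketch of the suggestion above: add explicit framing around the TCP connection so the application sees whole lines rather than arbitrary ByteString chunks. Host, port and frame length are illustrative:

  import akka.actor.ActorSystem
  import akka.stream.scaladsl.{ Framing, Tcp }
  import akka.util.ByteString

  implicit val system = ActorSystem("sketch")

  val connection      = Tcp().outgoingConnection("127.0.0.1", 8888)
  val newlineFraming  = Framing.delimiter(ByteString("\n"), maximumFrameLength = 1024)
  val lines           = connection.via(newlineFraming).map(_.utf8String)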
|
|
@ -23,7 +23,7 @@ import scala.concurrent.duration.FiniteDuration
|
||||||
import scala.concurrent.{ Future, Promise }
|
import scala.concurrent.{ Future, Promise }
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Scala API: A GraphStage represents a reusable graph stream processing stage.
|
* Scala API: A GraphStage represents a reusable graph stream processing operator.
|
||||||
*
|
*
|
||||||
* Extend this `GraphStageWithMaterializedValue` if you want to provide a materialized value,
|
* Extend this `GraphStageWithMaterializedValue` if you want to provide a materialized value,
|
||||||
* represented by the type parameter `M`. If your GraphStage does not need to provide a materialized
|
* represented by the type parameter `M`. If your GraphStage does not need to provide a materialized
|
||||||
|
|
@ -32,7 +32,7 @@ import scala.concurrent.{ Future, Promise }
|
||||||
* A GraphStage consists of a [[Shape]] which describes its input and output ports and a factory function that
|
* A GraphStage consists of a [[Shape]] which describes its input and output ports and a factory function that
|
||||||
* creates a [[GraphStageLogic]] which implements the processing logic that ties the ports together.
|
* creates a [[GraphStageLogic]] which implements the processing logic that ties the ports together.
|
||||||
*
|
*
|
||||||
* See also [[AbstractGraphStageWithMaterializedValue]] for Java DSL for this stage.
|
 * See also [[AbstractGraphStageWithMaterializedValue]] for the Java DSL counterpart of this operator.
|
||||||
*/
|
*/
|
||||||
abstract class GraphStageWithMaterializedValue[+S <: Shape, +M] extends Graph[S, M] {
|
abstract class GraphStageWithMaterializedValue[+S <: Shape, +M] extends Graph[S, M] {
|
||||||
|
|
||||||
|
|
@ -60,7 +60,7 @@ abstract class GraphStageWithMaterializedValue[+S <: Shape, +M] extends Graph[S,
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Java API: A GraphStage represents a reusable graph stream processing stage.
|
* Java API: A GraphStage represents a reusable graph stream processing operator.
|
||||||
*
|
*
|
||||||
* Extend this `AbstractGraphStageWithMaterializedValue` if you want to provide a materialized value,
|
* Extend this `AbstractGraphStageWithMaterializedValue` if you want to provide a materialized value,
|
||||||
* represented by the type parameter `M`. If your GraphStage does not need to provide a materialized
|
* represented by the type parameter `M`. If your GraphStage does not need to provide a materialized
|
||||||
|
|
@ -69,7 +69,7 @@ abstract class GraphStageWithMaterializedValue[+S <: Shape, +M] extends Graph[S,
|
||||||
* A GraphStage consists of a [[Shape]] which describes its input and output ports and a factory function that
|
* A GraphStage consists of a [[Shape]] which describes its input and output ports and a factory function that
|
||||||
* creates a [[GraphStageLogic]] which implements the processing logic that ties the ports together.
|
* creates a [[GraphStageLogic]] which implements the processing logic that ties the ports together.
|
||||||
*
|
*
|
||||||
* See also [[GraphStageWithMaterializedValue]] for Scala DSL for this stage.
|
 * See also [[GraphStageWithMaterializedValue]] for the Scala DSL counterpart of this operator.
|
||||||
*/
|
*/
|
||||||
abstract class AbstractGraphStageWithMaterializedValue[+S <: Shape, M] extends GraphStageWithMaterializedValue[S, M] {
|
abstract class AbstractGraphStageWithMaterializedValue[+S <: Shape, M] extends GraphStageWithMaterializedValue[S, M] {
|
||||||
@throws(classOf[Exception])
|
@throws(classOf[Exception])
|
||||||
|
|
@ -83,7 +83,7 @@ abstract class AbstractGraphStageWithMaterializedValue[+S <: Shape, M] extends G
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* A GraphStage represents a reusable graph stream processing stage.
|
* A GraphStage represents a reusable graph stream processing operator.
|
||||||
*
|
*
|
||||||
* A GraphStage consists of a [[Shape]] which describes its input and output ports and a factory function that
|
* A GraphStage consists of a [[Shape]] which describes its input and output ports and a factory function that
|
||||||
* creates a [[GraphStageLogic]] which implements the processing logic that ties the ports together.
|
* creates a [[GraphStageLogic]] which implements the processing logic that ties the ports together.
|
||||||
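For context, a minimal sketch of such an operator: a one-to-one map implemented as a GraphStage (the class and port names are illustrative):

  import akka.stream.{ Attributes, FlowShape, Inlet, Outlet }
  import akka.stream.stage.{ GraphStage, GraphStageLogic, InHandler, OutHandler }

  final class MapStage[A, B](f: A => B) extends GraphStage[FlowShape[A, B]] {
    val in: Inlet[A]   = Inlet("MapStage.in")
    val out: Outlet[B] = Outlet("MapStage.out")
    override val shape: FlowShape[A, B] = FlowShape(in, out)

    override def createLogic(inheritedAttributes: Attributes): GraphStageLogic =
      new GraphStageLogic(shape) {
        setHandlers(in, out, new InHandler with OutHandler {
          override def onPush(): Unit = push(out, f(grab(in)))
          override def onPull(): Unit = pull(in)
        })
      }
  }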
|
|
@ -106,8 +106,8 @@ object GraphStageLogic {
|
||||||
extends RuntimeException("You must first call getStageActor, to initialize the Actors behavior")
|
extends RuntimeException("You must first call getStageActor, to initialize the Actors behavior")
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Input handler that terminates the stage upon receiving completion.
|
* Input handler that terminates the operator upon receiving completion.
|
||||||
* The stage fails upon receiving a failure.
|
* The operator fails upon receiving a failure.
|
||||||
*/
|
*/
|
||||||
object EagerTerminateInput extends InHandler {
|
object EagerTerminateInput extends InHandler {
|
||||||
override def onPush(): Unit = ()
|
override def onPush(): Unit = ()
|
||||||
|
|
@ -115,8 +115,8 @@ object GraphStageLogic {
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Input handler that does not terminate the stage upon receiving completion.
|
* Input handler that does not terminate the operator upon receiving completion.
|
||||||
* The stage fails upon receiving a failure.
|
* The operator fails upon receiving a failure.
|
||||||
*/
|
*/
|
||||||
object IgnoreTerminateInput extends InHandler {
|
object IgnoreTerminateInput extends InHandler {
|
||||||
override def onPush(): Unit = ()
|
override def onPush(): Unit = ()
|
||||||
|
|
@ -126,7 +126,7 @@ object GraphStageLogic {
|
||||||
|
|
||||||
/**
|
/**
|
||||||
 * Input handler that terminates the operator upon receiving completion if the
|
 * Input handler that terminates the operator upon receiving completion if the
|
||||||
* given condition holds at that time. The stage fails upon receiving a failure.
|
* given condition holds at that time. The operator fails upon receiving a failure.
|
||||||
*/
|
*/
|
||||||
class ConditionalTerminateInput(predicate: () ⇒ Boolean) extends InHandler {
|
class ConditionalTerminateInput(predicate: () ⇒ Boolean) extends InHandler {
|
||||||
override def onPush(): Unit = ()
|
override def onPush(): Unit = ()
|
||||||
|
|
@ -135,7 +135,7 @@ object GraphStageLogic {
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Input handler that does not terminate the stage upon receiving completion
|
* Input handler that does not terminate the operator upon receiving completion
|
||||||
* nor failure.
|
* nor failure.
|
||||||
*/
|
*/
|
||||||
object TotallyIgnorantInput extends InHandler {
|
object TotallyIgnorantInput extends InHandler {
|
||||||
|
|
@ -145,7 +145,7 @@ object GraphStageLogic {
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Output handler that terminates the stage upon cancellation.
|
* Output handler that terminates the operator upon cancellation.
|
||||||
*/
|
*/
|
||||||
object EagerTerminateOutput extends OutHandler {
|
object EagerTerminateOutput extends OutHandler {
|
||||||
override def onPull(): Unit = ()
|
override def onPull(): Unit = ()
|
||||||
|
|
@ -153,7 +153,7 @@ object GraphStageLogic {
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Output handler that does not terminate the stage upon cancellation.
|
* Output handler that does not terminate the operator upon cancellation.
|
||||||
*/
|
*/
|
||||||
object IgnoreTerminateOutput extends OutHandler {
|
object IgnoreTerminateOutput extends OutHandler {
|
||||||
override def onPull(): Unit = ()
|
override def onPull(): Unit = ()
|
||||||
|
|
@ -163,7 +163,7 @@ object GraphStageLogic {
|
||||||
|
|
||||||
/**
|
/**
|
||||||
 * Output handler that terminates the operator upon receiving completion if the
|
 * Output handler that terminates the operator upon receiving completion if the
|
||||||
* given condition holds at that time. The stage fails upon receiving a failure.
|
* given condition holds at that time. The operator fails upon receiving a failure.
|
||||||
*/
|
*/
|
||||||
class ConditionalTerminateOutput(predicate: () ⇒ Boolean) extends OutHandler {
|
class ConditionalTerminateOutput(predicate: () ⇒ Boolean) extends OutHandler {
|
||||||
override def onPull(): Unit = ()
|
override def onPull(): Unit = ()
|
||||||
|
|
@ -270,12 +270,12 @@ object GraphStageLogic {
|
||||||
* * The lifecycle hooks [[preStart()]] and [[postStop()]]
|
* * The lifecycle hooks [[preStart()]] and [[postStop()]]
|
||||||
* * Methods for performing stream processing actions, like pulling or pushing elements
|
* * Methods for performing stream processing actions, like pulling or pushing elements
|
||||||
*
|
*
|
||||||
* The stage logic is completed once all its input and output ports have been closed. This can be changed by
|
* The operator logic is completed once all its input and output ports have been closed. This can be changed by
|
||||||
* setting `setKeepGoing` to true.
|
* setting `setKeepGoing` to true.
|
||||||
*
|
*
|
||||||
* The `postStop` lifecycle hook on the logic itself is called once all ports are closed. This is the only tear down
|
* The `postStop` lifecycle hook on the logic itself is called once all ports are closed. This is the only tear down
|
||||||
 * callback that is guaranteed to happen; if the actor system or the materializer is terminated the handlers may never
|
 * callback that is guaranteed to happen; if the actor system or the materializer is terminated the handlers may never
|
||||||
* see any callbacks to `onUpstreamFailure`, `onUpstreamFinish` or `onDownstreamFinish`. Therefore stage resource
|
* see any callbacks to `onUpstreamFailure`, `onUpstreamFinish` or `onDownstreamFinish`. Therefore operator resource
|
||||||
* cleanup should always be done in `postStop`.
|
* cleanup should always be done in `postStop`.
|
||||||
*/
|
*/
|
||||||
abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount: Int) {
|
abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount: Int) {
|
||||||
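A sketch of the cleanup rule above: a hypothetical sink operator that opens a file writer in `preStart` and closes it in `postStop`, the one teardown hook guaranteed to run:

  import java.io.{ BufferedWriter, FileWriter }
  import akka.stream.{ Attributes, Inlet, SinkShape }
  import akka.stream.stage.{ GraphStage, GraphStageLogic, InHandler }

  final class LineFileSink(path: String) extends GraphStage[SinkShape[String]] {
    val in: Inlet[String] = Inlet("LineFileSink.in")
    override val shape: SinkShape[String] = SinkShape(in)

    override def createLogic(attrs: Attributes): GraphStageLogic =
      new GraphStageLogic(shape) {
        private var writer: BufferedWriter = _

        override def preStart(): Unit = {
          writer = new BufferedWriter(new FileWriter(path)) // acquire the resource
          pull(in)                                          // signal initial demand
        }

        override def postStop(): Unit =
          if (writer ne null) writer.close()                // always release here

        setHandler(in, new InHandler {
          override def onPush(): Unit = {
            writer.write(grab(in))
            writer.newLine()
            pull(in)
          }
        })
      }
  }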
|
|
@ -301,7 +301,7 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount:
|
||||||
/**
|
/**
|
||||||
* INTERNAL API
|
* INTERNAL API
|
||||||
*
|
*
|
||||||
* If possible a link back to the stage that the logic was created with, used for debugging.
|
* If possible a link back to the operator that the logic was created with, used for debugging.
|
||||||
*/
|
*/
|
||||||
private[stream] var originalStage: OptionVal[GraphStageWithMaterializedValue[_ <: Shape, _]] = OptionVal.None
|
private[stream] var originalStage: OptionVal[GraphStageWithMaterializedValue[_ <: Shape, _]] = OptionVal.None
|
||||||
|
|
||||||
|
|
@ -364,39 +364,39 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount:
|
||||||
protected def subFusingMaterializer: Materializer = interpreter.subFusingMaterializer
|
protected def subFusingMaterializer: Materializer = interpreter.subFusingMaterializer
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Input handler that terminates the stage upon receiving completion.
|
* Input handler that terminates the operator upon receiving completion.
|
||||||
* The stage fails upon receiving a failure.
|
* The operator fails upon receiving a failure.
|
||||||
*/
|
*/
|
||||||
final protected def eagerTerminateInput: InHandler = EagerTerminateInput
|
final protected def eagerTerminateInput: InHandler = EagerTerminateInput
|
||||||
/**
|
/**
|
||||||
* Input handler that does not terminate the stage upon receiving completion.
|
* Input handler that does not terminate the operator upon receiving completion.
|
||||||
* The stage fails upon receiving a failure.
|
* The operator fails upon receiving a failure.
|
||||||
*/
|
*/
|
||||||
final protected def ignoreTerminateInput: InHandler = IgnoreTerminateInput
|
final protected def ignoreTerminateInput: InHandler = IgnoreTerminateInput
|
||||||
/**
|
/**
|
||||||
 * Input handler that terminates the operator upon receiving completion if the
|
 * Input handler that terminates the operator upon receiving completion if the
|
||||||
* given condition holds at that time. The stage fails upon receiving a failure.
|
* given condition holds at that time. The operator fails upon receiving a failure.
|
||||||
*/
|
*/
|
||||||
final protected def conditionalTerminateInput(predicate: () ⇒ Boolean): InHandler = new ConditionalTerminateInput(predicate)
|
final protected def conditionalTerminateInput(predicate: () ⇒ Boolean): InHandler = new ConditionalTerminateInput(predicate)
|
||||||
/**
|
/**
|
||||||
* Input handler that does not terminate the stage upon receiving completion
|
* Input handler that does not terminate the operator upon receiving completion
|
||||||
* nor failure.
|
* nor failure.
|
||||||
*/
|
*/
|
||||||
final protected def totallyIgnorantInput: InHandler = TotallyIgnorantInput
|
final protected def totallyIgnorantInput: InHandler = TotallyIgnorantInput
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Output handler that terminates the stage upon cancellation.
|
* Output handler that terminates the operator upon cancellation.
|
||||||
*/
|
*/
|
||||||
final protected def eagerTerminateOutput: OutHandler = EagerTerminateOutput
|
final protected def eagerTerminateOutput: OutHandler = EagerTerminateOutput
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Output handler that does not terminate the stage upon cancellation.
|
* Output handler that does not terminate the operator upon cancellation.
|
||||||
*/
|
*/
|
||||||
final protected def ignoreTerminateOutput: OutHandler = IgnoreTerminateOutput
|
final protected def ignoreTerminateOutput: OutHandler = IgnoreTerminateOutput
|
||||||
|
|
||||||
/**
|
/**
|
||||||
 * Output handler that terminates the operator upon receiving completion if the
|
 * Output handler that terminates the operator upon receiving completion if the
|
||||||
* given condition holds at that time. The stage fails upon receiving a failure.
|
* given condition holds at that time. The operator fails upon receiving a failure.
|
||||||
*/
|
*/
|
||||||
final protected def conditionalTerminateOutput(predicate: () ⇒ Boolean): OutHandler = new ConditionalTerminateOutput(predicate)
|
final protected def conditionalTerminateOutput(predicate: () ⇒ Boolean): OutHandler = new ConditionalTerminateOutput(predicate)
|
||||||
|
|
||||||
|
|
@ -409,7 +409,7 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount:
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Assign callbacks for linear stage for both [[Inlet]] and [[Outlet]]
|
* Assign callbacks for linear operator for both [[Inlet]] and [[Outlet]]
|
||||||
*/
|
*/
|
||||||
final protected def setHandlers(in: Inlet[_], out: Outlet[_], handler: InHandler with OutHandler): Unit = {
|
final protected def setHandlers(in: Inlet[_], out: Outlet[_], handler: InHandler with OutHandler): Unit = {
|
||||||
setHandler(in, handler)
|
setHandler(in, handler)
|
||||||
|
|
@ -575,11 +575,11 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount:
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Controls whether this stage shall shut down when all its ports are closed, which
|
* Controls whether this operator shall shut down when all its ports are closed, which
|
||||||
* is the default. In order to have it keep going past that point this method needs
|
* is the default. In order to have it keep going past that point this method needs
|
||||||
* to be called with a `true` argument before all ports are closed, and afterwards
|
* to be called with a `true` argument before all ports are closed, and afterwards
|
||||||
* it will not be closed until this method is called with a `false` argument or the
|
* it will not be closed until this method is called with a `false` argument or the
|
||||||
* stage is terminated via `completeStage()` or `failStage()`.
|
* operator is terminated via `completeStage()` or `failStage()`.
|
||||||
*/
|
*/
|
||||||
final protected def setKeepGoing(enabled: Boolean): Unit =
|
final protected def setKeepGoing(enabled: Boolean): Unit =
|
||||||
interpreter.setKeepGoing(this, enabled)
|
interpreter.setKeepGoing(this, enabled)
|
||||||
|
|
@ -600,7 +600,7 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount:
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Automatically invokes [[cancel()]] or [[complete()]] on all the input or output ports that have been called,
|
* Automatically invokes [[cancel()]] or [[complete()]] on all the input or output ports that have been called,
|
||||||
* then marks the stage as stopped.
|
* then marks the operator as stopped.
|
||||||
*/
|
*/
|
||||||
final def completeStage(): Unit = {
|
final def completeStage(): Unit = {
|
||||||
var i = 0
|
var i = 0
|
||||||
|
|
@ -618,7 +618,7 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount:
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Automatically invokes [[cancel()]] or [[fail()]] on all the input or output ports that have been called,
|
* Automatically invokes [[cancel()]] or [[fail()]] on all the input or output ports that have been called,
|
||||||
* then marks the stage as stopped.
|
* then marks the operator as stopped.
|
||||||
*/
|
*/
|
||||||
final def failStage(ex: Throwable): Unit = {
|
final def failStage(ex: Throwable): Unit = {
|
||||||
var i = 0
|
var i = 0
|
||||||
|
|
@ -980,7 +980,7 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount:
|
||||||
/**
|
/**
|
||||||
* Install a handler on the given inlet that emits received elements on the
|
* Install a handler on the given inlet that emits received elements on the
|
||||||
* given outlet before pulling for more data. `doFinish` and `doFail` control whether
|
* given outlet before pulling for more data. `doFinish` and `doFail` control whether
|
||||||
* completion or failure of the given inlet shall lead to stage termination or not.
|
* completion or failure of the given inlet shall lead to operator termination or not.
|
||||||
* `doPull` instructs to perform one initial pull on the `from` port.
|
* `doPull` instructs to perform one initial pull on the `from` port.
|
||||||
*/
|
*/
|
||||||
final protected def passAlong[Out, In <: Out](from: Inlet[In], to: Outlet[Out],
|
final protected def passAlong[Out, In <: Out](from: Inlet[In], to: Outlet[Out],
|
||||||
|
|
@ -1019,8 +1019,8 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount:
|
||||||
* [[AsyncCallback.invokeWithFeedback()]] has an internal promise that will be failed if event cannot be processed
|
* [[AsyncCallback.invokeWithFeedback()]] has an internal promise that will be failed if event cannot be processed
|
||||||
* due to stream completion.
|
* due to stream completion.
|
||||||
*
|
*
|
||||||
* To be thread safe this method must only be called from either the constructor of the graph stage during
|
* To be thread safe this method must only be called from either the constructor of the graph operator during
|
||||||
* materialization or one of the methods invoked by the graph stage machinery, such as `onPush` and `onPull`.
|
* materialization or one of the methods invoked by the graph operator machinery, such as `onPush` and `onPull`.
|
||||||
*
|
*
|
||||||
* This object can be cached and reused within the same [[GraphStageLogic]].
|
* This object can be cached and reused within the same [[GraphStageLogic]].
|
||||||
*/
|
*/
|
||||||
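For context, a sketch of the callback pattern: a hypothetical single-element source whose value arrives from a Future. The callback is registered during materialization and then invoked from the future's callback thread:

  import akka.stream.{ Attributes, Outlet, SourceShape }
  import akka.stream.stage.{ GraphStage, GraphStageLogic, OutHandler }
  import scala.concurrent.Future
  import scala.util.{ Failure, Success, Try }

  final class SingleFromFuture[T](futureValue: () => Future[T]) extends GraphStage[SourceShape[T]] {
    val out: Outlet[T] = Outlet("SingleFromFuture.out")
    override val shape: SourceShape[T] = SourceShape(out)

    override def createLogic(attrs: Attributes): GraphStageLogic =
      new GraphStageLogic(shape) {
        // Registered during materialization; safe to invoke from any thread afterwards.
        private val onResult = getAsyncCallback[Try[T]] {
          case Success(value) => emit(out, value, () => completeStage())
          case Failure(ex)    => failStage(ex)
        }

        override def preStart(): Unit =
          futureValue().onComplete(onResult.invoke)(materializer.executionContext)

        setHandler(out, new OutHandler {
          override def onPull(): Unit = () // the element is emitted from the async callback
        })
      }
  }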
|
|
@ -1038,7 +1038,7 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount:
|
||||||
* State of this object can be changed both "internally" by the owning GraphStage or by the "external world" (e.g. other threads).
|
* State of this object can be changed both "internally" by the owning GraphStage or by the "external world" (e.g. other threads).
|
||||||
* Specifically, calls to this class can be made:
|
* Specifically, calls to this class can be made:
|
||||||
* * From the owning [[GraphStage]], to [[onStart]] - when materialization is finished and to [[onStop()]] -
|
* * From the owning [[GraphStage]], to [[onStart]] - when materialization is finished and to [[onStop()]] -
|
||||||
* because the stage is about to stop or fail.
|
* because the operator is about to stop or fail.
|
||||||
* * "Real world" calls [[invoke()]] and [[invokeWithFeedback()]]. These methods have synchronization
|
* * "Real world" calls [[invoke()]] and [[invokeWithFeedback()]]. These methods have synchronization
|
||||||
* with class state that reflects the stream state
|
* with class state that reflects the stream state
|
||||||
*
|
*
|
||||||
|
|
@ -1157,14 +1157,14 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount:
|
||||||
/**
|
/**
|
||||||
* Initialize a [[StageActorRef]] which can be used to interact with from the outside world "as-if" an [[Actor]].
|
* Initialize a [[StageActorRef]] which can be used to interact with from the outside world "as-if" an [[Actor]].
|
||||||
* The messages are looped through the [[getAsyncCallback]] mechanism of [[GraphStage]] so they are safe to modify
|
* The messages are looped through the [[getAsyncCallback]] mechanism of [[GraphStage]] so they are safe to modify
|
||||||
* internal state of this stage.
|
* internal state of this operator.
|
||||||
*
|
*
|
||||||
 * This method must be called (at the earliest) after the [[GraphStageLogic]] constructor has finished running,
|
 * This method must be called (at the earliest) after the [[GraphStageLogic]] constructor has finished running,
|
||||||
* for example from the [[preStart]] callback the graph stage logic provides.
|
* for example from the [[preStart]] callback the graph operator logic provides.
|
||||||
*
|
*
|
||||||
 * Use the created [[StageActorRef]] to get messages and watch other actors in a synchronous way.
|
 * Use the created [[StageActorRef]] to get messages and watch other actors in a synchronous way.
|
||||||
*
|
*
|
||||||
* The [[StageActorRef]]'s lifecycle is bound to the Stage, in other words when the Stage is finished,
|
* The [[StageActorRef]]'s lifecycle is bound to the operator, in other words when the operator is finished,
|
||||||
* the Actor will be terminated as well. The entity backing the [[StageActorRef]] is not a real Actor,
|
* the Actor will be terminated as well. The entity backing the [[StageActorRef]] is not a real Actor,
|
||||||
* but the [[GraphStageLogic]] itself, therefore it does not react to [[PoisonPill]].
|
* but the [[GraphStageLogic]] itself, therefore it does not react to [[PoisonPill]].
|
||||||
*
|
*
|
||||||
|
|
@ -1185,7 +1185,7 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount:
|
||||||
}
|
}
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Override and return a name to be given to the StageActor of this stage.
|
* Override and return a name to be given to the StageActor of this operator.
|
||||||
*
|
*
|
||||||
* This method will be only invoked and used once, during the first [[getStageActor]]
|
* This method will be only invoked and used once, during the first [[getStageActor]]
|
||||||
 * invocation which creates the actor, since subsequent `getStageActor` calls function
|
 * invocation which creates the actor, since subsequent `getStageActor` calls function
|
||||||
|
|
@ -1247,13 +1247,13 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount:
|
||||||
new StreamDetachedException(s"Stage with GraphStageLogic ${this} stopped before async invocation was processed")
|
new StreamDetachedException(s"Stage with GraphStageLogic ${this} stopped before async invocation was processed")
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Invoked before any external events are processed, at the startup of the stage.
|
* Invoked before any external events are processed, at the startup of the operator.
|
||||||
*/
|
*/
|
||||||
@throws(classOf[Exception])
|
@throws(classOf[Exception])
|
||||||
def preStart(): Unit = ()
|
def preStart(): Unit = ()
|
||||||
|
|
||||||
/**
|
/**
|
||||||
* Invoked after processing of external events stopped because the stage is about to stop or fail.
|
* Invoked after processing of external events stopped because the operator is about to stop or fail.
|
||||||
*/
|
*/
|
||||||
@throws(classOf[Exception])
|
@throws(classOf[Exception])
|
||||||
def postStop(): Unit = ()
|
def postStop(): Unit = ()
|
||||||
|
|
@ -1264,7 +1264,7 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount:
|
||||||
* This allows the dynamic creation of an Inlet for a GraphStage which is
|
* This allows the dynamic creation of an Inlet for a GraphStage which is
|
||||||
* connected to a Sink that is available for materialization (e.g. using
|
* connected to a Sink that is available for materialization (e.g. using
|
||||||
* the `subFusingMaterializer`). Care needs to be taken to cancel this Inlet
|
* the `subFusingMaterializer`). Care needs to be taken to cancel this Inlet
|
||||||
* when the stage shuts down lest the corresponding Sink be left hanging.
|
* when the operator shuts down lest the corresponding Sink be left hanging.
|
||||||
*/
|
*/
|
||||||
class SubSinkInlet[T](name: String) {
|
class SubSinkInlet[T](name: String) {
|
||||||
import ActorSubscriberMessage._
|
import ActorSubscriberMessage._
|
||||||
|
|
@ -1327,7 +1327,7 @@ abstract class GraphStageLogic private[stream] (val inCount: Int, val outCount:
|
||||||
* This allows the dynamic creation of an Outlet for a GraphStage which is
|
* This allows the dynamic creation of an Outlet for a GraphStage which is
|
||||||
* connected to a Source that is available for materialization (e.g. using
|
* connected to a Source that is available for materialization (e.g. using
|
||||||
* the `subFusingMaterializer`). Care needs to be taken to complete this
|
* the `subFusingMaterializer`). Care needs to be taken to complete this
|
||||||
* Outlet when the stage shuts down lest the corresponding Sink be left
|
* Outlet when the operator shuts down lest the corresponding Sink be left
|
||||||
* hanging. It is good practice to use the `timeout` method to cancel this
|
* hanging. It is good practice to use the `timeout` method to cancel this
|
||||||
* Outlet in case the corresponding Source is not materialized within a
|
* Outlet in case the corresponding Source is not materialized within a
|
||||||
* given time limit, see e.g. ActorMaterializerSettings.
|
* given time limit, see e.g. ActorMaterializerSettings.
|
||||||
|
|
@ -1441,8 +1441,8 @@ trait AsyncCallback[T] {
|
||||||
* may be invoked from external execution contexts.
|
* may be invoked from external execution contexts.
|
||||||
*
|
*
|
||||||
* The method returns directly and the returned future is then completed once the event
|
* The method returns directly and the returned future is then completed once the event
|
||||||
* has been handled by the stage, if the event triggers an exception from the handler the future
|
 * has been handled by the operator; if the event triggers an exception from the handler the future
|
||||||
* is failed with that exception and finally if the stage was stopped before the event has been
|
* is failed with that exception and finally if the operator was stopped before the event has been
|
||||||
* handled the future is failed with `StreamDetachedException`.
|
* handled the future is failed with `StreamDetachedException`.
|
||||||
*
|
*
|
||||||
* The handling of the returned future incurs a slight overhead, so for cases where it does not matter
|
* The handling of the returned future incurs a slight overhead, so for cases where it does not matter
|
||||||
|
|
|
||||||
|
|
@ -16,7 +16,7 @@ import akka.stream.MaterializerLoggingProvider
|
||||||
* Note, abiding to [[akka.stream.ActorAttributes.logLevels]] has to be done manually,
|
* Note, abiding to [[akka.stream.ActorAttributes.logLevels]] has to be done manually,
|
||||||
* the logger itself is configured based on the logSource provided to it. Also, the `log`
|
* the logger itself is configured based on the logSource provided to it. Also, the `log`
|
||||||
* itself would not know if you're calling it from a "on element" context or not, which is why
|
* itself would not know if you're calling it from a "on element" context or not, which is why
|
||||||
* these decisions have to be handled by the stage itself.
|
* these decisions have to be handled by the operator itself.
|
||||||
*/
|
*/
|
||||||
trait StageLogging { self: GraphStageLogic ⇒
|
trait StageLogging { self: GraphStageLogic ⇒
|
||||||
private[this] var _log: LoggingAdapter = _
|
private[this] var _log: LoggingAdapter = _
|
||||||
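To round off, a sketch of using this trait: mix `StageLogging` into a `GraphStageLogic` to get a `log` adapter inside a hypothetical pass-through operator; whether to log per element remains the operator's own decision:

  import akka.stream.{ Attributes, FlowShape, Inlet, Outlet }
  import akka.stream.stage.{ GraphStage, GraphStageLogic, InHandler, OutHandler, StageLogging }

  final class LoggingPassThrough[A] extends GraphStage[FlowShape[A, A]] {
    val in: Inlet[A]   = Inlet("LoggingPassThrough.in")
    val out: Outlet[A] = Outlet("LoggingPassThrough.out")
    override val shape: FlowShape[A, A] = FlowShape(in, out)

    override def createLogic(attrs: Attributes): GraphStageLogic =
      new GraphStageLogic(shape) with StageLogging {
        setHandlers(in, out, new InHandler with OutHandler {
          override def onPush(): Unit = {
            val elem = grab(in)
            log.debug("passing through element: {}", elem)
            push(out, elem)
          }
          override def onPull(): Unit = pull(in)
        })
      }
  }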
|
|
|
||||||