=doc #17329 Spelling normalization, typos fixed.
parent 880d51b89b
commit f858881b2e
31 changed files with 81 additions and 81 deletions
@@ -196,7 +196,7 @@ case class ValidationRejection(message: String, cause: Option[Throwable] = None)
* 3. A TransformationRejection holding a function filtering out the MethodRejection
*
* so that in the end the RejectionHandler will only see one rejection (the ValidationRejection), because the
- * MethodRejection added by the ``get`` directive is cancelled by the ``put`` directive (since the HTTP method
+ * MethodRejection added by the ``get`` directive is canceled by the ``put`` directive (since the HTTP method
* did indeed match eventually).
*/
case class TransformationRejection(transform: immutable.Seq[Rejection] ⇒ immutable.Seq[Rejection]) extends Rejection
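Not part of the commit, but as a quick illustration of the cancellation mechanism described in the Scaladoc above, the following self-contained sketch uses stand-in types (the real `Rejection` hierarchy lives in the routing module) to show a `TransformationRejection` that filters out `MethodRejection`s:

```scala
import scala.collection.immutable

// Stand-ins for the real routing rejection model referenced above.
sealed trait Rejection
case object MethodRejection extends Rejection
final case class TransformationRejection(
  transform: immutable.Seq[Rejection] => immutable.Seq[Rejection]) extends Rejection

// Mirrors how the ``put`` directive cancels the MethodRejection added by ``get``:
// the transformation simply drops MethodRejections from the rejection list.
val cancelMethodRejections =
  TransformationRejection(_.filterNot(_ == MethodRejection))
```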
@@ -18,7 +18,7 @@ import akka.testkit.{ TestKit, TestDuration }
* The Coroner can be used to print a diagnostic report of the JVM state,
* including stack traces and deadlocks. A report can be printed directly, by
* calling `printReport`. Alternatively, the Coroner can be asked to `watch`
- * the JVM and generate a report at a later time - unless the Coroner is cancelled
+ * the JVM and generate a report at a later time - unless the Coroner is canceled
* by that time.
*
* The latter method is useful for printing diagnostics in the event that, for

@@ -30,7 +30,7 @@ object Coroner { // FIXME: remove once going back to project dependencies

/**
* Used to cancel the Coroner after calling `watch`.
- * The result of this Awaitable will be `true` if it has been cancelled.
+ * The result of this Awaitable will be `true` if it has been canceled.
*/
trait WatchHandle extends Awaitable[Boolean] {
/**

@@ -73,7 +73,7 @@ object Coroner { // FIXME: remove once going back to project dependencies
val defaultStartAndStopDuration = 1.second

/**
- * Ask the Coroner to print a report if it is not cancelled by the given deadline.
+ * Ask the Coroner to print a report if it is not canceled by the given deadline.
* The returned handle can be used to perform the cancellation.
*
* If displayThreadCounts is set to true, then the Coroner will print thread counts during start
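The Coroner hunks above describe a watch-then-cancel workflow. A rough usage sketch follows; only the `WatchHandle`/`Awaitable[Boolean]` contract is taken from the diff, while the exact `Coroner.watch` parameter list and the `cancel()` method name are assumptions and may not match this revision:

```scala
import scala.concurrent.Await
import scala.concurrent.duration._

// Assumed entry point: duration, report title and output stream are guesses.
val handle = Coroner.watch(30.seconds, "MySpec", System.err)
try {
  // ... run the test code that might deadlock ...
} finally {
  handle.cancel() // assumed cancellation method on WatchHandle
  // per the doc above, the Awaitable yields `true` if the watch was canceled in time
  val wasCanceled: Boolean = Await.result(handle, 5.seconds)
}
```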
@@ -126,7 +126,7 @@ class ActorRefSourceSpec extends AkkaSpec {
sub.request(2) // not all elements drained yet
s.expectNext(1, 2)
ref ! PoisonPill
- s.expectComplete() // element `3` not signalled
+ s.expectComplete() // element `3` not signaled
}

"fail the stream when receiving Status.Failure" in assertAllStagesStopped {

@@ -143,7 +143,7 @@ class FlowSpec extends AkkaSpec(ConfigFactory.parseString("akka.actor.debug.rece
downstream.expectNext("test2")
downstreamSubscription.cancel()

- // because of the "must cancel its upstream Subscription if its last downstream Subscription has been cancelled" rule
+ // because of the "must cancel its upstream Subscription if its last downstream Subscription has been canceled" rule
upstreamSubscription.expectCancellation()
}
}

@@ -367,7 +367,7 @@ class GraphFlexiMergeSpec extends AkkaSpec {
sub.request(10)
s.expectNext((1L, 1, "a"))
s.expectNext((2L, 2, "b"))
- // soonCancelledInput is now cancelled and continues with default (null) value
+ // soonCancelledInput is now canceled and continues with default (null) value
s.expectNext((0L, 3, "c"))
s.expectComplete()
}

@@ -134,7 +134,7 @@ class SubstreamSubscriptionTimeoutSpec(conf: String) extends AkkaSpec(conf) {
s2.runWith(Sink.publisher).subscribe(s2SubscriberProbe)
val s2Sub = s2SubscriberProbe.expectSubscription()

- // sleep long enough for tiemout to trigger if not cancelled
+ // sleep long enough for timeout to trigger if not canceled
Thread.sleep(1000)

s2Sub.request(100)

@@ -60,7 +60,7 @@ object ActorPublisherMessage {

/**
* This message is delivered to the [[ActorPublisher]] actor in order to signal the exceeding of an subscription timeout.
- * Once the actor receives this message, this publisher will already be in cancelled state, thus the actor should clean-up and stop itself.
+ * Once the actor receives this message, this publisher will already be in canceled state, thus the actor should clean-up and stop itself.
*/
final case object SubscriptionTimeoutExceeded extends SubscriptionTimeoutExceeded with NoSerializationVerificationNeeded
sealed abstract class SubscriptionTimeoutExceeded extends ActorPublisherMessage

@@ -208,7 +208,7 @@ trait ActorPublisher[T] extends Actor {
* Complete the stream. After that you are not allowed to
* call [[#onNext]], [[#onError]] and [[#onComplete]].
*
- * After signalling completion the Actor will then stop itself as it has completed the protocol.
+ * After signaling completion the Actor will then stop itself as it has completed the protocol.
* When [[#onComplete]] is called before any [[Subscriber]] has had the chance to subscribe
* to this [[ActorPublisher]] the completion signal (and therefore stopping of the Actor as well)
* will be delayed until such [[Subscriber]] arrives.

@@ -240,7 +240,7 @@ trait ActorPublisher[T] extends Actor {
* Terminate the stream with failure. After that you are not allowed to
* call [[#onNext]], [[#onError]] and [[#onComplete]].
*
- * After signalling the Error the Actor will then stop itself as it has completed the protocol.
+ * After signaling the Error the Actor will then stop itself as it has completed the protocol.
* When [[#onError]] is called before any [[Subscriber]] has had the chance to subscribe
* to this [[ActorPublisher]] the error signal (and therefore stopping of the Actor as well)
* will be delayed until such [[Subscriber]] arrives.
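The two ActorPublisher hunks above describe the completion/error protocol. For orientation, here is a minimal publisher sketch (not from this commit) that follows that protocol; it assumes the era's `Request`/`Cancel` messages and `totalDemand`, which are not shown in this diff:

```scala
import akka.stream.actor.ActorPublisher
import akka.stream.actor.ActorPublisherMessage.{ Cancel, Request }

class DigitsPublisher extends ActorPublisher[Int] {
  private var remaining = List(1, 2, 3)

  def receive = {
    case Request(_) =>
      // deliver while there is demand and data left
      while (totalDemand > 0 && remaining.nonEmpty) {
        onNext(remaining.head)
        remaining = remaining.tail
      }
      // after signaling completion the actor stops itself, as documented above
      if (remaining.isEmpty) onComplete()
    case Cancel =>
      context.stop(self)
  }
}
```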
@@ -83,7 +83,7 @@ case object ZeroRequestStrategy extends RequestStrategy {
object WatermarkRequestStrategy {
/**
* Create [[WatermarkRequestStrategy]] with `lowWatermark` as half of
- * the specifed `highWatermark`.
+ * the specified `highWatermark`.
*/
def apply(highWatermark: Int): WatermarkRequestStrategy = new WatermarkRequestStrategy(highWatermark)
}

@@ -98,7 +98,7 @@ final case class WatermarkRequestStrategy(highWatermark: Int, lowWatermark: Int)

/**
* Create [[WatermarkRequestStrategy]] with `lowWatermark` as half of
- * the specifed `highWatermark`.
+ * the specified `highWatermark`.
*/
def this(highWatermark: Int) = this(highWatermark, lowWatermark = math.max(1, highWatermark / 2))
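As the auxiliary constructor in the hunk above shows, supplying only a `highWatermark` derives the low watermark automatically; a minimal usage example (import path assumed to be `akka.stream.actor`):

```scala
import akka.stream.actor.WatermarkRequestStrategy

// Only highWatermark is supplied, so lowWatermark = math.max(1, 10 / 2) = 5.
val strategy = WatermarkRequestStrategy(highWatermark = 10)
```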
@@ -257,7 +257,7 @@ trait ActorSubscriber extends Actor {
* Cancel upstream subscription.
* No more elements will be delivered after cancel.
*
- * The [[ActorSubscriber]] will be stopped immediatly after signalling cancelation.
+ * The [[ActorSubscriber]] will be stopped immediately after signaling cancellation.
* In case the upstream subscription has not yet arrived the Actor will stay alive
* until a subscription arrives, cancel it and then stop itself.
*/

@@ -268,7 +268,7 @@ trait ActorSubscriber extends Actor {
context.stop(self)
s.cancel()
case _ ⇒
- _canceled = true // cancel will be signalled once a subscription arrives
+ _canceled = true // cancel will be signaled once a subscription arrives
}
}

@@ -21,13 +21,13 @@ object Implicits {
implicit class TimedSourceDsl[I, Mat](val source: Source[I, Mat]) extends AnyVal {

/**
- * Measures time from receieving the first element and completion events - one for each subscriber of this `Flow`.
+ * Measures time from receiving the first element and completion events - one for each subscriber of this `Flow`.
*/
def timed[O, Mat2](measuredOps: Source[I, Mat] ⇒ Source[O, Mat2], onComplete: FiniteDuration ⇒ Unit): Source[O, Mat2] =
Timed.timed[I, O, Mat, Mat2](source, measuredOps, onComplete)

/**
- * Measures rolling interval between immediatly subsequent `matching(o: O)` elements.
+ * Measures rolling interval between immediately subsequent `matching(o: O)` elements.
*/
def timedIntervalBetween(matching: I ⇒ Boolean, onInterval: FiniteDuration ⇒ Unit): Source[I, Mat] =
Timed.timedIntervalBetween[I, Mat](source, matching, onInterval)

@@ -41,13 +41,13 @@ object Implicits {
implicit class TimedFlowDsl[I, O, Mat](val flow: Flow[I, O, Mat]) extends AnyVal {

/**
- * Measures time from receieving the first element and completion events - one for each subscriber of this `Flow`.
+ * Measures time from receiving the first element and completion events - one for each subscriber of this `Flow`.
*/
def timed[Out, Mat2](measuredOps: Flow[I, O, Mat] ⇒ Flow[I, Out, Mat2], onComplete: FiniteDuration ⇒ Unit): Flow[I, Out, Mat2] =
Timed.timed[I, O, Out, Mat, Mat2](flow, measuredOps, onComplete)

/**
- * Measures rolling interval between immediatly subsequent `matching(o: O)` elements.
+ * Measures rolling interval between immediately subsequent `matching(o: O)` elements.
*/
def timedIntervalBetween(matching: O ⇒ Boolean, onInterval: FiniteDuration ⇒ Unit): Flow[I, O, Mat] =
Timed.timedIntervalBetween[I, O, Mat](flow, matching, onInterval)
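The `timed` signatures above come from the timing DSL in `object Implicits`; a brief sketch of how the Source variant is used (the `akka.stream.extra` import path is assumed, as it is not visible in this hunk):

```scala
import akka.stream.scaladsl.Source
import akka.stream.extra.Implicits._

// Measures from the first element of the wrapped section until completion and
// reports the elapsed FiniteDuration to the onComplete callback.
val timedSource =
  Source(1 to 100).timed(_.map(_ * 2), onComplete = d => println(s"mapping took $d"))
```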
@@ -82,7 +82,7 @@ private[akka] class ActorRefSourceActor(bufferSize: Int, overflowStrategy: Overf
context.stop(self)

case Status.Failure(cause) if isActive ⇒
- // errors must be signalled as soon as possible,
+ // errors must be signaled as soon as possible,
// even if previously valid completion was requested via Status.Success
onErrorThenStop(cause)

@@ -17,7 +17,7 @@ private[akka] abstract class ExposedPublisherReceive(activeReceive: Actor.Receiv
case ep: ExposedPublisher ⇒
receiveExposedPublisher(ep)
if (stash.nonEmpty) {
- // we don't use sender() so this is allright
+ // we don't use sender() so this is alright
stash.reverse.foreach { msg ⇒
activeReceive.applyOrElse(msg, unhandled)
}

@@ -196,7 +196,7 @@ private[akka] object FanOut {
/**
* Will only transfer an element when all marked outputs
* have demand, and will complete as soon as any of the marked
- * outputs have cancelled.
+ * outputs have canceled.
*/
val AllOfMarkedOutputs = new TransferState {
override def isCompleted: Boolean = markedCancelled > 0 || markedCount == 0

@@ -206,7 +206,7 @@ private[akka] object FanOut {
/**
* Will transfer an element when any of the marked outputs
* have demand, and will complete when all of the marked
- * outputs have cancelled.
+ * outputs have canceled.
*/
val AnyOfMarkedOutputs = new TransferState {
override def isCompleted: Boolean = markedCancelled == markedCount

@@ -14,7 +14,7 @@ private[akka] object FixedSizeBuffer {
/**
* INTERNAL API
*
- * Returns a fixed size buffer backed by an array. The buffer implementation DOES NOT check agains overflow or
+ * Returns a fixed size buffer backed by an array. The buffer implementation DOES NOT check against overflow or
* underflow, it is the responsibility of the user to track or check the capacity of the buffer before enqueueing
* dequeueing or dropping.
*
@@ -25,7 +25,7 @@ private[stream] object Junctions {
else throw new UnsupportedOperationException("cannot change the shape of a " + simpleName(this))
}

- // note: can't be sealed as we have boilerplate generated classes which must extend FaninModule/FanoutModule
+ // note: can't be sealed as we have boilerplate generated classes which must extend FanInModule/FanOutModule
private[akka] trait FanInModule extends JunctionModule
private[akka] trait FanOutModule extends JunctionModule

@@ -46,7 +46,7 @@ object StreamSubscriptionTimeoutSupport {
/**
* INTERNAL API
* Provides support methods to create Publishers and Subscribers which time-out gracefully,
- * and are cancelled subscribing an `CancellingSubscriber` to the publisher, or by calling `onError` on the timed-out subscriber.
+ * and are canceled subscribing an `CancellingSubscriber` to the publisher, or by calling `onError` on the timed-out subscriber.
*
* See `akka.stream.materializer.subscription-timeout` for configuration options.
*/

@@ -103,7 +103,7 @@ private[akka] trait StreamSubscriptionTimeoutSupport {
}

/**
- * Callback that should ensure that the target is cancelled with the given cause.
+ * Callback that should ensure that the target is canceled with the given cause.
*/
protected def handleSubscriptionTimeout(target: Publisher[_], cause: Exception): Unit
}

@@ -71,7 +71,7 @@ private[akka] trait SubscriberManagement[T] extends ResizableMultiReaderRingBuff

/**
* called before `shutdown()` if the stream is *not* being regularly completed
- * but shut-down due to the last subscriber having cancelled its subscription
+ * but shut-down due to the last subscriber having canceled its subscription
*/
protected def cancelUpstream(): Unit

@@ -712,7 +712,7 @@ private[akka] class OneBoundedInterpreter(ops: Seq[Stage[_, _]],
a.context = state
try a.preStart(state) catch {
case NonFatal(ex) ⇒
- failures ::= InitializationFailure(op, ex) // not logging here as 'most downstream' exception will be signalled via onError
+ failures ::= InitializationFailure(op, ex) // not logging here as 'most downstream' exception will be signaled via onError
}
}
op += 1
@@ -15,7 +15,7 @@ import scala.concurrent.{ Future, Promise }
/**
* INTERNAL API
* Creates simple synchronous (Java 6 compatible) Sink which writes all incoming elements to the given file
- * (creating it before hand if neccessary).
+ * (creating it before hand if necessary).
*/
private[akka] final class SynchronousFileSink(f: File, append: Boolean, val attributes: Attributes, shape: SinkShape[ByteString])
extends SinkModule[ByteString, Future[Long]](shape) {

@@ -42,7 +42,7 @@ private[akka] final class SynchronousFileSink(f: File, append: Boolean, val attr
/**
* INTERNAL API
* Creates simple synchronous (Java 6 compatible) Sink which writes all incoming elements to the given file
- * (creating it before hand if neccessary).
+ * (creating it before hand if necessary).
*/
private[akka] final class OutputStreamSink(createOutput: () ⇒ OutputStream, val attributes: Attributes, shape: SinkShape[ByteString])
extends SinkModule[ByteString, Future[Long]](shape) {

@@ -225,7 +225,7 @@ case object Server extends Server
* - [[IgnoreCancel]] means to not react to cancellation of the receiving
* side unless the sending side has already completed
* - [[IgnoreComplete]] means to not react to the completion of the sending
- * side unless the receiving side has already cancelled
+ * side unless the receiving side has already canceled
* - [[IgnoreBoth]] means to ignore the first termination signal—be that
* cancellation or completion—and only act upon the second one
*/
@@ -23,7 +23,7 @@ object FlexiMerge {
* fulfilled when there are elements for one specific upstream
* input.
*
- * It is not allowed to use a handle that has been cancelled or
+ * It is not allowed to use a handle that has been canceled or
* has been completed. `IllegalArgumentException` is thrown if
* that is not obeyed.
*/

@@ -34,7 +34,7 @@ object FlexiMerge {
* fulfilled when there are elements for any of the given upstream
* inputs.
*
- * Cancelled and completed inputs are not used, i.e. it is allowed
+ * Canceled and completed inputs are not used, i.e. it is allowed
* to specify them in the list of `inputs`.
*/
class ReadAny[T](val inputs: JList[InPort]) extends ReadCondition[T]

@@ -46,7 +46,7 @@ object FlexiMerge {
* the `preferred` and at least one other `secondary` input have demand,
* the `preferred` input will always be consumed first.
*
- * Cancelled and completed inputs are not used, i.e. it is allowed
+ * Canceled and completed inputs are not used, i.e. it is allowed
* to specify them in the list of `inputs`.
*/
class ReadPreferred[T](val preferred: InPort, val secondaries: JList[InPort]) extends ReadCondition[T]

@@ -56,11 +56,11 @@ object FlexiMerge {
* fulfilled when there are elements for *all* of the given upstream
* inputs.
*
- * The emitted element the will be a [[ReadAllInputs]] object, which contains values for all non-cancelled inputs of this FlexiMerge.
+ * The emitted element the will be a [[ReadAllInputs]] object, which contains values for all non-canceled inputs of this FlexiMerge.
*
- * Cancelled inputs are not used, i.e. it is allowed to specify them in the list of `inputs`,
+ * Canceled inputs are not used, i.e. it is allowed to specify them in the list of `inputs`,
* the resulting [[ReadAllInputs]] will then not contain values for this element, which can be
- * handled via supplying a default value instead of the value from the (now cancelled) input.
+ * handled via supplying a default value instead of the value from the (now canceled) input.
*/
class ReadAll(val inputs: JList[InPort]) extends ReadCondition[ReadAllInputs]

@@ -68,7 +68,7 @@ object FlexiMerge {
* Provides typesafe accessors to values from inputs supplied to [[ReadAll]].
*/
final class ReadAllInputs(map: immutable.Map[InPort, Any]) extends ReadAllInputsBase {
- /** Returns the value for the given [[Inlet]], or `null` if this input was cancelled. */
+ /** Returns the value for the given [[Inlet]], or `null` if this input was canceled. */
def get[T](input: Inlet[T]): T = getOrDefault(input, null)

/** Returns the value for the given [[Inlet]], or `defaultValue`. */

@@ -96,12 +96,12 @@ object FlexiMerge {
*/
trait MergeLogicContextBase[Out] {
/**
- * Complete this stream successfully. Upstream subscriptions will be cancelled.
+ * Complete this stream successfully. Upstream subscriptions will be canceled.
*/
def finish(): Unit

/**
- * Complete this stream with failure. Upstream subscriptions will be cancelled.
+ * Complete this stream with failure. Upstream subscriptions will be canceled.
*/
def fail(cause: Throwable): Unit

@@ -119,7 +119,7 @@ object FlexiMerge {
/**
* How to handle completion or failure from upstream input.
*
- * The `onUpstreamFinish` method is called when an upstream input was completed sucessfully.
+ * The `onUpstreamFinish` method is called when an upstream input was completed successfully.
* It returns next behavior or [[MergeLogic#sameState]] to keep current behavior.
* A completion can be propagated downstream with [[MergeLogicContextBase#finish]],
* or it can be swallowed to continue with remaining inputs.
@@ -22,7 +22,7 @@ object FlexiRoute {
* fulfilled when there are requests for elements from one specific downstream
* output.
*
- * It is not allowed to use a handle that has been cancelled or
+ * It is not allowed to use a handle that has been canceled or
* has been completed. `IllegalArgumentException` is thrown if
* that is not obeyed.
*/

@@ -33,7 +33,7 @@ object FlexiRoute {
* fulfilled when there are requests for elements from any of the given downstream
* outputs.
*
- * Cancelled and completed inputs are not used, i.e. it is allowed
+ * Canceled and completed inputs are not used, i.e. it is allowed
* to specify them in the list of `outputs`.
*/
class DemandFromAny(val outputs: JList[OutPort]) extends DemandCondition[OutPort]

@@ -43,7 +43,7 @@ object FlexiRoute {
* fulfilled when there are requests for elements from all of the given downstream
* outputs.
*
- * Cancelled and completed outputs are not used, i.e. it is allowed
+ * Canceled and completed outputs are not used, i.e. it is allowed
* to specify them in the list of `outputs`.
*/
class DemandFromAll(val outputs: JList[OutPort]) extends DemandCondition[Unit]
@@ -213,7 +213,7 @@ class Flow[-In, +Out, +Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Graph
*
* Make sure that the `Iterable` is immutable or at least not modified after
* being used as an output sequence. Otherwise the stream may fail with
- * `oncurrentModificationException` or other more subtle errors may occur.
+ * `ConcurrentModificationException` or other more subtle errors may occur.
*
* The returned `Iterable` MUST NOT contain `null` values,
* as they are illegal as stream elements - according to the Reactive Streams specification.

@@ -272,7 +272,7 @@ class Flow[-In, +Out, +Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Graph
* Transform this stream by applying the given function to each of the elements
* as they pass through this processing step. The function returns a `Future` and the
* value of that future will be emitted downstreams. As many futures as requested elements by
- * downstream may run in parallel and each processed element will be emitted dowstream
+ * downstream may run in parallel and each processed element will be emitted downstream
* as soon as it is ready, i.e. it is possible that the elements are not emitted downstream
* in the same order as received from upstream.
*

@@ -812,7 +812,7 @@ class Flow[-In, +Out, +Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Graph
* Logs elements flowing through the stream as well as completion and erroring.
*
* By default element and completion signals are logged on debug level, and errors are logged on Error level.
- * This can be adjusted according to your needs by providing a custom [[Attributes.LogLevels]] atrribute on the given Flow:
+ * This can be adjusted according to your needs by providing a custom [[Attributes.LogLevels]] attribute on the given Flow:
*
* The `extract` function will be applied to each element before logging, so it is possible to log only those fields
* of a complex object flowing through this element.

@@ -834,7 +834,7 @@ class Flow[-In, +Out, +Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Graph
* Logs elements flowing through the stream as well as completion and erroring.
*
* By default element and completion signals are logged on debug level, and errors are logged on Error level.
- * This can be adjusted according to your needs by providing a custom [[Attributes.LogLevels]] atrribute on the given Flow:
+ * This can be adjusted according to your needs by providing a custom [[Attributes.LogLevels]] attribute on the given Flow:
*
* The `extract` function will be applied to each element before logging, so it is possible to log only those fields
* of a complex object flowing through this element.

@@ -856,7 +856,7 @@ class Flow[-In, +Out, +Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Graph
* Logs elements flowing through the stream as well as completion and erroring.
*
* By default element and completion signals are logged on debug level, and errors are logged on Error level.
- * This can be adjusted according to your needs by providing a custom [[Attributes.LogLevels]] atrribute on the given Flow:
+ * This can be adjusted according to your needs by providing a custom [[Attributes.LogLevels]] attribute on the given Flow:
*
* Uses the given [[LoggingAdapter]] for logging.
*

@@ -875,7 +875,7 @@ class Flow[-In, +Out, +Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Graph
* Logs elements flowing through the stream as well as completion and erroring.
*
* By default element and completion signals are logged on debug level, and errors are logged on Error level.
- * This can be adjusted according to your needs by providing a custom [[Attributes.LogLevels]] atrribute on the given Flow.
+ * This can be adjusted according to your needs by providing a custom [[Attributes.LogLevels]] attribute on the given Flow.
*
* Uses an internally created [[LoggingAdapter]] which uses `akka.stream.Log` as it's source (use this class to configure slf4j loggers).
*/
@@ -104,7 +104,7 @@ object Sink {

/**
* Sends the elements of the stream to the given `ActorRef`.
- * If the target actor terminates the stream will be cancelled.
+ * If the target actor terminates the stream will be canceled.
* When the stream is completed successfully the given `onCompleteMessage`
* will be sent to the destination actor.
* When the stream is completed with failure a [[akka.actor.Status.Failure]]
@@ -41,7 +41,7 @@ object Source {
*
* It materializes a [[scala.concurrent.Promise]] which will be completed
* when the downstream stage of this source cancels. This promise can also
- * be used to externally trigger completion, which the source then signalls
+ * be used to externally trigger completion, which the source then signals
* to its downstream.
*/
def lazyEmpty[T](): Source[T, Promise[Unit]] =

@@ -185,7 +185,7 @@ object Source {
* The stream can be completed with failure by sending [[akka.actor.Status.Failure]] to the
* actor reference.
*
- * The actor will be stopped when the stream is completed, failed or cancelled from downstream,
+ * The actor will be stopped when the stream is completed, failed or canceled from downstream,
* i.e. you can watch it to get notified when that happens.
*
* @param bufferSize The size of the buffer in element count

@@ -388,7 +388,7 @@ class Source[+Out, +Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[Sour
* Transform this stream by applying the given function to each of the elements
* as they pass through this processing step. The function returns a `Future` and the
* value of that future will be emitted downstreams. As many futures as requested elements by
- * downstream may run in parallel and each processed element will be emitted dowstream
+ * downstream may run in parallel and each processed element will be emitted downstream
* as soon as it is ready, i.e. it is possible that the elements are not emitted downstream
* in the same order as received from upstream.
*

@@ -689,7 +689,7 @@ class Source[+Out, +Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[Sour
* Logs elements flowing through the stream as well as completion and erroring.
*
* By default element and completion signals are logged on debug level, and errors are logged on Error level.
- * This can be adjusted according to your needs by providing a custom [[Attributes.LogLevels]] atrribute on the given Flow:
+ * This can be adjusted according to your needs by providing a custom [[Attributes.LogLevels]] attribute on the given Flow:
*
* The `extract` function will be applied to each element before logging, so it is possible to log only those fields
* of a complex object flowing through this element.

@@ -711,7 +711,7 @@ class Source[+Out, +Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[Sour
* Logs elements flowing through the stream as well as completion and erroring.
*
* By default element and completion signals are logged on debug level, and errors are logged on Error level.
- * This can be adjusted according to your needs by providing a custom [[Attributes.LogLevels]] atrribute on the given Flow:
+ * This can be adjusted according to your needs by providing a custom [[Attributes.LogLevels]] attribute on the given Flow:
*
* The `extract` function will be applied to each element before logging, so it is possible to log only those fields
* of a complex object flowing through this element.

@@ -733,7 +733,7 @@ class Source[+Out, +Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[Sour
* Logs elements flowing through the stream as well as completion and erroring.
*
* By default element and completion signals are logged on debug level, and errors are logged on Error level.
- * This can be adjusted according to your needs by providing a custom [[Attributes.LogLevels]] atrribute on the given Flow:
+ * This can be adjusted according to your needs by providing a custom [[Attributes.LogLevels]] attribute on the given Flow:
*
* Uses the given [[LoggingAdapter]] for logging.
*

@@ -752,7 +752,7 @@ class Source[+Out, +Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[Sour
* Logs elements flowing through the stream as well as completion and erroring.
*
* By default element and completion signals are logged on debug level, and errors are logged on Error level.
- * This can be adjusted according to your needs by providing a custom [[Attributes.LogLevels]] atrribute on the given Flow:
+ * This can be adjusted according to your needs by providing a custom [[Attributes.LogLevels]] attribute on the given Flow:
*
* Uses an internally created [[LoggingAdapter]] which uses `akka.stream.Log` as it's source (use this class to configure slf4j loggers).
*
@@ -20,7 +20,7 @@ object FlexiMerge {
* fulfilled when there are elements for one specific upstream
* input.
*
- * It is not allowed to use a handle that has been cancelled or
+ * It is not allowed to use a handle that has been canceled or
* has been completed. `IllegalArgumentException` is thrown if
* that is not obeyed.
*/

@@ -36,7 +36,7 @@ object FlexiMerge {
* fulfilled when there are elements for any of the given upstream
* inputs.
*
- * Cancelled and completed inputs are not used, i.e. it is allowed
+ * Canceled and completed inputs are not used, i.e. it is allowed
* to specify them in the list of `inputs`.
*/
final case class ReadAny[T](inputs: Inlet[T]*) extends ReadCondition[T]

@@ -53,7 +53,7 @@ object FlexiMerge {
* the `preferred` and at least one other `secondary` input have demand,
* the `preferred` input will always be consumed first.
*
- * Cancelled and completed inputs are not used, i.e. it is allowed
+ * Canceled and completed inputs are not used, i.e. it is allowed
* to specify them in the list of `inputs`.
*/
final case class ReadPreferred[T](preferred: Inlet[T], secondaries: Inlet[T]*) extends ReadCondition[T]

@@ -68,11 +68,11 @@ object FlexiMerge {
* fulfilled when there are elements for *all* of the given upstream
* inputs.
*
- * The emitted element the will be a [[ReadAllInputs]] object, which contains values for all non-cancelled inputs of this FlexiMerge.
+ * The emitted element the will be a [[ReadAllInputs]] object, which contains values for all non-canceled inputs of this FlexiMerge.
*
- * Cancelled inputs are not used, i.e. it is allowed to specify them in the list of `inputs`,
+ * Canceled inputs are not used, i.e. it is allowed to specify them in the list of `inputs`,
* the resulting [[ReadAllInputs]] will then not contain values for this element, which can be
- * handled via supplying a default value instead of the value from the (now cancelled) input.
+ * handled via supplying a default value instead of the value from the (now canceled) input.
*/
final case class ReadAll[T](mkResult: immutable.Map[InPort, Any] ⇒ ReadAllInputsBase, inputs: Inlet[T]*) extends ReadCondition[ReadAllInputs]

@@ -133,12 +133,12 @@ object FlexiMerge {
*/
trait MergeLogicContextBase {
/**
- * Complete this stream successfully. Upstream subscriptions will be cancelled.
+ * Complete this stream successfully. Upstream subscriptions will be canceled.
*/
def finish(): Unit

/**
- * Complete this stream with failure. Upstream subscriptions will be cancelled.
+ * Complete this stream with failure. Upstream subscriptions will be canceled.
*/
def fail(cause: Throwable): Unit
@@ -20,7 +20,7 @@ object FlexiRoute {
* fulfilled when there are requests for elements from one specific downstream
* output.
*
- * It is not allowed to use a handle that has been cancelled or
+ * It is not allowed to use a handle that has been canceled or
* has been completed. `IllegalArgumentException` is thrown if
* that is not obeyed.
*/

@@ -35,7 +35,7 @@ object FlexiRoute {
* fulfilled when there are requests for elements from any of the given downstream
* outputs.
*
- * Cancelled and completed outputs are not used, i.e. it is allowed
+ * Canceled and completed outputs are not used, i.e. it is allowed
* to specify them in the list of `outputs`.
*/
final case class DemandFromAny(outputs: immutable.Seq[OutPort]) extends DemandCondition[OutPort]

@@ -49,7 +49,7 @@ object FlexiRoute {
* fulfilled when there are requests for elements from all of the given downstream
* outputs.
*
- * Cancelled and completed outputs are not used, i.e. it is allowed
+ * Canceled and completed outputs are not used, i.e. it is allowed
* to specify them in the list of `outputs`.
*/
final case class DemandFromAll(outputs: immutable.Seq[OutPort]) extends DemandCondition[Unit]
@@ -473,7 +473,7 @@ trait FlowOps[+Out, +Mat] {
* Transform this stream by applying the given function to each of the elements
* as they pass through this processing step. The function returns a `Future` and the
* value of that future will be emitted downstreams. As many futures as requested elements by
- * downstream may run in parallel and each processed element will be emitted dowstream
+ * downstream may run in parallel and each processed element will be emitted downstream
* as soon as it is ready, i.e. it is possible that the elements are not emitted downstream
* in the same order as received from upstream.
*
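This paragraph documents the `Future`-based transform family (`mapAsync`/`mapAsyncUnordered`). Since the out-of-order emission described above matches the unordered variant in today's API, the sketch below uses `mapAsyncUnordered` with its current `parallelism` signature; the exact operator signature at this revision may differ:

```scala
import scala.concurrent.Future
import akka.stream.scaladsl.Source

def lookupName(id: Int): Future[String] = Future.successful(s"user-$id")

// Futures may run in parallel; each result is emitted downstream as soon as it
// completes, so the output order can differ from the input order.
val names = Source(1 to 10).mapAsyncUnordered(parallelism = 4)(lookupName)
```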
@@ -969,7 +969,7 @@ trait FlowOps[+Out, +Mat] {
* Logs elements flowing through the stream as well as completion and erroring.
*
* By default element and completion signals are logged on debug level, and errors are logged on Error level.
- * This can be adjusted according to your needs by providing a custom [[Attributes.LogLevels]] atrribute on the given Flow:
+ * This can be adjusted according to your needs by providing a custom [[Attributes.LogLevels]] attribute on the given Flow:
*
* Uses implicit [[LoggingAdapter]] if available, otherwise uses an internally created one,
* which uses `akka.stream.Log` as it's source (use this class to configure slf4j loggers).
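A short sketch of overriding the default levels with an `Attributes.LogLevels` attribute, as the doc above suggests (assuming the `Attributes.logLevels` factory and the `log` operator as they appear in released Akka Streams):

```scala
import akka.event.Logging
import akka.stream.Attributes
import akka.stream.scaladsl.Source

// Log each element at INFO instead of the default DEBUG level.
val logged = Source(1 to 3)
  .log("my-stream")
  .withAttributes(Attributes.logLevels(
    onElement = Logging.InfoLevel,
    onFinish = Logging.InfoLevel,
    onFailure = Logging.ErrorLevel))
```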
@@ -180,7 +180,7 @@ object Sink extends SinkApply {

/**
* Sends the elements of the stream to the given `ActorRef`.
- * If the target actor terminates the stream will be cancelled.
+ * If the target actor terminates the stream will be canceled.
* When the stream is completed successfully the given `onCompleteMessage`
* will be sent to the destination actor.
* When the stream is completed with failure a [[akka.actor.Status.Failure]]
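For context, the sink documented above is typically used like this (a sketch assuming the `Sink.actorRef[T](ref, onCompleteMessage)` factory of released Akka Streams):

```scala
import akka.actor.ActorRef
import akka.stream.scaladsl.Sink

// Every element is sent to `target`; "stream-done" is sent on successful
// completion, and akka.actor.Status.Failure is sent if the stream fails.
def elementsTo(target: ActorRef): Sink[Int, _] =
  Sink.actorRef[Int](target, onCompleteMessage = "stream-done")
```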
@@ -285,7 +285,7 @@ object Source extends SourceApply {
*
* It materializes a [[scala.concurrent.Promise]] which will be completed
* when the downstream stage of this source cancels. This promise can also
- * be used to externally trigger completion, which the source then signalls
+ * be used to externally trigger completion, which the source then signals
* to its downstream.
*/
def lazyEmpty[T]: Source[T, Promise[Unit]] =
@@ -355,15 +355,15 @@ object Source extends SourceApply {
* not matter.
*
* The stream can be completed successfully by sending the actor reference an [[akka.actor.Status.Success]]
- * message in which case already buffered elements will be signalled before signalling completion,
- * or by sending a [[akka.actor.PoisonPill]] in which case completion will be signalled immediately.
+ * message in which case already buffered elements will be signaled before signaling completion,
+ * or by sending a [[akka.actor.PoisonPill]] in which case completion will be signaled immediately.
*
* The stream can be completed with failure by sending [[akka.actor.Status.Failure]] to the
* actor reference. In case the Actor is still draining its internal buffer (after having received
- * an [[akka.actor.Status.Success]]) before signalling completion and it receives a [[akka.actor.Status.Failure]],
- * the failure will be signalled downstream immediatly (instead of the completion signal).
+ * an [[akka.actor.Status.Success]]) before signaling completion and it receives a [[akka.actor.Status.Failure]],
+ * the failure will be signaled downstream immediately (instead of the completion signal).
*
- * The actor will be stopped when the stream is completed, failed or cancelled from downstream,
+ * The actor will be stopped when the stream is completed, failed or canceled from downstream,
* i.e. you can watch it to get notified when that happens.
*
* @param bufferSize The size of the buffer in element count
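A sketch of the actor-backed source whose completion protocol is documented above (the `Source.actorRef[T](bufferSize, overflowStrategy)` factory is assumed from released Akka Streams):

```scala
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.Source

// Materializing this source yields the ActorRef to send elements to.
val numbers = Source.actorRef[Int](bufferSize = 16, overflowStrategy = OverflowStrategy.dropHead)

// With `ref` obtained from materialization:
//   ref ! 1                                  // emit an element (buffered while there is no demand)
//   ref ! akka.actor.Status.Success("done")  // complete after the buffer is drained
//   ref ! akka.actor.PoisonPill              // or complete immediately
//   ref ! akka.actor.Status.Failure(new RuntimeException("boom")) // or fail the stream
```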
@@ -27,7 +27,7 @@ import akka.stream.impl.io.StreamTcpManager
object Tcp extends ExtensionId[Tcp] with ExtensionIdProvider {

/**
- * * Represents a succdessful TCP server binding.
+ * * Represents a successful TCP server binding.
*/
case class ServerBinding(localAddress: InetSocketAddress)(private val unbindAction: () ⇒ Future[Unit]) {
def unbind(): Future[Unit] = unbindAction()
@@ -139,7 +139,7 @@ abstract class AbstractStage[-In, Out, PushD <: Directive, PullD <: Directive, C
def onUpstreamFinish(ctx: Ctx): TerminationDirective = ctx.finish()

/**
- * `onDownstreamFinish` is called when downstream has cancelled.
+ * `onDownstreamFinish` is called when downstream has canceled.
*
* By default the cancel signal is immediately propagated with [[akka.stream.stage.Context#finish]].
*/
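To show where `onDownstreamFinish` fits, here is a hedged sketch of a push-style stage; the `PushStage`/`Context` names are taken from the 1.0-era stage API and may differ slightly at this revision, while `TerminationDirective` and `ctx.finish()` appear in the hunk above:

```scala
import akka.stream.stage.{ Context, PushStage, TerminationDirective }

class CountingStage[T] extends PushStage[T, T] {
  private var seen = 0

  override def onPush(elem: T, ctx: Context[T]) = {
    seen += 1
    ctx.push(elem)
  }

  // Downstream has canceled; the default would just call ctx.finish(),
  // here we log first and then propagate the cancel signal the same way.
  override def onDownstreamFinish(ctx: Context[T]): TerminationDirective = {
    println(s"downstream canceled after $seen elements")
    ctx.finish()
  }
}
```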