Fix several minor typos detected by github.com/client9/misspell (#25448)

* Fix several minor typos detected by github.com/client9/misspell
* Revert s/erminater/erminator/ in /ActorSystemSpec

parent fddc198178
commit 482eaea122

70 changed files with 102 additions and 102 deletions
@@ -164,7 +164,7 @@ abstract class TestProbe[M] {
   def expectNoMessage(): Unit

   /**
-   * Expect the given actor to be stopped or stop withing the given timeout or
+   * Expect the given actor to be stopped or stop within the given timeout or
    * throw an [[AssertionError]].
    */
   def expectTerminated[U](actorRef: ActorRef[U], max: Duration): Unit
@@ -183,7 +183,7 @@ object TestProbe {
   protected def fishForMessage_internal(max: FiniteDuration, hint: String, fisher: M ⇒ FishingOutcome): immutable.Seq[M]

   /**
-   * Expect the given actor to be stopped or stop withing the given timeout or
+   * Expect the given actor to be stopped or stop within the given timeout or
    * throw an [[AssertionError]].
    */
   def expectTerminated[U](actorRef: ActorRef[U], max: FiniteDuration): Unit
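For context, a minimal sketch of how the `expectTerminated` assertion documented above is typically used (not part of this commit; the probe/actor names, and the exact testkit package, which moved between 2.5.x releases, are illustrative assumptions):

  import scala.concurrent.duration._
  import akka.actor.typed.scaladsl.Behaviors

  // assuming a spec mixing in the typed ActorTestKit, which provides
  // spawn() and an implicit ActorSystem for TestProbe()
  val probe = TestProbe[String]()
  val ref = spawn(Behaviors.stopped[String])
  probe.expectTerminated(ref, 100.millis)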
@@ -49,7 +49,7 @@ class ActorTestKitSpec extends WordSpec with Matchers with ActorTestKit with Sca
 }

-// derivate classes should also work fine (esp the naming part
+// derivative classes should also work fine (esp the naming part
 trait MyBaseSpec extends WordSpec with ActorTestKit with Matchers with BeforeAndAfterAll {
   override protected def afterAll(): Unit = {
     shutdownTestKit()
@@ -57,7 +57,7 @@ trait MyBaseSpec extends WordSpec with ActorTestKit with Matchers with BeforeAnd
 }

 class MyConcreteDerivateSpec extends MyBaseSpec {
-  "A derivate test" should {
+  "A derivative test" should {
     "generate a default name from the test class" in {
       system.name should ===("MyConcreteDerivateSpec")
     }
@@ -107,7 +107,7 @@ class ExtensionSpec extends WordSpec with Matchers {
       val system = ActorSystem("extensions")
       val listedExtensions = system.settings.config.getStringList("akka.library-extensions")
       listedExtensions.size should be > 0
-      // could be initalized by other tests, so at least once
+      // could be initialized by other tests, so at least once
       InstanceCountingExtension.createCount.get() should be > 0

       shutdownActorSystem(system)
@@ -817,7 +817,7 @@ class SupervisorHierarchySpec extends AkkaSpec(SupervisorHierarchySpec.config) w
       expectMsg("pong")
     }

-    "handle failure in creation when supervision startegy returns Resume and Restart" taggedAs LongRunningTest in {
+    "handle failure in creation when supervision strategy returns Resume and Restart" taggedAs LongRunningTest in {
       val createAttempt = new AtomicInteger(0)
       val preStartCalled = new AtomicInteger(0)
       val postRestartCalled = new AtomicInteger(0)
@@ -357,7 +357,7 @@ class SupervisorSpec extends AkkaSpec(SupervisorSpec.config) with BeforeAndAfter
       expectMsg(Timeout, PingMessage)
     }

-    "restart killed actors in nested superviser hierarchy" in {
+    "restart killed actors in nested supervisor hierarchy" in {
       val (actor1, actor2, actor3, _) = nestedSupervisorsAllForOne

       ping(actor1)
@@ -163,7 +163,7 @@ class DispatchersSpec extends AkkaSpec(DispatchersSpec.config) with ImplicitSend
     }

     "get the correct types of dispatchers" in {
-      //All created/obtained dispatchers are of the expeced type/instance
+      //All created/obtained dispatchers are of the expected type/instance
       assert(typesAndValidators.forall(tuple ⇒ tuple._2(allDispatchers(tuple._1))))
     }

@@ -15,7 +15,7 @@ class DispatcherShutdownSpec extends WordSpec with Matchers {

   "akka dispatcher" should {

-    "eventualy shutdown when used after system terminate" in {
+    "eventually shutdown when used after system terminate" in {

       val threads = ManagementFactory.getThreadMXBean()
       def threadCount = threads
@@ -241,7 +241,7 @@ class MetricsBasedResizerSpec extends AkkaSpec(ResizerSpec.config) with DefaultT

       msgs1.foreach(_.second.open()) //process two messages

-      // make sure some time passes inbetween
+      // make sure some time passes in-between
       Thread.sleep(300)

       // wait for routees to update their mail boxes
@@ -273,7 +273,7 @@ class MetricsBasedResizerSpec extends AkkaSpec(ResizerSpec.config) with DefaultT

       msgs1.foreach(_.second.open()) //process two messages

-      // make sure some time passes inbetween
+      // make sure some time passes in-between
       Thread.sleep(300)

       // wait for routees to update their mail boxes
@@ -309,7 +309,7 @@ public class AdapterTest extends JUnitSuite {

     int originalLogLevel = system.eventStream().logLevel();
     try {
-      // supress the logging with stack trace
+      // suppress the logging with stack trace
       system.eventStream().setLogLevel(Integer.MIN_VALUE); // OFF

       // only stop supervisorStrategy
@@ -88,7 +88,7 @@ class AskSpec extends ActorTestKit
     }

     "must transform a replied akka.actor.Status.Failure to a failed future" in {
-      // It's unlikely but possible that this happens, since the recieving actor would
+      // It's unlikely but possible that this happens, since the receiving actor would
       // have to accept a message with an actoref that accepts AnyRef or be doing crazy casting
       // For completeness sake though
       implicit val untypedSystem = akka.actor.ActorSystem("AskSpec-untyped-1")
@@ -138,7 +138,7 @@ class ExtensionsSpec extends TypedAkkaSpec {
     withEmptyActorSystem("ExtensionsSpec06") { system ⇒
       val listedExtensions = system.settings.config.getStringList("akka.actor.typed.library-extensions")
       listedExtensions.size should be > 0
-      // could be initalized by other tests, so at least once
+      // could be initialized by other tests, so at least once
       InstanceCountingExtension.createCount.get() should be > 0
     }

@@ -297,7 +297,7 @@ akka {
       # exploration will be +- 5
       explore-step-size = 0.1

-      # Probabily of doing an exploration v.s. optmization.
+      # Probability of doing an exploration v.s. optmization.
       chance-of-exploration = 0.4

       # When downsizing after a long streak of underutilization, the resizer
@@ -387,7 +387,7 @@ akka {

     # Each worker in the pool uses a separate bounded MPSC queue. This value
     # indicates the upper bound of the queue. Whenever an attempt to enqueue
-    # a task is made and the queue does not have capacity to accomodate
+    # a task is made and the queue does not have capacity to accommodate
     # the task, the rejection handler created by the rejection handler specified
     # in "rejection-handler" is invoked.
     task-queue-size = 512
@@ -1100,7 +1100,7 @@ akka {
     # When Coordinated Shutdown is triggered an instance of `Reason` is
     # required. That value can be used to override the default settings.
     # Only 'exit-jvm', 'exit-code' and 'terminate-actor-system' may be
-    # overriden depending on the reason.
+    # overridden depending on the reason.
     reason-overrides {
       # Overrides are applied using the `reason.getClass.getName`. This
       # default overrides the `exit-code` when the `Reason` is a cluster
@@ -33,7 +33,7 @@ object AkkaVersion {
       case VersionPattern(currentMajorStr, currentMinorStr, currentPatchStr, mOrRc) ⇒
         requiredVersion match {
           case requiredVersion @ VersionPattern(requiredMajorStr, requiredMinorStr, requiredPatchStr, _) ⇒
-            // a M or RC is basically inbetween versions, so offset
+            // a M or RC is basically in-between versions, so offset
             val currentPatch =
               if (mOrRc ne null) currentPatchStr.toInt - 1
               else currentPatchStr.toInt
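To make that offset concrete (a hedged sketch, assuming the public `AkkaVersion.require` entry point in akka.util; the library name is illustrative): a current version such as "2.5.12-M1" is counted as patch 11, so it does not satisfy a required "2.5.12", because a milestone or RC sits in between the surrounding patch releases.

  import akka.util.AkkaVersion
  // fails (throws) if the running Akka is older than the required version
  AkkaVersion.require("my-library", "2.5.12")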
@@ -13,22 +13,22 @@ private[akka] object PrettyDuration {

   /**
    * JAVA API
-   * Selects most apropriate TimeUnit for given duration and formats it accordingly, with 4 digits precision
+   * Selects most appropriate TimeUnit for given duration and formats it accordingly, with 4 digits precision
    */
   def format(duration: Duration): String = duration.pretty

   /**
    * JAVA API
-   * Selects most apropriate TimeUnit for given duration and formats it accordingly
+   * Selects most appropriate TimeUnit for given duration and formats it accordingly
    */
   def format(duration: Duration, includeNanos: Boolean, precision: Int): String = duration.pretty(includeNanos, precision)

   implicit class PrettyPrintableDuration(val duration: Duration) extends AnyVal {

-    /** Selects most apropriate TimeUnit for given duration and formats it accordingly, with 4 digits precision **/
+    /** Selects most appropriate TimeUnit for given duration and formats it accordingly, with 4 digits precision **/
     def pretty: String = pretty(includeNanos = false)

-    /** Selects most apropriate TimeUnit for given duration and formats it accordingly */
+    /** Selects most appropriate TimeUnit for given duration and formats it accordingly */
     def pretty(includeNanos: Boolean, precision: Int = 4): String = {
       require(precision > 0, "precision must be > 0")

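A minimal sketch of the implicit class above in use (it is private[akka], so callable only from akka packages; the akka.util location and the value are assumptions for illustration):

  import scala.concurrent.duration._
  import akka.util.PrettyDuration._

  // picks the most appropriate unit and formats with 4-digit precision
  val formatted: String = 1234567.nanos.pretty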
@@ -62,7 +62,7 @@ object TypedBenchmarkActors {
     Behaviors.receive { (ctx, msg) ⇒
       msg match {
         case Start(respondTo) ⇒
-          // note: no protection against accidentally running bench sessions in paralell
+          // note: no protection against accidentally running bench sessions in parallel
           val sessionBehavior = startEchoBenchSession(numMessagesPerActorPair, numActors, dispatcher, batchSize, respondTo)
           ctx.spawnAnonymous(sessionBehavior)
           Behaviors.same
@@ -317,7 +317,7 @@ class FusedGraphsBenchmark {

   @Benchmark
   @OperationsPerInvocation(100 * 1000)
-  def boradcast_zip_balance_merge(blackhole: org.openjdk.jmh.infra.Blackhole): Unit = {
+  def broadcast_zip_balance_merge(blackhole: org.openjdk.jmh.infra.Blackhole): Unit = {
     FusedGraphsBenchmark.blackhole = blackhole
     broadcastZipBalanceMerge.run()(materializer).await()
   }
@@ -726,7 +726,7 @@ class ClusterSingletonManager(
         setTimer(TakeOverRetryTimer, TakeOverRetry(count + 1), handOverRetryInterval, repeat = false)
         stay
       } else
-        throw new ClusterSingletonManagerIsStuck(s"Expected hand-over to [${newOldestOption}] never occured")
+        throw new ClusterSingletonManagerIsStuck(s"Expected hand-over to [${newOldestOption}] never occurred")

     case Event(HandOverToMe, WasOldestData(singleton, singletonTerminated, _)) ⇒
       gotoHandingOver(singleton, singletonTerminated, Some(sender()))
@@ -93,7 +93,7 @@ abstract class RestartFirstSeedNodeSpec
         expectMsg(5 seconds, "ok")
       }
     }
-    enterBarrier("seed1-address-transfered")
+    enterBarrier("seed1-address-transferred")

     // now we can join seed1System, seed2, seed3 together
     runOn(seed1) {
@@ -94,7 +94,7 @@ abstract class RestartNode2SpecSpec
         expectMsg(5.seconds, "ok")
       }
     }
-    enterBarrier("seed1-address-transfered")
+    enterBarrier("seed1-address-transferred")

     // now we can join seed1System, seed2 together

@@ -95,7 +95,7 @@ abstract class RestartNode3Spec
         expectMsg(5.seconds, "ok")
       }
     }
-    enterBarrier("second-address-transfered")
+    enterBarrier("second-address-transferred")

     // now we can join first, third together
     runOn(first, third) {
@@ -111,7 +111,7 @@ abstract class RestartNodeSpec
         expectMsg(5.seconds, "ok")
       }
     }
-    enterBarrier("second-address-transfered")
+    enterBarrier("second-address-transferred")

     // now we can join first, secondSystem, third together
     runOn(first, third) {
@@ -36,7 +36,7 @@ object SharedMediaDriverSupport {
     require(aeronDir.nonEmpty, "aeron-dir must be defined")

     // Check if the media driver is already started by another multi-node jvm.
-    // It checks more than one time with a sleep inbetween. The number of checks
+    // It checks more than one time with a sleep in-between. The number of checks
     // depends on the multi-node index (i).
     @tailrec def isDriverInactive(i: Int): Boolean = {
       if (i < 0) true
@@ -81,7 +81,7 @@ private[cluster] object StressMultiJvmSpec extends MultiNodeConfig {
       nr-of-nodes-factor = 1
       # not scaled
       nr-of-seed-nodes = 3
-      nr-of-nodes-joining-to-seed-initally = 2
+      nr-of-nodes-joining-to-seed-initially = 2
       nr-of-nodes-joining-one-by-one-small = 2
       nr-of-nodes-joining-one-by-one-large = 2
       nr-of-nodes-joining-to-one = 2
@@ -175,7 +175,7 @@ private[cluster] object StressMultiJvmSpec extends MultiNodeConfig {
     val infolog = getBoolean("infolog")
     val nFactor = getInt("nr-of-nodes-factor")
     val numberOfSeedNodes = getInt("nr-of-seed-nodes") // not scaled by nodes factor
-    val numberOfNodesJoiningToSeedNodesInitially = getInt("nr-of-nodes-joining-to-seed-initally") * nFactor
+    val numberOfNodesJoiningToSeedNodesInitially = getInt("nr-of-nodes-joining-to-seed-initially") * nFactor
     val numberOfNodesJoiningOneByOneSmall = getInt("nr-of-nodes-joining-one-by-one-small") * nFactor
     val numberOfNodesJoiningOneByOneLarge = getInt("nr-of-nodes-joining-one-by-one-large") * nFactor
     val numberOfNodesJoiningToOneNode = getInt("nr-of-nodes-joining-to-one") * nFactor
@@ -226,7 +226,7 @@ abstract class TransitionSpec
       enterBarrier("after-3")
     }

-    "perform correct transitions when second becomes unavailble" taggedAs LongRunningTest in {
+    "perform correct transitions when second becomes unavailable" taggedAs LongRunningTest in {
       runOn(third) {
         markNodeAsUnavailable(second)
         reapUnreachable()
@@ -234,7 +234,7 @@ abstract class TransitionSpec
         awaitAssert(seenLatestGossip should ===(Set(third)))
       }

-      enterBarrier("after-second-unavailble")
+      enterBarrier("after-second-unavailable")

       third gossipTo first

@@ -84,7 +84,7 @@ class AutoDownSpec extends AkkaSpec("akka.actor.provider=remote") {
       expectMsg(DownCalled(memberB.address))
     }

-    "down unreachable when becoming leader inbetween detection and specified duration" in {
+    "down unreachable when becoming leader in-between detection and specified duration" in {
       val a = autoDownActor(2.seconds)
       a ! LeaderChanged(Some(memberB.address))
       a ! UnreachableMember(memberC)
@@ -93,7 +93,7 @@ class AutoDownSpec extends AkkaSpec("akka.actor.provider=remote") {
       expectMsg(DownCalled(memberC.address))
     }

-    "not down unreachable when losing leadership inbetween detection and specified duration" taggedAs TimingTest in {
+    "not down unreachable when losing leadership in-between detection and specified duration" taggedAs TimingTest in {
       val a = autoDownActor(2.seconds)
       a ! LeaderChanged(Some(memberA.address))
       a ! UnreachableMember(memberC)
@@ -101,7 +101,7 @@ class AutoDownSpec extends AkkaSpec("akka.actor.provider=remote") {
       expectNoMsg(3.second)
     }

-    "not down when unreachable become reachable inbetween detection and specified duration" taggedAs TimingTest in {
+    "not down when unreachable become reachable in-between detection and specified duration" taggedAs TimingTest in {
       val a = autoDownActor(2.seconds)
       a ! LeaderChanged(Some(memberA.address))
       a ! UnreachableMember(memberB)
@@ -109,7 +109,7 @@ class AutoDownSpec extends AkkaSpec("akka.actor.provider=remote") {
       expectNoMsg(3.second)
     }

-    "not down when unreachable is removed inbetween detection and specified duration" taggedAs TimingTest in {
+    "not down when unreachable is removed in-between detection and specified duration" taggedAs TimingTest in {
       val a = autoDownActor(2.seconds)
       a ! LeaderChanged(Some(memberA.address))
       a ! UnreachableMember(memberB)
@@ -157,7 +157,7 @@ final class CircuitBreakerProxy(
       }

     case Event(message, state) ⇒
-      log.debug("CLOSED: Sending message {} expecting a response withing timeout {}", message, callTimeout)
+      log.debug("CLOSED: Sending message {} expecting a response within timeout {}", message, callTimeout)
       val currentSender = sender()
       forwardRequest(message, sender, state, log)
       stay
@@ -161,7 +161,7 @@ abstract class DurableDataSpec(multiNodeConfig: DurableDataSpecConfig)
         expectMsg(ReplicaCount(2))
       }
     }
-    enterBarrier("both-initalized")
+    enterBarrier("both-initialized")

     r ! Update(KeyA, GCounter(), writeTwo)(_ + 1)
     expectMsg(UpdateSuccess(KeyA, None))
@@ -260,7 +260,7 @@ class ReplicatorSpec extends MultiNodeSpec(ReplicatorSpec) with STMultiNodeSpec
       enterBarrierAfterTestStep()
     }

-    "be replicated after succesful update" in {
+    "be replicated after successful update" in {
       val changedProbe = TestProbe()
       runOn(first, second) {
         replicator ! Subscribe(KeyC, changedProbe.ref)
@@ -403,14 +403,14 @@ class ReplicatorSpec extends MultiNodeSpec(ReplicatorSpec) with STMultiNodeSpec
       replicator ! Update(KeyE, GCounter(), writeMajority)(_ + 50)
       expectMsg(UpdateSuccess(KeyE, None))
     }
-    enterBarrier("write-inital-majority")
+    enterBarrier("write-initial-majority")

     runOn(first, second, third) {
       replicator ! Get(KeyE, readMajority)
       val c150 = expectMsgPF() { case g @ GetSuccess(KeyE, _) ⇒ g.get(KeyE) }
       c150.value should be(150)
     }
-    enterBarrier("read-inital-majority")
+    enterBarrier("read-initial-majority")

     runOn(first) {
       testConductor.blackhole(first, third, Direction.Both).await
@@ -176,7 +176,7 @@ class DeltaPropagationSelectorSpec extends WordSpec with Matchers with TypeCheck
       selector.collectPropagations() should ===(Map(nodes(0) → expected))
     }

-    "calcualte right slice size" in {
+    "calculate right slice size" in {
       val selector = new TestSelector(selfUniqueAddress, nodes)
       selector.nodesSliceSize(0) should ===(0)
       selector.nodesSliceSize(1) should ===(1)
@@ -495,7 +495,7 @@ together.
 There is a special version of `ORMultiMap`, created by using separate constructor
 `ORMultiMap.emptyWithValueDeltas[A, B]`, that also propagates the updates to its values (of `ORSet` type) as deltas.
 This means that the `ORMultiMap` initiated with `ORMultiMap.emptyWithValueDeltas` propagates its updates as pairs
-consisting of delta of the key and delta of the value. It is much more efficient in terms of network bandwith consumed.
+consisting of delta of the key and delta of the value. It is much more efficient in terms of network bandwidth consumed.

 However, this behavior has not been made default for `ORMultiMap` and if you wish to use it in your code, you
 need to replace invocations of `ORMultiMap.empty[A, B]` (or `ORMultiMap()`) with `ORMultiMap.emptyWithValueDeltas[A, B]`
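A minimal sketch of that replacement (the type parameters are illustrative):

  import akka.cluster.ddata.ORMultiMap

  // instead of ORMultiMap.empty[String, Int] or ORMultiMap():
  val m = ORMultiMap.emptyWithValueDeltas[String, Int]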
@@ -197,7 +197,7 @@ This pattern is useful when the started actor fails <a id="^1" href="#1">[1]</a>
 and we need to give it some time to start-up again. One of the prime examples when this is useful is
 when a @ref:[PersistentActor](../persistence.md) fails (by stopping) with a persistence failure - which indicates that
 the database may be down or overloaded, in such situations it makes most sense to give it a little bit of time
-to recover before the peristent actor is started.
+to recover before the persistent actor is started.

 > <a id="1" href="#^1">[1]</a> A failure can be indicated in two different ways; by an actor stopping or crashing.

@@ -356,7 +356,7 @@ that the type is no longer needed, and skip the deserialization all-together:

 

-The serializer is aware of the old event types that need to be skipped (**O**), and can skip deserializing them alltogether
+The serializer is aware of the old event types that need to be skipped (**O**), and can skip deserializing them altogether
 by returning a "tombstone" (**T**), which the EventAdapter converts into an empty EventSeq.
 Other events (**E**) can just be passed through.

@@ -411,7 +411,7 @@ Java
 : @@snip [PersistenceSchemaEvolutionDocTest.java]($code$/java/jdocs/persistence/PersistenceSchemaEvolutionDocTest.java) { #detach-models }

 The `EventAdapter` takes care of converting from one model to the other one (in both directions),
-alowing the models to be completely detached from each other, such that they can be optimised independently
+allowing the models to be completely detached from each other, such that they can be optimised independently
 as long as the mapping logic is able to convert between them:

 Scala
@@ -496,7 +496,7 @@ The `EventAdapter` splits the incoming event into smaller more fine grained even

 During recovery however, we now need to convert the old `V1` model into the `V2` representation of the change.
 Depending if the old event contains a name change, we either emit the `UserNameChanged` or we don't,
-and the address change is handled similarily:
+and the address change is handled similarly:


 Scala
@@ -1126,7 +1126,7 @@ A snapshot store plugin can be activated with the following minimal configuratio

 The snapshot store instance is an actor so the methods corresponding to requests from persistent actors
 are executed sequentially. It may delegate to asynchronous libraries, spawn futures, or delegate to other
-actors to achive parallelism.
+actors to achieve parallelism.

 The snapshot store plugin class must have a constructor with one of these signatures:

@@ -293,7 +293,7 @@ as the `GraphStage` itself is a factory of logic instances.

 ### SubFlow.zip and SubSource.zip now emit akka.japi.Pair instead of Scala's Pair

-The the Java API's `zip` operator on `SubFlow` and `SubSource` has been emiting
+The the Java API's `zip` operator on `SubFlow` and `SubSource` has been emitting
 Scala's `Pair` (`Tuple2`) instead of `akka.japi.Pair`. This is fixed in Akka 2.5 where it emits the proper
 Java DSl type.

@@ -591,7 +591,7 @@ The class is now called `PersistenceIdsQuery`, and the method which used to be `

 ### Queries now use `Offset` instead of `Long` for offsets

-This change was made to better accomodate the various types of Journals and their understanding what an offset is.
+This change was made to better accommodate the various types of Journals and their understanding what an offset is.
 For example, in some journals an offset is always a time, while in others it is a numeric offset (like a sequence id).

 Instead of the previous `Long` offset you can now use the provided `Offset` factories (and types):
@@ -2,7 +2,7 @@

 ### Date

-10 Feburary 2017
+10 February 2017

 ### Description of Vulnerability

@@ -21,7 +21,7 @@ would log the `t2` error.
 Since the underlying failure signal onError arrives out-of-band, it might jump over existing elements.
 This operators can recover the failure signal, but not the skipped elements, which will be dropped.

-Similarily to `recover` throwing an exception inside `mapError` _will_ be logged on ERROR level automatically.
+Similarly to `recover` throwing an exception inside `mapError` _will_ be logged on ERROR level automatically.


 @@@div { .callout }
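A minimal sketch of `mapError` rewriting the failure signal (the exception types are illustrative):

  import akka.stream.scaladsl.Source

  Source.failed[Int](new IllegalStateException("boom"))
    .mapError { case e: IllegalStateException ⇒ new RuntimeException("wrapped", e) }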
@@ -172,7 +172,7 @@ The response adapting function is running in the receiving actor and can safely

 ## Request-Response with ask from outside the ActorSystem

-Some times you need to interact with actors from outside of the actor system, this can be done with fire-and-forget as described above or through another version of `ask` that returns a @scala[`Future[Response]`]@java[`CompletionStage<Response>`] that is either completed with a succesful response or failed with a `TimeoutException` if there was no response within the specified timeout.
+Some times you need to interact with actors from outside of the actor system, this can be done with fire-and-forget as described above or through another version of `ask` that returns a @scala[`Future[Response]`]@java[`CompletionStage<Response>`] that is either completed with a successful response or failed with a `TimeoutException` if there was no response within the specified timeout.

 To do this we use @scala[`ActorRef.ask` (or the symbolic `ActorRef.?`) implicitly provided by `akka.actor.typed.scaladsl.AskPattern`]@java[`akka.actor.typed.javadsl.AskPattern.ask`] to send a message to an actor and get a @scala[`Future[Response]`]@java[`CompletionState[Response]`] back.

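A minimal Scala sketch of asking from outside the system (the `Ping`/`Pong` protocol, the `pingActor` reference and the typed `system` are illustrative assumptions; an implicit `Timeout` and the system's `Scheduler` are required):

  import scala.concurrent.Future
  import scala.concurrent.duration._
  import akka.actor.typed.ActorRef
  import akka.actor.typed.scaladsl.AskPattern._
  import akka.util.Timeout

  final case class Ping(replyTo: ActorRef[Pong])
  final case class Pong()

  implicit val timeout: Timeout = 3.seconds
  implicit val scheduler = system.scheduler // scheduler of the typed ActorSystem
  val pong: Future[Pong] = pingActor ? (replyTo ⇒ Ping(replyTo))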
@@ -41,7 +41,7 @@ Java

 ## Actor Sink

-There are two sinks availabe that accept typed `ActorRef`s. To send all of the messages from a stream to an actor without considering backpressure, use @scala[@scaladoc[`ActorSink.actorRef`](akka.stream.typed.scaladsl.ActorSink#actorRef)]@java[@javadoc[`ActorSink.actorRef`](akka.stream.typed.javadsl.ActorSink#actorRef)].
+There are two sinks available that accept typed `ActorRef`s. To send all of the messages from a stream to an actor without considering backpressure, use @scala[@scaladoc[`ActorSink.actorRef`](akka.stream.typed.scaladsl.ActorSink#actorRef)]@java[@javadoc[`ActorSink.actorRef`](akka.stream.typed.javadsl.ActorSink#actorRef)].

 Scala
 : @@snip [ActorSourceSinkExample.scala]($akka$/akka-stream-typed/src/test/scala/docs/akka/stream/typed/ActorSourceSinkExample.scala) { #actor-sink-ref }
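A minimal Scala sketch of that sink (the protocol and the `target` actor are illustrative assumptions):

  import akka.stream.typed.scaladsl.ActorSink

  sealed trait Protocol
  final case class Message(msg: String) extends Protocol
  case object Complete extends Protocol
  final case class Fail(ex: Throwable) extends Protocol

  // arguments: target ref, on-complete message, failure-to-message function
  val sink = ActorSink.actorRef[Protocol](target, Complete, Fail.apply)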
@@ -30,7 +30,7 @@ public class ByteBufferSerializerDocTest {

   @Override
   public byte[] toBinary(Object o) {
-    // in production code, aquire this from a BufferPool
+    // in production code, acquire this from a BufferPool
     final ByteBuffer buf = ByteBuffer.allocate(256);

     toBinary(o, buf);
@@ -9,7 +9,7 @@ import akka.actor.ActorRef;
 import java.util.function.Predicate;

 /**
- * Acts as if `System.out.println()` yet swallows all messages. Useful for putting printlines in examples yet without poluting the build with them.
+ * Acts as if `System.out.println()` yet swallows all messages. Useful for putting printlines in examples yet without polluting the build with them.
  */
 public class SilenceSystemOut {

@@ -20,7 +20,7 @@ class ByteBufferSerializerDocSpec {

   // Implement this method for compatibility with `SerializerWithStringManifest`.
   override def toBinary(o: AnyRef): Array[Byte] = {
-    // in production code, aquire this from a BufferPool
+    // in production code, acquire this from a BufferPool
     val buf = ByteBuffer.allocate(256)

     toBinary(o, buf)
@@ -117,7 +117,7 @@ class StreamPartialGraphDSLDocSpec extends AkkaSpec {
     val actorRef: ActorRef = testActor
     //#sink-combine
     val sendRmotely = Sink.actorRef(actorRef, "Done")
-    val localProcessing = Sink.foreach[Int](_ ⇒ /* do something usefull */ ())
+    val localProcessing = Sink.foreach[Int](_ ⇒ /* do something useful */ ())

     val sink = Sink.combine(sendRmotely, localProcessing)(Broadcast[Int](_))

@@ -18,7 +18,7 @@ trait OptionalTests {
     if (flag.value)
       try test catch {
         case ex: Exception ⇒
-          throw new AssertionError("Imlpementation did not pass this spec. " +
+          throw new AssertionError("Implementation did not pass this spec. " +
             "If your journal will be (by definition) unable to abide the here tested rule, you can disable this test," +
             s"by overriding [${flag.name}] with CapabilityFlag.off in your test class.")
       }
@@ -69,7 +69,7 @@ abstract class SnapshotStoreSpec(config: Config) extends PluginSpec(config)
   /**
    * The limit defines a number of bytes persistence plugin can support to store the snapshot.
    * If plugin does not support persistence of the snapshots of 10000 bytes or may support more than default size,
-   * the value can be overriden by the SnapshotStoreSpec implementation with a note in a plugin documentation.
+   * the value can be overridden by the SnapshotStoreSpec implementation with a note in a plugin documentation.
    */
   def snapshotByteSizeLimit = 10000

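Per that note, a plugin's TCK spec can adjust the limit with a plain override (the class name and value are illustrative):

  class MySnapshotStoreSpec extends SnapshotStoreSpec(config) {
    override def snapshotByteSizeLimit = 100000
  }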
@@ -132,7 +132,7 @@ private[persistence] class LocalSnapshotStore(config: Config) extends SnapshotSt
   private def withStream[A <: Closeable, B](stream: A, p: A ⇒ B): B =
     try { p(stream) } finally { stream.close() }

-  /** Only by persistenceId and sequenceNr, timestamp is informational - accomodates for 2.13.x series files */
+  /** Only by persistenceId and sequenceNr, timestamp is informational - accommodates for 2.13.x series files */
   protected def snapshotFileForWrite(metadata: SnapshotMetadata, extension: String = ""): File =
     new File(snapshotDir, s"snapshot-${URLEncoder.encode(metadata.persistenceId, UTF_8)}-${metadata.sequenceNr}-${metadata.timestamp}${extension}")

@@ -58,7 +58,7 @@ import java.util.List;
  * that desires a Message instead of a Builder. In terms of the implementation,
  * the {@code SingleFieldBuilder} and {@code RepeatedFieldBuilder}
  * classes cache messages that were created so that messages only need to be
- * created when some change occured in its builder or a builder for one of its
+ * created when some change occurred in its builder or a builder for one of its
  * descendants.
  *
  * @param <MType> the type of message for the field
@@ -51,7 +51,7 @@ package akka.protobuf;
 * that desires a Message instead of a Builder. In terms of the implementation,
 * the {@code SingleFieldBuilder} and {@code RepeatedFieldBuilder}
 * classes cache messages that were created so that messages only need to be
-* created when some change occured in its builder or a builder for one of its
+* created when some change occurred in its builder or a builder for one of its
 * descendants.
 *
 * @param <MType> the type of message for the field
@@ -902,7 +902,7 @@ akka {
       # Only used when transport is aeron-udp.
       aeron-dir = ""

-      # Whether to delete aeron embeded driver directory upon driver stop.
+      # Whether to delete aeron embedded driver directory upon driver stop.
       # Only used when transport is aeron-udp.
       delete-aeron-dir = yes

@@ -409,7 +409,7 @@ private[akka] class RemoteActorRefProvider(
     // using thread local LRU cache, which will call internalRresolveActorRef
     // if the value is not cached
     actorRefResolveThreadLocalCache match {
-      case null ⇒ internalResolveActorRef(path) // not initalized yet
+      case null ⇒ internalResolveActorRef(path) // not initialized yet
       case c    ⇒ c.threadLocalCache(this).getOrCompute(path)
     }
   }
@@ -1013,7 +1013,7 @@ private[remote] class AssociationRegistry(createAssociation: Address ⇒ Associa
         // make sure we don't overwrite same UID with different association
         throw new IllegalArgumentException(s"UID collision old [$previous] new [$a]")
       case _ ⇒
-        // update associationsByUid Map with the uid -> assocation
+        // update associationsByUid Map with the uid -> association
         val newMap = currentMap.updated(peer.uid, a)
         if (associationsByUid.compareAndSet(currentMap, newMap))
           a
@@ -73,7 +73,7 @@ private[remote] class SynchronizedEventSink(delegate: EventSink) extends EventSi
 /**
  * INTERNAL API
  *
- * Update clock at various resolutions and aquire the resulting timestamp.
+ * Update clock at various resolutions and acquire the resulting timestamp.
  */
 private[remote] trait EventClock {

@@ -226,7 +226,7 @@ private[remote] object InboundCompression {
    *
    * @param oldTables is guaranteed to always have at-least one and at-most [[keepOldTables]] elements.
    *   It starts with containing only a single "disabled" table (versioned as `DecompressionTable.DisabledVersion`),
-   *   and from there on continiously accumulates at most [[keepOldTables]] recently used tables.
+   *   and from there on continuously accumulates at most [[keepOldTables]] recently used tables.
    */
   final case class Tables[T](
     oldTables: List[DecompressionTable[T]],
@@ -485,14 +485,14 @@ private[akka] final class UnknownCompressedIdException(id: Long)
 private[remote] case object NoInboundCompressions extends InboundCompressions {
   override def hitActorRef(originUid: Long, remote: Address, ref: ActorRef, n: Int): Unit = ()
   override def decompressActorRef(originUid: Long, tableVersion: Byte, idx: Int): OptionVal[ActorRef] =
-    if (idx == -1) throw new IllegalArgumentException("Attemted decompression of illegal compression id: -1")
+    if (idx == -1) throw new IllegalArgumentException("Attempted decompression of illegal compression id: -1")
     else OptionVal.None
   override def confirmActorRefCompressionAdvertisement(originUid: Long, tableVersion: Byte): Unit = ()
   override def runNextActorRefAdvertisement(): Unit = ()

   override def hitClassManifest(originUid: Long, remote: Address, manifest: String, n: Int): Unit = ()
   override def decompressClassManifest(originUid: Long, tableVersion: Byte, idx: Int): OptionVal[String] =
-    if (idx == -1) throw new IllegalArgumentException("Attemted decompression of illegal compression id: -1")
+    if (idx == -1) throw new IllegalArgumentException("Attempted decompression of illegal compression id: -1")
     else OptionVal.None
   override def confirmClassManifestCompressionAdvertisement(originUid: Long, tableVersion: Byte): Unit = ()
   override def runNextClassManifestAdvertisement(): Unit = ()
@@ -436,7 +436,7 @@ class MiscMessageSerializer(val system: ExtendedActorSystem) extends SerializerW
     Address(
       a.getProtocol,
       a.getSystem,
-      // technicaly the presence of hostname and port are guaranteed, see our serializeAddressData
+      // technically the presence of hostname and port are guaranteed, see our serializeAddressData
       if (a.hasHostname) Some(a.getHostname) else None,
       if (a.hasPort) Some(a.getPort) else None
     )
@@ -445,7 +445,7 @@ class MiscMessageSerializer(val system: ExtendedActorSystem) extends SerializerW
     Address(
       a.getProtocol,
       a.getSystem,
-      // technicaly the presence of hostname and port are guaranteed, see our serializeAddressData
+      // technically the presence of hostname and port are guaranteed, see our serializeAddressData
       if (a.hasHostname) Some(a.getHostname) else None,
       if (a.hasPort) Some(a.getPort) else None
     )
@@ -15,7 +15,7 @@ class RemoteConsistentHashingRouterSpec extends AkkaSpec("""

   "ConsistentHashingGroup" must {

-    "use same hash ring indepenent of self address" in {
+    "use same hash ring independent of self address" in {
       // simulating running router on two different nodes (a1, a2) with target routees on 3 other nodes (s1, s2, s3)
       val a1 = Address("akka.tcp", "Sys", "client1", 2552)
       val a2 = Address("akka.tcp", "Sys", "client2", 2552)
@@ -72,7 +72,7 @@ class AllowJavaSerializationOffSpec extends AkkaSpec(
   akka {
     loglevel = debug
     actor {
-      enable-additional-serialization-bindings = off # this should be overriden by the setting below, which should force it to be on
+      enable-additional-serialization-bindings = off # this should be overridden by the setting below, which should force it to be on
       allow-java-serialization = off
       # this is by default on, but tests are running with off, use defaults here
       warn-about-java-serializer-usage = on
@@ -432,7 +432,7 @@ object TestSubscriber {
     }

     /**
-     * Expect subscription to be followed immediatly by an error signal.
+     * Expect subscription to be followed immediately by an error signal.
      *
      * By default `1` demand will be signalled in order to wake up a possibly lazy upstream.
      *
@@ -443,9 +443,9 @@ object TestSubscriber {
     }

     /**
-     * Expect subscription to be followed immediatly by an error signal.
+     * Expect subscription to be followed immediately by an error signal.
      *
-     * Depending on the `signalDemand` parameter demand may be signalled immediatly after obtaining the subscription
+     * Depending on the `signalDemand` parameter demand may be signalled immediately after obtaining the subscription
      * in order to wake up a possibly lazy upstream. You can disable this by setting the `signalDemand` parameter to `false`.
      *
      * See also [[#expectSubscriptionAndError()]].
@@ -499,7 +499,7 @@ object TestSubscriber {
      *
      * Expect subscription followed by immediate stream completion.
      *
-     * Depending on the `signalDemand` parameter demand may be signalled immediatly after obtaining the subscription
+     * Depending on the `signalDemand` parameter demand may be signalled immediately after obtaining the subscription
      * in order to wake up a possibly lazy upstream. You can disable this by setting the `signalDemand` parameter to `false`.
      *
      * See also [[#expectSubscriptionAndComplete]].
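A minimal sketch of `expectSubscriptionAndError` in use (assuming an implicit ActorSystem and Materializer in scope; names are illustrative):

  import akka.stream.scaladsl.{ Sink, Source }
  import akka.stream.testkit.TestSubscriber

  val probe = TestSubscriber.manualProbe[Int]()
  Source.failed[Int](new RuntimeException("boom")).runWith(Sink.fromSubscriber(probe))
  probe.expectSubscriptionAndError() // signals demand 1, then expects the failure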
@@ -599,7 +599,7 @@ class InterpreterSpec extends StreamSpec with GraphInterpreterSpecKit {
         push(out, lastElem)
       }

-      // note that the default value of lastElem will be always pushed if the upstream closed at the very begining without a pulling
+      // note that the default value of lastElem will be always pushed if the upstream closed at the very beginning without a pulling
       override def onPull(): Unit = {
         if (isClosed(in)) {
           push(out, lastElem)
@@ -297,7 +297,7 @@ class AttributesSpec extends StreamSpec(ConfigFactory.parseString(
           new ThreadNameSnitchingStage("akka.stream.default-blocking-io-dispatcher"))
         // this already introduces an async boundary here
         .map(identity)
-        // this is now just for map since there already is one inbetween stage and map
+        // this is now just for map since there already is one in-between stage and map
         .async // potential sugar .async("my-dispatcher")
         .addAttributes(ActorAttributes.dispatcher("my-dispatcher"))
         .runWith(Sink.head)
@@ -400,7 +400,7 @@ class AttributesSpec extends StreamSpec(ConfigFactory.parseString(
           new ThreadNameSnitchingStage("akka.stream.default-blocking-io-dispatcher"))
         // this already introduces an async boundary here
         .detach
-        // this is now just for map since there already is one inbetween stage and map
+        // this is now just for map since there already is one in-between stage and map
         .async
         .addAttributes(ActorAttributes.dispatcher("my-dispatcher"))
         .runWith(javadsl.Sink.head(), materializer)
@@ -161,7 +161,7 @@ class FlowThrottleSpec extends StreamSpec {
       downstream.cancel()
     }

-    "throw exception when exceeding throughtput in enforced mode" in assertAllStagesStopped {
+    "throw exception when exceeding throughput in enforced mode" in assertAllStagesStopped {
       Await.result(
         Source(1 to 5).throttle(1, 200.millis, 5, Enforcing).runWith(Sink.seq),
         2.seconds) should ===(1 to 5) // Burst is 5 so this will not fail
@@ -282,7 +282,7 @@ class FlowThrottleSpec extends StreamSpec {
       downstream.cancel()
     }

-    "throw exception when exceeding throughtput in enforced mode" in assertAllStagesStopped {
+    "throw exception when exceeding throughput in enforced mode" in assertAllStagesStopped {
       Await.result(
         Source(1 to 4).throttle(2, 200.millis, 10, identity, Enforcing).runWith(Sink.seq),
         2.seconds) should ===(1 to 4) // Burst is 10 so this will not fail
@@ -60,8 +60,8 @@ akka {
       max-fixed-buffer-size = 1000000000

       # Maximum number of sync messages that actor can process for stream to substream communication.
-      # Parameter allows to interrupt synchronous processing to get upsteam/downstream messages.
-      # Allows to accelerate message processing that happening withing same actor but keep system responsive.
+      # Parameter allows to interrupt synchronous processing to get upstream/downstream messages.
+      # Allows to accelerate message processing that happening within same actor but keep system responsive.
       sync-processing-limit = 1000

       debug {
@@ -115,7 +115,7 @@ akka {

     # Deprecated, use akka.stream.materializer.blocking-io-dispatcher, this setting
     # was never applied because of bug #24357
-    # It must still have a valid value becuase used from Akka HTTP.
+    # It must still have a valid value because used from Akka HTTP.
     blocking-io-dispatcher = "akka.stream.default-blocking-io-dispatcher"

     default-blocking-io-dispatcher {
@@ -86,7 +86,7 @@ final case class InvalidSequenceNumberException(expectedSeqNr: Long, gotSeqNr: L
  * of a stream. Each such actor refers to the other side as its "partner". We make sure that no other actor than
  * the initial partner can send demand/messages to the other side accidentally.
  *
- * This exception is thrown when a message is recived from a non-partner actor,
+ * This exception is thrown when a message is received from a non-partner actor,
  * which could mean a bug or some actively malicient behavior from the other side.
  *
  * This is not meant as a security feature, but rather as plain sanity-check.
@@ -1351,7 +1351,7 @@ final class Flow[In, Out, Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends Gr
    * Since the underlying failure signal onError arrives out-of-band, it might jump over existing elements.
    * This operator can recover the failure signal, but not the skipped elements, which will be dropped.
    *
-   * Similarily to [[recover]] throwing an exception inside `mapError` _will_ be logged.
+   * Similarly to [[recover]] throwing an exception inside `mapError` _will_ be logged.
    *
    * '''Emits when''' element is available from the upstream or upstream is failed and pf returns an element
    *
@@ -1222,7 +1222,7 @@ final class Source[Out, Mat](delegate: scaladsl.Source[Out, Mat]) extends Graph[
    * Since the underlying failure signal onError arrives out-of-band, it might jump over existing elements.
    * This operator can recover the failure signal, but not the skipped elements, which will be dropped.
    *
-   * Similarily to [[recover]] throwing an exception inside `mapError` _will_ be logged.
+   * Similarly to [[recover]] throwing an exception inside `mapError` _will_ be logged.
    *
    * '''Emits when''' element is available from the upstream or upstream is failed and pf returns an element
    *
@@ -951,7 +951,7 @@ class SubFlow[In, Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flow[I
    * Since the underlying failure signal onError arrives out-of-band, it might jump over existing elements.
    * This operator can recover the failure signal, but not the skipped elements, which will be dropped.
    *
-   * Similarily to [[recover]] throwing an exception inside `mapError` _will_ be logged.
+   * Similarly to [[recover]] throwing an exception inside `mapError` _will_ be logged.
    *
    * '''Emits when''' element is available from the upstream or upstream is failed and pf returns an element
    *
@@ -933,7 +933,7 @@ class SubSource[Out, Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source[O
    * Since the underlying failure signal onError arrives out-of-band, it might jump over existing elements.
    * This operator can recover the failure signal, but not the skipped elements, which will be dropped.
    *
-   * Similarily to [[recover]] throwing an exception inside `mapError` _will_ be logged.
+   * Similarly to [[recover]] throwing an exception inside `mapError` _will_ be logged.
    *
    * '''Emits when''' element is available from the upstream or upstream is failed and pf returns an element
    *
@@ -736,7 +736,7 @@ trait FlowOps[+Out, +Mat] {
    * Since the underlying failure signal onError arrives out-of-band, it might jump over existing elements.
    * This operator can recover the failure signal, but not the skipped elements, which will be dropped.
    *
-   * Similarily to [[recover]] throwing an exception inside `mapError` _will_ be logged.
+   * Similarly to [[recover]] throwing an exception inside `mapError` _will_ be logged.
    *
    * '''Emits when''' element is available from the upstream or upstream is failed and pf returns an element
    *
@@ -749,7 +749,7 @@ final class Partition[T](val outputPorts: Int, val partitioner: T ⇒ Int, val e
   def this(outputPorts: Int, partitioner: T ⇒ Int) = this(outputPorts, partitioner, false)

   val in: Inlet[T] = Inlet[T]("Partition.in")
-  val out: Seq[Outlet[T]] = Seq.tabulate(outputPorts)(i ⇒ Outlet[T]("Partition.out" + i)) // FIXME BC make this immutable.IndexedSeq as type + Vector as concret impl
+  val out: Seq[Outlet[T]] = Seq.tabulate(outputPorts)(i ⇒ Outlet[T]("Partition.out" + i)) // FIXME BC make this immutable.IndexedSeq as type + Vector as concrete impl
   override val shape: UniformFanOutShape[T, T] = UniformFanOutShape[T, T](in, out: _*)

   override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = new GraphStageLogic(shape) with InHandler {
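A minimal sketch of `Partition` in a graph (assuming an implicit Materializer; the routing function and sinks are illustrative):

  import akka.stream.ClosedShape
  import akka.stream.scaladsl.{ GraphDSL, Partition, RunnableGraph, Sink, Source }

  RunnableGraph.fromGraph(GraphDSL.create() { implicit b ⇒
    import GraphDSL.Implicits._
    // two output ports: even numbers to out(0), odd numbers to out(1)
    val partition = b.add(Partition[Int](2, i ⇒ i % 2))
    Source(1 to 10) ~> partition.in
    partition.out(0) ~> Sink.foreach[Int](n ⇒ println(s"even: $n"))
    partition.out(1) ~> Sink.foreach[Int](n ⇒ println(s"odd: $n"))
    ClosedShape
  }).run()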
@@ -41,7 +41,7 @@ import org.junit.rules.ExternalResource;
  * </code>
  *
  * Note that it is important to not use <code>getSystem</code> from the
- * constructor of the test, becuase some test runners may create an
+ * constructor of the test, because some test runners may create an
  * instance of the class without actually using it later, resulting in
  * memory leaks because of not shutting down the actor system.
  */
@@ -45,35 +45,35 @@ private[akka] trait MemoryUsageSnapshotting extends MetricsPrefix {

 }

-private[akka] case class TotalMemoryUsage(init: Long, used: Long, max: Long, comitted: Long) {
+private[akka] case class TotalMemoryUsage(init: Long, used: Long, max: Long, committed: Long) {

   def diff(other: TotalMemoryUsage): TotalMemoryUsage =
     TotalMemoryUsage(
       this.init - other.init,
       this.used - other.used,
       this.max - other.max,
-      this.comitted - other.comitted)
+      this.committed - other.committed)

 }

-private[akka] case class HeapMemoryUsage(init: Long, used: Long, max: Long, comitted: Long, usage: Double) {
+private[akka] case class HeapMemoryUsage(init: Long, used: Long, max: Long, committed: Long, usage: Double) {

   def diff(other: HeapMemoryUsage): HeapMemoryUsage =
     HeapMemoryUsage(
       this.init - other.init,
       this.used - other.used,
       this.max - other.max,
-      this.comitted - other.comitted,
+      this.committed - other.committed,
       this.usage - other.usage)
 }

-private[akka] case class NonHeapMemoryUsage(init: Long, used: Long, max: Long, comitted: Long, usage: Double) {
+private[akka] case class NonHeapMemoryUsage(init: Long, used: Long, max: Long, committed: Long, usage: Double) {

   def diff(other: NonHeapMemoryUsage): NonHeapMemoryUsage =
     NonHeapMemoryUsage(
       this.init - other.init,
       this.used - other.used,
       this.max - other.max,
-      this.comitted - other.comitted,
+      this.committed - other.committed,
       this.usage - other.usage)
 }