change spelling of behaviour to behavior, #24457

Patrik Nordwall 2018-01-31 16:25:31 +01:00
parent 4af523a012
commit 23fa8b0810
37 changed files with 81 additions and 81 deletions

View file

@ -73,7 +73,7 @@ The steps are exactly the same for everyone involved in the project (be it core
- Please make sure to follow the general quality guidelines (specified below) when developing your patch.
- Please write additional tests covering your feature and adjust existing ones if needed before submitting your pull request. The `validatePullRequest` sbt task ([explained below](#the-validatepullrequest-task)) may come in handy to verify your changes are correct.
1. Once your feature is complete, prepare the commit following our [Creating Commits And Writing Commit Messages](#creating-commits-and-writing-commit-messages). For example, a good commit message would be: `Adding compression support for Manifests #22222` (note the reference to the ticket it aimed to resolve).
1. If it's a new feature, or a change of behaviour, document it on the [akka-docs](https://github.com/akka/akka/tree/master/akka-docs), remember, an undocumented feature is not a feature. If the feature was touching Scala or Java DSL, make sure to document both the Scala and Java APIs.
1. If it's a new feature, or a change of behavior, document it on the [akka-docs](https://github.com/akka/akka/tree/master/akka-docs), remember, an undocumented feature is not a feature. If the feature was touching Scala or Java DSL, make sure to document both the Scala and Java APIs.
1. Now it's finally time to [submit the pull request](https://help.github.com/articles/using-pull-requests)!
- Please make sure to include a reference to the issue you're solving *in the comment* for the Pull Request, this will cause the PR to be linked properly with the Issue. Examples of good phrases for this are: "Resolves #1234" or "Refs #1234".
1. If you have not already done so, you will be asked by our CLA bot to [sign the Lightbend CLA](http://www.lightbend.com/contribute/cla) online. CLA stands for Contributor License Agreement and is a way of protecting the project from intellectual property disputes.

View file

@ -23,12 +23,12 @@ object ActorWithBoundedStashSpec {
sender() ! "ok"
case "world"
context.become(afterWorldBehaviour)
context.become(afterWorldBehavior)
unstashAll()
}
def afterWorldBehaviour: Receive = {
def afterWorldBehavior: Receive = {
case _ ⇒ stash()
}
}

View file

@ -38,7 +38,7 @@ public class BasicSyncTestingTest extends JUnitSuite {
}
}
public static Behavior<Command> myBehaviour = Behaviors.immutable(Command.class)
public static Behavior<Command> myBehavior = Behaviors.immutable(Command.class)
.onMessage(CreateAChild.class, (ctx, msg) -> {
ctx.spawn(childActor, msg.childName);
return Behaviors.same();
@ -66,7 +66,7 @@ public class BasicSyncTestingTest extends JUnitSuite {
@Test
public void testSpawning() {
//#test-child
BehaviorTestkit<Command> test = BehaviorTestkit.create(myBehaviour);
BehaviorTestkit<Command> test = BehaviorTestkit.create(myBehavior);
test.run(new CreateAChild("child"));
test.expectEffect(new Effect.Spawned(childActor, "child", Props.empty()));
//#test-child
@ -75,7 +75,7 @@ public class BasicSyncTestingTest extends JUnitSuite {
@Test
public void testSpawningAnonymous() {
//#test-anonymous-child
BehaviorTestkit<Command> test = BehaviorTestkit.create(myBehaviour);
BehaviorTestkit<Command> test = BehaviorTestkit.create(myBehavior);
test.run(new CreateAnAnonymousChild());
test.expectEffect(new Effect.SpawnedAnonymous(childActor, Props.empty()));
//#test-anonymous-child
@ -84,7 +84,7 @@ public class BasicSyncTestingTest extends JUnitSuite {
@Test
public void testRecodingMessageSend() {
//#test-message
BehaviorTestkit<Command> test = BehaviorTestkit.create(myBehaviour);
BehaviorTestkit<Command> test = BehaviorTestkit.create(myBehavior);
TestInbox<String> inbox = new TestInbox<String>();
test.run(new SayHello(inbox.ref()));
inbox.expectMsg("hello");
@ -94,7 +94,7 @@ public class BasicSyncTestingTest extends JUnitSuite {
@Test
public void testMessageToChild() {
//#test-child-message
BehaviorTestkit<Command> testKit = BehaviorTestkit.create(myBehaviour);
BehaviorTestkit<Command> testKit = BehaviorTestkit.create(myBehavior);
testKit.run(new SayHelloToChild("child"));
TestInbox<String> childInbox = testKit.childInbox("child");
childInbox.expectMsg("hello");
@ -104,7 +104,7 @@ public class BasicSyncTestingTest extends JUnitSuite {
@Test
public void testMessageToAnonymousChild() {
//#test-child-message-anonymous
BehaviorTestkit<Command> testKit = BehaviorTestkit.create(myBehaviour);
BehaviorTestkit<Command> testKit = BehaviorTestkit.create(myBehavior);
testKit.run(new SayHelloToAnonymousChild());
// Anonymous actors are created as: $a $b etc
TestInbox<String> childInbox = testKit.childInbox("$a");

View file

@ -31,7 +31,7 @@ class DeferredSpec extends TestKit with TypedAkkaSpec {
import DeferredSpec._
implicit val testSettings = TestKitSettings(system)
"Deferred behaviour" must {
"Deferred behavior" must {
"must create underlying" in {
val probe = TestProbe[Event]("evt")
val behv = Behaviors.deferred[Command] { _ ⇒

View file

@ -274,22 +274,22 @@ class AdapterSpec extends AkkaSpec {
probe.expectMsg("terminated")
}
"spawn untyped behaviour anonymously" in {
"spawn untyped behavior anonymously" in {
val probe = TestProbe()
val untypedBehaviour: Behavior[String] = new UntypedBehavior[String] {
val untypedBehavior: Behavior[String] = new UntypedBehavior[String] {
override private[akka] def untypedProps: Props = untypedForwarder(probe.ref)
}
val ref = system.spawnAnonymous(untypedBehaviour)
val ref = system.spawnAnonymous(untypedBehavior)
ref ! "hello"
probe.expectMsg("hello")
}
"spawn untyped behaviour" in {
"spawn untyped behavior" in {
val probe = TestProbe()
val untypedBehaviour: Behavior[String] = new UntypedBehavior[String] {
val untypedBehavior: Behavior[String] = new UntypedBehavior[String] {
override private[akka] def untypedProps: Props = untypedForwarder(probe.ref)
}
val ref = system.spawn(untypedBehaviour, "test")
val ref = system.spawn(untypedBehavior, "test")
ref ! "hello"
probe.expectMsg("hello")
}

View file

@ -23,7 +23,7 @@ object BasicSyncTestingSpec {
case object SayHelloToAnonymousChild extends Cmd
case class SayHello(who: ActorRef[String]) extends Cmd
val myBehaviour = Behaviors.immutablePartial[Cmd] {
val myBehavior = Behaviors.immutablePartial[Cmd] {
case (ctx, CreateChild(name)) ⇒
ctx.spawn(childActor, name)
Behaviors.same
@ -54,7 +54,7 @@ class BasicSyncTestingSpec extends WordSpec with Matchers {
"record spawning" in {
//#test-child
val testKit = BehaviorTestkit(myBehaviour)
val testKit = BehaviorTestkit(myBehavior)
testKit.run(CreateChild("child"))
testKit.expectEffect(Spawned(childActor, "child"))
//#test-child
@ -62,7 +62,7 @@ class BasicSyncTestingSpec extends WordSpec with Matchers {
"record spawning anonymous" in {
//#test-anonymous-child
val testKit = BehaviorTestkit(myBehaviour)
val testKit = BehaviorTestkit(myBehavior)
testKit.run(CreateAnonymousChild)
testKit.expectEffect(SpawnedAnonymous(childActor))
//#test-anonymous-child
@ -70,7 +70,7 @@ class BasicSyncTestingSpec extends WordSpec with Matchers {
"record message sends" in {
//#test-message
val testKit = BehaviorTestkit(myBehaviour)
val testKit = BehaviorTestkit(myBehavior)
val inbox = TestInbox[String]()
testKit.run(SayHello(inbox.ref))
inbox.expectMsg("hello")
@ -79,7 +79,7 @@ class BasicSyncTestingSpec extends WordSpec with Matchers {
"send a message to a spawned child" in {
//#test-child-message
val testKit = BehaviorTestkit(myBehaviour)
val testKit = BehaviorTestkit(myBehavior)
testKit.run(SayHelloToChild("child"))
val childInbox = testKit.childInbox[String]("child")
childInbox.expectMsg("hello")
@ -88,7 +88,7 @@ class BasicSyncTestingSpec extends WordSpec with Matchers {
"send a message to an anonymous spawned child" in {
//#test-child-message-anonymous
val testKit = BehaviorTestkit(myBehaviour)
val testKit = BehaviorTestkit(myBehavior)
testKit.run(SayHelloToAnonymousChild)
// Anonymous actors are created as: $a $b etc
val childInbox = testKit.childInbox[String](s"$$a")

View file

@ -175,7 +175,7 @@ object ActorSystem {
/**
* Java API: Shortcut for creating an actor system with custom bootstrap settings.
* Same behaviour as calling `ActorSystem.create(name, ActorSystemSetup.create(bootstrapSettings))`
* Same behavior as calling `ActorSystem.create(name, ActorSystemSetup.create(bootstrapSettings))`
*/
def create(name: String, bootstrapSetup: BootstrapSetup): ActorSystem =
create(name, ActorSystemSetup.create(bootstrapSetup))
@ -247,7 +247,7 @@ object ActorSystem {
/**
* Scala API: Shortcut for creating an actor system with custom bootstrap settings.
* Same behaviour as calling `ActorSystem(name, ActorSystemSetup(bootstrapSetup))`
* Same behavior as calling `ActorSystem(name, ActorSystemSetup(bootstrapSetup))`
*/
def apply(name: String, bootstrapSetup: BootstrapSetup): ActorSystem =
create(name, ActorSystemSetup.create(bootstrapSetup))

View file

@ -201,7 +201,7 @@ private[akka] class Mailboxes(
mailboxType match {
case m: ProducesPushTimeoutSemanticsMailbox if m.pushTimeOut.toNanos > 0L ⇒
warn(s"Configured potentially-blocking mailbox [$id] configured with non-zero pushTimeOut (${m.pushTimeOut}), " +
s"which can lead to blocking behaviour when sending messages to this mailbox. " +
s"which can lead to blocking behavior when sending messages to this mailbox. " +
s"Avoid this by setting `$id.mailbox-push-timeout-time` to `0`.")
mailboxNonZeroPushTimeoutWarningIssued = true
case _ ⇒ // good; nothing to see here, move along, sir.
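For reference, a bounded mailbox configured so that this warning never triggers might look like the following sketch; the mailbox id `my-bounded-mailbox` and the capacity are assumptions:

```scala
import com.typesafe.config.ConfigFactory

// Hypothetical mailbox id; a zero push timeout keeps sends non-blocking,
// which is exactly what the warning above recommends.
val boundedMailboxConfig = ConfigFactory.parseString(
  """
  my-bounded-mailbox {
    mailbox-type = "akka.dispatch.BoundedMailbox"
    mailbox-capacity = 1000
    mailbox-push-timeout-time = 0
  }
  """)
```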

View file

@ -68,7 +68,7 @@ object ByteString {
* methods defined on ByteString. This method of creating a ByteString saves one array
* copy and allocation and therefore can lead to better performance, however it also means
* that one MUST NOT modify the passed in array, or unexpected immutable data structure
* contract-breaking behaviour will manifest itself.
* contract-breaking behavior will manifest itself.
*
This API is intended for users who have obtained a byte array from some other API, and
want to wrap it into a ByteString, and from there on only use that reference (the ByteString)
@ -93,7 +93,7 @@ object ByteString {
* methods defined on ByteString. This method of creating a ByteString saves one array
* copy and allocation and therefore can lead to better performance, however it also means
* that one MUST NOT modify the passed in array, or unexpected immutable data structure
* contract-breaking behaviour will manifest itself.
* contract-breaking behavior will manifest itself.
*
This API is intended for users who have obtained a byte array from some other API, and
want to wrap it into a ByteString, and from there on only use that reference (the ByteString)
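The scaladoc above belongs to the zero-copy array wrapper; assuming that factory is `ByteString.fromArrayUnsafe`, a minimal sketch of the intended usage:

```scala
import akka.util.ByteString

val raw: Array[Byte] = Array(1, 2, 3)
// Wraps without copying; from here on the array must never be mutated,
// otherwise the ByteString's immutability contract is broken.
val bs: ByteString = ByteString.fromArrayUnsafe(raw)
```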

View file

@ -67,7 +67,7 @@ object ClusterSingletonManagerSpec extends MultiNodeConfig {
/**
* This channel is extremely strict with regards to
* registration and unregistration of consumer to
* be able to detect misbehaviour (e.g. two active
* be able to detect misbehavior (e.g. two active
* singleton instances).
*/
class PointToPointChannel extends Actor with ActorLogging {
@ -334,7 +334,7 @@ class ClusterSingletonManagerSpec extends MultiNodeSpec(ClusterSingletonManagerS
memberProbe.expectMsgClass(classOf[CurrentClusterState])
runOn(controller) {
// watch that it is not terminated, which would indicate misbehaviour
// watch that it is not terminated, which would indicate misbehavior
watch(system.actorOf(Props[PointToPointChannel], "queue"))
}
enterBarrier("queue-started")

View file

@ -59,7 +59,7 @@ akka {
down-removal-margin = off
# Pluggable support for downing of nodes in the cluster.
# If this setting is left empty behaviour will depend on 'auto-down-unreachable' in the following ways:
# If this setting is left empty behavior will depend on 'auto-down-unreachable' in the following ways:
# * if it is 'off' the `NoDowning` provider is used and no automatic downing will be performed
# * if it is set to a duration the `AutoDowning` provider is with the configured downing duration
#

View file

@ -138,7 +138,7 @@ Ie. this function should return `true` if the call should increase failure count
### Low level API
The low-level API allows you to describe the behaviour of the CircuitBreaker in detail, including deciding what to return to the calling `Actor` in case of success or failure. This is especially useful when expecting the remote call to send a reply. CircuitBreaker doesn't support `Tell Protection` (protecting against calls that expect a reply) natively at the moment, so you need to use the low-level power-user APIs, `succeed` and `fail` methods, as well as `isClosed`, `isOpen`, `isHalfOpen` to implement it.
The low-level API allows you to describe the behavior of the CircuitBreaker in detail, including deciding what to return to the calling `Actor` in case of success or failure. This is especially useful when expecting the remote call to send a reply. CircuitBreaker doesn't support `Tell Protection` (protecting against calls that expect a reply) natively at the moment, so you need to use the low-level power-user APIs, `succeed` and `fail` methods, as well as `isClosed`, `isOpen`, `isHalfOpen` to implement it.
As can be seen in the examples below, a `Tell Protection` pattern could be implemented by using the `succeed` and `fail` methods, which would count towards the `CircuitBreaker` counts. In the example, a call is made to the remote service if the `breaker.isClosed`, and once a response is received, the `succeed` method is invoked, which tells the `CircuitBreaker` to keep the breaker closed. If on the other hand an error or timeout is received, we trigger a `fail` and the breaker accrues this failure towards its count for opening the breaker.
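A minimal sketch of that `Tell Protection` pattern using the power-user API; the message types and the `remoteService` reference are hypothetical, and half-open trial calls are omitted for brevity:

```scala
import akka.actor.{ Actor, ActorRef }
import akka.pattern.CircuitBreaker
import scala.concurrent.duration._

// Hypothetical protocol for the protected remote call.
final case class Request(payload: String)
final case class Response(payload: String)
case object CallTimedOut

class TellProtectingActor(remoteService: ActorRef) extends Actor {
  import context.dispatcher // ExecutionContext for the breaker's timers

  private val breaker = new CircuitBreaker(
    context.system.scheduler,
    maxFailures = 5,
    callTimeout = 10.seconds,
    resetTimeout = 1.minute)

  def receive = {
    case r: Request if breaker.isClosed =>
      remoteService ! r                 // breaker closed: let the call through
    case _: Request =>
      sender() ! "service unavailable"  // breaker open: short-circuit the call
    case _: Response =>
      breaker.succeed()                 // reply arrived in time: count a success
    case CallTimedOut =>
      breaker.fail()                    // no reply or error: count a failure
  }
}
```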

View file

@ -306,11 +306,11 @@ that you have not configured an explicit dispatcher for).
### Solution: Dedicated dispatcher for blocking operations
One of the most efficient methods of isolating the blocking behaviour such that it does not impact the rest of the system
One of the most efficient methods of isolating the blocking behavior such that it does not impact the rest of the system
is to prepare and use a dedicated dispatcher for all those blocking operations.
This technique is often referred to as "bulk-heading" or simply "isolating blocking".
In `application.conf`, the dispatcher dedicated to blocking behaviour should
In `application.conf`, the dispatcher dedicated to blocking behavior should
be configured as follows:
<!--same config text for Scala & Java-->
@ -332,7 +332,7 @@ Scala
Java
: @@snip [BlockingDispatcherSample.java]($akka$/akka-docs/src/test/java/jdocs/actor/SeparateDispatcherFutureActor.java) { #separate-dispatcher }
The thread pool behaviour is shown in the below diagram.
The thread pool behavior is shown in the below diagram.
![dispatcher-behaviour-on-good-code.png](./images/dispatcher-behaviour-on-good-code.png)

View file

@ -498,7 +498,7 @@ There is a special version of `ORMultiMap`, created by using separate constructo
This means that the `ORMultiMap` initiated with `ORMultiMap.emptyWithValueDeltas` propagates its updates as pairs
consisting of delta of the key and delta of the value. It is much more efficient in terms of network bandwidth consumed.
However, this behaviour has not been made default for `ORMultiMap` and if you wish to use it in your code, you
However, this behavior has not been made default for `ORMultiMap` and if you wish to use it in your code, you
need to replace invocations of `ORMultiMap.empty[A, B]` (or `ORMultiMap()`) with `ORMultiMap.emptyWithValueDeltas[A, B]`
where `A` and `B` are the types of the keys and values in the map, respectively.
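A minimal sketch of the swap described above; only construction is shown, since both variants are used the same way afterwards:

```scala
import akka.cluster.ddata.ORMultiMap

// Plain ORMultiMap: an update to a key ships the key's whole value set.
val coarse = ORMultiMap.empty[String, Int]

// Drop-in replacement that also propagates deltas of the values,
// reducing the network bandwidth used per update.
val fine = ORMultiMap.emptyWithValueDeltas[String, Int]
```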

View file

@ -409,7 +409,7 @@ which means `scala-java8-compat` implementation is not used after the first mapp
@@@ note { .group-java }
After adding any additional computation stage to `CompletionStage` returned by `scala-java8-compat`
(e.g. `CompletionStage` instances returned by Akka) it falls back to standard behaviour of Java `CompletableFuture`.
(e.g. `CompletionStage` instances returned by Akka) it falls back to standard behavior of Java `CompletableFuture`.
@@@

View file

@ -373,7 +373,7 @@ retain both the thread safety (including the right value of @scala[`sender()`]@j
In general it is encouraged to create command handlers which do not need to resort to nested event persisting,
however there are situations where it may be useful. It is important to understand the ordering of callback execution in
those situations, as well as their implication on the stashing behaviour (that `persist()` enforces). In the following
those situations, as well as their implication on the stashing behavior (that `persist()` enforces). In the following
example two persist calls are issued, and each of them issues another persist inside its callback:
Scala
@ -554,7 +554,7 @@ Consider using explicit shut-down messages instead of `PoisonPill` when working
@@@
The example below highlights how messages arrive in the Actor's mailbox and how they interact with its internal stashing
mechanism when `persist()` is used. Notice the early stop behaviour that occurs when `PoisonPill` is used:
mechanism when `persist()` is used. Notice the early stop behavior that occurs when `PoisonPill` is used:
Scala
: @@snip [PersistenceDocSpec.scala]($code$/scala/docs/persistence/PersistenceDocSpec.scala) { #safe-shutdown }
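Returning to the nested persist ordering discussed above, a minimal sketch of the shape involved; the persistence id and string events are hypothetical:

```scala
import akka.persistence.PersistentActor

// Handlers run as outer-1, outer-2, inner-1, inner-2, and new commands stay
// stashed until all of these persist callbacks have completed.
class NestedPersistExample extends PersistentActor {
  override def persistenceId: String = "nested-persist-example"

  override def receiveRecover: Receive = { case _ => }

  override def receiveCommand: Receive = {
    case "cmd" =>
      persist("outer-1") { e =>
        sender() ! e
        persist("inner-1") { e2 => sender() ! e2 }
      }
      persist("outer-2") { e =>
        sender() ! e
        persist("inner-2") { e2 => sender() ! e2 }
      }
  }
}
```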

View file

@ -70,7 +70,7 @@ Java
Similarly, to create a custom `Sink` one can register a subclass `InHandler` with the stage `Inlet`.
The `onPush()` callback is used to signal the handler a new element has been pushed to the stage,
and can hence be grabbed and used. `onPush()` can be overridden to provide custom behaviour.
and can hence be grabbed and used. `onPush()` can be overridden to provide custom behavior.
Please note, most Sinks would need to request upstream elements as soon as they are created: this can be
done by calling `pull(inlet)` in the `preStart()` callback.
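A minimal sketch of such a custom `Sink` following the pattern described above; the stage name and the element handling are arbitrary:

```scala
import akka.stream.{ Attributes, Inlet, SinkShape }
import akka.stream.stage.{ GraphStage, GraphStageLogic, InHandler }

// Prints every element it receives; note the initial pull(in) in preStart,
// and the pull(in) after each grab to keep requesting upstream elements.
class PrintlnSink[A] extends GraphStage[SinkShape[A]] {
  val in: Inlet[A] = Inlet("PrintlnSink.in")
  override val shape: SinkShape[A] = SinkShape(in)

  override def createLogic(inheritedAttributes: Attributes): GraphStageLogic =
    new GraphStageLogic(shape) {
      override def preStart(): Unit = pull(in)

      setHandler(in, new InHandler {
        override def onPush(): Unit = {
          println(grab(in))
          pull(in)
        }
      })
    }
}

// Usage: Source(1 to 3).runWith(Sink.fromGraph(new PrintlnSink[Int]))
```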

View file

@ -185,7 +185,7 @@ specification, which Akka is a founding member of.
The user of the library does not have to write any explicit back-pressure handling code — it is built in
and dealt with automatically by all of the provided Akka Streams processing stages. It is possible however to add
explicit buffer stages with overflow strategies that can influence the behaviour of the stream. This is especially important
explicit buffer stages with overflow strategies that can influence the behavior of the stream. This is especially important
in complex processing graphs which may even contain loops (which *must* be treated with very special
care, as explained in @ref:[Graph cycles, liveness and deadlocks](stream-graphs.md#graph-cycles)).
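For example, an explicit buffer with an overflow strategy might be added like this sketch; the size and strategy are arbitrary choices:

```scala
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.Source

// Up to 100 elements are buffered; when the buffer is full, dropHead discards
// the oldest buffered element instead of back-pressuring the producer.
val burstTolerant = Source(1 to 1000).buffer(100, OverflowStrategy.dropHead)
```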

View file

@ -15,7 +15,7 @@ streams, such that the second one is consumed after the first one has completed)
## Constructing Graphs
Graphs are built from simple Flows which serve as the linear connections within the graphs as well as junctions
which serve as fan-in and fan-out points for Flows. Thanks to the junctions having meaningful types based on their behaviour
which serve as fan-in and fan-out points for Flows. Thanks to the junctions having meaningful types based on their behavior
and being explicit elements themselves, they should be rather straightforward to use.
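As a sketch of junctions in use, a broadcast/merge diamond built with the graph DSL; the concrete flows are arbitrary:

```scala
import akka.NotUsed
import akka.stream.ClosedShape
import akka.stream.scaladsl.{ Broadcast, Flow, GraphDSL, Merge, RunnableGraph, Sink, Source }

// Broadcast fans a single stream out to two flows; Merge fans them back in.
val diamond = RunnableGraph.fromGraph(GraphDSL.create() { implicit b: GraphDSL.Builder[NotUsed] =>
  import GraphDSL.Implicits._

  val bcast = b.add(Broadcast[Int](2))
  val merge = b.add(Merge[Int](2))

  Source(1 to 10) ~> bcast
  bcast ~> Flow[Int].map(_ * 2)  ~> merge
  bcast ~> Flow[Int].map(_ + 10) ~> merge
  merge ~> Sink.foreach[Int](println)

  ClosedShape
})
// diamond.run() requires an implicit materializer in scope.
```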
Akka Streams currently provide these junctions (for a detailed list see @ref[stages overview](stages-overview.md)):

View file

@ -1,6 +1,6 @@
# Testing streams
Verifying behaviour of Akka Stream sources, flows and sinks can be done using
Verifying behavior of Akka Stream sources, flows and sinks can be done using
various code patterns and libraries. Here we will discuss testing these
elements using:
@ -124,7 +124,7 @@ Scala
Java
: @@snip [StreamTestKitDocTest.java]($code$/java/jdocs/stream/StreamTestKitDocTest.java) { #test-source-probe }
You can also inject exceptions and test sink behaviour on error conditions.
You can also inject exceptions and test sink behavior on error conditions.
Scala
: @@snip [StreamTestKitDocSpec.scala]($code$/scala/docs/stream/StreamTestKitDocSpec.scala) { #injecting-failure }
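A minimal sketch with a `TestSink` probe, assuming an implicit materializer (2.5-era `ActorMaterializer`) is in scope:

```scala
import akka.stream.scaladsl.Source
import akka.stream.testkit.scaladsl.TestSink

// Attach a probe sink, request demand explicitly, then assert on what arrives.
Source(1 to 3)
  .runWith(TestSink.probe[Int])
  .request(3)
  .expectNext(1, 2, 3)
  .expectComplete()
// A failed upstream would be asserted with .expectError(...) instead of .expectComplete().
```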

View file

@ -819,12 +819,12 @@ restriction is an impediment to unit testing, which led to the inception of the
`TestActorRef`. This special type of reference is designed specifically
for test purposes and allows access to the actor in two ways: either by
obtaining a reference to the underlying actor instance, or by invoking or
querying the actor's behaviour (`receive`). Each one warrants its own
querying the actor's behavior (`receive`). Each one warrants its own
section below.
@@@ note
It is highly recommended to stick to traditional behavioural testing (using messaging
It is highly recommended to stick to traditional behavioral testing (using messaging
to ask the Actor to reply with the state you want to run assertions against),
instead of using `TestActorRef` whenever possible.
@ -833,7 +833,7 @@ instead of using `TestActorRef` whenever possible.
@@@ warning
Due to the synchronous nature of `TestActorRef` it will **not** work with some support
traits that Akka provides as they require asynchronous behaviours to function properly.
traits that Akka provides as they require asynchronous behaviors to function properly.
Examples of traits that do not mix well with test actor refs are @ref:[PersistentActor](persistence.md#event-sourcing)
and @ref:[AtLeastOnceDelivery](persistence.md#at-least-once-delivery) provided by @ref:[Akka Persistence](persistence.md).
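A minimal sketch of both access styles; the `Counter` actor is hypothetical:

```scala
import akka.actor.{ Actor, ActorSystem, Props }
import akka.testkit.TestActorRef

object TestActorRefSketch extends App {
  class Counter extends Actor {
    var count = 0
    def receive = { case "inc" => count += 1 }
  }

  implicit val system: ActorSystem = ActorSystem("TestActorRefSketch")

  val ref = TestActorRef[Counter](Props(new Counter))

  // Sends run on the CallingThreadDispatcher, so state is visible immediately.
  ref ! "inc"
  assert(ref.underlyingActor.count == 1)

  // Or invoke the behavior (`receive`) directly, bypassing the mailbox.
  ref.receive("inc")
  assert(ref.underlyingActor.count == 2)

  system.terminate()
}
```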

View file

@ -1,6 +1,6 @@
# Testing
Testing can either be done asynchronously using a real `ActorSystem` or synchronously on the testing thread using the `BehaviourTestkit`.
Testing can either be done asynchronously using a real `ActorSystem` or synchronously on the testing thread using the `BehaviorTestkit`.
For testing logic in a `Behavior` in isolation, synchronous testing is preferred. For testing interactions between multiple
actors, a more realistic asynchronous test is preferred.
@ -29,7 +29,7 @@ To use Akka TestKit Type, add the module to your project:
scope=test
}
## Synchronous behaviour testing
## Synchronous behavior testing
The following demonstrates how to test:
@ -196,4 +196,4 @@ Scala
: @@snip [ManualTimerSpec.scala]($akka$/akka-actor-typed-tests/src/test/scala/akka/actor/typed/ManualTimerSpec.scala) { #manual-scheduling-simple }
Java
: @@snip [ManualTimerTest.scala]($akka$/akka-actor-typed-tests/src/test/java/akka/actor/typed/ManualTimerTest.java) { #manual-scheduling-simple }

View file

@ -40,7 +40,7 @@ class UnnestedReceives extends Actor {
case 'GoAhead ⇒ //When we get the GoAhead signal we process all our buffered messages/events
queue foreach process
queue.clear
become { //Then we change behaviour to process incoming messages/events as they arrive
become { //Then we change behavior to process incoming messages/events as they arrive
case msg ⇒ process(msg)
}
case msg ⇒ //While we haven't gotten the GoAhead signal, buffer all incoming messages

View file

@ -20,7 +20,7 @@ class DefaultOSGiLogger extends DefaultLogger {
override def receive: Receive = uninitialisedReceive.orElse[Any, Unit](super.receive)
/**
* Behaviour of the logger that waits for its LogService
* Behavior of the logger that waits for its LogService
* @return Receive: Store LogEvent or become initialised
*/
def uninitialisedReceive: Receive = {
@ -47,7 +47,7 @@ class DefaultOSGiLogger extends DefaultLogger {
}
/**
* Behaviour of the Event handler that is setup (has received a LogService)
* Behavior of the Event handler that is setup (has received a LogService)
* @param logService registered OSGi LogService
* @return Receive : Logs LogEvent or go back to the uninitialised state
*/

View file

@ -11,7 +11,7 @@ class LeveldbJournalNoAtomicPersistMultipleEventsSpec extends JournalSpec(
with PluginCleanup {
/**
* Setting to false to test the single message atomic write behaviour of JournalSpec
* Setting to false to test the single message atomic write behavior of JournalSpec
*/
override def supportsAtomicPersistAllOfSeveralEvents = false

View file

@ -33,7 +33,7 @@ object NettyTransportSpec {
}
}
class NettyTransportSpec extends WordSpec with Matchers with BindBehaviour {
class NettyTransportSpec extends WordSpec with Matchers with BindBehavior {
import akka.remote.transport.netty.NettyTransportSpec._
"NettyTransport" should {
@ -119,7 +119,7 @@ class NettyTransportSpec extends WordSpec with Matchers with BindBehaviour {
}
}
trait BindBehaviour { this: WordSpec with Matchers ⇒
trait BindBehavior { this: WordSpec with Matchers ⇒
import akka.remote.transport.netty.NettyTransportSpec._
def theOneWhoKnowsTheDifferenceBetweenBoundAndRemotingAddress(proto: String) = {

View file

@ -191,14 +191,14 @@ object StageActorRefSpec {
override def preStart(): Unit = {
pull(in)
probe ! getStageActor(behaviour).ref
probe ! getStageActor(behavior).ref
}
def behaviour(m: (ActorRef, Any)): Unit = {
def behavior(m: (ActorRef, Any)): Unit = {
m match {
case (sender, Add(n)) ⇒ sum += n
case (sender, PullNow) ⇒ pull(in)
case (sender, CallInitStageActorRef) ⇒ sender ! getStageActor(behaviour).ref
case (sender, CallInitStageActorRef) ⇒ sender ! getStageActor(behavior).ref
case (sender, BecomeStringEcho) ⇒
getStageActor {
case (theSender, msg) ⇒ theSender ! msg.toString

View file

@ -86,7 +86,7 @@ final case class InvalidSequenceNumberException(expectedSeqNr: Long, gotSeqNr: L
* the initial partner can send demand/messages to the other side accidentally.
*
This exception is thrown when a message is received from a non-partner actor,
which could mean a bug or some actively malicious behaviour from the other side.
which could mean a bug or some actively malicious behavior from the other side.
*
This is not meant as a security feature, but rather as a plain sanity check.
*/

View file

@ -2079,7 +2079,7 @@ final class Flow[-In, +Out, +Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends
* bucket accumulates enough tokens. Elements that cost more than the allowed burst will be delayed proportionally
* to their cost minus available tokens, meeting the target rate. Bucket is full when stream just materialized and started.
*
* Parameter `mode` manages behaviour when upstream is faster than throttle rate:
* Parameter `mode` manages behavior when upstream is faster than throttle rate:
* - [[akka.stream.ThrottleMode.Shaping]] makes pauses before emitting messages to meet throttle rate
* - [[akka.stream.ThrottleMode.Enforcing]] fails with exception when upstream is faster than throttle rate
*
@ -2121,7 +2121,7 @@ final class Flow[-In, +Out, +Mat](delegate: scaladsl.Flow[In, Out, Mat]) extends
* bucket accumulates enough tokens. Elements that cost more than the allowed burst will be delayed proportionally
* to their cost minus available tokens, meeting the target rate. Bucket is full when stream just materialized and started.
*
* Parameter `mode` manages behaviour when upstream is faster than throttle rate:
* Parameter `mode` manages behavior when upstream is faster than throttle rate:
* - [[akka.stream.ThrottleMode.Shaping]] makes pauses before emitting messages to meet throttle rate
* - [[akka.stream.ThrottleMode.Enforcing]] fails with exception when upstream is faster than throttle rate. Enforcing
* cannot emit elements that cost more than the maximumBurst
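A short sketch of the two modes; the rates are arbitrary:

```scala
import akka.stream.ThrottleMode
import akka.stream.scaladsl.Source
import scala.concurrent.duration._

// Shaping delays elements to respect 10 elements/second;
// Enforcing fails the stream if the upstream exceeds that rate.
val shaped   = Source(1 to 100).throttle(10, 1.second, maximumBurst = 10, ThrottleMode.Shaping)
val enforced = Source(1 to 100).throttle(10, 1.second, maximumBurst = 10, ThrottleMode.Enforcing)
```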

View file

@ -20,7 +20,7 @@ object Framing {
* If there are buffered bytes (an incomplete frame) when the input stream finishes and ''allowTruncation'' is set to
* false then this Flow will fail the stream reporting a truncated frame.
*
* Default truncation behaviour is: when the last frame being decoded contains no valid delimiter this Flow
* Default truncation behavior is: when the last frame being decoded contains no valid delimiter this Flow
* fails the stream instead of returning a truncated frame.
*
* @param delimiter The byte sequence to be treated as the end of the frame.
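For example, a line-based framing stage might be set up like this sketch; the delimiter and maximum frame length are arbitrary:

```scala
import akka.stream.scaladsl.{ Framing, Source }
import akka.util.ByteString

// Splits incoming bytes on '\n'. With allowTruncation = false (the default
// behavior described above) a trailing partial frame fails the stream.
val lines = Source.single(ByteString("a\nb\nc\n"))
  .via(Framing.delimiter(ByteString("\n"), maximumFrameLength = 1024, allowTruncation = false))
  .map(_.utf8String)
```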

View file

@ -2144,7 +2144,7 @@ final class Source[+Out, +Mat](delegate: scaladsl.Source[Out, Mat]) extends Grap
* bucket accumulates enough tokens. Elements that cost more than the allowed burst will be delayed proportionally
* to their cost minus available tokens, meeting the target rate. Bucket is full when stream just materialized and started.
*
* Parameter `mode` manages behaviour when upstream is faster than throttle rate:
* Parameter `mode` manages behavior when upstream is faster than throttle rate:
* - [[akka.stream.ThrottleMode.Shaping]] makes pauses before emitting messages to meet throttle rate
* - [[akka.stream.ThrottleMode.Enforcing]] fails with exception when upstream is faster than throttle rate
*
@ -2186,7 +2186,7 @@ final class Source[+Out, +Mat](delegate: scaladsl.Source[Out, Mat]) extends Grap
* bucket accumulates enough tokens. Elements that cost more than the allowed burst will be delayed proportionally
* to their cost minus available tokens, meeting the target rate. Bucket is full when stream just materialized and started.
*
* Parameter `mode` manages behaviour when upstream is faster than throttle rate:
* Parameter `mode` manages behavior when upstream is faster than throttle rate:
* - [[akka.stream.ThrottleMode.Shaping]] makes pauses before emitting messages to meet throttle rate
* - [[akka.stream.ThrottleMode.Enforcing]] fails with exception when upstream is faster than throttle rate. Enforcing
* cannot emit elements that cost more than the maximumBurst

View file

@ -1363,7 +1363,7 @@ class SubFlow[-In, +Out, +Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flo
* bucket accumulates enough tokens. Elements that cost more than the allowed burst will be delayed proportionally
* to their cost minus available tokens, meeting the target rate. Bucket is full when stream just materialized and started.
*
* Parameter `mode` manages behaviour when upstream is faster than throttle rate:
* Parameter `mode` manages behavior when upstream is faster than throttle rate:
* - [[akka.stream.ThrottleMode.Shaping]] makes pauses before emitting messages to meet throttle rate
* - [[akka.stream.ThrottleMode.Enforcing]] fails with exception when upstream is faster than throttle rate
*
@ -1405,7 +1405,7 @@ class SubFlow[-In, +Out, +Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Flo
* bucket accumulates enough tokens. Elements that cost more than the allowed burst will be delayed proportionally
* to their cost minus available tokens, meeting the target rate. Bucket is full when stream just materialized and started.
*
* Parameter `mode` manages behaviour when upstream is faster than throttle rate:
* Parameter `mode` manages behavior when upstream is faster than throttle rate:
* - [[akka.stream.ThrottleMode.Shaping]] makes pauses before emitting messages to meet throttle rate
* - [[akka.stream.ThrottleMode.Enforcing]] fails with exception when upstream is faster than throttle rate. Enforcing
* cannot emit elements that cost more than the maximumBurst

View file

@ -1356,7 +1356,7 @@ class SubSource[+Out, +Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source
* bucket accumulates enough tokens. Elements that cost more than the allowed burst will be delayed proportionally
* to their cost minus available tokens, meeting the target rate. Bucket is full when stream just materialized and started.
*
* Parameter `mode` manages behaviour when upstream is faster than throttle rate:
* Parameter `mode` manages behavior when upstream is faster than throttle rate:
* - [[akka.stream.ThrottleMode.Shaping]] makes pauses before emitting messages to meet throttle rate
* - [[akka.stream.ThrottleMode.Enforcing]] fails with exception when upstream is faster than throttle rate
*
@ -1398,7 +1398,7 @@ class SubSource[+Out, +Mat](delegate: scaladsl.SubFlow[Out, Mat, scaladsl.Source
* bucket accumulates enough tokens. Elements that cost more than the allowed burst will be delayed proportionally
* to their cost minus available tokens, meeting the target rate. Bucket is full when stream just materialized and started.
*
* Parameter `mode` manages behaviour when upstream is faster than throttle rate:
* Parameter `mode` manages behavior when upstream is faster than throttle rate:
* - [[akka.stream.ThrottleMode.Shaping]] makes pauses before emitting messages to meet throttle rate
* - [[akka.stream.ThrottleMode.Enforcing]] fails with exception when upstream is faster than throttle rate. Enforcing
* cannot emit elements that cost more than the maximumBurst

View file

@ -1835,7 +1835,7 @@ trait FlowOps[+Out, +Mat] {
* bucket accumulates enough tokens. Elements that cost more than the allowed burst will be delayed proportionally
* to their cost minus available tokens, meeting the target rate. Bucket is full when stream just materialized and started.
*
* Parameter `mode` manages behaviour when upstream is faster than throttle rate:
* Parameter `mode` manages behavior when upstream is faster than throttle rate:
* - [[akka.stream.ThrottleMode.Shaping]] makes pauses before emitting messages to meet throttle rate
* - [[akka.stream.ThrottleMode.Enforcing]] fails with exception when upstream is faster than throttle rate. Enforcing
* cannot emit elements that cost more than the maximumBurst
@ -1877,7 +1877,7 @@ trait FlowOps[+Out, +Mat] {
* bucket accumulates enough tokens. Elements that cost more than the allowed burst will be delayed proportionally
* to their cost minus available tokens, meeting the target rate. Bucket is full when stream just materialized and started.
*
* Parameter `mode` manages behaviour when upstream is faster than throttle rate:
* Parameter `mode` manages behavior when upstream is faster than throttle rate:
* - [[akka.stream.ThrottleMode.Shaping]] makes pauses before emitting messages to meet throttle rate
* - [[akka.stream.ThrottleMode.Enforcing]] fails with exception when upstream is faster than throttle rate. Enforcing
* cannot emit elements that cost more than the maximumBurst

View file

@ -103,7 +103,7 @@ private object TimerMessages {
object GraphStageLogic {
final case class StageActorRefNotInitializedException()
extends RuntimeException("You must first call getStageActor, to initialize the Actors behaviour")
extends RuntimeException("You must first call getStageActor, to initialize the Actors behavior")
/**
* Input handler that terminates the stage upon receiving completion.
@ -219,7 +219,7 @@ object GraphStageLogic {
def ref: ActorRef = functionRef
@volatile
private[this] var behaviour = initialReceive
private[this] var behavior = initialReceive
/** INTERNAL API */
private[akka] def internalReceive(pack: (ActorRef, Any)): Unit = {
@ -227,18 +227,18 @@ object GraphStageLogic {
case Terminated(ref) ⇒
if (functionRef.isWatching(ref)) {
functionRef.unwatch(ref)
behaviour(pack)
behavior(pack)
}
case _ ⇒ behaviour(pack)
case _ ⇒ behavior(pack)
}
}
/**
* Special `become` allowing to swap the behaviour of this StageActorRef.
* Special `become` allowing to swap the behavior of this StageActorRef.
* Unbecome is not available.
*/
def become(receive: StageActorRef.Receive): Unit = {
behaviour = receive
behavior = receive
}
def stop(): Unit = cell.removeFunctionRef(functionRef)

View file

@ -35,7 +35,7 @@ object Effect {
abstract class SpawnedEffect extends Effect
@SerialVersionUID(1L) final case class Spawned(behavior: Behavior[_], childName: String, props: Props = Props.empty) extends SpawnedEffect
@SerialVersionUID(1L) final case class SpawnedAnonymous(behaviour: Behavior[_], props: Props = Props.empty) extends SpawnedEffect
@SerialVersionUID(1L) final case class SpawnedAnonymous(behavior: Behavior[_], props: Props = Props.empty) extends SpawnedEffect
@SerialVersionUID(1L) final case object SpawnedAdapter extends SpawnedEffect
@SerialVersionUID(1L) final case class Stopped(childName: String) extends Effect
@SerialVersionUID(1L) final case class Watched[T](other: ActorRef[T]) extends Effect

View file

@ -111,9 +111,9 @@ trait TestKitBase {
def spawn[T](behavior: Behavior[T], name: String, props: Props): ActorRef[T] =
Await.result(system ? (SpawnActor(name, behavior, _, props)), timeoutDuration)
def systemActor[T](behaviour: Behavior[T], name: String): ActorRef[T] =
Await.result(system.systemActorOf(behaviour, name), timeoutDuration)
def systemActor[T](behavior: Behavior[T], name: String): ActorRef[T] =
Await.result(system.systemActorOf(behavior, name), timeoutDuration)
def systemActor[T](behaviour: Behavior[T]): ActorRef[T] =
Await.result(system.systemActorOf(behaviour, childName.next()), timeoutDuration)
def systemActor[T](behavior: Behavior[T]): ActorRef[T] =
Await.result(system.systemActorOf(behavior, childName.next()), timeoutDuration)
}