dotty phase 2: scalafix ExplicitNonNullaryApply (#28949)
* scalafix ExplicitNonNullaryApply prepare
  + Temporarily use com.sandinh:sbt-scalafix because of scalacenter/scalafix#1098
  + Add the ExplicitNonNullaryApply rule to .scalafix.conf
  + Manually fix one NonNullaryApply case in DeathWatchSpec that causes `fixall` to fail, because the ExplicitNonNullaryApply rule incorrectly rewrites `context unbecome` to `context unbecome()` instead of `context.unbecome()`
* scalafix ExplicitNonNullaryApply fix, by enabling only the ExplicitNonNullaryApply rule in .scalafix.conf and then running:
  ```
  % sbt -Dakka.build.scalaVersion=2.13.1
  > fixall
  ```
* scalafmtAll
* Revert to ch.epfl.scala:sbt-scalafix

Co-authored-by: Bùi Việt Thành <thanhbv@sandinh.net>
parent 4ba835d328
commit ea7205eaf7

266 changed files with 929 additions and 919 deletions
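For readers unfamiliar with the rule: ExplicitNonNullaryApply inserts the empty argument list at call sites of methods declared with `()`, which Scala 2 auto-applies but dotty/Scala 3 rejects. A minimal sketch of the kind of rewrite applied throughout the diff below (hypothetical code, not taken from this commit):

```scala
class Counter {
  private var n = 0
  def increment(): Unit = n += 1 // declared with an empty parameter list ("non-nullary")
  def value: Int = n             // declared without parens; left untouched by the rule
}

object Before { def bump(c: Counter): Unit = c.increment }   // auto-application, Scala 2 only
object After  { def bump(c: Counter): Unit = c.increment() } // what ExplicitNonNullaryApply produces
```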
@@ -2,6 +2,7 @@
 rules = [
 RemoveUnused
 ExplicitResultTypes
+"github:ohze/scalafix-rules/ExplicitNonNullaryApply"
 "github:ohze/scalafix-rules/ConstructorProcedureSyntax"
 "github:ohze/scalafix-rules/FinalObject"
 "github:ohze/scalafix-rules/Any2StringAdd"

@@ -90,7 +90,7 @@ import org.slf4j.LoggerFactory
 override def scheduler: Scheduler = throw new UnsupportedOperationException("no scheduler")

-private val terminationPromise = Promise[Done]
+private val terminationPromise = Promise[Done]()
 override def terminate(): Unit = terminationPromise.trySuccess(Done)
 override def whenTerminated: Future[Done] = terminationPromise.future
 override def getWhenTerminated: CompletionStage[Done] = FutureConverters.toJava(whenTerminated)

@@ -128,7 +128,7 @@ private[akka] final class BehaviorTestKitImpl[T](_path: ActorPath, _initialBehav
 } catch handleException
 }

-override def runOne(): Unit = run(selfInbox.receiveMessage())
+override def runOne(): Unit = run(selfInbox().receiveMessage())

 override def signal(signal: Signal): Unit = {
 try {

@@ -147,7 +147,7 @@ private[akka] final class FunctionRef[-T](override val path: ActorPath, send: (T
 new FunctionRef[U](p, (message, _) => {
 val m = f(message);
 if (m != null) {
-selfInbox.ref ! m; i.selfInbox.ref ! message
+selfInbox.ref ! m; i.selfInbox().ref ! message
 }
 })
 }

@@ -75,7 +75,7 @@ abstract class BehaviorTestKit[T] {
 /**
 * The self reference of the actor living inside this testkit.
 */
-def getRef(): ActorRef[T] = selfInbox.getRef()
+def getRef(): ActorRef[T] = selfInbox().getRef()

 /**
 * Requests all the effects. The effects are consumed, subsequent calls will only

@@ -72,7 +72,7 @@ trait BehaviorTestKit[T] {
 /**
 * The self reference of the actor living inside this testkit.
 */
-def ref: ActorRef[T] = selfInbox.ref
+def ref: ActorRef[T] = selfInbox().ref

 /**
 * Requests all the effects. The effects are consumed, subsequent calls will only

@@ -148,7 +148,7 @@ class TestProbeSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike with
 val probe = createTestProbe[EventT]()
 eventsT(10).forall { e =>
 probe.ref ! e
-probe.receiveMessage == e
+probe.receiveMessage() == e
 } should ===(true)

 probe.expectNoMessage()
@@ -47,22 +47,22 @@ class ActorConfigurationVerificationSpec
 "An Actor configured with a BalancingDispatcher" must {
 "fail verification with a ConfigurationException if also configured with a RoundRobinPool" in {
 intercept[ConfigurationException] {
-system.actorOf(RoundRobinPool(2).withDispatcher("balancing-dispatcher").props(Props[TestActor]))
+system.actorOf(RoundRobinPool(2).withDispatcher("balancing-dispatcher").props(Props[TestActor]()))
 }
 }
 "fail verification with a ConfigurationException if also configured with a BroadcastPool" in {
 intercept[ConfigurationException] {
-system.actorOf(BroadcastPool(2).withDispatcher("balancing-dispatcher").props(Props[TestActor]))
+system.actorOf(BroadcastPool(2).withDispatcher("balancing-dispatcher").props(Props[TestActor]()))
 }
 }
 "fail verification with a ConfigurationException if also configured with a RandomPool" in {
 intercept[ConfigurationException] {
-system.actorOf(RandomPool(2).withDispatcher("balancing-dispatcher").props(Props[TestActor]))
+system.actorOf(RandomPool(2).withDispatcher("balancing-dispatcher").props(Props[TestActor]()))
 }
 }
 "fail verification with a ConfigurationException if also configured with a SmallestMailboxPool" in {
 intercept[ConfigurationException] {
-system.actorOf(SmallestMailboxPool(2).withDispatcher("balancing-dispatcher").props(Props[TestActor]))
+system.actorOf(SmallestMailboxPool(2).withDispatcher("balancing-dispatcher").props(Props[TestActor]()))
 }
 }
 "fail verification with a ConfigurationException if also configured with a ScatterGatherFirstCompletedPool" in {

@@ -70,33 +70,33 @@ class ActorConfigurationVerificationSpec
 system.actorOf(
 ScatterGatherFirstCompletedPool(nrOfInstances = 2, within = 2 seconds)
 .withDispatcher("balancing-dispatcher")
-.props(Props[TestActor]))
+.props(Props[TestActor]()))
 }
 }
 "not fail verification with a ConfigurationException also not configured with a Router" in {
-system.actorOf(Props[TestActor].withDispatcher("balancing-dispatcher"))
+system.actorOf(Props[TestActor]().withDispatcher("balancing-dispatcher"))
 }
 }
 "An Actor configured with a non-balancing dispatcher" must {
 "not fail verification with a ConfigurationException if also configured with a Router" in {
-system.actorOf(RoundRobinPool(2).props(Props[TestActor].withDispatcher("pinned-dispatcher")))
+system.actorOf(RoundRobinPool(2).props(Props[TestActor]().withDispatcher("pinned-dispatcher")))
 }

 "fail verification if the dispatcher cannot be found" in {
 intercept[ConfigurationException] {
-system.actorOf(Props[TestActor].withDispatcher("does not exist"))
+system.actorOf(Props[TestActor]().withDispatcher("does not exist"))
 }
 }

 "fail verification if the dispatcher cannot be found for the head of a router" in {
 intercept[ConfigurationException] {
-system.actorOf(RoundRobinPool(1, routerDispatcher = "does not exist").props(Props[TestActor]))
+system.actorOf(RoundRobinPool(1, routerDispatcher = "does not exist").props(Props[TestActor]()))
 }
 }

 "fail verification if the dispatcher cannot be found for the routees of a router" in {
 intercept[ConfigurationException] {
-system.actorOf(RoundRobinPool(1).props(Props[TestActor].withDispatcher("does not exist")))
+system.actorOf(RoundRobinPool(1).props(Props[TestActor]().withDispatcher("does not exist")))
 }
 }
 }

@@ -218,9 +218,9 @@ class ActorCreationPerfSpec

 "Actor creation with actorOf" must {

-registerTests("Props[EmptyActor] with new Props", () => Props[EmptyActor])
+registerTests("Props[EmptyActor] with new Props", () => Props[EmptyActor]())

-val props1 = Props[EmptyActor]
+val props1 = Props[EmptyActor]()
 registerTests("Props[EmptyActor] with same Props", () => props1)

 registerTests("Props(new EmptyActor) new", () => { Props(new EmptyActor) })
@@ -242,22 +242,22 @@ class ActorMailboxSpec(conf: Config) extends AkkaSpec(conf) with DefaultTimeout
 "An Actor" must {

 "get an unbounded message queue by default" in {
-checkMailboxQueue(Props[QueueReportingActor], "default-default", UnboundedMailboxTypes)
+checkMailboxQueue(Props[QueueReportingActor](), "default-default", UnboundedMailboxTypes)
 }

 "get an unbounded deque message queue when it is only configured on the props" in {
 checkMailboxQueue(
-Props[QueueReportingActor].withMailbox("akka.actor.mailbox.unbounded-deque-based"),
+Props[QueueReportingActor]().withMailbox("akka.actor.mailbox.unbounded-deque-based"),
 "default-override-from-props",
 UnboundedDeqMailboxTypes)
 }

 "get an bounded message queue when it's only configured with RequiresMailbox" in {
-checkMailboxQueue(Props[BoundedQueueReportingActor], "default-override-from-trait", BoundedMailboxTypes)
+checkMailboxQueue(Props[BoundedQueueReportingActor](), "default-override-from-trait", BoundedMailboxTypes)
 }

 "get an unbounded deque message queue when it's only mixed with Stash" in {
-checkMailboxQueue(Props[StashQueueReportingActor], "default-override-from-stash", UnboundedDeqMailboxTypes)
+checkMailboxQueue(Props[StashQueueReportingActor](), "default-override-from-stash", UnboundedDeqMailboxTypes)
 checkMailboxQueue(Props(new StashQueueReportingActor), "default-override-from-stash2", UnboundedDeqMailboxTypes)
 checkMailboxQueue(
 Props(classOf[StashQueueReportingActorWithParams], 17, "hello"),

@@ -270,99 +270,99 @@ class ActorMailboxSpec(conf: Config) extends AkkaSpec(conf) with DefaultTimeout
 }

 "get a bounded message queue when it's configured as mailbox" in {
-checkMailboxQueue(Props[QueueReportingActor], "default-bounded", BoundedMailboxTypes)
+checkMailboxQueue(Props[QueueReportingActor](), "default-bounded", BoundedMailboxTypes)
 }

 "get an unbounded deque message queue when it's configured as mailbox" in {
-checkMailboxQueue(Props[QueueReportingActor], "default-unbounded-deque", UnboundedDeqMailboxTypes)
+checkMailboxQueue(Props[QueueReportingActor](), "default-unbounded-deque", UnboundedDeqMailboxTypes)
 }

 "get a bounded control aware message queue when it's configured as mailbox" in {
-checkMailboxQueue(Props[QueueReportingActor], "default-bounded-control-aware", BoundedControlAwareMailboxTypes)
+checkMailboxQueue(Props[QueueReportingActor](), "default-bounded-control-aware", BoundedControlAwareMailboxTypes)
 }

 "get an unbounded control aware message queue when it's configured as mailbox" in {
 checkMailboxQueue(
-Props[QueueReportingActor],
+Props[QueueReportingActor](),
 "default-unbounded-control-aware",
 UnboundedControlAwareMailboxTypes)
 }

 "get an bounded control aware message queue when it's only configured with RequiresMailbox" in {
 checkMailboxQueue(
-Props[BoundedControlAwareQueueReportingActor],
+Props[BoundedControlAwareQueueReportingActor](),
 "default-override-from-trait-bounded-control-aware",
 BoundedControlAwareMailboxTypes)
 }

 "get an unbounded control aware message queue when it's only configured with RequiresMailbox" in {
 checkMailboxQueue(
-Props[UnboundedControlAwareQueueReportingActor],
+Props[UnboundedControlAwareQueueReportingActor](),
 "default-override-from-trait-unbounded-control-aware",
 UnboundedControlAwareMailboxTypes)
 }

 "fail to create actor when an unbounded dequeu message queue is configured as mailbox overriding RequestMailbox" in {
 intercept[ConfigurationException](
-system.actorOf(Props[BoundedQueueReportingActor], "default-unbounded-deque-override-trait"))
+system.actorOf(Props[BoundedQueueReportingActor](), "default-unbounded-deque-override-trait"))
 }

 "get an unbounded message queue when defined in dispatcher" in {
-checkMailboxQueue(Props[QueueReportingActor], "unbounded-default", UnboundedMailboxTypes)
+checkMailboxQueue(Props[QueueReportingActor](), "unbounded-default", UnboundedMailboxTypes)
 }

 "fail to create actor when an unbounded message queue is defined in dispatcher overriding RequestMailbox" in {
 intercept[ConfigurationException](
-system.actorOf(Props[BoundedQueueReportingActor], "unbounded-default-override-trait"))
+system.actorOf(Props[BoundedQueueReportingActor](), "unbounded-default-override-trait"))
 }

 "get a bounded message queue when it's configured as mailbox overriding unbounded in dispatcher" in {
-checkMailboxQueue(Props[QueueReportingActor], "unbounded-bounded", BoundedMailboxTypes)
+checkMailboxQueue(Props[QueueReportingActor](), "unbounded-bounded", BoundedMailboxTypes)
 }

 "get a bounded message queue when defined in dispatcher" in {
-checkMailboxQueue(Props[QueueReportingActor], "bounded-default", BoundedMailboxTypes)
+checkMailboxQueue(Props[QueueReportingActor](), "bounded-default", BoundedMailboxTypes)
 }

 "get a bounded message queue with 0 push timeout when defined in dispatcher" in {
 val q = checkMailboxQueue(
-Props[QueueReportingActor],
+Props[QueueReportingActor](),
 "default-bounded-mailbox-with-zero-pushtimeout",
 BoundedMailboxTypes)
 q.asInstanceOf[BoundedMessageQueueSemantics].pushTimeOut should ===(Duration.Zero)
 }

 "get an unbounded message queue when it's configured as mailbox overriding bounded in dispatcher" in {
-checkMailboxQueue(Props[QueueReportingActor], "bounded-unbounded", UnboundedMailboxTypes)
+checkMailboxQueue(Props[QueueReportingActor](), "bounded-unbounded", UnboundedMailboxTypes)
 }

 "get an unbounded message queue overriding configuration on the props" in {
 checkMailboxQueue(
-Props[QueueReportingActor].withMailbox("akka.actor.mailbox.unbounded-deque-based"),
+Props[QueueReportingActor]().withMailbox("akka.actor.mailbox.unbounded-deque-based"),
 "bounded-unbounded-override-props",
 UnboundedMailboxTypes)
 }

 "get a bounded deque-based message queue if configured and required" in {
 checkMailboxQueue(
-Props[StashQueueReportingActor],
+Props[StashQueueReportingActor](),
 "bounded-deque-requirements-configured",
 BoundedDeqMailboxTypes)
 }

 "fail with a unbounded deque-based message queue if configured and required" in {
 intercept[ConfigurationException](
-system.actorOf(Props[StashQueueReportingActor], "bounded-deque-require-unbounded-configured"))
+system.actorOf(Props[StashQueueReportingActor](), "bounded-deque-require-unbounded-configured"))
 }

 "fail with a bounded deque-based message queue if not configured" in {
 intercept[ConfigurationException](
-system.actorOf(Props[StashQueueReportingActor], "bounded-deque-require-unbounded-unconfigured"))
+system.actorOf(Props[StashQueueReportingActor](), "bounded-deque-require-unbounded-unconfigured"))
 }

 "get a bounded deque-based message queue if configured and required with Props" in {
 checkMailboxQueue(
-Props[StashQueueReportingActor]
+Props[StashQueueReportingActor]()
 .withDispatcher("requiring-bounded-dispatcher")
 .withMailbox("akka.actor.mailbox.bounded-deque-based"),
 "bounded-deque-requirements-configured-props",

@@ -372,7 +372,7 @@ class ActorMailboxSpec(conf: Config) extends AkkaSpec(conf) with DefaultTimeout
 "fail with a unbounded deque-based message queue if configured and required with Props" in {
 intercept[ConfigurationException](
 system.actorOf(
-Props[StashQueueReportingActor]
+Props[StashQueueReportingActor]()
 .withDispatcher("requiring-bounded-dispatcher")
 .withMailbox("akka.actor.mailbox.unbounded-deque-based"),
 "bounded-deque-require-unbounded-configured-props"))

@@ -381,13 +381,13 @@ class ActorMailboxSpec(conf: Config) extends AkkaSpec(conf) with DefaultTimeout
 "fail with a bounded deque-based message queue if not configured with Props" in {
 intercept[ConfigurationException](
 system.actorOf(
-Props[StashQueueReportingActor].withDispatcher("requiring-bounded-dispatcher"),
+Props[StashQueueReportingActor]().withDispatcher("requiring-bounded-dispatcher"),
 "bounded-deque-require-unbounded-unconfigured-props"))
 }

 "get a bounded deque-based message queue if configured and required with Props (dispatcher)" in {
 checkMailboxQueue(
-Props[StashQueueReportingActor].withDispatcher("requiring-bounded-dispatcher"),
+Props[StashQueueReportingActor]().withDispatcher("requiring-bounded-dispatcher"),
 "bounded-deque-requirements-configured-props-disp",
 BoundedDeqMailboxTypes)
 }

@@ -395,20 +395,20 @@ class ActorMailboxSpec(conf: Config) extends AkkaSpec(conf) with DefaultTimeout
 "fail with a unbounded deque-based message queue if configured and required with Props (dispatcher)" in {
 intercept[ConfigurationException](
 system.actorOf(
-Props[StashQueueReportingActor].withDispatcher("requiring-bounded-dispatcher"),
+Props[StashQueueReportingActor]().withDispatcher("requiring-bounded-dispatcher"),
 "bounded-deque-require-unbounded-configured-props-disp"))
 }

 "fail with a bounded deque-based message queue if not configured with Props (dispatcher)" in {
 intercept[ConfigurationException](
 system.actorOf(
-Props[StashQueueReportingActor].withDispatcher("requiring-bounded-dispatcher"),
+Props[StashQueueReportingActor]().withDispatcher("requiring-bounded-dispatcher"),
 "bounded-deque-require-unbounded-unconfigured-props-disp"))
 }

 "get a bounded deque-based message queue if configured and required with Props (mailbox)" in {
 checkMailboxQueue(
-Props[StashQueueReportingActor].withMailbox("akka.actor.mailbox.bounded-deque-based"),
+Props[StashQueueReportingActor]().withMailbox("akka.actor.mailbox.bounded-deque-based"),
 "bounded-deque-requirements-configured-props-mail",
 BoundedDeqMailboxTypes)
 }

@@ -416,32 +416,32 @@ class ActorMailboxSpec(conf: Config) extends AkkaSpec(conf) with DefaultTimeout
 "fail with a unbounded deque-based message queue if configured and required with Props (mailbox)" in {
 intercept[ConfigurationException](
 system.actorOf(
-Props[StashQueueReportingActor].withMailbox("akka.actor.mailbox.unbounded-deque-based"),
+Props[StashQueueReportingActor]().withMailbox("akka.actor.mailbox.unbounded-deque-based"),
 "bounded-deque-require-unbounded-configured-props-mail"))
 }

 "fail with a bounded deque-based message queue if not configured with Props (mailbox)" in {
 intercept[ConfigurationException](
-system.actorOf(Props[StashQueueReportingActor], "bounded-deque-require-unbounded-unconfigured-props-mail"))
+system.actorOf(Props[StashQueueReportingActor](), "bounded-deque-require-unbounded-unconfigured-props-mail"))
 }

 "get an unbounded message queue with a balancing dispatcher" in {
 checkMailboxQueue(
-Props[QueueReportingActor].withDispatcher("balancing-dispatcher"),
+Props[QueueReportingActor]().withDispatcher("balancing-dispatcher"),
 "unbounded-balancing",
 UnboundedMailboxTypes)
 }

 "get a bounded message queue with a balancing bounded dispatcher" in {
 checkMailboxQueue(
-Props[QueueReportingActor].withDispatcher("balancing-bounded-dispatcher"),
+Props[QueueReportingActor]().withDispatcher("balancing-bounded-dispatcher"),
 "bounded-balancing",
 BoundedMailboxTypes)
 }

 "get a bounded message queue with a requiring balancing bounded dispatcher" in {
 checkMailboxQueue(
-Props[QueueReportingActor].withDispatcher("requiring-balancing-bounded-dispatcher"),
+Props[QueueReportingActor]().withDispatcher("requiring-balancing-bounded-dispatcher"),
 "requiring-bounded-balancing",
 BoundedMailboxTypes)
 }
@@ -25,11 +25,11 @@ object ActorRefSpec {
 def receive = {
 case "complexRequest" => {
 replyTo = sender()
-val worker = context.actorOf(Props[WorkerActor])
+val worker = context.actorOf(Props[WorkerActor]())
 worker ! "work"
 }
 case "complexRequest2" =>
-val worker = context.actorOf(Props[WorkerActor])
+val worker = context.actorOf(Props[WorkerActor]())
 worker ! ReplyTo(sender())
 case "workDone" => replyTo ! "complexReply"
 case "simpleRequest" => sender() ! "simpleReply"

@@ -278,7 +278,7 @@ class ActorRefSpec extends AkkaSpec("""
 }

 "be serializable using Java Serialization on local node" in {
-val a = system.actorOf(Props[InnerActor])
+val a = system.actorOf(Props[InnerActor]())
 val esys = system.asInstanceOf[ExtendedActorSystem]

 import java.io._

@@ -309,7 +309,7 @@ class ActorRefSpec extends AkkaSpec("""
 }

 "throw an exception on deserialize if no system in scope" in {
-val a = system.actorOf(Props[InnerActor])
+val a = system.actorOf(Props[InnerActor]())

 import java.io._

@@ -337,7 +337,7 @@ class ActorRefSpec extends AkkaSpec("""
 val out = new ObjectOutputStream(baos)

 val sysImpl = system.asInstanceOf[ActorSystemImpl]
-val ref = system.actorOf(Props[ReplyActor], "non-existing")
+val ref = system.actorOf(Props[ReplyActor](), "non-existing")
 val serialized = SerializedActorRef(ref)

 out.writeObject(serialized)

@@ -381,7 +381,7 @@ class ActorRefSpec extends AkkaSpec("""

 "support reply via sender" in {
 val latch = new TestLatch(4)
-val serverRef = system.actorOf(Props[ReplyActor])
+val serverRef = system.actorOf(Props[ReplyActor]())
 val clientRef = system.actorOf(Props(new SenderActor(serverRef, latch)))

 clientRef ! "complex"

@@ -391,7 +391,7 @@ class ActorRefSpec extends AkkaSpec("""

 Await.ready(latch, timeout.duration)

-latch.reset
+latch.reset()

 clientRef ! "complex2"
 clientRef ! "simple"
@@ -19,7 +19,7 @@ object ActorSelectionSpec {
 final case class GetSender(to: ActorRef) extends Query
 final case class Forward(path: String, msg: Any) extends Query

-val p = Props[Node]
+val p = Props[Node]()

 class Node extends Actor {
 def receive = {

@@ -31,7 +31,7 @@ object ActorSystemSpec {
 case n: Int =>
 master = sender()
 terminaters = Set() ++ (for (_ <- 1 to n) yield {
-val man = context.watch(context.system.actorOf(Props[Terminater]))
+val man = context.watch(context.system.actorOf(Props[Terminater]()))
 man ! "run"
 man
 })

@@ -142,7 +142,7 @@ class ActorSystemSpec extends AkkaSpec(ActorSystemSpec.config) with ImplicitSend
 ActorSystem("LogDeadLetters", ConfigFactory.parseString("akka.loglevel=INFO").withFallback(AkkaSpec.testConf))
 try {
 val probe = TestProbe()(sys)
-val a = sys.actorOf(Props[ActorSystemSpec.Terminater])
+val a = sys.actorOf(Props[ActorSystemSpec.Terminater]())
 probe.watch(a)
 a.tell("run", probe.ref)
 probe.expectTerminated(a)

@@ -166,7 +166,7 @@ class ActorSystemSpec extends AkkaSpec(ActorSystemSpec.config) with ImplicitSend
 ActorSystem("LogDeadLetters", ConfigFactory.parseString("akka.loglevel=INFO").withFallback(AkkaSpec.testConf))
 try {
 val probe = TestProbe()(sys)
-val a = sys.actorOf(Props[ActorSystemSpec.Terminater])
+val a = sys.actorOf(Props[ActorSystemSpec.Terminater]())
 probe.watch(a)
 a.tell("run", probe.ref)
 probe.expectTerminated(a)

@@ -264,7 +264,7 @@ class ActorSystemSpec extends AkkaSpec(ActorSystemSpec.config) with ImplicitSend
 "reliably create waves of actors" in {
 import system.dispatcher
 implicit val timeout: Timeout = Timeout((20 seconds).dilated)
-val waves = for (_ <- 1 to 3) yield system.actorOf(Props[ActorSystemSpec.Waves]) ? 50000
+val waves = for (_ <- 1 to 3) yield system.actorOf(Props[ActorSystemSpec.Waves]()) ? 50000
 Await.result(Future.sequence(waves), timeout.duration + 5.seconds) should ===(Vector("done", "done", "done"))
 }

@@ -281,7 +281,7 @@ class ActorSystemSpec extends AkkaSpec(ActorSystemSpec.config) with ImplicitSend
 var created = Vector.empty[ActorRef]
 while (!system.whenTerminated.isCompleted) {
 try {
-val t = system.actorOf(Props[ActorSystemSpec.Terminater])
+val t = system.actorOf(Props[ActorSystemSpec.Terminater]())
 failing should not be true // because once failing => always failing (it’s due to shutdown)
 created :+= t
 if (created.size % 1000 == 0) Thread.sleep(50) // in case of unfair thread scheduling
@@ -135,22 +135,22 @@ class ActorWithBoundedStashSpec
 "An Actor with Stash" must {

 "end up in DeadLetters in case of a capacity violation when configured via dispatcher" in {
-val stasher = system.actorOf(Props[StashingActor].withDispatcher(dispatcherId1))
+val stasher = system.actorOf(Props[StashingActor]().withDispatcher(dispatcherId1))
 testDeadLetters(stasher)
 }

 "end up in DeadLetters in case of a capacity violation when configured via mailbox" in {
-val stasher = system.actorOf(Props[StashingActor].withMailbox(mailboxId1))
+val stasher = system.actorOf(Props[StashingActor]().withMailbox(mailboxId1))
 testDeadLetters(stasher)
 }

 "throw a StashOverflowException in case of a stash capacity violation when configured via dispatcher" in {
-val stasher = system.actorOf(Props[StashingActorWithOverflow].withDispatcher(dispatcherId2))
+val stasher = system.actorOf(Props[StashingActorWithOverflow]().withDispatcher(dispatcherId2))
 testStashOverflowException(stasher)
 }

 "throw a StashOverflowException in case of a stash capacity violation when configured via mailbox" in {
-val stasher = system.actorOf(Props[StashingActorWithOverflow].withMailbox(mailboxId2))
+val stasher = system.actorOf(Props[StashingActorWithOverflow]().withMailbox(mailboxId2))
 testStashOverflowException(stasher)
 }
 }

@@ -24,7 +24,7 @@ object ActorWithStashSpec {
 def greeted: Receive = {
 case "bye" =>
 state.s = "bye"
-state.finished.await
+state.finished.await()
 case _ => // do nothing
 }

@@ -63,7 +63,7 @@ object ActorWithStashSpec {
 context.unbecome()
 case _ => stash()
 }
-case "done" => state.finished.await
+case "done" => state.finished.await()
 case _ => stash()
 }
 }

@@ -73,7 +73,7 @@ object ActorWithStashSpec {
 }

 class TerminatedMessageStashingActor(probe: ActorRef) extends Actor with Stash {
-val watched = context.watch(context.actorOf(Props[WatchedActor]))
+val watched = context.watch(context.actorOf(Props[WatchedActor]()))
 var stashed = false

 context.stop(watched)

@@ -109,7 +109,7 @@ class ActorWithStashSpec extends AkkaSpec with DefaultTimeout with BeforeAndAfte
 system.eventStream.publish(Mute(EventFilter[Exception]("Crashing...")))
 }

-override def beforeEach() = state.finished.reset
+override def beforeEach() = state.finished.reset()

 "An Actor with Stash" must {

@@ -117,12 +117,12 @@ class ActorWithStashSpec extends AkkaSpec with DefaultTimeout with BeforeAndAfte
 val stasher = system.actorOf(Props(new StashingActor))
 stasher ! "bye"
 stasher ! "hello"
-state.finished.await
+state.finished.await()
 state.s should ===("bye")
 }

 "support protocols" in {
-val protoActor = system.actorOf(Props[ActorWithProtocol])
+val protoActor = system.actorOf(Props[ActorWithProtocol]())
 protoActor ! "open"
 protoActor ! "write"
 protoActor ! "open"

@@ -130,12 +130,12 @@ class ActorWithStashSpec extends AkkaSpec with DefaultTimeout with BeforeAndAfte
 protoActor ! "write"
 protoActor ! "close"
 protoActor ! "done"
-state.finished.await
+state.finished.await()
 }

 "throw an IllegalStateException if the same messages is stashed twice" in {
 state.expectedException = new TestLatch
-val stasher = system.actorOf(Props[StashingTwiceActor])
+val stasher = system.actorOf(Props[StashingTwiceActor]())
 stasher ! "hello"
 stasher ! "hello"
 Await.ready(state.expectedException, 10 seconds)
@@ -60,7 +60,7 @@ class ConsistencySpec extends AkkaSpec(ConsistencySpec.config) {
 "The Akka actor model implementation" must {
 "provide memory consistency" in {
 val noOfActors = threads + 1
-val props = Props[ConsistencyCheckingActor].withDispatcher("consistency-dispatcher")
+val props = Props[ConsistencyCheckingActor]().withDispatcher("consistency-dispatcher")
 val actors = Vector.fill(noOfActors)(system.actorOf(props))

 for (i <- 0L until 10000L) {

@@ -44,7 +44,7 @@ object DeathWatchSpec {
 context.become {
 case Terminated(`currentKid`) =>
 testActor ! "GREEN"
-context unbecome
+context.unbecome()
 }
 }
 }

@@ -217,7 +217,7 @@ trait DeathWatchSpec { this: AkkaSpec with ImplicitSender with DefaultTimeout =>
 .sendSystemMessage(DeathWatchNotification(subject, existenceConfirmed = true, addressTerminated = false))

 // the testActor is not watching subject and will not receive a Terminated msg
-expectNoMessage
+expectNoMessage()
 }

 "discard Terminated when unwatched between sysmsg and processing" in {
@@ -43,17 +43,17 @@ object FSMActorSpec {
 case Event(digit: Char, CodeState(soFar, code)) => {
 soFar + digit match {
 case incomplete if incomplete.length < code.length =>
-stay.using(CodeState(incomplete, code))
+stay().using(CodeState(incomplete, code))
 case codeTry if (codeTry == code) => {
 doUnlock()
 goto(Open).using(CodeState("", code)).forMax(timeout)
 }
 case _ => {
-stay.using(CodeState("", code))
+stay().using(CodeState("", code))
 }
 }
 }
-case Event("hello", _) => stay.replying("world")
+case Event("hello", _) => stay().replying("world")
 case Event("bye", _) => stop(FSM.Shutdown)
 }

@@ -67,13 +67,13 @@ object FSMActorSpec {
 whenUnhandled {
 case Event(msg, _) => {
 log.warning("unhandled event " + msg + " in state " + stateName + " with data " + stateData)
-unhandledLatch.open
-stay
+unhandledLatch.open()
+stay()
 }
 }

 onTransition {
-case Locked -> Open => transitionLatch.open
+case Locked -> Open => transitionLatch.open()
 }

 // verify that old-style does still compile

@@ -119,8 +119,8 @@ class FSMActorSpec extends AkkaSpec(Map("akka.actor.debug.fsm" -> true)) with Im

 val transitionTester = system.actorOf(Props(new Actor {
 def receive = {
-case Transition(_, _, _) => transitionCallBackLatch.open
-case CurrentState(_, s: LockState) if s eq Locked => initialStateLatch.open // SI-5900 workaround
+case Transition(_, _, _) => transitionCallBackLatch.open()
+case CurrentState(_, s: LockState) if s eq Locked => initialStateLatch.open() // SI-5900 workaround
 }
 }))

@@ -147,7 +147,7 @@ class FSMActorSpec extends AkkaSpec(Map("akka.actor.debug.fsm" -> true)) with Im
 val tester = system.actorOf(Props(new Actor {
 def receive = {
 case Hello => lock ! "hello"
-case "world" => answerLatch.open
+case "world" => answerLatch.open()
 case Bye => lock ! "bye"
 }
 }))

@@ -183,7 +183,7 @@ class FSMActorSpec extends AkkaSpec(Map("akka.actor.debug.fsm" -> true)) with Im
 * It is necessary here because of the path-dependent type fsm.StopEvent.
 */
 lazy val fsm = new Actor with FSM[Int, Null] {
-override def preStart = { started.countDown }
+override def preStart = { started.countDown() }
 startWith(1, null)
 when(1) { FSM.NullFunction }
 onTermination {

@@ -269,7 +269,7 @@ class FSMActorSpec extends AkkaSpec(Map("akka.actor.debug.fsm" -> true)) with Im
 when(2) {
 case Event("stop", _) =>
 cancelTimer("t")
-stop
+stop()
 }
 onTermination {
 case StopEvent(r, _, _) => testActor ! r

@@ -307,8 +307,8 @@ class FSMActorSpec extends AkkaSpec(Map("akka.actor.debug.fsm" -> true)) with Im
 override def logDepth = 3
 startWith(1, 0)
 when(1) {
-case Event("count", c) => stay.using(c + 1)
-case Event("log", _) => stay.replying(getLog)
+case Event("count", c) => stay().using(c + 1)
+case Event("log", _) => stay().replying(getLog)
 }
 })
 fsmref ! "log"

@@ -327,12 +327,12 @@ class FSMActorSpec extends AkkaSpec(Map("akka.actor.debug.fsm" -> true)) with Im
 val fsmref = system.actorOf(Props(new Actor with FSM[Int, Int] {
 startWith(0, 0)
 when(0)(transform {
-case Event("go", _) => stay
+case Event("go", _) => stay()
 }.using {
 case _ => goto(1)
 })
 when(1) {
-case _ => stay
+case _ => stay()
 }
 }))
 fsmref ! SubscribeTransitionCallBack(testActor)
@@ -44,7 +44,7 @@ class FSMTimingSpec extends AkkaSpec with ImplicitSender {
 }

 "cancel a StateTimeout when actor is stopped" taggedAs TimingTest in {
-val stoppingActor = system.actorOf(Props[StoppingActor])
+val stoppingActor = system.actorOf(Props[StoppingActor]())
 system.eventStream.subscribe(testActor, classOf[DeadLetter])
 stoppingActor ! TestStoppingActorStateTimeout
 within(400 millis) {

@@ -56,7 +56,7 @@ class FSMTimingSpec extends AkkaSpec with ImplicitSender {
 // the timeout in state TestStateTimeout is 800 ms, then it will change to Initial
 within(400 millis) {
 fsm ! TestStateTimeoutOverride
-expectNoMessage
+expectNoMessage()
 }
 within(1 second) {
 fsm ! Cancel

@@ -72,7 +72,7 @@ class FSMTimingSpec extends AkkaSpec with ImplicitSender {
 expectMsg(Tick)
 expectMsg(Transition(fsm, TestSingleTimer, Initial))
 }
-expectNoMessage
+expectNoMessage()
 }
 }

@@ -86,7 +86,7 @@ class FSMTimingSpec extends AkkaSpec with ImplicitSender {
 expectMsg(Tock)
 expectMsg(Transition(fsm, TestSingleTimerResubmit, Initial))
 }
-expectNoMessage
+expectNoMessage()
 }
 }

@@ -232,10 +232,10 @@ object FSMTimingSpec {
 cancelTimer("hallo")
 sender() ! Tick
 startSingleTimer("hallo", Tock, 500.millis.dilated)
-stay
+stay()
 case Event(Tock, _) =>
 tester ! Tock
-stay
+stay()
 case Event(Cancel, _) =>
 cancelTimer("hallo")
 goto(Initial)

@@ -247,7 +247,7 @@ object FSMTimingSpec {
 cancelTimer("tester")
 goto(Initial)
 } else {
-stay.using(remaining - 1)
+stay().using(remaining - 1)
 }
 }
 when(TestCancelStateTimerInNamedTimerMessage) {

@@ -256,7 +256,7 @@ object FSMTimingSpec {
 suspend(self)
 startSingleTimer("named", Tock, 1.millis.dilated)
 TestKit.awaitCond(context.asInstanceOf[ActorCell].mailbox.hasMessages, 1.second.dilated)
-stay.forMax(1.millis.dilated).replying(Tick)
+stay().forMax(1.millis.dilated).replying(Tick)
 case Event(Tock, _) =>
 goto(TestCancelStateTimerInNamedTimerMessage2)
 }

@@ -271,9 +271,9 @@ object FSMTimingSpec {
 whenUnhandled {
 case Event(Tick, _) =>
 tester ! Unhandled(Tick)
-stay
+stay()
 }
-stay
+stay()
 case Event(Cancel, _) =>
 whenUnhandled(NullFunction)
 goto(Initial)

@@ -286,7 +286,7 @@ object FSMTimingSpec {
 when(Initial, 200 millis) {
 case Event(TestStoppingActorStateTimeout, _) =>
 context.stop(self)
-stay
+stay()
 }
 }
@@ -35,7 +35,7 @@ object FSMTransitionSpec {
 case Event("tick", _) => goto(0)
 }
 whenUnhandled {
-case Event("reply", _) => stay.replying("reply")
+case Event("reply", _) => stay().replying("reply")
 }
 initialize()
 override def preRestart(reason: Throwable, msg: Option[Any]): Unit = { target ! "restarted" }

@@ -28,7 +28,7 @@ object FunctionRefSpec {
 }

 class SupSuper extends Actor {
-val s = context.actorOf(Props[Super], "super")
+val s = context.actorOf(Props[Super](), "super")
 def receive = {
 case msg => s ! msg
 }

@@ -86,12 +86,12 @@ class FunctionRefSpec extends AkkaSpec("""
 "A FunctionRef" when {

 "created by a toplevel actor" must {
-val s = system.actorOf(Props[Super], "super")
+val s = system.actorOf(Props[Super](), "super")
 commonTests(s)
 }

 "created by a non-toplevel actor" must {
-val s = system.actorOf(Props[SupSuper], "supsuper")
+val s = system.actorOf(Props[SupSuper](), "supsuper")
 commonTests(s)
 }
@@ -66,7 +66,7 @@ class ReceiveTimeoutSpec extends AkkaSpec() {
 context.setReceiveTimeout(500 milliseconds)

 def receive = {
-case ReceiveTimeout => timeoutLatch.open
+case ReceiveTimeout => timeoutLatch.open()
 }
 }))

@@ -82,7 +82,7 @@ class ReceiveTimeoutSpec extends AkkaSpec() {

 def receive = {
 case Tick => ()
-case ReceiveTimeout => timeoutLatch.open
+case ReceiveTimeout => timeoutLatch.open()
 }
 }))

@@ -103,7 +103,7 @@ class ReceiveTimeoutSpec extends AkkaSpec() {
 case Tick => ()
 case ReceiveTimeout =>
 count.incrementAndGet
-timeoutLatch.open
+timeoutLatch.open()
 context.setReceiveTimeout(Duration.Undefined)
 }
 }))

@@ -120,7 +120,7 @@ class ReceiveTimeoutSpec extends AkkaSpec() {

 val timeoutActor = system.actorOf(Props(new Actor {
 def receive = {
-case ReceiveTimeout => timeoutLatch.open
+case ReceiveTimeout => timeoutLatch.open()
 }
 }))

@@ -135,7 +135,7 @@ class ReceiveTimeoutSpec extends AkkaSpec() {
 context.setReceiveTimeout(1 second)

 def receive = {
-case ReceiveTimeout => timeoutLatch.open
+case ReceiveTimeout => timeoutLatch.open()
 case TransparentTick =>
 }
 }))

@@ -179,7 +179,7 @@ class ReceiveTimeoutSpec extends AkkaSpec() {
 context.setReceiveTimeout(1 second)
 def receive: Receive = {
 case ReceiveTimeout =>
-timeoutLatch.open
+timeoutLatch.open()
 case TransparentTick =>
 count.incrementAndGet()
 }

@@ -198,7 +198,7 @@ class ReceiveTimeoutSpec extends AkkaSpec() {
 val timeoutActor = system.actorOf(Props(new Actor {
 def receive = {
 case TransparentTick => context.setReceiveTimeout(500 milliseconds)
-case ReceiveTimeout => timeoutLatch.open
+case ReceiveTimeout => timeoutLatch.open()
 }
 }))

@@ -216,7 +216,7 @@ class ReceiveTimeoutSpec extends AkkaSpec() {

 def receive = {
 case TransparentTick => context.setReceiveTimeout(Duration.Inf)
-case ReceiveTimeout => timeoutLatch.open
+case ReceiveTimeout => timeoutLatch.open()
 }
 }))

@@ -235,7 +235,7 @@ class ReceiveTimeoutSpec extends AkkaSpec() {

 def receive: Receive = {
 case TransparentTick => context.setReceiveTimeout(Duration.Undefined)
-case ReceiveTimeout => timeoutLatch.open
+case ReceiveTimeout => timeoutLatch.open()
 }
 }))
@@ -116,7 +116,7 @@ class RestartStrategySpec extends AkkaSpec with DefaultTimeout {

 def receive = {
 case Ping =>
-if (!pingLatch.isOpen) pingLatch.open else secondPingLatch.open
+if (!pingLatch.isOpen) pingLatch.open() else secondPingLatch.open()
 case Crash => throw new Exception("Crashing...")
 }
 override def postRestart(reason: Throwable) = {

@@ -330,7 +330,7 @@ trait SchedulerSpec extends BeforeAndAfterEach with DefaultTimeout with Implicit
 case Crash => throw new Exception("CRASH")
 }

-override def postRestart(reason: Throwable) = restartLatch.open
+override def postRestart(reason: Throwable) = restartLatch.open()
 })
 val actor = Await.result((supervisor ? props).mapTo[ActorRef], timeout.duration)
@@ -53,7 +53,7 @@ object SupervisorHierarchySpec {
 class Resumer extends Actor {
 override def supervisorStrategy = OneForOneStrategy() { case _ => SupervisorStrategy.Resume }
 def receive = {
-case "spawn" => sender() ! context.actorOf(Props[Resumer])
+case "spawn" => sender() ! context.actorOf(Props[Resumer]())
 case "fail" => throw new Exception("expected")
 case "ping" => sender() ! "pong"
 }

@@ -294,7 +294,7 @@ object SupervisorHierarchySpec {
 setFlags(f.directive)
 stateCache.put(self.path, stateCache.get(self.path).copy(failConstr = f.copy()))
 throw f
-case "ping" => { Thread.sleep((random.nextFloat * 1.03).toLong); sender() ! "pong" }
+case "ping" => { Thread.sleep((random.nextFloat() * 1.03).toLong); sender() ! "pong" }
 case Dump(0) => abort("dump")
 case Dump(level) => context.children.foreach(_ ! Dump(level - 1))
 case Terminated(ref) =>

@@ -432,7 +432,7 @@ object SupervisorHierarchySpec {
 var idleChildren = Vector.empty[ActorRef]
 var pingChildren = Set.empty[ActorRef]

-val nextJob = Iterator.continually(random.nextFloat match {
+val nextJob = Iterator.continually(random.nextFloat() match {
 case x if x >= 0.5 =>
 // ping one child
 val pick = ((x - 0.5) * 2 * idleChildren.size).toInt

@@ -479,7 +479,7 @@ object SupervisorHierarchySpec {
 } else {
 children :+= ref
 if (children.size == size) goto(Stress)
-else stay
+else stay()
 }
 case Event(StateTimeout, _) =>
 testActor ! "did not get children list"

@@ -497,7 +497,7 @@ object SupervisorHierarchySpec {

 val workSchedule = 50.millis

-private def random012: Int = random.nextFloat match {
+private def random012: Int = random.nextFloat() match {
 case x if x > 0.1 => 0
 case x if x > 0.03 => 1
 case _ => 2

@@ -516,9 +516,9 @@ object SupervisorHierarchySpec {
 when(Stress) {
 case Event(Work, _) if idleChildren.isEmpty =>
 context.system.scheduler.scheduleOnce(workSchedule, self, Work)(context.dispatcher)
-stay
+stay()
 case Event(Work, x) if x > 0 =>
-nextJob.next match {
+nextJob.next() match {
 case Ping(ref) => ref ! "ping"
 case Fail(ref, dir) =>
 val f = Failure(

@@ -537,15 +537,15 @@ object SupervisorHierarchySpec {
 }
 if (idleChildren.nonEmpty) self ! Work
 else context.system.scheduler.scheduleOnce(workSchedule, self, Work)(context.dispatcher)
-stay.using(x - 1)
+stay().using(x - 1)
 case Event(Work, _) => if (pingChildren.isEmpty) goto(LastPing) else goto(Finishing)
 case Event(Died(path), _) =>
 bury(path)
-stay
+stay()
 case Event("pong", _) =>
 pingChildren -= sender()
 idleChildren :+= sender()
-stay
+stay()
 case Event(StateTimeout, todo) =>
 log.info("dumping state due to StateTimeout")
 log.info(

@@ -566,10 +566,10 @@ object SupervisorHierarchySpec {
 case Event("pong", _) =>
 pingChildren -= sender()
 idleChildren :+= sender()
-if (pingChildren.isEmpty) goto(LastPing) else stay
+if (pingChildren.isEmpty) goto(LastPing) else stay()
 case Event(Died(ref), _) =>
 bury(ref)
-if (pingChildren.isEmpty) goto(LastPing) else stay
+if (pingChildren.isEmpty) goto(LastPing) else stay()
 }

 onTransition {

@@ -583,10 +583,10 @@ object SupervisorHierarchySpec {
 case Event("pong", _) =>
 pingChildren -= sender()
 idleChildren :+= sender()
-if (pingChildren.isEmpty) goto(Stopping) else stay
+if (pingChildren.isEmpty) goto(Stopping) else stay()
 case Event(Died(ref), _) =>
 bury(ref)
-if (pingChildren.isEmpty) goto(Stopping) else stay
+if (pingChildren.isEmpty) goto(Stopping) else stay()
 }

 onTransition {

@@ -596,14 +596,14 @@ object SupervisorHierarchySpec {
 }

 when(Stopping, stateTimeout = 5.seconds.dilated) {
-case Event(PongOfDeath, _) => stay
+case Event(PongOfDeath, _) => stay()
 case Event(Terminated(r), _) if r == hierarchy =>
 @silent
 val undead = children.filterNot(_.isTerminated)
 if (undead.nonEmpty) {
 log.info("undead:\n" + undead.mkString("\n"))
 testActor ! "stressTestFailed (" + undead.size + " undead)"
-stop
+stop()
 } else if (false) {
 /*
 * This part of the test is normally disabled, because it does not

@@ -621,7 +621,7 @@ object SupervisorHierarchySpec {
 goto(GC)
 } else {
 testActor ! "stressTestSuccessful"
-stop
+stop()
 }
 case Event(StateTimeout, _) =>
 errors :+= self -> ErrorLog("timeout while Stopping", Vector.empty)

@@ -630,7 +630,7 @@ object SupervisorHierarchySpec {
 printErrors()
 idleChildren.foreach(println)
 testActor ! "timeout in Stopping"
-stop
+stop()
 case Event(e: ErrorLog, _) =>
 errors :+= sender() -> e
 goto(Failed)

@@ -642,14 +642,14 @@ object SupervisorHierarchySpec {
 if (next.nonEmpty) {
 context.system.scheduler.scheduleOnce(workSchedule, self, GCcheck(next))(context.dispatcher)
 System.gc()
-stay
+stay()
 } else {
 testActor ! "stressTestSuccessful"
-stop
+stop()
 }
 case Event(StateTimeout, _) =>
 testActor ! "timeout in GC"
-stop
+stop()
 }

 var errors = Vector.empty[(ActorRef, ErrorLog)]

@@ -658,19 +658,19 @@ object SupervisorHierarchySpec {
 case Event(e: ErrorLog, _) =>
 if (!e.msg.startsWith("not resumed") || !ignoreNotResumedLogs)
 errors :+= sender() -> e
-stay
+stay()
 case Event(Terminated(r), _) if r == hierarchy =>
 printErrors()
 testActor ! "stressTestFailed"
-stop
+stop()
 case Event(StateTimeout, _) =>
 getErrors(hierarchy, 10)
 printErrors()
 testActor ! "timeout in Failed"
-stop
-case Event("pong", _) => stay // don’t care?
-case Event(Work, _) => stay
-case Event(Died(_), _) => stay
+stop()
+case Event("pong", _) => stay() // don’t care?
+case Event(Work, _) => stay()
+case Event(Died(_), _) => stay()
 }

 def getErrors(target: ActorRef, depth: Int): Unit = {

@@ -716,9 +716,9 @@ object SupervisorHierarchySpec {
 activeChildren :+= ref
 children :+= ref
 idleChildren :+= ref
-stay
+stay()
 case Event(e: ErrorLog, _) =>
-if (e.msg.startsWith("not resumed")) stay
+if (e.msg.startsWith("not resumed")) stay()
 else {
 errors :+= sender() -> e
 // don’t stop the hierarchy, that is going to happen all by itself and in the right order

@@ -737,7 +737,7 @@ object SupervisorHierarchySpec {
 goto(Failed)
 case Event(msg, _) =>
 testActor ! ("received unexpected msg: " + msg)
-stop
+stop()
 }

 initialize()

@@ -801,7 +801,7 @@ class SupervisorHierarchySpec extends AkkaSpec(SupervisorHierarchySpec.config) w
 }

 "resume children after Resume" taggedAs LongRunningTest in {
-val boss = system.actorOf(Props[Resumer], "resumer")
+val boss = system.actorOf(Props[Resumer](), "resumer")
 boss ! "spawn"
 val middle = expectMsgType[ActorRef]
 middle ! "spawn"

@@ -824,7 +824,7 @@ class SupervisorHierarchySpec extends AkkaSpec(SupervisorHierarchySpec.config) w
 case _ => Await.ready(latch, 4.seconds.dilated); SupervisorStrategy.Resume
 }
 def receive = {
-case "spawn" => sender() ! context.actorOf(Props[Resumer])
+case "spawn" => sender() ! context.actorOf(Props[Resumer]())
 }
 }), "slowResumer")
 slowResumer ! "spawn"
@@ -28,7 +28,7 @@ class Ticket669Spec extends AkkaSpec with BeforeAndAfterAll with ImplicitSender
 filterEvents(EventFilter[Exception]("test", occurrences = 1)) {
 val supervisor =
 system.actorOf(Props(new Supervisor(AllForOneStrategy(5, 10 seconds)(List(classOf[Exception])))))
-val supervised = Await.result((supervisor ? Props[Supervised]).mapTo[ActorRef], timeout.duration)
+val supervised = Await.result((supervisor ? Props[Supervised]()).mapTo[ActorRef], timeout.duration)

 supervised.!("test")(testActor)
 expectMsg("failure1")

@@ -40,7 +40,7 @@ class Ticket669Spec extends AkkaSpec with BeforeAndAfterAll with ImplicitSender
 filterEvents(EventFilter[Exception]("test", occurrences = 1)) {
 val supervisor =
 system.actorOf(Props(new Supervisor(AllForOneStrategy(maxNrOfRetries = 0)(List(classOf[Exception])))))
-val supervised = Await.result((supervisor ? Props[Supervised]).mapTo[ActorRef], timeout.duration)
+val supervised = Await.result((supervisor ? Props[Supervised]()).mapTo[ActorRef], timeout.duration)

 supervised.!("test")(testActor)
 expectMsg("failure2")
@@ -108,7 +108,7 @@ object TimerSpec {
 startTimerWithFixedDelay("T", Tick(bumpCount + 1), interval)
 else
 startSingleTimer("T", Tick(bumpCount + 1), interval)
-stay.using(bumpCount + 1)
+stay().using(bumpCount + 1)
 }

 def autoReceive(): State = {

@@ -116,7 +116,7 @@ object TimerSpec {
 startTimerWithFixedDelay("A", PoisonPill, interval)
 else
 startSingleTimer("A", PoisonPill, interval)
-stay
+stay()
 }

 {

@@ -131,7 +131,7 @@ object TimerSpec {
 when(TheState) {
 case Event(Tick(n), _) =>
 monitor ! Tock(n)
-stay
+stay()
 case Event(Bump, bumpCount) =>
 bump(bumpCount)
 case Event(SlowThenBump(latch), bumpCount) =>

@@ -141,7 +141,7 @@ object TimerSpec {
 stop()
 case Event(Cancel, _) =>
 cancelTimer("T")
-stay
+stay()
 case Event(Throw(e), _) =>
 throw e
 case Event(SlowThenThrow(latch, e), _) =>
@@ -123,11 +123,11 @@ object TypedActorSpec {

 def pigdog = "Pigdog"

-def futurePigdog(): Future[String] = Future.successful(pigdog)
+def futurePigdog(): Future[String] = Future.successful(pigdog())

 def futurePigdog(delay: FiniteDuration): Future[String] = {
 Thread.sleep(delay.toMillis)
-futurePigdog
+futurePigdog()
 }

 def futurePigdog(delay: FiniteDuration, numbered: Int): Future[String] = {

@@ -140,16 +140,16 @@ object TypedActorSpec {
 foo.futurePigdog(500 millis).map(_.toUpperCase)
 }

-def optionPigdog(): Option[String] = Some(pigdog)
+def optionPigdog(): Option[String] = Some(pigdog())

 def optionPigdog(delay: FiniteDuration): Option[String] = {
 Thread.sleep(delay.toMillis)
-Some(pigdog)
+Some(pigdog())
 }

 def joptionPigdog(delay: FiniteDuration): JOption[String] = {
 Thread.sleep(delay.toMillis)
-JOption.some(pigdog)
+JOption.some(pigdog())
 }

 var internalNumber = 0

@@ -408,14 +408,14 @@ class TypedActorSpec
 t.failingPigdog()
 t.read() should ===(1) //Make sure state is not reset after failure

-intercept[IllegalStateException] { Await.result(t.failingFuturePigdog, 2 seconds) }.getMessage should ===(
+intercept[IllegalStateException] { Await.result(t.failingFuturePigdog(), 2 seconds) }.getMessage should ===(
 "expected")
 t.read() should ===(1) //Make sure state is not reset after failure

-intercept[IllegalStateException] { t.failingJOptionPigdog }.getMessage should ===("expected")
+intercept[IllegalStateException] { t.failingJOptionPigdog() }.getMessage should ===("expected")
 t.read() should ===(1) //Make sure state is not reset after failure

-intercept[IllegalStateException] { t.failingOptionPigdog }.getMessage should ===("expected")
+intercept[IllegalStateException] { t.failingOptionPigdog() }.getMessage should ===("expected")

 t.read() should ===(1) //Make sure state is not reset after failure

@@ -466,7 +466,7 @@ class TypedActorSpec
 val thais = for (_ <- 1 to 60) yield newFooBar("pooled-dispatcher", 6 seconds)
 val iterator = new CyclicIterator(thais)

-val results = for (i <- 1 to 120) yield (i, iterator.next.futurePigdog(200 millis, i))
+val results = for (i <- 1 to 120) yield (i, iterator.next().futurePigdog(200 millis, i))

 for ((i, r) <- results) Await.result(r, remaining) should ===("Pigdog" + i)
@@ -76,7 +76,7 @@ object UidClashTest {
 Stop
 case _ => Restart
 }
-val theRestartedOne = context.actorOf(Props[RestartedActor], "theRestartedOne")
+val theRestartedOne = context.actorOf(Props[RestartedActor](), "theRestartedOne")

 def receive = {
 case PleaseRestart => theRestartedOne ! PleaseRestart
@@ -257,7 +257,7 @@ abstract class ActorModelSpec(config: String) extends AkkaSpec(config) with Defa

 import ActorModelSpec._

-def newTestActor(dispatcher: String) = system.actorOf(Props[DispatcherActor].withDispatcher(dispatcher))
+def newTestActor(dispatcher: String) = system.actorOf(Props[DispatcherActor]().withDispatcher(dispatcher))

 def awaitStarted(ref: ActorRef): Unit = {
 awaitCond(ref match {

@@ -352,7 +352,7 @@ abstract class ActorModelSpec(config: String) extends AkkaSpec(config) with Defa
 val a = newTestActor(dispatcher.id).asInstanceOf[InternalActorRef]
 awaitStarted(a)
 val done = new CountDownLatch(1)
-a.suspend
+a.suspend()
 a ! CountDown(done)
 assertNoCountDown(done, 1000, "Should not process messages while suspended")
 assertRefDefaultZero(a)(registers = 1, msgsReceived = 1, suspensions = 1)

@@ -373,7 +373,7 @@ abstract class ActorModelSpec(config: String) extends AkkaSpec(config) with Defa

 "handle waves of actors" in {
 val dispatcher = interceptedDispatcher()
-val props = Props[DispatcherActor].withDispatcher(dispatcher.id)
+val props = Props[DispatcherActor]().withDispatcher(dispatcher.id)

 def flood(num: Int): Unit = {
 val cachedMessage = CountDownNStop(new CountDownLatch(num))

@@ -417,7 +417,7 @@ abstract class ActorModelSpec(config: String) extends AkkaSpec(config) with Defa
 }

 System.err.println("Mailbox: " + mq.numberOfMessages + " " + mq.hasMessages)
-Iterator.continually(mq.dequeue).takeWhile(_ ne null).foreach(System.err.println)
+Iterator.continually(mq.dequeue()).takeWhile(_ ne null).foreach(System.err.println)
 case _ =>
 }
@@ -59,14 +59,14 @@ class DispatcherActorSpec extends AkkaSpec(DispatcherActorSpec.config) with Defa
"A Dispatcher and an Actor" must {

"support tell" in {
val actor = system.actorOf(Props[OneWayTestActor].withDispatcher("test-dispatcher"))
val actor = system.actorOf(Props[OneWayTestActor]().withDispatcher("test-dispatcher"))
actor ! "OneWay"
assert(OneWayTestActor.oneWay.await(1, TimeUnit.SECONDS))
system.stop(actor)
}

"support ask/reply" in {
val actor = system.actorOf(Props[TestActor].withDispatcher("test-dispatcher"))
val actor = system.actorOf(Props[TestActor]().withDispatcher("test-dispatcher"))
assert("World" === Await.result(actor ? "Hello", timeout.duration))
system.stop(actor)
}
@@ -182,11 +182,11 @@ class DispatchersSpec extends AkkaSpec(DispatchersSpec.config) with ImplicitSend
}

"include system name and dispatcher id in thread names for fork-join-executor" in {
assertMyDispatcherIsUsed(system.actorOf(Props[ThreadNameEcho].withDispatcher("myapp.mydispatcher")))
assertMyDispatcherIsUsed(system.actorOf(Props[ThreadNameEcho]().withDispatcher("myapp.mydispatcher")))
}

"include system name and dispatcher id in thread names for thread-pool-executor" in {
system.actorOf(Props[ThreadNameEcho].withDispatcher("myapp.thread-pool-dispatcher")) ! "what's the name?"
system.actorOf(Props[ThreadNameEcho]().withDispatcher("myapp.thread-pool-dispatcher")) ! "what's the name?"
val Expected = R("(DispatchersSpec-myapp.thread-pool-dispatcher-[1-9][0-9]*)")
expectMsgPF() {
case Expected(_) =>

@@ -194,7 +194,7 @@ class DispatchersSpec extends AkkaSpec(DispatchersSpec.config) with ImplicitSend
}

"include system name and dispatcher id in thread names for default-dispatcher" in {
system.actorOf(Props[ThreadNameEcho]) ! "what's the name?"
system.actorOf(Props[ThreadNameEcho]()) ! "what's the name?"
val Expected = R("(DispatchersSpec-akka.actor.default-dispatcher-[1-9][0-9]*)")
expectMsgPF() {
case Expected(_) =>

@@ -202,7 +202,7 @@ class DispatchersSpec extends AkkaSpec(DispatchersSpec.config) with ImplicitSend
}

"include system name and dispatcher id in thread names for pinned dispatcher" in {
system.actorOf(Props[ThreadNameEcho].withDispatcher("myapp.my-pinned-dispatcher")) ! "what's the name?"
system.actorOf(Props[ThreadNameEcho]().withDispatcher("myapp.my-pinned-dispatcher")) ! "what's the name?"
val Expected = R("(DispatchersSpec-myapp.my-pinned-dispatcher-[1-9][0-9]*)")
expectMsgPF() {
case Expected(_) =>

@@ -210,7 +210,7 @@ class DispatchersSpec extends AkkaSpec(DispatchersSpec.config) with ImplicitSend
}

"include system name and dispatcher id in thread names for balancing dispatcher" in {
system.actorOf(Props[ThreadNameEcho].withDispatcher("myapp.balancing-dispatcher")) ! "what's the name?"
system.actorOf(Props[ThreadNameEcho]().withDispatcher("myapp.balancing-dispatcher")) ! "what's the name?"
val Expected = R("(DispatchersSpec-myapp.balancing-dispatcher-[1-9][0-9]*)")
expectMsgPF() {
case Expected(_) =>

@@ -218,16 +218,16 @@ class DispatchersSpec extends AkkaSpec(DispatchersSpec.config) with ImplicitSend
}

"use dispatcher in deployment config" in {
assertMyDispatcherIsUsed(system.actorOf(Props[ThreadNameEcho], name = "echo1"))
assertMyDispatcherIsUsed(system.actorOf(Props[ThreadNameEcho](), name = "echo1"))
}

"use dispatcher in deployment config, trumps code" in {
assertMyDispatcherIsUsed(
system.actorOf(Props[ThreadNameEcho].withDispatcher("myapp.my-pinned-dispatcher"), name = "echo2"))
system.actorOf(Props[ThreadNameEcho]().withDispatcher("myapp.my-pinned-dispatcher"), name = "echo2"))
}

"use pool-dispatcher router of deployment config" in {
val pool = system.actorOf(FromConfig.props(Props[ThreadNameEcho]), name = "pool1")
val pool = system.actorOf(FromConfig.props(Props[ThreadNameEcho]()), name = "pool1")
pool ! Identify(None)
val routee = expectMsgType[ActorIdentity].ref.get
routee ! "what's the name?"

@@ -238,7 +238,7 @@ class DispatchersSpec extends AkkaSpec(DispatchersSpec.config) with ImplicitSend
}

"use balancing-pool router with special routees mailbox of deployment config" in {
system.actorOf(FromConfig.props(Props[ThreadNameEcho]), name = "balanced") ! "what's the name?"
system.actorOf(FromConfig.props(Props[ThreadNameEcho]()), name = "balanced") ! "what's the name?"
val Expected = R("""(DispatchersSpec-BalancingPool-/balanced-[1-9][0-9]*)""")
expectMsgPF() {
case Expected(_) =>
@@ -44,7 +44,7 @@ class PinnedActorSpec extends AkkaSpec(PinnedActorSpec.config) with BeforeAndAft
}

"support ask/reply" in {
val actor = system.actorOf(Props[TestActor].withDispatcher("pinned-dispatcher"))
val actor = system.actorOf(Props[TestActor]().withDispatcher("pinned-dispatcher"))
assert("World" === Await.result(actor ? "Hello", timeout.duration))
system.stop(actor)
}
@@ -25,7 +25,7 @@ class DispatchSpec extends AkkaSpec("""

"The dispatcher" should {
"log an appropriate message when akka.actor.serialize-messages triggers a serialization error" in {
val actor = system.actorOf(Props[EmptyActor])
val actor = system.actorOf(Props[EmptyActor]())
EventFilter[Exception](pattern = ".*NoSerializationVerificationNeeded.*", occurrences = 1).intercept {
actor ! new UnserializableMessageClass
}
@@ -157,7 +157,7 @@ abstract class MailboxSpec extends AkkaSpec with BeforeAndAfterAll with BeforeAn
def createConsumer: Future[Vector[Envelope]] = spawn {
var r = Vector[Envelope]()

while (producers.exists(_.isCompleted == false) || q.hasMessages) Option(q.dequeue).foreach { message =>
while (producers.exists(_.isCompleted == false) || q.hasMessages) Option(q.dequeue()).foreach { message =>
r = r :+ message
}
@@ -85,7 +85,7 @@ class EventStreamSpec extends AkkaSpec(EventStreamSpec.config) {
expectMsg(M(42))
bus.unsubscribe(testActor)
bus.publish(M(13))
expectNoMessage
expectNoMessage()
}
}

@@ -159,7 +159,7 @@ class EventStreamSpec extends AkkaSpec(EventStreamSpec.config) {
bus.publish(a)
expectMsg(b2)
expectMsg(a)
expectNoMessage
expectNoMessage()
}
}
@@ -235,7 +235,7 @@ class LoggerSpec extends AnyWordSpec with Matchers {
system.eventStream.publish(SetTarget(probe.ref, qualifier = 1))
probe.expectMsg("OK")

val ref = system.actorOf(Props[ActorWithMDC])
val ref = system.actorOf(Props[ActorWithMDC]())

ref ! "Processing new Request"
probe.expectMsgPF(max = 3.seconds) {
@@ -195,7 +195,7 @@ class LoggingReceiveSpec extends AnyWordSpec with BeforeAndAfterAll {
within(3 seconds) {
val lifecycleGuardian = appLifecycle.asInstanceOf[ActorSystemImpl].guardian
val lname = lifecycleGuardian.path.toString
val supervisor = TestActorRef[TestLogActor](Props[TestLogActor])
val supervisor = TestActorRef[TestLogActor](Props[TestLogActor]())
val sname = supervisor.path.toString

fishForMessage(hint = "now supervising") {

@@ -203,7 +203,7 @@ class LoggingReceiveSpec extends AnyWordSpec with BeforeAndAfterAll {
case _ => false
}

TestActorRef[TestLogActor](Props[TestLogActor], supervisor, "none")
TestActorRef[TestLogActor](Props[TestLogActor](), supervisor, "none")

fishForMessage(hint = "now supervising") {
case Logging.Debug(`sname`, _, msg: String) if msg.startsWith("now supervising") => true

@@ -217,9 +217,9 @@ class LoggingReceiveSpec extends AnyWordSpec with BeforeAndAfterAll {
new TestKit(appLifecycle) {
system.eventStream.subscribe(testActor, classOf[Logging.Debug])
within(3 seconds) {
val supervisor = TestActorRef[TestLogActor](Props[TestLogActor])
val supervisor = TestActorRef[TestLogActor](Props[TestLogActor]())
val sclass = classOf[TestLogActor]
val actor = TestActorRef[TestLogActor](Props[TestLogActor], supervisor, "none")
val actor = TestActorRef[TestLogActor](Props[TestLogActor](), supervisor, "none")
val aname = actor.path.toString

supervisor.watch(actor)

@@ -242,7 +242,7 @@ class LoggingReceiveSpec extends AnyWordSpec with BeforeAndAfterAll {
system.eventStream.subscribe(testActor, classOf[Logging.Debug])
system.eventStream.subscribe(testActor, classOf[Logging.Error])
within(3 seconds) {
val supervisor = TestActorRef[TestLogActor](Props[TestLogActor])
val supervisor = TestActorRef[TestLogActor](Props[TestLogActor]())
val sname = supervisor.path.toString
val sclass = classOf[TestLogActor]

@@ -251,7 +251,7 @@ class LoggingReceiveSpec extends AnyWordSpec with BeforeAndAfterAll {
case Logging.Debug(_, _, msg: String) if msg.startsWith("now supervising") => 1
}

val actor = TestActorRef[TestLogActor](Props[TestLogActor], supervisor, "none")
val actor = TestActorRef[TestLogActor](Props[TestLogActor](), supervisor, "none")
val aname = actor.path.toString
val aclass = classOf[TestLogActor]
@@ -46,7 +46,7 @@ class JavaLoggerSpec extends AkkaSpec(JavaLoggerSpec.config) {
def close(): Unit = {}
})

val producer = system.actorOf(Props[JavaLoggerSpec.LogProducer], name = "log")
val producer = system.actorOf(Props[JavaLoggerSpec.LogProducer](), name = "log")

"JavaLogger" must {
@@ -161,8 +161,8 @@ class AskSpec extends AkkaSpec {
val echo = system.actorOf(Props(new Actor {
def receive = {
case x =>
val name = sender.path.name
val parent = sender.path.parent
val name = sender().path.name
val parent = sender().path.parent
context.actorSelection(parent / ".." / "temp" / name) ! x
}
}), "select-echo4")

@@ -182,7 +182,7 @@ class AskSpec extends AkkaSpec {
val echo = system.actorOf(Props(new Actor {
def receive = {
case x =>
val parent = sender.path.parent
val parent = sender().path.parent
context.actorSelection(parent / "missing") ! x
}
}), "select-echo5")
@@ -119,7 +119,7 @@ class BackoffOnRestartSupervisorSpec extends AkkaSpec("""
val supervisorChildSelection = system.actorSelection(supervisor.path / "*")
supervisorChildSelection.tell("testmsg", probe.ref)
probe.expectMsg("testmsg")
probe.expectNoMessage
probe.expectNoMessage()
}
}
@@ -180,7 +180,7 @@ class CircuitBreakerSpec extends AkkaSpec {
val breaker = shortResetTimeoutCb()
intercept[TestException] { breaker().withSyncCircuitBreaker(throwException) }
checkLatch(breaker.halfOpenLatch)
breaker.openLatch.reset
breaker.openLatch.reset()
intercept[TestException] { breaker().withSyncCircuitBreaker(throwException) }
checkLatch(breaker.openLatch)
}

@@ -190,7 +190,7 @@ class CircuitBreakerSpec extends AkkaSpec {
val breaker = shortResetTimeoutCb()
intercept[TestException] { breaker().withSyncCircuitBreaker(throwException) }
checkLatch(breaker.halfOpenLatch)
breaker.openLatch.reset
breaker.openLatch.reset()
breaker().withSyncCircuitBreaker(2, evenNumberIsFailure)
checkLatch(breaker.openLatch)
}

@@ -200,7 +200,7 @@ class CircuitBreakerSpec extends AkkaSpec {
val breaker = shortResetTimeoutCb()
intercept[TestException] { breaker().withSyncCircuitBreaker(throwException) }
checkLatch(breaker.halfOpenLatch)
breaker.openLatch.reset
breaker.openLatch.reset()
breaker().fail()
checkLatch(breaker.openLatch)
}

@@ -451,7 +451,7 @@ class CircuitBreakerSpec extends AkkaSpec {
checkLatch(breaker.halfOpenLatch)

// transit to open again
breaker.openLatch.reset
breaker.openLatch.reset()
breaker().withCircuitBreaker(Future(throwException))
checkLatch(breaker.openLatch)

@@ -513,7 +513,7 @@ class CircuitBreakerSpec extends AkkaSpec {
val breaker = shortResetTimeoutCb()
breaker().withCircuitBreaker(Future(throwException))
checkLatch(breaker.halfOpenLatch)
breaker.openLatch.reset
breaker.openLatch.reset()
intercept[TestException] { Await.result(breaker().withCircuitBreaker(Future(throwException)), awaitTimeout) }
checkLatch(breaker.openLatch)
}

@@ -523,7 +523,7 @@ class CircuitBreakerSpec extends AkkaSpec {
val breaker = shortResetTimeoutCb()
intercept[TestException] { breaker().withSyncCircuitBreaker(throwException) }
checkLatch(breaker.halfOpenLatch)
breaker.openLatch.reset
breaker.openLatch.reset()
Await.result(breaker().withCircuitBreaker(Future(2), evenNumberIsFailure), awaitTimeout)
checkLatch(breaker.openLatch)
}

@@ -534,7 +534,7 @@ class CircuitBreakerSpec extends AkkaSpec {
breaker().withCircuitBreaker(Future(throwException))
checkLatch(breaker.halfOpenLatch)

breaker.openLatch.reset
breaker.openLatch.reset()
breaker().withCircuitBreaker(Future(throwException))
checkLatch(breaker.openLatch)
}
@@ -29,19 +29,19 @@ class PatternSpec extends AkkaSpec {
"pattern.gracefulStop" must {

"provide Future for stopping an actor" in {
val target = system.actorOf(Props[TargetActor])
val target = system.actorOf(Props[TargetActor]())
val result = gracefulStop(target, 5 seconds)
Await.result(result, 6 seconds) should ===(true)
}

"complete Future when actor already terminated" in {
val target = system.actorOf(Props[TargetActor])
val target = system.actorOf(Props[TargetActor]())
Await.ready(gracefulStop(target, 5 seconds), 6 seconds)
Await.ready(gracefulStop(target, 1 millis), 1 second)
}

"complete Future with AskTimeoutException when actor not terminated within timeout" in {
val target = system.actorOf(Props[TargetActor])
val target = system.actorOf(Props[TargetActor]())
val latch = TestLatch()
target ! ((latch, remainingOrDefault))
intercept[AskTimeoutException] { Await.result(gracefulStop(target, 500 millis), remainingOrDefault) }
@@ -59,7 +59,7 @@ class RetrySpec extends AkkaSpec with RetrySupport {
} else Future.successful(5)
}

val retried = retry(() => attempt, 10, 100 milliseconds)
val retried = retry(() => attempt(), 10, 100 milliseconds)

within(3 seconds) {
Await.result(retried, remaining) should ===(5)

@@ -76,7 +76,7 @@ class RetrySpec extends AkkaSpec with RetrySupport {
} else Future.successful(5)
}

val retried = retry(() => attempt, 5, 100 milliseconds)
val retried = retry(() => attempt(), 5, 100 milliseconds)

within(3 seconds) {
intercept[IllegalStateException] { Await.result(retried, remaining) }.getMessage should ===("6")

@@ -94,7 +94,7 @@ class RetrySpec extends AkkaSpec with RetrySupport {
} else Future.successful(5)
}

val retried = retry(() => attempt, 5, attempted => {
val retried = retry(() => attempt(), 5, attempted => {
attemptedCount = attempted
Some(100.milliseconds * attempted)
})

@@ -114,7 +114,7 @@ class RetrySpec extends AkkaSpec with RetrySupport {
} else Future.successful(1)
}
val start = System.currentTimeMillis()
val retried = retry(() => attempt, 999)
val retried = retry(() => attempt(), 999)

within(1 seconds) {
intercept[IllegalStateException] {
@@ -118,14 +118,14 @@ class BalancingSpec extends AkkaSpec("""

"work with anonymous actor names" in {
// the dispatcher-id must not contain invalid config key characters (e.g. $a)
system.actorOf(Props[Parent]) ! 1000
system.actorOf(Props[Parent]()) ! 1000
expectMsgType[Int]
}

"work with encoded actor names" in {
val encName = URLEncoder.encode("abcå6#$€xyz", "utf-8")
// % is a valid config key character (e.g. %C3%A5)
system.actorOf(Props[Parent], encName) ! 1001
system.actorOf(Props[Parent](), encName) ! 1001
expectMsgType[Int]
}
@@ -127,26 +127,26 @@ class ConfiguredLocalRoutingSpec
"RouterConfig" must {

"be picked up from Props" in {
val actor = system.actorOf(RoundRobinPool(12).props(routeeProps = Props[EchoProps]), "someOther")
val actor = system.actorOf(RoundRobinPool(12).props(routeeProps = Props[EchoProps]()), "someOther")
routerConfig(actor) should ===(RoundRobinPool(12))
Await.result(gracefulStop(actor, 3 seconds), 3 seconds)
}

"be overridable in config" in {
val actor = system.actorOf(RoundRobinPool(12).props(routeeProps = Props[EchoProps]), "config")
val actor = system.actorOf(RoundRobinPool(12).props(routeeProps = Props[EchoProps]()), "config")
routerConfig(actor) should ===(RandomPool(nrOfInstances = 4, usePoolDispatcher = true))
Await.result(gracefulStop(actor, 3 seconds), 3 seconds)
}

"use routees.paths from config" in {
val actor = system.actorOf(RandomPool(12).props(routeeProps = Props[EchoProps]), "paths")
val actor = system.actorOf(RandomPool(12).props(routeeProps = Props[EchoProps]()), "paths")
routerConfig(actor) should ===(RandomGroup(List("/user/service1", "/user/service2")))
Await.result(gracefulStop(actor, 3 seconds), 3 seconds)
}

"be overridable in explicit deployment" in {
val actor = system.actorOf(
FromConfig.props(routeeProps = Props[EchoProps]).withDeploy(Deploy(routerConfig = RoundRobinPool(12))),
FromConfig.props(routeeProps = Props[EchoProps]()).withDeploy(Deploy(routerConfig = RoundRobinPool(12))),
"someOther")
routerConfig(actor) should ===(RoundRobinPool(12))
Await.result(gracefulStop(actor, 3 seconds), 3 seconds)

@@ -154,7 +154,7 @@ class ConfiguredLocalRoutingSpec

"be overridable in config even with explicit deployment" in {
val actor = system.actorOf(
FromConfig.props(routeeProps = Props[EchoProps]).withDeploy(Deploy(routerConfig = RoundRobinPool(12))),
FromConfig.props(routeeProps = Props[EchoProps]()).withDeploy(Deploy(routerConfig = RoundRobinPool(12))),
"config")
routerConfig(actor) should ===(RandomPool(nrOfInstances = 4, usePoolDispatcher = true))
Await.result(gracefulStop(actor, 3 seconds), 3 seconds)

@@ -185,7 +185,7 @@ class ConfiguredLocalRoutingSpec
// we don't really support deployment configuration of system actors, but
// it's used for the pool of the SimpleDnsManager "/IO-DNS/inet-address"
val probe = TestProbe()
val parent = system.asInstanceOf[ExtendedActorSystem].systemActorOf(Props[Parent], "sys-parent")
val parent = system.asInstanceOf[ExtendedActorSystem].systemActorOf(Props[Parent](), "sys-parent")
parent.tell((FromConfig.props(echoActorProps), "round"), probe.ref)
val router = probe.expectMsgType[ActorRef]
val replies = collectRouteePaths(probe, router, 10)
@@ -59,7 +59,7 @@ class ConsistentHashingRouterSpec
import ConsistentHashingRouterSpec._
implicit val ec: ExecutionContextExecutor = system.dispatcher

val router1 = system.actorOf(FromConfig.props(Props[Echo]), "router1")
val router1 = system.actorOf(FromConfig.props(Props[Echo]()), "router1")

"consistent hashing router" must {
"create routees from configuration" in {

@@ -90,7 +90,7 @@ class ConsistentHashingRouterSpec
}
val router2 =
system.actorOf(
ConsistentHashingPool(nrOfInstances = 1, hashMapping = hashMapping).props(Props[Echo]),
ConsistentHashingPool(nrOfInstances = 1, hashMapping = hashMapping).props(Props[Echo]()),
"router2")

router2 ! Msg2("a", "A")
@@ -100,7 +100,7 @@ class ResizerSpec extends AkkaSpec(ResizerSpec.config) with DefaultTimeout with
c1 should ===(2)

val current =
Vector(ActorRefRoutee(system.actorOf(Props[TestActor])), ActorRefRoutee(system.actorOf(Props[TestActor])))
Vector(ActorRefRoutee(system.actorOf(Props[TestActor]())), ActorRefRoutee(system.actorOf(Props[TestActor]())))
val c2 = resizer.capacity(current)
c2 should ===(0)
}

@@ -129,7 +129,7 @@ class ResizerSpec extends AkkaSpec(ResizerSpec.config) with DefaultTimeout with
val latch = new TestLatch(3)

val resizer = DefaultResizer(lowerBound = 2, upperBound = 3)
val router = system.actorOf(RoundRobinPool(nrOfInstances = 0, resizer = Some(resizer)).props(Props[TestActor]))
val router = system.actorOf(RoundRobinPool(nrOfInstances = 0, resizer = Some(resizer)).props(Props[TestActor]()))

router ! latch
router ! latch

@@ -144,7 +144,7 @@ class ResizerSpec extends AkkaSpec(ResizerSpec.config) with DefaultTimeout with
"be possible to define in configuration" in {
val latch = new TestLatch(3)

val router = system.actorOf(FromConfig.props(Props[TestActor]), "router1")
val router = system.actorOf(FromConfig.props(Props[TestActor]()), "router1")

router ! latch
router ! latch
@ -56,7 +56,7 @@ class RoutingSpec extends AkkaSpec(RoutingSpec.config) with DefaultTimeout with
|
|||
"routers in general" must {
|
||||
|
||||
"evict terminated routees" in {
|
||||
val router = system.actorOf(RoundRobinPool(2).props(routeeProps = Props[Echo]))
|
||||
val router = system.actorOf(RoundRobinPool(2).props(routeeProps = Props[Echo]()))
|
||||
router ! ""
|
||||
router ! ""
|
||||
val c1, c2 = expectMsgType[ActorRef]
|
||||
|
|
@ -87,7 +87,8 @@ class RoutingSpec extends AkkaSpec(RoutingSpec.config) with DefaultTimeout with
|
|||
}
|
||||
}
|
||||
val router =
|
||||
system.actorOf(RoundRobinPool(nrOfInstances = 0, resizer = Some(resizer)).props(routeeProps = Props[TestActor]))
|
||||
system.actorOf(
|
||||
RoundRobinPool(nrOfInstances = 0, resizer = Some(resizer)).props(routeeProps = Props[TestActor]()))
|
||||
watch(router)
|
||||
Await.ready(latch, remainingOrDefault)
|
||||
router ! GetRoutees
|
||||
|
|
@ -99,7 +100,7 @@ class RoutingSpec extends AkkaSpec(RoutingSpec.config) with DefaultTimeout with
|
|||
}
|
||||
|
||||
"use configured nr-of-instances when FromConfig" in {
|
||||
val router = system.actorOf(FromConfig.props(routeeProps = Props[TestActor]), "router1")
|
||||
val router = system.actorOf(FromConfig.props(routeeProps = Props[TestActor]()), "router1")
|
||||
router ! GetRoutees
|
||||
expectMsgType[Routees].routees.size should ===(3)
|
||||
watch(router)
|
||||
|
|
@ -108,7 +109,7 @@ class RoutingSpec extends AkkaSpec(RoutingSpec.config) with DefaultTimeout with
|
|||
}
|
||||
|
||||
"use configured nr-of-instances when router is specified" in {
|
||||
val router = system.actorOf(RoundRobinPool(nrOfInstances = 2).props(routeeProps = Props[TestActor]), "router2")
|
||||
val router = system.actorOf(RoundRobinPool(nrOfInstances = 2).props(routeeProps = Props[TestActor]()), "router2")
|
||||
router ! GetRoutees
|
||||
expectMsgType[Routees].routees.size should ===(3)
|
||||
system.stop(router)
|
||||
|
|
@ -125,7 +126,7 @@ class RoutingSpec extends AkkaSpec(RoutingSpec.config) with DefaultTimeout with
|
|||
}
|
||||
val router =
|
||||
system.actorOf(
|
||||
RoundRobinPool(nrOfInstances = 0, resizer = Some(resizer)).props(routeeProps = Props[TestActor]),
|
||||
RoundRobinPool(nrOfInstances = 0, resizer = Some(resizer)).props(routeeProps = Props[TestActor]()),
|
||||
"router3")
|
||||
Await.ready(latch, remainingOrDefault)
|
||||
router ! GetRoutees
|
||||
|
|
@ -141,7 +142,7 @@ class RoutingSpec extends AkkaSpec(RoutingSpec.config) with DefaultTimeout with
|
|||
//#custom-strategy
|
||||
}
|
||||
val router =
|
||||
system.actorOf(RoundRobinPool(1, supervisorStrategy = escalator).props(routeeProps = Props[TestActor]))
|
||||
system.actorOf(RoundRobinPool(1, supervisorStrategy = escalator).props(routeeProps = Props[TestActor]()))
|
||||
//#supervision
|
||||
router ! GetRoutees
|
||||
EventFilter[ActorKilledException](occurrences = 1).intercept {
|
||||
|
|
@ -150,7 +151,7 @@ class RoutingSpec extends AkkaSpec(RoutingSpec.config) with DefaultTimeout with
|
|||
expectMsgType[ActorKilledException]
|
||||
|
||||
val router2 =
|
||||
system.actorOf(RoundRobinPool(1).withSupervisorStrategy(escalator).props(routeeProps = Props[TestActor]))
|
||||
system.actorOf(RoundRobinPool(1).withSupervisorStrategy(escalator).props(routeeProps = Props[TestActor]()))
|
||||
router2 ! GetRoutees
|
||||
EventFilter[ActorKilledException](occurrences = 1).intercept {
|
||||
expectMsgType[Routees].routees.head.send(Kill, testActor)
|
||||
|
|
@ -163,7 +164,7 @@ class RoutingSpec extends AkkaSpec(RoutingSpec.config) with DefaultTimeout with
|
|||
case e => testActor ! e; SupervisorStrategy.Escalate
|
||||
}
|
||||
val router =
|
||||
system.actorOf(FromConfig.withSupervisorStrategy(escalator).props(routeeProps = Props[TestActor]), "router1")
|
||||
system.actorOf(FromConfig.withSupervisorStrategy(escalator).props(routeeProps = Props[TestActor]()), "router1")
|
||||
router ! GetRoutees
|
||||
EventFilter[ActorKilledException](occurrences = 1).intercept {
|
||||
expectMsgType[Routees].routees.head.send(Kill, testActor)
|
||||
|
|
@ -227,7 +228,7 @@ class RoutingSpec extends AkkaSpec(RoutingSpec.config) with DefaultTimeout with
|
|||
"router FromConfig" must {
|
||||
"throw suitable exception when not configured" in {
|
||||
val e = intercept[ConfigurationException] {
|
||||
system.actorOf(FromConfig.props(routeeProps = Props[TestActor]), "routerNotDefined")
|
||||
system.actorOf(FromConfig.props(routeeProps = Props[TestActor]()), "routerNotDefined")
|
||||
}
|
||||
e.getMessage should include("routerNotDefined")
|
||||
}
|
||||
|
|
@ -239,7 +240,7 @@ class RoutingSpec extends AkkaSpec(RoutingSpec.config) with DefaultTimeout with
|
|||
.parseString("akka.actor.deployment./routed.router=round-robin-pool")
|
||||
.withFallback(system.settings.config))
|
||||
try {
|
||||
sys.actorOf(FromConfig.props(routeeProps = Props[TestActor]), "routed")
|
||||
sys.actorOf(FromConfig.props(routeeProps = Props[TestActor]()), "routed")
|
||||
} finally {
|
||||
shutdown(sys)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -284,7 +284,7 @@ class VerifySerializabilitySpec extends AkkaSpec(SerializationTests.verifySerial
|
|||
}
|
||||
|
||||
"verify creators" in {
|
||||
val a = system.actorOf(Props[FooActor])
|
||||
val a = system.actorOf(Props[FooActor]())
|
||||
system.stop(a)
|
||||
|
||||
val b = system.actorOf(Props(new FooAbstractActor))
|
||||
|
|
@ -307,7 +307,7 @@ class VerifySerializabilitySpec extends AkkaSpec(SerializationTests.verifySerial
|
|||
}
|
||||
|
||||
"verify messages" in {
|
||||
val a = system.actorOf(Props[FooActor])
|
||||
val a = system.actorOf(Props[FooActor]())
|
||||
Await.result(a ? "pigdog", timeout.duration) should ===("pigdog")
|
||||
|
||||
EventFilter[SerializationCheckFailedException](
|
||||
|
|
@ -319,7 +319,7 @@ class VerifySerializabilitySpec extends AkkaSpec(SerializationTests.verifySerial
|
|||
}
|
||||
|
||||
"not verify akka messages" in {
|
||||
val a = system.actorOf(Props[FooActor])
|
||||
val a = system.actorOf(Props[FooActor]())
|
||||
EventFilter.warning(start = "ok", occurrences = 1).intercept {
|
||||
// ActorSystem is not possible to serialize, but ok since it starts with "akka."
|
||||
val message = system
|
||||
|
|
|
|||
|
|
@ -191,7 +191,7 @@ class ByteStringSpec extends AnyWordSpec with Matchers with Checkers {
|
|||
body(bsBuilder)
|
||||
body(vecBuilder)
|
||||
|
||||
bsBuilder.result == vecBuilder.result
|
||||
bsBuilder.result() == vecBuilder.result()
|
||||
}
|
||||
|
||||
def testShortDecoding(slice: ByteStringSlice, byteOrder: ByteOrder): Boolean = {
|
||||
|
|
@ -275,7 +275,7 @@ class ByteStringSpec extends AnyWordSpec with Matchers with Checkers {
|
|||
for (i <- 0 until from) builder.putShort(data(i))(byteOrder)
|
||||
builder.putShorts(data, from, to - from)(byteOrder)
|
||||
for (i <- to until data.length) builder.putShort(data(i))(byteOrder)
|
||||
reference.toSeq == builder.result
|
||||
reference.toSeq == builder.result()
|
||||
}
|
||||
|
||||
def testIntEncoding(slice: ArraySlice[Int], byteOrder: ByteOrder): Boolean = {
|
||||
|
|
@ -287,7 +287,7 @@ class ByteStringSpec extends AnyWordSpec with Matchers with Checkers {
|
|||
for (i <- 0 until from) builder.putInt(data(i))(byteOrder)
|
||||
builder.putInts(data, from, to - from)(byteOrder)
|
||||
for (i <- to until data.length) builder.putInt(data(i))(byteOrder)
|
||||
reference.toSeq == builder.result
|
||||
reference.toSeq == builder.result()
|
||||
}
|
||||
|
||||
def testLongEncoding(slice: ArraySlice[Long], byteOrder: ByteOrder): Boolean = {
|
||||
|
|
@ -299,7 +299,7 @@ class ByteStringSpec extends AnyWordSpec with Matchers with Checkers {
|
|||
for (i <- 0 until from) builder.putLong(data(i))(byteOrder)
|
||||
builder.putLongs(data, from, to - from)(byteOrder)
|
||||
for (i <- to until data.length) builder.putLong(data(i))(byteOrder)
|
||||
reference.toSeq == builder.result
|
||||
reference.toSeq == builder.result()
|
||||
}
|
||||
|
||||
def testLongPartEncoding(anb: ArrayNumBytes[Long], byteOrder: ByteOrder): Boolean = {
|
||||
|
|
@ -316,7 +316,7 @@ class ByteStringSpec extends AnyWordSpec with Matchers with Checkers {
|
|||
case (r, i) if byteOrder == ByteOrder.LITTLE_ENDIAN && i % elemSize < nBytes => r
|
||||
case (r, i) if byteOrder == ByteOrder.BIG_ENDIAN && i % elemSize >= (elemSize - nBytes) => r
|
||||
})
|
||||
.toSeq == builder.result
|
||||
.toSeq == builder.result()
|
||||
}
|
||||
|
||||
def testFloatEncoding(slice: ArraySlice[Float], byteOrder: ByteOrder): Boolean = {
|
||||
|
|
@ -328,7 +328,7 @@ class ByteStringSpec extends AnyWordSpec with Matchers with Checkers {
|
|||
for (i <- 0 until from) builder.putFloat(data(i))(byteOrder)
|
||||
builder.putFloats(data, from, to - from)(byteOrder)
|
||||
for (i <- to until data.length) builder.putFloat(data(i))(byteOrder)
|
||||
reference.toSeq == builder.result
|
||||
reference.toSeq == builder.result()
|
||||
}
|
||||
|
||||
def testDoubleEncoding(slice: ArraySlice[Double], byteOrder: ByteOrder): Boolean = {
|
||||
|
|
@ -340,7 +340,7 @@ class ByteStringSpec extends AnyWordSpec with Matchers with Checkers {
|
|||
for (i <- 0 until from) builder.putDouble(data(i))(byteOrder)
|
||||
builder.putDoubles(data, from, to - from)(byteOrder)
|
||||
for (i <- to until data.length) builder.putDouble(data(i))(byteOrder)
|
||||
reference.toSeq == builder.result
|
||||
reference.toSeq == builder.result()
|
||||
}
|
||||
|
||||
"ByteString1" must {
|
||||
|
|
@ -1301,7 +1301,7 @@ class ByteStringSpec extends AnyWordSpec with Matchers with Checkers {
|
|||
for (i <- 0 until from) builder.putByte(data(i))
|
||||
builder.putBytes(data, from, to - from)
|
||||
for (i <- to until data.length) builder.putByte(data(i))
|
||||
data.toSeq == builder.result
|
||||
data.toSeq == builder.result()
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -1313,7 +1313,7 @@ class ByteStringSpec extends AnyWordSpec with Matchers with Checkers {
|
|||
for (i <- 0 until from) builder.asOutputStream.write(data(i).toInt)
|
||||
builder.asOutputStream.write(data, from, to - from)
|
||||
for (i <- to until data.length) builder.asOutputStream.write(data(i).toInt)
|
||||
data.toSeq == builder.result
|
||||
data.toSeq == builder.result()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -132,7 +132,7 @@ class IndexSpec extends AkkaSpec with Matchers with DefaultTimeout {
|
|||
case 3 => readTask()
|
||||
}
|
||||
|
||||
val tasks = List.fill(nrOfTasks)(executeRandomTask)
|
||||
val tasks = List.fill(nrOfTasks)(executeRandomTask())
|
||||
|
||||
tasks.foreach(Await.result(_, timeout.duration))
|
||||
}
|
||||
|
|
|
|||
|
|
@ -520,7 +520,7 @@ abstract class ActorContextSpec extends ScalaTestWithActorTestKit with AnyWordSp
|
|||
|
||||
"return the right context info" in {
|
||||
type Info = (ActorSystem[Nothing], ActorRef[String])
|
||||
val probe = TestProbe[Info]
|
||||
val probe = TestProbe[Info]()
|
||||
val actor = spawn(
|
||||
Behaviors
|
||||
.receivePartial[String] {
|
||||
|
|
|
|||
|
|
@ -1274,7 +1274,7 @@ class SupervisionSpec extends ScalaTestWithActorTestKit("""
|
|||
}
|
||||
|
||||
"not allow AbstractBehavior without setup" in {
|
||||
val contextProbe = createTestProbe[ActorContext[String]]
|
||||
val contextProbe = createTestProbe[ActorContext[String]]()
|
||||
spawn(Behaviors.setup[String] { context =>
|
||||
contextProbe.ref ! context
|
||||
Behaviors.empty
|
||||
|
|
@ -1298,7 +1298,7 @@ class SupervisionSpec extends ScalaTestWithActorTestKit("""
|
|||
}
|
||||
|
||||
"detect AbstractBehavior with wrong ActorContext" in {
|
||||
val contextProbe = createTestProbe[ActorContext[String]]
|
||||
val contextProbe = createTestProbe[ActorContext[String]]()
|
||||
spawn(Behaviors.setup[String] { context =>
|
||||
contextProbe.ref ! context
|
||||
Behaviors.empty
|
||||
|
|
|
|||
|
|
@ -49,8 +49,8 @@ class TypedSupervisingClassicSpec extends ScalaTestWithActorTestKit("""
|
|||
"Typed supervising classic" should {
|
||||
"default to restart" in {
|
||||
val ref: ActorRef[Protocol] = spawn(classicActorOf())
|
||||
val lifecycleProbe = TestProbe[String]
|
||||
val probe = TestProbe[SpawnedClassicActor]
|
||||
val lifecycleProbe = TestProbe[String]()
|
||||
val probe = TestProbe[SpawnedClassicActor]()
|
||||
ref ! SpawnClassicActor(classic.Props(new CLassicActor(lifecycleProbe.ref)), probe.ref)
|
||||
val spawnedClassic = probe.expectMessageType[SpawnedClassicActor].ref
|
||||
lifecycleProbe.expectMessage("preStart")
|
||||
|
|
|
|||
|
|
@ -45,9 +45,9 @@ class EventStreamSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike wit
|
|||
}
|
||||
|
||||
"a system event stream subscriber" must {
|
||||
val rootEventListener = testKit.createTestProbe[Root]
|
||||
val level1EventListener = testKit.createTestProbe[Level1]
|
||||
val rootEventListenerForLevel1 = testKit.createTestProbe[Root]
|
||||
val rootEventListener = testKit.createTestProbe[Root]()
|
||||
val level1EventListener = testKit.createTestProbe[Level1]()
|
||||
val rootEventListenerForLevel1 = testKit.createTestProbe[Root]()
|
||||
testKit.system.eventStream ! Subscribe(rootEventListener.ref)
|
||||
testKit.system.eventStream ! Subscribe(level1EventListener.ref)
|
||||
testKit.system.eventStream ! Subscribe[Level1](rootEventListenerForLevel1.ref)
|
||||
|
|
|
|||
|
|
@ -133,7 +133,7 @@ class ActorSystemSpec
|
|||
|
||||
"have a working thread factory" in {
|
||||
withSystem("thread", Behaviors.empty[String]) { sys =>
|
||||
val p = Promise[Int]
|
||||
val p = Promise[Int]()
|
||||
sys.threadFactory
|
||||
.newThread(new Runnable {
|
||||
def run(): Unit = p.success(42)
|
||||
|
|
|
|||
|
|
@ -210,8 +210,8 @@ class RoutersSpec extends ScalaTestWithActorTestKit("""
|
|||
val router = spawn(Behaviors.setup[String](context =>
|
||||
new GroupRouterImpl(context, serviceKey, false, new RoutingLogics.RoundRobinLogic[String], true)))
|
||||
|
||||
val reachableProbe = createTestProbe[String]
|
||||
val unreachableProbe = createTestProbe[String]
|
||||
val reachableProbe = createTestProbe[String]()
|
||||
val unreachableProbe = createTestProbe[String]()
|
||||
router
|
||||
.unsafeUpcast[Any] ! Receptionist.Listing(serviceKey, Set(reachableProbe.ref), Set(unreachableProbe.ref), false)
|
||||
router ! "one"
|
||||
|
|
@ -225,7 +225,7 @@ class RoutersSpec extends ScalaTestWithActorTestKit("""
|
|||
val router = spawn(Behaviors.setup[String](context =>
|
||||
new GroupRouterImpl(context, serviceKey, false, new RoutingLogics.RoundRobinLogic[String], true)))
|
||||
|
||||
val unreachableProbe = createTestProbe[String]
|
||||
val unreachableProbe = createTestProbe[String]()
|
||||
router.unsafeUpcast[Any] ! Receptionist.Listing(
|
||||
serviceKey,
|
||||
Set.empty[ActorRef[String]],
|
||||
|
|
|
|||
|
|
@ -660,7 +660,7 @@ class UnstashingSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike with
|
|||
}
|
||||
|
||||
"deal with initial stop" in {
|
||||
val probe = TestProbe[Any]
|
||||
val probe = TestProbe[Any]()
|
||||
val ref = spawn(Behaviors.withStash[String](10) { stash =>
|
||||
stash.stash("one")
|
||||
|
||||
|
|
@ -675,7 +675,7 @@ class UnstashingSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike with
|
|||
}
|
||||
|
||||
"deal with stop" in {
|
||||
val probe = TestProbe[Any]
|
||||
val probe = TestProbe[Any]()
|
||||
val deadLetterProbe = createDeadLetterProbe()
|
||||
|
||||
val ref = spawn(Behaviors.withStash[String](10) { stash =>
|
||||
|
|
@ -699,7 +699,7 @@ class UnstashingSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike with
|
|||
}
|
||||
|
||||
"work with initial same" in {
|
||||
val probe = TestProbe[Any]
|
||||
val probe = TestProbe[Any]()
|
||||
val ref = spawn(Behaviors.withStash[String](10) { stash =>
|
||||
stash.stash("one")
|
||||
stash.stash("two")
|
||||
|
|
|
|||
|
|
@ -195,7 +195,7 @@ private class RestartSupervisor[T, Thr <: Throwable: ClassTag](initial: Behavior
|
|||
|
||||
private def deadlineHasTimeLeft: Boolean = deadline match {
|
||||
case OptionVal.None => true
|
||||
case OptionVal.Some(d) => d.hasTimeLeft
|
||||
case OptionVal.Some(d) => d.hasTimeLeft()
|
||||
}
|
||||
|
||||
override def aroundSignal(ctx: TypedActorContext[Any], signal: Signal, target: SignalTarget[T]): Behavior[T] = {
|
||||
|
|
|
|||
|
|
@ -168,7 +168,7 @@ object ByteIterator {
|
|||
if ((off < 0) || (len < 0) || (off + len > b.length)) throw new IndexOutOfBoundsException
|
||||
if (len == 0) 0
|
||||
else if (!isEmpty) {
|
||||
val nRead = math.min(available, len)
|
||||
val nRead = math.min(available(), len)
|
||||
copyToArray(b, off, nRead)
|
||||
nRead
|
||||
} else -1
|
||||
|
|
@ -269,7 +269,7 @@ object ByteIterator {
|
|||
}
|
||||
iterators = iterators.tail
|
||||
}
|
||||
iterators = builder.result
|
||||
iterators = builder.result()
|
||||
normalize()
|
||||
}
|
||||
|
||||
|
|
@ -294,7 +294,7 @@ object ByteIterator {
|
|||
if (current.len < lastLen) stop = true
|
||||
dropCurrent()
|
||||
}
|
||||
iterators = builder.result
|
||||
iterators = builder.result()
|
||||
normalize()
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -1334,7 +1334,7 @@ final class ByteStringBuilder extends Builder[Byte, ByteString] {
|
|||
if (_length == 0) ByteString.empty
|
||||
else {
|
||||
clearTemp()
|
||||
val bytestrings = _builder.result
|
||||
val bytestrings = _builder.result()
|
||||
if (bytestrings.size == 1)
|
||||
bytestrings.head
|
||||
else
|
||||
|
|
|
|||
|
|
@ -1017,7 +1017,7 @@ private[akka] class ActorSystemImpl(
|
|||
_initialized = true
|
||||
|
||||
if (settings.LogDeadLetters > 0)
|
||||
logDeadLetterListener = Some(systemActorOf(Props[DeadLetterListener], "deadLetterListener"))
|
||||
logDeadLetterListener = Some(systemActorOf(Props[DeadLetterListener](), "deadLetterListener"))
|
||||
eventStream.startUnsubscriber()
|
||||
ManifestInfo(this).checkSameVersion("Akka", allModules, logWarning = true)
|
||||
if (!terminating)
|
||||
|
|
|
|||
|
|
@ -710,7 +710,7 @@ final class CoordinatedShutdown private[akka] (
|
|||
val deadline = Deadline.now + timeout
|
||||
val timeoutFut = try {
|
||||
after(timeout, system.scheduler) {
|
||||
if (phaseName == CoordinatedShutdown.PhaseActorSystemTerminate && deadline.hasTimeLeft) {
|
||||
if (phaseName == CoordinatedShutdown.PhaseActorSystemTerminate && deadline.hasTimeLeft()) {
|
||||
// too early, i.e. triggered by system termination
|
||||
result
|
||||
} else if (result.isCompleted)
|
||||
|
|
|
|||
|
|
@ -466,7 +466,7 @@ trait FSM[S, D] extends Actor with Listeners with ActorLogging {
|
|||
/**
|
||||
* Produce change descriptor to stop this FSM actor including specified reason.
|
||||
*/
|
||||
final def stop(reason: Reason, stateData: D): State = stay.using(stateData).withStopReason(reason)
|
||||
final def stop(reason: Reason, stateData: D): State = stay().using(stateData).withStopReason(reason)
|
||||
|
||||
final class TransformHelper(func: StateFunction) {
|
||||
def using(andThen: PartialFunction[State, State]): StateFunction =
|
||||
|
|
@ -559,7 +559,7 @@ trait FSM[S, D] extends Actor with Listeners with ActorLogging {
|
|||
if (timers contains name) {
|
||||
timers(name).cancel()
|
||||
}
|
||||
val timer = Timer(name, msg, mode, timerGen.next, this)(context)
|
||||
val timer = Timer(name, msg, mode, timerGen.next(), this)(context)
|
||||
timer.schedule(self, timeout)
|
||||
timers(name) = timer
|
||||
}
|
||||
|
|
@ -728,7 +728,7 @@ trait FSM[S, D] extends Actor with Listeners with ActorLogging {
|
|||
private val handleEventDefault: StateFunction = {
|
||||
case Event(value, _) =>
|
||||
log.warning("unhandled event " + value + " in state " + stateName)
|
||||
stay
|
||||
stay()
|
||||
}
|
||||
private var handleEvent: StateFunction = handleEventDefault
|
||||
|
||||
|
|
@ -821,7 +821,7 @@ trait FSM[S, D] extends Actor with Listeners with ActorLogging {
|
|||
|
||||
private[akka] def makeTransition(nextState: State): Unit = {
|
||||
if (!stateFunctions.contains(nextState.stateName)) {
|
||||
terminate(stay.withStopReason(Failure("Next state %s does not exist".format(nextState.stateName))))
|
||||
terminate(stay().withStopReason(Failure("Next state %s does not exist".format(nextState.stateName))))
|
||||
} else {
|
||||
nextState.replies.reverse.foreach { r =>
|
||||
sender() ! r
|
||||
|
|
@ -862,7 +862,7 @@ trait FSM[S, D] extends Actor with Listeners with ActorLogging {
|
|||
* setting this instance’s state to terminated does no harm during restart
|
||||
* since the new instance will initialize fresh using startWith()
|
||||
*/
|
||||
terminate(stay.withStopReason(Shutdown))
|
||||
terminate(stay().withStopReason(Shutdown))
|
||||
super.postStop()
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -39,7 +39,7 @@ object Props extends AbstractProps {
|
|||
/**
|
||||
* A Props instance whose creator will create an actor that doesn't respond to any message
|
||||
*/
|
||||
final val empty = Props[EmptyActor]
|
||||
final val empty = Props[EmptyActor]()
|
||||
|
||||
/**
|
||||
* The default Props instance, uses the settings from the Props object starting with default*.
|
||||
|
|
|
|||
|
|
@ -149,7 +149,7 @@ private[akka] class RepointableActorRef(
|
|||
|
||||
def getChild(name: Iterator[String]): InternalActorRef =
|
||||
if (name.hasNext) {
|
||||
name.next match {
|
||||
name.next() match {
|
||||
case ".." => getParent.getChild(name)
|
||||
case "" => getChild(name)
|
||||
case other =>
|
||||
|
|
|
|||
|
|
@ -48,7 +48,7 @@ trait TypedActorFactory {
|
|||
*/
|
||||
def stop(proxy: AnyRef): Boolean = getActorRefFor(proxy) match {
|
||||
case null => false
|
||||
case ref => ref.asInstanceOf[InternalActorRef].stop; true
|
||||
case ref => ref.asInstanceOf[InternalActorRef].stop(); true
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
@ -77,7 +77,7 @@ trait TypedActorFactory {
|
|||
val proxyVar = new AtomVar[R] //Chicken'n'egg-resolver
|
||||
val c = props.creator //Cache this to avoid closing over the Props
|
||||
val i = props.interfaces //Cache this to avoid closing over the Props
|
||||
val ap = Props(new TypedActor.TypedActor[R, T](proxyVar, c(), i)).withDeploy(props.actorProps.deploy)
|
||||
val ap = Props(new TypedActor.TypedActor[R, T](proxyVar, c(), i)).withDeploy(props.actorProps().deploy)
|
||||
typedActor.createActorRefProxy(props, proxyVar, actorFactory.actorOf(ap))
|
||||
}
|
||||
|
||||
|
|
@ -88,7 +88,7 @@ trait TypedActorFactory {
|
|||
val proxyVar = new AtomVar[R] //Chicken'n'egg-resolver
|
||||
val c = props.creator //Cache this to avoid closing over the Props
|
||||
val i = props.interfaces //Cache this to avoid closing over the Props
|
||||
val ap = Props(new akka.actor.TypedActor.TypedActor[R, T](proxyVar, c(), i)).withDeploy(props.actorProps.deploy)
|
||||
val ap = Props(new akka.actor.TypedActor.TypedActor[R, T](proxyVar, c(), i)).withDeploy(props.actorProps().deploy)
|
||||
typedActor.createActorRefProxy(props, proxyVar, actorFactory.actorOf(ap, name))
|
||||
}
|
||||
|
||||
|
|
@ -272,7 +272,7 @@ object TypedActor extends ExtensionId[TypedActorExtension] with ExtensionIdProvi
|
|||
private val me = withContext[T](createInstance)
|
||||
|
||||
override def supervisorStrategy: SupervisorStrategy = me match {
|
||||
case l: Supervisor => l.supervisorStrategy
|
||||
case l: Supervisor => l.supervisorStrategy()
|
||||
case _ => super.supervisorStrategy
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -73,7 +73,7 @@ private[akka] trait Dispatch { this: ActorCell =>
|
|||
*/
|
||||
// we need to delay the failure to the point of actor creation so we can handle
|
||||
// it properly in the normal way
|
||||
val actorClass = props.actorClass
|
||||
val actorClass = props.actorClass()
|
||||
val createMessage = mailboxType match {
|
||||
case _: ProducesMessageQueue[_] if system.mailboxes.hasRequiredType(actorClass) =>
|
||||
val req = system.mailboxes.getRequiredType(actorClass)
|
||||
|
|
|
|||
|
|
@ -103,7 +103,7 @@ private[akka] class BalancingDispatcher(
|
|||
if (messageQueue.hasMessages
|
||||
&& i.hasNext
|
||||
&& (executorService.executor match {
|
||||
case lm: LoadMetrics => !lm.atFullThrottle
|
||||
case lm: LoadMetrics => !lm.atFullThrottle()
|
||||
case _ => true
|
||||
})
|
||||
&& !registerForExecution(i.next.mailbox, false, false))
|
||||
|
|
|
|||
|
|
@ -510,10 +510,10 @@ trait QueueBasedMessageQueue extends MessageQueue with MultipleConsumerSemantics
|
|||
def hasMessages = !queue.isEmpty
|
||||
def cleanUp(owner: ActorRef, deadLetters: MessageQueue): Unit = {
|
||||
if (hasMessages) {
|
||||
var envelope = dequeue
|
||||
var envelope = dequeue()
|
||||
while (envelope ne null) {
|
||||
deadLetters.enqueue(owner, envelope)
|
||||
envelope = dequeue
|
||||
envelope = dequeue()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -139,7 +139,7 @@ private[akka] class Mailboxes(
|
|||
protected[akka] def getMailboxType(props: Props, dispatcherConfig: Config): MailboxType = {
|
||||
val id = dispatcherConfig.getString("id")
|
||||
val deploy = props.deploy
|
||||
val actorClass = props.actorClass
|
||||
val actorClass = props.actorClass()
|
||||
lazy val actorRequirement = getRequiredType(actorClass)
|
||||
|
||||
val mailboxRequirement: Class[_] = getMailboxRequirement(dispatcherConfig)
|
||||
|
|
|
|||
|
|
@ -39,7 +39,7 @@ private[akka] class LoggerMailbox(@unused owner: ActorRef, system: ActorSystem)
|
|||
override def cleanUp(owner: ActorRef, deadLetters: MessageQueue): Unit = {
|
||||
if (hasMessages) {
|
||||
val logLevel = system.eventStream.logLevel
|
||||
var envelope = dequeue
|
||||
var envelope = dequeue()
|
||||
// Drain all remaining messages to the StandardOutLogger.
|
||||
// cleanUp is called after switching out the mailbox, which is why
|
||||
// this kind of look works without a limit.
|
||||
|
|
@ -54,7 +54,7 @@ private[akka] class LoggerMailbox(@unused owner: ActorRef, system: ActorSystem)
|
|||
case _ => // skip
|
||||
}
|
||||
|
||||
envelope = dequeue
|
||||
envelope = dequeue()
|
||||
}
|
||||
}
|
||||
super.cleanUp(owner, deadLetters)
|
||||
|
|
|
|||
|
|
@ -50,7 +50,7 @@ abstract class LookupEventBus[E, S, C] extends EventBus[E, S, C] {
|
|||
type Subscriber = S
|
||||
type Classifier = C
|
||||
|
||||
override protected def mapSize: Int = LookupEventBus.this.mapSize
|
||||
override protected def mapSize: Int = LookupEventBus.this.mapSize()
|
||||
|
||||
override protected def compareSubscribers(a: S, b: S): Int =
|
||||
LookupEventBus.this.compareSubscribers(a, b)
|
||||
|
|
@ -197,7 +197,7 @@ abstract class ManagedActorEventBus[E](system: ActorSystem) extends EventBus[E,
|
|||
|
||||
override val system = ManagedActorEventBus.this.system
|
||||
|
||||
override protected def mapSize: Int = ManagedActorEventBus.this.mapSize
|
||||
override protected def mapSize: Int = ManagedActorEventBus.this.mapSize()
|
||||
|
||||
override protected def classify(event: E): ActorRef =
|
||||
ManagedActorEventBus.this.classify(event)
|
||||
|
|
|
|||
|
|
@ -30,7 +30,7 @@ class SimpleDnsCache extends Dns with PeriodicCacheCleanup with NoSerializationV
|
|||
new Cache[(String, RequestType), Resolved](
|
||||
immutable.SortedSet()(expiryEntryOrdering()),
|
||||
immutable.Map(),
|
||||
() => clock))
|
||||
() => clock()))
|
||||
|
||||
private val nanoBase = System.nanoTime()
|
||||
|
||||
|
|
|
|||
|
|
@ -769,12 +769,12 @@ class CircuitBreaker(
|
|||
materialize(body).onComplete {
|
||||
case Success(result) =>
|
||||
p.trySuccess(result)
|
||||
timeout.cancel
|
||||
timeout.cancel()
|
||||
case Failure(ex) =>
|
||||
if (p.tryFailure(ex)) {
|
||||
notifyCallFailureListeners(start)
|
||||
}
|
||||
timeout.cancel
|
||||
timeout.cancel()
|
||||
}(parasitic)
|
||||
p.future
|
||||
}
|
||||
|
|
|
|||
|
|
@ -26,7 +26,7 @@ hand checking:
|
|||
class ActorCreationBenchmark {
|
||||
implicit val system: ActorSystem = ActorSystem()
|
||||
|
||||
final val props = Props[MyActor]
|
||||
final val props = Props[MyActor]()
|
||||
|
||||
var i = 1
|
||||
def name = {
|
||||
|
|
|
|||
|
|
@ -48,7 +48,7 @@ object BenchmarkActors {
|
|||
}
|
||||
|
||||
class EchoSender(messagesPerPair: Int, latch: CountDownLatch, batchSize: Int) extends Actor {
|
||||
private val echo = context.actorOf(Props[Echo].withDispatcher(context.props.dispatcher), "echo")
|
||||
private val echo = context.actorOf(Props[Echo]().withDispatcher(context.props.dispatcher), "echo")
|
||||
|
||||
private var left = messagesPerPair / 2
|
||||
private var batch = 0
|
||||
|
|
|
|||
|
|
@ -21,7 +21,7 @@ class RouterPoolCreationBenchmark {
|
|||
implicit val system: ActorSystem = ActorSystem()
|
||||
val probe = TestProbe()
|
||||
|
||||
Props[TestActors.EchoActor]
|
||||
Props[TestActors.EchoActor]()
|
||||
|
||||
@Param(Array("1000", "2000", "3000", "4000"))
|
||||
var size = 0
|
||||
|
|
|
|||
|
|
@ -18,7 +18,7 @@ object StashCreationBenchmark {
|
|||
}
|
||||
}
|
||||
|
||||
val props = Props[StashingActor]
|
||||
val props = Props[StashingActor]()
|
||||
}
|
||||
|
||||
@State(Scope.Benchmark)
|
||||
|
|
|
|||
|
|
@ -61,7 +61,7 @@ class TellOnlyBenchmark {
|
|||
|
||||
@Setup(Level.Iteration)
|
||||
def setupIteration(): Unit = {
|
||||
actor = system.actorOf(Props[TellOnlyBenchmark.Echo].withDispatcher("dropping-dispatcher"))
|
||||
actor = system.actorOf(Props[TellOnlyBenchmark.Echo]().withDispatcher("dropping-dispatcher"))
|
||||
probe = TestProbe()
|
||||
probe.watch(actor)
|
||||
probe.send(actor, message)
|
||||
|
|
|
|||
|
|
@ -52,7 +52,7 @@ class LevelDbBatchingBenchmark {
|
|||
SharedLeveldbJournal.setStore(store, sys)
|
||||
|
||||
probe = TestProbe()(sys)
|
||||
store = sys.actorOf(Props[SharedLeveldbStore], "store")
|
||||
store = sys.actorOf(Props[SharedLeveldbStore](), "store")
|
||||
}
|
||||
|
||||
@TearDown(Level.Trial)
|
||||
|
|
|
|||
|
|
@ -45,7 +45,7 @@ class PersistentActorWithAtLeastOnceDeliveryBenchmark {
|
|||
|
||||
storageLocations.foreach(FileUtils.deleteDirectory)
|
||||
|
||||
destinationActor = system.actorOf(Props[DestinationActor], "destination")
|
||||
destinationActor = system.actorOf(Props[DestinationActor](), "destination")
|
||||
|
||||
noPersistPersistentActorWithAtLeastOnceDelivery = system.actorOf(
|
||||
Props(classOf[NoPersistPersistentActorWithAtLeastOnceDelivery], dataCount, probe.ref, destinationActor.path),
|
||||
|
|
|
|||
|
|
@ -108,7 +108,7 @@ class JmxMetricsCollector(address: Address, decayFactor: Double) extends Metrics
|
|||
* Samples and collects new data points.
|
||||
* Creates a new instance each time.
|
||||
*/
|
||||
def sample(): NodeMetrics = NodeMetrics(address, newTimestamp, metrics)
|
||||
def sample(): NodeMetrics = NodeMetrics(address, newTimestamp, metrics())
|
||||
|
||||
/**
|
||||
* Generate metrics set.
|
||||
|
|
@ -209,7 +209,7 @@ class SigarMetricsCollector(address: Address, decayFactor: Double, sigar: SigarP
|
|||
override def metrics(): Set[Metric] = {
|
||||
// Must obtain cpuPerc in one shot. See https://github.com/akka/akka/issues/16121
|
||||
val cpuPerc = sigar.getCpuPerc
|
||||
super.metrics.union(Set(cpuCombined(cpuPerc), cpuStolen(cpuPerc)).flatten)
|
||||
super.metrics().union(Set(cpuCombined(cpuPerc), cpuStolen(cpuPerc)).flatten)
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
|||
|
|
@ -111,7 +111,7 @@ abstract class ClusterMetricsEnabledSpec
|
|||
//awaitAssert(clusterView.clusterMetrics.size should ===(roles.size))
|
||||
awaitAssert(metricsView.clusterMetrics.size should ===(roles.size))
|
||||
val collector = MetricsCollector(cluster.system)
|
||||
collector.sample.metrics.size should be > (3)
|
||||
collector.sample().metrics.size should be > (3)
|
||||
enterBarrier("after")
|
||||
}
|
||||
"reflect the correct number of node metrics in cluster view" in within(30 seconds) {
|
||||
|
|
@ -150,7 +150,7 @@ abstract class ClusterMetricsDisabledSpec
|
|||
//clusterView.clusterMetrics.size should ===(0)
|
||||
metricsView.clusterMetrics.size should ===(0)
|
||||
ClusterMetricsExtension(system).subscribe(testActor)
|
||||
expectNoMessage
|
||||
expectNoMessage()
|
||||
// TODO ensure same contract
|
||||
//clusterView.clusterMetrics.size should ===(0)
|
||||
metricsView.clusterMetrics.size should ===(0)
|
||||
|
|
|
|||
|
|
@ -150,7 +150,7 @@ abstract class AdaptiveLoadBalancingRouterSpec
|
|||
ClusterRouterPool(
|
||||
local = AdaptiveLoadBalancingPool(HeapMetricsSelector),
|
||||
settings = ClusterRouterPoolSettings(totalInstances = 10, maxInstancesPerNode = 1, allowLocalRoutees = true))
|
||||
.props(Props[Echo]),
|
||||
.props(Props[Echo]()),
|
||||
name)
|
||||
// it may take some time until router receives cluster member events
|
||||
awaitAssert { currentRoutees(router).size should ===(roles.size) }
|
||||
|
|
@ -201,7 +201,7 @@ abstract class AdaptiveLoadBalancingRouterSpec
|
|||
|
||||
runOn(node2) {
|
||||
within(20.seconds) {
|
||||
system.actorOf(Props[Memory], "memory") ! AllocateMemory
|
||||
system.actorOf(Props[Memory](), "memory") ! AllocateMemory
|
||||
expectMsg("done")
|
||||
}
|
||||
}
|
||||
|
|
@ -230,7 +230,7 @@ abstract class AdaptiveLoadBalancingRouterSpec
|
|||
|
||||
"create routees from configuration" taggedAs LongRunningTest in {
|
||||
runOn(node1) {
|
||||
val router3 = system.actorOf(FromConfig.props(Props[Memory]), "router3")
|
||||
val router3 = system.actorOf(FromConfig.props(Props[Memory]()), "router3")
|
||||
// it may take some time until router receives cluster member events
|
||||
awaitAssert { currentRoutees(router3).size should ===(9) }
|
||||
val routees = currentRoutees(router3)
|
||||
|
|
@ -241,7 +241,7 @@ abstract class AdaptiveLoadBalancingRouterSpec
|
|||
|
||||
"create routees from cluster.enabled configuration" taggedAs LongRunningTest in {
|
||||
runOn(node1) {
|
||||
val router4 = system.actorOf(FromConfig.props(Props[Memory]), "router4")
|
||||
val router4 = system.actorOf(FromConfig.props(Props[Memory]()), "router4")
|
||||
// it may take some time until router receives cluster member events
|
||||
awaitAssert { currentRoutees(router4).size should ===(6) }
|
||||
val routees = currentRoutees(router4)
|
||||
|
|
|
|||
|
|
@@ -107,8 +107,8 @@ abstract class StatsSampleSpec
|
|||
Cluster(system).join(firstAddress)
|
||||
//#join
|
||||
|
||||
system.actorOf(Props[StatsWorker], "statsWorker")
|
||||
system.actorOf(Props[StatsService], "statsService")
|
||||
system.actorOf(Props[StatsWorker](), "statsWorker")
|
||||
system.actorOf(Props[StatsService](), "statsService")
|
||||
|
||||
receiveN(3).collect { case MemberUp(m) => m.address }.toSet should be(
|
||||
Set(firstAddress, secondAddress, thirdAddress))
|
||||
|
|
|
|||
|
|
@@ -15,7 +15,7 @@ class StatsService extends Actor {
|
|||
// This router is used both with lookup and deploy of routees. If you
|
||||
// have a router with only lookup of routees you can use Props.empty
|
||||
// instead of Props[StatsWorker.class].
|
||||
val workerRouter = context.actorOf(FromConfig.props(Props[StatsWorker]), name = "workerRouter")
|
||||
val workerRouter = context.actorOf(FromConfig.props(Props[StatsWorker]()), name = "workerRouter")
|
||||
|
||||
def receive = {
|
||||
case StatsJob(text) if text != "" =>
|
||||
|
|
@@ -76,7 +76,7 @@ abstract class StatsService3 extends Actor {
|
|||
ClusterRouterPool(
|
||||
ConsistentHashingPool(0),
|
||||
ClusterRouterPoolSettings(totalInstances = 100, maxInstancesPerNode = 3, allowLocalRoutees = false))
|
||||
.props(Props[StatsWorker]),
|
||||
.props(Props[StatsWorker]()),
|
||||
name = "workerRouter3")
|
||||
//#router-deploy-in-code
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -82,7 +82,7 @@ class EWMASpec extends AkkaSpec(MetricsConfig.defaultEnabled) with MetricsCollec
|
|||
// wait a while between each message to give the metrics a chance to change
|
||||
Thread.sleep(100)
|
||||
usedMemory = usedMemory ++ Array.fill(1024)(ThreadLocalRandom.current.nextInt(127).toByte)
|
||||
val changes = collector.sample.metrics.flatMap { latest =>
|
||||
val changes = collector.sample().metrics.flatMap { latest =>
|
||||
streamingDataSet.get(latest.name) match {
|
||||
case None => Some(latest)
|
||||
case Some(previous) =>
|
||||
|
|
|
|||
|
|
@@ -146,13 +146,13 @@ class MetricsGossipSpec
|
|||
*/
|
||||
def newSample(previousSample: Set[Metric]): Set[Metric] = {
|
||||
// Metric.equals is based on name equality
|
||||
collector.sample.metrics.filter(previousSample.contains) ++ previousSample
|
||||
collector.sample().metrics.filter(previousSample.contains) ++ previousSample
|
||||
}
|
||||
|
||||
"A MetricsGossip" must {
|
||||
"add new NodeMetrics" in {
|
||||
val m1 = NodeMetrics(Address("akka", "sys", "a", 2554), newTimestamp, collector.sample.metrics)
|
||||
val m2 = NodeMetrics(Address("akka", "sys", "a", 2555), newTimestamp, collector.sample.metrics)
|
||||
val m1 = NodeMetrics(Address("akka", "sys", "a", 2554), newTimestamp, collector.sample().metrics)
|
||||
val m2 = NodeMetrics(Address("akka", "sys", "a", 2555), newTimestamp, collector.sample().metrics)
|
||||
|
||||
m1.metrics.size should be > 3
|
||||
m2.metrics.size should be > 3
|
||||
|
|
@@ -168,8 +168,8 @@ class MetricsGossipSpec
|
|||
}
|
||||
|
||||
"merge peer metrics" in {
|
||||
val m1 = NodeMetrics(Address("akka", "sys", "a", 2554), newTimestamp, collector.sample.metrics)
|
||||
val m2 = NodeMetrics(Address("akka", "sys", "a", 2555), newTimestamp, collector.sample.metrics)
|
||||
val m1 = NodeMetrics(Address("akka", "sys", "a", 2554), newTimestamp, collector.sample().metrics)
|
||||
val m2 = NodeMetrics(Address("akka", "sys", "a", 2555), newTimestamp, collector.sample().metrics)
|
||||
|
||||
val g1 = MetricsGossip.empty :+ m1 :+ m2
|
||||
g1.nodes.size should ===(2)
|
||||
|
|
@@ -183,9 +183,9 @@ class MetricsGossipSpec
|
|||
}
|
||||
|
||||
"merge an existing metric set for a node and update node ring" in {
|
||||
val m1 = NodeMetrics(Address("akka", "sys", "a", 2554), newTimestamp, collector.sample.metrics)
|
||||
val m2 = NodeMetrics(Address("akka", "sys", "a", 2555), newTimestamp, collector.sample.metrics)
|
||||
val m3 = NodeMetrics(Address("akka", "sys", "a", 2556), newTimestamp, collector.sample.metrics)
|
||||
val m1 = NodeMetrics(Address("akka", "sys", "a", 2554), newTimestamp, collector.sample().metrics)
|
||||
val m2 = NodeMetrics(Address("akka", "sys", "a", 2555), newTimestamp, collector.sample().metrics)
|
||||
val m3 = NodeMetrics(Address("akka", "sys", "a", 2556), newTimestamp, collector.sample().metrics)
|
||||
val m2Updated = m2.copy(metrics = newSample(m2.metrics), timestamp = m2.timestamp + 1000)
|
||||
|
||||
val g1 = MetricsGossip.empty :+ m1 :+ m2
|
||||
|
|
@@ -204,14 +204,14 @@ class MetricsGossipSpec
|
|||
}
|
||||
|
||||
"get the current NodeMetrics if it exists in the local nodes" in {
|
||||
val m1 = NodeMetrics(Address("akka", "sys", "a", 2554), newTimestamp, collector.sample.metrics)
|
||||
val m1 = NodeMetrics(Address("akka", "sys", "a", 2554), newTimestamp, collector.sample().metrics)
|
||||
val g1 = MetricsGossip.empty :+ m1
|
||||
g1.nodeMetricsFor(m1.address).map(_.metrics) should ===(Some(m1.metrics))
|
||||
}
|
||||
|
||||
"remove a node if it is no longer Up" in {
|
||||
val m1 = NodeMetrics(Address("akka", "sys", "a", 2554), newTimestamp, collector.sample.metrics)
|
||||
val m2 = NodeMetrics(Address("akka", "sys", "a", 2555), newTimestamp, collector.sample.metrics)
|
||||
val m1 = NodeMetrics(Address("akka", "sys", "a", 2554), newTimestamp, collector.sample().metrics)
|
||||
val m2 = NodeMetrics(Address("akka", "sys", "a", 2555), newTimestamp, collector.sample().metrics)
|
||||
|
||||
val g1 = MetricsGossip.empty :+ m1 :+ m2
|
||||
g1.nodes.size should ===(2)
|
||||
|
|
@@ -223,8 +223,8 @@ class MetricsGossipSpec
|
|||
}
|
||||
|
||||
"filter nodes" in {
|
||||
val m1 = NodeMetrics(Address("akka", "sys", "a", 2554), newTimestamp, collector.sample.metrics)
|
||||
val m2 = NodeMetrics(Address("akka", "sys", "a", 2555), newTimestamp, collector.sample.metrics)
|
||||
val m1 = NodeMetrics(Address("akka", "sys", "a", 2554), newTimestamp, collector.sample().metrics)
|
||||
val m2 = NodeMetrics(Address("akka", "sys", "a", 2555), newTimestamp, collector.sample().metrics)
|
||||
|
||||
val g1 = MetricsGossip.empty :+ m1 :+ m2
|
||||
g1.nodes.size should ===(2)
|
||||
|
|
@@ -243,16 +243,20 @@ class MetricValuesSpec extends AkkaSpec(MetricsConfig.defaultEnabled) with Metri
|
|||
|
||||
val collector = createMetricsCollector
|
||||
|
||||
val node1 = NodeMetrics(Address("akka", "sys", "a", 2554), 1, collector.sample.metrics)
|
||||
val node2 = NodeMetrics(Address("akka", "sys", "a", 2555), 1, collector.sample.metrics)
|
||||
val node1 = NodeMetrics(Address("akka", "sys", "a", 2554), 1, collector.sample().metrics)
|
||||
val node2 = NodeMetrics(Address("akka", "sys", "a", 2555), 1, collector.sample().metrics)
|
||||
|
||||
val nodes: Seq[NodeMetrics] = {
|
||||
(1 to 100).foldLeft(List(node1, node2)) { (nodes, _) =>
|
||||
nodes.map { n =>
|
||||
n.copy(metrics = collector.sample.metrics.flatMap(latest =>
|
||||
n.metrics.collect {
|
||||
case streaming if latest.sameAs(streaming) => streaming :+ latest
|
||||
}))
|
||||
n.copy(
|
||||
metrics = collector
|
||||
.sample()
|
||||
.metrics
|
||||
.flatMap(latest =>
|
||||
n.metrics.collect {
|
||||
case streaming if latest.sameAs(streaming) => streaming :+ latest
|
||||
}))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -23,8 +23,8 @@ class MetricsCollectorSpec
|
|||
|
||||
"merge 2 metrics that are tracking the same metric" in {
|
||||
for (_ <- 1 to 20) {
|
||||
val sample1 = collector.sample.metrics
|
||||
val sample2 = collector.sample.metrics
|
||||
val sample1 = collector.sample().metrics
|
||||
val sample2 = collector.sample().metrics
|
||||
sample2.flatMap(latest =>
|
||||
sample1.collect {
|
||||
case peer if latest.sameAs(peer) =>
|
||||
|
|
@@ -34,8 +34,8 @@ class MetricsCollectorSpec
|
|||
m
|
||||
})
|
||||
|
||||
val sample3 = collector.sample.metrics
|
||||
val sample4 = collector.sample.metrics
|
||||
val sample3 = collector.sample().metrics
|
||||
val sample4 = collector.sample().metrics
|
||||
sample4.flatMap(latest =>
|
||||
sample3.collect {
|
||||
case peer if latest.sameAs(peer) =>
|
||||
|
|
@@ -55,7 +55,7 @@ class MetricsCollectorSpec
|
|||
}
|
||||
|
||||
"collect accurate metrics for a node" in {
|
||||
val sample = collector.sample
|
||||
val sample = collector.sample()
|
||||
val metrics = sample.metrics.collect { case m => (m.name, m.value) }
|
||||
val used = metrics.collectFirst { case (HeapMemoryUsed, b) => b }
|
||||
val committed = metrics.collectFirst { case (HeapMemoryCommitted, b) => b }
|
||||
|
|
@@ -93,7 +93,7 @@ class MetricsCollectorSpec
|
|||
|
||||
"collect 50 node metrics samples in an acceptable duration" taggedAs LongRunningTest in within(10 seconds) {
|
||||
(1 to 50).foreach { _ =>
|
||||
val sample = collector.sample
|
||||
val sample = collector.sample()
|
||||
sample.metrics.size should be >= 3
|
||||
Thread.sleep(100)
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -67,14 +67,14 @@ abstract class MultiDcClusterShardingSpec
|
|||
"init sharding" in {
|
||||
val sharding = ClusterSharding(typedSystem)
|
||||
val shardRegion: ActorRef[ShardingEnvelope[Command]] = sharding.init(Entity(typeKey)(_ => MultiDcPinger()))
|
||||
val probe = TestProbe[Pong]
|
||||
val probe = TestProbe[Pong]()
|
||||
shardRegion ! ShardingEnvelope(entityId, Ping(probe.ref))
|
||||
probe.expectMessage(max = 15.seconds, Pong(cluster.selfMember.dataCenter))
|
||||
enterBarrier("sharding-initialized")
|
||||
}
|
||||
|
||||
"be able to message via entity ref" in {
|
||||
val probe = TestProbe[Pong]
|
||||
val probe = TestProbe[Pong]()
|
||||
val entityRef = ClusterSharding(typedSystem).entityRefFor(typeKey, entityId)
|
||||
entityRef ! Ping(probe.ref)
|
||||
probe.expectMessage(Pong(cluster.selfMember.dataCenter))
|
||||
|
|
@@ -94,7 +94,7 @@ abstract class MultiDcClusterShardingSpec
|
|||
runOn(first, second) {
|
||||
val proxy: ActorRef[ShardingEnvelope[Command]] = ClusterSharding(typedSystem).init(
|
||||
Entity(typeKey)(_ => MultiDcPinger()).withSettings(ClusterShardingSettings(typedSystem).withDataCenter("dc2")))
|
||||
val probe = TestProbe[Pong]
|
||||
val probe = TestProbe[Pong]()
|
||||
proxy ! ShardingEnvelope(entityId, Ping(probe.ref))
|
||||
probe.expectMessage(remainingOrDefault, Pong("dc2"))
|
||||
}
|
||||
|
|
@@ -108,7 +108,7 @@ abstract class MultiDcClusterShardingSpec
|
|||
val proxy: ActorRef[ShardingEnvelope[Command]] =
|
||||
ClusterSharding(system).init(Entity(typeKey)(_ => MultiDcPinger()).withDataCenter("dc2"))
|
||||
//#proxy-dc
|
||||
val probe = TestProbe[Pong]
|
||||
val probe = TestProbe[Pong]()
|
||||
proxy ! ShardingEnvelope(entityId, Ping(probe.ref))
|
||||
probe.expectMessage(remainingOrDefault, Pong("dc2"))
|
||||
}
|
||||
|
|
@@ -125,7 +125,7 @@ abstract class MultiDcClusterShardingSpec
|
|||
val entityRef = ClusterSharding(system).entityRefFor(typeKey, entityId, "dc2")
|
||||
//#proxy-dc-entityref
|
||||
|
||||
val probe = TestProbe[Pong]
|
||||
val probe = TestProbe[Pong]()
|
||||
entityRef ! Ping(probe.ref)
|
||||
probe.expectMessage(remainingOrDefault, Pong("dc2"))
|
||||
}
|
||||
|
|
|
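The typed test-kit probes follow the same pattern: `TestProbe[Pong]` auto-applies the companion's `apply()` (which takes the actor system implicitly), and the rule rewrites it to `TestProbe[Pong]()`. A brief, hypothetical spec sketch assuming the akka-actor-testkit-typed ScalaTest base class; it is not part of this commit:

```scala
import akka.actor.testkit.typed.scaladsl.{ ScalaTestWithActorTestKit, TestProbe }
import org.scalatest.wordspec.AnyWordSpecLike

// Illustrative spec only: demonstrates the TestProbe[T]() form used in the sharding specs above.
class ProbeStyleSpec extends ScalaTestWithActorTestKit with AnyWordSpecLike {
  "a typed probe" must {
    "be created with an explicit empty argument list" in {
      // val probe = TestProbe[String]  // auto-application, flagged by ExplicitNonNullaryApply
      val probe = TestProbe[String]()   // explicit application
      probe.ref ! "ping"
      probe.expectMessage("ping")
    }
  }
}
```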
|||
|
|
@@ -181,7 +181,7 @@ class ClusterSharding(system: ExtendedActorSystem) extends Extension {
|
|||
val guardianName: String =
|
||||
system.settings.config.getString("akka.cluster.sharding.guardian-name")
|
||||
val dispatcher = system.settings.config.getString("akka.cluster.sharding.use-dispatcher")
|
||||
system.systemActorOf(Props[ClusterShardingGuardian].withDispatcher(dispatcher), guardianName)
|
||||
system.systemActorOf(Props[ClusterShardingGuardian]().withDispatcher(dispatcher), guardianName)
|
||||
}
|
||||
|
||||
/**
|
||||
|
|
|
|||
|
|
@@ -91,7 +91,7 @@ object RemoveInternalClusterShardingData {
|
|||
if (journalPluginId == "") system.settings.config.getString("akka.persistence.journal.plugin")
|
||||
else journalPluginId
|
||||
if (resolvedJournalPluginId == "akka.persistence.journal.leveldb-shared") {
|
||||
val store = system.actorOf(Props[SharedLeveldbStore], "store")
|
||||
val store = system.actorOf(Props[SharedLeveldbStore](), "store")
|
||||
SharedLeveldbJournal.setStore(store, system)
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@@ -113,7 +113,7 @@ abstract class ClusterShardingCustomShardAllocationSpec(multiNodeConfig: Cluster
|
|||
|
||||
lazy val region = ClusterSharding(system).shardRegion("Entity")
|
||||
|
||||
lazy val allocator = system.actorOf(Props[Allocator], "allocator")
|
||||
lazy val allocator = system.actorOf(Props[Allocator](), "allocator")
|
||||
|
||||
s"Cluster sharding ($mode) with custom allocation strategy" must {
|
||||
|
||||
|
|
|
|||
|
|
@@ -95,7 +95,7 @@ abstract class ClusterShardingFailureSpec(multiNodeConfig: ClusterShardingFailur
|
|||
startSharding(
|
||||
system,
|
||||
typeName = "Entity",
|
||||
entityProps = Props[Entity],
|
||||
entityProps = Props[Entity](),
|
||||
extractEntityId = extractEntityId,
|
||||
extractShardId = extractShardId))
|
||||
}
|
||||
|
|
|
|||
|
|
@@ -56,7 +56,7 @@ abstract class ClusterShardingGracefulShutdownSpec(multiNodeConfig: ClusterShard
|
|||
startSharding(
|
||||
system,
|
||||
typeName,
|
||||
entityProps = Props[ShardedEntity],
|
||||
entityProps = Props[ShardedEntity](),
|
||||
extractEntityId = MultiNodeClusterShardingSpec.intExtractEntityId,
|
||||
extractShardId = MultiNodeClusterShardingSpec.intExtractShardId,
|
||||
allocationStrategy =
|
||||
|
|
|
|||
|
|
@@ -91,7 +91,7 @@ abstract class ClusterShardingLeavingSpec(multiNodeConfig: ClusterShardingLeavin
|
|||
startSharding(
|
||||
system,
|
||||
typeName = "Entity",
|
||||
entityProps = Props[Entity],
|
||||
entityProps = Props[Entity](),
|
||||
extractEntityId = extractEntityId,
|
||||
extractShardId = extractShardId)
|
||||
}
|
||||
|
|
@@ -120,7 +120,7 @@ abstract class ClusterShardingLeavingSpec(multiNodeConfig: ClusterShardingLeavin
|
|||
|
||||
"initialize shards" in {
|
||||
runOn(first) {
|
||||
val shardLocations = system.actorOf(Props[ShardLocations], "shardLocations")
|
||||
val shardLocations = system.actorOf(Props[ShardLocations](), "shardLocations")
|
||||
val locations = (for (n <- 1 to 10) yield {
|
||||
val id = n.toString
|
||||
region ! Ping(id)
|
||||
|
|
|
|||
|
|
@@ -66,7 +66,7 @@ abstract class ClusterShardingRegistrationCoordinatedShutdownSpec
|
|||
startSharding(
|
||||
system,
|
||||
typeName = "Entity",
|
||||
entityProps = Props[ShardedEntity],
|
||||
entityProps = Props[ShardedEntity](),
|
||||
extractEntityId = MultiNodeClusterShardingSpec.intExtractEntityId,
|
||||
extractShardId = MultiNodeClusterShardingSpec.intExtractShardId)
|
||||
|
||||
|
|
|
|||
|
|
@@ -45,7 +45,7 @@ abstract class ClusterShardingSingleShardPerEntitySpec
|
|||
startSharding(
|
||||
system,
|
||||
typeName = "Entity",
|
||||
entityProps = Props[ShardedEntity],
|
||||
entityProps = Props[ShardedEntity](),
|
||||
extractEntityId = MultiNodeClusterShardingSpec.intExtractEntityId,
|
||||
extractShardId = MultiNodeClusterShardingSpec.intExtractShardId))
|
||||
}
|
||||
|
|
|
|||
Some files were not shown because too many files have changed in this diff