format source with scalafmt, #26511

Auto Format, 2019-03-13 10:56:20 +01:00, committed by Patrik Nordwall
parent 2ba9b988df
commit 75579bed17
779 changed files with 15729 additions and 13096 deletions
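
The new shape used throughout this diff (break after the opening parenthesis, one argument per line, closing parenthesis kept on the last argument) comes from the project's scalafmt configuration, which is not itself part of these hunks. A minimal .scalafmt.conf sketch of keys that exist in scalafmt 1.x and produce this shape; an assumption for illustration, not Akka's actual committed config:

    # hypothetical sketch, not the committed Akka configuration
    version = 1.5.1
    maxColumn = 120
    # do not align continuation lines under the opening parenthesis
    align.openParenCallSite = false
    align.openParenDefnSite = false
    # keep the closing parenthesis on the last argument line
    danglingParentheses = false

Running the scalafmt CLI (or an sbt scalafmt task) once over the whole source tree then rewrites every call and definition site in a single mechanical pass, which is how a pure formatting commit ends up touching 779 files.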


@@ -106,8 +106,9 @@ abstract class ClusterDeathWatchSpec
       }
       runOn(second, third, fourth) {
-        system.actorOf(Props(new Actor { def receive = Actor.emptyBehavior }).withDeploy(Deploy.local),
-                       name = "subject")
+        system.actorOf(
+          Props(new Actor { def receive = Actor.emptyBehavior }).withDeploy(Deploy.local),
+          name = "subject")
       enterBarrier("subjected-started")
       enterBarrier("watch-established")
       runOn(third) {
@@ -157,8 +158,9 @@ abstract class ClusterDeathWatchSpec
     "be able to watch actor before node joins cluster, ClusterRemoteWatcher takes over from RemoteWatcher" in within(
       20 seconds) {
       runOn(fifth) {
-        system.actorOf(Props(new Actor { def receive = Actor.emptyBehavior }).withDeploy(Deploy.local),
-                       name = "subject5")
+        system.actorOf(
+          Props(new Actor { def receive = Actor.emptyBehavior }).withDeploy(Deploy.local),
+          name = "subject5")
       }
       enterBarrier("subjected-started")
@@ -242,9 +244,8 @@ abstract class ClusterDeathWatchSpec
       catch {
         case _: TimeoutException =>
           fail(
-            "Failed to stop [%s] within [%s] \n%s".format(system.name,
-                                                          timeout,
-                                                          system.asInstanceOf[ActorSystemImpl].printTree))
+            "Failed to stop [%s] within [%s] \n%s"
+              .format(system.name, timeout, system.asInstanceOf[ActorSystemImpl].printTree))
       }
       // signal to the first node that fourth is done


@@ -237,8 +237,9 @@ abstract class MultiDcSplitBrainSpec extends MultiNodeSpec(MultiDcSplitBrainMult
       Await.ready(system.whenTerminated, remaining)
       val port = Cluster(system).selfAddress.port.get
-      val restartedSystem = ActorSystem(system.name,
-                                        ConfigFactory.parseString(s"""
+      val restartedSystem = ActorSystem(
+        system.name,
+        ConfigFactory.parseString(s"""
           akka.remote.netty.tcp.port = $port
           akka.remote.artery.canonical.port = $port
           akka.coordinated-shutdown.terminate-actor-system = on


@@ -123,26 +123,28 @@ trait MultiNodeClusterSpec extends Suite with STMultiNodeSpec with WatchedByCoro
   def muteLog(sys: ActorSystem = system): Unit = {
     if (!sys.log.isDebugEnabled) {
-      Seq(".*Cluster Node.* - registered cluster JMX MBean.*",
-          ".*Cluster Node.* - is starting up.*",
-          ".*Shutting down cluster Node.*",
-          ".*Cluster node successfully shut down.*",
-          ".*Using a dedicated scheduler for cluster.*").foreach { s =>
+      Seq(
+        ".*Cluster Node.* - registered cluster JMX MBean.*",
+        ".*Cluster Node.* - is starting up.*",
+        ".*Shutting down cluster Node.*",
+        ".*Cluster node successfully shut down.*",
+        ".*Using a dedicated scheduler for cluster.*").foreach { s =>
         sys.eventStream.publish(Mute(EventFilter.info(pattern = s)))
       }
-      muteDeadLetters(classOf[ClusterHeartbeatSender.Heartbeat],
-                      classOf[ClusterHeartbeatSender.HeartbeatRsp],
-                      classOf[GossipEnvelope],
-                      classOf[GossipStatus],
-                      classOf[InternalClusterAction.Tick],
-                      classOf[akka.actor.PoisonPill],
-                      classOf[akka.dispatch.sysmsg.DeathWatchNotification],
-                      classOf[akka.remote.transport.AssociationHandle.Disassociated],
-                      // akka.remote.transport.AssociationHandle.Disassociated.getClass,
-                      classOf[akka.remote.transport.ActorTransportAdapter.DisassociateUnderlying],
-                      // akka.remote.transport.ActorTransportAdapter.DisassociateUnderlying.getClass,
-                      classOf[akka.remote.transport.AssociationHandle.InboundPayload])(sys)
+      muteDeadLetters(
+        classOf[ClusterHeartbeatSender.Heartbeat],
+        classOf[ClusterHeartbeatSender.HeartbeatRsp],
+        classOf[GossipEnvelope],
+        classOf[GossipStatus],
+        classOf[InternalClusterAction.Tick],
+        classOf[akka.actor.PoisonPill],
+        classOf[akka.dispatch.sysmsg.DeathWatchNotification],
+        classOf[akka.remote.transport.AssociationHandle.Disassociated],
+        // akka.remote.transport.AssociationHandle.Disassociated.getClass,
+        classOf[akka.remote.transport.ActorTransportAdapter.DisassociateUnderlying],
+        // akka.remote.transport.ActorTransportAdapter.DisassociateUnderlying.getClass,
+        classOf[akka.remote.transport.AssociationHandle.InboundPayload])(sys)
     }
   }
@@ -299,8 +301,9 @@ trait MultiNodeClusterSpec extends Suite with STMultiNodeSpec with WatchedByCoro
     val expectedLeader = roleOfLeader(nodesInCluster)
     val leader = clusterView.leader
     val isLeader = leader == Some(clusterView.selfAddress)
-    assert(isLeader == isNode(expectedLeader),
-           "expectedLeader [%s], got leader [%s], members [%s]".format(expectedLeader, leader, clusterView.members))
+    assert(
+      isLeader == isNode(expectedLeader),
+      "expectedLeader [%s], got leader [%s], members [%s]".format(expectedLeader, leader, clusterView.members))
     clusterView.status should (be(MemberStatus.Up).or(be(MemberStatus.Leaving)))
   }

@@ -308,9 +311,10 @@ trait MultiNodeClusterSpec extends Suite with STMultiNodeSpec with WatchedByCoro
    * Wait until the expected number of members has status Up has been reached.
    * Also asserts that nodes in the 'canNotBePartOfMemberRing' are *not* part of the cluster ring.
    */
-  def awaitMembersUp(numberOfMembers: Int,
-                     canNotBePartOfMemberRing: Set[Address] = Set.empty,
-                     timeout: FiniteDuration = 25.seconds): Unit = {
+  def awaitMembersUp(
+      numberOfMembers: Int,
+      canNotBePartOfMemberRing: Set[Address] = Set.empty,
+      timeout: FiniteDuration = 25.seconds): Unit = {
     within(timeout) {
       if (!canNotBePartOfMemberRing.isEmpty) // don't run this on an empty set
         awaitAssert(canNotBePartOfMemberRing.foreach(a => clusterView.members.map(_.address) should not contain (a)))
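
One detail of the new style, visible in this hunk and again in the StressSpec hunks below (loop, exerciseRouters): definition-site parameter lists are indented four spaces while call-site argument lists are indented two, which keeps definitions and calls visually distinct. These correspond to scalafmt's default continuationIndent.defnSite = 4 and continuationIndent.callSite = 2. A standalone sketch with hypothetical names:

    // hypothetical Scala sketch of the two continuation indents; not code from this diff
    object IndentWidths {
      // definition site: parameters carry a four-space continuation indent
      def awaitAll(
          numberOfMembers: Int,
          timeoutSeconds: Int = 25): Unit = {
        // call site: arguments carry a two-space continuation indent
        println(
          s"waiting for $numberOfMembers members for $timeoutSeconds s")
      }
    }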


@@ -64,9 +64,10 @@ abstract class QuickRestartSpec
             system.name,
             ConfigFactory.parseString(s"akka.cluster.roles = [round-$n]").withFallback(system.settings.config))
         else
-          ActorSystem(system.name,
-                      // use the same port
-                      ConfigFactory.parseString(s"""
+          ActorSystem(
+            system.name,
+            // use the same port
+            ConfigFactory.parseString(s"""
             akka.cluster.roles = [round-$n]
             akka.remote.netty.tcp.port = ${Cluster(restartingSystem).selfAddress.port.get}
             akka.remote.artery.canonical.port = ${Cluster(restartingSystem).selfAddress.port.get}


@@ -53,8 +53,9 @@ abstract class RestartFirstSeedNodeSpec
   def missingSeed = address(seed3).copy(port = Some(61313))
   def seedNodes: immutable.IndexedSeq[Address] = Vector(seedNode1Address, seed2, seed3, missingSeed)
-  lazy val restartedSeed1System = ActorSystem(system.name,
-                                              ConfigFactory.parseString(s"""
+  lazy val restartedSeed1System = ActorSystem(
+    system.name,
+    ConfigFactory.parseString(s"""
       akka.remote.netty.tcp.port = ${seedNodes.head.port.get}
       akka.remote.artery.canonical.port = ${seedNodes.head.port.get}
       """).withFallback(system.settings.config))


@@ -53,8 +53,9 @@ abstract class RestartNode2SpecSpec
   def seedNodes: immutable.IndexedSeq[Address] = Vector(seedNode1Address, seed2)
   // this is the node that will attempt to re-join, keep gate times low so it can retry quickly
-  lazy val restartedSeed1System = ActorSystem(system.name,
-                                              ConfigFactory.parseString(s"""
+  lazy val restartedSeed1System = ActorSystem(
+    system.name,
+    ConfigFactory.parseString(s"""
      akka.remote.netty.tcp.port = ${seedNodes.head.port.get}
      akka.remote.artery.canonical.port = ${seedNodes.head.port.get}
      #akka.remote.retry-gate-closed-for = 1s


@@ -52,8 +52,9 @@ abstract class RestartNode3Spec
   def seedNodes: immutable.IndexedSeq[Address] = Vector(first)
-  lazy val restartedSecondSystem = ActorSystem(system.name,
-                                               ConfigFactory.parseString(s"""
+  lazy val restartedSecondSystem = ActorSystem(
+    system.name,
+    ConfigFactory.parseString(s"""
       akka.remote.artery.canonical.port = ${secondUniqueAddress.address.port.get}
       akka.remote.netty.tcp.port = ${secondUniqueAddress.address.port.get}
       """).withFallback(system.settings.config))


@@ -72,8 +72,9 @@ abstract class RestartNodeSpec
   def seedNodes: immutable.IndexedSeq[Address] = Vector(first, secondUniqueAddress.address, third)
-  lazy val restartedSecondSystem = ActorSystem(system.name,
-                                               ConfigFactory.parseString(s"""
+  lazy val restartedSecondSystem = ActorSystem(
+    system.name,
+    ConfigFactory.parseString(s"""
       akka.remote.netty.tcp.port = ${secondUniqueAddress.address.port.get}
       akka.remote.artery.canonical.port = ${secondUniqueAddress.address.port.get}
       """).withFallback(system.settings.config))


@@ -218,8 +218,9 @@ private[cluster] object StressMultiJvmSpec extends MultiNodeConfig {
       numberOfNodesShutdownOneByOneSmall + numberOfNodesShutdownOneByOneLarge + numberOfNodesShutdown <= totalNumberOfNodes - 3,
       s"specified number of leaving/shutdown nodes <= ${totalNumberOfNodes - 3}")

-    require(numberOfNodesJoinRemove <= totalNumberOfNodes,
-            s"nr-of-nodes-join-remove should be <= ${totalNumberOfNodes}")
+    require(
+      numberOfNodesJoinRemove <= totalNumberOfNodes,
+      s"nr-of-nodes-join-remove should be <= ${totalNumberOfNodes}")

     override def toString: String = {
       testConfig.withFallback(ConfigFactory.parseString(s"nrOfNodes=${totalNumberOfNodes}")).root.render
@@ -379,10 +380,11 @@ private[cluster] object StressMultiJvmSpec extends MultiNodeConfig {
         val φ = phi(node)
         if (φ > 0 || cluster.failureDetector.isMonitoring(node)) {
           val aboveOne = if (!φ.isInfinite && φ > 1.0) 1 else 0
-          phiByNode += node -> PhiValue(node,
-                                        previous.countAboveOne + aboveOne,
-                                        previous.count + 1,
-                                        math.max(previous.max, φ))
+          phiByNode += node -> PhiValue(
+            node,
+            previous.countAboveOne + aboveOne,
+            previous.count + 1,
+            math.max(previous.max, φ))
         }
       }
       val phiSet = immutable.SortedSet.empty[PhiValue] ++ phiByNode.values
@@ -518,11 +520,12 @@ private[cluster] object StressMultiJvmSpec extends MultiNodeConfig {
     def createJob(): Job = {
       if (tree)
-        TreeJob(idCounter.next(),
-                payload,
-                ThreadLocalRandom.current.nextInt(settings.treeWidth),
-                settings.treeLevels,
-                settings.treeWidth)
+        TreeJob(
+          idCounter.next(),
+          payload,
+          ThreadLocalRandom.current.nextInt(settings.treeWidth),
+          settings.treeLevels,
+          settings.treeWidth)
       else SimpleJob(idCounter.next(), payload)
     }
@@ -549,10 +552,11 @@ private[cluster] object StressMultiJvmSpec extends MultiNodeConfig {
       case TreeJob(id, payload, idx, levels, width) =>
         // create the actors when first TreeJob message is received
        val totalActors = ((width * math.pow(width, levels) - 1) / (width - 1)).toInt
-        log.debug("Creating [{}] actors in a tree structure of [{}] levels and each actor has [{}] children",
-                  totalActors,
-                  levels,
-                  width)
+        log.debug(
+          "Creating [{}] actors in a tree structure of [{}] levels and each actor has [{}] children",
+          totalActors,
+          levels,
+          width)
         val tree = context.actorOf(Props(classOf[TreeNode], levels, width), "tree")
         tree.forward((idx, SimpleJob(id, payload)))
         context.become(treeWorker(tree))
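
A side note on the totalActors arithmetic in the hunk above: it is the closed form of a geometric series. A tree in which every node has width children, with levels generations below the root, contains 1 + width + width^2 + ... + width^levels nodes, and that sum equals (width^(levels + 1) - 1) / (width - 1). The code spells width^(levels + 1) as width * math.pow(width, levels) and truncates the resulting Double with .toInt.
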
@@ -610,8 +614,9 @@ private[cluster] object StressMultiJvmSpec extends MultiNodeConfig {
       case e: Exception => context.children.foreach { _ ! e }
       case GetChildrenCount => sender() ! ChildrenCount(context.children.size, restartCount)
       case Reset =>
-        require(context.children.isEmpty,
-                s"ResetChildrenCount not allowed when children exists, [${context.children.size}]")
+        require(
+          context.children.isEmpty,
+          s"ResetChildrenCount not allowed when children exists, [${context.children.size}]")
         restartCount = 0
     }
   }
@@ -700,12 +705,13 @@ abstract class StressSpec
   override def muteLog(sys: ActorSystem = system): Unit = {
     super.muteLog(sys)
     sys.eventStream.publish(Mute(EventFilter[RuntimeException](pattern = ".*Simulated exception.*")))
-    muteDeadLetters(classOf[SimpleJob],
-                    classOf[AggregatedClusterResult],
-                    SendBatch.getClass,
-                    classOf[StatsResult],
-                    classOf[PhiResult],
-                    RetryTick.getClass)(sys)
+    muteDeadLetters(
+      classOf[SimpleJob],
+      classOf[AggregatedClusterResult],
+      SendBatch.getClass,
+      classOf[StatsResult],
+      classOf[PhiResult],
+      RetryTick.getClass)(sys)
   }

   override protected def afterTermination(): Unit = {
@@ -974,9 +980,10 @@ abstract class StressSpec
       val usedRoles = roles.take(nbrUsedRoles)
       val usedAddresses = usedRoles.map(address(_)).toSet

-      @tailrec def loop(counter: Int,
-                        previousAS: Option[ActorSystem],
-                        allPreviousAddresses: Set[Address]): Option[ActorSystem] = {
+      @tailrec def loop(
+          counter: Int,
+          previousAS: Option[ActorSystem],
+          allPreviousAddresses: Set[Address]): Option[ActorSystem] = {
         if (counter > rounds) previousAS
         else {
           val t = title + " round " + counter
@@ -998,9 +1005,10 @@ abstract class StressSpec
             Some(sys)
           } else previousAS
           runOn(usedRoles: _*) {
-            awaitMembersUp(nbrUsedRoles + activeRoles.size,
-                           canNotBePartOfMemberRing = allPreviousAddresses,
-                           timeout = remainingOrDefault)
+            awaitMembersUp(
+              nbrUsedRoles + activeRoles.size,
+              canNotBePartOfMemberRing = allPreviousAddresses,
+              timeout = remainingOrDefault)
             awaitAllReachable()
           }
           val nextAddresses = clusterView.members.map(_.address).diff(usedAddresses)
@@ -1041,11 +1049,12 @@ abstract class StressSpec
       identifyProbe.expectMsgType[ActorIdentity].ref
     }

-  def exerciseRouters(title: String,
-                      duration: FiniteDuration,
-                      batchInterval: FiniteDuration,
-                      expectDroppedMessages: Boolean,
-                      tree: Boolean): Unit =
+  def exerciseRouters(
+      title: String,
+      duration: FiniteDuration,
+      batchInterval: FiniteDuration,
+      expectDroppedMessages: Boolean,
+      tree: Boolean): Unit =
     within(duration + 10.seconds) {
       nbrUsedRoles should ===(totalNumberOfNodes)
       createResultAggregator(title, expectedResults = nbrUsedRoles, includeInHistory = false)
@@ -1053,8 +1062,9 @@ abstract class StressSpec
       val (masterRoles, otherRoles) = roles.take(nbrUsedRoles).splitAt(3)
       runOn(masterRoles: _*) {
         reportResult {
-          val m = system.actorOf(Props(classOf[Master], settings, batchInterval, tree).withDeploy(Deploy.local),
-                                 name = masterName)
+          val m = system.actorOf(
+            Props(classOf[Master], settings, batchInterval, tree).withDeploy(Deploy.local),
+            name = masterName)
           m ! Begin
           import system.dispatcher
           system.scheduler.scheduleOnce(duration) {
@@ -1081,11 +1091,12 @@ abstract class StressSpec
     def awaitWorkResult(m: ActorRef): WorkResult = {
       val workResult = expectMsgType[WorkResult]
       if (settings.infolog)
-        log.info("{} result, [{}] jobs/s, retried [{}] of [{}] msg",
-                 masterName,
-                 workResult.jobsPerSecond.form,
-                 workResult.retryCount,
-                 workResult.sendCount)
+        log.info(
+          "{} result, [{}] jobs/s, retried [{}] of [{}] msg",
+          masterName,
+          workResult.jobsPerSecond.form,
+          workResult.retryCount,
+          workResult.sendCount)
       watch(m)
       expectTerminated(m)
       workResult
@@ -1190,8 +1201,9 @@ abstract class StressSpec

     "start routers that are running while nodes are joining" taggedAs LongRunningTest in {
       runOn(roles.take(3): _*) {
-        system.actorOf(Props(classOf[Master], settings, settings.workBatchInterval, false).withDeploy(Deploy.local),
-                       name = masterName) ! Begin
+        system.actorOf(
+          Props(classOf[Master], settings, settings.workBatchInterval, false).withDeploy(Deploy.local),
+          name = masterName) ! Begin
       }
     }

@@ -1238,44 +1250,48 @@ abstract class StressSpec

     "use routers with normal throughput" taggedAs LongRunningTest in {
       if (exerciseActors) {
-        exerciseRouters("use routers with normal throughput",
-                        normalThroughputDuration,
-                        batchInterval = workBatchInterval,
-                        expectDroppedMessages = false,
-                        tree = false)
+        exerciseRouters(
+          "use routers with normal throughput",
+          normalThroughputDuration,
+          batchInterval = workBatchInterval,
+          expectDroppedMessages = false,
+          tree = false)
       }
       enterBarrier("after-" + step)
     }

     "use routers with high throughput" taggedAs LongRunningTest in {
       if (exerciseActors) {
-        exerciseRouters("use routers with high throughput",
-                        highThroughputDuration,
-                        batchInterval = Duration.Zero,
-                        expectDroppedMessages = false,
-                        tree = false)
+        exerciseRouters(
+          "use routers with high throughput",
+          highThroughputDuration,
+          batchInterval = Duration.Zero,
+          expectDroppedMessages = false,
+          tree = false)
       }
       enterBarrier("after-" + step)
     }

     "use many actors with normal throughput" taggedAs LongRunningTest in {
       if (exerciseActors) {
-        exerciseRouters("use many actors with normal throughput",
-                        normalThroughputDuration,
-                        batchInterval = workBatchInterval,
-                        expectDroppedMessages = false,
-                        tree = true)
+        exerciseRouters(
+          "use many actors with normal throughput",
+          normalThroughputDuration,
+          batchInterval = workBatchInterval,
+          expectDroppedMessages = false,
+          tree = true)
       }
       enterBarrier("after-" + step)
     }

     "use many actors with high throughput" taggedAs LongRunningTest in {
       if (exerciseActors) {
-        exerciseRouters("use many actors with high throughput",
-                        highThroughputDuration,
-                        batchInterval = Duration.Zero,
-                        expectDroppedMessages = false,
-                        tree = true)
+        exerciseRouters(
+          "use many actors with high throughput",
+          highThroughputDuration,
+          batchInterval = Duration.Zero,
+          expectDroppedMessages = false,
+          tree = true)
       }
       enterBarrier("after-" + step)
     }
@@ -1300,8 +1316,9 @@ abstract class StressSpec
     "start routers that are running while nodes are removed" taggedAs LongRunningTest in {
       if (exerciseActors) {
         runOn(roles.take(3): _*) {
-          system.actorOf(Props(classOf[Master], settings, settings.workBatchInterval, false).withDeploy(Deploy.local),
-                         name = masterName) ! Begin
+          system.actorOf(
+            Props(classOf[Master], settings, settings.workBatchInterval, false).withDeploy(Deploy.local),
+            name = masterName) ! Begin
         }
       }
       enterBarrier("after-" + step)


@@ -122,10 +122,10 @@ abstract class ClusterConsistentHashingRouterSpec
     "deploy programatically defined routees to the member nodes in the cluster" in {
       runOn(first) {
         val router2 = system.actorOf(
-          ClusterRouterPool(local = ConsistentHashingPool(nrOfInstances = 0),
-                            settings = ClusterRouterPoolSettings(totalInstances = 10,
-                                                                 maxInstancesPerNode = 2,
-                                                                 allowLocalRoutees = true)).props(Props[Echo]),
+          ClusterRouterPool(
+            local = ConsistentHashingPool(nrOfInstances = 0),
+            settings = ClusterRouterPoolSettings(totalInstances = 10, maxInstancesPerNode = 2, allowLocalRoutees = true))
+            .props(Props[Echo]),
           "router2")
         // it may take some time until router receives cluster member events
         awaitAssert { currentRoutees(router2).size should ===(6) }
@@ -143,8 +143,9 @@ abstract class ClusterConsistentHashingRouterSpec
       }

       val router3 =
-        system.actorOf(ConsistentHashingPool(nrOfInstances = 0, hashMapping = hashMapping).props(Props[Echo]),
-                       "router3")
+        system.actorOf(
+          ConsistentHashingPool(nrOfInstances = 0, hashMapping = hashMapping).props(Props[Echo]),
+          "router3")

       assertHashMapping(router3)
     }
@@ -159,12 +160,13 @@ abstract class ClusterConsistentHashingRouterSpec
       }

       val router4 =
-        system.actorOf(ClusterRouterPool(local = ConsistentHashingPool(nrOfInstances = 0, hashMapping = hashMapping),
-                                         settings =
-                                           ClusterRouterPoolSettings(totalInstances = 10,
-                                                                     maxInstancesPerNode = 1,
-                                                                     allowLocalRoutees = true)).props(Props[Echo]),
-                       "router4")
+        system.actorOf(
+          ClusterRouterPool(
+            local = ConsistentHashingPool(nrOfInstances = 0, hashMapping = hashMapping),
+            settings =
+              ClusterRouterPoolSettings(totalInstances = 10, maxInstancesPerNode = 1, allowLocalRoutees = true))
+            .props(Props[Echo]),
+          "router4")

       assertHashMapping(router4)
     }


@@ -118,8 +118,9 @@ abstract class ClusterRoundRobinSpec
   lazy val router1 = system.actorOf(FromConfig.props(Props[SomeActor]), "router1")
   lazy val router2 = system.actorOf(
-    ClusterRouterPool(RoundRobinPool(nrOfInstances = 0),
-                      ClusterRouterPoolSettings(totalInstances = 3, maxInstancesPerNode = 1, allowLocalRoutees = true))
+    ClusterRouterPool(
+      RoundRobinPool(nrOfInstances = 0),
+      ClusterRouterPoolSettings(totalInstances = 3, maxInstancesPerNode = 1, allowLocalRoutees = true))
       .props(Props[SomeActor]),
     "router2")
   lazy val router3 = system.actorOf(FromConfig.props(Props[SomeActor]), "router3")


@@ -104,11 +104,13 @@ abstract class UseRoleIgnoredSpec
       val roles = Set("b")

       val router = system.actorOf(
-        ClusterRouterPool(RoundRobinPool(nrOfInstances = 6),
-                          ClusterRouterPoolSettings(totalInstances = 6,
-                                                    maxInstancesPerNode = 2,
-                                                    allowLocalRoutees = false,
-                                                    useRoles = roles)).props(Props[SomeActor]),
+        ClusterRouterPool(
+          RoundRobinPool(nrOfInstances = 6),
+          ClusterRouterPoolSettings(
+            totalInstances = 6,
+            maxInstancesPerNode = 2,
+            allowLocalRoutees = false,
+            useRoles = roles)).props(Props[SomeActor]),
         "router-2")

       awaitAssert(currentRoutees(router).size should ===(4))
@@ -134,13 +136,15 @@ abstract class UseRoleIgnoredSpec

       runOn(first) {
         val roles = Set("b")

-        val router = system.actorOf(ClusterRouterGroup(RoundRobinGroup(paths = Nil),
-                                                       ClusterRouterGroupSettings(totalInstances = 6,
-                                                                                  routeesPaths =
-                                                                                    List("/user/foo", "/user/bar"),
-                                                                                  allowLocalRoutees = false,
-                                                                                  useRoles = roles)).props,
-                                    "router-2b")
+        val router = system.actorOf(
+          ClusterRouterGroup(
+            RoundRobinGroup(paths = Nil),
+            ClusterRouterGroupSettings(
+              totalInstances = 6,
+              routeesPaths = List("/user/foo", "/user/bar"),
+              allowLocalRoutees = false,
+              useRoles = roles)).props,
+          "router-2b")

         awaitAssert(currentRoutees(router).size should ===(4))
@@ -166,11 +170,13 @@ abstract class UseRoleIgnoredSpec
       val roles = Set("b")

       val router = system.actorOf(
-        ClusterRouterPool(RoundRobinPool(nrOfInstances = 6),
-                          ClusterRouterPoolSettings(totalInstances = 6,
-                                                    maxInstancesPerNode = 2,
-                                                    allowLocalRoutees = true,
-                                                    useRoles = roles)).props(Props[SomeActor]),
+        ClusterRouterPool(
+          RoundRobinPool(nrOfInstances = 6),
+          ClusterRouterPoolSettings(
+            totalInstances = 6,
+            maxInstancesPerNode = 2,
+            allowLocalRoutees = true,
+            useRoles = roles)).props(Props[SomeActor]),
         "router-3")

       awaitAssert(currentRoutees(router).size should ===(4))
@@ -196,13 +202,15 @@ abstract class UseRoleIgnoredSpec

       runOn(first) {
         val roles = Set("b")

-        val router = system.actorOf(ClusterRouterGroup(RoundRobinGroup(paths = Nil),
-                                                       ClusterRouterGroupSettings(totalInstances = 6,
-                                                                                  routeesPaths =
-                                                                                    List("/user/foo", "/user/bar"),
-                                                                                  allowLocalRoutees = true,
-                                                                                  useRoles = roles)).props,
-                                    "router-3b")
+        val router = system.actorOf(
+          ClusterRouterGroup(
+            RoundRobinGroup(paths = Nil),
+            ClusterRouterGroupSettings(
+              totalInstances = 6,
+              routeesPaths = List("/user/foo", "/user/bar"),
+              allowLocalRoutees = true,
+              useRoles = roles)).props,
+          "router-3b")

         awaitAssert(currentRoutees(router).size should ===(4))
@@ -228,11 +236,13 @@ abstract class UseRoleIgnoredSpec
       val roles = Set("a")

       val router = system.actorOf(
-        ClusterRouterPool(RoundRobinPool(nrOfInstances = 6),
-                          ClusterRouterPoolSettings(totalInstances = 6,
-                                                    maxInstancesPerNode = 2,
-                                                    allowLocalRoutees = true,
-                                                    useRoles = roles)).props(Props[SomeActor]),
+        ClusterRouterPool(
+          RoundRobinPool(nrOfInstances = 6),
+          ClusterRouterPoolSettings(
+            totalInstances = 6,
+            maxInstancesPerNode = 2,
+            allowLocalRoutees = true,
+            useRoles = roles)).props(Props[SomeActor]),
         "router-4")

       awaitAssert(currentRoutees(router).size should ===(2))
@@ -258,13 +268,15 @@ abstract class UseRoleIgnoredSpec

       runOn(first) {
         val roles = Set("a")

-        val router = system.actorOf(ClusterRouterGroup(RoundRobinGroup(paths = Nil),
-                                                       ClusterRouterGroupSettings(totalInstances = 6,
-                                                                                  routeesPaths =
-                                                                                    List("/user/foo", "/user/bar"),
-                                                                                  allowLocalRoutees = true,
-                                                                                  useRoles = roles)).props,
-                                    "router-4b")
+        val router = system.actorOf(
+          ClusterRouterGroup(
+            RoundRobinGroup(paths = Nil),
+            ClusterRouterGroupSettings(
+              totalInstances = 6,
+              routeesPaths = List("/user/foo", "/user/bar"),
+              allowLocalRoutees = true,
+              useRoles = roles)).props,
+          "router-4b")

         awaitAssert(currentRoutees(router).size should ===(2))
@@ -290,11 +302,13 @@ abstract class UseRoleIgnoredSpec
       val roles = Set("c")

       val router = system.actorOf(
-        ClusterRouterPool(RoundRobinPool(nrOfInstances = 6),
-                          ClusterRouterPoolSettings(totalInstances = 6,
-                                                    maxInstancesPerNode = 2,
-                                                    allowLocalRoutees = true,
-                                                    useRoles = roles)).props(Props[SomeActor]),
+        ClusterRouterPool(
+          RoundRobinPool(nrOfInstances = 6),
+          ClusterRouterPoolSettings(
+            totalInstances = 6,
+            maxInstancesPerNode = 2,
+            allowLocalRoutees = true,
+            useRoles = roles)).props(Props[SomeActor]),
         "router-5")

       awaitAssert(currentRoutees(router).size should ===(6))
@@ -320,13 +334,15 @@ abstract class UseRoleIgnoredSpec

       runOn(first) {
         val roles = Set("c")

-        val router = system.actorOf(ClusterRouterGroup(RoundRobinGroup(paths = Nil),
-                                                       ClusterRouterGroupSettings(totalInstances = 6,
-                                                                                  routeesPaths =
-                                                                                    List("/user/foo", "/user/bar"),
-                                                                                  allowLocalRoutees = true,
-                                                                                  useRoles = roles)).props,
-                                    "router-5b")
+        val router = system.actorOf(
+          ClusterRouterGroup(
+            RoundRobinGroup(paths = Nil),
+            ClusterRouterGroupSettings(
+              totalInstances = 6,
+              routeesPaths = List("/user/foo", "/user/bar"),
+              allowLocalRoutees = true,
+              useRoles = roles)).props,
+          "router-5b")

         awaitAssert(currentRoutees(router).size should ===(6))