format source with scalafmt
This commit is contained in:
parent 0f40491d42
commit ce404e4f53
1669 changed files with 43208 additions and 35404 deletions
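The formatted style visible in the hunks below (call-site arguments aligned under the opening parenthesis, definition parameters wrapped onto their own lines, long extends clauses broken one mixin per line) is what scalafmt produces with open-paren alignment and a wide column limit. A minimal sketch of a .scalafmt.conf that could yield similar output is shown here; the keys are real scalafmt settings, but the values are assumptions for illustration, not taken from this commit:

# hypothetical .scalafmt.conf sketch -- values assumed, not the project's actual config
maxColumn = 120                      # long reformatted lines in the diff stay near 120 characters
align.openParenCallSite = true       # wrap call-site arguments aligned under the opening '('
align.openParenDefnSite = false      # definition-site parameters get a continuation indent instead
danglingParentheses = false          # closing ')' stays on the last argument line

Running the formatter over the whole build (for example with an sbt-scalafmt task such as scalafmtAll, assuming the plugin is installed) rewrites every source file in place, which is why this commit touches 1669 files without changing behaviour.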
@@ -65,11 +65,15 @@ object ClusterShardingCustomShardAllocationSpec {
case class TestAllocationStrategy(ref: ActorRef) extends ShardAllocationStrategy {
implicit val timeout = Timeout(3.seconds)
override def allocateShard(requester: ActorRef, shardId: ShardRegion.ShardId, currentShardAllocations: Map[ActorRef, immutable.IndexedSeq[ShardRegion.ShardId]]): Future[ActorRef] = {
override def allocateShard(
requester: ActorRef,
shardId: ShardRegion.ShardId,
currentShardAllocations: Map[ActorRef, immutable.IndexedSeq[ShardRegion.ShardId]]): Future[ActorRef] = {
(ref ? AllocateReq).mapTo[ActorRef]
}

override def rebalance(currentShardAllocations: Map[ActorRef, immutable.IndexedSeq[ShardRegion.ShardId]], rebalanceInProgress: Set[ShardRegion.ShardId]): Future[Set[ShardRegion.ShardId]] = {
override def rebalance(currentShardAllocations: Map[ActorRef, immutable.IndexedSeq[ShardRegion.ShardId]],
rebalanceInProgress: Set[ShardRegion.ShardId]): Future[Set[ShardRegion.ShardId]] = {
(ref ? RebalanceReq).mapTo[Set[String]]
}
}

@@ -99,19 +103,28 @@ abstract class ClusterShardingCustomShardAllocationSpecConfig(val mode: String)
""").withFallback(MultiNodeClusterSpec.clusterConfig))
}

object PersistentClusterShardingCustomShardAllocationSpecConfig extends ClusterShardingCustomShardAllocationSpecConfig("persistence")
object DDataClusterShardingCustomShardAllocationSpecConfig extends ClusterShardingCustomShardAllocationSpecConfig("ddata")
object PersistentClusterShardingCustomShardAllocationSpecConfig
extends ClusterShardingCustomShardAllocationSpecConfig("persistence")
object DDataClusterShardingCustomShardAllocationSpecConfig
extends ClusterShardingCustomShardAllocationSpecConfig("ddata")

class PersistentClusterShardingCustomShardAllocationSpec extends ClusterShardingCustomShardAllocationSpec(PersistentClusterShardingCustomShardAllocationSpecConfig)
class DDataClusterShardingCustomShardAllocationSpec extends ClusterShardingCustomShardAllocationSpec(DDataClusterShardingCustomShardAllocationSpecConfig)
class PersistentClusterShardingCustomShardAllocationSpec
extends ClusterShardingCustomShardAllocationSpec(PersistentClusterShardingCustomShardAllocationSpecConfig)
class DDataClusterShardingCustomShardAllocationSpec
extends ClusterShardingCustomShardAllocationSpec(DDataClusterShardingCustomShardAllocationSpecConfig)

class PersistentClusterShardingCustomShardAllocationMultiJvmNode1 extends PersistentClusterShardingCustomShardAllocationSpec
class PersistentClusterShardingCustomShardAllocationMultiJvmNode2 extends PersistentClusterShardingCustomShardAllocationSpec
class PersistentClusterShardingCustomShardAllocationMultiJvmNode1
extends PersistentClusterShardingCustomShardAllocationSpec
class PersistentClusterShardingCustomShardAllocationMultiJvmNode2
extends PersistentClusterShardingCustomShardAllocationSpec

class DDataClusterShardingCustomShardAllocationMultiJvmNode1 extends DDataClusterShardingCustomShardAllocationSpec
class DDataClusterShardingCustomShardAllocationMultiJvmNode2 extends DDataClusterShardingCustomShardAllocationSpec

abstract class ClusterShardingCustomShardAllocationSpec(config: ClusterShardingCustomShardAllocationSpecConfig) extends MultiNodeSpec(config) with STMultiNodeSpec with ImplicitSender {
abstract class ClusterShardingCustomShardAllocationSpec(config: ClusterShardingCustomShardAllocationSpecConfig)
extends MultiNodeSpec(config)
with STMultiNodeSpec
with ImplicitSender {
import ClusterShardingCustomShardAllocationSpec._
import config._

@@ -119,21 +132,20 @@ abstract class ClusterShardingCustomShardAllocationSpec(config: ClusterShardingC
def join(from: RoleName, to: RoleName): Unit = {
runOn(from) {
Cluster(system) join node(to).address
Cluster(system).join(node(to).address)
startSharding()
}
enterBarrier(from.name + "-joined")
}

def startSharding(): Unit = {
ClusterSharding(system).start(
typeName = "Entity",
entityProps = Props[Entity],
settings = ClusterShardingSettings(system),
extractEntityId = extractEntityId,
extractShardId = extractShardId,
allocationStrategy = TestAllocationStrategy(allocator),
handOffStopMessage = PoisonPill)
ClusterSharding(system).start(typeName = "Entity",
entityProps = Props[Entity],
settings = ClusterShardingSettings(system),
extractEntityId = extractEntityId,
extractShardId = extractShardId,
allocationStrategy = TestAllocationStrategy(allocator),
handOffStopMessage = PoisonPill)
}

lazy val region = ClusterSharding(system).shardRegion("Entity")

@@ -88,7 +88,8 @@ abstract class ClusterShardingFailureSpecConfig(val mode: String) extends MultiN
object PersistentClusterShardingFailureSpecConfig extends ClusterShardingFailureSpecConfig("persistence")
object DDataClusterShardingFailureSpecConfig extends ClusterShardingFailureSpecConfig("ddata")

class PersistentClusterShardingFailureSpec extends ClusterShardingFailureSpec(PersistentClusterShardingFailureSpecConfig)
class PersistentClusterShardingFailureSpec
extends ClusterShardingFailureSpec(PersistentClusterShardingFailureSpecConfig)
class DDataClusterShardingFailureSpec extends ClusterShardingFailureSpec(DDataClusterShardingFailureSpecConfig)

class PersistentClusterShardingFailureMultiJvmNode1 extends PersistentClusterShardingFailureSpec

@@ -99,14 +100,17 @@ class DDataClusterShardingFailureMultiJvmNode1 extends DDataClusterShardingFailu
class DDataClusterShardingFailureMultiJvmNode2 extends DDataClusterShardingFailureSpec
class DDataClusterShardingFailureMultiJvmNode3 extends DDataClusterShardingFailureSpec

abstract class ClusterShardingFailureSpec(config: ClusterShardingFailureSpecConfig) extends MultiNodeSpec(config) with STMultiNodeSpec with ImplicitSender {
abstract class ClusterShardingFailureSpec(config: ClusterShardingFailureSpecConfig)
extends MultiNodeSpec(config)
with STMultiNodeSpec
with ImplicitSender {
import ClusterShardingFailureSpec._
import config._

override def initialParticipants = roles.size

val storageLocations = List(new File(system.settings.config.getString(
"akka.cluster.sharding.distributed-data.durable.lmdb.dir")).getParentFile)
val storageLocations = List(
new File(system.settings.config.getString("akka.cluster.sharding.distributed-data.durable.lmdb.dir")).getParentFile)

override protected def atStartup(): Unit = {
storageLocations.foreach(dir => if (dir.exists) FileUtils.deleteQuietly(dir))

@@ -121,7 +125,7 @@ abstract class ClusterShardingFailureSpec(config: ClusterShardingFailureSpecConf
def join(from: RoleName, to: RoleName): Unit = {
runOn(from) {
cluster join node(to).address
cluster.join(node(to).address)
startSharding()

within(remaining) {

@@ -135,12 +139,11 @@ abstract class ClusterShardingFailureSpec(config: ClusterShardingFailureSpecConf
}

def startSharding(): Unit = {
ClusterSharding(system).start(
typeName = "Entity",
entityProps = Props[Entity],
settings = ClusterShardingSettings(system).withRememberEntities(true),
extractEntityId = extractEntityId,
extractShardId = extractShardId)
ClusterSharding(system).start(typeName = "Entity",
entityProps = Props[Entity],
settings = ClusterShardingSettings(system).withRememberEntities(true),
extractEntityId = extractEntityId,
extractShardId = extractShardId)
}

lazy val region = ClusterSharding(system).shardRegion("Entity")

@@ -276,4 +279,3 @@ abstract class ClusterShardingFailureSpec(config: ClusterShardingFailureSpecConf
}
}

@@ -61,8 +61,7 @@ object ClusterShardingGetStateSpecConfig extends MultiNodeConfig {
}
""").withFallback(MultiNodeClusterSpec.clusterConfig))

nodeConfig(first, second)(ConfigFactory.parseString(
"""akka.cluster.roles=["shard"]"""))
nodeConfig(first, second)(ConfigFactory.parseString("""akka.cluster.roles=["shard"]"""))

}

@@ -70,7 +69,9 @@ class ClusterShardingGetStateSpecMultiJvmNode1 extends ClusterShardingGetStateSp
class ClusterShardingGetStateSpecMultiJvmNode2 extends ClusterShardingGetStateSpec
class ClusterShardingGetStateSpecMultiJvmNode3 extends ClusterShardingGetStateSpec

abstract class ClusterShardingGetStateSpec extends MultiNodeSpec(ClusterShardingGetStateSpecConfig) with STMultiNodeSpec {
abstract class ClusterShardingGetStateSpec
extends MultiNodeSpec(ClusterShardingGetStateSpecConfig)
with STMultiNodeSpec {

import ClusterShardingGetStateSpec._
import ClusterShardingGetStateSpecConfig._

@@ -78,20 +79,18 @@ abstract class ClusterShardingGetStateSpec extends MultiNodeSpec(ClusterSharding
def initialParticipants = roles.size

def startShard(): ActorRef = {
ClusterSharding(system).start(
typeName = shardTypeName,
entityProps = Props(new ShardedActor),
settings = ClusterShardingSettings(system).withRole("shard"),
extractEntityId = extractEntityId,
extractShardId = extractShardId)
ClusterSharding(system).start(typeName = shardTypeName,
entityProps = Props(new ShardedActor),
settings = ClusterShardingSettings(system).withRole("shard"),
extractEntityId = extractEntityId,
extractShardId = extractShardId)
}

def startProxy(): ActorRef = {
ClusterSharding(system).startProxy(
typeName = shardTypeName,
role = Some("shard"),
extractEntityId = extractEntityId,
extractShardId = extractShardId)
ClusterSharding(system).startProxy(typeName = shardTypeName,
role = Some("shard"),
extractEntityId = extractEntityId,
extractShardId = extractShardId)
}

def join(from: RoleName): Unit = {

@@ -63,8 +63,7 @@ object ClusterShardingGetStatsSpecConfig extends MultiNodeConfig {
akka.actor.warn-about-java-serializer-usage=false
""").withFallback(MultiNodeClusterSpec.clusterConfig))

nodeConfig(first, second, third)(ConfigFactory.parseString(
"""akka.cluster.roles=["shard"]"""))
nodeConfig(first, second, third)(ConfigFactory.parseString("""akka.cluster.roles=["shard"]"""))

}

@@ -73,7 +72,9 @@ class ClusterShardingGetStatsSpecMultiJvmNode2 extends ClusterShardingGetStatsSp
class ClusterShardingGetStatsSpecMultiJvmNode3 extends ClusterShardingGetStatsSpec
class ClusterShardingGetStatsSpecMultiJvmNode4 extends ClusterShardingGetStatsSpec

abstract class ClusterShardingGetStatsSpec extends MultiNodeSpec(ClusterShardingGetStatsSpecConfig) with STMultiNodeSpec {
abstract class ClusterShardingGetStatsSpec
extends MultiNodeSpec(ClusterShardingGetStatsSpecConfig)
with STMultiNodeSpec {

import ClusterShardingGetStatsSpec._
import ClusterShardingGetStatsSpecConfig._

@@ -81,20 +82,18 @@ abstract class ClusterShardingGetStatsSpec extends MultiNodeSpec(ClusterSharding
def initialParticipants = roles.size

def startShard(): ActorRef = {
ClusterSharding(system).start(
typeName = shardTypeName,
entityProps = Props(new ShardedActor),
settings = ClusterShardingSettings(system).withRole("shard"),
extractEntityId = extractEntityId,
extractShardId = extractShardId)
ClusterSharding(system).start(typeName = shardTypeName,
entityProps = Props(new ShardedActor),
settings = ClusterShardingSettings(system).withRole("shard"),
extractEntityId = extractEntityId,
extractShardId = extractShardId)
}

def startProxy(): ActorRef = {
ClusterSharding(system).startProxy(
typeName = shardTypeName,
role = Some("shard"),
extractEntityId = extractEntityId,
extractShardId = extractShardId)
ClusterSharding(system).startProxy(typeName = shardTypeName,
role = Some("shard"),
extractEntityId = extractEntityId,
extractShardId = extractShardId)
}

def join(from: RoleName): Unit = {

@@ -35,9 +35,10 @@ object ClusterShardingGracefulShutdownSpec {
case id: Int => (id.toString, id)
}

val extractShardId: ShardRegion.ExtractShardId = msg => msg match {
case id: Int => id.toString
}
val extractShardId: ShardRegion.ExtractShardId = msg =>
msg match {
case id: Int => id.toString
}

}

@@ -67,11 +68,14 @@ abstract class ClusterShardingGracefulShutdownSpecConfig(val mode: String) exten
""").withFallback(MultiNodeClusterSpec.clusterConfig))
}

object PersistentClusterShardingGracefulShutdownSpecConfig extends ClusterShardingGracefulShutdownSpecConfig("persistence")
object PersistentClusterShardingGracefulShutdownSpecConfig
extends ClusterShardingGracefulShutdownSpecConfig("persistence")
object DDataClusterShardingGracefulShutdownSpecConfig extends ClusterShardingGracefulShutdownSpecConfig("ddata")

class PersistentClusterShardingGracefulShutdownSpec extends ClusterShardingGracefulShutdownSpec(PersistentClusterShardingGracefulShutdownSpecConfig)
class DDataClusterShardingGracefulShutdownSpec extends ClusterShardingGracefulShutdownSpec(DDataClusterShardingGracefulShutdownSpecConfig)
class PersistentClusterShardingGracefulShutdownSpec
extends ClusterShardingGracefulShutdownSpec(PersistentClusterShardingGracefulShutdownSpecConfig)
class DDataClusterShardingGracefulShutdownSpec
extends ClusterShardingGracefulShutdownSpec(DDataClusterShardingGracefulShutdownSpecConfig)

class PersistentClusterShardingGracefulShutdownMultiJvmNode1 extends PersistentClusterShardingGracefulShutdownSpec
class PersistentClusterShardingGracefulShutdownMultiJvmNode2 extends PersistentClusterShardingGracefulShutdownSpec

@@ -79,14 +83,17 @@ class PersistentClusterShardingGracefulShutdownMultiJvmNode2 extends PersistentC
class DDataClusterShardingGracefulShutdownMultiJvmNode1 extends DDataClusterShardingGracefulShutdownSpec
class DDataClusterShardingGracefulShutdownMultiJvmNode2 extends DDataClusterShardingGracefulShutdownSpec

abstract class ClusterShardingGracefulShutdownSpec(config: ClusterShardingGracefulShutdownSpecConfig) extends MultiNodeSpec(config) with STMultiNodeSpec with ImplicitSender {
abstract class ClusterShardingGracefulShutdownSpec(config: ClusterShardingGracefulShutdownSpecConfig)
extends MultiNodeSpec(config)
with STMultiNodeSpec
with ImplicitSender {
import ClusterShardingGracefulShutdownSpec._
import config._

override def initialParticipants = roles.size

val storageLocations = List(new File(system.settings.config.getString(
"akka.cluster.sharding.distributed-data.durable.lmdb.dir")).getParentFile)
val storageLocations = List(
new File(system.settings.config.getString("akka.cluster.sharding.distributed-data.durable.lmdb.dir")).getParentFile)

override protected def atStartup(): Unit = {
storageLocations.foreach(dir => if (dir.exists) FileUtils.deleteQuietly(dir))

@@ -99,22 +106,22 @@ abstract class ClusterShardingGracefulShutdownSpec(config: ClusterShardingGracef
def join(from: RoleName, to: RoleName): Unit = {
runOn(from) {
Cluster(system) join node(to).address
Cluster(system).join(node(to).address)
startSharding()
}
enterBarrier(from.name + "-joined")
}

def startSharding(): Unit = {
val allocationStrategy = new ShardCoordinator.LeastShardAllocationStrategy(rebalanceThreshold = 2, maxSimultaneousRebalance = 1)
ClusterSharding(system).start(
typeName = "Entity",
entityProps = Props[Entity],
settings = ClusterShardingSettings(system),
extractEntityId = extractEntityId,
extractShardId = extractShardId,
allocationStrategy,
handOffStopMessage = StopEntity)
val allocationStrategy =
new ShardCoordinator.LeastShardAllocationStrategy(rebalanceThreshold = 2, maxSimultaneousRebalance = 1)
ClusterSharding(system).start(typeName = "Entity",
entityProps = Props[Entity],
settings = ClusterShardingSettings(system),
extractEntityId = extractEntityId,
extractShardId = extractShardId,
allocationStrategy,
handOffStopMessage = StopEntity)
}

lazy val region = ClusterSharding(system).shardRegion("Entity")

@@ -185,15 +192,15 @@ abstract class ClusterShardingGracefulShutdownSpec(config: ClusterShardingGracef
"gracefully shutdown empty region" in within(30.seconds) {
runOn(first) {
val allocationStrategy = new ShardCoordinator.LeastShardAllocationStrategy(rebalanceThreshold = 2, maxSimultaneousRebalance = 1)
val regionEmpty = ClusterSharding(system).start(
typeName = "EntityEmpty",
entityProps = Props[Entity],
settings = ClusterShardingSettings(system),
extractEntityId = extractEntityId,
extractShardId = extractShardId,
allocationStrategy,
handOffStopMessage = StopEntity)
val allocationStrategy =
new ShardCoordinator.LeastShardAllocationStrategy(rebalanceThreshold = 2, maxSimultaneousRebalance = 1)
val regionEmpty = ClusterSharding(system).start(typeName = "EntityEmpty",
entityProps = Props[Entity],
settings = ClusterShardingSettings(system),
extractEntityId = extractEntityId,
extractShardId = extractShardId,
allocationStrategy,
handOffStopMessage = StopEntity)

watch(regionEmpty)
regionEmpty ! GracefulShutdown

@@ -203,4 +210,3 @@ abstract class ClusterShardingGracefulShutdownSpec(config: ClusterShardingGracef
}
}

@@ -13,8 +13,7 @@ object ClusterShardingIncorrectSetupSpecConfig extends MultiNodeConfig {
val first = role("first")
val second = role("second")

val commonConfig = ConfigFactory.parseString(
"""
val commonConfig = ConfigFactory.parseString("""
akka.loglevel = INFO
akka.cluster.sharding {
waiting-for-state-timeout = 100ms

@@ -37,7 +36,10 @@ object ClusterShardingIncorrectSetupSpec {
}
}

abstract class ClusterShardingIncorrectSetupSpec extends MultiNodeSpec(ClusterShardingIncorrectSetupSpecConfig) with MultiNodeClusterSpec with ImplicitSender {
abstract class ClusterShardingIncorrectSetupSpec
extends MultiNodeSpec(ClusterShardingIncorrectSetupSpecConfig)
with MultiNodeClusterSpec
with ImplicitSender {

import ClusterShardingIncorrectSetupSpec._
import ClusterShardingIncorrectSetupSpecConfig._

@@ -48,16 +50,14 @@ abstract class ClusterShardingIncorrectSetupSpec extends MultiNodeSpec(ClusterSh
enterBarrier("cluster-up")
runOn(first) {
EventFilter.error(pattern = """Has ClusterSharding been started on all nodes?""").intercept {
ClusterSharding(system).start(
typeName = "Entity",
entityProps = TestActors.echoActorProps,
settings = ClusterShardingSettings(system),
extractEntityId = extractEntityId,
extractShardId = extractShardId)
ClusterSharding(system).start(typeName = "Entity",
entityProps = TestActors.echoActorProps,
settings = ClusterShardingSettings(system),
extractEntityId = extractEntityId,
extractShardId = extractShardId)
}
}
enterBarrier("helpful error message logged")
}
}
}

@@ -85,7 +85,8 @@ abstract class ClusterShardingLeavingSpecConfig(val mode: String) extends MultiN
object PersistentClusterShardingLeavingSpecConfig extends ClusterShardingLeavingSpecConfig("persistence")
object DDataClusterShardingLeavingSpecConfig extends ClusterShardingLeavingSpecConfig("ddata")

class PersistentClusterShardingLeavingSpec extends ClusterShardingLeavingSpec(PersistentClusterShardingLeavingSpecConfig)
class PersistentClusterShardingLeavingSpec
extends ClusterShardingLeavingSpec(PersistentClusterShardingLeavingSpecConfig)
class DDataClusterShardingLeavingSpec extends ClusterShardingLeavingSpec(DDataClusterShardingLeavingSpecConfig)

class PersistentClusterShardingLeavingMultiJvmNode1 extends PersistentClusterShardingLeavingSpec

@@ -98,14 +99,17 @@ class DDataClusterShardingLeavingMultiJvmNode2 extends DDataClusterShardingLeavi
class DDataClusterShardingLeavingMultiJvmNode3 extends DDataClusterShardingLeavingSpec
class DDataClusterShardingLeavingMultiJvmNode4 extends DDataClusterShardingLeavingSpec

abstract class ClusterShardingLeavingSpec(config: ClusterShardingLeavingSpecConfig) extends MultiNodeSpec(config) with STMultiNodeSpec with ImplicitSender {
abstract class ClusterShardingLeavingSpec(config: ClusterShardingLeavingSpecConfig)
extends MultiNodeSpec(config)
with STMultiNodeSpec
with ImplicitSender {
import ClusterShardingLeavingSpec._
import config._

override def initialParticipants = roles.size

val storageLocations = List(new File(system.settings.config.getString(
"akka.cluster.sharding.distributed-data.durable.lmdb.dir")).getParentFile)
val storageLocations = List(
new File(system.settings.config.getString("akka.cluster.sharding.distributed-data.durable.lmdb.dir")).getParentFile)

override protected def atStartup(): Unit = {
storageLocations.foreach(dir => if (dir.exists) FileUtils.deleteQuietly(dir))

@@ -120,7 +124,7 @@ abstract class ClusterShardingLeavingSpec(config: ClusterShardingLeavingSpecConf
def join(from: RoleName, to: RoleName): Unit = {
runOn(from) {
cluster join node(to).address
cluster.join(node(to).address)
startSharding()
within(15.seconds) {
awaitAssert(cluster.state.members.exists { m =>

@@ -132,12 +136,11 @@ abstract class ClusterShardingLeavingSpec(config: ClusterShardingLeavingSpecConf
}

def startSharding(): Unit = {
ClusterSharding(system).start(
typeName = "Entity",
entityProps = Props[Entity],
settings = ClusterShardingSettings(system),
extractEntityId = extractEntityId,
extractShardId = extractShardId)
ClusterSharding(system).start(typeName = "Entity",
entityProps = Props[Entity],
settings = ClusterShardingSettings(system),
extractEntityId = extractEntityId,
extractShardId = extractShardId)
}

lazy val region = ClusterSharding(system).shardRegion("Entity")

@@ -221,4 +224,3 @@ abstract class ClusterShardingLeavingSpec(config: ClusterShardingLeavingSpecConf
}
}

@@ -28,9 +28,10 @@ object ClusterShardingMinMembersSpec {
case id: Int => (id.toString, id)
}

val extractShardId: ShardRegion.ExtractShardId = msg => msg match {
case id: Int => id.toString
}
val extractShardId: ShardRegion.ExtractShardId = msg =>
msg match {
case id: Int => id.toString
}

}

@@ -66,7 +67,8 @@ abstract class ClusterShardingMinMembersSpecConfig(val mode: String) extends Mul
object PersistentClusterShardingMinMembersSpecConfig extends ClusterShardingMinMembersSpecConfig("persistence")
object DDataClusterShardingMinMembersSpecConfig extends ClusterShardingMinMembersSpecConfig("ddata")

class PersistentClusterShardingMinMembersSpec extends ClusterShardingMinMembersSpec(PersistentClusterShardingMinMembersSpecConfig)
class PersistentClusterShardingMinMembersSpec
extends ClusterShardingMinMembersSpec(PersistentClusterShardingMinMembersSpecConfig)
class DDataClusterShardingMinMembersSpec extends ClusterShardingMinMembersSpec(DDataClusterShardingMinMembersSpecConfig)

class PersistentClusterShardingMinMembersMultiJvmNode1 extends PersistentClusterShardingMinMembersSpec

@@ -77,14 +79,17 @@ class DDataClusterShardingMinMembersMultiJvmNode1 extends DDataClusterShardingMi
class DDataClusterShardingMinMembersMultiJvmNode2 extends DDataClusterShardingMinMembersSpec
class DDataClusterShardingMinMembersMultiJvmNode3 extends DDataClusterShardingMinMembersSpec

abstract class ClusterShardingMinMembersSpec(config: ClusterShardingMinMembersSpecConfig) extends MultiNodeSpec(config) with STMultiNodeSpec with ImplicitSender {
abstract class ClusterShardingMinMembersSpec(config: ClusterShardingMinMembersSpecConfig)
extends MultiNodeSpec(config)
with STMultiNodeSpec
with ImplicitSender {
import ClusterShardingMinMembersSpec._
import config._

override def initialParticipants = roles.size

val storageLocations = List(new File(system.settings.config.getString(
"akka.cluster.sharding.distributed-data.durable.lmdb.dir")).getParentFile)
val storageLocations = List(
new File(system.settings.config.getString("akka.cluster.sharding.distributed-data.durable.lmdb.dir")).getParentFile)

override protected def atStartup(): Unit = {
storageLocations.foreach(dir => if (dir.exists) FileUtils.deleteQuietly(dir))

@@ -97,7 +102,7 @@ abstract class ClusterShardingMinMembersSpec(config: ClusterShardingMinMembersSp
def join(from: RoleName, to: RoleName): Unit = {
runOn(from) {
Cluster(system) join node(to).address
Cluster(system).join(node(to).address)
}
enterBarrier(from.name + "-joined")
}

@@ -105,15 +110,15 @@ abstract class ClusterShardingMinMembersSpec(config: ClusterShardingMinMembersSp
val cluster = Cluster(system)

def startSharding(): Unit = {
val allocationStrategy = new ShardCoordinator.LeastShardAllocationStrategy(rebalanceThreshold = 2, maxSimultaneousRebalance = 1)
ClusterSharding(system).start(
typeName = "Entity",
entityProps = TestActors.echoActorProps,
settings = ClusterShardingSettings(system),
extractEntityId = extractEntityId,
extractShardId = extractShardId,
allocationStrategy,
handOffStopMessage = StopEntity)
val allocationStrategy =
new ShardCoordinator.LeastShardAllocationStrategy(rebalanceThreshold = 2, maxSimultaneousRebalance = 1)
ClusterSharding(system).start(typeName = "Entity",
entityProps = TestActors.echoActorProps,
settings = ClusterShardingSettings(system),
extractEntityId = extractEntityId,
extractShardId = extractShardId,
allocationStrategy,
handOffStopMessage = StopEntity)
}

lazy val region = ClusterSharding(system).shardRegion("Entity")

@@ -195,4 +200,3 @@ abstract class ClusterShardingMinMembersSpec(config: ClusterShardingMinMembersSp
}
}

@@ -78,21 +78,18 @@ abstract class ClusterShardingRememberEntitiesNewExtractorSpecConfig(val mode: S
}
""").withFallback(MultiNodeClusterSpec.clusterConfig))

val roleConfig = ConfigFactory.parseString(
"""
val roleConfig = ConfigFactory.parseString("""
akka.cluster.roles = [sharding]
""")

// we pretend node 4 and 5 are new incarnations of node 2 and 3 as they never run in parallel
// so we can use the same lmdb store for them and have node 4 pick up the persisted data of node 2
val ddataNodeAConfig = ConfigFactory.parseString(
"""
val ddataNodeAConfig = ConfigFactory.parseString("""
akka.cluster.sharding.distributed-data.durable.lmdb {
dir = target/ShardingRememberEntitiesNewExtractorSpec/sharding-node-a
}
""")
val ddataNodeBConfig = ConfigFactory.parseString(
"""
val ddataNodeBConfig = ConfigFactory.parseString("""
akka.cluster.sharding.distributed-data.durable.lmdb {
dir = target/ShardingRememberEntitiesNewExtractorSpec/sharding-node-b
}

@@ -103,26 +100,37 @@ abstract class ClusterShardingRememberEntitiesNewExtractorSpecConfig(val mode: S
}

object PersistentClusterShardingRememberEntitiesSpecNewExtractorConfig extends ClusterShardingRememberEntitiesNewExtractorSpecConfig(
ClusterShardingSettings.StateStoreModePersistence)
object DDataClusterShardingRememberEntitiesNewExtractorSpecConfig extends ClusterShardingRememberEntitiesNewExtractorSpecConfig(
ClusterShardingSettings.StateStoreModeDData)
object PersistentClusterShardingRememberEntitiesSpecNewExtractorConfig
extends ClusterShardingRememberEntitiesNewExtractorSpecConfig(ClusterShardingSettings.StateStoreModePersistence)
object DDataClusterShardingRememberEntitiesNewExtractorSpecConfig
extends ClusterShardingRememberEntitiesNewExtractorSpecConfig(ClusterShardingSettings.StateStoreModeDData)

class PersistentClusterShardingRememberEntitiesNewExtractorSpec extends ClusterShardingRememberEntitiesNewExtractorSpec(
PersistentClusterShardingRememberEntitiesSpecNewExtractorConfig)
class PersistentClusterShardingRememberEntitiesNewExtractorSpec
extends ClusterShardingRememberEntitiesNewExtractorSpec(
PersistentClusterShardingRememberEntitiesSpecNewExtractorConfig)

class PersistentClusterShardingRememberEntitiesNewExtractorMultiJvmNode1 extends PersistentClusterShardingRememberEntitiesNewExtractorSpec
class PersistentClusterShardingRememberEntitiesNewExtractorMultiJvmNode2 extends PersistentClusterShardingRememberEntitiesNewExtractorSpec
class PersistentClusterShardingRememberEntitiesNewExtractorMultiJvmNode3 extends PersistentClusterShardingRememberEntitiesNewExtractorSpec
class PersistentClusterShardingRememberEntitiesNewExtractorMultiJvmNode1
extends PersistentClusterShardingRememberEntitiesNewExtractorSpec
class PersistentClusterShardingRememberEntitiesNewExtractorMultiJvmNode2
extends PersistentClusterShardingRememberEntitiesNewExtractorSpec
class PersistentClusterShardingRememberEntitiesNewExtractorMultiJvmNode3
extends PersistentClusterShardingRememberEntitiesNewExtractorSpec

class DDataClusterShardingRememberEntitiesNewExtractorSpec extends ClusterShardingRememberEntitiesNewExtractorSpec(
DDataClusterShardingRememberEntitiesNewExtractorSpecConfig)
class DDataClusterShardingRememberEntitiesNewExtractorSpec
extends ClusterShardingRememberEntitiesNewExtractorSpec(DDataClusterShardingRememberEntitiesNewExtractorSpecConfig)

class DDataClusterShardingRememberEntitiesNewExtractorMultiJvmNode1 extends DDataClusterShardingRememberEntitiesNewExtractorSpec
class DDataClusterShardingRememberEntitiesNewExtractorMultiJvmNode2 extends DDataClusterShardingRememberEntitiesNewExtractorSpec
class DDataClusterShardingRememberEntitiesNewExtractorMultiJvmNode3 extends DDataClusterShardingRememberEntitiesNewExtractorSpec
class DDataClusterShardingRememberEntitiesNewExtractorMultiJvmNode1
extends DDataClusterShardingRememberEntitiesNewExtractorSpec
class DDataClusterShardingRememberEntitiesNewExtractorMultiJvmNode2
extends DDataClusterShardingRememberEntitiesNewExtractorSpec
class DDataClusterShardingRememberEntitiesNewExtractorMultiJvmNode3
extends DDataClusterShardingRememberEntitiesNewExtractorSpec

abstract class ClusterShardingRememberEntitiesNewExtractorSpec(config: ClusterShardingRememberEntitiesNewExtractorSpecConfig) extends MultiNodeSpec(config) with STMultiNodeSpec with ImplicitSender {
abstract class ClusterShardingRememberEntitiesNewExtractorSpec(
config: ClusterShardingRememberEntitiesNewExtractorSpecConfig)
extends MultiNodeSpec(config)
with STMultiNodeSpec
with ImplicitSender {
import ClusterShardingRememberEntitiesNewExtractorSpec._
import config._

@@ -130,8 +138,8 @@ abstract class ClusterShardingRememberEntitiesNewExtractorSpec(config: ClusterSh
override def initialParticipants = roles.size

val storageLocations = List(new File(system.settings.config.getString(
"akka.cluster.sharding.distributed-data.durable.lmdb.dir")).getParentFile)
val storageLocations = List(
new File(system.settings.config.getString("akka.cluster.sharding.distributed-data.durable.lmdb.dir")).getParentFile)

override protected def atStartup(): Unit = {
storageLocations.foreach(dir => if (dir.exists) FileUtils.deleteQuietly(dir))

@@ -144,7 +152,7 @@ abstract class ClusterShardingRememberEntitiesNewExtractorSpec(config: ClusterSh
def join(from: RoleName, to: RoleName): Unit = {
runOn(from) {
Cluster(system) join node(to).address
Cluster(system).join(node(to).address)
}
enterBarrier(from.name + "-joined")
}

@@ -152,21 +160,21 @@ abstract class ClusterShardingRememberEntitiesNewExtractorSpec(config: ClusterSh
val cluster = Cluster(system)

def startShardingWithExtractor1(): Unit = {
ClusterSharding(system).start(
typeName = typeName,
entityProps = ClusterShardingRememberEntitiesNewExtractorSpec.props(None),
settings = ClusterShardingSettings(system).withRememberEntities(true).withRole("sharding"),
extractEntityId = extractEntityId,
extractShardId = extractShardId1)
ClusterSharding(system).start(typeName = typeName,
entityProps = ClusterShardingRememberEntitiesNewExtractorSpec.props(None),
settings =
ClusterShardingSettings(system).withRememberEntities(true).withRole("sharding"),
extractEntityId = extractEntityId,
extractShardId = extractShardId1)
}

def startShardingWithExtractor2(sys: ActorSystem, probe: ActorRef): Unit = {
ClusterSharding(sys).start(
typeName = typeName,
entityProps = ClusterShardingRememberEntitiesNewExtractorSpec.props(Some(probe)),
settings = ClusterShardingSettings(system).withRememberEntities(true).withRole("sharding"),
extractEntityId = extractEntityId,
extractShardId = extractShardId2)
ClusterSharding(sys).start(typeName = typeName,
entityProps = ClusterShardingRememberEntitiesNewExtractorSpec.props(Some(probe)),
settings =
ClusterShardingSettings(system).withRememberEntities(true).withRole("sharding"),
extractEntityId = extractEntityId,
extractShardId = extractShardId2)
}

def region(sys: ActorSystem = system) = ClusterSharding(sys).shardRegion(typeName)

@@ -297,4 +305,3 @@ abstract class ClusterShardingRememberEntitiesNewExtractorSpec(config: ClusterSh
}
}

@@ -37,10 +37,11 @@ object ClusterShardingRememberEntitiesSpec {
case id: Int => (id.toString, id)
}

val extractShardId: ShardRegion.ExtractShardId = msg => msg match {
case id: Int => id.toString
case ShardRegion.StartEntity(id) => id
}
val extractShardId: ShardRegion.ExtractShardId = msg =>
msg match {
case id: Int => id.toString
case ShardRegion.StartEntity(id) => id
}

}

@@ -79,33 +80,36 @@ abstract class ClusterShardingRememberEntitiesSpecConfig(val mode: String) exten
"""))
}

object PersistentClusterShardingRememberEntitiesSpecConfig extends ClusterShardingRememberEntitiesSpecConfig(
ClusterShardingSettings.StateStoreModePersistence)
object DDataClusterShardingRememberEntitiesSpecConfig extends ClusterShardingRememberEntitiesSpecConfig(
ClusterShardingSettings.StateStoreModeDData)
object PersistentClusterShardingRememberEntitiesSpecConfig
extends ClusterShardingRememberEntitiesSpecConfig(ClusterShardingSettings.StateStoreModePersistence)
object DDataClusterShardingRememberEntitiesSpecConfig
extends ClusterShardingRememberEntitiesSpecConfig(ClusterShardingSettings.StateStoreModeDData)

class PersistentClusterShardingRememberEntitiesSpec extends ClusterShardingRememberEntitiesSpec(
PersistentClusterShardingRememberEntitiesSpecConfig)
class PersistentClusterShardingRememberEntitiesSpec
extends ClusterShardingRememberEntitiesSpec(PersistentClusterShardingRememberEntitiesSpecConfig)

class PersistentClusterShardingRememberEntitiesMultiJvmNode1 extends PersistentClusterShardingRememberEntitiesSpec
class PersistentClusterShardingRememberEntitiesMultiJvmNode2 extends PersistentClusterShardingRememberEntitiesSpec
class PersistentClusterShardingRememberEntitiesMultiJvmNode3 extends PersistentClusterShardingRememberEntitiesSpec

class DDataClusterShardingRememberEntitiesSpec extends ClusterShardingRememberEntitiesSpec(
DDataClusterShardingRememberEntitiesSpecConfig)
class DDataClusterShardingRememberEntitiesSpec
extends ClusterShardingRememberEntitiesSpec(DDataClusterShardingRememberEntitiesSpecConfig)

class DDataClusterShardingRememberEntitiesMultiJvmNode1 extends DDataClusterShardingRememberEntitiesSpec
class DDataClusterShardingRememberEntitiesMultiJvmNode2 extends DDataClusterShardingRememberEntitiesSpec
class DDataClusterShardingRememberEntitiesMultiJvmNode3 extends DDataClusterShardingRememberEntitiesSpec

abstract class ClusterShardingRememberEntitiesSpec(config: ClusterShardingRememberEntitiesSpecConfig) extends MultiNodeSpec(config) with STMultiNodeSpec with ImplicitSender {
abstract class ClusterShardingRememberEntitiesSpec(config: ClusterShardingRememberEntitiesSpecConfig)
extends MultiNodeSpec(config)
with STMultiNodeSpec
with ImplicitSender {
import ClusterShardingRememberEntitiesSpec._
import config._

override def initialParticipants = roles.size

val storageLocations = List(new File(system.settings.config.getString(
"akka.cluster.sharding.distributed-data.durable.lmdb.dir")).getParentFile)
val storageLocations = List(
new File(system.settings.config.getString("akka.cluster.sharding.distributed-data.durable.lmdb.dir")).getParentFile)

override protected def atStartup(): Unit = {
storageLocations.foreach(dir => if (dir.exists) FileUtils.deleteQuietly(dir))

@@ -118,7 +122,7 @@ abstract class ClusterShardingRememberEntitiesSpec(config: ClusterShardingRememb
def join(from: RoleName, to: RoleName): Unit = {
runOn(from) {
Cluster(system) join node(to).address
Cluster(system).join(node(to).address)
}
enterBarrier(from.name + "-joined")
}

@@ -126,12 +130,11 @@ abstract class ClusterShardingRememberEntitiesSpec(config: ClusterShardingRememb
val cluster = Cluster(system)

def startSharding(sys: ActorSystem = system, probe: ActorRef = testActor): Unit = {
ClusterSharding(sys).start(
typeName = "Entity",
entityProps = ClusterShardingRememberEntitiesSpec.props(probe),
settings = ClusterShardingSettings(system).withRememberEntities(true),
extractEntityId = extractEntityId,
extractShardId = extractShardId)
ClusterSharding(sys).start(typeName = "Entity",
entityProps = ClusterShardingRememberEntitiesSpec.props(probe),
settings = ClusterShardingSettings(system).withRememberEntities(true),
extractEntityId = extractEntityId,
extractShardId = extractShardId)
}

lazy val region = ClusterSharding(system).shardRegion("Entity")

@@ -229,4 +232,3 @@ abstract class ClusterShardingRememberEntitiesSpec(config: ClusterShardingRememb
}
}
}

@@ -60,8 +60,10 @@ class ClusterShardingSingleShardPerEntitySpecMultiJvmNode3 extends ClusterShardi
class ClusterShardingSingleShardPerEntitySpecMultiJvmNode4 extends ClusterShardingSingleShardPerEntitySpec
class ClusterShardingSingleShardPerEntitySpecMultiJvmNode5 extends ClusterShardingSingleShardPerEntitySpec

abstract class ClusterShardingSingleShardPerEntitySpec extends MultiNodeSpec(ClusterShardingSingleShardPerEntitySpecConfig)
with STMultiNodeSpec with ImplicitSender {
abstract class ClusterShardingSingleShardPerEntitySpec
extends MultiNodeSpec(ClusterShardingSingleShardPerEntitySpecConfig)
with STMultiNodeSpec
with ImplicitSender {
import ClusterShardingSingleShardPerEntitySpec._
import ClusterShardingSingleShardPerEntitySpecConfig._

@@ -69,19 +71,18 @@ abstract class ClusterShardingSingleShardPerEntitySpec extends MultiNodeSpec(Clu
def join(from: RoleName, to: RoleName): Unit = {
runOn(from) {
Cluster(system) join node(to).address
Cluster(system).join(node(to).address)
startSharding()
}
enterBarrier(from.name + "-joined")
}

def startSharding(): Unit = {
ClusterSharding(system).start(
typeName = "Entity",
entityProps = Props[Entity],
settings = ClusterShardingSettings(system),
extractEntityId = extractEntityId,
extractShardId = extractShardId)
ClusterSharding(system).start(typeName = "Entity",
entityProps = Props[Entity],
settings = ClusterShardingSettings(system),
extractEntityId = extractEntityId,
extractShardId = extractShardId)
}

lazy val region = ClusterSharding(system).shardRegion("Entity")

@@ -111,17 +111,15 @@ object ClusterShardingSpec {
}

def receive = {
case msg => counter forward msg
case msg => counter.forward(msg)
}
}
//#supervisor

}

abstract class ClusterShardingSpecConfig(
val mode: String,
val entityRecoveryStrategy: String = "all")
extends MultiNodeConfig {
abstract class ClusterShardingSpecConfig(val mode: String, val entityRecoveryStrategy: String = "all")
extends MultiNodeConfig {

val controller = role("controller")
val first = role("first")

@@ -186,8 +184,8 @@ object ClusterShardingDocCode {
val numberOfShards = 100

val extractShardId: ShardRegion.ExtractShardId = {
case EntityEnvelope(id, _) => (id % numberOfShards).toString
case Get(id) => (id % numberOfShards).toString
case EntityEnvelope(id, _) => (id % numberOfShards).toString
case Get(id) => (id % numberOfShards).toString
case ShardRegion.StartEntity(id) =>
// StartEntity is used by remembering entities feature
(id.toLong % numberOfShards).toString

@@ -197,8 +195,8 @@ object ClusterShardingDocCode {
{
//#extractShardId-StartEntity
val extractShardId: ShardRegion.ExtractShardId = {
case EntityEnvelope(id, _) => (id % numberOfShards).toString
case Get(id) => (id % numberOfShards).toString
case EntityEnvelope(id, _) => (id % numberOfShards).toString
case Get(id) => (id % numberOfShards).toString
case ShardRegion.StartEntity(id) =>
// StartEntity is used by remembering entities feature
(id.toLong % numberOfShards).toString

@@ -210,17 +208,15 @@ object ClusterShardingDocCode {
object PersistentClusterShardingSpecConfig extends ClusterShardingSpecConfig("persistence")
object DDataClusterShardingSpecConfig extends ClusterShardingSpecConfig("ddata")
object PersistentClusterShardingWithEntityRecoverySpecConfig extends ClusterShardingSpecConfig(
"persistence",
"all")
object DDataClusterShardingWithEntityRecoverySpecConfig extends ClusterShardingSpecConfig(
"ddata",
"constant")
object PersistentClusterShardingWithEntityRecoverySpecConfig extends ClusterShardingSpecConfig("persistence", "all")
object DDataClusterShardingWithEntityRecoverySpecConfig extends ClusterShardingSpecConfig("ddata", "constant")

class PersistentClusterShardingSpec extends ClusterShardingSpec(PersistentClusterShardingSpecConfig)
class DDataClusterShardingSpec extends ClusterShardingSpec(DDataClusterShardingSpecConfig)
class PersistentClusterShardingWithEntityRecoverySpec extends ClusterShardingSpec(PersistentClusterShardingWithEntityRecoverySpecConfig)
class DDataClusterShardingWithEntityRecoverySpec extends ClusterShardingSpec(DDataClusterShardingWithEntityRecoverySpecConfig)
class PersistentClusterShardingWithEntityRecoverySpec
extends ClusterShardingSpec(PersistentClusterShardingWithEntityRecoverySpecConfig)
class DDataClusterShardingWithEntityRecoverySpec
extends ClusterShardingSpec(DDataClusterShardingWithEntityRecoverySpecConfig)

class PersistentClusterShardingMultiJvmNode1 extends PersistentClusterShardingSpec
class PersistentClusterShardingMultiJvmNode2 extends PersistentClusterShardingSpec

@@ -254,13 +250,16 @@ class DDataClusterShardingWithEntityRecoveryMultiJvmNode5 extends DDataClusterSh
class DDataClusterShardingWithEntityRecoveryMultiJvmNode6 extends DDataClusterShardingSpec
class DDataClusterShardingWithEntityRecoveryMultiJvmNode7 extends DDataClusterShardingSpec

abstract class ClusterShardingSpec(config: ClusterShardingSpecConfig) extends MultiNodeSpec(config) with MultiNodeClusterSpec
with STMultiNodeSpec with ImplicitSender {
abstract class ClusterShardingSpec(config: ClusterShardingSpecConfig)
extends MultiNodeSpec(config)
with MultiNodeClusterSpec
with STMultiNodeSpec
with ImplicitSender {
import ClusterShardingSpec._
import config._

val storageLocations = List(new File(system.settings.config.getString(
"akka.cluster.sharding.distributed-data.durable.lmdb.dir")).getParentFile)
val storageLocations = List(
new File(system.settings.config.getString("akka.cluster.sharding.distributed-data.durable.lmdb.dir")).getParentFile)

override protected def atStartup(): Unit = {
storageLocations.foreach(dir => if (dir.exists) FileUtils.deleteQuietly(dir))

@@ -273,51 +272,57 @@ abstract class ClusterShardingSpec(config: ClusterShardingSpecConfig) extends Mu
def join(from: RoleName, to: RoleName): Unit = {
runOn(from) {
Cluster(system) join node(to).address
Cluster(system).join(node(to).address)
createCoordinator()
}
enterBarrier(from.name + "-joined")
}

lazy val replicator = system.actorOf(Replicator.props(
ReplicatorSettings(system).withGossipInterval(1.second).withMaxDeltaElements(10)), "replicator")
lazy val replicator = system.actorOf(
Replicator.props(ReplicatorSettings(system).withGossipInterval(1.second).withMaxDeltaElements(10)),
"replicator")

def createCoordinator(): Unit = {

def coordinatorProps(typeName: String, rebalanceEnabled: Boolean, rememberEntities: Boolean) = {
val allocationStrategy = new ShardCoordinator.LeastShardAllocationStrategy(rebalanceThreshold = 2, maxSimultaneousRebalance = 1)
val allocationStrategy =
new ShardCoordinator.LeastShardAllocationStrategy(rebalanceThreshold = 2, maxSimultaneousRebalance = 1)
val cfg = ConfigFactory.parseString(s"""
handoff-timeout = 10s
shard-start-timeout = 10s
rebalance-interval = ${if (rebalanceEnabled) "2s" else "3600s"}
""").withFallback(system.settings.config.getConfig("akka.cluster.sharding"))
val settings = ClusterShardingSettings(cfg).withRememberEntities(rememberEntities)
val majorityMinCap = system.settings.config.getInt(
"akka.cluster.sharding.distributed-data.majority-min-cap")
val majorityMinCap = system.settings.config.getInt("akka.cluster.sharding.distributed-data.majority-min-cap")
if (settings.stateStoreMode == "persistence")
ShardCoordinator.props(typeName, settings, allocationStrategy)
else
ShardCoordinator.props(typeName, settings, allocationStrategy, replicator, majorityMinCap)
}

List("counter", "rebalancingCounter", "RememberCounterEntities", "AnotherRememberCounter",
"RememberCounter", "RebalancingRememberCounter", "AutoMigrateRememberRegionTest").foreach { typeName =>
val rebalanceEnabled = typeName.toLowerCase.startsWith("rebalancing")
val rememberEnabled = typeName.toLowerCase.contains("remember")
val singletonProps = BackoffSupervisor.props(
childProps = coordinatorProps(typeName, rebalanceEnabled, rememberEnabled),
childName = "coordinator",
minBackoff = 5.seconds,
maxBackoff = 5.seconds,
randomFactor = 0.1,
maxNrOfRetries = -1).withDeploy(Deploy.local)
system.actorOf(
ClusterSingletonManager.props(
singletonProps,
terminationMessage = PoisonPill,
settings = ClusterSingletonManagerSettings(system)),
name = typeName + "Coordinator")
}
List("counter",
"rebalancingCounter",
"RememberCounterEntities",
"AnotherRememberCounter",
"RememberCounter",
"RebalancingRememberCounter",
"AutoMigrateRememberRegionTest").foreach { typeName =>
val rebalanceEnabled = typeName.toLowerCase.startsWith("rebalancing")
val rememberEnabled = typeName.toLowerCase.contains("remember")
val singletonProps = BackoffSupervisor
.props(childProps = coordinatorProps(typeName, rebalanceEnabled, rememberEnabled),
childName = "coordinator",
minBackoff = 5.seconds,
maxBackoff = 5.seconds,
randomFactor = 0.1,
maxNrOfRetries = -1)
.withDeploy(Deploy.local)
system.actorOf(
ClusterSingletonManager.props(singletonProps,
terminationMessage = PoisonPill,
settings = ClusterSingletonManagerSettings(system)),
name = typeName + "Coordinator")
}
}

def createRegion(typeName: String, rememberEntities: Boolean): ActorRef = {

@@ -327,19 +332,17 @@ abstract class ClusterShardingSpec(config: ClusterShardingSpecConfig) extends Mu
entity-restart-backoff = 1s
buffer-size = 1000
""").withFallback(system.settings.config.getConfig("akka.cluster.sharding"))
val settings = ClusterShardingSettings(cfg)
.withRememberEntities(rememberEntities)
val settings = ClusterShardingSettings(cfg).withRememberEntities(rememberEntities)
system.actorOf(
ShardRegion.props(
typeName = typeName,
entityProps = _ => qualifiedCounterProps(typeName),
settings = settings,
coordinatorPath = "/user/" + typeName + "Coordinator/singleton/coordinator",
extractEntityId = extractEntityId,
extractShardId = extractShardId,
handOffStopMessage = PoisonPill,
replicator,
majorityMinCap = 3),
ShardRegion.props(typeName = typeName,
entityProps = _ => qualifiedCounterProps(typeName),
settings = settings,
coordinatorPath = "/user/" + typeName + "Coordinator/singleton/coordinator",
extractEntityId = extractEntityId,
extractShardId = extractShardId,
handOffStopMessage = PoisonPill,
replicator,
majorityMinCap = 3),
name = typeName + "Region")
}

@@ -460,17 +463,16 @@ abstract class ClusterShardingSpec(config: ClusterShardingSpecConfig) extends Mu
buffer-size = 1000
""").withFallback(system.settings.config.getConfig("akka.cluster.sharding"))
val settings = ClusterShardingSettings(cfg)
val proxy = system.actorOf(
ShardRegion.proxyProps(
typeName = "counter",
dataCenter = None,
settings,
coordinatorPath = "/user/counterCoordinator/singleton/coordinator",
extractEntityId = extractEntityId,
extractShardId = extractShardId,
system.deadLetters,
majorityMinCap = 0),
name = "regionProxy")
val proxy = system.actorOf(ShardRegion.proxyProps(typeName = "counter",
dataCenter = None,
settings,
coordinatorPath =
"/user/counterCoordinator/singleton/coordinator",
extractEntityId = extractEntityId,
extractShardId = extractShardId,
system.deadLetters,
majorityMinCap = 0),
name = "regionProxy")

proxy ! Get(1)
expectMsg(2)

@@ -630,27 +632,24 @@ abstract class ClusterShardingSpec(config: ClusterShardingSpecConfig) extends Mu
"easy to use with extensions" in within(50.seconds) {
runOn(third, fourth, fifth, sixth) {
//#counter-start
val counterRegion: ActorRef = ClusterSharding(system).start(
typeName = "Counter",
entityProps = Props[Counter],
settings = ClusterShardingSettings(system),
extractEntityId = extractEntityId,
extractShardId = extractShardId)
val counterRegion: ActorRef = ClusterSharding(system).start(typeName = "Counter",
entityProps = Props[Counter],
settings = ClusterShardingSettings(system),
extractEntityId = extractEntityId,
extractShardId = extractShardId)
//#counter-start
ClusterSharding(system).start(
typeName = "AnotherCounter",
entityProps = Props[AnotherCounter],
settings = ClusterShardingSettings(system),
extractEntityId = extractEntityId,
extractShardId = extractShardId)
ClusterSharding(system).start(typeName = "AnotherCounter",
entityProps = Props[AnotherCounter],
settings = ClusterShardingSettings(system),
extractEntityId = extractEntityId,
extractShardId = extractShardId)

//#counter-supervisor-start
ClusterSharding(system).start(
typeName = "SupervisedCounter",
entityProps = Props[CounterSupervisor],
settings = ClusterShardingSettings(system),
extractEntityId = extractEntityId,
extractShardId = extractShardId)
ClusterSharding(system).start(typeName = "SupervisedCounter",
entityProps = Props[CounterSupervisor],
settings = ClusterShardingSettings(system),
extractEntityId = extractEntityId,
extractShardId = extractShardId)
//#counter-supervisor-start
}
enterBarrier("extension-started")

@@ -687,12 +686,11 @@ abstract class ClusterShardingSpec(config: ClusterShardingSpecConfig) extends Mu
}
"easy API for starting" in within(50.seconds) {
runOn(first) {
val counterRegionViaStart: ActorRef = ClusterSharding(system).start(
typeName = "ApiTest",
entityProps = Props[Counter],
settings = ClusterShardingSettings(system),
extractEntityId = extractEntityId,
extractShardId = extractShardId)
val counterRegionViaStart: ActorRef = ClusterSharding(system).start(typeName = "ApiTest",
entityProps = Props[Counter],
settings = ClusterShardingSettings(system),
extractEntityId = extractEntityId,
extractShardId = extractShardId)

val counterRegionViaGet: ActorRef = ClusterSharding(system).shardRegion("ApiTest")

@@ -705,12 +703,11 @@ abstract class ClusterShardingSpec(config: ClusterShardingSpecConfig) extends Mu
"demonstrate API for DC proxy" in within(50.seconds) {
runOn(sixth) {
// #proxy-dc
val counterProxyDcB: ActorRef = ClusterSharding(system).startProxy(
typeName = "Counter",
role = None,
dataCenter = Some("B"),
extractEntityId = extractEntityId,
extractShardId = extractShardId)
val counterProxyDcB: ActorRef = ClusterSharding(system).startProxy(typeName = "Counter",
role = None,
dataCenter = Some("B"),
extractEntityId = extractEntityId,
extractShardId = extractShardId)
// #proxy-dc
}
enterBarrier("after-dc-proxy")

@@ -960,4 +957,3 @@ abstract class ClusterShardingSpec(config: ClusterShardingSpecConfig) extends Mu
}
}
}

@@ -80,16 +80,20 @@ class MultiDcClusterShardingSpecMultiJvmNode2 extends MultiDcClusterShardingSpec
class MultiDcClusterShardingSpecMultiJvmNode3 extends MultiDcClusterShardingSpec
class MultiDcClusterShardingSpecMultiJvmNode4 extends MultiDcClusterShardingSpec

abstract class MultiDcClusterShardingSpec extends MultiNodeSpec(MultiDcClusterShardingSpecConfig) with MultiNodeClusterSpec
with STMultiNodeSpec with ImplicitSender {
abstract class MultiDcClusterShardingSpec
extends MultiNodeSpec(MultiDcClusterShardingSpecConfig)
with MultiNodeClusterSpec
with STMultiNodeSpec
with ImplicitSender {
import MultiDcClusterShardingSpec._
import MultiDcClusterShardingSpecConfig._

def join(from: RoleName, to: RoleName): Unit = {
runOn(from) {
cluster join node(to).address
cluster.join(node(to).address)
startSharding()
withClue(s"Failed waiting for ${cluster.selfUniqueAddress} to be up. Current state: ${cluster.state}" + cluster.state) {
withClue(
s"Failed waiting for ${cluster.selfUniqueAddress} to be up. Current state: ${cluster.state}" + cluster.state) {
within(15.seconds) {
awaitAssert(cluster.state.members.exists { m =>
m.uniqueAddress == cluster.selfUniqueAddress && m.status == MemberStatus.Up

@@ -101,12 +105,11 @@ abstract class MultiDcClusterShardingSpec extends MultiNodeSpec(MultiDcClusterSh
}

def startSharding(): Unit = {
ClusterSharding(system).start(
typeName = "Entity",
entityProps = Props[Entity](),
settings = ClusterShardingSettings(system),
extractEntityId = extractEntityId,
extractShardId = extractShardId)
ClusterSharding(system).start(typeName = "Entity",
entityProps = Props[Entity](),
settings = ClusterShardingSettings(system),
extractEntityId = extractEntityId,
extractShardId = extractShardId)
}

lazy val region = ClusterSharding(system).shardRegion("Entity")

@@ -190,12 +193,11 @@ abstract class MultiDcClusterShardingSpec extends MultiNodeSpec(MultiDcClusterSh
"allow proxy within same data center" in {
runOn(second) {
val proxy = ClusterSharding(system).startProxy(
typeName = "Entity",
role = None,
dataCenter = None, // by default use own DC
extractEntityId = extractEntityId,
extractShardId = extractShardId)
val proxy = ClusterSharding(system).startProxy(typeName = "Entity",
role = None,
dataCenter = None, // by default use own DC
extractEntityId = extractEntityId,
extractShardId = extractShardId)
proxy ! GetCount("5")
expectMsg(1)
}

@@ -204,12 +206,11 @@ abstract class MultiDcClusterShardingSpec extends MultiNodeSpec(MultiDcClusterSh
"allow proxy across different data centers" in {
runOn(second) {
val proxy = ClusterSharding(system).startProxy(
typeName = "Entity",
role = None,
dataCenter = Some("DC2"), // proxy to other DC
extractEntityId = extractEntityId,
extractShardId = extractShardId)
val proxy = ClusterSharding(system).startProxy(typeName = "Entity",
role = None,
dataCenter = Some("DC2"), // proxy to other DC
extractEntityId = extractEntityId,
extractShardId = extractShardId)

proxy ! GetCount("5")
expectMsg(2)

@@ -219,4 +220,3 @@ abstract class MultiDcClusterShardingSpec extends MultiNodeSpec(MultiDcClusterSh
}
}