format source with scalafmt
parent 0f40491d42
commit ce404e4f53

1669 changed files with 43208 additions and 35404 deletions
@@ -88,7 +88,8 @@ abstract class ClusterShardingFailureSpecConfig(val mode: String) extends MultiN
 object PersistentClusterShardingFailureSpecConfig extends ClusterShardingFailureSpecConfig("persistence")
 object DDataClusterShardingFailureSpecConfig extends ClusterShardingFailureSpecConfig("ddata")
 
-class PersistentClusterShardingFailureSpec extends ClusterShardingFailureSpec(PersistentClusterShardingFailureSpecConfig)
+class PersistentClusterShardingFailureSpec
+    extends ClusterShardingFailureSpec(PersistentClusterShardingFailureSpecConfig)
 class DDataClusterShardingFailureSpec extends ClusterShardingFailureSpec(DDataClusterShardingFailureSpecConfig)
 
 class PersistentClusterShardingFailureMultiJvmNode1 extends PersistentClusterShardingFailureSpec
@@ -99,14 +100,17 @@ class DDataClusterShardingFailureMultiJvmNode1 extends DDataClusterShardingFailu
 class DDataClusterShardingFailureMultiJvmNode2 extends DDataClusterShardingFailureSpec
 class DDataClusterShardingFailureMultiJvmNode3 extends DDataClusterShardingFailureSpec
 
-abstract class ClusterShardingFailureSpec(config: ClusterShardingFailureSpecConfig) extends MultiNodeSpec(config) with STMultiNodeSpec with ImplicitSender {
+abstract class ClusterShardingFailureSpec(config: ClusterShardingFailureSpecConfig)
+    extends MultiNodeSpec(config)
+    with STMultiNodeSpec
+    with ImplicitSender {
   import ClusterShardingFailureSpec._
   import config._
 
   override def initialParticipants = roles.size
 
-  val storageLocations = List(new File(system.settings.config.getString(
-    "akka.cluster.sharding.distributed-data.durable.lmdb.dir")).getParentFile)
+  val storageLocations = List(
+    new File(system.settings.config.getString("akka.cluster.sharding.distributed-data.durable.lmdb.dir")).getParentFile)
 
   override protected def atStartup(): Unit = {
     storageLocations.foreach(dir => if (dir.exists) FileUtils.deleteQuietly(dir))
@@ -121,7 +125,7 @@ abstract class ClusterShardingFailureSpec(config: ClusterShardingFailureSpecConf
 
   def join(from: RoleName, to: RoleName): Unit = {
     runOn(from) {
-      cluster join node(to).address
+      cluster.join(node(to).address)
       startSharding()
 
      within(remaining) {
@@ -135,12 +139,11 @@ abstract class ClusterShardingFailureSpec(config: ClusterShardingFailureSpecConf
   }
 
   def startSharding(): Unit = {
-    ClusterSharding(system).start(
-      typeName = "Entity",
-      entityProps = Props[Entity],
-      settings = ClusterShardingSettings(system).withRememberEntities(true),
-      extractEntityId = extractEntityId,
-      extractShardId = extractShardId)
+    ClusterSharding(system).start(typeName = "Entity",
+                                  entityProps = Props[Entity],
+                                  settings = ClusterShardingSettings(system).withRememberEntities(true),
+                                  extractEntityId = extractEntityId,
+                                  extractShardId = extractShardId)
   }
 
   lazy val region = ClusterSharding(system).shardRegion("Entity")
@@ -276,4 +279,3 @@ abstract class ClusterShardingFailureSpec(config: ClusterShardingFailureSpecConf
 
     }
   }
-
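A commit of this shape (1669 files touched, long definitions and call sites mechanically rewrapped, infix calls such as cluster join node(to).address normalized to cluster.join(node(to).address)) is typically produced by running scalafmt over the whole build rather than by editing files by hand. As a rough sketch only, assuming the sbt-scalafmt plugin rather than whatever tooling this repository actually used:

    // project/plugins.sbt — hypothetical setup, not taken from this commit
    addSbtPlugin("org.scalameta" % "sbt-scalafmt" % "2.0.0")

With the plugin on the build, running sbt scalafmtAll rewrites every configured source directory according to the rules in .scalafmt.conf, which is how a tree-wide formatting commit like this one is usually generated in a single pass.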