=cls #18978 Lazy startup of shards when rememberEntities=false
* and don't populate the unallocatedShards Set in the State when rememberEntities=false
parent 516b1f0954
commit a6fd7b448f
3 changed files with 41 additions and 24 deletions
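For context, the flag routed through the coordinator below is configured per entity type via ClusterShardingSettings. A minimal usage sketch (the Counter entity, envelope type, and extractors are illustrative and not part of this commit; only withRememberEntities comes from this change set):

import akka.actor.{ ActorRef, ActorSystem, Props }
import akka.cluster.sharding.{ ClusterSharding, ClusterShardingSettings, ShardRegion }

object LazyShardingExample {
  final case class EntityEnvelope(id: Long, payload: Any)

  val extractEntityId: ShardRegion.ExtractEntityId = {
    case EntityEnvelope(id, payload) ⇒ (id.toString, payload)
  }
  val extractShardId: ShardRegion.ExtractShardId = {
    case EntityEnvelope(id, _) ⇒ (id % 10).toString
  }

  def startCounterRegion(system: ActorSystem, counterProps: Props): ActorRef =
    ClusterSharding(system).start(
      typeName = "Counter",
      entityProps = counterProps,
      // rememberEntities = false: the coordinator keeps no unallocatedShards,
      // so shards are started lazily, on the first message routed to them
      settings = ClusterShardingSettings(system).withRememberEntities(false),
      extractEntityId = extractEntityId,
      extractShardId = extractShardId)
}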
ShardCoordinator.scala

@@ -257,7 +257,15 @@ object ShardCoordinator {
     // shards for each region
     regions: Map[ActorRef, Vector[ShardId]] = Map.empty,
     regionProxies: Set[ActorRef] = Set.empty,
-    unallocatedShards: Set[ShardId] = Set.empty) extends ClusterShardingSerializable {
+    unallocatedShards: Set[ShardId] = Set.empty,
+    rememberEntities: Boolean = false) extends ClusterShardingSerializable {
+
+    def withRememberEntities(enabled: Boolean): State = {
+      if (enabled)
+        copy(rememberEntities = enabled)
+      else
+        copy(unallocatedShards = Set.empty, rememberEntities = enabled)
+    }

     def updated(event: DomainEvent): State = event match {
       case ShardRegionRegistered(region) ⇒
@@ -268,28 +276,34 @@ object ShardCoordinator {
         copy(regionProxies = regionProxies + proxy)
       case ShardRegionTerminated(region) ⇒
         require(regions.contains(region), s"Terminated region $region not registered: $this")
+        val newUnallocatedShards =
+          if (rememberEntities) (unallocatedShards ++ regions(region)) else unallocatedShards
         copy(
           regions = regions - region,
           shards = shards -- regions(region),
-          unallocatedShards = unallocatedShards ++ regions(region))
+          unallocatedShards = newUnallocatedShards)
       case ShardRegionProxyTerminated(proxy) ⇒
         require(regionProxies.contains(proxy), s"Terminated region proxy $proxy not registered: $this")
         copy(regionProxies = regionProxies - proxy)
       case ShardHomeAllocated(shard, region) ⇒
         require(regions.contains(region), s"Region $region not registered: $this")
         require(!shards.contains(shard), s"Shard [$shard] already allocated: $this")
+        val newUnallocatedShards =
+          if (rememberEntities) (unallocatedShards - shard) else unallocatedShards
         copy(
           shards = shards.updated(shard, region),
           regions = regions.updated(region, regions(region) :+ shard),
-          unallocatedShards = unallocatedShards - shard)
+          unallocatedShards = newUnallocatedShards)
       case ShardHomeDeallocated(shard) ⇒
         require(shards.contains(shard), s"Shard [$shard] not allocated: $this")
         val region = shards(shard)
         require(regions.contains(region), s"Region $region for shard [$shard] not registered: $this")
+        val newUnallocatedShards =
+          if (rememberEntities) (unallocatedShards + shard) else unallocatedShards
         copy(
           shards = shards - shard,
           regions = regions.updated(region, regions(region).filterNot(_ == shard)),
-          unallocatedShards = unallocatedShards + shard)
+          unallocatedShards = newUnallocatedShards)
     }
   }
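A self-contained sketch of the State behavior above, with plain String ids standing in for ActorRef and ShardId so it runs outside Akka (a simplified model, not the real internal class):

object StateSketch extends App {
  final case class State(
    shards: Map[String, String] = Map.empty, // shard -> region
    regions: Map[String, Vector[String]] = Map.empty, // region -> its shards
    unallocatedShards: Set[String] = Set.empty,
    rememberEntities: Boolean = false) {

    // mirrors the ShardRegionTerminated branch of State.updated
    def regionTerminated(region: String): State = {
      val newUnallocatedShards =
        if (rememberEntities) unallocatedShards ++ regions(region) else unallocatedShards
      copy(
        regions = regions - region,
        shards = shards -- regions(region),
        unallocatedShards = newUnallocatedShards)
    }
  }

  val s = State(
    shards = Map("7" -> "regionA"),
    regions = Map("regionA" -> Vector("7")))

  // with rememberEntities = false the set stays empty: nothing is queued for
  // eager re-allocation, so shard "7" will only be restarted on demand
  println(s.regionTerminated("regionA").unallocatedShards) // Set()
  // with the flag enabled the orphaned shard is remembered for re-allocation
  println(s.copy(rememberEntities = true).regionTerminated("regionA").unallocatedShards) // Set(7)
}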
@@ -381,7 +395,7 @@ abstract class ShardCoordinator(typeName: String, settings: ClusterShardingSetti
   val cluster = Cluster(context.system)
   val removalMargin = cluster.settings.DownRemovalMargin

-  var state = State.empty
+  var state = State.empty.withRememberEntities(settings.rememberEntities)
   var rebalanceInProgress = Set.empty[ShardId]
   var unAckedHostShards = Map.empty[ShardId, Cancellable]
   // regions that have requested handoff, for graceful shutdown
@@ -653,7 +667,10 @@ abstract class ShardCoordinator(typeName: String, settings: ClusterShardingSetti
     unAckedHostShards = unAckedHostShards.updated(shard, cancel)
   }

-  def allocateShardHomes(): Unit = state.unallocatedShards.foreach { self ! GetShardHome(_) }
+  def allocateShardHomes(): Unit = {
+    if (settings.rememberEntities)
+      state.unallocatedShards.foreach { self ! GetShardHome(_) }
+  }

   def continueGetShardHome(shard: ShardId, region: ActorRef, getShardHomeSender: ActorRef): Unit =
     if (!rebalanceInProgress.contains(shard)) {
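The consequence of the guarded allocateShardHomes, sketched as plain Scala (illustrative names, not coordinator internals): with rememberEntities = false nothing is pre-allocated, and a shard only gets a home when the first request for it arrives, mirroring the GetShardHome flow:

object LazyAllocationSketch extends App {
  var homes = Map.empty[String, String] // shard -> region
  val unallocatedShards = Set.empty[String] // stays empty when not remembering

  def allocateShardHomes(): Unit =
    unallocatedShards.foreach(shard ⇒ homes += shard -> "someRegion")

  def getShardHome(shard: String, requester: String): String =
    homes.get(shard) match {
      case Some(region) ⇒ region
      case None ⇒ // allocated on first demand
        homes += shard -> requester
        requester
    }

  allocateShardHomes()
  println(homes) // Map(): nothing started eagerly
  println(getShardHome("7", "regionA")) // regionA: allocated on first message
}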
@@ -747,6 +764,7 @@ class PersistentShardCoordinator(typeName: String, settings: ClusterShardingSett
       state = st

     case RecoveryCompleted ⇒
+      state = state.withRememberEntities(settings.rememberEntities)
       watchStateActors()
   }
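Why RecoveryCompleted re-applies the flag: rememberEntities lives in configuration, not reliably in persisted data, so state recovered from older events or snapshots carries the field's default and must be overridden from the live settings. An illustrative model of that step (not the persistence API):

object RecoverySketch extends App {
  final case class State(
    unallocatedShards: Set[String] = Set.empty,
    rememberEntities: Boolean = false) {
    def withRememberEntities(enabled: Boolean): State =
      if (enabled) copy(rememberEntities = true)
      else copy(unallocatedShards = Set.empty, rememberEntities = false)
  }

  // recovered from old data: the flag took its default during deserialization
  val recovered = State(unallocatedShards = Set("3", "7"))

  // assuming settings.rememberEntities = true in the running system
  println(recovered.withRememberEntities(true)) // State(Set(3, 7),true): shards kept
  // with remembering disabled the stale set is dropped outright
  println(recovered.withRememberEntities(false)) // State(Set(),false)
}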
@@ -798,6 +816,7 @@ class DDataShardCoordinator(typeName: String, settings: ClusterShardingSettings,

   implicit val node = Cluster(context.system)
   val CoordinatorStateKey = LWWRegisterKey[State](s"${typeName}CoordinatorState")
+  val initEmptyState = State.empty.withRememberEntities(settings.rememberEntities)

   node.subscribe(self, ClusterShuttingDown.getClass)

@@ -809,7 +828,7 @@ class DDataShardCoordinator(typeName: String, settings: ClusterShardingSettings,
   // This state will drop all other messages since they will be retried
   def waitingForState: Receive = ({
     case g @ GetSuccess(CoordinatorStateKey, _) ⇒
-      state = g.get(CoordinatorStateKey).value
+      state = g.get(CoordinatorStateKey).value.withRememberEntities(settings.rememberEntities)
       context.become(waitingForStateInitialized)
       // note that watchStateActors may call update
       watchStateActors()
@@ -880,7 +899,7 @@ class DDataShardCoordinator(typeName: String, settings: ClusterShardingSettings,

   def sendUpdate(evt: DomainEvent) = {
     val s = state.updated(evt)
-    replicator ! Update(CoordinatorStateKey, LWWRegister(State.empty), WriteMajority(updatingStateTimeout), Some(evt)) { reg ⇒
+    replicator ! Update(CoordinatorStateKey, LWWRegister(initEmptyState), WriteMajority(updatingStateTimeout), Some(evt)) { reg ⇒
       reg.withValue(s)
     }
   }
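The switch to LWWRegister(initEmptyState) matters because Replicator.Update only falls back to its initial value when the key holds no data yet; a fresh coordinator must therefore start from a State that already carries the configured flag, not the bare State default. A plain-Scala sketch of that semantics (names illustrative):

object InitialValueSketch extends App {
  final case class State(rememberEntities: Boolean = false)

  // mirrors the Update fallback: modify the stored value if present, else the initial
  def update(stored: Option[State], initial: State)(modify: State ⇒ State): State =
    modify(stored.getOrElse(initial))

  val initEmptyState = State(rememberEntities = true) // from settings, assumed true here

  // very first update in the cluster: nothing stored, so `initial` is used,
  // and the flag survives instead of reverting to the State default (false)
  println(update(stored = None, initial = initEmptyState)(identity)) // State(true)
}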
ShardRegion.scala

@@ -752,8 +752,7 @@ class ShardRegion(
           None
         case None ⇒
           throw new IllegalStateException("Shard must not be allocated to a proxy only ShardRegion")
-      }
-      )
+      })

     }
   }
ClusterShardingSpec.scala

@@ -232,25 +232,26 @@ abstract class ClusterShardingSpec(config: ClusterShardingSpecConfig) extends Mu
   val replicator = system.actorOf(Replicator.props(
     ReplicatorSettings(system).withGossipInterval(1.second).withMaxDeltaElements(10)), "replicator")

-  def coordinatorProps(typeName: String, rebalanceEnabled: Boolean) = {
+  def coordinatorProps(typeName: String, rebalanceEnabled: Boolean, rememberEntities: Boolean) = {
     val allocationStrategy = new ShardCoordinator.LeastShardAllocationStrategy(rebalanceThreshold = 2, maxSimultaneousRebalance = 1)
     val cfg = ConfigFactory.parseString(s"""
       handoff-timeout = 10s
       shard-start-timeout = 10s
       rebalance-interval = ${if (rebalanceEnabled) "2s" else "3600s"}
       """).withFallback(system.settings.config.getConfig("akka.cluster.sharding"))
-    val settings = ClusterShardingSettings(cfg)
+    val settings = ClusterShardingSettings(cfg).withRememberEntities(rememberEntities)
     if (settings.stateStoreMode == "persistence")
       ShardCoordinator.props(typeName, settings, allocationStrategy)
     else
       ShardCoordinator.props(typeName, settings, allocationStrategy, replicator)
   }

-  List("counter", "rebalancingCounter", "PersistentCounterEntities", "AnotherPersistentCounter",
-    "PersistentCounter", "RebalancingPersistentCounter", "AutoMigrateRegionTest").foreach { typeName ⇒
+  List("counter", "rebalancingCounter", "RememberCounterEntities", "AnotherRememberCounter",
+    "RememberCounter", "RebalancingRememberCounter", "AutoMigrateRememberRegionTest").foreach { typeName ⇒
     val rebalanceEnabled = typeName.toLowerCase.startsWith("rebalancing")
+    val rememberEnabled = typeName.toLowerCase.contains("remember")
     val singletonProps = BackoffSupervisor.props(
-      childProps = coordinatorProps(typeName, rebalanceEnabled),
+      childProps = coordinatorProps(typeName, rebalanceEnabled, rememberEnabled),
       childName = "coordinator",
       minBackoff = 5.seconds,
       maxBackoff = 5.seconds,
@@ -286,11 +287,11 @@ abstract class ClusterShardingSpec(config: ClusterShardingSpecConfig) extends Mu
   lazy val region = createRegion("counter", rememberEntities = false)
   lazy val rebalancingRegion = createRegion("rebalancingCounter", rememberEntities = false)

-  lazy val persistentEntitiesRegion = createRegion("PersistentCounterEntities", rememberEntities = true)
-  lazy val anotherPersistentRegion = createRegion("AnotherPersistentCounter", rememberEntities = true)
-  lazy val persistentRegion = createRegion("PersistentCounter", rememberEntities = true)
-  lazy val rebalancingPersistentRegion = createRegion("RebalancingPersistentCounter", rememberEntities = true)
-  lazy val autoMigrateRegion = createRegion("AutoMigrateRegionTest", rememberEntities = true)
+  lazy val persistentEntitiesRegion = createRegion("RememberCounterEntities", rememberEntities = true)
+  lazy val anotherPersistentRegion = createRegion("AnotherRememberCounter", rememberEntities = true)
+  lazy val persistentRegion = createRegion("RememberCounter", rememberEntities = true)
+  lazy val rebalancingPersistentRegion = createRegion("RebalancingRememberCounter", rememberEntities = true)
+  lazy val autoMigrateRegion = createRegion("AutoMigrateRememberRegionTest", rememberEntities = true)

   s"Cluster sharding ($mode)" must {
@@ -529,7 +530,6 @@ abstract class ClusterShardingSpec(config: ClusterShardingSpecConfig) extends Mu
     }
-
     "rebalance to nodes with less shards" in within(60 seconds) {

       runOn(fourth) {
         for (n ← 1 to 10) {
           rebalancingRegion ! EntityEnvelope(n, Increment)
@@ -558,7 +558,6 @@ abstract class ClusterShardingSpec(config: ClusterShardingSpecConfig) extends Mu
       }
-
       enterBarrier("after-9")

     }
   }
@@ -811,7 +810,7 @@ abstract class ClusterShardingSpec(config: ClusterShardingSpecConfig) extends Mu

       autoMigrateRegion ! Get(1)
       expectMsg(2)
-      lastSender.path should ===(node(third) / "user" / "AutoMigrateRegionTestRegion" / "1" / "1")
+      lastSender.path should ===(node(third) / "user" / "AutoMigrateRememberRegionTestRegion" / "1" / "1")

       //Kill region 3
       system.actorSelection(lastSender.path.parent.parent) ! PoisonPill
@@ -821,7 +820,7 @@ abstract class ClusterShardingSpec(config: ClusterShardingSpecConfig) extends Mu
       // Wait for migration to happen
       //Test the shard, thus counter was moved onto node 4 and started.
       runOn(fourth) {
-        val counter1 = system.actorSelection(system / "AutoMigrateRegionTestRegion" / "1" / "1")
+        val counter1 = system.actorSelection(system / "AutoMigrateRememberRegionTestRegion" / "1" / "1")
         val probe = TestProbe()
         awaitAssert({
           counter1.tell(Identify(1), probe.ref)