=clu #18337 Disable down-removal-margin by default
For manual downing it is not needed. For auto-down it does not add any extra safety, since auto-down does not handle network partitions anyway. The setting is still useful if you implement a downing strategy that handles network partitions, e.g. by keeping the larger side of the partition and shutting down the smaller side.
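For example, a deployment that ships its own partition-aware downing strategy could re-enable the margin in its application.conf. A minimal sketch; the 20s value is only illustrative (it happens to be the previous default), not a recommendation:

    akka.cluster {
      # keep automatic downing off; the custom downing strategy decides instead
      auto-down-unreachable-after = off

      # give persistent actors on the losing side of a partition time to stop
      # before their shards/singletons are re-created on the surviving side
      down-removal-margin = 20s
    }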
parent baa4399521
commit bfde1eff19
7 changed files with 17 additions and 8 deletions

@@ -58,7 +58,6 @@ abstract class ClusterShardingFailureSpecConfig(val mode: String) extends MultiN
       akka.actor.provider = "akka.cluster.ClusterActorRefProvider"
       akka.remote.log-remote-lifecycle-events = off
       akka.cluster.auto-down-unreachable-after = 0s
-      akka.cluster.down-removal-margin = 5s
       akka.cluster.roles = ["backend"]
       akka.persistence.journal.plugin = "akka.persistence.journal.leveldb-shared"
       akka.persistence.journal.leveldb-shared {

@@ -66,7 +66,6 @@ abstract class ClusterShardingLeavingSpecConfig(val mode: String) extends MultiN
       akka.actor.provider = "akka.cluster.ClusterActorRefProvider"
       akka.remote.log-remote-lifecycle-events = off
       akka.cluster.auto-down-unreachable-after = 0s
-      akka.cluster.down-removal-margin = 5s
       akka.persistence.journal.plugin = "akka.persistence.journal.leveldb-shared"
       akka.persistence.journal.leveldb-shared {
         timeout = 5s

@@ -111,7 +111,6 @@ abstract class ClusterShardingSpecConfig(val mode: String) extends MultiNodeConf
       akka.actor.provider = "akka.cluster.ClusterActorRefProvider"
       akka.remote.log-remote-lifecycle-events = off
       akka.cluster.auto-down-unreachable-after = 0s
-      akka.cluster.down-removal-margin = 5s
       akka.cluster.roles = ["backend"]
       akka.cluster.distributed-data.gossip-interval = 1s
       akka.persistence.journal.plugin = "akka.persistence.journal.leveldb-shared"

@@ -556,8 +556,11 @@ class ClusterSingletonManager(
   }

   def scheduleDelayedMemberRemoved(m: Member): Unit = {
-    log.debug("Schedule DelayedMemberRemoved for [{}]", m.address)
-    context.system.scheduler.scheduleOnce(removalMargin, self, DelayedMemberRemoved(m))(context.dispatcher)
+    if (removalMargin > Duration.Zero) {
+      log.debug("Schedule DelayedMemberRemoved for [{}]", m.address)
+      context.system.scheduler.scheduleOnce(removalMargin, self, DelayedMemberRemoved(m))(context.dispatcher)
+    } else
+      self ! DelayedMemberRemoved(m)
   }

   def gotoOldest(): State = {

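The guard above avoids registering a zero-delay timer when the margin is disabled: DelayedMemberRemoved is sent to self immediately instead. A standalone sketch of the same pattern, using a hypothetical MarginAware actor rather than Akka's actual ClusterSingletonManager:

    import akka.actor.{ Actor, ActorSystem, Props }
    import scala.concurrent.duration._

    object MarginAware {
      final case class Delayed(payload: String)
    }

    class MarginAware(margin: FiniteDuration) extends Actor {
      import MarginAware.Delayed
      import context.dispatcher

      // go through the scheduler only when a positive margin is configured,
      // otherwise deliver the message to self immediately
      def scheduleDelayed(payload: String): Unit =
        if (margin > Duration.Zero)
          context.system.scheduler.scheduleOnce(margin, self, Delayed(payload))
        else
          self ! Delayed(payload)

      def receive = {
        case Delayed(p) => println(s"handling [$p] after margin $margin")
        case s: String  => scheduleDelayed(s)
      }
    }

    object MarginAwareDemo extends App {
      val system = ActorSystem("demo")
      val ref = system.actorOf(Props(new MarginAware(Duration.Zero)), "marginAware")
      ref ! "member-removed" // margin is zero, so it is handled without a timer
    }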
@@ -29,11 +29,16 @@ akka {
     # Disable with "off" or specify a duration to enable auto-down.
     auto-down-unreachable-after = off

-    # Margin until shards or singletons that belonged to a downed/removed
+    # Time margin after which shards or singletons that belonged to a downed/removed
     # partition are created in surviving partition. The purpose of this margin is that
     # in case of a network partition the persistent actors in the non-surviving partitions
     # must be stopped before corresponding persistent actors are started somewhere else.
-    down-removal-margin = 20s
+    # This is useful if you implement downing strategies that handle network partitions,
+    # e.g. by keeping the larger side of the partition and shutting down the smaller side.
+    # It will not add any extra safety for auto-down-unreachable-after, since that is not
+    # handling network partitions.
+    # Disable with "off" or specify a duration to enable.
+    down-removal-margin = off

     # The roles of this member. List of strings, e.g. roles = ["A", "B"].
     # The roles are part of the membership information and can be used by

@@ -69,7 +69,10 @@ final class ClusterSettings(val config: Config, val systemName: String) {

   val DownRemovalMargin: FiniteDuration = {
     val key = "down-removal-margin"
-    cc.getMillisDuration(key) requiring (_ > Duration.Zero, key + " > 0s")
+    cc.getString(key).toLowerCase(Locale.ROOT) match {
+      case "off" ⇒ Duration.Zero
+      case _     ⇒ cc.getMillisDuration(key) requiring (_ >= Duration.Zero, key + " >= 0s, or off")
+    }
   }

   val Roles: Set[String] = immutableSeq(cc.getStringList("roles")).toSet

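The rewritten setting parser maps the literal string "off" to Duration.Zero and otherwise reads the value as a duration. A standalone sketch of that behavior using plain Typesafe Config; parse is a hypothetical helper, not Akka's ClusterSettings API:

    import com.typesafe.config.ConfigFactory
    import java.util.Locale
    import java.util.concurrent.TimeUnit
    import scala.concurrent.duration._

    object DownRemovalMarginParsing extends App {
      // "off" disables the margin (zero); anything else must parse as a duration
      def parse(value: String): FiniteDuration = {
        val cfg = ConfigFactory.parseString(s"down-removal-margin = $value")
        cfg.getString("down-removal-margin").toLowerCase(Locale.ROOT) match {
          case "off" => Duration.Zero
          case _     => cfg.getDuration("down-removal-margin", TimeUnit.MILLISECONDS).millis
        }
      }

      println(parse("off")) // disabled -> Duration.Zero
      println(parse("20s")) // 20000 milliseconds
    }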
@@ -39,6 +39,7 @@ class ClusterConfigSpec extends AkkaSpec {
       UnreachableNodesReaperInterval should ===(1 second)
       PublishStatsInterval should ===(Duration.Undefined)
       AutoDownUnreachableAfter should ===(Duration.Undefined)
+      DownRemovalMargin should ===(Duration.Zero)
       MinNrOfMembers should ===(1)
       MinNrOfMembersOfRole should ===(Map.empty[String, Int])
       Roles should ===(Set.empty[String])