Remove Exiting/Down node from other DC, #24171

* When leaving/downing the last node in a DC, it was not
  removed in other DCs, because removal was only performed by
  the leader in the owning DC (which is gone at that point).
* It is safe for leaders in other DCs to eagerly remove such
  nodes as well (see the sketch below).
* Note that gossip about the leaving node has already been sent
  out, so it reaches the other DCs unless there is a network
  partition. Nothing can be done about that case; the node will
  be replaced if it joins again.
Patrik Nordwall 2018-01-16 07:55:49 +01:00
parent fb72274b71
commit 2733a26540
2 changed files with 86 additions and 1 deletion


@@ -0,0 +1,72 @@
/**
 * Copyright (C) 2018 Lightbend Inc. <https://www.lightbend.com>
 */
package akka.cluster

import akka.remote.testkit.{ MultiNodeConfig, MultiNodeSpec }
import com.typesafe.config.ConfigFactory

import scala.concurrent.duration._

object MultiDcLastNodeSpec extends MultiNodeConfig {
  val first = role("first")
  val second = role("second")
  val third = role("third")

  commonConfig(ConfigFactory.parseString(
    s"""
    #akka.loglevel = DEBUG
    """).withFallback(MultiNodeClusterSpec.clusterConfig))

  // first and second form dc1, third is alone in dc2
  nodeConfig(first, second)(ConfigFactory.parseString(
    """
    akka.cluster.multi-data-center.self-data-center = "dc1"
    """))

  nodeConfig(third)(ConfigFactory.parseString(
    """
    akka.cluster.multi-data-center.self-data-center = "dc2"
    """))
}

class MultiDcLastNodeMultiJvmNode1 extends MultiDcLastNodeSpec
class MultiDcLastNodeMultiJvmNode2 extends MultiDcLastNodeSpec
class MultiDcLastNodeMultiJvmNode3 extends MultiDcLastNodeSpec

abstract class MultiDcLastNodeSpec extends MultiNodeSpec(MultiDcLastNodeSpec)
  with MultiNodeClusterSpec {
  import MultiDcLastNodeSpec._

  "A multi-dc cluster with one remaining node in other DC" must {
    "join" in {
      runOn(first) {
        cluster.join(first)
      }
      runOn(second, third) {
        cluster.join(first)
      }
      enterBarrier("join-cluster")

      within(20.seconds) {
        awaitAssert(clusterView.members.filter(_.status == MemberStatus.Up) should have size 3)
      }
      enterBarrier("cluster started")
    }

    "be able to leave" in {
      runOn(third) {
        // this works in the same way for down
        cluster.leave(address(third))
      }
      runOn(first, second) {
        // the node in dc2 must be removed from the membership seen in dc1,
        // even though dc2's own leader is the node that left
        awaitAssert(clusterView.members.map(_.address) should not contain address(third))
      }
      enterBarrier("cross-data-center-left")
    }
  }
}
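
To run this spec locally, the usual sbt-multi-jvm task applies; the project name below is an assumption based on Akka's standard build layout:

akka-cluster/multi-jvm:testOnly akka.cluster.MultiDcLastNodeSpec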