pekko/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiDcSplitBrainSpec.scala

/*
* Copyright (C) 2009-2019 Lightbend Inc. <https://www.lightbend.com>
*/

package akka.cluster

import akka.actor.ActorSystem
import akka.cluster.ClusterEvent._
import akka.remote.testconductor.RoleName
import akka.remote.testkit.{ MultiNodeConfig, MultiNodeSpec }
import akka.remote.transport.ThrottlerTransportAdapter.Direction
import akka.testkit.TestProbe
import com.typesafe.config.ConfigFactory
import scala.concurrent.Await
import scala.concurrent.duration._

object MultiDcSplitBrainMultiJvmSpec extends MultiNodeConfig {
  val first = role("first")
  val second = role("second")
  val third = role("third")
  val fourth = role("fourth")
  val fifth = role("fifth")
  commonConfig(ConfigFactory.parseString("""
    akka.loglevel = DEBUG # issue #24955
    akka.cluster.debug.verbose-heartbeat-logging = on
    akka.cluster.debug.verbose-gossip-logging = on
    akka.remote.netty.tcp.connection-timeout = 5 s # speedup in case of connection issue
    akka.remote.retry-gate-closed-for = 1 s
    akka.cluster.multi-data-center {
      failure-detector {
        acceptable-heartbeat-pause = 4s
        heartbeat-interval = 1s
      }
    }
    akka.cluster {
      gossip-interval = 500ms
      leader-actions-interval = 1s
      auto-down-unreachable-after = 1s
    }
""").withFallback(MultiNodeClusterSpec.clusterConfig))

  nodeConfig(first, second)(ConfigFactory.parseString("""
    akka.cluster.multi-data-center.self-data-center = "dc1"
    """))

  nodeConfig(third, fourth, fifth)(ConfigFactory.parseString("""
    akka.cluster.multi-data-center.self-data-center = "dc2"
    """))

  testTransport(on = true)
}

class MultiDcSplitBrainMultiJvmNode1 extends MultiDcSplitBrainSpec
class MultiDcSplitBrainMultiJvmNode2 extends MultiDcSplitBrainSpec
class MultiDcSplitBrainMultiJvmNode3 extends MultiDcSplitBrainSpec
class MultiDcSplitBrainMultiJvmNode4 extends MultiDcSplitBrainSpec
class MultiDcSplitBrainMultiJvmNode5 extends MultiDcSplitBrainSpec
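
/**
 * Exercises cluster behavior across two data centers (dc1 = first, second;
 * dc2 = third, fourth, fifth) while all inter data center connections are
 * blackholed: members join, leave and restart (same host:port) within their
 * own data center during the split, and the spec verifies the
 * UnreachableDataCenter / ReachableDataCenter events on both sides.
 */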
abstract class MultiDcSplitBrainSpec
  extends MultiNodeSpec(MultiDcSplitBrainMultiJvmSpec)
  with MultiNodeClusterSpec {

  import MultiDcSplitBrainMultiJvmSpec._

  val dc1 = List(first, second)
  val dc2 = List(third, fourth, fifth)

  var splits = 0
  var unsplits = 0
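
  // Blackholes every dc1 <-> dc2 connection from the test conductor, then
  // verifies that each side publishes an UnreachableDataCenter event for the
  // other data center while no individual member is marked unreachable.
  // Roles in `doNotVerify` (not yet cluster members) skip the verification.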
  def splitDataCenters(doNotVerify: Set[RoleName]): Unit = {
    splits += 1
    val memberNodes = (dc1 ++ dc2).filterNot(doNotVerify)
    val probe = TestProbe()
    runOn(memberNodes: _*) {
      cluster.subscribe(probe.ref, classOf[UnreachableDataCenter])
      probe.expectMsgType[CurrentClusterState]
    }
    enterBarrier(s"split-$splits")

    runOn(first) {
      for (dc1Node <- dc1; dc2Node <- dc2) {
        testConductor.blackhole(dc1Node, dc2Node, Direction.Both).await
      }
    }
    enterBarrier(s"after-split-$splits")

    runOn(memberNodes: _*) {
      probe.expectMsgType[UnreachableDataCenter](15.seconds)
      cluster.unsubscribe(probe.ref)
      runOn(dc1: _*) {
        awaitAssert {
          cluster.state.unreachableDataCenters should ===(Set("dc2"))
        }
      }
      runOn(dc2: _*) {
        awaitAssert {
          cluster.state.unreachableDataCenters should ===(Set("dc1"))
        }
      }
      cluster.state.unreachable should ===(Set.empty)
    }
    enterBarrier(s"after-split-verified-$splits")
  }
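
  // Restores all dc1 <-> dc2 connections, then verifies that every remaining
  // member node observes a ReachableDataCenter event and that the set of
  // unreachable data centers becomes empty again.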
  def unsplitDataCenters(notMembers: Set[RoleName]): Unit = {
    unsplits += 1
    val memberNodes = (dc1 ++ dc2).filterNot(notMembers)
    val probe = TestProbe()
    runOn(memberNodes: _*) {
      cluster.subscribe(probe.ref, classOf[ReachableDataCenter])
      probe.expectMsgType[CurrentClusterState]
    }
    enterBarrier(s"unsplit-$unsplits")

    runOn(first) {
      for (dc1Node <- dc1; dc2Node <- dc2) {
        testConductor.passThrough(dc1Node, dc2Node, Direction.Both).await
      }
    }
    enterBarrier(s"after-unsplit-$unsplits")

    runOn(memberNodes: _*) {
      probe.expectMsgType[ReachableDataCenter](25.seconds)
      system.log.debug("Reachable data center received")
      cluster.unsubscribe(probe.ref)
      awaitAssert {
        cluster.state.unreachableDataCenters should ===(Set.empty)
        system.log.debug("Cluster state: {}", cluster.state)
      }
    }
    enterBarrier(s"after-unsplit-verified-$unsplits")
  }
"A cluster with multiple data centers" must {
"be able to form two data centers" in {
awaitClusterUp(first, second, third)
}
"be able to have a data center member join while there is inter data center split" in within(20.seconds) {
// introduce a split between data centers
splitDataCenters(doNotVerify = Set(fourth, fifth))
runOn(fourth) {
cluster.join(third)
}
enterBarrier("inter-data-center unreachability")
// should be able to join and become up since the
// split is between dc1 and dc2
runOn(third, fourth) {
awaitAssert(clusterView.members.collect {
case m if m.dataCenter == "dc2" && m.status == MemberStatus.Up => m.address
        } should ===(Set(address(third), address(fourth))))
      }
      enterBarrier("dc2-join-completed")

      unsplitDataCenters(notMembers = Set(fifth))

      runOn(dc1: _*) {
        awaitAssert(clusterView.members.collect {
          case m if m.dataCenter == "dc2" && m.status == MemberStatus.Up => m.address
        } should ===(Set(address(third), address(fourth))))
      }
enterBarrier("inter-data-center-split-1-done")
}
// fifth is still not a member of the cluster
"be able to have data center member leave while there is inter data center split" in within(20.seconds) {
splitDataCenters(doNotVerify = Set(fifth))
runOn(fourth) {
        cluster.leave(fourth)
      }

      runOn(third) {
        awaitAssert(clusterView.members.filter(_.address == address(fourth)) should ===(Set.empty))
      }
      enterBarrier("node-4-left")

      unsplitDataCenters(notMembers = Set(fourth, fifth))

      runOn(first, second) {
        awaitAssert(clusterView.members.filter(_.address == address(fourth)) should ===(Set.empty))
      }
enterBarrier("inter-data-center-split-2-done")
}
// forth has left the cluster, fifth is still not a member
"be able to have data center member restart (same host:port) while there is inter data center split" in within(60.seconds) {
val subscribeProbe = TestProbe()
runOn(first, second, third, fifth) {
Cluster(system).subscribe(subscribeProbe.ref, InitialStateAsSnapshot, classOf[MemberUp], classOf[MemberRemoved])
subscribeProbe.expectMsgType[CurrentClusterState]
}
enterBarrier("subscribed")
runOn(fifth) {
Cluster(system).join(third)
}

      var fifthOriginalUniqueAddress: Option[UniqueAddress] = None
      runOn(first, second, third, fifth) {
        awaitAssert(clusterView.members.collect {
          case m if m.dataCenter == "dc2" && m.status == MemberStatus.Up => m.address
        } should ===(Set(address(third), address(fifth))))
        fifthOriginalUniqueAddress = clusterView.members.collectFirst {
          case m if m.address == address(fifth) => m.uniqueAddress
        }
      }
      enterBarrier("fifth-joined")

      splitDataCenters(doNotVerify = Set(fourth))

      runOn(fifth) {
        Cluster(system).shutdown()
      }
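
      // with auto-down-unreachable-after = 1s (see commonConfig) the
      // shut-down fifth node is downed and removed by its own data center,
      // even though the inter data center split is still in effect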
      runOn(third) {
        awaitAssert(clusterView.members.collect {
          case m if m.dataCenter == "dc2" => m.address
        } should ===(Set(address(third))))
      }
      enterBarrier("fifth-removed")

      runOn(fifth) {
        // we can't use any multi-jvm test facilities on fifth after this
        // point, because the actor system must be shut down before a new one
        // can be started on the same port
        val thirdAddress = address(third)
        enterBarrier("fifth-waiting-for-termination")
        Await.ready(system.whenTerminated, remaining)
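
        // bind the new system to the exact same port, so the restarted node
        // comes back with the same host:port address but a fresh uid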
        val port = Cluster(system).selfAddress.port.get
        val restartedSystem = ActorSystem(
          system.name,
          ConfigFactory.parseString(s"""
            akka.remote.netty.tcp.port = $port
            akka.remote.artery.canonical.port = $port
            akka.coordinated-shutdown.terminate-actor-system = on
            """).withFallback(system.settings.config))
        Cluster(restartedSystem).join(thirdAddress)
        Await.ready(restartedSystem.whenTerminated, remaining)
      }

      // no multi-jvm test facilities on fifth after this
      val remainingRoles = roles.filterNot(_ == fifth)

      runOn(remainingRoles: _*) {
        enterBarrier("fifth-waiting-for-termination")
      }

      runOn(first) {
        for (dc1Node <- dc1; dc2Node <- dc2) {
          testConductor.passThrough(dc1Node, dc2Node, Direction.Both).await
        }
        testConductor.shutdown(fifth)
      }

      runOn(remainingRoles: _*) {
        enterBarrier("fifth-restarted")
      }
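
      // the probe should have seen MemberUp for fifth's original incarnation,
      // then MemberRemoved for it after the shutdown, and finally MemberUp
      // for the restarted incarnation: same address, new uid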
      runOn(first, second, third) {
        awaitAssert(clusterView.members.collectFirst {
          case m if m.dataCenter == "dc2" && m.address == fifthOriginalUniqueAddress.get.address => m.uniqueAddress
        } should not be fifthOriginalUniqueAddress) // different uid
        subscribeProbe.expectMsgType[MemberUp].member.uniqueAddress should ===(fifthOriginalUniqueAddress.get)
        subscribeProbe.expectMsgType[MemberRemoved].member.uniqueAddress should ===(fifthOriginalUniqueAddress.get)
        subscribeProbe.expectMsgType[MemberUp].member.address should ===(fifthOriginalUniqueAddress.get.address)
      }

      runOn(remainingRoles: _*) {
        enterBarrier("fifth-re-joined")
      }

      runOn(first) {
        // leaving makes the restartedSystem on fifth terminate itself,
        // because coordinated-shutdown.terminate-actor-system = on
        Cluster(system).leave(fifthOriginalUniqueAddress.get.address)
      }

      runOn(first, second, third) {
        awaitAssert {
          clusterView.members.map(_.address) should ===(Set(address(first), address(second), address(third)))
        }
      }

      runOn(remainingRoles: _*) {
        enterBarrier("restarted-fifth-removed")
      }
    }
  }
}