Clear system messages sequence number for restarted node, #24847

* Detect in SystemMessageDelivery that the incarnation has changed
  and then reset the sequence number (illustrated in the sketch below)
* Take the incarnation number into account in the ClearSystemMessageDelivery
  message
* Trigger quarantine earlier in ClusterRemoteWatcher if a node with the
  same host:port has joined
* Change quarantine-removed-node-after to 5s; it shouldn't be necessary
  to delay it by 30s
* Add test reproducer
Patrik Nordwall 2018-04-10 11:39:55 +02:00 committed by GitHub
parent 85026f5f1d
commit 43dc381d59
6 changed files with 165 additions and 15 deletions
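
The SystemMessageDelivery part of the fix boils down to two rules: the outbound sequence numbering for reliable system messages must start over when the peer at the same host:port comes back as a new incarnation (new uid), and a clear request must carry the incarnation it was issued for so that a late clear aimed at the old incarnation cannot wipe state that already belongs to the new one. The following is a minimal, standalone sketch of those two rules only; the names (PeerIncarnation, OutboundSysMsgState, peerSeen, clear) are invented for illustration and are not Akka's internal SystemMessageDelivery API.

// Standalone sketch, not Akka internals: models one outbound system-message
// channel and how its sequence number reacts to a restarted peer.
object SystemMessageSeqNrSketch {

  final case class PeerIncarnation(uid: Long)

  final class OutboundSysMsgState {
    private var incarnation: Option[PeerIncarnation] = None
    private var seqNr: Long = 0L

    // Handshake completed with the peer. If the peer restarted (same host:port,
    // new uid) the numbering starts over; otherwise the new incarnation would
    // keep rejecting "too high" sequence numbers with negative acknowledgments.
    def peerSeen(peer: PeerIncarnation): Unit =
      if (!incarnation.contains(peer)) {
        incarnation = Some(peer)
        seqNr = 0L
      }

    def nextSeqNr(): Long = { seqNr += 1; seqNr }

    // A clear request names the incarnation it was issued for, so a stale clear
    // for the old incarnation is ignored once a new incarnation is in place.
    def clear(forIncarnation: PeerIncarnation): Unit =
      if (incarnation.contains(forIncarnation)) {
        incarnation = None
        seqNr = 0L
      }
  }

  def main(args: Array[String]): Unit = {
    val state = new OutboundSysMsgState
    state.peerSeen(PeerIncarnation(uid = 1L))
    println(state.nextSeqNr()) // 1
    println(state.nextSeqNr()) // 2

    state.peerSeen(PeerIncarnation(uid = 2L)) // restarted node: same host:port, new uid
    println(state.nextSeqNr()) // 1 again: numbering reset for the new incarnation

    state.clear(PeerIncarnation(uid = 1L)) // stale clear for the old incarnation is ignored
    println(state.nextSeqNr()) // 2
  }
}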

@@ -74,7 +74,7 @@ akka {
# Artery only setting
# When a node has been gracefully removed, let this time pass (to allow for example
# cluster singleton handover to complete) and then quarantine the removed node.
quarantine-removed-node-after=30s
quarantine-removed-node-after = 5s
# If this is set to "off", the leader will not move 'Joining' members to 'Up' during a network
# split. This feature allows the leader to accept 'Joining' members to be 'WeaklyUp'
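
As a side note on the shortened default above, a deployment that needs more time for singleton handover can still override it from application.conf. The snippet below only demonstrates Typesafe Config layering; the full path akka.cluster.quarantine-removed-node-after is inferred from cluster.settings.QuarantineRemovedNodeAfter used further down, and the values are examples.

import com.typesafe.config.ConfigFactory

object QuarantineSettingSketch extends App {
  // Stand-in for the reference.conf default changed by this commit.
  val referenceDefault = ConfigFactory.parseString(
    "akka.cluster.quarantine-removed-node-after = 5s")

  // An application.conf can still choose a longer delay if 5s is too aggressive.
  val applicationOverride = ConfigFactory.parseString(
    "akka.cluster.quarantine-removed-node-after = 10s")

  val config = applicationOverride.withFallback(referenceDefault)
  println(config.getDuration("akka.cluster.quarantine-removed-node-after")) // PT10S
}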

@@ -5,9 +5,11 @@
package akka.cluster
import scala.concurrent.duration.FiniteDuration
import akka.actor._
import akka.cluster.ClusterEvent.CurrentClusterState
import akka.cluster.ClusterEvent.MemberEvent
import akka.cluster.ClusterEvent.MemberJoined
import akka.cluster.ClusterEvent.MemberUp
import akka.cluster.ClusterEvent.MemberRemoved
import akka.cluster.ClusterEvent.MemberWeaklyUp
@@ -59,6 +61,8 @@ private[cluster] class ClusterRemoteWatcher(
private final case class DelayedQuarantine(m: Member, previousStatus: MemberStatus) extends NoSerializationVerificationNeeded
private var pendingDelayedQuarantine: Set[UniqueAddress] = Set.empty
var clusterNodes: Set[Address] = Set.empty
override def preStart(): Unit = {
@@ -78,6 +82,7 @@ private[cluster] class ClusterRemoteWatcher(
clusterNodes = state.members.collect { case m if m.address != selfAddress ⇒ m.address }
clusterNodes foreach takeOverResponsibility
unreachable = unreachable diff clusterNodes
case MemberJoined(m) ⇒ memberJoined(m)
case MemberUp(m) ⇒ memberUp(m)
case MemberWeaklyUp(m) ⇒ memberUp(m)
case MemberRemoved(m, previousStatus) ⇒ memberRemoved(m, previousStatus)
@@ -85,8 +90,14 @@ private[cluster] class ClusterRemoteWatcher(
case DelayedQuarantine(m, previousStatus) ⇒ delayedQuarantine(m, previousStatus)
}
private def memberJoined(m: Member): Unit = {
if (m.address != selfAddress)
quarantineOldIncarnation(m)
}
def memberUp(m: Member): Unit =
if (m.address != selfAddress) {
quarantineOldIncarnation(m)
clusterNodes += m.address
takeOverResponsibility(m.address)
unreachable -= m.address
@@ -99,8 +110,11 @@ private[cluster] class ClusterRemoteWatcher(
if (previousStatus == MemberStatus.Down) {
quarantine(m.address, Some(m.uniqueAddress.longUid), s"Cluster member removed, previous status [$previousStatus]")
} else if (arteryEnabled) {
// don't quarantine gracefully removed members (leaving) directly,
// Don't quarantine gracefully removed members (leaving) directly,
// give Cluster Singleton some time to exchange TakeOver/HandOver messages.
// If new incarnation of same host:port is seen then the quarantine of previous incarnation
// is triggered earlier.
pendingDelayedQuarantine += m.uniqueAddress
import context.dispatcher
context.system.scheduler.scheduleOnce(cluster.settings.QuarantineRemovedNodeAfter, self, DelayedQuarantine(m, previousStatus))
}
@@ -108,10 +122,24 @@ private[cluster] class ClusterRemoteWatcher(
publishAddressTerminated(m.address)
}
def delayedQuarantine(m: Member, previousStatus: MemberStatus): Unit =
quarantine(m.address, Some(m.uniqueAddress.longUid), s"Cluster member removed, previous status [$previousStatus]")
def quarantineOldIncarnation(newIncarnation: Member): Unit = {
// If new incarnation of same host:port is seen then quarantine previous incarnation
if (pendingDelayedQuarantine.nonEmpty)
pendingDelayedQuarantine.find(_.address == newIncarnation.address).foreach { oldIncarnation ⇒
pendingDelayedQuarantine -= oldIncarnation
quarantine(oldIncarnation.address, Some(oldIncarnation.longUid),
s"Cluster member removed, new incarnation joined")
}
}
override def watchNode(watchee: InternalActorRef) =
def delayedQuarantine(m: Member, previousStatus: MemberStatus): Unit = {
if (pendingDelayedQuarantine(m.uniqueAddress)) {
pendingDelayedQuarantine -= m.uniqueAddress
quarantine(m.address, Some(m.uniqueAddress.longUid), s"Cluster member removed, previous status [$previousStatus]")
}
}
override def watchNode(watchee: InternalActorRef): Unit =
if (!clusterNodes(watchee.path.address)) super.watchNode(watchee)
/**

@@ -0,0 +1,87 @@
/*
* Copyright (C) 2018 Lightbend Inc. <https://www.lightbend.com>
*/
package akka.cluster
import scala.concurrent.duration._
import akka.actor.ActorIdentity
import akka.actor.Identify
import akka.actor.PoisonPill
import akka.remote.artery.ArteryMultiNodeSpec
import akka.testkit.ImplicitSender
import akka.testkit.TestActors
/**
* Reproducer for issue #24847
*/
class ResetSystemMessageSeqNrSpec extends ArteryMultiNodeSpec("""
    akka.loglevel = INFO
    akka.actor.provider=cluster
    akka.cluster.jmx.multi-mbeans-in-same-jvm = on
  """) with ImplicitSender {

  "System messages sequence numbers" should {
    "be reset when connecting to new incarnation" in {
      val sys2 = newRemoteSystem(name = Some(system.name))

      Cluster(system).join(Cluster(system).selfAddress)
      Cluster(sys2).join(Cluster(system).selfAddress)
      within(10.seconds) {
        awaitAssert {
          Cluster(system).state.members.map(_.uniqueAddress) should ===(Set(
            Cluster(system).selfUniqueAddress, Cluster(sys2).selfUniqueAddress))
        }
      }

      sys2.actorOf(TestActors.echoActorProps, name = "echo1")
      system.actorSelection(rootActorPath(sys2) / "user" / "echo1") ! Identify("1")
      val echo1 = expectMsgType[ActorIdentity].ref.get
      watch(echo1)

      sys2.actorOf(TestActors.echoActorProps, name = "echo2")
      system.actorSelection(rootActorPath(sys2) / "user" / "echo2") ! Identify("2")
      val echo2 = expectMsgType[ActorIdentity].ref.get
      watch(echo2)
      echo2 ! PoisonPill
      expectTerminated(echo2) // now we know that the watch of echo1 has been established

      Cluster(sys2).leave(Cluster(sys2).selfAddress)
      within(10.seconds) {
        awaitAssert {
          Cluster(system).state.members.map(_.uniqueAddress) should not contain Cluster(sys2).selfUniqueAddress
        }
      }
      expectTerminated(echo1)
      shutdown(sys2)

      val sys3 = newRemoteSystem(
        name = Some(system.name),
        extraConfig = Some(s"akka.remote.artery.canonical.port=${Cluster(sys2).selfAddress.port.get}"))
      Cluster(sys3).join(Cluster(system).selfAddress)
      within(10.seconds) {
        awaitAssert {
          Cluster(system).state.members.map(_.uniqueAddress) should ===(Set(
            Cluster(system).selfUniqueAddress, Cluster(sys3).selfUniqueAddress))
        }
      }

      sys3.actorOf(TestActors.echoActorProps, name = "echo3")
      system.actorSelection(rootActorPath(sys3) / "user" / "echo3") ! Identify("3")
      val echo3 = expectMsgType[ActorIdentity].ref.get
      watch(echo3)
      // To clearly see the reproducer for issue #24847 one could put a sleep here and observe the
      // "negative acknowledgment" log messages, but it also failed on the next expectTerminated because
      // the Watch message was never delivered.
      echo3 ! PoisonPill
      expectTerminated(echo3)
    }
  }
}