From 72f678281ed9aad6571d2483425398213ab555c0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Thu, 7 Jun 2012 14:21:14 +0200 Subject: [PATCH 01/20] Fixed wrong formatting in docs --- akka-docs/cluster/cluster.rst | 7 ++++--- akka-docs/general/message-send-semantics.rst | 6 +++--- 2 files changed, 7 insertions(+), 6 deletions(-) diff --git a/akka-docs/cluster/cluster.rst b/akka-docs/cluster/cluster.rst index fb53f13131..1368d7835f 100644 --- a/akka-docs/cluster/cluster.rst +++ b/akka-docs/cluster/cluster.rst @@ -5,8 +5,7 @@ Cluster Specification ###################### -.. note:: *This document describes the new clustering coming in Akka Coltrane and -is not available in the latest stable release)* +.. note:: *This document describes the new clustering coming in Akka Coltrane and is not available in the latest stable release)* Intro ===== @@ -304,7 +303,9 @@ node from the cluster, marking it as ``removed``. A node can also be removed forcefully by moving it directly to the ``removed`` state using the ``remove`` action. The cluster will rebalance based on the new -cluster membership. +cluster membership. This will also happen if you are shutting the system down +forcefully (through an external ``SIGKILL`` signal, ``System.exit(status)`` or +similar. If a node is unreachable then gossip convergence is not possible and therefore any ``leader`` actions are also not possible (for instance, allowing a node to diff --git a/akka-docs/general/message-send-semantics.rst b/akka-docs/general/message-send-semantics.rst index d9488d1f2b..41eb727358 100644 --- a/akka-docs/general/message-send-semantics.rst +++ b/akka-docs/general/message-send-semantics.rst @@ -48,14 +48,14 @@ At-most-once Actual transports may provide stronger semantics, but at-most-once is the semantics you should expect. -The alternatives would be once-and-only-once, which is extremely costly, +The alternatives would be once-and-only-once, which is extremely costly, or at-least-once which essentially requires idempotency of message processing, which is a user-level concern. Ordering is preserved on a per-sender basis ------------------------------------------- -Actor ``A1` sends messages ``M1``, ``M2``, ``M3`` to ``A2`` +Actor ``A1`` sends messages ``M1``, ``M2``, ``M3`` to ``A2`` Actor ``A3`` sends messages ``M4``, ``M5``, ``M6`` to ``A2`` This means that: @@ -66,4 +66,4 @@ This means that: 5) ``A2`` can see messages from ``A1`` interleaved with messages from ``A3`` 6) Since there is no guaranteed delivery, none, some or all of the messages may arrive to ``A2`` -.. _Erlang documentation: http://www.erlang.org/faq/academic.html \ No newline at end of file +.. 
_Erlang documentation: http://www.erlang.org/faq/academic.html From 57fadc1f7d776738b874f10ff9eb6e49c4055ff4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Fri, 8 Jun 2012 11:50:36 +0200 Subject: [PATCH 02/20] Added MembershipChangeListenerRemovedMultiJvmSpec MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- .../MembershipChangeListenerRemovedSpec.scala | 71 +++++++++++++++++++ 1 file changed, 71 insertions(+) create mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerRemovedSpec.scala diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerRemovedSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerRemovedSpec.scala new file mode 100644 index 0000000000..6b737a22e2 --- /dev/null +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerRemovedSpec.scala @@ -0,0 +1,71 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ +package akka.cluster + +import scala.collection.immutable.SortedSet +import org.scalatest.BeforeAndAfter +import com.typesafe.config.ConfigFactory +import akka.remote.testkit.MultiNodeConfig +import akka.remote.testkit.MultiNodeSpec +import akka.testkit._ +import akka.util.duration._ + +object MembershipChangeListenerRemovedMultiJvmSpec extends MultiNodeConfig { + val first = role("first") + val second = role("second") + val third = role("third") + + commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) +} + +class MembershipChangeListenerRemovedMultiJvmNode1 extends MembershipChangeListenerRemovedSpec +class MembershipChangeListenerRemovedMultiJvmNode2 extends MembershipChangeListenerRemovedSpec +class MembershipChangeListenerRemovedMultiJvmNode3 extends MembershipChangeListenerRemovedSpec + +abstract class MembershipChangeListenerRemovedSpec extends MultiNodeSpec(MembershipChangeListenerRemovedMultiJvmSpec) + with MultiNodeClusterSpec with ImplicitSender with BeforeAndAfter { + import MembershipChangeListenerRemovedMultiJvmSpec._ + + override def initialParticipants = 3 + + lazy val firstAddress = node(first).address + lazy val secondAddress = node(second).address + lazy val thirdAddress = node(third).address + + val reaperWaitingTime = 30.seconds.dilated + + "A registered MembershipChangeListener" must { + "be notified when new node is REMOVED" taggedAs LongRunningTest in { + + runOn(first) { + cluster.self + } + testConductor.enter("first-started") + + runOn(second, third) { + cluster.join(firstAddress) + } + awaitUpConvergence(numberOfMembers = 3) + testConductor.enter("rest-started") + + runOn(third) { + val removedLatch = TestLatch() + cluster.registerListener(new MembershipChangeListener { + def notify(members: SortedSet[Member]) { + println("------- MembershipChangeListener " + members.mkString(", ")) + if (members.size == 3 && members.find(_.address == secondAddress).isEmpty) + removedLatch.countDown() + } + }) + removedLatch.await + } + + runOn(first) { + cluster.leave(secondAddress) + } + + testConductor.enter("finished") + } + } +} From 45b2484f62982c19ece9aca1b3301375153db7e4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Fri, 8 Jun 2012 11:51:34 +0200 Subject: [PATCH 03/20] Implemented/Fixed Cluster.remove() and state transition from LEAVING -> REMOVED. 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- .../src/main/scala/akka/cluster/Cluster.scala | 91 +++++++++++-------- 1 file changed, 55 insertions(+), 36 deletions(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index c5ad773989..b2fe9c7352 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -50,7 +50,7 @@ sealed trait ClusterMessage extends Serializable /** * Cluster commands sent by the USER. */ -object ClusterAction { +object ClusterUserAction { /** * Command to join the cluster. Sent when a node (reprsesented by 'address') @@ -72,6 +72,12 @@ object ClusterAction { * Command to remove a node from the cluster immediately. */ case class Remove(address: Address) extends ClusterMessage +} + +/** + * Cluster commands sent by the LEADER. + */ +object ClusterLeaderAction { /** * Command to mark a node to be removed from the cluster immediately. @@ -197,8 +203,8 @@ case class Gossip( } /** - * Marks the gossip as seen by this node (selfAddress) by updating the address entry in the 'gossip.overview.seen' - * Map with the VectorClock for the new gossip. + * Marks the gossip as seen by this node (address) by updating the address entry in the 'gossip.overview.seen' + * Map with the VectorClock (version) for the new gossip. */ def seen(address: Address): Gossip = { if (overview.seen.contains(address) && overview.seen(address) == version) this @@ -253,7 +259,8 @@ case class Gossip( * Instantiated as a single instance for each Cluster - e.g. commands are serialized to Cluster message after message. */ final class ClusterCommandDaemon extends Actor { - import ClusterAction._ + import ClusterUserAction._ + import ClusterLeaderAction._ val cluster = Cluster(context.system) val log = Logging(context.system, this) @@ -331,8 +338,6 @@ trait ClusterNodeMBean { def leave(address: String) def down(address: String) def remove(address: String) - - def shutdown() } /** @@ -499,10 +504,14 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ /** * Shuts down all connections to other members, the cluster daemon and the periodic gossip and cleanup tasks. + * + * INTERNAL API: + * Should not called by the user. The user can issue a LEAVE command which will tell the node + * to go through graceful handoff process LEAVE -> EXITING -> REMOVED -> SHUTDOWN. 
*/ - def shutdown(): Unit = { + private[akka] def shutdown(): Unit = { if (isRunning.compareAndSet(true, false)) { - log.info("Cluster Node [{}] - Shutting down cluster Node and cluster daemons...", selfAddress) + log.info("Cluster Node [{}] - Shutting down cluster node...", selfAddress) gossipCanceller.cancel() failureDetectorReaperCanceller.cancel() leaderActionsCanceller.cancel() @@ -512,6 +521,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ } catch { case e: InstanceNotFoundException ⇒ // ignore - we are running multiple cluster nodes in the same JVM (probably for testing) } + log.info("Cluster Node [{}] - Cluster node successfully shut down", selfAddress) } } @@ -543,7 +553,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ */ def join(address: Address): Unit = { val connection = clusterCommandConnectionFor(address) - val command = ClusterAction.Join(selfAddress) + val command = ClusterUserAction.Join(selfAddress) log.info("Cluster Node [{}] - Trying to send JOIN to [{}] through connection [{}]", selfAddress, address, connection) connection ! command } @@ -552,21 +562,21 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ * Send command to issue state transition to LEAVING for the node specified by 'address'. */ def leave(address: Address): Unit = { - clusterCommandDaemon ! ClusterAction.Leave(address) + clusterCommandDaemon ! ClusterUserAction.Leave(address) } /** - * Send command to issue state transition to from DOWN to EXITING for the node specified by 'address'. + * Send command to DOWN the node specified by 'address'. */ def down(address: Address): Unit = { - clusterCommandDaemon ! ClusterAction.Down(address) + clusterCommandDaemon ! ClusterUserAction.Down(address) } /** - * Send command to issue state transition to REMOVED for the node specified by 'address'. + * Send command to REMOVE the node specified by 'address'. */ def remove(address: Address): Unit = { - clusterCommandDaemon ! ClusterAction.Remove(address) + clusterCommandDaemon ! ClusterUserAction.Remove(address) } // ======================================================== @@ -642,13 +652,15 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ */ private[cluster] final def exiting(address: Address): Unit = { log.info("Cluster Node [{}] - Marking node [{}] as EXITING", selfAddress, address) + // FIXME implement when we implement hand-off } /** * State transition to REMOVED. 
*/ private[cluster] final def removing(address: Address): Unit = { - log.info("Cluster Node [{}] - Marking node [{}] as REMOVED", selfAddress, address) + log.info("Cluster Node [{}] - Node has been REMOVED by the leader - shutting down...", selfAddress) + shutdown() } /** @@ -727,6 +739,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ val winningGossip = if (remoteGossip.version <> localGossip.version) { // concurrent + println("=======>>> CONCURRENT") val mergedGossip = remoteGossip merge localGossip val versionedMergedGossip = mergedGossip + vclockNode @@ -737,20 +750,23 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ versionedMergedGossip } else if (remoteGossip.version < localGossip.version) { + println("=======>>> LOCAL") // local gossip is newer localGossip } else { + println("=======>>> REMOTE") // remote gossip is newer remoteGossip } + println("=======>>> WINNING " + winningGossip.members.mkString(", ")) val newState = localState copy (latestGossip = winningGossip seen selfAddress) // if we won the race then update else try again if (!state.compareAndSet(localState, newState)) receive(sender, remoteGossip) // recur if we fail the update else { - log.debug("Cluster Node [{}] - Receiving gossip from [{}]", selfAddress, sender.address) + log.info("Cluster Node [{}] - Receiving gossip from [{}]", selfAddress, sender.address) if (sender.address != selfAddress) failureDetector heartbeat sender.address @@ -772,8 +788,8 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ * @param oldState the state to change the member status in * @return the updated new state with the new member status */ - private def switchMemberStatusTo(newStatus: MemberStatus, state: State): State = { - log.info("Cluster Node [{}] - Switching membership status to [{}]", selfAddress, newStatus) + private def switchMemberStatusTo(newStatus: MemberStatus, state: State): State = { // TODO: Removed this method? Currently not used. + log.debug("Cluster Node [{}] - Switching membership status to [{}]", selfAddress, newStatus) val localSelf = self @@ -789,7 +805,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ else member } - // ugly crap to work around bug in scala colletions ('val ss: SortedSet[Member] = SortedSet.empty[Member] ++ aSet' does not compile) + // NOTE: ugly crap to work around bug in scala colletions ('val ss: SortedSet[Member] = SortedSet.empty[Member] ++ aSet' does not compile) val newMembersSortedSet = SortedSet[Member](newMembersSet.toList: _*) val newGossip = localGossip copy (members = newMembersSortedSet) @@ -936,8 +952,8 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ val localUnreachableMembers = localOverview.unreachable // Leader actions are as follows: - // 1. Move JOINING => UP -- When a node joins the cluster - // 2. Move EXITING => REMOVED -- When all nodes have seen that the node is EXITING (convergence) + // 1. Move EXITING => REMOVED -- When all nodes have seen that the node is EXITING (convergence) - remove the nodes from the node ring + // 2. Move JOINING => UP -- When a node joins the cluster // 3. Move LEAVING => EXITING -- When all partition handoff has completed // 4. Move UNREACHABLE => DOWN -- When the node is in the UNREACHABLE set it can be auto-down by leader // 5. 
Updating the vclock version for the changes @@ -951,9 +967,20 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ val newMembers = - localMembers map { member ⇒ + // ---------------------- + // 1. Move EXITING => REMOVED - e.g. remove the nodes from the 'members' set/node ring + // ---------------------- + localMembers filter { member ⇒ + if (member.status == MemberStatus.Exiting) { + log.info("Cluster Node [{}] - Leader is moving node [{}] from EXITING to REMOVED - Removing node from node ring", selfAddress, member.address) + hasChangedState = true + clusterCommandConnectionFor(member.address) ! ClusterUserAction.Remove(member.address) // tell the removed node to shut himself down + false + } else true + + } map { member ⇒ // ---------------------- - // 1. Move JOINING => UP (once all nodes have seen that this node is JOINING e.g. we have a convergence) + // 2. Move JOINING => UP (once all nodes have seen that this node is JOINING e.g. we have a convergence) // ---------------------- if (member.status == MemberStatus.Joining) { log.info("Cluster Node [{}] - Leader is moving node [{}] from JOINING to UP", selfAddress, member.address) @@ -961,16 +988,6 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ member copy (status = MemberStatus.Up) } else member - } map { member ⇒ - // ---------------------- - // 2. Move EXITING => REMOVED (once all nodes have seen that this node is EXITING e.g. we have a convergence) - // ---------------------- - if (member.status == MemberStatus.Exiting) { - log.info("Cluster Node [{}] - Leader is moving node [{}] from EXITING to REMOVED", selfAddress, member.address) - hasChangedState = true - member copy (status = MemberStatus.Removed) - } else member - } map { member ⇒ // ---------------------- // 3. Move LEAVING => EXITING (once we have a convergence on LEAVING *and* if we have a successful partition handoff) @@ -978,10 +995,12 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ if (member.status == MemberStatus.Leaving && hasPartionHandoffCompletedSuccessfully(localGossip)) { log.info("Cluster Node [{}] - Leader is moving node [{}] from LEAVING to EXITING", selfAddress, member.address) hasChangedState = true + clusterCommandConnectionFor(member.address) ! ClusterLeaderAction.Exit(member.address) // FIXME should use ? to await completion of handoff? member copy (status = MemberStatus.Exiting) } else member } + localGossip copy (members = newMembers) // update gossip } else if (autoDown) { @@ -1045,7 +1064,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ // First check that: // 1. we don't have any members that are unreachable (unreachable.isEmpty == true), or - // 2. all unreachable members in the set have status DOWN + // 2. 
all unreachable members in the set have status DOWN or REMOVED // Else we can't continue to check for convergence // When that is done we check that all the entries in the 'seen' table have the same vector clock version if (unreachable.isEmpty || !unreachable.exists { m ⇒ @@ -1055,8 +1074,10 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ val seen = gossip.overview.seen val views = Set.empty[VectorClock] ++ seen.values + println("=======>>> VIEWS " + views.size) if (views.size == 1) { log.debug("Cluster Node [{}] - Cluster convergence reached", selfAddress) + println("=======>>> ----------------------- HAS CONVERGENCE") Some(gossip) } else None } else None @@ -1144,8 +1165,6 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ def down(address: String) = clusterNode.down(AddressFromURIString(address)) def remove(address: String) = clusterNode.remove(AddressFromURIString(address)) - - def shutdown() = clusterNode.shutdown() } log.info("Cluster Node [{}] - registering cluster JMX MBean [{}]", selfAddress, clusterMBeanName) try { From 469fcd8305257f0b00525ad60b5b066781fe5920 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Sat, 16 Jun 2012 00:00:19 +0200 Subject: [PATCH 04/20] Redesign of life-cycle management of EXITING -> REMOVED. Fixes #2177. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Removed REMOVED as explicit valid member state - Implemented leader moving either itself or other member from EXITING -> REMOVED - Added sending Remove message for removed node to shut down itself - Fixed a few bugs - Removed 'remove' from Cluster and JMX interface - Added bunch of ScalaDoc - Added isRunning method Signed-off-by: Jonas Bonér --- .../src/main/scala/akka/cluster/Cluster.scala | 343 ++++++++++-------- 1 file changed, 194 insertions(+), 149 deletions(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index 694793249f..ad9f9abaa4 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -6,27 +6,27 @@ package akka.cluster import akka.actor._ import akka.actor.Status._ +import akka.ConfigurationException +import akka.dispatch.Await +import akka.dispatch.MonitorableThreadFactory +import akka.event.Logging +import akka.jsr166y.ThreadLocalRandom +import akka.pattern.ask import akka.remote._ import akka.routing._ -import akka.event.Logging -import akka.dispatch.Await -import akka.pattern.ask import akka.util._ import akka.util.duration._ -import akka.ConfigurationException -import java.util.concurrent.atomic.{ AtomicReference, AtomicBoolean } -import java.util.concurrent.TimeUnit._ -import java.util.concurrent.TimeoutException -import akka.jsr166y.ThreadLocalRandom -import java.lang.management.ManagementFactory -import java.io.Closeable -import javax.management._ -import scala.collection.immutable.{ Map, SortedSet } -import scala.annotation.tailrec -import com.google.protobuf.ByteString import akka.util.internal.HashedWheelTimer -import akka.dispatch.MonitorableThreadFactory +import com.google.protobuf.ByteString +import java.io.Closeable +import java.lang.management.ManagementFactory +import java.util.concurrent.atomic.{ AtomicReference, AtomicBoolean } +import java.util.concurrent.TimeoutException +import java.util.concurrent.TimeUnit._ +import javax.management._ import MemberStatus._ +import scala.annotation.tailrec +import 
scala.collection.immutable.{ Map, SortedSet } /** * Interface for membership change listener. @@ -69,11 +69,6 @@ object ClusterUserAction { * Command to mark node as temporary down. */ case class Down(address: Address) extends ClusterMessage - - /** - * Command to remove a node from the cluster immediately. - */ - case class Remove(address: Address) extends ClusterMessage } /** @@ -82,15 +77,25 @@ object ClusterUserAction { object ClusterLeaderAction { /** + * INTERNAL API. + * * Command to mark a node to be removed from the cluster immediately. * Can only be sent by the leader. */ - private[akka] case class Exit(address: Address) extends ClusterMessage + private[cluster] case class Exit(address: Address) extends ClusterMessage + + /** + * INTERNAL API. + * + * Command to remove a node from the cluster immediately. + */ + private[cluster] case class Remove(address: Address) extends ClusterMessage } /** * Represents the address and the current status of a cluster member node. * + * Note: `hashCode` and `equals` are solely based on the underlying `Address`, not its `MemberStatus`. */ class Member(val address: Address, val status: MemberStatus) extends ClusterMessage { override def hashCode = address.## @@ -105,7 +110,7 @@ class Member(val address: Address, val status: MemberStatus) extends ClusterMess object Member { /** - * Sort Address by host and port + * `Address` ordering type class, sorts addresses by host and port. */ implicit val addressOrdering: Ordering[Address] = Ordering.fromLessThan[Address] { (a, b) ⇒ if (a.host != b.host) a.host.getOrElse("").compareTo(b.host.getOrElse("")) < 0 @@ -113,6 +118,9 @@ object Member { else false } + /** + * `Member` ordering type class, sorts members by `Address`. + */ implicit val ordering: Ordering[Member] = new Ordering[Member] { def compare(x: Member, y: Member) = addressOrdering.compare(x.address, y.address) } @@ -154,10 +162,11 @@ case class GossipEnvelope(from: Address, gossip: Gossip) extends ClusterMessage * Can be one of: Joining, Up, Leaving, Exiting and Down. */ sealed trait MemberStatus extends ClusterMessage { + /** - * Using the same notion for 'unavailable' as 'non-convergence': DOWN and REMOVED. + * Using the same notion for 'unavailable' as 'non-convergence': DOWN */ - def isUnavailable: Boolean = this == Down || this == Removed + def isUnavailable: Boolean = this == Down } object MemberStatus { @@ -223,6 +232,7 @@ case class Gossip( // FIXME can be disabled as optimization assertInvariants + private def assertInvariants: Unit = { val unreachableAndLive = members.intersect(overview.unreachable) if (unreachableAndLive.nonEmpty) @@ -248,6 +258,9 @@ case class Gossip( */ def :+(node: VectorClock.Node): Gossip = copy(version = version :+ node) + /** + * Adds a member to the member node ring. + */ def :+(member: Member): Gossip = { if (members contains member) this else this copy (members = members + member) @@ -312,11 +325,14 @@ case class Gossip( case class Heartbeat(from: Address) extends ClusterMessage /** + * INTERNAL API. + * * Manages routing of the different cluster commands. * Instantiated as a single instance for each Cluster - e.g. commands are serialized to Cluster message after message. 
*/ -private[akka] final class ClusterCommandDaemon(cluster: Cluster) extends Actor { - import ClusterAction._ +private[cluster] final class ClusterCommandDaemon(cluster: Cluster) extends Actor { + import ClusterUserAction._ + import ClusterLeaderAction._ val log = Logging(context.system, this) @@ -332,10 +348,12 @@ private[akka] final class ClusterCommandDaemon(cluster: Cluster) extends Actor { } /** + * INTERNAL API. + * * Pooled and routed with N number of configurable instances. * Concurrent access to Cluster. */ -private[akka] final class ClusterGossipDaemon(cluster: Cluster) extends Actor { +private[cluster] final class ClusterGossipDaemon(cluster: Cluster) extends Actor { val log = Logging(context.system, this) def receive = { @@ -347,9 +365,11 @@ private[akka] final class ClusterGossipDaemon(cluster: Cluster) extends Actor { } /** + * INTERNAL API. + * * Supervisor managing the different Cluster daemons. */ -private[akka] final class ClusterDaemonSupervisor(cluster: Cluster) extends Actor { +private[cluster] final class ClusterDaemonSupervisor(cluster: Cluster) extends Actor { val log = Logging(context.system, this) private val commands = context.actorOf(Props(new ClusterCommandDaemon(cluster)), "commands") @@ -402,11 +422,11 @@ trait ClusterNodeMBean { def isSingleton: Boolean def isConvergence: Boolean def isAvailable: Boolean + def isRunning: Boolean def join(address: String) def leave(address: String) def down(address: String) - def remove(address: String) } /** @@ -459,7 +479,7 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) private val serialization = remote.serialization - private val isRunning = new AtomicBoolean(true) + private val _isRunning = new AtomicBoolean(true) private val log = Logging(system, "Node") private val mBeanServer = ManagementFactory.getPlatformMBeanServer @@ -566,6 +586,11 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) } } + /** + * Returns true if the cluster node is up and running, false if it is shut down. + */ + def isRunning: Boolean = _isRunning.get + /** * Latest gossip. */ @@ -574,7 +599,10 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) /** * Member status for this node. */ - def status: MemberStatus = self.status + def status: MemberStatus = { + if (isRunning) self.status + else MemberStatus.Removed + } /** * Is this node the leader? @@ -606,38 +634,6 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) */ def isAvailable: Boolean = !isUnavailable(state.get) - /** - * Shuts down all connections to other members, the cluster daemon and the periodic gossip and cleanup tasks. - * - * INTERNAL API: - * Should not called by the user. The user can issue a LEAVE command which will tell the node - * to go through graceful handoff process LEAVE -> EXITING -> REMOVED -> SHUTDOWN. 
- */ - private[akka] def shutdown(): Unit = { - if (isRunning.compareAndSet(true, false)) { - log.info("Cluster Node [{}] - Shutting down cluster Node and cluster daemons...", selfAddress) - - // cancel the periodic tasks, note that otherwise they will be run when scheduler is shutdown - gossipTask.cancel() - heartbeatTask.cancel() - failureDetectorReaperTask.cancel() - leaderActionsTask.cancel() - clusterScheduler.close() - - // FIXME isTerminated check can be removed when ticket #2221 is fixed - // now it prevents logging if system is shutdown (or in progress of shutdown) - if (!clusterDaemons.isTerminated) - system.stop(clusterDaemons) - - try { - mBeanServer.unregisterMBean(clusterMBeanName) - } catch { - case e: InstanceNotFoundException ⇒ // ignore - we are running multiple cluster nodes in the same JVM (probably for testing) - } - log.info("Cluster Node [{}] - Cluster node successfully shut down", selfAddress) - } - } - /** * Registers a listener to subscribe to cluster membership changes. */ @@ -685,34 +681,57 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) clusterCommandDaemon ! ClusterUserAction.Down(address) } - /** - * Send command to REMOVE the node specified by 'address'. - */ - def remove(address: Address): Unit = { - clusterCommandDaemon ! ClusterUserAction.Remove(address) - } - // ======================================================== // ===================== INTERNAL API ===================== // ======================================================== /** - * State transition to JOINING. - * New node joining. + * INTERNAL API. + * + * Shuts down all connections to other members, the cluster daemon and the periodic gossip and cleanup tasks. + * + * Should not called by the user. The user can issue a LEAVE command which will tell the node + * to go through graceful handoff process `LEAVE -> EXITING -> REMOVED -> SHUTDOWN`. + */ + private[cluster] def shutdown(): Unit = { + if (_isRunning.compareAndSet(true, false)) { + log.info("Cluster Node [{}] - Shutting down cluster Node and cluster daemons...", selfAddress) + + // cancel the periodic tasks, note that otherwise they will be run when scheduler is shutdown + gossipTask.cancel() + heartbeatTask.cancel() + failureDetectorReaperTask.cancel() + leaderActionsTask.cancel() + clusterScheduler.close() + + // FIXME isTerminated check can be removed when ticket #2221 is fixed + // now it prevents logging if system is shutdown (or in progress of shutdown) + if (!clusterDaemons.isTerminated) + system.stop(clusterDaemons) + + try { + mBeanServer.unregisterMBean(clusterMBeanName) + } catch { + case e: InstanceNotFoundException ⇒ // ignore - we are running multiple cluster nodes in the same JVM (probably for testing) + } + log.info("Cluster Node [{}] - Cluster node successfully shut down", selfAddress) + } + } + + /** + * INTERNAL API. + * + * State transition to JOINING - new node joining. 
*/ @tailrec private[cluster] final def joining(node: Address): Unit = { - log.info("Cluster Node [{}] - Node [{}] is JOINING", selfAddress, node) - val localState = state.get val localGossip = localState.latestGossip val localMembers = localGossip.members val localUnreachable = localGossip.overview.unreachable val alreadyMember = localMembers.exists(_.address == node) - val isUnreachable = localUnreachable.exists { m ⇒ - m.address == node && m.status != Down && m.status != Removed - } + val isUnreachable = localUnreachable.exists { m ⇒ m.address == node && m.status != Down } if (!alreadyMember && !isUnreachable) { @@ -730,6 +749,7 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) if (!state.compareAndSet(localState, newState)) joining(node) // recur if we failed update else { + log.info("Cluster Node [{}] - Node [{}] is JOINING", selfAddress, node) // treat join as initial heartbeat, so that it becomes unavailable if nothing more happens if (node != selfAddress) failureDetector heartbeat node notifyMembershipChangeListeners(localState, newState) @@ -738,17 +758,16 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) } /** + * INTERNAL API. + * * State transition to LEAVING. */ @tailrec private[cluster] final def leaving(address: Address) { - log.info("Cluster Node [{}] - Marking address [{}] as LEAVING", selfAddress, address) - val localState = state.get val localGossip = localState.latestGossip - val localMembers = localGossip.members - val newMembers = localMembers + Member(address, Leaving) // mark node as LEAVING + val newMembers = localGossip.members + Member(address, Leaving) // mark node as LEAVING val newGossip = localGossip copy (members = newMembers) val versionedGossip = newGossip :+ vclockNode @@ -758,27 +777,31 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) if (!state.compareAndSet(localState, newState)) leaving(address) // recur if we failed update else { + log.info("Cluster Node [{}] - Marked address [{}] as LEAVING", selfAddress, address) notifyMembershipChangeListeners(localState, newState) } } - private def notifyMembershipChangeListeners(oldState: State, newState: State): Unit = { - val oldMembersStatus = oldState.latestGossip.members.toSeq.map(m ⇒ (m.address, m.status)) - val newMembersStatus = newState.latestGossip.members.toSeq.map(m ⇒ (m.address, m.status)) - if (newMembersStatus != oldMembersStatus) - newState.memberMembershipChangeListeners foreach { _ notify newState.latestGossip.members } - } - /** + * INTERNAL API. + * * State transition to EXITING. */ private[cluster] final def exiting(address: Address): Unit = { - log.info("Cluster Node [{}] - Marking node [{}] as EXITING", selfAddress, address) + log.info("Cluster Node [{}] - Marked node [{}] as EXITING", selfAddress, address) // FIXME implement when we implement hand-off } /** + * INTERNAL API. + * * State transition to REMOVED. + * + * This method is for now only called after the LEADER have sent a Removed message - telling the node + * to shut down himself. + * + * In the future we might change this to allow the USER to send a Removed(address) message telling an + * arbitrary node to be moved direcly from UP -> REMOVED. */ private[cluster] final def removing(address: Address): Unit = { log.info("Cluster Node [{}] - Node has been REMOVED by the leader - shutting down...", selfAddress) @@ -786,6 +809,8 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) } /** + * INTERNAL API. 
+ * * The node to DOWN is removed from the 'members' set and put in the 'unreachable' set (if not already there) * and its status is set to DOWN. The node is also removed from the 'seen' table. * @@ -843,6 +868,8 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) } /** + * INTERNAL API. + * * Receive new gossip. */ @tailrec @@ -856,9 +883,8 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) val mergedGossip = remoteGossip merge localGossip val versionedMergedGossip = mergedGossip :+ vclockNode - // FIXME change to debug log level, when failure detector is stable - log.info( - """Can't establish a causal relationship between "remote" gossip [{}] and "local" gossip [{}] - merging them into [{}]""", + log.debug( + """Can't establish a causal relationship between "remote" gossip and "local" gossip - Remote[{}] - Local[{}] - merging them into [{}]""", remoteGossip, localGossip, versionedMergedGossip) versionedMergedGossip @@ -883,7 +909,7 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) } /** - * INTERNAL API + * INTERNAL API. */ private[cluster] def receiveHeartbeat(from: Address): Unit = failureDetector heartbeat from @@ -893,11 +919,11 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) private def autoJoin(): Unit = nodeToJoin foreach join /** - * INTERNAL API + * INTERNAL API. * * Gossips latest gossip to an address. */ - private[akka] def gossipTo(address: Address): Unit = { + private[cluster] def gossipTo(address: Address): Unit = { val connection = clusterGossipConnectionFor(address) log.debug("Cluster Node [{}] - Gossiping to [{}]", selfAddress, connection) connection ! GossipEnvelope(selfAddress, latestGossip) @@ -917,18 +943,18 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) } /** - * INTERNAL API + * INTERNAL API. */ - private[akka] def gossipToUnreachableProbablity(membersSize: Int, unreachableSize: Int): Double = + private[cluster] def gossipToUnreachableProbablity(membersSize: Int, unreachableSize: Int): Double = (membersSize + unreachableSize) match { case 0 ⇒ 0.0 case sum ⇒ unreachableSize.toDouble / sum } /** - * INTERNAL API + * INTERNAL API. */ - private[akka] def gossipToDeputyProbablity(membersSize: Int, unreachableSize: Int, nrOfDeputyNodes: Int): Double = { + private[cluster] def gossipToDeputyProbablity(membersSize: Int, unreachableSize: Int, nrOfDeputyNodes: Int): Double = { if (nrOfDeputyNodes > membersSize) 1.0 else if (nrOfDeputyNodes == 0) 0.0 else (membersSize + unreachableSize) match { @@ -938,11 +964,11 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) } /** - * INTERNAL API + * INTERNAL API. * * Initates a new round of gossip. */ - private[akka] def gossip(): Unit = { + private[cluster] def gossip(): Unit = { val localState = state.get log.debug("Cluster Node [{}] - Initiating new round of gossip", selfAddress) @@ -979,9 +1005,9 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) } /** - * INTERNAL API + * INTERNAL API. */ - private[akka] def heartbeat(): Unit = { + private[cluster] def heartbeat(): Unit = { val localState = state.get if (!isSingletonCluster(localState)) { @@ -996,12 +1022,12 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) } /** - * INTERNAL API + * INTERNAL API. 
* * Reaps the unreachable members (moves them to the 'unreachable' list in the cluster overview) according to the failure detector's verdict. */ @tailrec - final private[akka] def reapUnreachableMembers(): Unit = { + final private[cluster] def reapUnreachableMembers(): Unit = { val localState = state.get if (!isSingletonCluster(localState) && isAvailable(localState)) { @@ -1040,12 +1066,12 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) } /** - * INTERNAL API + * INTERNAL API. * * Runs periodic leader actions, such as auto-downing unreachable nodes, assigning partitions etc. */ @tailrec - final private[akka] def leaderActions(): Unit = { + final private[cluster] def leaderActions(): Unit = { val localState = state.get val localGossip = localState.latestGossip val localMembers = localGossip.members @@ -1065,13 +1091,17 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) val localUnreachableMembers = localOverview.unreachable // Leader actions are as follows: - // 1. Move EXITING => REMOVED -- When all nodes have seen that the node is EXITING (convergence) - remove the nodes from the node ring + // 1. Move EXITING => REMOVED -- When all nodes have seen that the node is EXITING (convergence) - remove the nodes from the node ring and seen table // 2. Move JOINING => UP -- When a node joins the cluster // 3. Move LEAVING => EXITING -- When all partition handoff has completed // 4. Move UNREACHABLE => DOWN -- When the node is in the UNREACHABLE set it can be auto-down by leader // 5. Updating the vclock version for the changes // 6. Updating the 'seen' table + // store away removed and exiting members so we can separate the pure state changes (that can be retried on collision) and the side-effecting message sending + var removedMembers = Set.empty[Member] + var exitingMembers = Set.empty[Member] + var hasChangedState = false val newGossip = @@ -1079,21 +1109,20 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) // we have convergence - so we can't have unreachable nodes val newMembers = - // ---------------------- - // 1. Move EXITING => REMOVED - e.g. remove the nodes from the 'members' set/node ring + // 1. Move EXITING => REMOVED - e.g. remove the nodes from the 'members' set/node ring and seen table // ---------------------- - // localMembers filter { member ⇒ - // if (member.status == MemberStatus.Exiting) { - // log.info("Cluster Node [{}] - Leader is moving node [{}] from EXITING to REMOVED - Removing node from node ring", selfAddress, member.address) - // hasChangedState = true - // clusterCommandConnectionFor(member.address) ! ClusterUserAction.Remove(member.address) // tell the removed node to shut himself down - // false - // } else true + localMembers filter { member ⇒ + if (member.status == MemberStatus.Exiting) { + log.info("Cluster Node [{}] - Leader is moving node [{}] from EXITING to REMOVED - and removing node from node ring", selfAddress, member.address) + hasChangedState = true + removedMembers = removedMembers + member + false + } else true - localMembers map { member ⇒ + } map { member ⇒ // ---------------------- - // 1. Move JOINING => UP (once all nodes have seen that this node is JOINING e.g. we have a convergence) + // 2. Move JOINING => UP (once all nodes have seen that this node is JOINING e.g. 
we have a convergence) // ---------------------- if (member.status == Joining) { log.info("Cluster Node [{}] - Leader is moving node [{}] from JOINING to UP", selfAddress, member.address) @@ -1101,16 +1130,6 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) member copy (status = Up) } else member - } map { member ⇒ - // ---------------------- - // 2. Move EXITING => REMOVED (once all nodes have seen that this node is EXITING e.g. we have a convergence) - // ---------------------- - if (member.status == Exiting) { - log.info("Cluster Node [{}] - Leader is moving node [{}] from EXITING to REMOVED", selfAddress, member.address) - hasChangedState = true - member copy (status = Removed) - } else member - } map { member ⇒ // ---------------------- // 3. Move LEAVING => EXITING (once we have a convergence on LEAVING *and* if we have a successful partition handoff) @@ -1118,15 +1137,20 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) if (member.status == Leaving && hasPartionHandoffCompletedSuccessfully(localGossip)) { log.info("Cluster Node [{}] - Leader is moving node [{}] from LEAVING to EXITING", selfAddress, member.address) hasChangedState = true - -// clusterCommandConnectionFor(member.address) ! ClusterLeaderAction.Exit(member.address) // FIXME should use ? to await completion of handoff? + exitingMembers = exitingMembers + member member copy (status = Exiting) - } else member } - localGossip copy (members = newMembers) // update gossip + // removing REMOVED nodes from the 'seen' table + val newSeen = removedMembers.foldLeft(localSeen) { (seen, removed) ⇒ seen - removed.address } + + // removing REMOVED nodes from the 'unreachable' set + val newUnreachableMembers = removedMembers.foldLeft(localUnreachableMembers) { (unreachable, removed) ⇒ unreachable - removed } + + val newOverview = localOverview copy (seen = newSeen, unreachable = newUnreachableMembers) // update gossip overview + localGossip copy (members = newMembers, overview = newOverview) // update gossip } else if (AutoDown) { // we don't have convergence - so we might have unreachable nodes @@ -1147,9 +1171,7 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) } // removing nodes marked as DOWN from the 'seen' table - val newSeen = localSeen -- newUnreachableMembers.collect { - case m if m.status == Down ⇒ m.address - } + val newSeen = localSeen -- newUnreachableMembers.collect { case m if m.status == Down ⇒ m.address } val newOverview = localOverview copy (seen = newSeen, unreachable = newUnreachableMembers) // update gossip overview localGossip copy (overview = newOverview) // update gossip @@ -1165,14 +1187,35 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) // ---------------------- // 6. Updating the 'seen' table + // Unless the leader (this node) is part of the removed members, i.e. 
the leader have moved himself from EXITING -> REMOVED // ---------------------- - val seenVersionedGossip = versionedGossip seen selfAddress + val seenVersionedGossip = + if (removedMembers.exists(_.address == selfAddress)) versionedGossip + else versionedGossip seen selfAddress val newState = localState copy (latestGossip = seenVersionedGossip) // if we won the race then update else try again if (!state.compareAndSet(localState, newState)) leaderActions() // recur else { + // do the side-effecting notifications on state-change success + + if (removedMembers.exists(_.address == selfAddress)) { + // we now know that this node (the leader) is just about to shut down since it will be moved from EXITING -> REMOVED + // so now let's gossip out this information directly since there will not be any other chance + gossip() + } + + // tell all removed members to remove and shut down themselves + removedMembers.map(_.address) foreach { address ⇒ + clusterCommandConnectionFor(address) ! ClusterLeaderAction.Remove(address) + } + + // tell all exiting members to exit + exitingMembers.map(_.address) foreach { address ⇒ + clusterCommandConnectionFor(address) ! ClusterLeaderAction.Exit(address) // FIXME should use ? to await completion of handoff? + } + notifyMembershipChangeListeners(localState, newState) } } @@ -1196,9 +1239,7 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) // Else we can't continue to check for convergence // When that is done we check that all the entries in the 'seen' table have the same vector clock version // and that all members exists in seen table - val hasUnreachable = unreachable.nonEmpty && unreachable.exists { m ⇒ - m.status != Down && m.status != Removed - } + val hasUnreachable = unreachable.nonEmpty && unreachable.exists { _.status != Down } val allMembersInSeen = gossip.members.forall(m ⇒ seen.contains(m.address)) if (hasUnreachable) { @@ -1227,14 +1268,18 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) private def isUnavailable(state: State): Boolean = { val localGossip = state.latestGossip - val localOverview = localGossip.overview - val localMembers = localGossip.members - val localUnreachableMembers = localOverview.unreachable - val isUnreachable = localUnreachableMembers exists { _.address == selfAddress } - val hasUnavailableMemberStatus = localMembers exists { m ⇒ (m == self) && m.status.isUnavailable } + val isUnreachable = localGossip.overview.unreachable exists { _.address == selfAddress } + val hasUnavailableMemberStatus = localGossip.members exists { _.status.isUnavailable } isUnreachable || hasUnavailableMemberStatus } + private def notifyMembershipChangeListeners(oldState: State, newState: State): Unit = { + val oldMembersStatus = oldState.latestGossip.members.toSeq.map(m ⇒ (m.address, m.status)) + val newMembersStatus = newState.latestGossip.members.toSeq.map(m ⇒ (m.address, m.status)) + if (newMembersStatus != oldMembersStatus) + newState.memberMembershipChangeListeners foreach { _ notify newState.latestGossip.members } + } + /** * Looks up and returns the local cluster command connection. */ @@ -1257,9 +1302,9 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) addresses drop 1 take NrOfDeputyNodes filterNot (_ == selfAddress) /** - * INTERNAL API + * INTERNAL API. 
*/ - private[akka] def selectRandomNode(addresses: IndexedSeq[Address]): Option[Address] = + private[cluster] def selectRandomNode(addresses: IndexedSeq[Address]): Option[Address] = if (addresses.isEmpty) None else Some(addresses(ThreadLocalRandom.current nextInt addresses.size)) @@ -1302,6 +1347,8 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) def isAvailable: Boolean = clusterNode.isAvailable + def isRunning: Boolean = clusterNode.isRunning + // JMX commands def join(address: String) = clusterNode.join(AddressFromURIString(address)) @@ -1309,8 +1356,6 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) def leave(address: String) = clusterNode.leave(AddressFromURIString(address)) def down(address: String) = clusterNode.down(AddressFromURIString(address)) - - def remove(address: String) = clusterNode.remove(AddressFromURIString(address)) } log.info("Cluster Node [{}] - registering cluster JMX MBean [{}]", selfAddress, clusterMBeanName) try { From 41ec4363145b3e91f0436a986b8be0851ce8386a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Sat, 16 Jun 2012 00:01:02 +0200 Subject: [PATCH 05/20] Removed 'remove' from, and added 'isRunning' to, 'akka-cluster' admin script. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- akka-kernel/src/main/dist/bin/akka-cluster | 29 +++++++++++----------- 1 file changed, 14 insertions(+), 15 deletions(-) diff --git a/akka-kernel/src/main/dist/bin/akka-cluster b/akka-kernel/src/main/dist/bin/akka-cluster index 3e76cdbb11..fe3af38449 100755 --- a/akka-kernel/src/main/dist/bin/akka-cluster +++ b/akka-kernel/src/main/dist/bin/akka-cluster @@ -63,20 +63,6 @@ case "$2" in $JMX_CLIENT $HOST akka:type=Cluster leave=$ACTOR_SYSTEM_URL ;; - remove) - if [ $# -ne 3 ]; then - echo "Usage: $SELF remove " - exit 1 - fi - - ensureNodeIsRunningAndAvailable - shift - - ACTOR_SYSTEM_URL=$2 - echo "Scheduling $ACTOR_SYSTEM_URL to REMOVE" - $JMX_CLIENT $HOST akka:type=Cluster remove=$ACTOR_SYSTEM_URL - ;; - down) if [ $# -ne 3 ]; then echo "Usage: $SELF down " @@ -169,19 +155,32 @@ case "$2" in $JMX_CLIENT $HOST akka:type=Cluster Available ;; + is-running) + if [ $# -ne 2 ]; then + echo "Usage: $SELF is-running" + exit 1 + fi + + ensureNodeIsRunningAndAvailable + shift + + echo "Checking if member node on $HOST is AVAILABLE" + $JMX_CLIENT $HOST akka:type=Cluster Running + ;; + *) printf "Usage: bin/$SELF ...\n" printf "\n" printf "Supported commands are:\n" printf "%26s - %s\n" "join " "Sends request a JOIN node with the specified URL" printf "%26s - %s\n" "leave " "Sends a request for node with URL to LEAVE the cluster" - printf "%26s - %s\n" "remove " "Sends a request for node with URL to be instantly REMOVED from the cluster" printf "%26s - %s\n" "down " "Sends a request for marking node with URL as DOWN" printf "%26s - %s\n" member-status "Asks the member node for its current status" printf "%26s - %s\n" cluster-status "Asks the cluster for its current status (member ring, unavailable nodes, meta data etc.)" printf "%26s - %s\n" leader "Asks the cluster who the current leader is" printf "%26s - %s\n" is-singleton "Checks if the cluster is a singleton cluster (single node cluster)" printf "%26s - %s\n" is-available "Checks if the member node is available" + printf "%26s - %s\n" is-running "Checks if the member node is running" printf "%26s - %s\n" has-convergence "Checks if there is a cluster convergence" printf "Where 
the should be on the format of 'akka://actor-system-name@hostname:port'\n" printf "\n" From 2822ba52465664706316be4b2fc2b7e3f1aca01b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Sat, 16 Jun 2012 00:01:58 +0200 Subject: [PATCH 06/20] Fixed and enabled tests that test LEAVING -> EXITING -> REMOVED. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- ...LeavingAndExitingAndBeingRemovedSpec.scala | 22 +++++++++---------- .../cluster/NodeLeavingAndExitingSpec.scala | 3 +-- .../scala/akka/cluster/NodeLeavingSpec.scala | 3 +-- 3 files changed, 13 insertions(+), 15 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala index 01e5f8aa74..7a233f9395 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala @@ -18,9 +18,9 @@ object NodeLeavingAndExitingAndBeingRemovedMultiJvmSpec extends MultiNodeConfig commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) } -class NodeLeavingAndExitingAndBeingRemovedMultiJvmNode1 extends NodeLeavingAndExitingAndBeingRemovedSpec with AccrualFailureDetectorStrategy -class NodeLeavingAndExitingAndBeingRemovedMultiJvmNode2 extends NodeLeavingAndExitingAndBeingRemovedSpec with AccrualFailureDetectorStrategy -class NodeLeavingAndExitingAndBeingRemovedMultiJvmNode3 extends NodeLeavingAndExitingAndBeingRemovedSpec with AccrualFailureDetectorStrategy +class NodeLeavingAndExitingAndBeingRemovedMultiJvmNode1 extends NodeLeavingAndExitingAndBeingRemovedSpec with FailureDetectorPuppetStrategy +class NodeLeavingAndExitingAndBeingRemovedMultiJvmNode2 extends NodeLeavingAndExitingAndBeingRemovedSpec with FailureDetectorPuppetStrategy +class NodeLeavingAndExitingAndBeingRemovedMultiJvmNode3 extends NodeLeavingAndExitingAndBeingRemovedSpec with FailureDetectorPuppetStrategy abstract class NodeLeavingAndExitingAndBeingRemovedSpec extends MultiNodeSpec(NodeLeavingAndExitingAndBeingRemovedMultiJvmSpec) @@ -36,8 +36,7 @@ abstract class NodeLeavingAndExitingAndBeingRemovedSpec "A node that is LEAVING a non-singleton cluster" must { - // FIXME make it work and remove ignore - "be moved to EXITING and then to REMOVED by the reaper" taggedAs LongRunningTest ignore { + "eventually set to REMOVED by the reaper, and removed from membership ring and seen table" taggedAs LongRunningTest in { awaitClusterUp(first, second, third) @@ -50,13 +49,14 @@ abstract class NodeLeavingAndExitingAndBeingRemovedSpec // verify that the 'second' node is no longer part of the 'members' set awaitCond(cluster.latestGossip.members.forall(_.address != secondAddress), reaperWaitingTime) - // verify that the 'second' node is part of the 'unreachable' set - awaitCond(cluster.latestGossip.overview.unreachable.exists(_.status == MemberStatus.Removed), reaperWaitingTime) + // verify that the 'second' node is not part of the 'unreachable' set + awaitCond(cluster.latestGossip.overview.unreachable.forall(_.address != secondAddress), reaperWaitingTime) + } - // verify node that got removed is 'second' node - val isRemoved = cluster.latestGossip.overview.unreachable.find(_.status == MemberStatus.Removed) - isRemoved must be('defined) - isRemoved.get.address must be(secondAddress) + runOn(second) { + // 
verify that the second node is shut down and has status REMOVED + awaitCond(!cluster.isRunning, reaperWaitingTime) + awaitCond(cluster.status == MemberStatus.Removed, reaperWaitingTime) } testConductor.enter("finished") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala index 6378a74040..ef285b5070 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala @@ -42,8 +42,7 @@ abstract class NodeLeavingAndExitingSpec "A node that is LEAVING a non-singleton cluster" must { - // FIXME make it work and remove ignore - "be moved to EXITING by the leader" taggedAs LongRunningTest ignore { + "be moved to EXITING by the leader" taggedAs LongRunningTest in { awaitClusterUp(first, second, third) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala index 8ea21e9380..8f637d87e5 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala @@ -36,8 +36,7 @@ abstract class NodeLeavingSpec "A node that is LEAVING a non-singleton cluster" must { - // FIXME make it work and remove ignore - "be marked as LEAVING in the converged membership table" taggedAs LongRunningTest ignore { + "be marked as LEAVING in the converged membership table" taggedAs LongRunningTest in { awaitClusterUp(first, second, third) From 616aaacbe87a52893cbeb42e6591ca70e33dc697 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Sat, 16 Jun 2012 00:02:20 +0200 Subject: [PATCH 07/20] Changed logging in FD from INFO to DEBUG. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- .../src/main/scala/akka/cluster/AccrualFailureDetector.scala | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala b/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala index 6632111f00..62d5fa4eb9 100644 --- a/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala +++ b/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala @@ -165,8 +165,7 @@ class AccrualFailureDetector( else PhiFactor * timestampDiff / mean } - // FIXME change to debug log level, when failure detector is stable - log.info("Phi value [{}] and threshold [{}] for connection [{}] ", phi, threshold, connection) + log.debug("Phi value [{}] and threshold [{}] for connection [{}] ", phi, threshold, connection) phi } From 486853b7bd534571e54866358811c74f6d85537e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Sat, 16 Jun 2012 00:03:13 +0200 Subject: [PATCH 08/20] Removed MembershipChangeListenerRemovedSpec. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Since there is no listener being called for the transition to REMOVED. 
Signed-off-by: Jonas Bonér --- .../MembershipChangeListenerRemovedSpec.scala | 71 ------------------- 1 file changed, 71 deletions(-) delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerRemovedSpec.scala diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerRemovedSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerRemovedSpec.scala deleted file mode 100644 index 6b737a22e2..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerRemovedSpec.scala +++ /dev/null @@ -1,71 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ -package akka.cluster - -import scala.collection.immutable.SortedSet -import org.scalatest.BeforeAndAfter -import com.typesafe.config.ConfigFactory -import akka.remote.testkit.MultiNodeConfig -import akka.remote.testkit.MultiNodeSpec -import akka.testkit._ -import akka.util.duration._ - -object MembershipChangeListenerRemovedMultiJvmSpec extends MultiNodeConfig { - val first = role("first") - val second = role("second") - val third = role("third") - - commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) -} - -class MembershipChangeListenerRemovedMultiJvmNode1 extends MembershipChangeListenerRemovedSpec -class MembershipChangeListenerRemovedMultiJvmNode2 extends MembershipChangeListenerRemovedSpec -class MembershipChangeListenerRemovedMultiJvmNode3 extends MembershipChangeListenerRemovedSpec - -abstract class MembershipChangeListenerRemovedSpec extends MultiNodeSpec(MembershipChangeListenerRemovedMultiJvmSpec) - with MultiNodeClusterSpec with ImplicitSender with BeforeAndAfter { - import MembershipChangeListenerRemovedMultiJvmSpec._ - - override def initialParticipants = 3 - - lazy val firstAddress = node(first).address - lazy val secondAddress = node(second).address - lazy val thirdAddress = node(third).address - - val reaperWaitingTime = 30.seconds.dilated - - "A registered MembershipChangeListener" must { - "be notified when new node is REMOVED" taggedAs LongRunningTest in { - - runOn(first) { - cluster.self - } - testConductor.enter("first-started") - - runOn(second, third) { - cluster.join(firstAddress) - } - awaitUpConvergence(numberOfMembers = 3) - testConductor.enter("rest-started") - - runOn(third) { - val removedLatch = TestLatch() - cluster.registerListener(new MembershipChangeListener { - def notify(members: SortedSet[Member]) { - println("------- MembershipChangeListener " + members.mkString(", ")) - if (members.size == 3 && members.find(_.address == secondAddress).isEmpty) - removedLatch.countDown() - } - }) - removedLatch.await - } - - runOn(first) { - cluster.leave(secondAddress) - } - - testConductor.enter("finished") - } - } -} From 86dc1fe69d9b981a387a5c20cca4a8d4244e770a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Sat, 16 Jun 2012 00:03:45 +0200 Subject: [PATCH 09/20] Minor edits to cluster specification. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- akka-docs/cluster/cluster.rst | 28 ++++++++++------------------ 1 file changed, 10 insertions(+), 18 deletions(-) diff --git a/akka-docs/cluster/cluster.rst b/akka-docs/cluster/cluster.rst index 1368d7835f..0126897dab 100644 --- a/akka-docs/cluster/cluster.rst +++ b/akka-docs/cluster/cluster.rst @@ -163,8 +163,8 @@ After gossip convergence a ``leader`` for the cluster can be determined. 
There i ``leader`` election process, the ``leader`` can always be recognised deterministically by any node whenever there is gossip convergence. The ``leader`` is simply the first node in sorted order that is able to take the leadership role, where the only -allowed member states for a ``leader`` are ``up`` or ``leaving`` (see below for more -information about member states). +allowed member states for a ``leader`` are ``up``, ``leaving`` or ``exiting`` (see +below for more information about member states). The role of the ``leader`` is to shift members in and out of the cluster, changing ``joining`` members to the ``up`` state or ``exiting`` members to the @@ -301,12 +301,6 @@ handoff has completed then the node will change to the ``exiting`` state. Once all nodes have seen the exiting state (convergence) the ``leader`` will remove the node from the cluster, marking it as ``removed``. -A node can also be removed forcefully by moving it directly to the ``removed`` -state using the ``remove`` action. The cluster will rebalance based on the new -cluster membership. This will also happen if you are shutting the system down -forcefully (through an external ``SIGKILL`` signal, ``System.exit(status)`` or -similar. - If a node is unreachable then gossip convergence is not possible and therefore any ``leader`` actions are also not possible (for instance, allowing a node to become a part of the cluster, or changing actor distribution). To be able to @@ -315,11 +309,12 @@ unreachable node is experiencing only transient difficulties then it can be explicitly marked as ``down`` using the ``down`` user action. When this node comes back up and begins gossiping it will automatically go through the joining process again. If the unreachable node will be permanently down then it can be -removed from the cluster directly with the ``remove`` user action. The cluster -can also *auto-down* a node using the accrual failure detector. +removed from the cluster directly by shutting the actor system down or killing it +through an external ``SIGKILL`` signal, invocation of ``System.exit(status)`` or +similar. The cluster can, through the leader, also *auto-down* a node. -This means that nodes can join and leave the cluster at any point in time, -e.g. provide cluster elasticity. +This means that nodes can join and leave the cluster at any point in time, i.e. +provide cluster elasticity. State Diagram for the Member States @@ -340,12 +335,12 @@ Member States - **leaving** / **exiting** states during graceful removal -- **removed** - tombstone state (no longer a member) - - **down** marked as down/offline/unreachable +- **removed** + tombstone state (no longer a member) + User Actions ^^^^^^^^^^^^ @@ -360,9 +355,6 @@ User Actions - **down** mark a node as temporarily down -- **remove** - remove a node from the cluster immediately - Leader Actions ^^^^^^^^^^^^^^ From 07dadc40cb0eb0fe5953d81975a6c0bf0c851e5a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Sat, 16 Jun 2012 00:04:37 +0200 Subject: [PATCH 10/20] Added spec testing telling a LEADER to LEAVE (and transition from UP -> LEAVING -> EXITING -> REMOVED). 
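For illustration only, here is a minimal sketch of the leader rule described above, written against the Member and MemberStatus API used elsewhere in this series; leaderOf is a hypothetical helper, not an Akka method:

    import akka.actor.Address
    import akka.cluster.{ Member, MemberStatus }
    import scala.collection.immutable.SortedSet

    // No election protocol: on a converged, sorted member ring the leader is simply
    // the first member whose status still allows it to take the leadership role.
    def leaderOf(members: SortedSet[Member]): Option[Address] = {
      val canLead: Set[MemberStatus] = Set(MemberStatus.Up, MemberStatus.Leaving, MemberStatus.Exiting)
      members.collectFirst { case m if canLead(m.status) => m.address }
    }

Because the set is sorted, collectFirst returns the first member in ring order whose status permits leadership, so every node that has seen the same converged gossip computes the same leader.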
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- .../akka/cluster/LeaderLeavingSpec.scala | 93 +++++++++++++++++++ 1 file changed, 93 insertions(+) create mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderLeavingSpec.scala diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderLeavingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderLeavingSpec.scala new file mode 100644 index 0000000000..04b93e8a8c --- /dev/null +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderLeavingSpec.scala @@ -0,0 +1,93 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ +package akka.cluster + +import scala.collection.immutable.SortedSet +import com.typesafe.config.ConfigFactory +import akka.remote.testkit.MultiNodeConfig +import akka.remote.testkit.MultiNodeSpec +import akka.testkit._ +import akka.util.duration._ + +object LeaderLeavingMultiJvmSpec extends MultiNodeConfig { + val first = role("first") + val second = role("second") + val third = role("third") + + commonConfig( + debugConfig(on = false) + .withFallback(ConfigFactory.parseString(""" + akka.cluster { + leader-actions-interval = 5 s # increase the leader action task frequency to make sure we get a chance to test the LEAVING state + unreachable-nodes-reaper-interval = 30 s + } + """) + .withFallback(MultiNodeClusterSpec.clusterConfig))) +} + +class LeaderLeavingMultiJvmNode1 extends LeaderLeavingSpec with FailureDetectorPuppetStrategy +class LeaderLeavingMultiJvmNode2 extends LeaderLeavingSpec with FailureDetectorPuppetStrategy +class LeaderLeavingMultiJvmNode3 extends LeaderLeavingSpec with FailureDetectorPuppetStrategy + +abstract class LeaderLeavingSpec + extends MultiNodeSpec(LeaderLeavingMultiJvmSpec) + with MultiNodeClusterSpec { + + import LeaderLeavingMultiJvmSpec._ + + lazy val firstAddress = node(first).address + lazy val secondAddress = node(second).address + lazy val thirdAddress = node(third).address + + val reaperWaitingTime = 30.seconds.dilated + + def leaderRole = cluster.leader match { + case `firstAddress` => first + case `secondAddress` => second + case `thirdAddress` => third + } + + "A LEADER that is LEAVING" must { + + "be moved to LEAVING, then to EXITING, then to REMOVED, then be shut down and then a new LEADER should be elected" taggedAs LongRunningTest in { + + awaitClusterUp(first, second, third) + + val oldLeaderAddress = cluster.leader + + if (cluster.isLeader) { + + cluster.leave(oldLeaderAddress) + testConductor.enter("leader-left") + + // verify that the LEADER is shut down + awaitCond(!cluster.isRunning, reaperWaitingTime) + + // verify that the LEADER is REMOVED + awaitCond(cluster.status == MemberStatus.Removed, reaperWaitingTime) + + } else { + + testConductor.enter("leader-left") + + // verify that the LEADER is LEAVING + awaitCond(cluster.latestGossip.members.exists(m => m.status == MemberStatus.Leaving && m.address == oldLeaderAddress)) // wait on LEAVING + + // verify that the LEADER is EXITING + awaitCond(cluster.latestGossip.members.exists(m => m.status == MemberStatus.Exiting && m.address == oldLeaderAddress)) // wait on EXITING + + // verify that the LEADER is no longer part of the 'members' set + awaitCond(cluster.latestGossip.members.forall(_.address != oldLeaderAddress), reaperWaitingTime) + + // verify that the LEADER is not part of the 'unreachable' set + awaitCond(cluster.latestGossip.overview.unreachable.forall(_.address != oldLeaderAddress), reaperWaitingTime) + + // verify that we 
have a new LEADER + awaitCond(cluster.leader != oldLeaderAddress, reaperWaitingTime) + } + + testConductor.enter("finished") + } + } +} From a5fe6ea607020ebfc15270acd8b1ea5a2a85a2de Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Mon, 18 Jun 2012 13:51:00 +0200 Subject: [PATCH 11/20] Fixed wrong ScalaDoc. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- akka-actor/src/main/scala/akka/AkkaException.scala | 1 - 1 file changed, 1 deletion(-) diff --git a/akka-actor/src/main/scala/akka/AkkaException.scala b/akka-actor/src/main/scala/akka/AkkaException.scala index 04e820419f..8e49c7cb11 100644 --- a/akka-actor/src/main/scala/akka/AkkaException.scala +++ b/akka-actor/src/main/scala/akka/AkkaException.scala @@ -9,7 +9,6 @@ package akka *
 *
 *   • a uuid for tracking purposes
 *   • toString that includes exception name, message and uuid
- *   • toLongString which also includes the stack trace
 *
*/ //TODO add @SerialVersionUID(1L) when SI-4804 is fixed From e362c2f48819c1cccd4c96e2f9b13efae34012db Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Mon, 18 Jun 2012 13:51:54 +0200 Subject: [PATCH 12/20] Cleaned up LeaderLeavingSpec. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit …and turned it off until redesign of leader leaving is implement. Signed-off-by: Jonas Bonér --- .../akka/cluster/LeaderLeavingSpec.scala | 20 ++++++------------- 1 file changed, 6 insertions(+), 14 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderLeavingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderLeavingSpec.scala index 04b93e8a8c..f3274583b5 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderLeavingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderLeavingSpec.scala @@ -40,17 +40,9 @@ abstract class LeaderLeavingSpec lazy val secondAddress = node(second).address lazy val thirdAddress = node(third).address - val reaperWaitingTime = 30.seconds.dilated - - def leaderRole = cluster.leader match { - case `firstAddress` => first - case `secondAddress` => second - case `thirdAddress` => third - } - "A LEADER that is LEAVING" must { - "be moved to LEAVING, then to EXITING, then to REMOVED, then be shut down and then a new LEADER should be elected" taggedAs LongRunningTest in { + "be moved to LEAVING, then to EXITING, then to REMOVED, then be shut down and then a new LEADER should be elected" taggedAs LongRunningTest ignore { awaitClusterUp(first, second, third) @@ -62,10 +54,10 @@ abstract class LeaderLeavingSpec testConductor.enter("leader-left") // verify that the LEADER is shut down - awaitCond(!cluster.isRunning, reaperWaitingTime) + awaitCond(!cluster.isRunning) // verify that the LEADER is REMOVED - awaitCond(cluster.status == MemberStatus.Removed, reaperWaitingTime) + awaitCond(cluster.status == MemberStatus.Removed) } else { @@ -78,13 +70,13 @@ abstract class LeaderLeavingSpec awaitCond(cluster.latestGossip.members.exists(m => m.status == MemberStatus.Exiting && m.address == oldLeaderAddress)) // wait on EXITING // verify that the LEADER is no longer part of the 'members' set - awaitCond(cluster.latestGossip.members.forall(_.address != oldLeaderAddress), reaperWaitingTime) + awaitCond(cluster.latestGossip.members.forall(_.address != oldLeaderAddress)) // verify that the LEADER is not part of the 'unreachable' set - awaitCond(cluster.latestGossip.overview.unreachable.forall(_.address != oldLeaderAddress), reaperWaitingTime) + awaitCond(cluster.latestGossip.overview.unreachable.forall(_.address != oldLeaderAddress)) // verify that we have a new LEADER - awaitCond(cluster.leader != oldLeaderAddress, reaperWaitingTime) + awaitCond(cluster.leader != oldLeaderAddress) } testConductor.enter("finished") From c0dff0050b353bd9683d8f2eb26318d2e309baaa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Mon, 18 Jun 2012 13:52:06 +0200 Subject: [PATCH 13/20] Minor edit . 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- .../src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala index 6f3ddfc866..798dd0058d 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala @@ -20,9 +20,7 @@ object SunnyWeatherMultiJvmSpec extends MultiNodeConfig { val fifth = role("fifth") commonConfig(ConfigFactory.parseString(""" - akka.cluster { - nr-of-deputy-nodes = 0 - } + akka.cluster.nr-of-deputy-nodes = 0 akka.loglevel = INFO """)) } From 8b6652a79491dfecacec322cccc51e8ad4307960 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Mon, 18 Jun 2012 13:53:49 +0200 Subject: [PATCH 14/20] Fixed all issues from review. In particular fully separated state transformation and preparation for side-effecting processing. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- .../src/main/scala/akka/cluster/Cluster.scala | 196 ++++++++++-------- 1 file changed, 115 insertions(+), 81 deletions(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index b233b9bfbf..528672363d 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -770,19 +770,20 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) private[cluster] final def leaving(address: Address) { val localState = state.get val localGossip = localState.latestGossip + if (localGossip.members.exists(_.address == address)) { // only try to update if the node is available (in the member ring) + val newMembers = localGossip.members map { member ⇒ if (member.address == address) Member(address, Leaving) else member } // mark node as LEAVING + val newGossip = localGossip copy (members = newMembers) - val newMembers = localGossip.members + Member(address, Leaving) // mark node as LEAVING - val newGossip = localGossip copy (members = newMembers) + val versionedGossip = newGossip :+ vclockNode + val seenVersionedGossip = versionedGossip seen selfAddress - val versionedGossip = newGossip :+ vclockNode - val seenVersionedGossip = versionedGossip seen selfAddress + val newState = localState copy (latestGossip = seenVersionedGossip) - val newState = localState copy (latestGossip = seenVersionedGossip) - - if (!state.compareAndSet(localState, newState)) leaving(address) // recur if we failed update - else { - log.info("Cluster Node [{}] - Marked address [{}] as LEAVING", selfAddress, address) - notifyMembershipChangeListeners(localState, newState) + if (!state.compareAndSet(localState, newState)) leaving(address) // recur if we failed update + else { + log.info("Cluster Node [{}] - Marked address [{}] as LEAVING", selfAddress, address) + notifyMembershipChangeListeners(localState, newState) + } } } @@ -1082,115 +1083,126 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) val isLeader = localMembers.nonEmpty && (selfAddress == localMembers.head.address) - // FIXME implement partion handoff and a check if it is completed - now just returns TRUE - e.g. 
has completed successfully - def hasPartionHandoffCompletedSuccessfully(gossip: Gossip): Boolean = { - true - } - if (isLeader && isAvailable(localState)) { // only run the leader actions if we are the LEADER and available val localOverview = localGossip.overview val localSeen = localOverview.seen val localUnreachableMembers = localOverview.unreachable + val hasPartionHandoffCompletedSuccessfully: Boolean = { + // FIXME implement partion handoff and a check if it is completed - now just returns TRUE - e.g. has completed successfully + true + } // Leader actions are as follows: // 1. Move EXITING => REMOVED -- When all nodes have seen that the node is EXITING (convergence) - remove the nodes from the node ring and seen table // 2. Move JOINING => UP -- When a node joins the cluster // 3. Move LEAVING => EXITING -- When all partition handoff has completed // 4. Move UNREACHABLE => DOWN -- When the node is in the UNREACHABLE set it can be auto-down by leader - // 5. Updating the vclock version for the changes - // 6. Updating the 'seen' table + // 5. Store away all stuff needed for the side-effecting processing in 10. + // 6. Updating the vclock version for the changes + // 7. Updating the 'seen' table + // 8. Try to update the state with the new gossip + // 9. If failure - retry + // 10. If success - run all the side-effecting processing - // store away removed and exiting members so we can separate the pure state changes (that can be retried on collision) and the side-effecting message sending - var removedMembers = Set.empty[Member] - var exitingMembers = Set.empty[Member] - - var hasChangedState = false - val newGossip = + val ( + newGossip: Gossip, + hasChangedState: Boolean, + upMembers: Set[Member], + exitingMembers: Set[Member], + removedMembers: Set[Member], + unreachableButNotDownedMembers: Set[Member]) = if (convergence(localGossip).isDefined) { // we have convergence - so we can't have unreachable nodes + // transform the node member ring - filterNot/map/map val newMembers = - // ---------------------- - // 1. Move EXITING => REMOVED - e.g. remove the nodes from the 'members' set/node ring and seen table - // ---------------------- - localMembers filter { member ⇒ - if (member.status == MemberStatus.Exiting) { - log.info("Cluster Node [{}] - Leader is moving node [{}] from EXITING to REMOVED - and removing node from node ring", selfAddress, member.address) - hasChangedState = true - removedMembers = removedMembers + member - false - } else true + localMembers filterNot { member ⇒ + // ---------------------- + // 1. Move EXITING => REMOVED - e.g. remove the nodes from the 'members' set/node ring and seen table + // ---------------------- + member.status == MemberStatus.Exiting } map { member ⇒ // ---------------------- // 2. Move JOINING => UP (once all nodes have seen that this node is JOINING e.g. we have a convergence) // ---------------------- - if (member.status == Joining) { - log.info("Cluster Node [{}] - Leader is moving node [{}] from JOINING to UP", selfAddress, member.address) - hasChangedState = true - member copy (status = Up) - } else member + if (member.status == Joining) member copy (status = Up) + else member } map { member ⇒ // ---------------------- // 3. 
Move LEAVING => EXITING (once we have a convergence on LEAVING *and* if we have a successful partition handoff) // ---------------------- - if (member.status == Leaving && hasPartionHandoffCompletedSuccessfully(localGossip)) { - log.info("Cluster Node [{}] - Leader is moving node [{}] from LEAVING to EXITING", selfAddress, member.address) - hasChangedState = true - exitingMembers = exitingMembers + member - member copy (status = Exiting) - } else member - + if (member.status == Leaving && hasPartionHandoffCompletedSuccessfully) member copy (status = Exiting) + else member } + // ---------------------- + // 5. Store away all stuff needed for the side-effecting processing in 10. + // ---------------------- + + // Check for the need to do side-effecting on successful state change + // Repeat the checking for transitions between JOINING -> UP, LEAVING -> EXITING, EXITING -> REMOVED + // to check for state-changes and to store away removed and exiting members for later notification + // 1. check for state-changes to update + // 2. store away removed and exiting members so we can separate the pure state changes (that can be retried on collision) and the side-effecting message sending + val (removedMembers, newMembers1) = localMembers partition (_.status == Exiting) + + val (upMembers, newMembers2) = newMembers1 partition (_.status == Joining) + + val (exitingMembers, newMembers3) = newMembers2 partition (_.status == Leaving && hasPartionHandoffCompletedSuccessfully) + + val hasChangedState = removedMembers.nonEmpty || upMembers.nonEmpty || exitingMembers.nonEmpty + // removing REMOVED nodes from the 'seen' table - val newSeen = removedMembers.foldLeft(localSeen) { (seen, removed) ⇒ seen - removed.address } + //val newSeen = removedMembers.foldLeft(localSeen) { (seen, removed) ⇒ seen - removed.address } + val newSeen = localSeen -- removedMembers.map(_.address) // removing REMOVED nodes from the 'unreachable' set - val newUnreachableMembers = removedMembers.foldLeft(localUnreachableMembers) { (unreachable, removed) ⇒ unreachable - removed } + //val newUnreachableMembers = removedMembers.foldLeft(localUnreachableMembers) { (unreachable, removed) ⇒ unreachable - removed } + val newUnreachableMembers = localUnreachableMembers -- removedMembers val newOverview = localOverview copy (seen = newSeen, unreachable = newUnreachableMembers) // update gossip overview - localGossip copy (members = newMembers, overview = newOverview) // update gossip + val newGossip = localGossip copy (members = newMembers, overview = newOverview) // update gossip + + (newGossip, hasChangedState, upMembers, exitingMembers, removedMembers, Set.empty[Member]) } else if (AutoDown) { // we don't have convergence - so we might have unreachable nodes - // if 'auto-down' is turned on, then try to auto-down any unreachable nodes - // ---------------------- - // 4. Move UNREACHABLE => DOWN (auto-downing by leader) - // ---------------------- - val newUnreachableMembers = - localUnreachableMembers.map { member ⇒ - // no need to DOWN members already DOWN - if (member.status == Down) member - else { - log.info("Cluster Node [{}] - Leader is marking unreachable node [{}] as DOWN", selfAddress, member.address) - hasChangedState = true - member copy (status = Down) - } - } + // if 'auto-down' is turned on, then try to auto-down any unreachable nodes + val newUnreachableMembers = localUnreachableMembers.map { member ⇒ + // ---------------------- + // 5. 
Move UNREACHABLE => DOWN (auto-downing by leader) + // ---------------------- + if (member.status == Down) member // no need to DOWN members already DOWN + else member copy (status = Down) + } + + // Check for the need to do side-effecting on successful state change + val (unreachableButNotDownedMembers, _) = localUnreachableMembers partition (_.status != Down) // removing nodes marked as DOWN from the 'seen' table val newSeen = localSeen -- newUnreachableMembers.collect { case m if m.status == Down ⇒ m.address } val newOverview = localOverview copy (seen = newSeen, unreachable = newUnreachableMembers) // update gossip overview - localGossip copy (overview = newOverview) // update gossip + val newGossip = localGossip copy (overview = newOverview) // update gossip - } else localGossip + (newGossip, unreachableButNotDownedMembers.nonEmpty, Set.empty[Member], Set.empty[Member], Set.empty[Member], unreachableButNotDownedMembers) + + } else (localGossip, false, Set.empty[Member], Set.empty[Member], Set.empty[Member], Set.empty[Member]) if (hasChangedState) { // we have a change of state - version it and try to update - // ---------------------- - // 5. Updating the vclock version for the changes + // 6. Updating the vclock version for the changes // ---------------------- val versionedGossip = newGossip :+ vclockNode // ---------------------- - // 6. Updating the 'seen' table + // 7. Updating the 'seen' table // Unless the leader (this node) is part of the removed members, i.e. the leader have moved himself from EXITING -> REMOVED // ---------------------- val seenVersionedGossip = @@ -1199,27 +1211,49 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) val newState = localState copy (latestGossip = seenVersionedGossip) - // if we won the race then update else try again - if (!state.compareAndSet(localState, newState)) leaderActions() // recur - else { - // do the side-effecting notifications on state-change success + // ---------------------- + // 8. Try to update the state with the new gossip + // ---------------------- + if (!state.compareAndSet(localState, newState)) { - if (removedMembers.exists(_.address == selfAddress)) { - // we now know that this node (the leader) is just about to shut down since it will be moved from EXITING -> REMOVED - // so now let's gossip out this information directly since there will not be any other chance - gossip() - } + // ---------------------- + // 9. Failure - retry + // ---------------------- + leaderActions() // recur + + } else { + // ---------------------- + // 10. Success - run all the side-effecting processing + // ---------------------- + + // if (removedMembers.exists(_.address == selfAddress)) { + // // we now know that this node (the leader) is just about to shut down since it will be moved from EXITING -> REMOVED + // // so now let's gossip out this information directly since there will not be any other chance + // gossip() + // } + + // log the move of members from joining to up + upMembers foreach { member ⇒ log.info("Cluster Node [{}] - Leader is moving node [{}] from JOINING to UP", selfAddress, member.address) } // tell all removed members to remove and shut down themselves - removedMembers.map(_.address) foreach { address ⇒ + removedMembers foreach { member ⇒ + val address = member.address + log.info("Cluster Node [{}] - Leader is moving node [{}] from EXITING to REMOVED - and removing node from node ring", selfAddress, address) clusterCommandConnectionFor(address) ! 
ClusterLeaderAction.Remove(address) } // tell all exiting members to exit - exitingMembers.map(_.address) foreach { address ⇒ + exitingMembers foreach { member ⇒ + val address = member.address + log.info("Cluster Node [{}] - Leader is moving node [{}] from LEAVING to EXITING", selfAddress, address) clusterCommandConnectionFor(address) ! ClusterLeaderAction.Exit(address) // FIXME should use ? to await completion of handoff? } + // log the auto-downing of the unreachable nodes + unreachableButNotDownedMembers foreach { member ⇒ + log.info("Cluster Node [{}] - Leader is marking unreachable node [{}] as DOWN", selfAddress, member.address) + } + notifyMembershipChangeListeners(localState, newState) } } @@ -1273,13 +1307,13 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) private def isUnavailable(state: State): Boolean = { val localGossip = state.latestGossip val isUnreachable = localGossip.overview.unreachable exists { _.address == selfAddress } - val hasUnavailableMemberStatus = localGossip.members exists { _.status.isUnavailable } + val hasUnavailableMemberStatus = localGossip.members exists { m ⇒ (m == self) && m.status.isUnavailable } isUnreachable || hasUnavailableMemberStatus } private def notifyMembershipChangeListeners(oldState: State, newState: State): Unit = { - val oldMembersStatus = oldState.latestGossip.members.toSeq.map(m ⇒ (m.address, m.status)) - val newMembersStatus = newState.latestGossip.members.toSeq.map(m ⇒ (m.address, m.status)) + val oldMembersStatus = oldState.latestGossip.members.map(m ⇒ (m.address, m.status)) + val newMembersStatus = newState.latestGossip.members.map(m ⇒ (m.address, m.status)) if (newMembersStatus != oldMembersStatus) newState.memberMembershipChangeListeners foreach { _ notify newState.latestGossip.members } } From 49586bd01dbcae3e6f0b7bba9f8fba841194177b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Mon, 18 Jun 2012 15:25:17 +0200 Subject: [PATCH 15/20] Change Member ordering so it sorts members by host and port with the exception that it puts all members that are in MemberStatus.EXITING last. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit To fix LEADER leaving and allow handoff to new leader before moving old leader from EXITING -> REMOVED. Signed-off-by: Jonas Bonér --- .../src/main/scala/akka/cluster/Cluster.scala | 22 ++++++++++--------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index 528672363d..cc91680b4a 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -105,7 +105,7 @@ class Member(val address: Address, val status: MemberStatus) extends ClusterMess } /** - * Factory and Utility module for Member instances. + * Module with factory and ordering methods for Member instances. */ object Member { @@ -119,10 +119,13 @@ object Member { } /** - * `Member` ordering type class, sorts members by `Address`. + * `Member` ordering type class, sorts members by host and port with the exception that + * it puts all members that are in MemberStatus.EXITING last. 
*/ - implicit val ordering: Ordering[Member] = new Ordering[Member] { - def compare(x: Member, y: Member) = addressOrdering.compare(x.address, y.address) + implicit val ordering: Ordering[Member] = Ordering.fromLessThan[Member] { (a, b) ⇒ + if (a.status == MemberStatus.Exiting && b.status != MemberStatus.Exiting) false + else if (a.status != MemberStatus.Exiting && b.status == MemberStatus.Exiting) true + else addressOrdering.compare(a.address, b.address) < 0 } def apply(address: Address, status: MemberStatus): Member = new Member(address, status) @@ -301,8 +304,7 @@ case class Gossip( // 4. merge members by selecting the single Member with highest MemberStatus out of the Member groups, // and exclude unreachable - val mergedMembers = Gossip.emptyMembers ++ Member.pickHighestPriority(this.members, that.members). - filterNot(mergedUnreachable.contains) + val mergedMembers = Gossip.emptyMembers ++ Member.pickHighestPriority(this.members, that.members).filterNot(mergedUnreachable.contains) // 5. fresh seen table val mergedSeen = Map.empty[Address, VectorClock] @@ -1109,10 +1111,10 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) val ( newGossip: Gossip, hasChangedState: Boolean, - upMembers: Set[Member], - exitingMembers: Set[Member], - removedMembers: Set[Member], - unreachableButNotDownedMembers: Set[Member]) = + upMembers, + exitingMembers, + removedMembers, + unreachableButNotDownedMembers) = if (convergence(localGossip).isDefined) { // we have convergence - so we can't have unreachable nodes From 6b02c48be9904e15537ed2a02fc77eef98bd070e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Mon, 18 Jun 2012 15:25:46 +0200 Subject: [PATCH 16/20] Added spec testing the Ordering[Address] and Ordering[Member]. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- .../akka/cluster/MemberOrderingSpec.scala | 108 ++++++++++++++++++ 1 file changed, 108 insertions(+) create mode 100644 akka-cluster/src/test/scala/akka/cluster/MemberOrderingSpec.scala diff --git a/akka-cluster/src/test/scala/akka/cluster/MemberOrderingSpec.scala b/akka-cluster/src/test/scala/akka/cluster/MemberOrderingSpec.scala new file mode 100644 index 0000000000..7528750a22 --- /dev/null +++ b/akka-cluster/src/test/scala/akka/cluster/MemberOrderingSpec.scala @@ -0,0 +1,108 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. 
+ */ + +package akka.cluster + +import akka.actor.{ Address, AddressFromURIString } +import akka.testkit.AkkaSpec +import java.net.InetSocketAddress +import scala.collection.immutable.SortedSet + +class MemberOrderingSpec extends AkkaSpec { + import Member.ordering + import Member.addressOrdering + import MemberStatus._ + + "An Ordering[Member]" must { + + "order non-exiting members by host:port" in { + val members = SortedSet.empty[Member] + + Member(AddressFromURIString("akka://sys@darkstar:1112"), Up) + + Member(AddressFromURIString("akka://sys@darkstar:1113"), Joining) + + Member(AddressFromURIString("akka://sys@darkstar:1111"), Up) + + val seq = members.toSeq + seq.size must equal(3) + seq(0) must equal(Member(AddressFromURIString("akka://sys@darkstar:1111"), Up)) + seq(1) must equal(Member(AddressFromURIString("akka://sys@darkstar:1112"), Up)) + seq(2) must equal(Member(AddressFromURIString("akka://sys@darkstar:1113"), Joining)) + } + + "order exiting members by last" in { + val members = SortedSet.empty[Member] + + Member(AddressFromURIString("akka://sys@darkstar:1112"), Exiting) + + Member(AddressFromURIString("akka://sys@darkstar:1113"), Up) + + Member(AddressFromURIString("akka://sys@darkstar:1111"), Joining) + + val seq = members.toSeq + seq.size must equal(3) + seq(0) must equal(Member(AddressFromURIString("akka://sys@darkstar:1111"), Joining)) + seq(1) must equal(Member(AddressFromURIString("akka://sys@darkstar:1113"), Up)) + seq(2) must equal(Member(AddressFromURIString("akka://sys@darkstar:1112"), Exiting)) + } + + "order multiple exiting members by last but internally by host:port" in { + val members = SortedSet.empty[Member] + + Member(AddressFromURIString("akka://sys@darkstar:1112"), Exiting) + + Member(AddressFromURIString("akka://sys@darkstar:1113"), Leaving) + + Member(AddressFromURIString("akka://sys@darkstar:1111"), Up) + + Member(AddressFromURIString("akka://sys@darkstar:1110"), Exiting) + + val seq = members.toSeq + seq.size must equal(4) + seq(0) must equal(Member(AddressFromURIString("akka://sys@darkstar:1111"), Up)) + seq(1) must equal(Member(AddressFromURIString("akka://sys@darkstar:1113"), Leaving)) + seq(2) must equal(Member(AddressFromURIString("akka://sys@darkstar:1110"), Exiting)) + seq(3) must equal(Member(AddressFromURIString("akka://sys@darkstar:1112"), Exiting)) + } + } + + "An Ordering[Address]" must { + + "order addresses by port" in { + val addresses = SortedSet.empty[Address] + + AddressFromURIString("akka://sys@darkstar:1112") + + AddressFromURIString("akka://sys@darkstar:1113") + + AddressFromURIString("akka://sys@darkstar:1110") + + AddressFromURIString("akka://sys@darkstar:1111") + + val seq = addresses.toSeq + seq.size must equal(4) + seq(0) must equal(AddressFromURIString("akka://sys@darkstar:1110")) + seq(1) must equal(AddressFromURIString("akka://sys@darkstar:1111")) + seq(2) must equal(AddressFromURIString("akka://sys@darkstar:1112")) + seq(3) must equal(AddressFromURIString("akka://sys@darkstar:1113")) + } + + "order addresses by hostname" in { + val addresses = SortedSet.empty[Address] + + AddressFromURIString("akka://sys@darkstar2:1110") + + AddressFromURIString("akka://sys@darkstar1:1110") + + AddressFromURIString("akka://sys@darkstar3:1110") + + AddressFromURIString("akka://sys@darkstar0:1110") + + val seq = addresses.toSeq + seq.size must equal(4) + seq(0) must equal(AddressFromURIString("akka://sys@darkstar0:1110")) + seq(1) must equal(AddressFromURIString("akka://sys@darkstar1:1110")) + seq(2) must 
equal(AddressFromURIString("akka://sys@darkstar2:1110")) + seq(3) must equal(AddressFromURIString("akka://sys@darkstar3:1110")) + } + + "order addresses by hostname and port" in { + val addresses = SortedSet.empty[Address] + + AddressFromURIString("akka://sys@darkstar2:1110") + + AddressFromURIString("akka://sys@darkstar0:1111") + + AddressFromURIString("akka://sys@darkstar2:1111") + + AddressFromURIString("akka://sys@darkstar0:1110") + + val seq = addresses.toSeq + seq.size must equal(4) + seq(0) must equal(AddressFromURIString("akka://sys@darkstar0:1110")) + seq(1) must equal(AddressFromURIString("akka://sys@darkstar0:1111")) + seq(2) must equal(AddressFromURIString("akka://sys@darkstar2:1110")) + seq(3) must equal(AddressFromURIString("akka://sys@darkstar2:1111")) + } + } +} From 6e60d51263070fcf2897e79943289db47a1b14c6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Mon, 18 Jun 2012 15:26:23 +0200 Subject: [PATCH 17/20] Reenabled LeaderLeavingSpec and added successful leader-handoff assertion MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- .../scala/akka/cluster/LeaderLeavingSpec.scala | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderLeavingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderLeavingSpec.scala index f3274583b5..37312a7351 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderLeavingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderLeavingSpec.scala @@ -21,9 +21,8 @@ object LeaderLeavingMultiJvmSpec extends MultiNodeConfig { akka.cluster { leader-actions-interval = 5 s # increase the leader action task frequency to make sure we get a chance to test the LEAVING state unreachable-nodes-reaper-interval = 30 s - } - """) - .withFallback(MultiNodeClusterSpec.clusterConfig))) + }""") + .withFallback(MultiNodeClusterSpec.clusterConfig))) } class LeaderLeavingMultiJvmNode1 extends LeaderLeavingSpec with FailureDetectorPuppetStrategy @@ -42,7 +41,7 @@ abstract class LeaderLeavingSpec "A LEADER that is LEAVING" must { - "be moved to LEAVING, then to EXITING, then to REMOVED, then be shut down and then a new LEADER should be elected" taggedAs LongRunningTest ignore { + "be moved to LEAVING, then to EXITING, then to REMOVED, then be shut down and then a new LEADER should be elected" taggedAs LongRunningTest in { awaitClusterUp(first, second, third) @@ -53,8 +52,11 @@ abstract class LeaderLeavingSpec cluster.leave(oldLeaderAddress) testConductor.enter("leader-left") + // verify that a NEW LEADER have taken over + awaitCond(!cluster.isLeader) + // verify that the LEADER is shut down - awaitCond(!cluster.isRunning) + awaitCond(!cluster.isRunning, 30.seconds.dilated) // verify that the LEADER is REMOVED awaitCond(cluster.status == MemberStatus.Removed) From 4f8522dc6381cb236d6ff833ada4abf34b6bc9ef Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Tue, 19 Jun 2012 14:21:11 +0200 Subject: [PATCH 18/20] Merged in the old MemberSpec with the new MemberOrderingSpec. 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- .../akka/cluster/MemberOrderingSpec.scala | 34 +++++++++++++- .../test/scala/akka/cluster/MemberSpec.scala | 45 ------------------- 2 files changed, 32 insertions(+), 47 deletions(-) delete mode 100644 akka-cluster/src/test/scala/akka/cluster/MemberSpec.scala diff --git a/akka-cluster/src/test/scala/akka/cluster/MemberOrderingSpec.scala b/akka-cluster/src/test/scala/akka/cluster/MemberOrderingSpec.scala index 7528750a22..d8687312da 100644 --- a/akka-cluster/src/test/scala/akka/cluster/MemberOrderingSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/MemberOrderingSpec.scala @@ -5,11 +5,14 @@ package akka.cluster import akka.actor.{ Address, AddressFromURIString } -import akka.testkit.AkkaSpec import java.net.InetSocketAddress +import org.scalatest.matchers.MustMatchers +import org.scalatest.WordSpec import scala.collection.immutable.SortedSet +import scala.util.Random -class MemberOrderingSpec extends AkkaSpec { +@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) +class MemberOrderingSpec extends WordSpec with MustMatchers { import Member.ordering import Member.addressOrdering import MemberStatus._ @@ -56,6 +59,33 @@ class MemberOrderingSpec extends AkkaSpec { seq(2) must equal(Member(AddressFromURIString("akka://sys@darkstar:1110"), Exiting)) seq(3) must equal(Member(AddressFromURIString("akka://sys@darkstar:1112"), Exiting)) } + + "be sorted by address correctly" in { + import Member.ordering + // sorting should be done on host and port, only + val m1 = Member(Address("akka", "sys1", "host1", 9000), MemberStatus.Up) + val m2 = Member(Address("akka", "sys1", "host1", 10000), MemberStatus.Up) + val m3 = Member(Address("cluster", "sys2", "host2", 8000), MemberStatus.Up) + val m4 = Member(Address("cluster", "sys2", "host2", 9000), MemberStatus.Up) + val m5 = Member(Address("cluster", "sys1", "host2", 10000), MemberStatus.Up) + + val expected = IndexedSeq(m1, m2, m3, m4, m5) + val shuffled = Random.shuffle(expected) + shuffled.sorted must be(expected) + (SortedSet.empty[Member] ++ shuffled).toIndexedSeq must be(expected) + } + + "have stable equals and hashCode" in { + val m1 = Member(Address("akka", "sys1", "host1", 9000), MemberStatus.Joining) + val m2 = Member(Address("akka", "sys1", "host1", 9000), MemberStatus.Up) + val m3 = Member(Address("akka", "sys1", "host1", 10000), MemberStatus.Up) + + m1 must be(m2) + m1.hashCode must be(m2.hashCode) + + m3 must not be (m2) + m3 must not be (m1) + } } "An Ordering[Address]" must { diff --git a/akka-cluster/src/test/scala/akka/cluster/MemberSpec.scala b/akka-cluster/src/test/scala/akka/cluster/MemberSpec.scala deleted file mode 100644 index bc1f70ae86..0000000000 --- a/akka-cluster/src/test/scala/akka/cluster/MemberSpec.scala +++ /dev/null @@ -1,45 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. 
- */ - -package akka.cluster - -import org.scalatest.WordSpec -import org.scalatest.matchers.MustMatchers -import akka.actor.Address -import scala.util.Random -import scala.collection.immutable.SortedSet - -@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) -class MemberSpec extends WordSpec with MustMatchers { - - "Member" must { - - "be sorted by address correctly" in { - import Member.ordering - // sorting should be done on host and port, only - val m1 = Member(Address("akka", "sys1", "host1", 9000), MemberStatus.Up) - val m2 = Member(Address("akka", "sys1", "host1", 10000), MemberStatus.Up) - val m3 = Member(Address("cluster", "sys2", "host2", 8000), MemberStatus.Up) - val m4 = Member(Address("cluster", "sys2", "host2", 9000), MemberStatus.Up) - val m5 = Member(Address("cluster", "sys1", "host2", 10000), MemberStatus.Up) - - val expected = IndexedSeq(m1, m2, m3, m4, m5) - val shuffled = Random.shuffle(expected) - shuffled.sorted must be(expected) - (SortedSet.empty[Member] ++ shuffled).toIndexedSeq must be(expected) - } - - "have stable equals and hashCode" in { - val m1 = Member(Address("akka", "sys1", "host1", 9000), MemberStatus.Joining) - val m2 = Member(Address("akka", "sys1", "host1", 9000), MemberStatus.Up) - val m3 = Member(Address("akka", "sys1", "host1", 10000), MemberStatus.Up) - - m1 must be(m2) - m1.hashCode must be(m2.hashCode) - - m3 must not be (m2) - m3 must not be (m1) - } - } -} From fd54a93135271b823008ecc3ed04436694c3548e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Tue, 19 Jun 2012 14:21:56 +0200 Subject: [PATCH 19/20] Added ScalaDoc on 'def status: MemberStatus' describing the MemberStatus.Removed semantics. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- akka-cluster/src/main/scala/akka/cluster/Cluster.scala | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index cc91680b4a..ce3daa2dbd 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -603,7 +603,11 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) def latestGossip: Gossip = state.get.latestGossip /** - * Member status for this node. + * Member status for this node (`MemberStatus`). + * + * NOTE: If the node has been removed from the cluster (and shut down) then it's status is set to the 'REMOVED' tombstone state + * and is no longer present in the node ring or any other part of the gossiping state. However in order to maintain the + * model and the semantics the user would expect, this method will in this situation return `MemberStatus.Removed`. 
*/ def status: MemberStatus = { if (isRunning) self.status @@ -1160,11 +1164,9 @@ class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) val hasChangedState = removedMembers.nonEmpty || upMembers.nonEmpty || exitingMembers.nonEmpty // removing REMOVED nodes from the 'seen' table - //val newSeen = removedMembers.foldLeft(localSeen) { (seen, removed) ⇒ seen - removed.address } val newSeen = localSeen -- removedMembers.map(_.address) // removing REMOVED nodes from the 'unreachable' set - //val newUnreachableMembers = removedMembers.foldLeft(localUnreachableMembers) { (unreachable, removed) ⇒ unreachable - removed } val newUnreachableMembers = localUnreachableMembers -- removedMembers val newOverview = localOverview copy (seen = newSeen, unreachable = newUnreachableMembers) // update gossip overview From 9011c310e1b0bb37a5d0f73a71a9833a3a164c15 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Tue, 19 Jun 2012 14:27:12 +0200 Subject: [PATCH 20/20] Minor cleanup. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- akka-cluster/src/main/scala/akka/cluster/Cluster.scala | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index ce3daa2dbd..411c9d4b18 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -123,8 +123,8 @@ object Member { * it puts all members that are in MemberStatus.EXITING last. */ implicit val ordering: Ordering[Member] = Ordering.fromLessThan[Member] { (a, b) ⇒ - if (a.status == MemberStatus.Exiting && b.status != MemberStatus.Exiting) false - else if (a.status != MemberStatus.Exiting && b.status == MemberStatus.Exiting) true + if (a.status == Exiting && b.status != Exiting) false + else if (a.status != Exiting && b.status == Exiting) true else addressOrdering.compare(a.address, b.address) < 0 }
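For illustration, a small usage sketch in the style of the MemberOrderingSpec above (the darkstar addresses are made up) of what the EXITING-last ordering buys: the head of the sorted member ring, which is the deterministic leader, moves on to the next node while the old leader is still EXITING, so hand-off happens before the old leader is moved to REMOVED.

    import akka.actor.AddressFromURIString
    import akka.cluster.{ Member, MemberStatus }
    import scala.collection.immutable.SortedSet

    // Member.ordering is picked up implicitly from the Member companion object.
    val members = SortedSet(
      Member(AddressFromURIString("akka://sys@darkstar:1111"), MemberStatus.Exiting), // old leader, on its way out
      Member(AddressFromURIString("akka://sys@darkstar:1112"), MemberStatus.Up),
      Member(AddressFromURIString("akka://sys@darkstar:1113"), MemberStatus.Up))

    // The EXITING node sorts last, so the first member in the ring, i.e. the leader, is now darkstar:1112.
    members.head.address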
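The leader-action rewrite in [PATCH 14/20] follows a compute-then-compare-and-set shape: build the new gossip and the sets of members that need side-effecting processing purely, publish the state with compareAndSet, retry if the update lost the race, and only on success run the side effects. A generic sketch of that pattern (CasStateExample and update are made-up names, not taken from the Akka sources):

    import java.util.concurrent.atomic.AtomicReference
    import scala.annotation.tailrec

    final class CasStateExample[S](initial: S) {
      private val state = new AtomicReference[S](initial)

      // transform is a pure step: it returns the new state plus a thunk of side effects
      // that must only run if this update wins the compare-and-set race.
      @tailrec
      final def update(transform: S => (S, () => Unit)): Unit = {
        val oldState = state.get
        val (newState, sideEffects) = transform(oldState)
        if (!state.compareAndSet(oldState, newState)) update(transform) // lost the race: recompute and retry
        else sideEffects() // won the race: now do the logging and messaging
      }
    }

This mirrors the "if (!state.compareAndSet(localState, newState)) leaderActions() // recur" check in the diff above, where the member notifications and ClusterLeaderAction messages are kept in the success branch.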