Always removeNode when shutdown, see 2137

Patrik Nordwall 2012-06-04 14:29:32 +02:00
parent 415366e881
commit f30a1a0b1f
6 changed files with 3 additions and 7 deletions
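
In short, test code no longer needs to pair removeNode with shutdown. A before/after sketch of the caller-side change, distilled from the test diffs below ('third' stands in for any role name):

    // Before: each test removed the node explicitly, or the remaining
    // nodes would hang on subsequent barriers (see ticket 2137).
    testConductor.removeNode(third)
    testConductor.shutdown(third, 0)

    // After: shutdown removes the node itself.
    testConductor.shutdown(third, 0)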


@@ -41,7 +41,6 @@ class ClientDowningNodeThatIsUnreachableSpec
       testConductor.enter("all-up")
       // kill 'third' node
-      testConductor.removeNode(third)
       testConductor.shutdown(third, 0)
       // mark 'third' node as DOWN


@@ -54,7 +54,6 @@ abstract class GossipingAccrualFailureDetectorSpec extends MultiNodeSpec(Gossipi
     "mark node as 'unavailable' if a node in the cluster is shut down (and its heartbeats stops)" taggedAs LongRunningTest in {
       runOn(first) {
-        testConductor.removeNode(third)
         testConductor.shutdown(third, 0)
       }


@@ -49,7 +49,6 @@ class LeaderDowningNodeThatIsUnreachableSpec
       testConductor.enter("all-up")
       // kill 'fourth' node
-      testConductor.removeNode(fourth)
       testConductor.shutdown(fourth, 0)
       testConductor.enter("down-fourth-node")
@@ -89,7 +88,6 @@ class LeaderDowningNodeThatIsUnreachableSpec
       testConductor.enter("all-up")
       // kill 'second' node
-      testConductor.removeNode(second)
       testConductor.shutdown(second, 0)
       testConductor.enter("down-second-node")


@@ -65,7 +65,6 @@ abstract class LeaderElectionSpec extends MultiNodeSpec(LeaderElectionMultiJvmSp
         case `controller` ⇒
           testConductor.enter("before-shutdown")
-          testConductor.removeNode(leader)
           testConductor.shutdown(leader, 0)
           testConductor.enter("after-shutdown", "after-down", "completed")


@@ -54,7 +54,6 @@ abstract class NodeShutdownSpec extends MultiNodeSpec(NodeShutdownMultiJvmSpec)
     "become singleton cluster when one node is shutdown" taggedAs LongRunningTest in {
       runOn(first) {
         val secondAddress = node(second).address
-        testConductor.removeNode(second)
         testConductor.shutdown(second, 0)
         awaitUpConvergence(numberOfMembers = 1, canNotBePartOfMemberRing = Seq(secondAddress), 30.seconds)
         cluster.isSingletonCluster must be(true)


@@ -168,7 +168,8 @@ trait Conductor { this: TestConductorExt ⇒
   /**
    * Tell the remote node to shut itself down using System.exit with the given
-   * exitValue.
+   * exitValue. The node will also be removed, so that the remaining nodes may still
+   * pass subsequent barriers.
    *
    * @param node is the symbolic name of the node which is to be affected
    * @param exitValue is the return code which shall be given to System.exit
@@ -441,6 +442,7 @@ private[akka] class Controller(private var initialParticipants: Int, controllerP
       if (exitValueOrKill < 0) {
         // TODO: kill via SBT
       } else {
+        barrier ! BarrierCoordinator.RemoveClient(node)
         nodes(node).fsm forward ToClient(TerminateMsg(exitValueOrKill))
       }
     case Remove(node) ⇒
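
The ordering in the Controller is the substance of the fix: the barrier coordinator must forget a client before the terminate message goes out, otherwise a barrier entered by the remaining nodes could still count the dead node and never open. A simplified, self-contained sketch of that flow, with hypothetical stand-ins for the real TestConductor protocol types:

    import akka.actor.{ Actor, ActorRef }

    // Hypothetical stand-ins for the real protocol messages:
    final case class RemoveClient(node: String)
    final case class TerminateMsg(exitValue: Int)
    final case class ToClient(msg: Any)
    final case class Terminate(node: String, exitValue: Int)

    class MiniController(barrier: ActorRef, nodes: Map[String, ActorRef]) extends Actor {
      def receive = {
        case Terminate(node, exitValue) ⇒
          // First make the barrier coordinator forget the node, so barriers
          // entered by the remaining nodes can still open.
          barrier ! RemoveClient(node)
          // Then tell the node to shut itself down via System.exit(exitValue).
          nodes(node) forward ToClient(TerminateMsg(exitValue))
      }
    }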