Renamed methods in public Cluster API, plus changed semantics of Cluster.join (was wrong before).

Signed-off-by: Jonas Bonér <jonas@jonasboner.com>
This commit is contained in:
Jonas Bonér 2012-04-12 22:50:50 +02:00
parent 3e47dfa7eb
commit e500548494
4 changed files with 21 additions and 15 deletions

View file

@ -490,30 +490,34 @@ class Cluster(system: ExtendedActorSystem) extends Extension {
}
/**
* Send command to JOIN one node to another.
* Try to join this cluster node with the node specified by 'address'.
* A 'Join(thisNodeAddress)' command is sent to the node to join.
*/
def scheduleNodeJoin(address: Address) {
clusterCommandDaemon ! ClusterAction.Join(address)
def join(address: Address) {
val connection = clusterCommandConnectionFor(address)
val command = ClusterAction.Join(remoteAddress)
log.info("Cluster Node [{}] - Trying to send JOIN to [{}] through connection [{}]", remoteAddress, address, connection)
connection ! command
}
/**
* Send command to issue state transition to LEAVING.
* Send command to issue state transition to LEAVING for the node specified by 'address'.
*/
def scheduleNodeLeave(address: Address) {
def leave(address: Address) {
clusterCommandDaemon ! ClusterAction.Leave(address)
}
/**
* Send command to issue state transition to EXITING.
* Send command to issue state transition from DOWN to EXITING for the node specified by 'address'.
*/
def scheduleNodeDown(address: Address) {
def down(address: Address) {
clusterCommandDaemon ! ClusterAction.Down(address)
}
/**
* Send command to issue state transition to REMOVED.
* Send command to issue state transition to REMOVED for the node specified by 'address'.
*/
def scheduleNodeRemove(address: Address) {
def remove(address: Address) {
clusterCommandDaemon ! ClusterAction.Remove(address)
}

View file

@ -92,7 +92,7 @@ class ClientDowningSpec extends ClusterSpec("akka.cluster.auto-down = off") with
system3.shutdown()
// client marks node3 as DOWN
node1.scheduleNodeDown(address3)
node1.down(address3)
println("Give the system time to converge...")
Thread.sleep(10.seconds.dilated.toMillis)
@ -107,7 +107,7 @@ class ClientDowningSpec extends ClusterSpec("akka.cluster.auto-down = off") with
system4.shutdown()
// client marks node4 as DOWN
node2.scheduleNodeDown(address4)
node2.down(address4)
println("Give the system time to converge...")
Thread.sleep(10.seconds.dilated.toMillis)

View file

@ -110,7 +110,8 @@ class JoinTwoClustersSpec extends ClusterSpec("akka.cluster.failure-detector.thr
node6.isLeader must be(false)
// join
node1.scheduleNodeJoin(node4.remoteAddress)
node4.join(node1.remoteAddress)
//node1.scheduleNodeJoin(node4.remoteAddress)
println("Give the system time to converge...")
Thread.sleep(10.seconds.dilated.toMillis)
@ -127,7 +128,8 @@ class JoinTwoClustersSpec extends ClusterSpec("akka.cluster.failure-detector.thr
"be able to 'elect' a single leader after joining (C -> A + B)" taggedAs LongRunningTest in {
// join
node5.scheduleNodeJoin(node4.remoteAddress)
node4.join(node5.remoteAddress)
//node5.scheduleNodeJoin(node4.remoteAddress)
println("Give the system time to converge...")
Thread.sleep(10.seconds.dilated.toMillis)

View file

@ -80,7 +80,7 @@ class LeaderElectionSpec extends ClusterSpec with ImplicitSender {
system1.shutdown()
// user marks node1 as DOWN
node2.scheduleNodeDown(address1)
node2.down(address1)
println("Give the system time to converge...")
Thread.sleep(10.seconds.dilated.toMillis)
@ -98,7 +98,7 @@ class LeaderElectionSpec extends ClusterSpec with ImplicitSender {
system2.shutdown()
// user marks node2 as DOWN
node3.scheduleNodeDown(address2)
node3.down(address2)
println("Give the system time to converge...")
Thread.sleep(10.seconds.dilated.toMillis)