/**
 * Copyright (C) 2009-2013 Typesafe Inc. <http://www.typesafe.com>
 */
package akka.cluster

import language.postfixOps
import scala.collection.immutable
import com.typesafe.config.ConfigFactory
import org.scalatest.BeforeAndAfter
import akka.remote.testkit.MultiNodeConfig
import akka.remote.testkit.MultiNodeSpec
import akka.testkit._
import scala.concurrent.duration._
import akka.actor.Address
import akka.actor.ActorSystem
import akka.actor.Props
import akka.actor.Actor
import akka.actor.RootActorPath
import akka.cluster.MemberStatus._
import akka.actor.Deploy

object RestartFirstSeedNodeMultiJvmSpec extends MultiNodeConfig {
  val seed1 = role("seed1")
  val seed2 = role("seed2")
  val seed3 = role("seed3")

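  // auto-down unreachable members immediately, so that the crashed seed1 is removed
  // from the membership and its restarted incarnation can join again with the same address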
  commonConfig(debugConfig(on = false).
    withFallback(ConfigFactory.parseString("akka.cluster.auto-down-unreachable-after = 0s")).
    withFallback(MultiNodeClusterSpec.clusterConfig))
}

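// the multi-node test framework runs one of these concrete classes per JVM, one JVM per role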
class RestartFirstSeedNodeMultiJvmNode1 extends RestartFirstSeedNodeSpec
class RestartFirstSeedNodeMultiJvmNode2 extends RestartFirstSeedNodeSpec
class RestartFirstSeedNodeMultiJvmNode3 extends RestartFirstSeedNodeSpec

abstract class RestartFirstSeedNodeSpec
  extends MultiNodeSpec(RestartFirstSeedNodeMultiJvmSpec)
  with MultiNodeClusterSpec with ImplicitSender {

  import RestartFirstSeedNodeMultiJvmSpec._

  @volatile var seedNode1Address: Address = _

  // use a separate ActorSystem, to be able to simulate restart
  lazy val seed1System = ActorSystem(system.name, system.settings.config)

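  // missingSeed points at a port where no actor system is expected to be running, so the
  // seed node list always contains one unreachable entry (port 61313 is assumed to be free)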
  def missingSeed = address(seed3).copy(port = Some(61313))
  def seedNodes: immutable.IndexedSeq[Address] = Vector(seedNode1Address, seed2, seed3, missingSeed)

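  // the restarted system binds to the same port as seed1System, so the restarted
  // seed node comes back with the same address as the one that was shut down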
  lazy val restartedSeed1System = ActorSystem(system.name,
    ConfigFactory.parseString("akka.remote.netty.tcp.port=" + seedNodes.head.port.get).
      withFallback(system.settings.config))

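  // shut down whichever of the two seed1 actor systems is still running when the test ends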
  override def afterAll(): Unit = {
    runOn(seed1) {
      if (seed1System.isTerminated)
        shutdown(restartedSeed1System)
      else
        shutdown(seed1System)
    }
    super.afterAll()
  }

  "Cluster seed nodes" must {
    "be able to restart first seed node and join other seed nodes" taggedAs LongRunningTest in within(40 seconds) {
      // seed1System is a separate ActorSystem, to be able to simulate restart
      // we must transfer its address to seed2 and seed3
      runOn(seed2, seed3) {
        system.actorOf(Props(new Actor {
          def receive = {
            case a: Address ⇒
              seedNode1Address = a
              sender() ! "ok"
          }
        }).withDeploy(Deploy.local), name = "address-receiver")
        enterBarrier("seed1-address-receiver-ready")
      }

      runOn(seed1) {
        enterBarrier("seed1-address-receiver-ready")
        seedNode1Address = Cluster(seed1System).selfAddress
        List(seed2, seed3) foreach { r ⇒
          system.actorSelection(RootActorPath(r) / "user" / "address-receiver") ! seedNode1Address
          expectMsg(5 seconds, "ok")
        }
      }
      enterBarrier("seed1-address-transfered")

      // now we can join seed1System, seed2, seed3 together
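      // the seed node list also contains missingSeed, so joining must work even though
      // one of the configured seed nodes is unreachable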
      runOn(seed1) {
        Cluster(seed1System).joinSeedNodes(seedNodes)
        awaitAssert(Cluster(seed1System).readView.members.size should be(3))
        awaitAssert(Cluster(seed1System).readView.members.map(_.status) should be(Set(Up)))
      }
      runOn(seed2, seed3) {
        cluster.joinSeedNodes(seedNodes)
        awaitMembersUp(3)
      }
      enterBarrier("started")

      // shutdown seed1System
      runOn(seed1) {
        shutdown(seed1System, remaining)
      }
      runOn(seed2, seed3) {
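        // with auto-down-unreachable-after = 0s the terminated seed1 is downed and removed,
        // so the remaining two nodes converge to a 2-member cluster without unreachable entries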
        awaitMembersUp(2, canNotBePartOfMemberRing = Set(seedNodes.head))
        awaitAssert(clusterView.unreachableMembers.map(_.address) should not contain (seedNodes.head))
      }
      enterBarrier("seed1-shutdown")

      // then start restartedSeed1System, which has the same address as seed1System
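      // the other nodes should accept the restarted incarnation and the cluster grows back to 3 members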
      runOn(seed1) {
        Cluster(restartedSeed1System).joinSeedNodes(seedNodes)
        awaitAssert(Cluster(restartedSeed1System).readView.members.size should be(3))
        awaitAssert(Cluster(restartedSeed1System).readView.members.map(_.status) should be(Set(Up)))
      }
      runOn(seed2, seed3) {
        awaitMembersUp(3)
      }
      enterBarrier("seed1-restarted")
    }
  }
}