diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala
index 3a4148e3f0..22f9ada0c8 100644
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala
+++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala
@@ -11,9 +11,9 @@ import akka.testkit._
 import akka.actor.Address

 object ClientDowningNodeThatIsUnreachableMultiJvmSpec extends MultiNodeConfig {
-  val first = role("first")
+  val first = role("first")
   val second = role("second")
-  val third = role("third")
+  val third = role("third")
   val fourth = role("fourth")

   commonConfig(debugConfig(on = false).
@@ -39,7 +39,7 @@ class ClientDowningNodeThatIsUnreachableSpec
     "be able to DOWN a node that is UNREACHABLE (killed)" taggedAs LongRunningTest in {
       runOn(first) {
         cluster.self
-        awaitUpConvergence(nrOfMembers = 4)
+        awaitUpConvergence(numberOfMembers = 4)

         val thirdAddress = node(third).address
         testConductor.enter("all-up")
@@ -52,7 +52,7 @@ class ClientDowningNodeThatIsUnreachableSpec
         cluster.down(thirdAddress)
         testConductor.enter("down-third-node")

-        awaitUpConvergence(nrOfMembers = 3, canNotBePartOfRing = Seq(thirdAddress))
+        awaitUpConvergence(numberOfMembers = 3, canNotBePartOfMemberRing = Seq(thirdAddress))
         cluster.latestGossip.members.exists(_.address == thirdAddress) must be(false)
         testConductor.enter("await-completion")
       }
@@ -60,20 +60,20 @@ class ClientDowningNodeThatIsUnreachableSpec

       runOn(third) {
         cluster.join(node(first).address)
-        awaitUpConvergence(nrOfMembers = 4)
+        awaitUpConvergence(numberOfMembers = 4)
         testConductor.enter("all-up")
       }

       runOn(second, fourth) {
         cluster.join(node(first).address)
-        awaitUpConvergence(nrOfMembers = 4)
+        awaitUpConvergence(numberOfMembers = 4)

         val thirdAddress = node(third).address
         testConductor.enter("all-up")

         testConductor.enter("down-third-node")

-        awaitUpConvergence(nrOfMembers = 3, canNotBePartOfRing = Seq(thirdAddress))
+        awaitUpConvergence(numberOfMembers = 3, canNotBePartOfMemberRing = Seq(thirdAddress))
         testConductor.enter("await-completion")
       }
     }
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala
index 0f48951305..d855522b36 100644
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala
+++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala
@@ -11,9 +11,9 @@ import akka.testkit._
 import akka.actor.Address

 object ClientDowningNodeThatIsUpMultiJvmSpec extends MultiNodeConfig {
-  val first = role("first")
+  val first = role("first")
   val second = role("second")
-  val third = role("third")
+  val third = role("third")
   val fourth = role("fourth")

   commonConfig(debugConfig(on = false).
@@ -39,7 +39,7 @@ class ClientDowningNodeThatIsUpSpec
     "be able to DOWN a node that is UP (healthy and available)" taggedAs LongRunningTest in {
       runOn(first) {
         cluster.self
-        awaitUpConvergence(nrOfMembers = 4)
+        awaitUpConvergence(numberOfMembers = 4)

         val thirdAddress = node(third).address
         testConductor.enter("all-up")
@@ -49,27 +49,27 @@ class ClientDowningNodeThatIsUpSpec
         cluster.down(thirdAddress)
         testConductor.enter("down-third-node")

-        awaitUpConvergence(nrOfMembers = 3, canNotBePartOfRing = Seq(thirdAddress))
+        awaitUpConvergence(numberOfMembers = 3, canNotBePartOfMemberRing = Seq(thirdAddress))
         cluster.latestGossip.members.exists(_.address == thirdAddress) must be(false)
         testConductor.enter("await-completion")
       }

       runOn(third) {
         cluster.join(node(first).address)
-        awaitUpConvergence(nrOfMembers = 4)
+        awaitUpConvergence(numberOfMembers = 4)
         testConductor.enter("all-up")
       }

       runOn(second, fourth) {
         cluster.join(node(first).address)
-        awaitUpConvergence(nrOfMembers = 4)
+        awaitUpConvergence(numberOfMembers = 4)

         val thirdAddress = node(third).address
         testConductor.enter("all-up")

         testConductor.enter("down-third-node")

-        awaitUpConvergence(nrOfMembers = 3, canNotBePartOfRing = Seq(thirdAddress))
+        awaitUpConvergence(numberOfMembers = 3, canNotBePartOfMemberRing = Seq(thirdAddress))
         testConductor.enter("await-completion")
       }
     }
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala
new file mode 100644
index 0000000000..a8191057e7
--- /dev/null
+++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala
@@ -0,0 +1,125 @@
+/**
+ * Copyright (C) 2009-2012 Typesafe Inc.
+ */
+package akka.cluster
+
+import com.typesafe.config.ConfigFactory
+import org.scalatest.BeforeAndAfter
+import akka.remote.testkit.MultiNodeConfig
+import akka.remote.testkit.MultiNodeSpec
+import akka.testkit._
+import akka.actor.Address
+import akka.util.duration._
+
+object LeaderDowningNodeThatIsUnreachableMultiJvmSpec extends MultiNodeConfig {
+  val first = role("first")
+  val second = role("second")
+  val third = role("third")
+  val fourth = role("fourth")
+
+  commonConfig(debugConfig(on = false).
+    withFallback(ConfigFactory.parseString("""
+      akka.cluster {
+        auto-down = on
+        failure-detector.threshold = 4
+      }
+    """)).
+    withFallback(MultiNodeClusterSpec.clusterConfig))
+}
+
+class LeaderDowningNodeThatIsUnreachableMultiJvmNode1 extends LeaderDowningNodeThatIsUnreachableSpec
+class LeaderDowningNodeThatIsUnreachableMultiJvmNode2 extends LeaderDowningNodeThatIsUnreachableSpec
+class LeaderDowningNodeThatIsUnreachableMultiJvmNode3 extends LeaderDowningNodeThatIsUnreachableSpec
+class LeaderDowningNodeThatIsUnreachableMultiJvmNode4 extends LeaderDowningNodeThatIsUnreachableSpec
+
+class LeaderDowningNodeThatIsUnreachableSpec
+  extends MultiNodeSpec(LeaderDowningNodeThatIsUnreachableMultiJvmSpec)
+  with MultiNodeClusterSpec
+  with ImplicitSender with BeforeAndAfter {
+  import LeaderDowningNodeThatIsUnreachableMultiJvmSpec._
+
+  override def initialParticipants = 4
+
+  "The Leader in a 4 node cluster" must {
+
+    "be able to DOWN a 'last' node that is UNREACHABLE" taggedAs LongRunningTest in {
+      runOn(first) {
+        cluster.self
+        awaitUpConvergence(numberOfMembers = 4)
+
+        val fourthAddress = node(fourth).address
+        testConductor.enter("all-up")
+
+        // kill 'fourth' node
+        testConductor.shutdown(fourth, 0)
+        testConductor.removeNode(fourth)
+        testConductor.enter("down-fourth-node")
+
+        // --- HERE THE LEADER SHOULD DETECT FAILURE AND AUTO-DOWN THE UNREACHABLE NODE ---
+
+        awaitUpConvergence(numberOfMembers = 3, canNotBePartOfMemberRing = Seq(fourthAddress), 30.seconds.dilated)
+        testConductor.enter("await-completion")
+      }
+
+      runOn(fourth) {
+        cluster.join(node(first).address)
+
+        awaitUpConvergence(numberOfMembers = 4)
+        testConductor.enter("all-up")
+      }
+
+      runOn(second, third) {
+        cluster.join(node(first).address)
+        awaitUpConvergence(numberOfMembers = 4)
+
+        val fourthAddress = node(fourth).address
+        testConductor.enter("all-up")
+
+        testConductor.enter("down-fourth-node")
+
+        awaitUpConvergence(numberOfMembers = 3, canNotBePartOfMemberRing = Seq(fourthAddress), 30.seconds.dilated)
+        testConductor.enter("await-completion")
+      }
+    }
+
+    "be able to DOWN a 'middle' node that is UNREACHABLE" taggedAs LongRunningTest in {
+      runOn(first) {
+        cluster.self
+        awaitUpConvergence(numberOfMembers = 3)
+
+        val secondAddress = node(second).address
+        testConductor.enter("all-up")
+
+        // kill 'second' node
+        testConductor.shutdown(second, 0)
+        testConductor.removeNode(second)
+        testConductor.enter("down-second-node")
+
+        // --- HERE THE LEADER SHOULD DETECT FAILURE AND AUTO-DOWN THE UNREACHABLE NODE ---
+
+        awaitUpConvergence(numberOfMembers = 2, canNotBePartOfMemberRing = Seq(secondAddress), 30.seconds.dilated)
+        testConductor.enter("await-completion")
+      }
+
+      runOn(second) {
+        cluster.join(node(first).address)
+
+        awaitUpConvergence(numberOfMembers = 3)
+        testConductor.enter("all-up")
+      }
+
+      runOn(second, third) {
+        cluster.join(node(first).address)
+        awaitUpConvergence(numberOfMembers = 3)
+
+        val secondAddress = node(second).address
+        testConductor.enter("all-up")
+
+        testConductor.enter("down-second-node")
+
+        awaitUpConvergence(numberOfMembers = 2, canNotBePartOfMemberRing = Seq(secondAddress), 30 seconds)
+        testConductor.enter("await-completion")
+      }
+    }
+  }
+}
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala
index d5624f4999..82adf065c5 100644
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala
+++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala
@@ -8,14 +8,17 @@ import com.typesafe.config.ConfigFactory
 import akka.actor.Address
 import akka.remote.testconductor.RoleName
 import akka.remote.testkit.MultiNodeSpec
+import akka.testkit._
 import akka.util.duration._
+import akka.util.Duration

 object MultiNodeClusterSpec {
   def clusterConfig: Config = ConfigFactory.parseString("""
     akka.cluster {
-      gossip-frequency = 200 ms
-      leader-actions-frequency = 200 ms
-      periodic-tasks-initial-delay = 300 ms
+      gossip-frequency = 200 ms
+      leader-actions-frequency = 200 ms
+      unreachable-nodes-reaper-frequency = 200 ms
+      periodic-tasks-initial-delay = 300 ms
     }
     akka.test {
       single-expect-default = 5 s
@@ -51,21 +54,19 @@ trait MultiNodeClusterSpec { self: MultiNodeSpec ⇒

   /**
    * Wait until the expected number of members has status Up and convergence has been reached.
+   * Also asserts that nodes in the 'canNotBePartOfMemberRing' are *not* part of the cluster ring.
    */
-  def awaitUpConvergence(numberOfMembers: Int): Unit = {
-    awaitCond(cluster.latestGossip.members.size == numberOfMembers)
-    awaitCond(cluster.latestGossip.members.forall(_.status == MemberStatus.Up))
-    awaitCond(cluster.convergence.isDefined, 10 seconds)
-  }
-
-  /**
-   * Wait until the expected number of members has status Up and convergence has been reached.
-   * Also asserts that nodes in the 'canNotBePartOfRing' are *not* part of the cluster ring.
-   */
-  def awaitUpConvergence(nrOfMembers: Int, canNotBePartOfRing: Seq[Address] = Seq.empty[Address]): Unit = {
-    awaitCond(cluster.latestGossip.members.size == nrOfMembers)
-    awaitCond(cluster.latestGossip.members.forall(_.status == MemberStatus.Up))
-    awaitCond(canNotBePartOfRing forall (address ⇒ !(cluster.latestGossip.members exists (_.address == address))))
+  def awaitUpConvergence(
+    numberOfMembers: Int,
+    canNotBePartOfMemberRing: Seq[Address] = Seq.empty[Address],
+    timeout: Duration = 10.seconds.dilated): Unit = {
+    awaitCond(cluster.latestGossip.members.size == numberOfMembers, timeout)
+    awaitCond(cluster.latestGossip.members.forall(_.status == MemberStatus.Up), timeout)
+    awaitCond(cluster.convergence.isDefined, timeout)
+    if (!canNotBePartOfMemberRing.isEmpty) // don't run this on an empty set
+      awaitCond(
+        canNotBePartOfMemberRing forall (address ⇒ !(cluster.latestGossip.members exists (_.address == address))),
+        timeout)
   }

   def roleOfLeader(nodesInCluster: Seq[RoleName]): RoleName = {
diff --git a/akka-cluster/src/test/scala/akka/cluster/LeaderDowningSpec.scala b/akka-cluster/src/test/scala/akka/cluster/LeaderDowningSpec.scala
deleted file mode 100644
index 15e6cec838..0000000000
--- a/akka-cluster/src/test/scala/akka/cluster/LeaderDowningSpec.scala
+++ /dev/null
@@ -1,144 +0,0 @@
-/**
- * Copyright (C) 2009-2012 Typesafe Inc.
- */
-
-package akka.cluster
-
-import akka.testkit._
-import akka.dispatch._
-import akka.actor._
-import akka.remote._
-import akka.util.duration._
-
-import com.typesafe.config._
-
-import java.net.InetSocketAddress
-
-class LeaderDowningSpec extends ClusterSpec with ImplicitSender {
-  val portPrefix = 4
-
-  var node1: Cluster = _
-  var node2: Cluster = _
-  var node3: Cluster = _
-  var node4: Cluster = _
-
-  var system1: ActorSystemImpl = _
-  var system2: ActorSystemImpl = _
-  var system3: ActorSystemImpl = _
-  var system4: ActorSystemImpl = _
-
-  try {
-    "The Leader in a 4 node cluster" must {
-
-      // ======= NODE 1 ========
-      system1 = ActorSystem("system1", ConfigFactory
-        .parseString("""
-          akka {
-            actor.provider = "akka.remote.RemoteActorRefProvider"
-            remote.netty.port = %d550
-          }""".format(portPrefix))
-        .withFallback(system.settings.config))
-        .asInstanceOf[ActorSystemImpl]
-      val remote1 = system1.provider.asInstanceOf[RemoteActorRefProvider]
-      node1 = Cluster(system1)
-      val fd1 = node1.failureDetector
-      val address1 = node1.remoteAddress
-
-      // ======= NODE 2 ========
-      system2 = ActorSystem("system2", ConfigFactory
-        .parseString("""
-          akka {
-            actor.provider = "akka.remote.RemoteActorRefProvider"
-            remote.netty.port = %d551
-            cluster.node-to-join = "akka://system1@localhost:%d550"
-          }""".format(portPrefix, portPrefix))
-        .withFallback(system.settings.config))
-        .asInstanceOf[ActorSystemImpl]
-      val remote2 = system2.provider.asInstanceOf[RemoteActorRefProvider]
-      node2 = Cluster(system2)
-      val fd2 = node2.failureDetector
-      val address2 = node2.remoteAddress
-
-      // ======= NODE 3 ========
-      system3 = ActorSystem("system3", ConfigFactory
-        .parseString("""
-          akka {
-            actor.provider = "akka.remote.RemoteActorRefProvider"
-            remote.netty.port = %d552
-            cluster.node-to-join = "akka://system1@localhost:%d550"
-          }""".format(portPrefix, portPrefix))
-        .withFallback(system.settings.config))
-        .asInstanceOf[ActorSystemImpl]
-      val remote3 = system3.provider.asInstanceOf[RemoteActorRefProvider]
-      node3 = Cluster(system3)
-      val fd3 = node3.failureDetector
-      val address3 = node3.remoteAddress
-
-      // ======= NODE 4 ========
-      system4 = ActorSystem("system4", ConfigFactory
-        .parseString("""
-          akka {
-            actor.provider = "akka.remote.RemoteActorRefProvider"
-            remote.netty.port = %d553
-            cluster.node-to-join = "akka://system1@localhost:%d550"
-          }""".format(portPrefix, portPrefix))
-        .withFallback(system.settings.config))
-        .asInstanceOf[ActorSystemImpl]
-      val remote4 = system4.provider.asInstanceOf[RemoteActorRefProvider]
-      node4 = Cluster(system4)
-      val fd4 = node4.failureDetector
-      val address4 = node4.remoteAddress
-
-      "be able to DOWN a (last) node that is UNREACHABLE" taggedAs LongRunningTest in {
-
-        println("Give the system time to converge...")
-        awaitConvergence(node1 :: node2 :: node3 :: node4 :: Nil)
-
-        // shut down system4
-        node4.shutdown()
-        system4.shutdown()
-
-        // wait for convergence - e.g. the leader to auto-down the failed node
-        println("Give the system time to converge...")
-        Thread.sleep(30.seconds.dilated.toMillis)
-        awaitConvergence(node1 :: node2 :: node3 :: Nil)
-
-        node1.latestGossip.members.size must be(3)
-        node1.latestGossip.members.exists(_.address == address4) must be(false)
-      }
-
-      "be able to DOWN a (middle) node that is UNREACHABLE" taggedAs LongRunningTest in {
-        // shut down system4
-        node2.shutdown()
-        system2.shutdown()
-
-        // wait for convergence - e.g. the leader to auto-down the failed node
-        println("Give the system time to converge...")
-        Thread.sleep(30.seconds.dilated.toMillis)
-        awaitConvergence(node1 :: node3 :: Nil)
-
-        node1.latestGossip.members.size must be(2)
-        node1.latestGossip.members.exists(_.address == address4) must be(false)
-        node1.latestGossip.members.exists(_.address == address2) must be(false)
-      }
-    }
-  } catch {
-    case e: Exception ⇒
-      e.printStackTrace
-      fail(e.toString)
-  }
-
-  override def atTermination() {
-    if (node1 ne null) node1.shutdown()
-    if (system1 ne null) system1.shutdown()
-
-    if (node2 ne null) node2.shutdown()
-    if (system2 ne null) system2.shutdown()
-
-    if (node3 ne null) node3.shutdown()
-    if (system3 ne null) system3.shutdown()
-
-    if (node4 ne null) node4.shutdown()
-    if (system4 ne null) system4.shutdown()
-  }
-}
diff --git a/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala b/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala
index f66e120195..37ebd0a193 100644
--- a/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala
+++ b/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala
@@ -18,6 +18,7 @@ import akka.event.Logging
 import org.scalatest.BeforeAndAfterEach
 import java.net.InetSocketAddress
 import java.net.InetAddress
+import akka.testkit.TimingTest

 object BarrierSpec {
   case class Failed(ref: ActorRef, thr: Throwable)
@@ -46,7 +47,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with

   "A BarrierCoordinator" must {

-    "register clients and remove them" in {
+    "register clients and remove them" taggedAs TimingTest in {
       val b = getBarrier()
       b ! NodeInfo(A, AddressFromURIString("akka://sys"), system.deadLetters)
       b ! RemoveClient(B)
@@ -57,7 +58,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with
       expectMsg(Failed(b, BarrierEmpty(Data(Set(), "", Nil), "cannot remove RoleName(a): no client to remove")))
     }

-    "register clients and disconnect them" in {
+    "register clients and disconnect them" taggedAs TimingTest in {
       val b = getBarrier()
       b ! NodeInfo(A, AddressFromURIString("akka://sys"), system.deadLetters)
       b ! ClientDisconnected(B)
@@ -71,13 +72,13 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with
       expectMsg(Failed(b, BarrierEmpty(Data(Set(), "", Nil), "cannot disconnect RoleName(a): no client to disconnect")))
     }

-    "fail entering barrier when nobody registered" in {
+    "fail entering barrier when nobody registered" taggedAs TimingTest in {
       val b = getBarrier()
       b ! EnterBarrier("b")
       expectMsg(ToClient(BarrierResult("b", false)))
     }

-    "enter barrier" in {
+    "enter barrier" taggedAs TimingTest in {
       val barrier = getBarrier()
       val a, b = TestProbe()
       barrier ! NodeInfo(A, AddressFromURIString("akka://sys"), a.ref)
@@ -91,7 +92,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with
       }
     }

-    "enter barrier with joining node" in {
+    "enter barrier with joining node" taggedAs TimingTest in {
       val barrier = getBarrier()
       val a, b, c = TestProbe()
       barrier ! NodeInfo(A, AddressFromURIString("akka://sys"), a.ref)
@@ -108,7 +109,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with
       }
     }

-    "enter barrier with leaving node" in {
+    "enter barrier with leaving node" taggedAs TimingTest in {
       val barrier = getBarrier()
       val a, b, c = TestProbe()
       barrier ! NodeInfo(A, AddressFromURIString("akka://sys"), a.ref)
@@ -127,7 +128,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with
       expectNoMsg(1 second)
     }

-    "leave barrier when last “arrived” is removed" in {
+    "leave barrier when last “arrived” is removed" taggedAs TimingTest in {
       val barrier = getBarrier()
       val a, b = TestProbe()
       barrier ! NodeInfo(A, AddressFromURIString("akka://sys"), a.ref)
@@ -138,7 +139,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with
       b.expectMsg(ToClient(BarrierResult("foo", true)))
     }

-    "fail barrier with disconnecing node" in {
+    "fail barrier with disconnecing node" taggedAs TimingTest in {
       val barrier = getBarrier()
       val a, b = TestProbe()
       val nodeA = NodeInfo(A, AddressFromURIString("akka://sys"), a.ref)
@@ -151,7 +152,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with
       expectMsg(Failed(barrier, ClientLost(Data(Set(nodeA), "bar", a.ref :: Nil), B)))
     }

-    "fail barrier with disconnecing node who already arrived" in {
+    "fail barrier with disconnecing node who already arrived" taggedAs TimingTest in {
       val barrier = getBarrier()
       val a, b, c = TestProbe()
       val nodeA = NodeInfo(A, AddressFromURIString("akka://sys"), a.ref)
@@ -167,7 +168,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with
       expectMsg(Failed(barrier, ClientLost(Data(Set(nodeA, nodeC), "bar", a.ref :: Nil), B)))
     }

-    "fail when entering wrong barrier" in {
+    "fail when entering wrong barrier" taggedAs TimingTest in {
       val barrier = getBarrier()
       val a, b = TestProbe()
       val nodeA = NodeInfo(A, AddressFromURIString("akka://sys"), a.ref)
@@ -181,7 +182,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with
       expectMsg(Failed(barrier, WrongBarrier("foo", b.ref, Data(Set(nodeA, nodeB), "bar", a.ref :: Nil))))
     }

-    "fail barrier after first failure" in {
+    "fail barrier after first failure" taggedAs TimingTest in {
       val barrier = getBarrier()
       val a = TestProbe()
       EventFilter[BarrierEmpty](occurrences = 1) intercept {
@@ -193,7 +194,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with
       a.expectMsg(ToClient(BarrierResult("right", false)))
     }

-    "fail after barrier timeout" in {
+    "fail after barrier timeout" taggedAs TimingTest in {
       val barrier = getBarrier()
       val a, b = TestProbe()
       val nodeA = NodeInfo(A, AddressFromURIString("akka://sys"), a.ref)
@@ -206,7 +207,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with
       }
     }

-    "fail if a node registers twice" in {
+    "fail if a node registers twice" taggedAs TimingTest in {
       val barrier = getBarrier()
       val a, b = TestProbe()
       val nodeA = NodeInfo(A, AddressFromURIString("akka://sys"), a.ref)
@@ -218,7 +219,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with
       expectMsg(Failed(barrier, DuplicateNode(Data(Set(nodeA), "", Nil), nodeB)))
     }

-    "finally have no failure messages left" in {
+    "finally have no failure messages left" taggedAs TimingTest in {
       expectNoMsg(1 second)
     }

@@ -226,7 +227,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with

   "A Controller with BarrierCoordinator" must {

-    "register clients and remove them" in {
+    "register clients and remove them" taggedAs TimingTest in {
       val b = getController(1)
       b ! NodeInfo(A, AddressFromURIString("akka://sys"), testActor)
 expectMsg(ToClient(Done))
@@ -237,7 +238,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with
       }
     }

-    "register clients and disconnect them" in {
+    "register clients and disconnect them" taggedAs TimingTest in {
       val b = getController(1)
       b ! NodeInfo(A, AddressFromURIString("akka://sys"), testActor)
       expectMsg(ToClient(Done))
@@ -250,13 +251,13 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with
       }
     }

-    "fail entering barrier when nobody registered" in {
+    "fail entering barrier when nobody registered" taggedAs TimingTest in {
       val b = getController(0)
       b ! EnterBarrier("b")
       expectMsg(ToClient(BarrierResult("b", false)))
     }

-    "enter barrier" in {
+    "enter barrier" taggedAs TimingTest in {
       val barrier = getController(2)
       val a, b = TestProbe()
       barrier ! NodeInfo(A, AddressFromURIString("akka://sys"), a.ref)
@@ -272,7 +273,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with
       }
     }

-    "enter barrier with joining node" in {
+    "enter barrier with joining node" taggedAs TimingTest in {
       val barrier = getController(2)
       val a, b, c = TestProbe()
       barrier ! NodeInfo(A, AddressFromURIString("akka://sys"), a.ref)
@@ -292,7 +293,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with
       }
     }

-    "enter barrier with leaving node" in {
+    "enter barrier with leaving node" taggedAs TimingTest in {
       val barrier = getController(3)
       val a, b, c = TestProbe()
       barrier ! NodeInfo(A, AddressFromURIString("akka://sys"), a.ref)
@@ -314,7 +315,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with
       expectNoMsg(1 second)
     }

-    "leave barrier when last “arrived” is removed" in {
+    "leave barrier when last “arrived” is removed" taggedAs TimingTest in {
       val barrier = getController(2)
       val a, b = TestProbe()
       barrier ! NodeInfo(A, AddressFromURIString("akka://sys"), a.ref)
@@ -327,7 +328,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with
       b.expectMsg(ToClient(BarrierResult("foo", true)))
     }

-    "fail barrier with disconnecing node" in {
+    "fail barrier with disconnecing node" taggedAs TimingTest in {
       val barrier = getController(2)
       val a, b = TestProbe()
       val nodeA = NodeInfo(A, AddressFromURIString("akka://sys"), a.ref)
@@ -344,7 +345,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with
       a.expectMsg(ToClient(BarrierResult("bar", false)))
     }

-    "fail barrier with disconnecing node who already arrived" in {
+    "fail barrier with disconnecing node who already arrived" taggedAs TimingTest in {
       val barrier = getController(3)
       val a, b, c = TestProbe()
       val nodeA = NodeInfo(A, AddressFromURIString("akka://sys"), a.ref)
@@ -363,7 +364,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with
       a.expectMsg(ToClient(BarrierResult("bar", false)))
     }

-    "fail when entering wrong barrier" in {
+    "fail when entering wrong barrier" taggedAs TimingTest in {
       val barrier = getController(2)
       val a, b = TestProbe()
       val nodeA = NodeInfo(A, AddressFromURIString("akka://sys"), a.ref)
@@ -380,7 +381,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with
       b.expectMsg(ToClient(BarrierResult("foo", false)))
     }

-    "not really fail after barrier timeout" in {
+    "not really fail after barrier timeout" taggedAs TimingTest in {
       val barrier = getController(2)
       val a, b = TestProbe()
       val nodeA = NodeInfo(A, AddressFromURIString("akka://sys"), a.ref)
@@ -398,7 +399,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with
       b.expectMsg(ToClient(BarrierResult("right", true)))
     }

-    "fail if a node registers twice" in {
+    "fail if a node registers twice" taggedAs TimingTest in {
       val controller = getController(2)
       val a, b = TestProbe()
       val nodeA = NodeInfo(A, AddressFromURIString("akka://sys"), a.ref)
@@ -411,7 +412,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with
       b.expectMsg(ToClient(BarrierResult("initial startup", false)))
     }

-    "fail subsequent barriers if a node registers twice" in {
+    "fail subsequent barriers if a node registers twice" taggedAs TimingTest in {
       val controller = getController(1)
       val a, b = TestProbe()
       val nodeA = NodeInfo(A, AddressFromURIString("akka://sys"), a.ref)
@@ -426,7 +427,7 @@ class BarrierSpec extends AkkaSpec(BarrierSpec.config) with ImplicitSender with
       a.expectMsg(ToClient(BarrierResult("x", false)))
     }

-    "finally have no failure messages left" in {
+    "finally have no failure messages left" taggedAs TimingTest in {
       expectNoMsg(1 second)
     }
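
Note on the MultiNodeClusterSpec change above: the two awaitUpConvergence overloads are collapsed into a single helper with defaulted parameters, and its timeout is dilated so it scales with akka.test.timefactor. A minimal usage sketch from inside a spec that mixes in MultiNodeClusterSpec; the removedAddress value and the 15-second timeout are illustrative only, not taken from the patch:

    import akka.util.duration._
    import akka.testkit._   // provides .dilated on durations

    // All parameters except numberOfMembers have defaults,
    // so callers name only what the assertion needs.
    awaitUpConvergence(numberOfMembers = 3)

    awaitUpConvergence(
      numberOfMembers = 2,
      canNotBePartOfMemberRing = Seq(removedAddress), // removedAddress: a hypothetical akka.actor.Address
      timeout = 15.seconds.dilated)                   // dilated multiplies by akka.test.timefactor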