From ea090bdfd2686dad41550420b069f42c4c254a26 Mon Sep 17 00:00:00 2001 From: Roland Date: Thu, 24 May 2012 13:56:50 +0200 Subject: [PATCH 01/92] get better auto-generated actor system names in tests, see #2122 --- .../src/test/scala/akka/cluster/ClusterSpec.scala | 4 ++-- .../testconductor/NetworkFailureInjector.scala | 2 +- .../scala/akka/remote/testkit/MultiNodeSpec.scala | 2 +- .../src/test/scala/akka/testkit/AkkaSpec.scala | 12 ++++++++---- 4 files changed, 12 insertions(+), 8 deletions(-) diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala index 38017ad00c..854d9e5584 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala @@ -47,11 +47,11 @@ abstract class ClusterSpec(_system: ActorSystem) extends AkkaSpec(_system) { def portPrefix: Int - def this(config: Config) = this(ActorSystem(AkkaSpec.getCallerName, config.withFallback(ClusterSpec.testConf))) + def this(config: Config) = this(ActorSystem(AkkaSpec.getCallerName(classOf[ClusterSpec]), config.withFallback(ClusterSpec.testConf))) def this(s: String) = this(ConfigFactory.parseString(s)) - def this() = this(ActorSystem(AkkaSpec.getCallerName, ClusterSpec.testConf)) + def this() = this(ActorSystem(AkkaSpec.getCallerName(classOf[ClusterSpec]), ClusterSpec.testConf)) def awaitConvergence(nodes: Iterable[Cluster], maxWaitTime: Duration = 60 seconds) { val deadline = maxWaitTime.fromNow diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala index ba8f8d1285..b425518044 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala @@ -135,7 +135,7 @@ private[akka] object 
NetworkFailureInjector { } /** - * Brief overview: all network traffic passes through the `sender`/`receiver` FSMs managed + * Brief overview: all network traffic passes through the `sender`/`receiver` FSMs managed * by the FailureInjector of the TestConductor extension. These can * pass through requests immediately, drop them or throttle to a desired rate. The FSMs are * registered in the TestConductorExt.failureInjector so that settings can be applied from diff --git a/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala b/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala index 3822a1f529..e6a1ca6dac 100644 --- a/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala +++ b/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala @@ -119,7 +119,7 @@ abstract class MultiNodeSpec(val mySelf: RoleName, _system: ActorSystem) extends import MultiNodeSpec._ - def this(config: MultiNodeConfig) = this(config.mySelf, ActorSystem(AkkaSpec.getCallerName, config.config)) + def this(config: MultiNodeConfig) = this(config.mySelf, ActorSystem(AkkaSpec.getCallerName(classOf[MultiNodeSpec]), config.config)) /* * Test Class Interface diff --git a/akka-testkit/src/test/scala/akka/testkit/AkkaSpec.scala b/akka-testkit/src/test/scala/akka/testkit/AkkaSpec.scala index f24ea49b8c..c7000f2cf7 100644 --- a/akka-testkit/src/test/scala/akka/testkit/AkkaSpec.scala +++ b/akka-testkit/src/test/scala/akka/testkit/AkkaSpec.scala @@ -46,9 +46,13 @@ object AkkaSpec { ConfigFactory.parseMap(map.asJava) } - def getCallerName: String = { + def getCallerName(clazz: Class[_]): String = { val s = Thread.currentThread.getStackTrace map (_.getClassName) drop 1 dropWhile (_ matches ".*AkkaSpec.?$") - s.head.replaceFirst(""".*\.""", "").replaceAll("[^a-zA-Z_0-9]", "_") + val reduced = s.lastIndexWhere(_ == clazz.getName) match { + case -1 ⇒ s + case z ⇒ s drop (z + 1) + } + reduced.head.replaceFirst(""".*\.""", 
"").replaceAll("[^a-zA-Z_0-9]", "_") } } @@ -56,13 +60,13 @@ object AkkaSpec { abstract class AkkaSpec(_system: ActorSystem) extends TestKit(_system) with WordSpec with MustMatchers with BeforeAndAfterAll { - def this(config: Config) = this(ActorSystem(AkkaSpec.getCallerName, config.withFallback(AkkaSpec.testConf))) + def this(config: Config) = this(ActorSystem(AkkaSpec.getCallerName(getClass), config.withFallback(AkkaSpec.testConf))) def this(s: String) = this(ConfigFactory.parseString(s)) def this(configMap: Map[String, _]) = this(AkkaSpec.mapToConfig(configMap)) - def this() = this(ActorSystem(AkkaSpec.getCallerName, AkkaSpec.testConf)) + def this() = this(ActorSystem(AkkaSpec.getCallerName(getClass), AkkaSpec.testConf)) val log: LoggingAdapter = Logging(system, this.getClass) From 96f264e842a3f9f51a2530f325c71cee35e8b876 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 28 May 2012 16:49:49 +0200 Subject: [PATCH 02/92] Initial stab at DeathWatch 2.0, tests don't pass just yet --- .../test/scala/akka/routing/RoutingSpec.scala | 4 +- .../src/main/scala/akka/actor/Actor.scala | 2 +- .../src/main/scala/akka/actor/ActorCell.scala | 80 +++++++++++++------ .../scala/akka/actor/ActorRefProvider.scala | 32 ++------ .../main/scala/akka/actor/ActorSystem.scala | 6 -- .../akka/dispatch/AbstractDispatcher.scala | 4 +- .../main/scala/akka/event/DeathWatch.scala | 19 ----- .../main/scala/akka/pattern/AskSupport.scala | 31 +++---- .../akka/pattern/GracefulStopSupport.scala | 8 +- .../docs/actor/FaultHandlingTestBase.java | 4 +- .../docs/actor/FaultHandlingDocSpec.scala | 4 +- .../akka/remote/RemoteActorRefProvider.scala | 27 +------ .../main/scala/akka/remote/RemoteDaemon.scala | 10 ++- 13 files changed, 99 insertions(+), 132 deletions(-) delete mode 100644 akka-actor/src/main/scala/akka/event/DeathWatch.scala diff --git a/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala index 
5ad6da271f..98d3e71384 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala @@ -73,7 +73,7 @@ class RoutingSpec extends AkkaSpec(RoutingSpec.config) with DefaultTimeout with watch(router) watch(c2) system.stop(c2) - expectMsg(Terminated(c2)) + expectMsg(Terminated(c2)(stopped = true)) // it might take a while until the Router has actually processed the Terminated message awaitCond { router ! "" @@ -84,7 +84,7 @@ class RoutingSpec extends AkkaSpec(RoutingSpec.config) with DefaultTimeout with res == Seq(c1, c1) } system.stop(c1) - expectMsg(Terminated(router)) + expectMsg(Terminated(router)(stopped = true)) } "be able to send their routees" in { diff --git a/akka-actor/src/main/scala/akka/actor/Actor.scala b/akka-actor/src/main/scala/akka/actor/Actor.scala index 3d93e52a54..8906dcd60e 100644 --- a/akka-actor/src/main/scala/akka/actor/Actor.scala +++ b/akka-actor/src/main/scala/akka/actor/Actor.scala @@ -59,7 +59,7 @@ case object Kill extends Kill { /** * When Death Watch is used, the watcher will receive a Terminated(watched) message when watched is terminated. 
*/ -case class Terminated(@BeanProperty actor: ActorRef) extends PossiblyHarmful +case class Terminated(@BeanProperty actor: ActorRef)(@BeanProperty stopped: Boolean) extends PossiblyHarmful abstract class ReceiveTimeout extends PossiblyHarmful diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index 0955595640..6d49045099 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -6,7 +6,6 @@ package akka.actor import akka.dispatch._ import scala.annotation.tailrec -import scala.collection.immutable.{ Stack, TreeMap } import java.util.concurrent.TimeUnit import java.util.concurrent.TimeUnit.MILLISECONDS import akka.event.Logging.{ Debug, Warning, Error } @@ -16,6 +15,7 @@ import java.io.{ NotSerializableException, ObjectOutputStream } import akka.serialization.SerializationExtension import akka.util.NonFatal import akka.event.Logging.LogEventException +import collection.immutable.{ TreeSet, Stack, TreeMap } //TODO: everything here for current compatibility - could be limited more @@ -187,6 +187,8 @@ private[akka] object ActorCell { final val behaviorStackPlaceHolder: Stack[Actor.Receive] = Stack.empty.push(Actor.emptyBehavior) + final val emptyActorRefSet: Set[ActorRef] = TreeSet.empty + sealed trait SuspendReason case object UserRequest extends SuspendReason case class Recreation(cause: Throwable) extends SuspendReason @@ -407,16 +409,14 @@ private[akka] class ActorCell( actor.asInstanceOf[InternalActorRef].stop() } - var currentMessage: Envelope = null - + var currentMessage: Envelope = _ var actor: Actor = _ - private var behaviorStack: Stack[Actor.Receive] = Stack.empty - @volatile //This must be volatile since it isn't protected by the mailbox status var mailbox: Mailbox = _ - var nextNameSequence: Long = 0 + var watching: Set[ActorRef] = emptyActorRefSet + var watchedBy: Set[ActorRef] = emptyActorRefSet //Not thread safe, 
so should only be used inside the actor that inhabits this ActorCell final protected def randomName(): String = { @@ -462,13 +462,25 @@ private[akka] class ActorCell( override final def watch(subject: ActorRef): ActorRef = { // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ - dispatcher.systemDispatch(this, Link(subject)) + subject match { + case a: InternalActorRef ⇒ + if (!watching.contains(a)) { + watching += a + a.sendSystemMessage(Watch(a, self)) + } + } subject } override final def unwatch(subject: ActorRef): ActorRef = { // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ - dispatcher.systemDispatch(this, Unlink(subject)) + subject match { + case a: InternalActorRef ⇒ + if (watching.contains(a)) { + watching -= a + a.sendSystemMessage(Unwatch(a, self)) + } + } subject } @@ -567,15 +579,17 @@ private[akka] class ActorCell( def resume(): Unit = if (isNormal) dispatcher resume this - def link(subject: ActorRef): Unit = if (!isTerminating) { - if (system.deathWatch.subscribe(self, subject)) { - if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, clazz(actor), "now monitoring " + subject)) + def addWatcher(watcher: ActorRef): Unit = if (!isTerminating) { + if (!watchedBy.contains(watcher)) { + watchedBy += watcher + if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, clazz(actor), self + " watched by " + watcher)) } } - def unlink(subject: ActorRef): Unit = if (!isTerminating) { - if (system.deathWatch.unsubscribe(self, subject)) { - if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, clazz(actor), "stopped monitoring " + subject)) + def remWatcher(watcher: ActorRef): Unit = if (!isTerminating) { + if (watchedBy.contains(watcher)) { + watchedBy -= watcher + if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, clazz(actor), self + " unwatched by " + watcher)) } } @@ -603,15 +617,17 @@ 
private[akka] class ActorCell( try { message match { - case Create() ⇒ create() - case Recreate(cause) ⇒ recreate(cause) - case Link(subject) ⇒ link(subject) - case Unlink(subject) ⇒ unlink(subject) - case Suspend() ⇒ suspend() - case Resume() ⇒ resume() - case Terminate() ⇒ terminate() - case Supervise(child) ⇒ supervise(child) - case ChildTerminated(child) ⇒ handleChildTerminated(child) + case Create() ⇒ create() + case Recreate(cause) ⇒ recreate(cause) + case Watch(`self`, watcher) ⇒ addWatcher(watcher) + case Watch(watchee, `self`) ⇒ watch(watchee) + case Unwatch(`self`, watcher) ⇒ remWatcher(watcher) + case Unwatch(watchee, `self`) ⇒ unwatch(watchee) + case Suspend() ⇒ suspend() + case Resume() ⇒ resume() + case Terminate() ⇒ terminate() + case Supervise(child) ⇒ supervise(child) + case ChildTerminated(child) ⇒ handleChildTerminated(child) } } catch { case e @ (_: InterruptedException | NonFatal(_)) ⇒ handleInvokeFailure(e, "error while processing " + message) @@ -698,7 +714,23 @@ private[akka] class ActorCell( } finally { try { parent.sendSystemMessage(ChildTerminated(self)) - system.deathWatch.publish(Terminated(self)) + if (!watchedBy.isEmpty) { + val terminated = Terminated(self)(stopped = true) + watchedBy foreach { + watcher ⇒ + try watcher.tell(terminated) catch { + case NonFatal(t) ⇒ system.eventStream.publish(Error(t, self.path.toString, clazz(a), "deathwatch")) + } + } + } + if (!watching.isEmpty) { + watching foreach { + watchee ⇒ + try watchee.tell(Unwatch(watchee, self)) catch { + case NonFatal(t) ⇒ system.eventStream.publish(Error(t, self.path.toString, clazz(a), "deathwatch")) + } + } + } if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, clazz(actor), "stopped")) } finally { diff --git a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala index 41473e7f7c..ca971de40e 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala 
+++ b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala @@ -38,11 +38,6 @@ trait ActorRefProvider { */ def deadLetters: ActorRef - /** - * Reference to the death watch service. - */ - def deathWatch: DeathWatch - /** * The root path for all actors within this actor system, including remote * address if enabled. @@ -162,10 +157,11 @@ trait ActorRefFactory { * INTERNAL USE ONLY */ protected def provider: ActorRefProvider + /** - * INTERNAL USE ONLY + * Returns the default MessageDispatcher associated with this ActorRefFactory */ - protected def dispatcher: MessageDispatcher + implicit def dispatcher: MessageDispatcher /** * Father of all children created by this interface. @@ -339,8 +335,6 @@ class LocalActorRefProvider( override val deadLetters: InternalActorRef = new DeadLetterActorRef(this, rootPath / "deadLetters", eventStream) - override val deathWatch: DeathWatch = new LocalDeathWatch(1024) //TODO make configrable - /* * generate name for temporary actor refs */ @@ -516,8 +510,8 @@ class LocalActorRefProvider( def init(_system: ActorSystemImpl) { system = _system // chain death watchers so that killing guardian stops the application - deathWatch.subscribe(systemGuardian, guardian) - deathWatch.subscribe(rootGuardian, systemGuardian) + guardian.sendSystemMessage(Watch(systemGuardian, guardian)) + rootGuardian.sendSystemMessage(Watch(rootGuardian, systemGuardian)) eventStream.startDefaultLoggers(_system) } @@ -566,19 +560,3 @@ class LocalActorRefProvider( def getExternalAddressFor(addr: Address): Option[Address] = if (addr == rootPath.address) Some(addr) else None } - -class LocalDeathWatch(val mapSize: Int) extends DeathWatch with ActorClassification { - - override def publish(event: Event): Unit = { - val monitors = dissociate(classify(event)) - if (monitors.nonEmpty) monitors.foreach(_ ! event) - } - - override def subscribe(subscriber: Subscriber, to: Classifier): Boolean = { - if (!super.subscribe(subscriber, to)) { - subscriber ! 
Terminated(to) - false - } else true - } -} - diff --git a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala index c5595212c2..94ee24336a 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala @@ -408,11 +408,6 @@ abstract class ExtendedActorSystem extends ActorSystem { */ def systemGuardian: InternalActorRef - /** - * Implementation of the mechanism which is used for watch()/unwatch(). - */ - def deathWatch: DeathWatch - /** * A ThreadFactory that can be used if the transport needs to create any Threads */ @@ -570,7 +565,6 @@ private[akka] class ActorSystemImpl(val name: String, applicationConfig: Config, def lookupRoot: InternalActorRef = provider.rootGuardian def guardian: InternalActorRef = provider.guardian def systemGuardian: InternalActorRef = provider.systemGuardian - def deathWatch: DeathWatch = provider.deathWatch def /(actorName: String): ActorPath = guardian.path / actorName def /(path: Iterable[String]): ActorPath = guardian.path / path diff --git a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala index 682e6ba4bf..8e160276e8 100644 --- a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala @@ -102,11 +102,11 @@ private[akka] case class ChildTerminated(child: ActorRef) extends SystemMessage /** * INTERNAL API */ -private[akka] case class Link(subject: ActorRef) extends SystemMessage // sent to self from ActorCell.watch +private[akka] case class Watch(watchee: ActorRef, watcher: ActorRef) extends SystemMessage // sent to self from ActorCell.watch /** * INTERNAL API */ -private[akka] case class Unlink(subject: ActorRef) extends SystemMessage // sent to self from ActorCell.unwatch +private[akka] case class Unwatch(watchee: ActorRef, watcher: ActorRef) extends 
SystemMessage // sent to self from ActorCell.unwatch final case class TaskInvocation(eventStream: EventStream, runnable: Runnable, cleanup: () ⇒ Unit) extends Runnable { def run(): Unit = diff --git a/akka-actor/src/main/scala/akka/event/DeathWatch.scala b/akka-actor/src/main/scala/akka/event/DeathWatch.scala deleted file mode 100644 index 8bf6935619..0000000000 --- a/akka-actor/src/main/scala/akka/event/DeathWatch.scala +++ /dev/null @@ -1,19 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -package akka.event - -import akka.actor._ - -/** - * The contract of DeathWatch is not properly expressed using the type system - * Whenever there is a publish, all listeners to the Terminated Actor should be atomically removed - * A failed subscribe should also only mean that the Classifier (ActorRef) that is listened to is already shut down - * See LocalDeathWatch for semantics - */ -abstract class DeathWatch extends ActorEventBus with ActorClassifier { - type Event = Terminated - - protected final def classify(event: Event): Classifier = event.actor -} diff --git a/akka-actor/src/main/scala/akka/pattern/AskSupport.scala b/akka-actor/src/main/scala/akka/pattern/AskSupport.scala index a20baaf533..634299248d 100644 --- a/akka-actor/src/main/scala/akka/pattern/AskSupport.scala +++ b/akka-actor/src/main/scala/akka/pattern/AskSupport.scala @@ -4,12 +4,11 @@ package akka.pattern import java.util.concurrent.TimeoutException -import akka.dispatch.{ Promise, Terminate, SystemMessage, Future } -import akka.event.DeathWatch import akka.util.Timeout import annotation.tailrec import akka.util.Unsafe import akka.actor._ +import akka.dispatch._ /** * This is what is used to complete a Future that is returned from an ask/? call, @@ -229,9 +228,14 @@ private[akka] final class PromiseActorRef private (val provider: ActorRefProvide if (!completedJustNow) provider.deadLetters ! 
message } - override def sendSystemMessage(message: SystemMessage): Unit = message match { - case _: Terminate ⇒ stop() - case _ ⇒ + override def sendSystemMessage(message: SystemMessage): Unit = { + val self = this + message match { + case _: Terminate ⇒ stop() + case Watch(`self`, watcher) ⇒ //FIXME IMPLEMENT + case Unwatch(`self`, watcher) ⇒ //FIXME IMPLEMENT + case _ ⇒ + } } override def isTerminated: Boolean = state match { @@ -241,23 +245,22 @@ private[akka] final class PromiseActorRef private (val provider: ActorRefProvide @tailrec override def stop(): Unit = { - def ensurePromiseCompleted(): Unit = - if (!result.isCompleted) result.tryComplete(Left(new ActorKilledException("Stopped"))) + def ensureCompleted(): Unit = if (!result.isCompleted) result.tryComplete(Left(new ActorKilledException("Stopped"))) state match { - case null ⇒ - // if path was never queried nobody can possibly be watching us, so we don't have to publish termination either - if (updateState(null, Stopped)) ensurePromiseCompleted() - else stop() + case null ⇒ // if path was never queried nobody can possibly be watching us, so we don't have to publish termination either + if (updateState(null, Stopped)) ensureCompleted() else stop() case p: ActorPath ⇒ if (updateState(p, StoppedWithPath(p))) { try { - ensurePromiseCompleted() - provider.deathWatch.publish(Terminated(this)) + ensureCompleted() + val termination = Terminated(this)(stopped = true) + // watchedBy foreach { w => w.tell(termination) } + // watching foreach { w.sendSystemMessage(Unwatch(w, self)) } } finally { provider.unregisterTempActor(p) } } else stop() - case Stopped | _: StoppedWithPath ⇒ + case Stopped | _: StoppedWithPath ⇒ // already stopped case Registering ⇒ stop() // spin until registration is completed before stopping } } diff --git a/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala b/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala index adcbe53f0b..d1e7fab327 100644 --- 
a/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala +++ b/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala @@ -4,9 +4,9 @@ package akka.pattern -import akka.dispatch.{ Promise, Future } import akka.actor._ import akka.util.{ Timeout, Duration } +import akka.dispatch.{ Unwatch, Watch, Promise, Future } trait GracefulStopSupport { /** @@ -39,11 +39,11 @@ trait GracefulStopSupport { } else system match { case e: ExtendedActorSystem ⇒ val ref = PromiseActorRef(e.provider, Timeout(timeout)) - e.deathWatch.subscribe(ref, target) + ref.sendSystemMessage(Watch(target, ref)) ref.result onComplete { case Right(Terminated(`target`)) ⇒ () // Ignore - case _ ⇒ e.deathWatch.unsubscribe(ref, target) - } // Just making sure we're not leaking here + case _ ⇒ ref.sendSystemMessage(Unwatch(target, ref)) // Just making sure we're not leaking here + } target ! PoisonPill ref.result map { case Terminated(`target`) ⇒ true } case s ⇒ throw new IllegalArgumentException("Unknown ActorSystem implementation: '" + s + "'") diff --git a/akka-docs/java/code/docs/actor/FaultHandlingTestBase.java b/akka-docs/java/code/docs/actor/FaultHandlingTestBase.java index bcc4705948..2d40071fe8 100644 --- a/akka-docs/java/code/docs/actor/FaultHandlingTestBase.java +++ b/akka-docs/java/code/docs/actor/FaultHandlingTestBase.java @@ -182,7 +182,7 @@ public class FaultHandlingTestBase { final TestProbe probe = new TestProbe(system); probe.watch(child); child.tell(new IllegalArgumentException()); - probe.expectMsg(new Terminated(child)); + probe.expectMsg(new Terminated(child, true)); //#stop //#escalate-kill @@ -190,7 +190,7 @@ public class FaultHandlingTestBase { probe.watch(child); assert Await.result(ask(child, "get", 5000), timeout).equals(0); child.tell(new Exception()); - probe.expectMsg(new Terminated(child)); + probe.expectMsg(new Terminated(child, true)); //#escalate-kill //#escalate-restart diff --git a/akka-docs/scala/code/docs/actor/FaultHandlingDocSpec.scala 
b/akka-docs/scala/code/docs/actor/FaultHandlingDocSpec.scala index 8ce16f1021..4e0fdc5ee5 100644 --- a/akka-docs/scala/code/docs/actor/FaultHandlingDocSpec.scala +++ b/akka-docs/scala/code/docs/actor/FaultHandlingDocSpec.scala @@ -111,7 +111,7 @@ class FaultHandlingDocSpec extends AkkaSpec with ImplicitSender { //#stop watch(child) // have testActor watch “child” child ! new IllegalArgumentException // break it - expectMsg(Terminated(child)) + expectMsg(Terminated(child)(stopped = true)) child.isTerminated must be(true) //#stop } @@ -125,7 +125,7 @@ class FaultHandlingDocSpec extends AkkaSpec with ImplicitSender { expectMsg(0) child2 ! new Exception("CRASH") // escalate failure - expectMsg(Terminated(child2)) + expectMsg(Terminated(child2)(stopped = true)) //#escalate-kill //#escalate-restart val supervisor2 = system.actorOf(Props[Supervisor2], "supervisor2") diff --git a/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala b/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala index a12c5f5578..eaecf67792 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala @@ -6,7 +6,7 @@ package akka.remote import akka.actor._ import akka.dispatch._ -import akka.event.{ DeathWatch, Logging, LoggingAdapter } +import akka.event.{ Logging, LoggingAdapter } import akka.event.EventStream import akka.serialization.Serialization import akka.serialization.SerializationExtension @@ -34,8 +34,6 @@ private[akka] class RemoteActorRefProvider( override def rootPath: ActorPath = local.rootPath override def deadLetters: InternalActorRef = local.deadLetters - override val deathWatch: DeathWatch = new RemoteDeathWatch(local.deathWatch, this) - // these are only available after init() override def rootGuardian: InternalActorRef = local.rootGuardian override def guardian: InternalActorRef = local.guardian @@ -246,25 +244,4 @@ private[akka] class RemoteActorRef 
private[akka] ( @throws(classOf[java.io.ObjectStreamException]) private def writeReplace(): AnyRef = SerializedActorRef(path) -} - -private[akka] class RemoteDeathWatch(val local: DeathWatch, val provider: RemoteActorRefProvider) extends DeathWatch { - - override def subscribe(watcher: ActorRef, watched: ActorRef): Boolean = watched match { - case r: RemoteRef ⇒ - val ret = local.subscribe(watcher, watched) - provider.actorFor(r.path.root / "remote") ! DaemonMsgWatch(watcher, watched) - ret - case l: LocalRef ⇒ - local.subscribe(watcher, watched) - case _ ⇒ - provider.log.error("unknown ActorRef type {} as DeathWatch target", watched.getClass) - false - } - - override def unsubscribe(watcher: ActorRef, watched: ActorRef): Boolean = local.unsubscribe(watcher, watched) - - override def unsubscribe(watcher: ActorRef): Unit = local.unsubscribe(watcher) - - override def publish(event: Terminated): Unit = local.publish(event) -} +} \ No newline at end of file diff --git a/akka-remote/src/main/scala/akka/remote/RemoteDaemon.scala b/akka-remote/src/main/scala/akka/remote/RemoteDaemon.scala index 7e4beecc7d..1e81cfaac6 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteDaemon.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteDaemon.scala @@ -8,6 +8,7 @@ import scala.annotation.tailrec import akka.actor.{ VirtualPathContainer, Terminated, Deploy, Props, Nobody, LocalActorRef, InternalActorRef, Address, ActorSystemImpl, ActorRef, ActorPathExtractor, ActorPath, Actor } import akka.event.LoggingAdapter +import akka.dispatch.Watch private[akka] sealed trait DaemonMsg private[akka] case class DaemonMsgCreate(props: Props, deploy: Deploy, path: String, supervisor: ActorRef) extends DaemonMsg @@ -62,18 +63,19 @@ private[akka] class RemoteSystemDaemon(system: ActorSystemImpl, _path: ActorPath val actor = system.provider.actorOf(system, props, supervisor.asInstanceOf[InternalActorRef], path, false, Some(deploy), true) addChild(subpath.mkString("/"), actor) - 
system.deathWatch.subscribe(this, actor) + this.sendSystemMessage(Watch(actor, this)) case _ ⇒ log.error("remote path does not match path from message [{}]", message) } case DaemonMsgWatch(watcher, watched) ⇒ - val other = system.actorFor(watcher.path.root / "remote") - system.deathWatch.subscribe(other, watched) + system.actorFor(watcher.path.root / "remote") match { + case a: InternalActorRef ⇒ a.sendSystemMessage(Watch(watched, a)) + } } case Terminated(child: LocalActorRef) ⇒ removeChild(child.path.elements.drop(1).mkString("/")) - case t: Terminated ⇒ system.deathWatch.publish(t) + case t: Terminated ⇒ //FIXME system.deathWatch.publish(t) case unknown ⇒ log.warning("Unknown message {} received by {}", unknown, this) } From e3e391e5aa1b4a6345aed769f3b53457f15ea08c Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 29 May 2012 14:09:22 +0200 Subject: [PATCH 03/92] Removing DaemonMsgWatch, oh yeah baby. However, still no cigar --- .../scala/akka/actor/DeathWatchSpec.scala | 2 +- .../src/main/scala/akka/actor/ActorCell.scala | 116 ++-- .../src/main/scala/akka/actor/ActorRef.scala | 23 +- .../main/scala/akka/pattern/AskSupport.scala | 17 +- .../main/java/akka/remote/RemoteProtocol.java | 640 +----------------- .../src/main/protocol/RemoteProtocol.proto | 10 +- akka-remote/src/main/resources/reference.conf | 2 - .../main/scala/akka/remote/RemoteDaemon.scala | 7 +- .../DaemonMsgWatchSerializer.scala | 43 -- .../DaemonMsgWatchSerializerSpec.scala | 49 -- .../test/scala/akka/testkit/AkkaSpec.scala | 4 +- 11 files changed, 102 insertions(+), 811 deletions(-) delete mode 100644 akka-remote/src/main/scala/akka/serialization/DaemonMsgWatchSerializer.scala delete mode 100644 akka-remote/src/test/scala/akka/serialization/DaemonMsgWatchSerializerSpec.scala diff --git a/akka-actor-tests/src/test/scala/akka/actor/DeathWatchSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/DeathWatchSpec.scala index 7a1aa35485..97eec5be01 100644 --- 
a/akka-actor-tests/src/test/scala/akka/actor/DeathWatchSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/DeathWatchSpec.scala @@ -36,7 +36,7 @@ trait DeathWatchSpec { this: AkkaSpec with ImplicitSender with DefaultTimeout "notify with one Terminated message when an Actor is stopped" in { val terminal = system.actorOf(Props.empty) startWatching(terminal) ! "hallo" - expectMsg("hallo") // this ensures that the DaemonMsgWatch has been received before we send the PoisonPill + expectMsg("hallo") terminal ! PoisonPill diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index cb804703ed..c09f40cebd 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -460,28 +460,22 @@ private[akka] class ActorCell( // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ final def stop(): Unit = dispatcher.systemDispatch(this, Terminate()) - override final def watch(subject: ActorRef): ActorRef = { - // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ - subject match { - case a: InternalActorRef ⇒ - if (!watching.contains(a)) { - watching += a - a.sendSystemMessage(Watch(a, self)) - } - } - subject + override final def watch(subject: ActorRef): ActorRef = subject match { + case a: InternalActorRef ⇒ + if (!watching.contains(a)) { + a.sendSystemMessage(Watch(a, self)) // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ + watching += a + } + a } - override final def unwatch(subject: ActorRef): ActorRef = { - // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ - subject match { - case a: InternalActorRef ⇒ - if (watching.contains(a)) { - watching -= a - a.sendSystemMessage(Unwatch(a, self)) - } - } - subject + override final def unwatch(subject: ActorRef): ActorRef = subject match { + case a: InternalActorRef ⇒ + if (watching.contains(a)) { + a.sendSystemMessage(Unwatch(a, self)) // 
➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ + watching -= a + } + a } final def children: Iterable[ActorRef] = childrenRefs.children @@ -579,18 +573,26 @@ private[akka] class ActorCell( def resume(): Unit = if (isNormal) dispatcher resume this - def addWatcher(watcher: ActorRef): Unit = if (!isTerminating) { - if (!watchedBy.contains(watcher)) { - watchedBy += watcher - if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, clazz(actor), self + " watched by " + watcher)) - } + def addWatcher(watchee: ActorRef, watcher: ActorRef): Unit = { + if (watchee == self) { + if (!watchedBy.contains(watcher)) { + watchedBy += watcher + if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, clazz(actor), self + " watched by " + watcher)) + } + } else if (watcher == self) { + watch(watchee) + } else println("addNOOOOOOOOO: " + watchee + " => " + watcher) } - def remWatcher(watcher: ActorRef): Unit = if (!isTerminating) { - if (watchedBy.contains(watcher)) { - watchedBy -= watcher - if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, clazz(actor), self + " unwatched by " + watcher)) - } + def remWatcher(watchee: ActorRef, watcher: ActorRef): Unit = { + if (watchee == self) { + if (watchedBy.contains(watcher)) { + watchedBy -= watcher + if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, clazz(actor), self + " unwatched by " + watcher)) + } + } else if (watcher == self) { + unwatch(watchee) + } else println("remNOOOOOOOOO: " + watchee + " => " + watcher) } def terminate() { @@ -617,17 +619,15 @@ private[akka] class ActorCell( try { message match { - case Create() ⇒ create() - case Recreate(cause) ⇒ recreate(cause) - case Watch(`self`, watcher) ⇒ addWatcher(watcher) - case Watch(watchee, `self`) ⇒ watch(watchee) - case Unwatch(`self`, watcher) ⇒ remWatcher(watcher) - case Unwatch(watchee, `self`) ⇒ unwatch(watchee) - case 
Suspend() ⇒ suspend() - case Resume() ⇒ resume() - case Terminate() ⇒ terminate() - case Supervise(child) ⇒ supervise(child) - case ChildTerminated(child) ⇒ handleChildTerminated(child) + case Create() ⇒ create() + case Recreate(cause) ⇒ recreate(cause) + case Watch(watchee, watcher) ⇒ addWatcher(watchee, watcher) + case Unwatch(watchee, watcher) ⇒ remWatcher(watchee, watcher) + case Suspend() ⇒ suspend() + case Resume() ⇒ resume() + case Terminate() ⇒ terminate() + case Supervise(child) ⇒ supervise(child) + case ChildTerminated(child) ⇒ handleChildTerminated(child) } } catch { case e @ (_: InterruptedException | NonFatal(_)) ⇒ handleInvokeFailure(e, "error while processing " + message) @@ -714,27 +714,33 @@ private[akka] class ActorCell( } finally { try { parent.sendSystemMessage(ChildTerminated(self)) + if (!watchedBy.isEmpty) { val terminated = Terminated(self)(stopped = true) - watchedBy foreach { - watcher ⇒ - try watcher.tell(terminated) catch { - case NonFatal(t) ⇒ system.eventStream.publish(Error(t, self.path.toString, clazz(a), "deathwatch")) - } - } + try { + watchedBy foreach { + watcher ⇒ + try watcher.tell(terminated, self) catch { + case NonFatal(t) ⇒ system.eventStream.publish(Error(t, self.path.toString, clazz(a), "deathwatch")) + } + } + } finally watchedBy = emptyActorRefSet } + if (!watching.isEmpty) { - watching foreach { - watchee ⇒ - try watchee.tell(Unwatch(watchee, self)) catch { - case NonFatal(t) ⇒ system.eventStream.publish(Error(t, self.path.toString, clazz(a), "deathwatch")) - } - } + try { + watching foreach { + case watchee: InternalActorRef ⇒ + try watchee.sendSystemMessage(Unwatch(watchee, self)) catch { + case NonFatal(t) ⇒ system.eventStream.publish(Error(t, self.path.toString, clazz(a), "deathwatch")) + } + } + } finally watching = emptyActorRefSet } if (system.settings.DebugLifecycle) - system.eventStream.publish(Debug(self.path.toString, clazz(actor), "stopped")) + system.eventStream.publish(Debug(self.path.toString, clazz(a), 
"stopped")) } finally { - behaviorStack = ActorCell.behaviorStackPlaceHolder + behaviorStack = behaviorStackPlaceHolder clearActorFields(a) actor = null } diff --git a/akka-actor/src/main/scala/akka/actor/ActorRef.scala b/akka-actor/src/main/scala/akka/actor/ActorRef.scala index 460bd02076..ad45f6ad09 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRef.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRef.scala @@ -409,13 +409,17 @@ private[akka] object DeadLetterActorRef { * * INTERNAL API */ -private[akka] class EmptyLocalActorRef( - override val provider: ActorRefProvider, - override val path: ActorPath, - val eventStream: EventStream) extends MinimalActorRef { +private[akka] class EmptyLocalActorRef(override val provider: ActorRefProvider, + override val path: ActorPath, + val eventStream: EventStream) extends MinimalActorRef { override def isTerminated(): Boolean = true + override def sendSystemMessage(message: SystemMessage): Unit = message match { + case Watch(maybeThis, watcher) if maybeThis == this ⇒ watcher ! Terminated(this)(stopped = false) + case _ ⇒ + } + override def !(message: Any)(implicit sender: ActorRef = null): Unit = message match { case d: DeadLetter ⇒ // do NOT form endless loops, since deadLetters will resend! case _ ⇒ eventStream.publish(DeadLetter(message, sender, this)) @@ -428,8 +432,15 @@ private[akka] class EmptyLocalActorRef( * * INTERNAL API */ -private[akka] class DeadLetterActorRef(_provider: ActorRefProvider, _path: ActorPath, _eventStream: EventStream) - extends EmptyLocalActorRef(_provider, _path, _eventStream) { +private[akka] class DeadLetterActorRef(_provider: ActorRefProvider, + _path: ActorPath, + _eventStream: EventStream) extends EmptyLocalActorRef(_provider, _path, _eventStream) { + + override def sendSystemMessage(message: SystemMessage): Unit = message match { + case Watch(maybeThis, watcher) if maybeThis == this ⇒ + case Watch(other, watcher) ⇒ watcher ! 
Terminated(other)(stopped = false) + case _ ⇒ + } override def !(message: Any)(implicit sender: ActorRef = this): Unit = message match { case d: DeadLetter ⇒ eventStream.publish(d) diff --git a/akka-actor/src/main/scala/akka/pattern/AskSupport.scala b/akka-actor/src/main/scala/akka/pattern/AskSupport.scala index 634299248d..2837bd6546 100644 --- a/akka-actor/src/main/scala/akka/pattern/AskSupport.scala +++ b/akka-actor/src/main/scala/akka/pattern/AskSupport.scala @@ -228,14 +228,11 @@ private[akka] final class PromiseActorRef private (val provider: ActorRefProvide if (!completedJustNow) provider.deadLetters ! message } - override def sendSystemMessage(message: SystemMessage): Unit = { - val self = this - message match { - case _: Terminate ⇒ stop() - case Watch(`self`, watcher) ⇒ //FIXME IMPLEMENT - case Unwatch(`self`, watcher) ⇒ //FIXME IMPLEMENT - case _ ⇒ - } + override def sendSystemMessage(message: SystemMessage): Unit = message match { + case _: Terminate ⇒ stop() + case Watch(watchee, watcher) ⇒ //FIXME IMPLEMENT + case Unwatch(watchee, watcher) ⇒ //FIXME IMPLEMENT + case _ ⇒ } override def isTerminated: Boolean = state match { @@ -254,8 +251,8 @@ private[akka] final class PromiseActorRef private (val provider: ActorRefProvide try { ensureCompleted() val termination = Terminated(this)(stopped = true) - // watchedBy foreach { w => w.tell(termination) } - // watching foreach { w.sendSystemMessage(Unwatch(w, self)) } + // FIXME watchedBy foreach { w => w.tell(termination) } + // FIXME watching foreach { w.sendSystemMessage(Unwatch(w, self)) } } finally { provider.unregisterTempActor(p) } diff --git a/akka-remote/src/main/java/akka/remote/RemoteProtocol.java b/akka-remote/src/main/java/akka/remote/RemoteProtocol.java index 0794e54364..8f3ab4e1fb 100644 --- a/akka-remote/src/main/java/akka/remote/RemoteProtocol.java +++ b/akka-remote/src/main/java/akka/remote/RemoteProtocol.java @@ -309,7 +309,7 @@ public final class RemoteProtocol { 
maybeForceBuilderInitialization(); } - private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + private Builder(BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -981,7 +981,7 @@ public final class RemoteProtocol { maybeForceBuilderInitialization(); } - private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + private Builder(BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -1977,7 +1977,7 @@ public final class RemoteProtocol { maybeForceBuilderInitialization(); } - private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + private Builder(BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -2527,7 +2527,7 @@ public final class RemoteProtocol { maybeForceBuilderInitialization(); } - private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + private Builder(BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -2936,7 +2936,7 @@ public final class RemoteProtocol { maybeForceBuilderInitialization(); } - private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + private Builder(BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -3410,7 +3410,7 @@ public final class RemoteProtocol { maybeForceBuilderInitialization(); } - private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + private Builder(BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -3909,7 +3909,7 @@ public final class RemoteProtocol { maybeForceBuilderInitialization(); } - private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + private Builder(BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -4487,7 +4487,7 @@ public final class RemoteProtocol { maybeForceBuilderInitialization(); } - private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + private 
Builder(BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -5367,7 +5367,7 @@ public final class RemoteProtocol { maybeForceBuilderInitialization(); } - private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + private Builder(BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -6067,7 +6067,7 @@ public final class RemoteProtocol { maybeForceBuilderInitialization(); } - private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { + private Builder(BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -6351,605 +6351,6 @@ public final class RemoteProtocol { // @@protoc_insertion_point(class_scope:DeployProtocol) } - public interface DaemonMsgWatchProtocolOrBuilder - extends com.google.protobuf.MessageOrBuilder { - - // required .ActorRefProtocol watcher = 1; - boolean hasWatcher(); - akka.remote.RemoteProtocol.ActorRefProtocol getWatcher(); - akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder getWatcherOrBuilder(); - - // required .ActorRefProtocol watched = 2; - boolean hasWatched(); - akka.remote.RemoteProtocol.ActorRefProtocol getWatched(); - akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder getWatchedOrBuilder(); - } - public static final class DaemonMsgWatchProtocol extends - com.google.protobuf.GeneratedMessage - implements DaemonMsgWatchProtocolOrBuilder { - // Use DaemonMsgWatchProtocol.newBuilder() to construct. 
- private DaemonMsgWatchProtocol(Builder builder) { - super(builder); - } - private DaemonMsgWatchProtocol(boolean noInit) {} - - private static final DaemonMsgWatchProtocol defaultInstance; - public static DaemonMsgWatchProtocol getDefaultInstance() { - return defaultInstance; - } - - public DaemonMsgWatchProtocol getDefaultInstanceForType() { - return defaultInstance; - } - - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return akka.remote.RemoteProtocol.internal_static_DaemonMsgWatchProtocol_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return akka.remote.RemoteProtocol.internal_static_DaemonMsgWatchProtocol_fieldAccessorTable; - } - - private int bitField0_; - // required .ActorRefProtocol watcher = 1; - public static final int WATCHER_FIELD_NUMBER = 1; - private akka.remote.RemoteProtocol.ActorRefProtocol watcher_; - public boolean hasWatcher() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public akka.remote.RemoteProtocol.ActorRefProtocol getWatcher() { - return watcher_; - } - public akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder getWatcherOrBuilder() { - return watcher_; - } - - // required .ActorRefProtocol watched = 2; - public static final int WATCHED_FIELD_NUMBER = 2; - private akka.remote.RemoteProtocol.ActorRefProtocol watched_; - public boolean hasWatched() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public akka.remote.RemoteProtocol.ActorRefProtocol getWatched() { - return watched_; - } - public akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder getWatchedOrBuilder() { - return watched_; - } - - private void initFields() { - watcher_ = akka.remote.RemoteProtocol.ActorRefProtocol.getDefaultInstance(); - watched_ = akka.remote.RemoteProtocol.ActorRefProtocol.getDefaultInstance(); - } - private byte memoizedIsInitialized = -1; - public final boolean isInitialized() { - byte isInitialized = 
memoizedIsInitialized; - if (isInitialized != -1) return isInitialized == 1; - - if (!hasWatcher()) { - memoizedIsInitialized = 0; - return false; - } - if (!hasWatched()) { - memoizedIsInitialized = 0; - return false; - } - if (!getWatcher().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - if (!getWatched().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - memoizedIsInitialized = 1; - return true; - } - - public void writeTo(com.google.protobuf.CodedOutputStream output) - throws java.io.IOException { - getSerializedSize(); - if (((bitField0_ & 0x00000001) == 0x00000001)) { - output.writeMessage(1, watcher_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - output.writeMessage(2, watched_); - } - getUnknownFields().writeTo(output); - } - - private int memoizedSerializedSize = -1; - public int getSerializedSize() { - int size = memoizedSerializedSize; - if (size != -1) return size; - - size = 0; - if (((bitField0_ & 0x00000001) == 0x00000001)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(1, watcher_); - } - if (((bitField0_ & 0x00000002) == 0x00000002)) { - size += com.google.protobuf.CodedOutputStream - .computeMessageSize(2, watched_); - } - size += getUnknownFields().getSerializedSize(); - memoizedSerializedSize = size; - return size; - } - - private static final long serialVersionUID = 0L; - @java.lang.Override - protected java.lang.Object writeReplace() - throws java.io.ObjectStreamException { - return super.writeReplace(); - } - - public static akka.remote.RemoteProtocol.DaemonMsgWatchProtocol parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static akka.remote.RemoteProtocol.DaemonMsgWatchProtocol parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws 
com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static akka.remote.RemoteProtocol.DaemonMsgWatchProtocol parseFrom(byte[] data) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data).buildParsed(); - } - public static akka.remote.RemoteProtocol.DaemonMsgWatchProtocol parseFrom( - byte[] data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return newBuilder().mergeFrom(data, extensionRegistry) - .buildParsed(); - } - public static akka.remote.RemoteProtocol.DaemonMsgWatchProtocol parseFrom(java.io.InputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static akka.remote.RemoteProtocol.DaemonMsgWatchProtocol parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - public static akka.remote.RemoteProtocol.DaemonMsgWatchProtocol parseDelimitedFrom(java.io.InputStream input) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static akka.remote.RemoteProtocol.DaemonMsgWatchProtocol parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - Builder builder = newBuilder(); - if (builder.mergeDelimitedFrom(input, extensionRegistry)) { - return builder.buildParsed(); - } else { - return null; - } - } - public static akka.remote.RemoteProtocol.DaemonMsgWatchProtocol parseFrom( - com.google.protobuf.CodedInputStream input) - throws java.io.IOException { - return newBuilder().mergeFrom(input).buildParsed(); - } - public static 
akka.remote.RemoteProtocol.DaemonMsgWatchProtocol parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return newBuilder().mergeFrom(input, extensionRegistry) - .buildParsed(); - } - - public static Builder newBuilder() { return Builder.create(); } - public Builder newBuilderForType() { return newBuilder(); } - public static Builder newBuilder(akka.remote.RemoteProtocol.DaemonMsgWatchProtocol prototype) { - return newBuilder().mergeFrom(prototype); - } - public Builder toBuilder() { return newBuilder(this); } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessage.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - public static final class Builder extends - com.google.protobuf.GeneratedMessage.Builder - implements akka.remote.RemoteProtocol.DaemonMsgWatchProtocolOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor - getDescriptor() { - return akka.remote.RemoteProtocol.internal_static_DaemonMsgWatchProtocol_descriptor; - } - - protected com.google.protobuf.GeneratedMessage.FieldAccessorTable - internalGetFieldAccessorTable() { - return akka.remote.RemoteProtocol.internal_static_DaemonMsgWatchProtocol_fieldAccessorTable; - } - - // Construct using akka.remote.RemoteProtocol.DaemonMsgWatchProtocol.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders) { - getWatcherFieldBuilder(); - getWatchedFieldBuilder(); - } - } - private static Builder create() { - return new Builder(); - } - - public Builder clear() { - super.clear(); - if (watcherBuilder_ == null) { - watcher_ = 
akka.remote.RemoteProtocol.ActorRefProtocol.getDefaultInstance(); - } else { - watcherBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - if (watchedBuilder_ == null) { - watched_ = akka.remote.RemoteProtocol.ActorRefProtocol.getDefaultInstance(); - } else { - watchedBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - - public Builder clone() { - return create().mergeFrom(buildPartial()); - } - - public com.google.protobuf.Descriptors.Descriptor - getDescriptorForType() { - return akka.remote.RemoteProtocol.DaemonMsgWatchProtocol.getDescriptor(); - } - - public akka.remote.RemoteProtocol.DaemonMsgWatchProtocol getDefaultInstanceForType() { - return akka.remote.RemoteProtocol.DaemonMsgWatchProtocol.getDefaultInstance(); - } - - public akka.remote.RemoteProtocol.DaemonMsgWatchProtocol build() { - akka.remote.RemoteProtocol.DaemonMsgWatchProtocol result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - private akka.remote.RemoteProtocol.DaemonMsgWatchProtocol buildParsed() - throws com.google.protobuf.InvalidProtocolBufferException { - akka.remote.RemoteProtocol.DaemonMsgWatchProtocol result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException( - result).asInvalidProtocolBufferException(); - } - return result; - } - - public akka.remote.RemoteProtocol.DaemonMsgWatchProtocol buildPartial() { - akka.remote.RemoteProtocol.DaemonMsgWatchProtocol result = new akka.remote.RemoteProtocol.DaemonMsgWatchProtocol(this); - int from_bitField0_ = bitField0_; - int to_bitField0_ = 0; - if (((from_bitField0_ & 0x00000001) == 0x00000001)) { - to_bitField0_ |= 0x00000001; - } - if (watcherBuilder_ == null) { - result.watcher_ = watcher_; - } else { - result.watcher_ = watcherBuilder_.build(); - } - if (((from_bitField0_ & 0x00000002) == 0x00000002)) { - to_bitField0_ |= 0x00000002; - } - if (watchedBuilder_ == null) { - 
result.watched_ = watched_; - } else { - result.watched_ = watchedBuilder_.build(); - } - result.bitField0_ = to_bitField0_; - onBuilt(); - return result; - } - - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof akka.remote.RemoteProtocol.DaemonMsgWatchProtocol) { - return mergeFrom((akka.remote.RemoteProtocol.DaemonMsgWatchProtocol)other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(akka.remote.RemoteProtocol.DaemonMsgWatchProtocol other) { - if (other == akka.remote.RemoteProtocol.DaemonMsgWatchProtocol.getDefaultInstance()) return this; - if (other.hasWatcher()) { - mergeWatcher(other.getWatcher()); - } - if (other.hasWatched()) { - mergeWatched(other.getWatched()); - } - this.mergeUnknownFields(other.getUnknownFields()); - return this; - } - - public final boolean isInitialized() { - if (!hasWatcher()) { - - return false; - } - if (!hasWatched()) { - - return false; - } - if (!getWatcher().isInitialized()) { - - return false; - } - if (!getWatched().isInitialized()) { - - return false; - } - return true; - } - - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder( - this.getUnknownFields()); - while (true) { - int tag = input.readTag(); - switch (tag) { - case 0: - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - default: { - if (!parseUnknownField(input, unknownFields, - extensionRegistry, tag)) { - this.setUnknownFields(unknownFields.build()); - onChanged(); - return this; - } - break; - } - case 10: { - akka.remote.RemoteProtocol.ActorRefProtocol.Builder subBuilder = akka.remote.RemoteProtocol.ActorRefProtocol.newBuilder(); - if (hasWatcher()) { - subBuilder.mergeFrom(getWatcher()); - } - input.readMessage(subBuilder, 
extensionRegistry); - setWatcher(subBuilder.buildPartial()); - break; - } - case 18: { - akka.remote.RemoteProtocol.ActorRefProtocol.Builder subBuilder = akka.remote.RemoteProtocol.ActorRefProtocol.newBuilder(); - if (hasWatched()) { - subBuilder.mergeFrom(getWatched()); - } - input.readMessage(subBuilder, extensionRegistry); - setWatched(subBuilder.buildPartial()); - break; - } - } - } - } - - private int bitField0_; - - // required .ActorRefProtocol watcher = 1; - private akka.remote.RemoteProtocol.ActorRefProtocol watcher_ = akka.remote.RemoteProtocol.ActorRefProtocol.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - akka.remote.RemoteProtocol.ActorRefProtocol, akka.remote.RemoteProtocol.ActorRefProtocol.Builder, akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder> watcherBuilder_; - public boolean hasWatcher() { - return ((bitField0_ & 0x00000001) == 0x00000001); - } - public akka.remote.RemoteProtocol.ActorRefProtocol getWatcher() { - if (watcherBuilder_ == null) { - return watcher_; - } else { - return watcherBuilder_.getMessage(); - } - } - public Builder setWatcher(akka.remote.RemoteProtocol.ActorRefProtocol value) { - if (watcherBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - watcher_ = value; - onChanged(); - } else { - watcherBuilder_.setMessage(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder setWatcher( - akka.remote.RemoteProtocol.ActorRefProtocol.Builder builderForValue) { - if (watcherBuilder_ == null) { - watcher_ = builderForValue.build(); - onChanged(); - } else { - watcherBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder mergeWatcher(akka.remote.RemoteProtocol.ActorRefProtocol value) { - if (watcherBuilder_ == null) { - if (((bitField0_ & 0x00000001) == 0x00000001) && - watcher_ != akka.remote.RemoteProtocol.ActorRefProtocol.getDefaultInstance()) { - watcher_ = - 
akka.remote.RemoteProtocol.ActorRefProtocol.newBuilder(watcher_).mergeFrom(value).buildPartial(); - } else { - watcher_ = value; - } - onChanged(); - } else { - watcherBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000001; - return this; - } - public Builder clearWatcher() { - if (watcherBuilder_ == null) { - watcher_ = akka.remote.RemoteProtocol.ActorRefProtocol.getDefaultInstance(); - onChanged(); - } else { - watcherBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - public akka.remote.RemoteProtocol.ActorRefProtocol.Builder getWatcherBuilder() { - bitField0_ |= 0x00000001; - onChanged(); - return getWatcherFieldBuilder().getBuilder(); - } - public akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder getWatcherOrBuilder() { - if (watcherBuilder_ != null) { - return watcherBuilder_.getMessageOrBuilder(); - } else { - return watcher_; - } - } - private com.google.protobuf.SingleFieldBuilder< - akka.remote.RemoteProtocol.ActorRefProtocol, akka.remote.RemoteProtocol.ActorRefProtocol.Builder, akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder> - getWatcherFieldBuilder() { - if (watcherBuilder_ == null) { - watcherBuilder_ = new com.google.protobuf.SingleFieldBuilder< - akka.remote.RemoteProtocol.ActorRefProtocol, akka.remote.RemoteProtocol.ActorRefProtocol.Builder, akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder>( - watcher_, - getParentForChildren(), - isClean()); - watcher_ = null; - } - return watcherBuilder_; - } - - // required .ActorRefProtocol watched = 2; - private akka.remote.RemoteProtocol.ActorRefProtocol watched_ = akka.remote.RemoteProtocol.ActorRefProtocol.getDefaultInstance(); - private com.google.protobuf.SingleFieldBuilder< - akka.remote.RemoteProtocol.ActorRefProtocol, akka.remote.RemoteProtocol.ActorRefProtocol.Builder, akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder> watchedBuilder_; - public boolean hasWatched() { - return ((bitField0_ & 0x00000002) == 0x00000002); - } - public 
akka.remote.RemoteProtocol.ActorRefProtocol getWatched() { - if (watchedBuilder_ == null) { - return watched_; - } else { - return watchedBuilder_.getMessage(); - } - } - public Builder setWatched(akka.remote.RemoteProtocol.ActorRefProtocol value) { - if (watchedBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - watched_ = value; - onChanged(); - } else { - watchedBuilder_.setMessage(value); - } - bitField0_ |= 0x00000002; - return this; - } - public Builder setWatched( - akka.remote.RemoteProtocol.ActorRefProtocol.Builder builderForValue) { - if (watchedBuilder_ == null) { - watched_ = builderForValue.build(); - onChanged(); - } else { - watchedBuilder_.setMessage(builderForValue.build()); - } - bitField0_ |= 0x00000002; - return this; - } - public Builder mergeWatched(akka.remote.RemoteProtocol.ActorRefProtocol value) { - if (watchedBuilder_ == null) { - if (((bitField0_ & 0x00000002) == 0x00000002) && - watched_ != akka.remote.RemoteProtocol.ActorRefProtocol.getDefaultInstance()) { - watched_ = - akka.remote.RemoteProtocol.ActorRefProtocol.newBuilder(watched_).mergeFrom(value).buildPartial(); - } else { - watched_ = value; - } - onChanged(); - } else { - watchedBuilder_.mergeFrom(value); - } - bitField0_ |= 0x00000002; - return this; - } - public Builder clearWatched() { - if (watchedBuilder_ == null) { - watched_ = akka.remote.RemoteProtocol.ActorRefProtocol.getDefaultInstance(); - onChanged(); - } else { - watchedBuilder_.clear(); - } - bitField0_ = (bitField0_ & ~0x00000002); - return this; - } - public akka.remote.RemoteProtocol.ActorRefProtocol.Builder getWatchedBuilder() { - bitField0_ |= 0x00000002; - onChanged(); - return getWatchedFieldBuilder().getBuilder(); - } - public akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder getWatchedOrBuilder() { - if (watchedBuilder_ != null) { - return watchedBuilder_.getMessageOrBuilder(); - } else { - return watched_; - } - } - private com.google.protobuf.SingleFieldBuilder< - 
akka.remote.RemoteProtocol.ActorRefProtocol, akka.remote.RemoteProtocol.ActorRefProtocol.Builder, akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder> - getWatchedFieldBuilder() { - if (watchedBuilder_ == null) { - watchedBuilder_ = new com.google.protobuf.SingleFieldBuilder< - akka.remote.RemoteProtocol.ActorRefProtocol, akka.remote.RemoteProtocol.ActorRefProtocol.Builder, akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder>( - watched_, - getParentForChildren(), - isClean()); - watched_ = null; - } - return watchedBuilder_; - } - - // @@protoc_insertion_point(builder_scope:DaemonMsgWatchProtocol) - } - - static { - defaultInstance = new DaemonMsgWatchProtocol(true); - defaultInstance.initFields(); - } - - // @@protoc_insertion_point(class_scope:DaemonMsgWatchProtocol) - } - private static com.google.protobuf.Descriptors.Descriptor internal_static_AkkaRemoteProtocol_descriptor; private static @@ -7000,11 +6401,6 @@ public final class RemoteProtocol { private static com.google.protobuf.GeneratedMessage.FieldAccessorTable internal_static_DeployProtocol_fieldAccessorTable; - private static com.google.protobuf.Descriptors.Descriptor - internal_static_DaemonMsgWatchProtocol_descriptor; - private static - com.google.protobuf.GeneratedMessage.FieldAccessorTable - internal_static_DaemonMsgWatchProtocol_fieldAccessorTable; public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { @@ -7039,11 +6435,9 @@ public final class RemoteProtocol { "ssCreator\030\003 \001(\t\022\017\n\007creator\030\004 \001(\014\022\024\n\014rout" + "erConfig\030\005 \001(\014\"S\n\016DeployProtocol\022\014\n\004path" + "\030\001 \002(\t\022\016\n\006config\030\002 \001(\014\022\024\n\014routerConfig\030\003" + - " \001(\014\022\r\n\005scope\030\004 \001(\014\"`\n\026DaemonMsgWatchPro" + - "tocol\022\"\n\007watcher\030\001 \002(\0132\021.ActorRefProtoco" + - "l\022\"\n\007watched\030\002 \002(\0132\021.ActorRefProtocol*7\n" + - 
"\013CommandType\022\013\n\007CONNECT\020\001\022\014\n\010SHUTDOWN\020\002\022" + - "\r\n\tHEARTBEAT\020\003B\017\n\013akka.remoteH\001" + " \001(\014\022\r\n\005scope\030\004 \001(\014*7\n\013CommandType\022\013\n\007CO" + + "NNECT\020\001\022\014\n\010SHUTDOWN\020\002\022\r\n\tHEARTBEAT\020\003B\017\n\013" + + "akka.remoteH\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { @@ -7130,14 +6524,6 @@ public final class RemoteProtocol { new java.lang.String[] { "Path", "Config", "RouterConfig", "Scope", }, akka.remote.RemoteProtocol.DeployProtocol.class, akka.remote.RemoteProtocol.DeployProtocol.Builder.class); - internal_static_DaemonMsgWatchProtocol_descriptor = - getDescriptor().getMessageTypes().get(10); - internal_static_DaemonMsgWatchProtocol_fieldAccessorTable = new - com.google.protobuf.GeneratedMessage.FieldAccessorTable( - internal_static_DaemonMsgWatchProtocol_descriptor, - new java.lang.String[] { "Watcher", "Watched", }, - akka.remote.RemoteProtocol.DaemonMsgWatchProtocol.class, - akka.remote.RemoteProtocol.DaemonMsgWatchProtocol.Builder.class); return null; } }; diff --git a/akka-remote/src/main/protocol/RemoteProtocol.proto b/akka-remote/src/main/protocol/RemoteProtocol.proto index 72b04caa57..7d86d8a82b 100644 --- a/akka-remote/src/main/protocol/RemoteProtocol.proto +++ b/akka-remote/src/main/protocol/RemoteProtocol.proto @@ -107,12 +107,4 @@ message DeployProtocol { optional bytes config = 2; optional bytes routerConfig = 3; optional bytes scope = 4; -} - -/** - * Serialization of akka.remote.DaemonMsgWatch - */ -message DaemonMsgWatchProtocol { - required ActorRefProtocol watcher = 1; - required ActorRefProtocol watched = 2; -} +} \ No newline at end of file diff --git a/akka-remote/src/main/resources/reference.conf b/akka-remote/src/main/resources/reference.conf index 97b85895ed..a56ea16c9a 100644 --- 
a/akka-remote/src/main/resources/reference.conf +++ b/akka-remote/src/main/resources/reference.conf @@ -15,7 +15,6 @@ akka { serializers { proto = "akka.serialization.ProtobufSerializer" daemon-create = "akka.serialization.DaemonMsgCreateSerializer" - daemon-watch = "akka.serialization.DaemonMsgWatchSerializer" } @@ -24,7 +23,6 @@ akka { # does, need to use the more specific one here in order to avoid ambiguity "com.google.protobuf.GeneratedMessage" = proto "akka.remote.DaemonMsgCreate" = daemon-create - "akka.remote.DaemonMsgWatch" = daemon-watch } deployment { diff --git a/akka-remote/src/main/scala/akka/remote/RemoteDaemon.scala b/akka-remote/src/main/scala/akka/remote/RemoteDaemon.scala index 1e81cfaac6..ddab54b2ad 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteDaemon.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteDaemon.scala @@ -12,7 +12,6 @@ import akka.dispatch.Watch private[akka] sealed trait DaemonMsg private[akka] case class DaemonMsgCreate(props: Props, deploy: Deploy, path: String, supervisor: ActorRef) extends DaemonMsg -private[akka] case class DaemonMsgWatch(watcher: ActorRef, watched: ActorRef) extends DaemonMsg /** * Internal system "daemon" actor for remote internal communication. 
@@ -67,15 +66,11 @@ private[akka] class RemoteSystemDaemon(system: ActorSystemImpl, _path: ActorPath case _ ⇒ log.error("remote path does not match path from message [{}]", message) } - case DaemonMsgWatch(watcher, watched) ⇒ - system.actorFor(watcher.path.root / "remote") match { - case a: InternalActorRef ⇒ a.sendSystemMessage(Watch(watched, a)) - } } case Terminated(child: LocalActorRef) ⇒ removeChild(child.path.elements.drop(1).mkString("/")) - case t: Terminated ⇒ //FIXME system.deathWatch.publish(t) + case t: Terminated ⇒ case unknown ⇒ log.warning("Unknown message {} received by {}", unknown, this) } diff --git a/akka-remote/src/main/scala/akka/serialization/DaemonMsgWatchSerializer.scala b/akka-remote/src/main/scala/akka/serialization/DaemonMsgWatchSerializer.scala deleted file mode 100644 index 016d7f14cb..0000000000 --- a/akka-remote/src/main/scala/akka/serialization/DaemonMsgWatchSerializer.scala +++ /dev/null @@ -1,43 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -package akka.serialization - -import akka.actor.ActorRef -import akka.remote.DaemonMsgWatch -import akka.remote.RemoteProtocol.ActorRefProtocol -import akka.remote.RemoteProtocol.DaemonMsgWatchProtocol -import akka.actor.ExtendedActorSystem - -/** - * Serializes akka's internal DaemonMsgWatch using protobuf. - * - * INTERNAL API - */ -private[akka] class DaemonMsgWatchSerializer(val system: ExtendedActorSystem) extends Serializer { - import ProtobufSerializer.serializeActorRef - import ProtobufSerializer.deserializeActorRef - - def includeManifest: Boolean = false - def identifier = 4 - - def toBinary(obj: AnyRef): Array[Byte] = obj match { - case DaemonMsgWatch(watcher, watched) ⇒ - DaemonMsgWatchProtocol.newBuilder. - setWatcher(serializeActorRef(watcher)). - setWatched(serializeActorRef(watched)). 
- build.toByteArray - case _ ⇒ - throw new IllegalArgumentException( - "Can't serialize a non-DaemonMsgWatch message using DaemonMsgWatchSerializer [%s]".format(obj)) - } - - def fromBinary(bytes: Array[Byte], clazz: Option[Class[_]]): AnyRef = { - val proto = DaemonMsgWatchProtocol.parseFrom(bytes) - DaemonMsgWatch( - watcher = deserializeActorRef(system, proto.getWatcher), - watched = deserializeActorRef(system, proto.getWatched)) - } - -} \ No newline at end of file diff --git a/akka-remote/src/test/scala/akka/serialization/DaemonMsgWatchSerializerSpec.scala b/akka-remote/src/test/scala/akka/serialization/DaemonMsgWatchSerializerSpec.scala deleted file mode 100644 index a6069beac1..0000000000 --- a/akka-remote/src/test/scala/akka/serialization/DaemonMsgWatchSerializerSpec.scala +++ /dev/null @@ -1,49 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -package akka.serialization - -import akka.testkit.AkkaSpec -import akka.remote.DaemonMsgWatch -import akka.actor.Actor -import akka.actor.Props - -object DaemonMsgWatchSerializerSpec { - class MyActor extends Actor { - def receive = { - case _ ⇒ - } - } -} - -@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) -class DaemonMsgWatchSerializerSpec extends AkkaSpec { - - import DaemonMsgWatchSerializerSpec._ - - val ser = SerializationExtension(system) - - "Serialization" must { - - "resolve DaemonMsgWatchSerializer" in { - ser.serializerFor(classOf[DaemonMsgWatch]).getClass must be(classOf[DaemonMsgWatchSerializer]) - } - - "serialize and de-serialize DaemonMsgWatch" in { - val watcher = system.actorOf(Props[MyActor], "watcher") - val watched = system.actorOf(Props[MyActor], "watched") - val msg = DaemonMsgWatch(watcher, watched) - val bytes = ser.serialize(msg) match { - case Left(exception) ⇒ fail(exception) - case Right(bytes) ⇒ bytes - } - ser.deserialize(bytes.asInstanceOf[Array[Byte]], classOf[DaemonMsgWatch]) match { - case Left(exception) ⇒ fail(exception) - case Right(m) ⇒ assert(m 
=== msg) - } - } - - } -} - diff --git a/akka-testkit/src/test/scala/akka/testkit/AkkaSpec.scala b/akka-testkit/src/test/scala/akka/testkit/AkkaSpec.scala index f24ea49b8c..d2eeeee776 100644 --- a/akka-testkit/src/test/scala/akka/testkit/AkkaSpec.scala +++ b/akka-testkit/src/test/scala/akka/testkit/AkkaSpec.scala @@ -111,9 +111,7 @@ class AkkaSpecSpec extends WordSpec with MustMatchers { "akka.actor.debug.lifecycle" -> true, "akka.actor.debug.event-stream" -> true, "akka.loglevel" -> "DEBUG", "akka.stdout-loglevel" -> "DEBUG") val system = ActorSystem("AkkaSpec1", ConfigFactory.parseMap(conf.asJava).withFallback(AkkaSpec.testConf)) - val spec = new AkkaSpec(system) { - val ref = Seq(testActor, system.actorOf(Props.empty, "name")) - } + val spec = new AkkaSpec(system) { val ref = Seq(testActor, system.actorOf(Props.empty, "name")) } spec.ref foreach (_.isTerminated must not be true) system.shutdown() spec.awaitCond(spec.ref forall (_.isTerminated), 2 seconds) From 2dfa560bbf1807d41a18d6b27539dc67550f8d3c Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 29 May 2012 14:27:36 +0200 Subject: [PATCH 04/92] Switched back to the old debug messages --- akka-actor/src/main/scala/akka/actor/ActorCell.scala | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index c09f40cebd..135f30f7e6 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -577,7 +577,7 @@ private[akka] class ActorCell( if (watchee == self) { if (!watchedBy.contains(watcher)) { watchedBy += watcher - if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, clazz(actor), self + " watched by " + watcher)) + if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, clazz(actor), "now monitoring " + watcher)) } } else if (watcher == self) { 
watch(watchee) @@ -588,7 +588,7 @@ private[akka] class ActorCell( if (watchee == self) { if (watchedBy.contains(watcher)) { watchedBy -= watcher - if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, clazz(actor), self + " unwatched by " + watcher)) + if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, clazz(actor), "stopped monitoring " + watcher)) } } else if (watcher == self) { unwatch(watchee) From 46098562ab84dc957a409225e8c05f8bca11ed24 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 29 May 2012 14:34:25 +0200 Subject: [PATCH 05/92] Making sure that you cannot watch or unwatch yourself --- .../src/main/scala/akka/actor/ActorCell.scala | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index 135f30f7e6..e8f9b64e5f 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -574,23 +574,29 @@ private[akka] class ActorCell( def resume(): Unit = if (isNormal) dispatcher resume this def addWatcher(watchee: ActorRef, watcher: ActorRef): Unit = { - if (watchee == self) { + val watcheeSelf = watchee == self + val watcherSelf = watcher == self + + if (watcheeSelf && !watcherSelf) { if (!watchedBy.contains(watcher)) { watchedBy += watcher if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, clazz(actor), "now monitoring " + watcher)) } - } else if (watcher == self) { + } else if (!watcheeSelf && watcherSelf) { watch(watchee) } else println("addNOOOOOOOOO: " + watchee + " => " + watcher) } def remWatcher(watchee: ActorRef, watcher: ActorRef): Unit = { - if (watchee == self) { + val watcheeSelf = watchee == self + val watcherSelf = watcher == self + + if (watcheeSelf && !watcherSelf) { if (watchedBy.contains(watcher)) { watchedBy -= watcher if 
(system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, clazz(actor), "stopped monitoring " + watcher)) } - } else if (watcher == self) { + } else if (!watcheeSelf && watcherSelf) { unwatch(watchee) } else println("remNOOOOOOOOO: " + watchee + " => " + watcher) } From f0cac7c1808d1ad786e3f96add571b036b11ee49 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 29 May 2012 14:46:34 +0200 Subject: [PATCH 06/92] Removing some boiler --- akka-actor/src/main/scala/akka/actor/ActorCell.scala | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index e8f9b64e5f..4e8a54d7fb 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -483,10 +483,8 @@ private[akka] class ActorCell( /** * Impl UntypedActorContext */ - final def getChildren(): java.lang.Iterable[ActorRef] = { - import scala.collection.JavaConverters.asJavaIterableConverter - asJavaIterableConverter(children).asJava - } + final def getChildren(): java.lang.Iterable[ActorRef] = + scala.collection.JavaConverters.asJavaIterableConverter(children).asJava final def tell(message: Any, sender: ActorRef): Unit = dispatcher.dispatch(this, Envelope(message, if (sender eq null) system.deadLetters else sender)(system)) From b38ce0274d0ea7a6a3f282bdd4c0874d50ba7a65 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 29 May 2012 15:04:04 +0200 Subject: [PATCH 07/92] Making sure that Terminated messages don't go to the same guy --- akka-actor/src/main/scala/akka/actor/ActorRef.scala | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorRef.scala b/akka-actor/src/main/scala/akka/actor/ActorRef.scala index ad45f6ad09..5e3de885ea 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRef.scala +++ 
b/akka-actor/src/main/scala/akka/actor/ActorRef.scala @@ -416,8 +416,8 @@ private[akka] class EmptyLocalActorRef(override val provider: ActorRefProvider, override def isTerminated(): Boolean = true override def sendSystemMessage(message: SystemMessage): Unit = message match { - case Watch(maybeThis, watcher) if maybeThis == this ⇒ watcher ! Terminated(this)(stopped = false) - case _ ⇒ + case Watch(watchee, watcher) ⇒ if (watchee == this && watcher != this) watcher ! Terminated(watchee)(stopped = false) + case _ ⇒ } override def !(message: Any)(implicit sender: ActorRef = null): Unit = message match { @@ -437,9 +437,8 @@ private[akka] class DeadLetterActorRef(_provider: ActorRefProvider, _eventStream: EventStream) extends EmptyLocalActorRef(_provider, _path, _eventStream) { override def sendSystemMessage(message: SystemMessage): Unit = message match { - case Watch(maybeThis, watcher) if maybeThis == this ⇒ - case Watch(other, watcher) ⇒ watcher ! Terminated(other)(stopped = false) - case _ ⇒ + case Watch(watchee, watcher) ⇒ if (watchee != this && watcher != this) watcher ! 
Terminated(watchee)(stopped = false) + case _ ⇒ } override def !(message: Any)(implicit sender: ActorRef = this): Unit = message match { From 8903abb23014f7fabe4ce4172a7ef97b4066f808 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 29 May 2012 15:20:26 +0200 Subject: [PATCH 08/92] Making sure Watch messages are handled by DeadLetterActorRef + EmptyLocalActorRef --- akka-actor/src/main/scala/akka/actor/ActorRef.scala | 10 ++++++---- akka-actor/src/main/scala/akka/actor/ActorSystem.scala | 6 ++++-- 2 files changed, 10 insertions(+), 6 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorRef.scala b/akka-actor/src/main/scala/akka/actor/ActorRef.scala index 5e3de885ea..042a5cdd6a 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRef.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRef.scala @@ -421,8 +421,9 @@ private[akka] class EmptyLocalActorRef(override val provider: ActorRefProvider, } override def !(message: Any)(implicit sender: ActorRef = null): Unit = message match { - case d: DeadLetter ⇒ // do NOT form endless loops, since deadLetters will resend! - case _ ⇒ eventStream.publish(DeadLetter(message, sender, this)) + case DeadLetter(w: Watch, _, _) ⇒ sendSystemMessage(w) + case d: DeadLetter ⇒ // do NOT form endless loops, since deadLetters will resend! 
+ case _ ⇒ eventStream.publish(DeadLetter(message, sender, this)) } } @@ -442,8 +443,9 @@ private[akka] class DeadLetterActorRef(_provider: ActorRefProvider, } override def !(message: Any)(implicit sender: ActorRef = this): Unit = message match { - case d: DeadLetter ⇒ eventStream.publish(d) - case _ ⇒ eventStream.publish(DeadLetter(message, sender, this)) + case DeadLetter(w: Watch, _, _) ⇒ sendSystemMessage(w) + case d: DeadLetter ⇒ eventStream.publish(d) + case _ ⇒ eventStream.publish(DeadLetter(message, sender, this)) } @throws(classOf[java.io.ObjectStreamException]) diff --git a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala index 94ee24336a..993e7e98e4 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala @@ -542,7 +542,8 @@ private[akka] class ActorSystemImpl(val name: String, applicationConfig: Config, //FIXME Why do we need this at all? val deadLetterQueue: MessageQueue = new MessageQueue { - def enqueue(receiver: ActorRef, envelope: Envelope) { deadLetters ! DeadLetter(envelope.message, envelope.sender, receiver) } + def enqueue(receiver: ActorRef, envelope: Envelope): Unit = + deadLetters ! DeadLetter(envelope.message, envelope.sender, receiver) def dequeue() = null def hasMessages = false def numberOfMessages = 0 @@ -551,7 +552,8 @@ private[akka] class ActorSystemImpl(val name: String, applicationConfig: Config, //FIXME Why do we need this at all? val deadLetterMailbox: Mailbox = new Mailbox(null, deadLetterQueue) { becomeClosed() - def systemEnqueue(receiver: ActorRef, handle: SystemMessage): Unit = deadLetters ! DeadLetter(handle, receiver, receiver) + def systemEnqueue(receiver: ActorRef, handle: SystemMessage): Unit = + deadLetters ! 
DeadLetter(handle, receiver, receiver) def systemDrain(): SystemMessage = null def hasSystemMessages = false } From a413a9394b3d54c9e26bb5e58dcd03a22bc1ddb5 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 29 May 2012 16:36:24 +0200 Subject: [PATCH 09/92] Switching approaches for EmptyLocalActorRef and DeadLetterActorRef --- .../src/main/scala/akka/actor/ActorRef.scala | 36 +++++++++++-------- 1 file changed, 22 insertions(+), 14 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorRef.scala b/akka-actor/src/main/scala/akka/actor/ActorRef.scala index 042a5cdd6a..a713a61ddc 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRef.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRef.scala @@ -415,15 +415,20 @@ private[akka] class EmptyLocalActorRef(override val provider: ActorRefProvider, override def isTerminated(): Boolean = true - override def sendSystemMessage(message: SystemMessage): Unit = message match { - case Watch(watchee, watcher) ⇒ if (watchee == this && watcher != this) watcher ! Terminated(watchee)(stopped = false) - case _ ⇒ - } + override def sendSystemMessage(message: SystemMessage): Unit = specialHandle(message) override def !(message: Any)(implicit sender: ActorRef = null): Unit = message match { - case DeadLetter(w: Watch, _, _) ⇒ sendSystemMessage(w) - case d: DeadLetter ⇒ // do NOT form endless loops, since deadLetters will resend! - case _ ⇒ eventStream.publish(DeadLetter(message, sender, this)) + case d: DeadLetter ⇒ specialHandle(d.message) // do NOT form endless loops, since deadLetters will resend! + case _ ⇒ if (!specialHandle(message)) eventStream.publish(DeadLetter(message, sender, this)) + } + + protected def specialHandle(msg: Any): Boolean = msg match { + case w: Watch ⇒ + if (w.watchee == this && w.watcher != this) w.watcher ! 
Terminated(w.watchee)(stopped = false) + + true + case w: Unwatch ⇒ true // Just ignore + case _ ⇒ false } } @@ -437,15 +442,18 @@ private[akka] class DeadLetterActorRef(_provider: ActorRefProvider, _path: ActorPath, _eventStream: EventStream) extends EmptyLocalActorRef(_provider, _path, _eventStream) { - override def sendSystemMessage(message: SystemMessage): Unit = message match { - case Watch(watchee, watcher) ⇒ if (watchee != this && watcher != this) watcher ! Terminated(watchee)(stopped = false) - case _ ⇒ + override def !(message: Any)(implicit sender: ActorRef = this): Unit = message match { + case d: DeadLetter ⇒ if (!specialHandle(d.message)) eventStream.publish(d) + case _ ⇒ if (!specialHandle(message)) eventStream.publish(DeadLetter(message, sender, this)) } - override def !(message: Any)(implicit sender: ActorRef = this): Unit = message match { - case DeadLetter(w: Watch, _, _) ⇒ sendSystemMessage(w) - case d: DeadLetter ⇒ eventStream.publish(d) - case _ ⇒ eventStream.publish(DeadLetter(message, sender, this)) + override protected def specialHandle(msg: Any): Boolean = msg match { + case w: Watch ⇒ + if (w.watchee != this && w.watcher != this) w.watcher ! 
Terminated(w.watchee)(stopped = false) + + true + case w: Unwatch ⇒ true // Just ignore + case _ ⇒ false } @throws(classOf[java.io.ObjectStreamException]) From 8bdb8702463d036e4a4353f899ec3f710df9fd94 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 30 May 2012 13:24:38 +0200 Subject: [PATCH 10/92] Implementing death watch for PromiseActorRef --- .../akka/pattern/AbstractPromiseActorRef.java | 2 + .../scala/akka/actor/ActorRefProvider.scala | 12 ++-- .../main/scala/akka/pattern/AskSupport.scala | 63 ++++++++++++++----- .../akka/pattern/GracefulStopSupport.scala | 16 ++--- 4 files changed, 62 insertions(+), 31 deletions(-) diff --git a/akka-actor/src/main/java/akka/pattern/AbstractPromiseActorRef.java b/akka-actor/src/main/java/akka/pattern/AbstractPromiseActorRef.java index e21d58204e..bb0f03861c 100644 --- a/akka-actor/src/main/java/akka/pattern/AbstractPromiseActorRef.java +++ b/akka-actor/src/main/java/akka/pattern/AbstractPromiseActorRef.java @@ -8,10 +8,12 @@ import akka.util.Unsafe; final class AbstractPromiseActorRef { final static long stateOffset; + final static long watchedByOffset; static { try { stateOffset = Unsafe.instance.objectFieldOffset(PromiseActorRef.class.getDeclaredField("_stateDoNotCallMeDirectly")); + watchedByOffset = Unsafe.instance.objectFieldOffset(PromiseActorRef.class.getDeclaredField("_watchedByDoNotCallMeDirectly")); } catch(Throwable t){ throw new ExceptionInInitializerError(t); } diff --git a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala index ca971de40e..d4e9595f62 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala @@ -8,8 +8,8 @@ import java.util.concurrent.atomic.AtomicLong import akka.dispatch._ import akka.routing._ import akka.AkkaException -import akka.util.{ Switch, Helpers } import akka.event._ +import akka.util.{ NonFatal, Switch, Helpers } /** * 
Interface for all ActorRef providers to implement. @@ -373,9 +373,9 @@ class LocalActorRefProvider( override def sendSystemMessage(message: SystemMessage): Unit = stopped ifOff { message match { - case Supervise(child) ⇒ // TODO register child in some map to keep track of it and enable shutdown after all dead - case ChildTerminated(child) ⇒ stop() - case _ ⇒ log.error(this + " received unexpected system message [" + message + "]") + case Supervise(_) ⇒ // TODO register child in some map to keep track of it and enable shutdown after all dead + case ChildTerminated(_) ⇒ stop() + case _ ⇒ log.error(this + " received unexpected system message [" + message + "]") } } } @@ -403,8 +403,8 @@ class LocalActorRefProvider( def receive = { case Terminated(_) ⇒ context.stop(self) - case CreateChild(child, name) ⇒ sender ! (try context.actorOf(child, name) catch { case e: Exception ⇒ e }) // FIXME shouldn't this use NonFatal & Status.Failure? - case CreateRandomNameChild(child) ⇒ sender ! (try context.actorOf(child) catch { case e: Exception ⇒ e }) // FIXME shouldn't this use NonFatal & Status.Failure? + case CreateChild(child, name) ⇒ sender ! (try context.actorOf(child, name) catch { case NonFatal(e) ⇒ e }) // FIXME shouldn't this use NonFatal & Status.Failure? + case CreateRandomNameChild(child) ⇒ sender ! (try context.actorOf(child) catch { case NonFatal(e) ⇒ e }) // FIXME shouldn't this use NonFatal & Status.Failure? case StopChild(child) ⇒ context.stop(child); sender ! "ok" case m ⇒ deadLetters ! 
DeadLetter(m, sender, self) } diff --git a/akka-actor/src/main/scala/akka/pattern/AskSupport.scala b/akka-actor/src/main/scala/akka/pattern/AskSupport.scala index 2837bd6546..47154f7853 100644 --- a/akka-actor/src/main/scala/akka/pattern/AskSupport.scala +++ b/akka-actor/src/main/scala/akka/pattern/AskSupport.scala @@ -4,11 +4,10 @@ package akka.pattern import java.util.concurrent.TimeoutException -import akka.util.Timeout import annotation.tailrec -import akka.util.Unsafe import akka.actor._ import akka.dispatch._ +import akka.util.{ NonFatal, Timeout, Unsafe } /** * This is what is used to complete a Future that is returned from an ask/? call, @@ -163,6 +162,7 @@ private[akka] final class PromiseActorRef private (val provider: ActorRefProvide extends MinimalActorRef { import PromiseActorRef._ import AbstractPromiseActorRef.stateOffset + import AbstractPromiseActorRef.watchedByOffset /** * As an optimization for the common (local) case we only register this PromiseActorRef @@ -179,14 +179,43 @@ private[akka] final class PromiseActorRef private (val provider: ActorRefProvide @volatile private[this] var _stateDoNotCallMeDirectly: AnyRef = _ - @inline - private def state: AnyRef = Unsafe.instance.getObjectVolatile(this, stateOffset) + @volatile + private[this] var _watchedByDoNotCallMeDirectly: Set[ActorRef] = ActorCell.emptyActorRefSet @inline - private def updateState(oldState: AnyRef, newState: AnyRef): Boolean = Unsafe.instance.compareAndSwapObject(this, stateOffset, oldState, newState) + private[this] def watchedBy: Set[ActorRef] = Unsafe.instance.getObjectVolatile(this, watchedByOffset).asInstanceOf[Set[ActorRef]] @inline - private def setState(newState: AnyRef): Unit = Unsafe.instance.putObjectVolatile(this, stateOffset, newState) + private[this] def updateWatchedBy(oldWatchedBy: Set[ActorRef], newWatchedBy: Set[ActorRef]): Boolean = + Unsafe.instance.compareAndSwapObject(this, watchedByOffset, oldWatchedBy, newWatchedBy) + + @tailrec // Returns false if the 
Promise is already completed + private[this] final def addWatcher(watcher: ActorRef): Boolean = watchedBy match { + case null ⇒ false + case other ⇒ if (updateWatchedBy(other, other + watcher)) true else addWatcher(watcher) + } + + @tailrec + private[this] final def remWatcher(watcher: ActorRef): Unit = watchedBy match { + case null ⇒ () + case other ⇒ if (!updateWatchedBy(other, other - watcher)) remWatcher(watcher) + } + + @tailrec + private[this] final def clearWatchers(): Set[ActorRef] = watchedBy match { + case null ⇒ ActorCell.emptyActorRefSet + case other ⇒ if (!updateWatchedBy(other, null)) clearWatchers() else other + } + + @inline + private[this] def state: AnyRef = Unsafe.instance.getObjectVolatile(this, stateOffset) + + @inline + private[this] def updateState(oldState: AnyRef, newState: AnyRef): Boolean = + Unsafe.instance.compareAndSwapObject(this, stateOffset, oldState, newState) + + @inline + private[this] def setState(newState: AnyRef): Unit = Unsafe.instance.putObjectVolatile(this, stateOffset, newState) override def getParent: InternalActorRef = provider.tempContainer @@ -230,8 +259,8 @@ private[akka] final class PromiseActorRef private (val provider: ActorRefProvide override def sendSystemMessage(message: SystemMessage): Unit = message match { case _: Terminate ⇒ stop() - case Watch(watchee, watcher) ⇒ //FIXME IMPLEMENT - case Unwatch(watchee, watcher) ⇒ //FIXME IMPLEMENT + case Watch(watchee, watcher) ⇒ if (watchee == this && watcher != this && !addWatcher(watcher)) watcher ! 
Terminated(watchee)(stopped = true) + case Unwatch(watchee, watcher) ⇒ if (watchee == this && watcher != this) remWatcher(watcher) case _ ⇒ } @@ -242,20 +271,20 @@ private[akka] final class PromiseActorRef private (val provider: ActorRefProvide @tailrec override def stop(): Unit = { - def ensureCompleted(): Unit = if (!result.isCompleted) result.tryComplete(Left(new ActorKilledException("Stopped"))) + def ensureCompleted(): Unit = { + if (!result.isCompleted) result.tryComplete(Left(new ActorKilledException("Stopped"))) + val watchers = clearWatchers() + if (!watchers.isEmpty) { + val termination = Terminated(this)(stopped = true) + watchers foreach { w ⇒ try w.tell(termination, this) catch { case NonFatal(t) ⇒ /* FIXME LOG THIS */ } } + } + } state match { case null ⇒ // if path was never queried nobody can possibly be watching us, so we don't have to publish termination either if (updateState(null, Stopped)) ensureCompleted() else stop() case p: ActorPath ⇒ if (updateState(p, StoppedWithPath(p))) { - try { - ensureCompleted() - val termination = Terminated(this)(stopped = true) - // FIXME watchedBy foreach { w => w.tell(termination) } - // FIXME watching foreach { w.sendSystemMessage(Unwatch(w, self)) } - } finally { - provider.unregisterTempActor(p) - } + try ensureCompleted() finally provider.unregisterTempActor(p) } else stop() case Stopped | _: StoppedWithPath ⇒ // already stopped case Registering ⇒ stop() // spin until registration is completed before stopping diff --git a/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala b/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala index d1e7fab327..35004e637d 100644 --- a/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala +++ b/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala @@ -34,18 +34,18 @@ trait GracefulStopSupport { * is completed with failure [[akka.pattern.AskTimeoutException]]. 
*/ def gracefulStop(target: ActorRef, timeout: Duration)(implicit system: ActorSystem): Future[Boolean] = { - if (target.isTerminated) { - Promise.successful(true) - } else system match { + if (target.isTerminated) Promise.successful(true) + else system match { case e: ExtendedActorSystem ⇒ + val internalTarget = target.asInstanceOf[InternalActorRef] val ref = PromiseActorRef(e.provider, Timeout(timeout)) - ref.sendSystemMessage(Watch(target, ref)) - ref.result onComplete { - case Right(Terminated(`target`)) ⇒ () // Ignore - case _ ⇒ ref.sendSystemMessage(Unwatch(target, ref)) // Just making sure we're not leaking here + internalTarget.sendSystemMessage(Watch(target, ref)) + val result = ref.result map { + case Terminated(`target`) ⇒ true + case _ ⇒ internalTarget.sendSystemMessage(Unwatch(target, ref)); false // Just making sure we're not leaking here } target ! PoisonPill - ref.result map { case Terminated(`target`) ⇒ true } + result case s ⇒ throw new IllegalArgumentException("Unknown ActorSystem implementation: '" + s + "'") } } From f4a3bdff336ff3580e22ca949098ab648a538ecf Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 30 May 2012 13:29:48 +0200 Subject: [PATCH 11/92] Adding PossiblyHarmful to RemoteTransport untrusted mode filtering --- akka-actor/src/main/scala/akka/actor/Actor.scala | 2 +- .../src/main/scala/akka/remote/RemoteTransport.scala | 7 +++---- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/Actor.scala b/akka-actor/src/main/scala/akka/actor/Actor.scala index cb34f82b78..c8962e819f 100644 --- a/akka-actor/src/main/scala/akka/actor/Actor.scala +++ b/akka-actor/src/main/scala/akka/actor/Actor.scala @@ -59,7 +59,7 @@ case object Kill extends Kill { /** * When Death Watch is used, the watcher will receive a Terminated(watched) message when watched is terminated. 
*/ -case class Terminated(@BeanProperty actor: ActorRef)(@BeanProperty stopped: Boolean) extends PossiblyHarmful +case class Terminated(@BeanProperty actor: ActorRef)(@BeanProperty stopped: Boolean) abstract class ReceiveTimeout extends PossiblyHarmful diff --git a/akka-remote/src/main/scala/akka/remote/RemoteTransport.scala b/akka-remote/src/main/scala/akka/remote/RemoteTransport.scala index 7cb622ab00..249c23e968 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteTransport.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteTransport.scala @@ -287,10 +287,9 @@ abstract class RemoteTransport(val system: ExtendedActorSystem, val provider: Re case l: LocalRef ⇒ if (provider.remoteSettings.LogReceive) log.debug("received local message {}", remoteMessage) remoteMessage.payload match { - case _: SystemMessage if useUntrustedMode ⇒ log.warning("operating in UntrustedMode, dropping inbound system message") - case _: AutoReceivedMessage if useUntrustedMode ⇒ log.warning("operating in UntrustedMode, dropping inbound AutoReceivedMessage") - case msg: SystemMessage ⇒ l.sendSystemMessage(msg) - case msg ⇒ l.!(msg)(remoteMessage.sender) + case msg: PossiblyHarmful if useUntrustedMode ⇒ log.warning("operating in UntrustedMode, dropping inbound PossiblyHarmful message of type {}", msg.getClass) + case msg: SystemMessage ⇒ l.sendSystemMessage(msg) + case msg ⇒ l.!(msg)(remoteMessage.sender) } case r: RemoteRef ⇒ if (provider.remoteSettings.LogReceive) log.debug("received remote-destined message {}", remoteMessage) From 6c1ca7fcdbbb4ecc51cfe71d9813f7cca41d37f1 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 30 May 2012 14:26:09 +0200 Subject: [PATCH 12/92] Unborkening the top-level surveillance --- .../src/main/scala/akka/actor/ActorCell.scala | 8 ++++---- .../scala/akka/actor/ActorRefProvider.scala | 20 +++++++------------ 2 files changed, 11 insertions(+), 17 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala 
b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index 4e8a54d7fb..1f5fa72c68 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -462,7 +462,7 @@ private[akka] class ActorCell( override final def watch(subject: ActorRef): ActorRef = subject match { case a: InternalActorRef ⇒ - if (!watching.contains(a)) { + if (a != self && !watching.contains(a)) { a.sendSystemMessage(Watch(a, self)) // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ watching += a } @@ -471,7 +471,7 @@ private[akka] class ActorCell( override final def unwatch(subject: ActorRef): ActorRef = subject match { case a: InternalActorRef ⇒ - if (watching.contains(a)) { + if (a != self && watching.contains(a)) { a.sendSystemMessage(Unwatch(a, self)) // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ watching -= a } @@ -582,7 +582,7 @@ private[akka] class ActorCell( } } else if (!watcheeSelf && watcherSelf) { watch(watchee) - } else println("addNOOOOOOOOO: " + watchee + " => " + watcher) + } } def remWatcher(watchee: ActorRef, watcher: ActorRef): Unit = { @@ -596,7 +596,7 @@ private[akka] class ActorCell( } } else if (!watcheeSelf && watcherSelf) { unwatch(watchee) - } else println("remNOOOOOOOOO: " + watchee + " => " + watcher) + } } def terminate() { diff --git a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala index d4e9595f62..3d9563b987 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala @@ -474,18 +474,10 @@ class LocalActorRefProvider( lazy val rootGuardian: InternalActorRef = new LocalActorRef(system, guardianProps, theOneWhoWalksTheBubblesOfSpaceTime, rootPath, true) { - object Extra { - def unapply(s: String): Option[InternalActorRef] = extraNames.get(s) - } - override def getParent: InternalActorRef = this - - override def 
getSingleChild(name: String): InternalActorRef = { - name match { - case "temp" ⇒ tempContainer - case Extra(e) ⇒ e - case _ ⇒ super.getSingleChild(name) - } + override def getSingleChild(name: String): InternalActorRef = name match { + case "temp" ⇒ tempContainer + case other ⇒ extraNames.get(other).getOrElse(super.getSingleChild(other)) } } @@ -510,8 +502,10 @@ class LocalActorRefProvider( def init(_system: ActorSystemImpl) { system = _system // chain death watchers so that killing guardian stops the application - guardian.sendSystemMessage(Watch(systemGuardian, guardian)) - rootGuardian.sendSystemMessage(Watch(rootGuardian, systemGuardian)) + //guardian.sendSystemMessage(Watch(systemGuardian, guardian)) + //rootGuardian.sendSystemMessage(Watch(rootGuardian, systemGuardian)) + guardian.sendSystemMessage(Watch(guardian, systemGuardian)) + rootGuardian.sendSystemMessage(Watch(systemGuardian, rootGuardian)) eventStream.startDefaultLoggers(_system) } From d42109ff5f04a5e266a7b671bd67ef213985aafc Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 30 May 2012 14:43:18 +0200 Subject: [PATCH 13/92] Adding warning logging for erronous settings --- akka-actor/src/main/scala/akka/actor/ActorCell.scala | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index 1f5fa72c68..23cd796ad2 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -582,6 +582,8 @@ private[akka] class ActorCell( } } else if (!watcheeSelf && watcherSelf) { watch(watchee) + } else { + system.eventStream.publish(Warning(self.path.toString, clazz(actor), "BUG: illegal Watch(%s,%s) for %s".format(watchee, watcher, self))) } } @@ -596,6 +598,8 @@ private[akka] class ActorCell( } } else if (!watcheeSelf && watcherSelf) { unwatch(watchee) + } else { + system.eventStream.publish(Warning(self.path.toString, clazz(actor), "BUG: 
illegal Unwatch(%s,%s) for %s".format(watchee, watcher, self))) } } From c64db4b00d1298708d2d7ad90c1c7be9c42d5bc4 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 30 May 2012 15:37:29 +0200 Subject: [PATCH 14/92] Code formatting --- akka-actor/src/main/scala/akka/pattern/AskSupport.scala | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/akka-actor/src/main/scala/akka/pattern/AskSupport.scala b/akka-actor/src/main/scala/akka/pattern/AskSupport.scala index 47154f7853..3805521ae4 100644 --- a/akka-actor/src/main/scala/akka/pattern/AskSupport.scala +++ b/akka-actor/src/main/scala/akka/pattern/AskSupport.scala @@ -283,9 +283,7 @@ private[akka] final class PromiseActorRef private (val provider: ActorRefProvide case null ⇒ // if path was never queried nobody can possibly be watching us, so we don't have to publish termination either if (updateState(null, Stopped)) ensureCompleted() else stop() case p: ActorPath ⇒ - if (updateState(p, StoppedWithPath(p))) { - try ensureCompleted() finally provider.unregisterTempActor(p) - } else stop() + if (updateState(p, StoppedWithPath(p))) { try ensureCompleted() finally provider.unregisterTempActor(p) } else stop() case Stopped | _: StoppedWithPath ⇒ // already stopped case Registering ⇒ stop() // spin until registration is completed before stopping } From 7257dc5d86171d4f922e748a26a209aa29f4db3e Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 30 May 2012 17:37:44 +0200 Subject: [PATCH 15/92] Minor code formatting --- .../src/main/scala/akka/actor/ActorCell.scala | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index 23cd796ad2..3db70d5735 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -727,10 +727,9 @@ private[akka] class ActorCell( val terminated = Terminated(self)(stopped = true) try 
{ watchedBy foreach { - watcher ⇒ - try watcher.tell(terminated, self) catch { - case NonFatal(t) ⇒ system.eventStream.publish(Error(t, self.path.toString, clazz(a), "deathwatch")) - } + watcher ⇒ try watcher.tell(terminated, self) catch { + case NonFatal(t) ⇒ system.eventStream.publish(Error(t, self.path.toString, clazz(a), "deathwatch")) + } } } finally watchedBy = emptyActorRefSet } @@ -738,10 +737,9 @@ private[akka] class ActorCell( if (!watching.isEmpty) { try { watching foreach { - case watchee: InternalActorRef ⇒ - try watchee.sendSystemMessage(Unwatch(watchee, self)) catch { - case NonFatal(t) ⇒ system.eventStream.publish(Error(t, self.path.toString, clazz(a), "deathwatch")) - } + case watchee: InternalActorRef ⇒ try watchee.sendSystemMessage(Unwatch(watchee, self)) catch { + case NonFatal(t) ⇒ system.eventStream.publish(Error(t, self.path.toString, clazz(a), "deathwatch")) + } } } finally watching = emptyActorRefSet } From aef05497f57736caaabe2f1ded638ff3fef2b5b1 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Fri, 1 Jun 2012 14:49:12 +0200 Subject: [PATCH 16/92] Adding some error ouput for violated invariants in ASkSupport --- .../scala/akka/actor/ActorRefProvider.scala | 2 -- .../main/scala/akka/pattern/AskSupport.scala | 27 ++++++++++--------- 2 files changed, 15 insertions(+), 14 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala index 3d9563b987..eede9e1bef 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala @@ -502,8 +502,6 @@ class LocalActorRefProvider( def init(_system: ActorSystemImpl) { system = _system // chain death watchers so that killing guardian stops the application - //guardian.sendSystemMessage(Watch(systemGuardian, guardian)) - //rootGuardian.sendSystemMessage(Watch(rootGuardian, systemGuardian)) guardian.sendSystemMessage(Watch(guardian, 
systemGuardian)) rootGuardian.sendSystemMessage(Watch(systemGuardian, rootGuardian)) eventStream.startDefaultLoggers(_system) diff --git a/akka-actor/src/main/scala/akka/pattern/AskSupport.scala b/akka-actor/src/main/scala/akka/pattern/AskSupport.scala index 3805521ae4..42154ff522 100644 --- a/akka-actor/src/main/scala/akka/pattern/AskSupport.scala +++ b/akka-actor/src/main/scala/akka/pattern/AskSupport.scala @@ -246,22 +246,25 @@ private[akka] final class PromiseActorRef private (val provider: ActorRefProvide override def !(message: Any)(implicit sender: ActorRef = null): Unit = state match { case Stopped | _: StoppedWithPath ⇒ provider.deadLetters ! message - case _ ⇒ - val completedJustNow = result.tryComplete { - message match { - case Status.Success(r) ⇒ Right(r) - case Status.Failure(f) ⇒ Left(f) - case other ⇒ Right(other) - } + case _ ⇒ if (!(result.tryComplete { + message match { + case Status.Success(r) ⇒ Right(r) + case Status.Failure(f) ⇒ Left(f) + case other ⇒ Right(other) } - if (!completedJustNow) provider.deadLetters ! message + })) provider.deadLetters ! message } override def sendSystemMessage(message: SystemMessage): Unit = message match { - case _: Terminate ⇒ stop() - case Watch(watchee, watcher) ⇒ if (watchee == this && watcher != this && !addWatcher(watcher)) watcher ! Terminated(watchee)(stopped = true) - case Unwatch(watchee, watcher) ⇒ if (watchee == this && watcher != this) remWatcher(watcher) - case _ ⇒ + case _: Terminate ⇒ stop() + case Watch(watchee, watcher) ⇒ + if (watchee == this && watcher != this) { + if (!addWatcher(watcher)) watcher ! 
Terminated(watchee)(stopped = true) + } else System.err.println("BUG: illegal Watch(%s,%s) for %s".format(watchee, watcher, this)) + case Unwatch(watchee, watcher) ⇒ + if (watchee == this && watcher != this) remWatcher(watcher) + else System.err.println("BUG: illegal Unwatch(%s,%s) for %s".format(watchee, watcher, this)) + case _ ⇒ } override def isTerminated: Boolean = state match { From 33f14f9bf691ccff6ec5d38378e1e55550059e67 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Fri, 1 Jun 2012 15:15:53 +0200 Subject: [PATCH 17/92] Test gossip convergence, see #2164 --- .../src/main/scala/akka/cluster/Cluster.scala | 1 - .../scala/akka/cluster/ConvergenceSpec.scala | 131 ++++++++++++++++++ .../akka/cluster/MultiNodeClusterSpec.scala | 11 ++ 3 files changed, 142 insertions(+), 1 deletion(-) create mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index 98d0a3f11e..3729a0b3b4 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -865,7 +865,6 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ val localGossip = localState.latestGossip val localOverview = localGossip.overview - val localSeen = localOverview.seen val localMembers = localGossip.members val localUnreachableMembers = localGossip.overview.unreachable diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala new file mode 100644 index 0000000000..eeb9b864ed --- /dev/null +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala @@ -0,0 +1,131 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. 
+ */ +package akka.cluster + +import com.typesafe.config.ConfigFactory +import org.scalatest.BeforeAndAfter +import akka.remote.testkit.MultiNodeConfig +import akka.remote.testkit.MultiNodeSpec +import akka.testkit._ +import akka.util.duration._ +import akka.actor.Address + +object ConvergenceMultiJvmSpec extends MultiNodeConfig { + val first = role("first") + val second = role("second") + val third = role("third") + val fourth = role("fourth") + + commonConfig(debugConfig(on = false). + withFallback(ConfigFactory.parseString(""" + akka.cluster { + failure-detector.threshold = 4 + } + """)). + withFallback(MultiNodeClusterSpec.clusterConfig)) +} + +class ConvergenceMultiJvmNode1 extends ConvergenceSpec +class ConvergenceMultiJvmNode2 extends ConvergenceSpec +class ConvergenceMultiJvmNode3 extends ConvergenceSpec +class ConvergenceMultiJvmNode4 extends ConvergenceSpec + +abstract class ConvergenceSpec + extends MultiNodeSpec(ConvergenceMultiJvmSpec) + with MultiNodeClusterSpec with BeforeAndAfter { + import ConvergenceMultiJvmSpec._ + + override def initialParticipants = 4 + + after { + testConductor.enter("after") + } + + "A cluster of 3 members" must { + + "reach initial convergence" taggedAs LongRunningTest in { + runOn(first) { + cluster.self + awaitUpConvergence(numberOfMembers = 3) + } + + runOn(second, third) { + cluster.join(node(first).address) + awaitUpConvergence(numberOfMembers = 3) + } + + runOn(fourth) { + // doesn't join immediately + } + } + + "not reach convergence while any nodes are unreachable" taggedAs LongRunningTest in { + val thirdAddress = node(third).address + testConductor.enter("before-shutdown") + + runOn(first) { + // kill 'third' node + testConductor.shutdown(third, 0) + testConductor.removeNode(third) + } + + runOn(first, second) { + val firstAddress = node(first).address + val secondAddress = node(second).address + + within(30 seconds) { + // third becomes unreachable + awaitCond(cluster.latestGossip.overview.unreachable.size == 1) + 
awaitCond(cluster.latestGossip.members.size == 2) + awaitCond(cluster.latestGossip.members.forall(_.status == MemberStatus.Up)) + awaitSeenSameState(Seq(firstAddress, secondAddress)) + // still one unreachable + cluster.latestGossip.overview.unreachable.size must be(1) + cluster.latestGossip.overview.unreachable.head.address must be(thirdAddress) + // and therefore no convergence + cluster.convergence.isDefined must be(false) + + } + } + + } + + "not move a new joining node to Up while there is no convergence" taggedAs LongRunningTest in { + runOn(fourth) { + // try to join + cluster.join(node(first).address) + } + + val firstAddress = node(first).address + val secondAddress = node(second).address + val fourthAddress = node(fourth).address + + def memberStatus(address: Address): Option[MemberStatus] = + cluster.latestGossip.members.collectFirst { case m if m.address == address ⇒ m.status } + + def assertNotMovedUp: Unit = { + within(20 seconds) { + awaitCond(cluster.latestGossip.members.size == 3) + awaitSeenSameState(Seq(firstAddress, secondAddress, fourthAddress)) + memberStatus(firstAddress) must be(Some(MemberStatus.Up)) + memberStatus(secondAddress) must be(Some(MemberStatus.Up)) + // leader is not allowed to move the new node to Up + memberStatus(fourthAddress) must be(Some(MemberStatus.Joining)) + // still no convergence + cluster.convergence.isDefined must be(false) + } + } + + runOn(first, second, fourth) { + for (n ← 1 to 5) { + log.debug("assertNotMovedUp#" + n) + assertNotMovedUp + // wait and then check again + 1.second.dilated.sleep + } + } + + } + } +} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala index cb679c12b7..4c0232cf9e 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala @@ -71,6 +71,17 @@ trait MultiNodeClusterSpec { 
self: MultiNodeSpec ⇒ } } + /** + * Wait until the specified nodes have seen the same gossip overview. + */ + def awaitSeenSameState(addresses: Seq[Address]): Unit = { + awaitCond { + val seen = cluster.latestGossip.overview.seen + val seenVectorClocks = addresses.flatMap(seen.get(_)) + seenVectorClocks.size == addresses.size && seenVectorClocks.toSet.size == 1 + } + } + def roleOfLeader(nodesInCluster: Seq[RoleName]): RoleName = { nodesInCluster.length must not be (0) nodesInCluster.sorted.head From 17ee47079a2430c199b4154f176f22216b9fba9b Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Sat, 2 Jun 2012 14:49:28 +0200 Subject: [PATCH 18/92] Incorporating Roland's feedback --- .../src/test/scala/akka/routing/RoutingSpec.scala | 4 ++-- akka-actor/src/main/scala/akka/actor/Actor.scala | 2 +- .../src/main/scala/akka/actor/ActorCell.scala | 11 ++++++----- .../src/main/scala/akka/actor/ActorRef.scala | 7 ++++--- .../main/scala/akka/actor/ActorRefProvider.scala | 12 +++++++----- .../src/main/scala/akka/actor/ActorSystem.scala | 15 +++------------ .../scala/akka/dispatch/AbstractDispatcher.scala | 4 ++-- .../src/main/scala/akka/pattern/AskSupport.scala | 6 +++--- .../scala/akka/pattern/GracefulStopSupport.scala | 11 +++++++---- .../code/docs/actor/FaultHandlingDocSpec.scala | 4 ++-- .../src/test/scala/akka/testkit/AkkaSpec.scala | 1 - 11 files changed, 37 insertions(+), 40 deletions(-) diff --git a/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala index 98d3e71384..f1952b8f79 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala @@ -73,7 +73,7 @@ class RoutingSpec extends AkkaSpec(RoutingSpec.config) with DefaultTimeout with watch(router) watch(c2) system.stop(c2) - expectMsg(Terminated(c2)(stopped = true)) + expectMsg(Terminated(c2)(existenceConfirmed = true)) // it might take a while until the Router 
has actually processed the Terminated message awaitCond { router ! "" @@ -84,7 +84,7 @@ class RoutingSpec extends AkkaSpec(RoutingSpec.config) with DefaultTimeout with res == Seq(c1, c1) } system.stop(c1) - expectMsg(Terminated(router)(stopped = true)) + expectMsg(Terminated(router)(existenceConfirmed = true)) } "be able to send their routees" in { diff --git a/akka-actor/src/main/scala/akka/actor/Actor.scala b/akka-actor/src/main/scala/akka/actor/Actor.scala index c8962e819f..c795534cdf 100644 --- a/akka-actor/src/main/scala/akka/actor/Actor.scala +++ b/akka-actor/src/main/scala/akka/actor/Actor.scala @@ -59,7 +59,7 @@ case object Kill extends Kill { /** * When Death Watch is used, the watcher will receive a Terminated(watched) message when watched is terminated. */ -case class Terminated(@BeanProperty actor: ActorRef)(@BeanProperty stopped: Boolean) +case class Terminated(@BeanProperty actor: ActorRef)(@BeanProperty existenceConfirmed: Boolean) abstract class ReceiveTimeout extends PossiblyHarmful diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index 3db70d5735..736e004c6e 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -724,19 +724,20 @@ private[akka] class ActorCell( parent.sendSystemMessage(ChildTerminated(self)) if (!watchedBy.isEmpty) { - val terminated = Terminated(self)(stopped = true) + val terminated = Terminated(self)(existenceConfirmed = true) try { watchedBy foreach { - watcher ⇒ try watcher.tell(terminated, self) catch { - case NonFatal(t) ⇒ system.eventStream.publish(Error(t, self.path.toString, clazz(a), "deathwatch")) - } + watcher ⇒ + try watcher.tell(terminated, self) catch { + case NonFatal(t) ⇒ system.eventStream.publish(Error(t, self.path.toString, clazz(a), "deathwatch")) + } } } finally watchedBy = emptyActorRefSet } if (!watching.isEmpty) { try { - watching foreach { + watching foreach { 
// ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ case watchee: InternalActorRef ⇒ try watchee.sendSystemMessage(Unwatch(watchee, self)) catch { case NonFatal(t) ⇒ system.eventStream.publish(Error(t, self.path.toString, clazz(a), "deathwatch")) } diff --git a/akka-actor/src/main/scala/akka/actor/ActorRef.scala b/akka-actor/src/main/scala/akka/actor/ActorRef.scala index a713a61ddc..7368ae434a 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRef.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRef.scala @@ -424,10 +424,11 @@ private[akka] class EmptyLocalActorRef(override val provider: ActorRefProvider, protected def specialHandle(msg: Any): Boolean = msg match { case w: Watch ⇒ - if (w.watchee == this && w.watcher != this) w.watcher ! Terminated(w.watchee)(stopped = false) + if (w.watchee == this && w.watcher != this) + w.watcher ! Terminated(w.watchee)(existenceConfirmed = false) true - case w: Unwatch ⇒ true // Just ignore + case _: Unwatch ⇒ true // Just ignore case _ ⇒ false } } @@ -449,7 +450,7 @@ private[akka] class DeadLetterActorRef(_provider: ActorRefProvider, override protected def specialHandle(msg: Any): Boolean = msg match { case w: Watch ⇒ - if (w.watchee != this && w.watcher != this) w.watcher ! Terminated(w.watchee)(stopped = false) + if (w.watchee != this && w.watcher != this) w.watcher ! Terminated(w.watchee)(existenceConfirmed = false) true case w: Unwatch ⇒ true // Just ignore diff --git a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala index eede9e1bef..6807e34c55 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala @@ -403,8 +403,8 @@ class LocalActorRefProvider( def receive = { case Terminated(_) ⇒ context.stop(self) - case CreateChild(child, name) ⇒ sender ! 
(try context.actorOf(child, name) catch { case NonFatal(e) ⇒ e }) // FIXME shouldn't this use NonFatal & Status.Failure? - case CreateRandomNameChild(child) ⇒ sender ! (try context.actorOf(child) catch { case NonFatal(e) ⇒ e }) // FIXME shouldn't this use NonFatal & Status.Failure? + case CreateChild(child, name) ⇒ sender ! (try context.actorOf(child, name) catch { case NonFatal(e) ⇒ Status.Failure(e) }) + case CreateRandomNameChild(child) ⇒ sender ! (try context.actorOf(child) catch { case NonFatal(e) ⇒ Status.Failure(e) }) case StopChild(child) ⇒ context.stop(child); sender ! "ok" case m ⇒ deadLetters ! DeadLetter(m, sender, self) } @@ -435,8 +435,8 @@ class LocalActorRefProvider( def receive = { case Terminated(_) ⇒ eventStream.stopDefaultLoggers(); context.stop(self) - case CreateChild(child, name) ⇒ sender ! (try context.actorOf(child, name) catch { case e: Exception ⇒ e }) // FIXME shouldn't this use NonFatal & Status.Failure? - case CreateRandomNameChild(child) ⇒ sender ! (try context.actorOf(child) catch { case e: Exception ⇒ e }) // FIXME shouldn't this use NonFatal & Status.Failure? + case CreateChild(child, name) ⇒ sender ! (try context.actorOf(child, name) catch { case NonFatal(e) ⇒ Status.Failure(e) }) + case CreateRandomNameChild(child) ⇒ sender ! (try context.actorOf(child) catch { case NonFatal(e) ⇒ Status.Failure(e) }) case StopChild(child) ⇒ context.stop(child); sender ! "ok" case m ⇒ deadLetters ! 
DeadLetter(m, sender, self) } @@ -502,8 +502,10 @@ class LocalActorRefProvider( def init(_system: ActorSystemImpl) { system = _system // chain death watchers so that killing guardian stops the application - guardian.sendSystemMessage(Watch(guardian, systemGuardian)) + systemGuardian.sendSystemMessage(Watch(guardian, systemGuardian)) rootGuardian.sendSystemMessage(Watch(systemGuardian, rootGuardian)) + //guardian.sendSystemMessage(Watch(guardian, systemGuardian)) + //rootGuardian.sendSystemMessage(Watch(systemGuardian, rootGuardian)) eventStream.startDefaultLoggers(_system) } diff --git a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala index 993e7e98e4..008610c333 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala @@ -480,26 +480,17 @@ private[akka] class ActorSystemImpl(val name: String, applicationConfig: Config, private[akka] def systemActorOf(props: Props, name: String): ActorRef = { implicit val timeout = settings.CreationTimeout - Await.result(systemGuardian ? CreateChild(props, name), timeout.duration) match { - case ref: ActorRef ⇒ ref - case ex: Exception ⇒ throw ex - } + Await.result((systemGuardian ? CreateChild(props, name)).mapTo[ActorRef], timeout.duration) } def actorOf(props: Props, name: String): ActorRef = { implicit val timeout = settings.CreationTimeout - Await.result(guardian ? CreateChild(props, name), timeout.duration) match { - case ref: ActorRef ⇒ ref - case ex: Exception ⇒ throw ex - } + Await.result((guardian ? CreateChild(props, name)).mapTo[ActorRef], timeout.duration) } def actorOf(props: Props): ActorRef = { implicit val timeout = settings.CreationTimeout - Await.result(guardian ? CreateRandomNameChild(props), timeout.duration) match { - case ref: ActorRef ⇒ ref - case ex: Exception ⇒ throw ex - } + Await.result((guardian ? 
CreateRandomNameChild(props)).mapTo[ActorRef], timeout.duration) } def stop(actor: ActorRef): Unit = { diff --git a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala index 8e160276e8..9517a59b7c 100644 --- a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala @@ -102,11 +102,11 @@ private[akka] case class ChildTerminated(child: ActorRef) extends SystemMessage /** * INTERNAL API */ -private[akka] case class Watch(watchee: ActorRef, watcher: ActorRef) extends SystemMessage // sent to self from ActorCell.watch +private[akka] case class Watch(watchee: ActorRef, watcher: ActorRef) extends SystemMessage // sent to establish a DeathWatch /** * INTERNAL API */ -private[akka] case class Unwatch(watchee: ActorRef, watcher: ActorRef) extends SystemMessage // sent to self from ActorCell.unwatch +private[akka] case class Unwatch(watchee: ActorRef, watcher: ActorRef) extends SystemMessage // sent to tear down a DeathWatch final case class TaskInvocation(eventStream: EventStream, runnable: Runnable, cleanup: () ⇒ Unit) extends Runnable { def run(): Unit = diff --git a/akka-actor/src/main/scala/akka/pattern/AskSupport.scala b/akka-actor/src/main/scala/akka/pattern/AskSupport.scala index 42154ff522..c66fa4178d 100644 --- a/akka-actor/src/main/scala/akka/pattern/AskSupport.scala +++ b/akka-actor/src/main/scala/akka/pattern/AskSupport.scala @@ -192,7 +192,7 @@ private[akka] final class PromiseActorRef private (val provider: ActorRefProvide @tailrec // Returns false if the Promise is already completed private[this] final def addWatcher(watcher: ActorRef): Boolean = watchedBy match { case null ⇒ false - case other ⇒ if (updateWatchedBy(other, other + watcher)) true else addWatcher(watcher) + case other ⇒ updateWatchedBy(other, other + watcher) || addWatcher(watcher) } @tailrec @@ -259,7 +259,7 @@ private[akka] final class 
PromiseActorRef private (val provider: ActorRefProvide case _: Terminate ⇒ stop() case Watch(watchee, watcher) ⇒ if (watchee == this && watcher != this) { - if (!addWatcher(watcher)) watcher ! Terminated(watchee)(stopped = true) + if (!addWatcher(watcher)) watcher ! Terminated(watchee)(existenceConfirmed = true) } else System.err.println("BUG: illegal Watch(%s,%s) for %s".format(watchee, watcher, this)) case Unwatch(watchee, watcher) ⇒ if (watchee == this && watcher != this) remWatcher(watcher) @@ -278,7 +278,7 @@ private[akka] final class PromiseActorRef private (val provider: ActorRefProvide if (!result.isCompleted) result.tryComplete(Left(new ActorKilledException("Stopped"))) val watchers = clearWatchers() if (!watchers.isEmpty) { - val termination = Terminated(this)(stopped = true) + val termination = Terminated(this)(existenceConfirmed = true) watchers foreach { w ⇒ try w.tell(termination, this) catch { case NonFatal(t) ⇒ /* FIXME LOG THIS */ } } } } diff --git a/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala b/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala index 35004e637d..91293cb0d1 100644 --- a/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala +++ b/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala @@ -40,12 +40,15 @@ trait GracefulStopSupport { val internalTarget = target.asInstanceOf[InternalActorRef] val ref = PromiseActorRef(e.provider, Timeout(timeout)) internalTarget.sendSystemMessage(Watch(target, ref)) - val result = ref.result map { - case Terminated(`target`) ⇒ true - case _ ⇒ internalTarget.sendSystemMessage(Unwatch(target, ref)); false // Just making sure we're not leaking here + ref.result onComplete { // Just making sure we're not leaking here + case Right(Terminated(`target`)) ⇒ () + case _ ⇒ internalTarget.sendSystemMessage(Unwatch(target, ref)) } target ! 
PoisonPill - result + ref.result map { + case Terminated(`target`) ⇒ true + case _ ⇒ false + } case s ⇒ throw new IllegalArgumentException("Unknown ActorSystem implementation: '" + s + "'") } } diff --git a/akka-docs/scala/code/docs/actor/FaultHandlingDocSpec.scala b/akka-docs/scala/code/docs/actor/FaultHandlingDocSpec.scala index 4e0fdc5ee5..65e03bd2ea 100644 --- a/akka-docs/scala/code/docs/actor/FaultHandlingDocSpec.scala +++ b/akka-docs/scala/code/docs/actor/FaultHandlingDocSpec.scala @@ -111,7 +111,7 @@ class FaultHandlingDocSpec extends AkkaSpec with ImplicitSender { //#stop watch(child) // have testActor watch “child” child ! new IllegalArgumentException // break it - expectMsg(Terminated(child)(stopped = true)) + expectMsg(Terminated(child)(existenceConfirmed = true)) child.isTerminated must be(true) //#stop } @@ -125,7 +125,7 @@ class FaultHandlingDocSpec extends AkkaSpec with ImplicitSender { expectMsg(0) child2 ! new Exception("CRASH") // escalate failure - expectMsg(Terminated(child2)(stopped = true)) + expectMsg(Terminated(child2)(existenceConfirmed = true)) //#escalate-kill //#escalate-restart val supervisor2 = system.actorOf(Props[Supervisor2], "supervisor2") diff --git a/akka-testkit/src/test/scala/akka/testkit/AkkaSpec.scala b/akka-testkit/src/test/scala/akka/testkit/AkkaSpec.scala index d2eeeee776..5eb0c0538a 100644 --- a/akka-testkit/src/test/scala/akka/testkit/AkkaSpec.scala +++ b/akka-testkit/src/test/scala/akka/testkit/AkkaSpec.scala @@ -12,7 +12,6 @@ import akka.util.duration._ import com.typesafe.config.Config import com.typesafe.config.ConfigFactory import akka.actor.PoisonPill -import akka.actor.CreateChild import akka.actor.DeadLetter import java.util.concurrent.TimeoutException import akka.dispatch.{ Await, MessageDispatcher } From 5848c88cbaad23b1429a54b7d5443da8d1e7e3c5 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Sat, 2 Jun 2012 23:07:51 +0200 Subject: [PATCH 19/92] Making sure we use vals for the fields in Terminated --- 
akka-actor/src/main/scala/akka/actor/Actor.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/akka-actor/src/main/scala/akka/actor/Actor.scala b/akka-actor/src/main/scala/akka/actor/Actor.scala index c795534cdf..2721ccffa0 100644 --- a/akka-actor/src/main/scala/akka/actor/Actor.scala +++ b/akka-actor/src/main/scala/akka/actor/Actor.scala @@ -59,7 +59,7 @@ case object Kill extends Kill { /** * When Death Watch is used, the watcher will receive a Terminated(watched) message when watched is terminated. */ -case class Terminated(@BeanProperty actor: ActorRef)(@BeanProperty existenceConfirmed: Boolean) +case class Terminated(@BeanProperty actor: ActorRef)(@BeanProperty val existenceConfirmed: Boolean) abstract class ReceiveTimeout extends PossiblyHarmful From 3c7ade3cdb2bdfde90648a480a837ff18e1875ed Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Mon, 4 Jun 2012 08:49:05 +0200 Subject: [PATCH 20/92] Utility to replace jvm and host:port with role in logs, see 2173 --- .../akka/remote/testkit/LogRoleReplace.scala | 148 ++++++++++++++++++ .../akka/remote/testkit/MultiNodeSpec.scala | 3 + 2 files changed, 151 insertions(+) create mode 100644 akka-remote-tests/src/test/scala/akka/remote/testkit/LogRoleReplace.scala diff --git a/akka-remote-tests/src/test/scala/akka/remote/testkit/LogRoleReplace.scala b/akka-remote-tests/src/test/scala/akka/remote/testkit/LogRoleReplace.scala new file mode 100644 index 0000000000..3b3527240e --- /dev/null +++ b/akka-remote-tests/src/test/scala/akka/remote/testkit/LogRoleReplace.scala @@ -0,0 +1,148 @@ +package akka.remote.testkit + +import java.awt.Toolkit +import java.awt.datatransfer.Clipboard +import java.awt.datatransfer.ClipboardOwner +import java.awt.datatransfer.DataFlavor +import java.awt.datatransfer.StringSelection +import java.awt.datatransfer.Transferable +import java.io.BufferedReader +import java.io.FileReader +import java.io.FileWriter +import java.io.InputStreamReader +import 
java.io.OutputStreamWriter +import java.io.PrintWriter +import java.io.StringReader +import java.io.StringWriter +import scala.annotation.tailrec + +/** + * Utility to make log files from multi-node tests easier to analyze. + * Replaces jvm names and host:port with corresponding logical role name. + */ +object LogRoleReplace extends ClipboardOwner { + + /** + * Main program. Use with 0, 1 or 2 arguments. + * + * When using 0 arguments it reads from standard input + * (System.in) and writes to standard output (System.out). + * + * With 1 argument it reads from the file specified in the first argument + * and writes to standard output. + * + * With 2 arguments it reads the file specified in the first argument + * and writes to the file specified in the second argument. + * + * You can also replace the contents of the clipboard instead of using files + * by supplying `clipboard` as argument + */ + def main(args: Array[String]): Unit = { + val replacer = new LogRoleReplace + + if (args.length == 0) { + replacer.process( + new BufferedReader(new InputStreamReader(System.in)), + new PrintWriter(new OutputStreamWriter(System.out))) + + } else if (args(0) == "clipboard") { + val clipboard = Toolkit.getDefaultToolkit.getSystemClipboard + val contents = clipboard.getContents(null) + if (contents != null && contents.isDataFlavorSupported(DataFlavor.stringFlavor)) { + val text = contents.getTransferData(DataFlavor.stringFlavor).asInstanceOf[String] + val result = new StringWriter + replacer.process( + new BufferedReader(new StringReader(text)), + new PrintWriter(result)) + clipboard.setContents(new StringSelection(result.toString), this) + println("Replaced clipboard contents") + } + + } else if (args.length == 1) { + val inputFile = new BufferedReader(new FileReader(args(0))) + try { + replacer.process( + inputFile, + new PrintWriter(new OutputStreamWriter(System.out))) + } finally { + inputFile.close() + } + + } else if (args.length == 2) { + val outputFile = new 
PrintWriter(new FileWriter(args(1))) + val inputFile = new BufferedReader(new FileReader(args(0))) + try { + replacer.process(inputFile, outputFile) + } finally { + outputFile.close() + inputFile.close() + } + } + } + + /** + * Empty implementation of the ClipboardOwner interface + */ + def lostOwnership(clipboard: Clipboard, contents: Transferable): Unit = () +} + +class LogRoleReplace { + + private val RoleStarted = """\[([\w\-]+)\].*Role \[([\w]+)\] started""".r + private val RemoteServerStarted = """\[([\w\-]+)\].*RemoteServerStarted@akka://.*@([\w\-\.]+):([0-9]+)""".r + + private var replacements: Map[String, String] = Map.empty + private var jvmToAddress: Map[String, String] = Map.empty + + def process(in: BufferedReader, out: PrintWriter): Unit = { + + @tailrec + def processLines(line: String): Unit = if (line ne null) { + out.println(processLine(line)) + processLines(in.readLine) + } + + processLines(in.readLine()) + } + + def processLine(line: String): String = { + if (updateReplacements(line)) + replaceLine(line) + else + line + } + + private def updateReplacements(line: String): Boolean = { + if (line.startsWith("[info] * ")) { + // reset when new test begins + replacements = Map.empty + jvmToAddress = Map.empty + } + + line match { + case RemoteServerStarted(jvm, host, port) ⇒ + jvmToAddress += (jvm -> (host + ":" + port)) + false + + case RoleStarted(jvm, role) ⇒ + jvmToAddress.get(jvm) match { + case Some(address) ⇒ + replacements += (jvm -> role) + replacements += (address -> role) + false + case None ⇒ false + } + + case _ ⇒ true + } + } + + private def replaceLine(line: String): String = { + var result = line + for ((from, to) ← replacements) { + result = result.replaceAll(from, to) + } + result + } + +} \ No newline at end of file diff --git a/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala b/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala index 35a9cc14e7..8ab65aa2c3 100644 --- 
a/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala +++ b/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala @@ -249,4 +249,7 @@ abstract class MultiNodeSpec(val myself: RoleName, _system: ActorSystem, roles: } } + // useful to see which jvm is running which role + log.info("Role [{}] started", myself.name) + } \ No newline at end of file From de59444795e257fb9a310e202204b2c8159168ac Mon Sep 17 00:00:00 2001 From: Roland Date: Mon, 4 Jun 2012 10:03:41 +0200 Subject: [PATCH 21/92] offer TestKitBase trait, see #2174 --- .../code/docs/testkit/TestkitDocSpec.scala | 20 +++++++++++++++++++ akka-docs/scala/testing.rst | 14 +++++++++++++ .../src/main/scala/akka/testkit/TestKit.scala | 6 ++++-- 3 files changed, 38 insertions(+), 2 deletions(-) diff --git a/akka-docs/scala/code/docs/testkit/TestkitDocSpec.scala b/akka-docs/scala/code/docs/testkit/TestkitDocSpec.scala index ddb3eeaf1d..96c7857990 100644 --- a/akka-docs/scala/code/docs/testkit/TestkitDocSpec.scala +++ b/akka-docs/scala/code/docs/testkit/TestkitDocSpec.scala @@ -14,6 +14,8 @@ import akka.dispatch.Futures import akka.testkit.AkkaSpec import akka.testkit.DefaultTimeout import akka.testkit.ImplicitSender +import akka.util.NonFatal + object TestkitDocSpec { case object Say42 case object Unknown @@ -251,5 +253,23 @@ class TestkitDocSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { } //#event-filter } + + "demonstrate TestKitBase" in { + //#test-kit-base + import akka.testkit.TestKitBase + + class MyTest extends TestKitBase { + implicit lazy val system = ActorSystem() + + //#put-your-test-code-here + val probe = TestProbe() + probe.send(testActor, "hello") + try expectMsg("hello") catch { case NonFatal(e) => system.shutdown(); throw e } + //#put-your-test-code-here + + system.shutdown() + } + //#test-kit-base + } } diff --git a/akka-docs/scala/testing.rst b/akka-docs/scala/testing.rst index a98ee14917..d2875ed62a 100644 --- a/akka-docs/scala/testing.rst +++ 
b/akka-docs/scala/testing.rst @@ -671,6 +671,20 @@ This section contains a collection of known gotchas with some other frameworks, which is by no means exhaustive and does not imply endorsement or special support. +When you need it to be a trait +------------------------------ + +If for some reason it is a problem to inherit from :class:`TestKit` due to it +being a concrete class instead of a trait, there’s :class:`TestKitBase`: + +.. includecode:: code/docs/testkit/TestkitDocSpec.scala + :include: test-kit-base + :exclude: put-your-test-code-here + +The ``implicit lazy val system`` must be declared exactly like that (you can of +course pass arguments to the actor system factory as needed) because trait +:class:`TestKitBase` needs the system during its construction. + Specs2 ------ diff --git a/akka-testkit/src/main/scala/akka/testkit/TestKit.scala b/akka-testkit/src/main/scala/akka/testkit/TestKit.scala index 156a9d8612..6d8f73e7b8 100644 --- a/akka-testkit/src/main/scala/akka/testkit/TestKit.scala +++ b/akka-testkit/src/main/scala/akka/testkit/TestKit.scala @@ -101,11 +101,11 @@ class TestActor(queue: BlockingDeque[TestActor.Message]) extends Actor { * @author Roland Kuhn * @since 1.1 */ -class TestKit(_system: ActorSystem) { +trait TestKitBase { import TestActor.{ Message, RealMessage, NullMessage } - implicit val system = _system + implicit val system: ActorSystem val testKitSettings = TestKitExtension(system) private val queue = new LinkedBlockingDeque[Message]() @@ -579,6 +579,8 @@ class TestKit(_system: ActorSystem) { private def format(u: TimeUnit, d: Duration) = "%.3f %s".format(d.toUnit(u), u.toString.toLowerCase) } +class TestKit(_system: ActorSystem) extends { implicit val system = _system } with TestKitBase + object TestKit { private[testkit] val testActorId = new AtomicInteger(0) From a515377592b6daae707a9b49c21717763edb554e Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 4 Jun 2012 10:35:27 +0200 Subject: [PATCH 22/92] Formatting --- 
.../src/main/scala/akka/dispatch/ThreadPoolBuilder.scala | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala b/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala index b6fd432296..25125ae149 100644 --- a/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala +++ b/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala @@ -157,7 +157,8 @@ case class ThreadPoolConfigBuilder(config: ThreadPoolConfig) { def setQueueFactory(newQueueFactory: QueueFactory): ThreadPoolConfigBuilder = this.copy(config = config.copy(queueFactory = newQueueFactory)) - def configure(fs: Option[Function[ThreadPoolConfigBuilder, ThreadPoolConfigBuilder]]*): ThreadPoolConfigBuilder = fs.foldLeft(this)((c, f) ⇒ f.map(_(c)).getOrElse(c)) + def configure(fs: Option[Function[ThreadPoolConfigBuilder, ThreadPoolConfigBuilder]]*): ThreadPoolConfigBuilder = + fs.foldLeft(this)((c, f) ⇒ f.map(_(c)).getOrElse(c)) } object MonitorableThreadFactory { From df479a0bf09c0cda9b646a341f9903674cd23f7a Mon Sep 17 00:00:00 2001 From: Roland Date: Mon, 4 Jun 2012 11:29:56 +0200 Subject: [PATCH 23/92] add back TestProbe.reply, see #2172 --- .../scala/code/docs/testkit/TestkitDocSpec.scala | 12 ++++++------ .../src/main/scala/akka/testkit/TestKit.scala | 13 +++++++------ 2 files changed, 13 insertions(+), 12 deletions(-) diff --git a/akka-docs/scala/code/docs/testkit/TestkitDocSpec.scala b/akka-docs/scala/code/docs/testkit/TestkitDocSpec.scala index 96c7857990..564b7929ce 100644 --- a/akka-docs/scala/code/docs/testkit/TestkitDocSpec.scala +++ b/akka-docs/scala/code/docs/testkit/TestkitDocSpec.scala @@ -210,7 +210,7 @@ class TestkitDocSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { val probe = TestProbe() val future = probe.ref ? "hello" probe.expectMsg(0 millis, "hello") // TestActor runs on CallingThreadDispatcher - probe.sender ! 
"world" + probe.reply("world") assert(future.isCompleted && future.value == Some(Right("world"))) //#test-probe-reply } @@ -253,20 +253,20 @@ class TestkitDocSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { } //#event-filter } - + "demonstrate TestKitBase" in { //#test-kit-base import akka.testkit.TestKitBase - + class MyTest extends TestKitBase { implicit lazy val system = ActorSystem() - + //#put-your-test-code-here val probe = TestProbe() probe.send(testActor, "hello") - try expectMsg("hello") catch { case NonFatal(e) => system.shutdown(); throw e } + try expectMsg("hello") catch { case NonFatal(e) ⇒ system.shutdown(); throw e } //#put-your-test-code-here - + system.shutdown() } //#test-kit-base diff --git a/akka-testkit/src/main/scala/akka/testkit/TestKit.scala b/akka-testkit/src/main/scala/akka/testkit/TestKit.scala index 6d8f73e7b8..9dfa40a5ee 100644 --- a/akka-testkit/src/main/scala/akka/testkit/TestKit.scala +++ b/akka-testkit/src/main/scala/akka/testkit/TestKit.scala @@ -642,22 +642,23 @@ class TestProbe(_application: ActorSystem) extends TestKit(_application) { * Replies will be available for inspection with all of TestKit's assertion * methods. */ - def send(actor: ActorRef, msg: AnyRef) = { - actor.!(msg)(testActor) - } + def send(actor: ActorRef, msg: Any): Unit = actor.!(msg)(testActor) /** * Forward this message as if in the TestActor's receive method with self.forward. */ - def forward(actor: ActorRef, msg: AnyRef = lastMessage.msg) { - actor.!(msg)(lastMessage.sender) - } + def forward(actor: ActorRef, msg: Any = lastMessage.msg): Unit = actor.!(msg)(lastMessage.sender) /** * Get sender of last received message. */ def sender = lastMessage.sender + /** + * Send message to the sender of the last dequeued message. 
+ */ + def reply(msg: Any): Unit = sender.!(msg)(ref) + } object TestProbe { From 2e788c970450cbd5e9efdab13f88c08e6812980e Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 4 Jun 2012 11:46:59 +0200 Subject: [PATCH 24/92] Adding some sanity and some CAS-magic --- .../java/akka/actor/AbstractActorCell.java | 19 +++++++++++++ .../src/main/scala/akka/actor/ActorCell.scala | 28 +++++++++++++++---- .../akka/dispatch/AbstractDispatcher.scala | 15 ++++------ .../akka/dispatch/BalancingDispatcher.scala | 4 +-- .../main/scala/akka/dispatch/Mailbox.scala | 2 +- 5 files changed, 50 insertions(+), 18 deletions(-) create mode 100644 akka-actor/src/main/java/akka/actor/AbstractActorCell.java diff --git a/akka-actor/src/main/java/akka/actor/AbstractActorCell.java b/akka-actor/src/main/java/akka/actor/AbstractActorCell.java new file mode 100644 index 0000000000..d6005f463c --- /dev/null +++ b/akka-actor/src/main/java/akka/actor/AbstractActorCell.java @@ -0,0 +1,19 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. 
+ */ + +package akka.actor; + +import akka.util.Unsafe; + +final class AbstractActorCell { + final static long mailboxOffset; + + static { + try { + mailboxOffset = Unsafe.instance.objectFieldOffset(ActorCell.class.getDeclaredField("_mailboxDoNotCallMeDirectly")); + } catch(Throwable t){ + throw new ExceptionInInitializerError(t); + } + } +} diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index 23cd796ad2..c74010668b 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -9,13 +9,12 @@ import scala.annotation.tailrec import java.util.concurrent.TimeUnit import java.util.concurrent.TimeUnit.MILLISECONDS import akka.event.Logging.{ Debug, Warning, Error } -import akka.util.{ Duration, Helpers } import akka.japi.Procedure import java.io.{ NotSerializableException, ObjectOutputStream } import akka.serialization.SerializationExtension -import akka.util.NonFatal import akka.event.Logging.LogEventException import collection.immutable.{ TreeSet, Stack, TreeMap } +import akka.util.{ Unsafe, Duration, Helpers, NonFatal } //TODO: everything here for current compatibility - could be limited more @@ -319,7 +318,7 @@ private[akka] class ActorCell( val props: Props, @volatile var parent: InternalActorRef, /*no member*/ _receiveTimeout: Option[Duration]) extends UntypedActorContext { - + import AbstractActorCell.mailboxOffset import ActorCell._ final def systemImpl = system @@ -412,8 +411,7 @@ private[akka] class ActorCell( var currentMessage: Envelope = _ var actor: Actor = _ private var behaviorStack: Stack[Actor.Receive] = Stack.empty - @volatile //This must be volatile since it isn't protected by the mailbox status - var mailbox: Mailbox = _ + @volatile var _mailboxDoNotCallMeDirectly: Mailbox = _ //This must be volatile since it isn't protected by the mailbox status var nextNameSequence: Long = 0 var watching: Set[ActorRef] = 
emptyActorRefSet var watchedBy: Set[ActorRef] = emptyActorRefSet @@ -428,6 +426,24 @@ private[akka] class ActorCell( @inline final val dispatcher: MessageDispatcher = system.dispatchers.lookup(props.dispatcher) + /** + * INTERNAL API + * + * Returns a reference to the current mailbox + */ + @inline final def mailbox: Mailbox = Unsafe.instance.getObjectVolatile(this, mailboxOffset).asInstanceOf[Mailbox] + + /** + * INTERNAL API + * + * replaces the current mailbox using getAndSet semantics + */ + @tailrec final def swapMailbox(newMailbox: Mailbox): Mailbox = { + val oldMailbox = mailbox + if (!Unsafe.instance.compareAndSwapObject(this, mailboxOffset, oldMailbox, newMailbox)) swapMailbox(newMailbox) + else oldMailbox + } + /** * UntypedActorContext impl */ @@ -440,7 +456,7 @@ private[akka] class ActorCell( * Create the mailbox and enqueue the Create() message to ensure that * this is processed before anything else. */ - mailbox = dispatcher.createMailbox(this) + swapMailbox(dispatcher.createMailbox(this)) // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ mailbox.systemEnqueue(self, Create()) diff --git a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala index 8e160276e8..4692486307 100644 --- a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala @@ -310,16 +310,14 @@ abstract class MessageDispatcher(val prerequisites: DispatcherPrerequisites) ext case 0 ⇒ shutdownSchedule match { case UNSCHEDULED ⇒ - if (updateShutdownSchedule(UNSCHEDULED, SCHEDULED)) { - scheduleShutdownAction() - () - } else ifSensibleToDoSoThenScheduleShutdown() + if (updateShutdownSchedule(UNSCHEDULED, SCHEDULED)) scheduleShutdownAction() + else ifSensibleToDoSoThenScheduleShutdown() case SCHEDULED ⇒ if (updateShutdownSchedule(SCHEDULED, RESCHEDULED)) () else ifSensibleToDoSoThenScheduleShutdown() - case RESCHEDULED 
⇒ () + case RESCHEDULED ⇒ } - case _ ⇒ () + case _ ⇒ } private def scheduleShutdownAction(): Unit = { @@ -349,9 +347,8 @@ abstract class MessageDispatcher(val prerequisites: DispatcherPrerequisites) ext protected[akka] def unregister(actor: ActorCell) { if (debug) actors.remove(this, actor.self) addInhabitants(-1) - val mailBox = actor.mailbox + val mailBox = actor.swapMailbox(deadLetterMailbox) mailBox.becomeClosed() // FIXME reschedule in tell if possible race with cleanUp is detected in order to properly clean up - actor.mailbox = deadLetterMailbox mailBox.cleanUp() } @@ -359,7 +356,6 @@ abstract class MessageDispatcher(val prerequisites: DispatcherPrerequisites) ext @tailrec final def run() { shutdownSchedule match { - case UNSCHEDULED ⇒ () case SCHEDULED ⇒ try { if (inhabitants == 0) shutdown() //Warning, racy @@ -369,6 +365,7 @@ abstract class MessageDispatcher(val prerequisites: DispatcherPrerequisites) ext case RESCHEDULED ⇒ if (updateShutdownSchedule(RESCHEDULED, SCHEDULED)) scheduleShutdownAction() else run() + case UNSCHEDULED ⇒ } } } diff --git a/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala index 43e8944105..e50f9150a4 100644 --- a/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala @@ -50,9 +50,9 @@ class BalancingDispatcher( private class SharingMailbox(_actor: ActorCell, _messageQueue: MessageQueue) extends Mailbox(_actor, _messageQueue) with DefaultSystemMessageQueue { override def cleanUp(): Unit = { + val dlq = actor.systemImpl.deadLetterMailbox //Don't call the original implementation of this since it scraps all messages, and we don't want to do that - if (hasSystemMessages) { - val dlq = actor.systemImpl.deadLetterMailbox + while (hasSystemMessages) { var message = systemDrain() while (message ne null) { // message must be “virgin” before being able to systemEnqueue again 
diff --git a/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala b/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala index 35b1e35012..b81a2fc0ba 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala @@ -235,7 +235,7 @@ private[akka] abstract class Mailbox(val actor: ActorCell, val messageQueue: Mes protected[dispatch] def cleanUp(): Unit = if (actor ne null) { // actor is null for the deadLetterMailbox val dlm = actor.systemImpl.deadLetterMailbox - if (hasSystemMessages) { + while (hasSystemMessages) { var message = systemDrain() while (message ne null) { // message must be “virgin” before being able to systemEnqueue again From 52f122107c04e88d1a9ef9dee4fe002b5653c05c Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Mon, 4 Jun 2012 11:38:39 +0200 Subject: [PATCH 25/92] Fix shutdown/remove race as described by @rkuhn, see #2137 * Skip nodes removal * Ignore removed client when enter barrier * Change order of testConductor.shutdown and testConductor.removeNode --- .../cluster/ClientDowningNodeThatIsUnreachableSpec.scala | 2 +- .../akka/cluster/GossipingAccrualFailureDetectorSpec.scala | 2 +- .../cluster/LeaderDowningNodeThatIsUnreachableSpec.scala | 4 ++-- .../multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala | 2 +- .../src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala | 2 +- .../src/main/scala/akka/remote/testconductor/Conductor.scala | 5 ++--- 6 files changed, 8 insertions(+), 9 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala index 6ab4d1a39e..948791167e 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala @@ -43,8 +43,8 @@ class ClientDowningNodeThatIsUnreachableSpec 
testConductor.enter("all-up") // kill 'third' node - testConductor.shutdown(third, 0) testConductor.removeNode(third) + testConductor.shutdown(third, 0) // mark 'third' node as DOWN cluster.down(thirdAddress) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala index 9d388622db..790c0e07fd 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala @@ -57,8 +57,8 @@ abstract class GossipingAccrualFailureDetectorSpec extends MultiNodeSpec(Gossipi "mark node as 'unavailable' if a node in the cluster is shut down (and its heartbeats stops)" taggedAs LongRunningTest in { runOn(first) { - testConductor.shutdown(third, 0) testConductor.removeNode(third) + testConductor.shutdown(third, 0) } runOn(first, second) { diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala index 63665d3c57..d04a97c9f1 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala @@ -51,8 +51,8 @@ class LeaderDowningNodeThatIsUnreachableSpec testConductor.enter("all-up") // kill 'fourth' node - testConductor.shutdown(fourth, 0) testConductor.removeNode(fourth) + testConductor.shutdown(fourth, 0) testConductor.enter("down-fourth-node") // --- HERE THE LEADER SHOULD DETECT FAILURE AND AUTO-DOWN THE UNREACHABLE NODE --- @@ -91,8 +91,8 @@ class LeaderDowningNodeThatIsUnreachableSpec testConductor.enter("all-up") // kill 'second' node - testConductor.shutdown(second, 0) testConductor.removeNode(second) + testConductor.shutdown(second, 0) 
testConductor.enter("down-second-node") // --- HERE THE LEADER SHOULD DETECT FAILURE AND AUTO-DOWN THE UNREACHABLE NODE --- diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala index ba0471bedb..932eb91e15 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala @@ -65,8 +65,8 @@ abstract class LeaderElectionSpec extends MultiNodeSpec(LeaderElectionMultiJvmSp case `controller` ⇒ testConductor.enter("before-shutdown") - testConductor.shutdown(leader, 0) testConductor.removeNode(leader) + testConductor.shutdown(leader, 0) testConductor.enter("after-shutdown", "after-down", "completed") case `leader` ⇒ diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala index c0c12f4582..e72c8325f2 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala @@ -57,8 +57,8 @@ abstract class NodeShutdownSpec extends MultiNodeSpec(NodeShutdownMultiJvmSpec) "become singleton cluster when one node is shutdown" taggedAs LongRunningTest in { runOn(first) { val secondAddress = node(second).address - testConductor.shutdown(second, 0) testConductor.removeNode(second) + testConductor.shutdown(second, 0) awaitUpConvergence(numberOfMembers = 1, canNotBePartOfMemberRing = Seq(secondAddress), 30.seconds) cluster.isSingletonCluster must be(true) assertLeader(first) diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala index 8fa8eeff21..f8f16a4d9c 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala +++ 
b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala @@ -444,7 +444,6 @@ private[akka] class Controller(private var initialParticipants: Int, controllerP nodes(node).fsm forward ToClient(TerminateMsg(exitValueOrKill)) } case Remove(node) ⇒ - nodes -= node barrier ! BarrierCoordinator.RemoveClient(node) } case GetNodes ⇒ sender ! nodes.keys @@ -540,8 +539,8 @@ private[akka] class BarrierCoordinator extends Actor with LoggingFSM[BarrierCoor when(Waiting) { case Event(EnterBarrier(name), d @ Data(clients, barrier, arrived)) ⇒ - if (name != barrier || clients.find(_.fsm == sender).isEmpty) throw WrongBarrier(name, sender, d) - val together = sender :: arrived + if (name != barrier) throw WrongBarrier(name, sender, d) + val together = if (clients.find(_.fsm == sender).isDefined) sender :: arrived else arrived handleBarrier(d.copy(arrived = together)) case Event(RemoveClient(name), d @ Data(clients, barrier, arrived)) ⇒ clients find (_.name == name) match { From e4104cfd0687ca09943a64e8a42706e1a97ebf1c Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Mon, 4 Jun 2012 11:58:09 +0200 Subject: [PATCH 26/92] Replace 'after' barrier with explicit barrier inside test method. * It's no problem using after, but scalatest will output the test method as completed (green) before running after, so it looks confusing in the logs * Using unique barrier names adds extra traceability in case of failures. 
--- .../ClientDowningNodeThatIsUnreachableSpec.scala | 4 +--- .../akka/cluster/ClientDowningNodeThatIsUpSpec.scala | 4 +--- .../cluster/GossipingAccrualFailureDetectorSpec.scala | 11 +++++------ .../LeaderDowningNodeThatIsUnreachableSpec.scala | 4 +--- .../akka/cluster/MembershipChangeListenerSpec.scala | 11 +++++------ .../multi-jvm/scala/akka/cluster/NodeLeaving.scala | 7 +++---- .../scala/akka/cluster/NodeLeavingAndExiting.scala | 7 +++---- .../NodeLeavingAndExitingAndBeingRemoved.scala | 3 +-- .../scala/akka/cluster/NodeMembershipSpec.scala | 11 +++++------ .../scala/akka/cluster/NodeShutdownSpec.scala | 10 ++++------ .../scala/akka/cluster/NodeStartupSpec.scala | 9 +++------ 11 files changed, 32 insertions(+), 49 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala index 948791167e..f657bcee3e 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala @@ -4,7 +4,6 @@ package akka.cluster import com.typesafe.config.ConfigFactory -import org.scalatest.BeforeAndAfter import akka.remote.testkit.MultiNodeConfig import akka.remote.testkit.MultiNodeSpec import akka.testkit._ @@ -26,8 +25,7 @@ class ClientDowningNodeThatIsUnreachableMultiJvmNode4 extends ClientDowningNodeT class ClientDowningNodeThatIsUnreachableSpec extends MultiNodeSpec(ClientDowningNodeThatIsUnreachableMultiJvmSpec) - with MultiNodeClusterSpec - with ImplicitSender with BeforeAndAfter { + with MultiNodeClusterSpec { import ClientDowningNodeThatIsUnreachableMultiJvmSpec._ override def initialParticipants = 4 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala index 
6b0bbae22e..666c3e207a 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala @@ -4,7 +4,6 @@ package akka.cluster import com.typesafe.config.ConfigFactory -import org.scalatest.BeforeAndAfter import akka.remote.testkit.MultiNodeConfig import akka.remote.testkit.MultiNodeSpec import akka.testkit._ @@ -26,8 +25,7 @@ class ClientDowningNodeThatIsUpMultiJvmNode4 extends ClientDowningNodeThatIsUpSp class ClientDowningNodeThatIsUpSpec extends MultiNodeSpec(ClientDowningNodeThatIsUpMultiJvmSpec) - with MultiNodeClusterSpec - with ImplicitSender with BeforeAndAfter { + with MultiNodeClusterSpec { import ClientDowningNodeThatIsUpMultiJvmSpec._ override def initialParticipants = 4 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala index 790c0e07fd..16113519da 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala @@ -3,7 +3,6 @@ */ package akka.cluster -import org.scalatest.BeforeAndAfter import com.typesafe.config.ConfigFactory import akka.remote.testkit.MultiNodeConfig import akka.remote.testkit.MultiNodeSpec @@ -25,7 +24,7 @@ class GossipingAccrualFailureDetectorMultiJvmNode2 extends GossipingAccrualFailu class GossipingAccrualFailureDetectorMultiJvmNode3 extends GossipingAccrualFailureDetectorSpec abstract class GossipingAccrualFailureDetectorSpec extends MultiNodeSpec(GossipingAccrualFailureDetectorMultiJvmSpec) - with MultiNodeClusterSpec with ImplicitSender with BeforeAndAfter { + with MultiNodeClusterSpec { import GossipingAccrualFailureDetectorMultiJvmSpec._ override def initialParticipants = 3 @@ -34,10 +33,6 @@ abstract class GossipingAccrualFailureDetectorSpec 
extends MultiNodeSpec(Gossipi lazy val secondAddress = node(second).address lazy val thirdAddress = node(third).address - after { - testConductor.enter("after") - } - "A Gossip-driven Failure Detector" must { "receive gossip heartbeats so that all member nodes in the cluster are marked 'available'" taggedAs LongRunningTest in { @@ -53,6 +48,8 @@ abstract class GossipingAccrualFailureDetectorSpec extends MultiNodeSpec(Gossipi cluster.failureDetector.isAvailable(firstAddress) must be(true) cluster.failureDetector.isAvailable(secondAddress) must be(true) cluster.failureDetector.isAvailable(thirdAddress) must be(true) + + testConductor.enter("after-1") } "mark node as 'unavailable' if a node in the cluster is shut down (and its heartbeats stops)" taggedAs LongRunningTest in { @@ -68,6 +65,8 @@ abstract class GossipingAccrualFailureDetectorSpec extends MultiNodeSpec(Gossipi cluster.failureDetector.isAvailable(firstAddress) must be(true) cluster.failureDetector.isAvailable(secondAddress) must be(true) } + + testConductor.enter("after-2") } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala index d04a97c9f1..fda3046e4c 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala @@ -4,7 +4,6 @@ package akka.cluster import com.typesafe.config.ConfigFactory -import org.scalatest.BeforeAndAfter import akka.remote.testkit.MultiNodeConfig import akka.remote.testkit.MultiNodeSpec import akka.testkit._ @@ -34,8 +33,7 @@ class LeaderDowningNodeThatIsUnreachableMultiJvmNode4 extends LeaderDowningNodeT class LeaderDowningNodeThatIsUnreachableSpec extends MultiNodeSpec(LeaderDowningNodeThatIsUnreachableMultiJvmSpec) - with MultiNodeClusterSpec - with ImplicitSender with BeforeAndAfter { + with 
MultiNodeClusterSpec { import LeaderDowningNodeThatIsUnreachableMultiJvmSpec._ override def initialParticipants = 4 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala index f818c97744..070fb80553 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala @@ -4,7 +4,6 @@ package akka.cluster import scala.collection.immutable.SortedSet -import org.scalatest.BeforeAndAfter import com.typesafe.config.ConfigFactory import akka.remote.testkit.MultiNodeConfig import akka.remote.testkit.MultiNodeSpec @@ -24,15 +23,11 @@ class MembershipChangeListenerMultiJvmNode2 extends MembershipChangeListenerSpec class MembershipChangeListenerMultiJvmNode3 extends MembershipChangeListenerSpec abstract class MembershipChangeListenerSpec extends MultiNodeSpec(MembershipChangeListenerMultiJvmSpec) - with MultiNodeClusterSpec with ImplicitSender with BeforeAndAfter { + with MultiNodeClusterSpec { import MembershipChangeListenerMultiJvmSpec._ override def initialParticipants = 3 - after { - testConductor.enter("after") - } - lazy val firstAddress = node(first).address lazy val secondAddress = node(second).address @@ -59,6 +54,8 @@ abstract class MembershipChangeListenerSpec extends MultiNodeSpec(MembershipChan cluster.convergence.isDefined must be(true) } + testConductor.enter("after-1") + } "(when three systems) after cluster convergence updates the membership table then all MembershipChangeListeners should be triggered" taggedAs LongRunningTest in { @@ -77,6 +74,8 @@ abstract class MembershipChangeListenerSpec extends MultiNodeSpec(MembershipChan latch.await cluster.convergence.isDefined must be(true) + testConductor.enter("after-2") + } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeaving.scala 
b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeaving.scala index 058bfca7e9..39fee8acfa 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeaving.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeaving.scala @@ -4,7 +4,6 @@ package akka.cluster import scala.collection.immutable.SortedSet -import org.scalatest.BeforeAndAfter import com.typesafe.config.ConfigFactory import akka.remote.testkit.MultiNodeConfig import akka.remote.testkit.MultiNodeSpec @@ -17,10 +16,10 @@ object NodeLeavingMultiJvmSpec extends MultiNodeConfig { commonConfig( debugConfig(on = false) - .withFallback(ConfigFactory.parseString(""" + .withFallback(ConfigFactory.parseString(""" akka.cluster.unreachable-nodes-reaper-frequency = 30 s # turn "off" reaping to unreachable node set """)) - .withFallback(MultiNodeClusterSpec.clusterConfig)) + .withFallback(MultiNodeClusterSpec.clusterConfig)) } class NodeLeavingMultiJvmNode1 extends NodeLeavingSpec @@ -28,7 +27,7 @@ class NodeLeavingMultiJvmNode2 extends NodeLeavingSpec class NodeLeavingMultiJvmNode3 extends NodeLeavingSpec abstract class NodeLeavingSpec extends MultiNodeSpec(NodeLeavingMultiJvmSpec) - with MultiNodeClusterSpec with ImplicitSender with BeforeAndAfter { + with MultiNodeClusterSpec { import NodeLeavingMultiJvmSpec._ override def initialParticipants = 3 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExiting.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExiting.scala index 3fe9e220f6..448d57d6e7 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExiting.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExiting.scala @@ -4,7 +4,6 @@ package akka.cluster import scala.collection.immutable.SortedSet -import org.scalatest.BeforeAndAfter import com.typesafe.config.ConfigFactory import akka.remote.testkit.MultiNodeConfig import akka.remote.testkit.MultiNodeSpec @@ -18,13 +17,13 @@ object NodeLeavingAndExitingMultiJvmSpec 
extends MultiNodeConfig { commonConfig( debugConfig(on = false) - .withFallback(ConfigFactory.parseString(""" + .withFallback(ConfigFactory.parseString(""" akka.cluster { leader-actions-frequency = 5 s # increase the leader action task frequency to make sure we get a chance to test the LEAVING state unreachable-nodes-reaper-frequency = 30 s # turn "off" reaping to unreachable node set } """) - .withFallback(MultiNodeClusterSpec.clusterConfig))) + .withFallback(MultiNodeClusterSpec.clusterConfig))) } class NodeLeavingAndExitingMultiJvmNode1 extends NodeLeavingAndExitingSpec @@ -32,7 +31,7 @@ class NodeLeavingAndExitingMultiJvmNode2 extends NodeLeavingAndExitingSpec class NodeLeavingAndExitingMultiJvmNode3 extends NodeLeavingAndExitingSpec abstract class NodeLeavingAndExitingSpec extends MultiNodeSpec(NodeLeavingAndExitingMultiJvmSpec) - with MultiNodeClusterSpec with ImplicitSender with BeforeAndAfter { + with MultiNodeClusterSpec { import NodeLeavingAndExitingMultiJvmSpec._ override def initialParticipants = 3 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemoved.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemoved.scala index 7c1037a624..8ea16dfa8a 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemoved.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemoved.scala @@ -4,7 +4,6 @@ package akka.cluster import scala.collection.immutable.SortedSet -import org.scalatest.BeforeAndAfter import com.typesafe.config.ConfigFactory import akka.remote.testkit.MultiNodeConfig import akka.remote.testkit.MultiNodeSpec @@ -24,7 +23,7 @@ class NodeLeavingAndExitingAndBeingRemovedMultiJvmNode2 extends NodeLeavingAndEx class NodeLeavingAndExitingAndBeingRemovedMultiJvmNode3 extends NodeLeavingAndExitingAndBeingRemovedSpec abstract class NodeLeavingAndExitingAndBeingRemovedSpec extends 
MultiNodeSpec(NodeLeavingAndExitingAndBeingRemovedMultiJvmSpec) - with MultiNodeClusterSpec with ImplicitSender with BeforeAndAfter { + with MultiNodeClusterSpec { import NodeLeavingAndExitingAndBeingRemovedMultiJvmSpec._ override def initialParticipants = 3 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala index fecb53c898..cf6839dd83 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala @@ -4,7 +4,6 @@ package akka.cluster import com.typesafe.config.ConfigFactory -import org.scalatest.BeforeAndAfter import akka.remote.testkit.MultiNodeConfig import akka.remote.testkit.MultiNodeSpec import akka.testkit._ @@ -22,15 +21,11 @@ class NodeMembershipMultiJvmNode1 extends NodeMembershipSpec class NodeMembershipMultiJvmNode2 extends NodeMembershipSpec class NodeMembershipMultiJvmNode3 extends NodeMembershipSpec -abstract class NodeMembershipSpec extends MultiNodeSpec(NodeMembershipMultiJvmSpec) with MultiNodeClusterSpec with ImplicitSender with BeforeAndAfter { +abstract class NodeMembershipSpec extends MultiNodeSpec(NodeMembershipMultiJvmSpec) with MultiNodeClusterSpec { import NodeMembershipMultiJvmSpec._ override def initialParticipants = 3 - after { - testConductor.enter("after") - } - lazy val firstAddress = node(first).address lazy val secondAddress = node(second).address lazy val thirdAddress = node(third).address @@ -55,6 +50,8 @@ abstract class NodeMembershipSpec extends MultiNodeSpec(NodeMembershipMultiJvmSp awaitCond(cluster.convergence.isDefined) } + testConductor.enter("after-1") + } "(when three nodes) start gossiping to each other so that all nodes gets the same gossip info" taggedAs LongRunningTest in { @@ -70,6 +67,8 @@ abstract class NodeMembershipSpec extends MultiNodeSpec(NodeMembershipMultiJvmSp } awaitCond(cluster.convergence.isDefined) + 
testConductor.enter("after-2") + } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala index e72c8325f2..e59382341f 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala @@ -4,7 +4,6 @@ package akka.cluster import com.typesafe.config.ConfigFactory -import org.scalatest.BeforeAndAfter import akka.remote.testkit.MultiNodeConfig import akka.remote.testkit.MultiNodeSpec import akka.testkit._ @@ -28,15 +27,11 @@ object NodeShutdownMultiJvmSpec extends MultiNodeConfig { class NodeShutdownMultiJvmNode1 extends NodeShutdownSpec class NodeShutdownMultiJvmNode2 extends NodeShutdownSpec -abstract class NodeShutdownSpec extends MultiNodeSpec(NodeShutdownMultiJvmSpec) with MultiNodeClusterSpec with ImplicitSender with BeforeAndAfter { +abstract class NodeShutdownSpec extends MultiNodeSpec(NodeShutdownMultiJvmSpec) with MultiNodeClusterSpec { import NodeShutdownMultiJvmSpec._ override def initialParticipants = 2 - after { - testConductor.enter("after") - } - "A cluster of 2 nodes" must { "not be singleton cluster when joined" taggedAs LongRunningTest in { @@ -52,6 +47,8 @@ abstract class NodeShutdownSpec extends MultiNodeSpec(NodeShutdownMultiJvmSpec) awaitUpConvergence(numberOfMembers = 2) cluster.isSingletonCluster must be(false) assertLeader(first, second) + + testConductor.enter("after-1") } "become singleton cluster when one node is shutdown" taggedAs LongRunningTest in { @@ -64,6 +61,7 @@ abstract class NodeShutdownSpec extends MultiNodeSpec(NodeShutdownMultiJvmSpec) assertLeader(first) } + testConductor.enter("after-2") } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala index b2b98f94fa..7e3fdb3323 100644 --- 
a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeStartupSpec.scala @@ -4,7 +4,6 @@ package akka.cluster import com.typesafe.config.ConfigFactory -import org.scalatest.BeforeAndAfter import akka.remote.testkit.MultiNodeConfig import akka.remote.testkit.MultiNodeSpec import akka.testkit._ @@ -20,15 +19,11 @@ object NodeStartupMultiJvmSpec extends MultiNodeConfig { class NodeStartupMultiJvmNode1 extends NodeStartupSpec class NodeStartupMultiJvmNode2 extends NodeStartupSpec -abstract class NodeStartupSpec extends MultiNodeSpec(NodeStartupMultiJvmSpec) with MultiNodeClusterSpec with ImplicitSender with BeforeAndAfter { +abstract class NodeStartupSpec extends MultiNodeSpec(NodeStartupMultiJvmSpec) with MultiNodeClusterSpec { import NodeStartupMultiJvmSpec._ override def initialParticipants = 2 - after { - testConductor.enter("after") - } - lazy val firstAddress = node(first).address lazy val secondAddress = node(second).address @@ -40,6 +35,7 @@ abstract class NodeStartupSpec extends MultiNodeSpec(NodeStartupMultiJvmSpec) wi awaitUpConvergence(numberOfMembers = 1) assertLeader(first) } + testConductor.enter("after-1") } } @@ -58,6 +54,7 @@ abstract class NodeStartupSpec extends MultiNodeSpec(NodeStartupMultiJvmSpec) wi cluster.latestGossip.members.size must be(2) awaitCond(cluster.convergence.isDefined) assertLeader(first, second) + testConductor.enter("after-2") } } From fd1d0ce1212637084eeba004408fa9ba70eef21e Mon Sep 17 00:00:00 2001 From: Roland Date: Mon, 4 Jun 2012 12:18:30 +0200 Subject: [PATCH 27/92] make cleanUp of systemMessages atomic - extend systemDrain to take the new contents which shall be switched in - make NoMessage placeholder which will signal final closing of the mailbox - put that in when cleaning up, and check it when enqueuing --- .../akka/actor/dispatch/ActorModelSpec.scala | 2 +- .../main/scala/akka/actor/ActorSystem.scala | 2 +- 
.../akka/dispatch/AbstractDispatcher.scala | 4 ++ .../akka/dispatch/BalancingDispatcher.scala | 16 +++--- .../main/scala/akka/dispatch/Mailbox.scala | 51 ++++++++++--------- 5 files changed, 40 insertions(+), 35 deletions(-) diff --git a/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala index acc416f04f..4d83c85b82 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala @@ -374,7 +374,7 @@ abstract class ActorModelSpec(config: String) extends AkkaSpec(config) with Defa def compare(l: AnyRef, r: AnyRef) = (l, r) match { case (ll: ActorCell, rr: ActorCell) ⇒ ll.self.path compareTo rr.self.path } } foreach { case cell: ActorCell ⇒ - System.err.println(" - " + cell.self.path + " " + cell.isTerminated + " " + cell.mailbox.status + " " + cell.mailbox.numberOfMessages + " " + SystemMessage.size(cell.mailbox.systemDrain())) + System.err.println(" - " + cell.self.path + " " + cell.isTerminated + " " + cell.mailbox.status + " " + cell.mailbox.numberOfMessages + " " + SystemMessage.size(cell.mailbox.systemDrain(null))) } System.err.println("Mailbox: " + mq.numberOfMessages + " " + mq.hasMessages) diff --git a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala index 008610c333..af7313b41e 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala @@ -545,7 +545,7 @@ private[akka] class ActorSystemImpl(val name: String, applicationConfig: Config, becomeClosed() def systemEnqueue(receiver: ActorRef, handle: SystemMessage): Unit = deadLetters ! 
DeadLetter(handle, receiver, receiver) - def systemDrain(): SystemMessage = null + def systemDrain(newContents: SystemMessage): SystemMessage = null def hasSystemMessages = false } diff --git a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala index 48a91dd00c..12eea14ffc 100644 --- a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala @@ -107,6 +107,10 @@ private[akka] case class Watch(watchee: ActorRef, watcher: ActorRef) extends Sys * INTERNAL API */ private[akka] case class Unwatch(watchee: ActorRef, watcher: ActorRef) extends SystemMessage // sent to tear down a DeathWatch +/** + * INTERNAL API + */ +private[akka] case object NoMessage extends SystemMessage // switched into the mailbox to signal termination final case class TaskInvocation(eventStream: EventStream, runnable: Runnable, cleanup: () ⇒ Unit) extends Runnable { def run(): Unit = diff --git a/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala index e50f9150a4..6beee3c9da 100644 --- a/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala @@ -52,15 +52,13 @@ class BalancingDispatcher( override def cleanUp(): Unit = { val dlq = actor.systemImpl.deadLetterMailbox //Don't call the original implementation of this since it scraps all messages, and we don't want to do that - while (hasSystemMessages) { - var message = systemDrain() - while (message ne null) { - // message must be “virgin” before being able to systemEnqueue again - val next = message.next - message.next = null - dlq.systemEnqueue(actor.self, message) - message = next - } + var message = systemDrain(NoMessage) + while (message ne null) { + // message must be “virgin” before being able to systemEnqueue again + val 
next = message.next + message.next = null + dlq.systemEnqueue(actor.self, message) + message = next } } } diff --git a/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala b/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala index b81a2fc0ba..d26e7b2afc 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala @@ -169,6 +169,7 @@ private[akka] abstract class Mailbox(val actor: ActorCell, val messageQueue: Mes */ protected final def systemQueueGet: SystemMessage = Unsafe.instance.getObjectVolatile(this, AbstractMailbox.systemMessageOffset).asInstanceOf[SystemMessage] + protected final def systemQueuePut(_old: SystemMessage, _new: SystemMessage): Boolean = Unsafe.instance.compareAndSwapObject(this, AbstractMailbox.systemMessageOffset, _old, _new) @@ -208,14 +209,14 @@ private[akka] abstract class Mailbox(val actor: ActorCell, val messageQueue: Mes } final def processAllSystemMessages() { - var nextMessage = systemDrain() + var nextMessage = systemDrain(null) try { while ((nextMessage ne null) && !isClosed) { if (debug) println(actor.self + " processing system message " + nextMessage + " with " + actor.childrenRefs) actor systemInvoke nextMessage nextMessage = nextMessage.next // don’t ever execute normal message when system message present! 
- if (nextMessage eq null) nextMessage = systemDrain() + if (nextMessage eq null) nextMessage = systemDrain(null) } } catch { case NonFatal(e) ⇒ @@ -235,15 +236,13 @@ private[akka] abstract class Mailbox(val actor: ActorCell, val messageQueue: Mes protected[dispatch] def cleanUp(): Unit = if (actor ne null) { // actor is null for the deadLetterMailbox val dlm = actor.systemImpl.deadLetterMailbox - while (hasSystemMessages) { - var message = systemDrain() - while (message ne null) { - // message must be “virgin” before being able to systemEnqueue again - val next = message.next - message.next = null - dlm.systemEnqueue(actor.self, message) - message = next - } + var message = systemDrain(NoMessage) + while (message ne null) { + // message must be “virgin” before being able to systemEnqueue again + val next = message.next + message.next = null + dlm.systemEnqueue(actor.self, message) + message = next } if (messageQueue ne null) // needed for CallingThreadDispatcher, which never calls Mailbox.run() @@ -300,7 +299,7 @@ private[akka] trait SystemMessageQueue { /** * Dequeue all messages from system queue and return them as single-linked list. */ - def systemDrain(): SystemMessage + def systemDrain(newContents: SystemMessage): SystemMessage def hasSystemMessages: Boolean } @@ -315,26 +314,30 @@ private[akka] trait DefaultSystemMessageQueue { self: Mailbox ⇒ assert(message.next eq null) if (Mailbox.debug) println(actor.self + " having enqueued " + message) val head = systemQueueGet - /* - * this write is safely published by the compareAndSet contained within - * systemQueuePut; “Intra-Thread Semantics” on page 12 of the JSR133 spec - * guarantees that “head” uses the value obtained from systemQueueGet above. - * Hence, SystemMessage.next does not need to be volatile. 
- */ - message.next = head - if (!systemQueuePut(head, message)) { - message.next = null - systemEnqueue(receiver, message) + if (head == NoMessage) actor.system.deadLetterMailbox.systemEnqueue(receiver, message) + else { + /* + * this write is safely published by the compareAndSet contained within + * systemQueuePut; “Intra-Thread Semantics” on page 12 of the JSR133 spec + * guarantees that “head” uses the value obtained from systemQueueGet above. + * Hence, SystemMessage.next does not need to be volatile. + */ + message.next = head + if (!systemQueuePut(head, message)) { + message.next = null + systemEnqueue(receiver, message) + } } } @tailrec - final def systemDrain(): SystemMessage = { + final def systemDrain(newContents: SystemMessage): SystemMessage = { val head = systemQueueGet - if (systemQueuePut(head, null)) SystemMessage.reverse(head) else systemDrain() + if (systemQueuePut(head, newContents)) SystemMessage.reverse(head) else systemDrain(newContents) } def hasSystemMessages: Boolean = systemQueueGet ne null + } /** From b45305a61e29f4d755b048b7dcecefdef2662914 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 4 Jun 2012 13:34:30 +0200 Subject: [PATCH 28/92] More formatting --- .../akka/dispatch/ThreadPoolBuilder.scala | 26 ++++++------------- 1 file changed, 8 insertions(+), 18 deletions(-) diff --git a/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala b/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala index 25125ae149..963299debc 100644 --- a/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala +++ b/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala @@ -29,30 +29,20 @@ object ThreadPoolConfig { val defaultTimeout: Duration = Duration(60000L, TimeUnit.MILLISECONDS) val defaultRejectionPolicy: RejectedExecutionHandler = new SaneRejectedExecutionHandler() - def scaledPoolSize(floor: Int, multiplier: Double, ceiling: Int): Int = { - import scala.math.{ min, max } - 
min(max((Runtime.getRuntime.availableProcessors * multiplier).ceil.toInt, floor), ceiling) - } + def scaledPoolSize(floor: Int, multiplier: Double, ceiling: Int): Int = + math.min(math.max((Runtime.getRuntime.availableProcessors * multiplier).ceil.toInt, floor), ceiling) - def arrayBlockingQueue(capacity: Int, fair: Boolean): QueueFactory = - () ⇒ new ArrayBlockingQueue[Runnable](capacity, fair) + def arrayBlockingQueue(capacity: Int, fair: Boolean): QueueFactory = () ⇒ new ArrayBlockingQueue[Runnable](capacity, fair) - def synchronousQueue(fair: Boolean): QueueFactory = - () ⇒ new SynchronousQueue[Runnable](fair) + def synchronousQueue(fair: Boolean): QueueFactory = () ⇒ new SynchronousQueue[Runnable](fair) - def linkedBlockingQueue(): QueueFactory = - () ⇒ new LinkedBlockingQueue[Runnable]() + def linkedBlockingQueue(): QueueFactory = () ⇒ new LinkedBlockingQueue[Runnable]() - def linkedBlockingQueue(capacity: Int): QueueFactory = - () ⇒ new LinkedBlockingQueue[Runnable](capacity) + def linkedBlockingQueue(capacity: Int): QueueFactory = () ⇒ new LinkedBlockingQueue[Runnable](capacity) - def reusableQueue(queue: BlockingQueue[Runnable]): QueueFactory = - () ⇒ queue + def reusableQueue(queue: BlockingQueue[Runnable]): QueueFactory = () ⇒ queue - def reusableQueue(queueFactory: QueueFactory): QueueFactory = { - val queue = queueFactory() - () ⇒ queue - } + def reusableQueue(queueFactory: QueueFactory): QueueFactory = reusableQueue(queueFactory()) } /** From e592cebe20682e0f789bd145e79922c70e267c0a Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Mon, 4 Jun 2012 13:35:08 +0200 Subject: [PATCH 29/92] Add script to run LogRoleReplace, see #2173 --- project/scripts/multi-node-log-replace | 11 +++++++++++ scripts/multi-node-log-replace.sh | 25 +++++++++++++++++++++++++ 2 files changed, 36 insertions(+) create mode 100755 project/scripts/multi-node-log-replace create mode 100755 scripts/multi-node-log-replace.sh diff --git a/project/scripts/multi-node-log-replace 
b/project/scripts/multi-node-log-replace new file mode 100755 index 0000000000..83f1b8a136 --- /dev/null +++ b/project/scripts/multi-node-log-replace @@ -0,0 +1,11 @@ +#!/usr/bin/env bash +# +# Utility to make log files from multi-node tests easier to analyze. +# Replaces jvm names and host:port with corresponding logical role name. +# + + +# check for an sbt command +type -P sbt &> /dev/null || fail "sbt command not found" + +sbt "project akka-remote-tests" "test:run-main akka.remote.testkit.LogRoleReplace $1 $2" \ No newline at end of file diff --git a/scripts/multi-node-log-replace.sh b/scripts/multi-node-log-replace.sh new file mode 100755 index 0000000000..8e8af7112a --- /dev/null +++ b/scripts/multi-node-log-replace.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash +# +# Utility to make log files from multi-node tests easier to analyze. +# Replaces jvm names and host:port with corresponding logical role name. +# +# Use with 0, 1 or 2 arguments. +# +# When using 0 arguments it reads from standard input +# and writes to standard output. +# +# With 1 argument it reads from the file specified in the first argument +# and writes to standard output. +# +# With 2 arguments it reads the file specified in the first argument +# and writes to the file specified in the second argument. 
+# +# You can also replace the contents of the clipboard instead of using files +# by supplying `clipboard` as argument +# + + +# check for an sbt command +type -P sbt &> /dev/null || fail "sbt command not found" + +sbt "project akka-remote-tests" "test:run-main akka.remote.testkit.LogRoleReplace $1 $2" \ No newline at end of file From c990fee724b96a9467b323f8eea99e06b118061b Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 4 Jun 2012 13:46:29 +0200 Subject: [PATCH 30/92] Switching to the appropriate check for confirmed existence --- .../src/test/scala/akka/routing/RoutingSpec.scala | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala index f1952b8f79..35631924cf 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala @@ -73,7 +73,9 @@ class RoutingSpec extends AkkaSpec(RoutingSpec.config) with DefaultTimeout with watch(router) watch(c2) system.stop(c2) - expectMsg(Terminated(c2)(existenceConfirmed = true)) + expectMsgPF() { + case t @ Terminated(`c2`) if t.existenceConfirmed == true ⇒ t + } // it might take a while until the Router has actually processed the Terminated message awaitCond { router ! 
"" @@ -84,7 +86,9 @@ class RoutingSpec extends AkkaSpec(RoutingSpec.config) with DefaultTimeout with res == Seq(c1, c1) } system.stop(c1) - expectMsg(Terminated(router)(existenceConfirmed = true)) + expectMsgPF() { + case t @ Terminated(`router`) if t.existenceConfirmed == true ⇒ t + } } "be able to send their routees" in { From 5810f7353039a88371d54c4f85a850860a74ae17 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Mon, 4 Jun 2012 13:51:30 +0200 Subject: [PATCH 31/92] Minor improvement from review feedback, see 2137 --- .../src/main/scala/akka/remote/testconductor/Conductor.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala index f8f16a4d9c..3aed112b55 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala @@ -540,7 +540,7 @@ private[akka] class BarrierCoordinator extends Actor with LoggingFSM[BarrierCoor when(Waiting) { case Event(EnterBarrier(name), d @ Data(clients, barrier, arrived)) ⇒ if (name != barrier) throw WrongBarrier(name, sender, d) - val together = if (clients.find(_.fsm == sender).isDefined) sender :: arrived else arrived + val together = if (clients.exists(_.fsm == sender)) sender :: arrived else arrived handleBarrier(d.copy(arrived = together)) case Event(RemoveClient(name), d @ Data(clients, barrier, arrived)) ⇒ clients find (_.name == name) match { From f30a1a0b1f7678fb66eef0e8509e6ddeb1899e8d Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Mon, 4 Jun 2012 14:29:32 +0200 Subject: [PATCH 32/92] Always removeNode when shutdown, see 2137 --- .../akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala | 1 - .../akka/cluster/GossipingAccrualFailureDetectorSpec.scala | 1 - .../akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala | 2 -- 
.../src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala | 1 - .../src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala | 1 - .../src/main/scala/akka/remote/testconductor/Conductor.scala | 4 +++- 6 files changed, 3 insertions(+), 7 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala index f657bcee3e..a5ce2d4258 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala @@ -41,7 +41,6 @@ class ClientDowningNodeThatIsUnreachableSpec testConductor.enter("all-up") // kill 'third' node - testConductor.removeNode(third) testConductor.shutdown(third, 0) // mark 'third' node as DOWN diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala index 16113519da..afaeac747b 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala @@ -54,7 +54,6 @@ abstract class GossipingAccrualFailureDetectorSpec extends MultiNodeSpec(Gossipi "mark node as 'unavailable' if a node in the cluster is shut down (and its heartbeats stops)" taggedAs LongRunningTest in { runOn(first) { - testConductor.removeNode(third) testConductor.shutdown(third, 0) } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala index fda3046e4c..dfd8dde310 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala +++ 
b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala @@ -49,7 +49,6 @@ class LeaderDowningNodeThatIsUnreachableSpec testConductor.enter("all-up") // kill 'fourth' node - testConductor.removeNode(fourth) testConductor.shutdown(fourth, 0) testConductor.enter("down-fourth-node") @@ -89,7 +88,6 @@ class LeaderDowningNodeThatIsUnreachableSpec testConductor.enter("all-up") // kill 'second' node - testConductor.removeNode(second) testConductor.shutdown(second, 0) testConductor.enter("down-second-node") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala index 932eb91e15..e5972b7d7c 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala @@ -65,7 +65,6 @@ abstract class LeaderElectionSpec extends MultiNodeSpec(LeaderElectionMultiJvmSp case `controller` ⇒ testConductor.enter("before-shutdown") - testConductor.removeNode(leader) testConductor.shutdown(leader, 0) testConductor.enter("after-shutdown", "after-down", "completed") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala index e59382341f..1179f89d76 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala @@ -54,7 +54,6 @@ abstract class NodeShutdownSpec extends MultiNodeSpec(NodeShutdownMultiJvmSpec) "become singleton cluster when one node is shutdown" taggedAs LongRunningTest in { runOn(first) { val secondAddress = node(second).address - testConductor.removeNode(second) testConductor.shutdown(second, 0) awaitUpConvergence(numberOfMembers = 1, canNotBePartOfMemberRing = Seq(secondAddress), 30.seconds) cluster.isSingletonCluster must be(true) diff --git 
a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala index 3aed112b55..17a2bfcd5f 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala @@ -168,7 +168,8 @@ trait Conductor { this: TestConductorExt ⇒ /** * Tell the remote node to shut itself down using System.exit with the given - * exitValue. + * exitValue. The node will also be removed, so that the remaining nodes may still + * pass subsequent barriers. * * @param node is the symbolic name of the node which is to be affected * @param exitValue is the return code which shall be given to System.exit @@ -441,6 +442,7 @@ private[akka] class Controller(private var initialParticipants: Int, controllerP if (exitValueOrKill < 0) { // TODO: kill via SBT } else { + barrier ! BarrierCoordinator.RemoveClient(node) nodes(node).fsm forward ToClient(TerminateMsg(exitValueOrKill)) } case Remove(node) ⇒ From b1c507f3b95bd69eb75d8fa2ee13adb494c16d23 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Mon, 4 Jun 2012 11:37:23 +0200 Subject: [PATCH 33/92] Shutdown does removeNode, see #2137 --- .../scala/akka/cluster/ConvergenceSpec.scala | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala index eeb9b864ed..a76083b0fc 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala @@ -4,7 +4,6 @@ package akka.cluster import com.typesafe.config.ConfigFactory -import org.scalatest.BeforeAndAfter import akka.remote.testkit.MultiNodeConfig import akka.remote.testkit.MultiNodeSpec import akka.testkit._ @@ -33,15 +32,11 @@ class ConvergenceMultiJvmNode4 extends 
ConvergenceSpec abstract class ConvergenceSpec extends MultiNodeSpec(ConvergenceMultiJvmSpec) - with MultiNodeClusterSpec with BeforeAndAfter { + with MultiNodeClusterSpec { import ConvergenceMultiJvmSpec._ override def initialParticipants = 4 - after { - testConductor.enter("after") - } - "A cluster of 3 members" must { "reach initial convergence" taggedAs LongRunningTest in { @@ -58,6 +53,8 @@ abstract class ConvergenceSpec runOn(fourth) { // doesn't join immediately } + + testConductor.enter("after-1") } "not reach convergence while any nodes are unreachable" taggedAs LongRunningTest in { @@ -67,14 +64,13 @@ abstract class ConvergenceSpec runOn(first) { // kill 'third' node testConductor.shutdown(third, 0) - testConductor.removeNode(third) } runOn(first, second) { val firstAddress = node(first).address val secondAddress = node(second).address - within(30 seconds) { + within(25 seconds) { // third becomes unreachable awaitCond(cluster.latestGossip.overview.unreachable.size == 1) awaitCond(cluster.latestGossip.members.size == 2) @@ -89,6 +85,7 @@ abstract class ConvergenceSpec } } + testConductor.enter("after-2") } "not move a new joining node to Up while there is no convergence" taggedAs LongRunningTest in { @@ -126,6 +123,7 @@ abstract class ConvergenceSpec } } + testConductor.enter("after-3") } } } From 9ee971ee794feff5944b880a21a54aa5ddec8948 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn=20Antonsson?= Date: Mon, 4 Jun 2012 16:22:10 +0200 Subject: [PATCH 34/92] We need to reregister a client conection when we know the actor system address. 
see #2175 --- .../NetworkFailureInjector.scala | 25 ++++++++++++------- 1 file changed, 16 insertions(+), 9 deletions(-) diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala index d30872cd6e..a0f53b5a9b 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala @@ -28,7 +28,7 @@ private[akka] class FailureInjector extends Actor with ActorLogging { ctx: Option[ChannelHandlerContext] = None, throttleSend: Option[SetRate] = None, throttleReceive: Option[SetRate] = None) - case class Injectors(sender: ActorRef, receiver: ActorRef) + case class Injectors(sender: ActorRef, receiver: ActorRef, known: Boolean) var channels = Map[ChannelHandlerContext, Injectors]() var settings = Map[Address, ChannelSettings]() @@ -37,12 +37,13 @@ private[akka] class FailureInjector extends Actor with ActorLogging { /** * Only for a NEW ctx, start ThrottleActors, prime them and update all maps. 
*/ - def ingestContextAddress(ctx: ChannelHandlerContext, addr: Address): Injectors = { + def ingestContextAddress(ctx: ChannelHandlerContext, addr: Address, known: Boolean, + snd: Option[ActorRef] = None, rcv: Option[ActorRef] = None): Injectors = { val gen = generation.next val name = addr.host.get + ":" + addr.port.get - val thrSend = context.actorOf(Props(new ThrottleActor(ctx)), name + "-snd" + gen) - val thrRecv = context.actorOf(Props(new ThrottleActor(ctx)), name + "-rcv" + gen) - val injectors = Injectors(thrSend, thrRecv) + val thrSend = snd getOrElse context.actorOf(Props(new ThrottleActor(ctx)), name + "-snd" + gen) + val thrRecv = rcv getOrElse context.actorOf(Props(new ThrottleActor(ctx)), name + "-rcv" + gen) + val injectors = Injectors(thrSend, thrRecv, known) channels += ctx -> injectors settings += addr -> (settings get addr map { case c @ ChannelSettings(prevCtx, ts, tr) ⇒ @@ -134,7 +135,10 @@ private[akka] class FailureInjector extends Actor with ActorLogging { */ case s @ Send(ctx, direction, future, msg) ⇒ channels get ctx match { - case Some(Injectors(snd, rcv)) ⇒ + case Some(Injectors(snd, rcv, known)) ⇒ + // if the system registered with an empty name then check if we know it now + if (!known) ChannelAddress.get(ctx.getChannel).foreach(addr ⇒ + ingestContextAddress(ctx, addr, true, Some(snd), Some(rcv))) if (direction includes Direction.Send) snd ! s if (direction includes Direction.Receive) rcv ! 
s case None ⇒ @@ -142,21 +146,24 @@ private[akka] class FailureInjector extends Actor with ActorLogging { ctx.getChannel.getRemoteAddress match { case sockAddr: InetSocketAddress ⇒ val (ipaddr, ip, port) = (sockAddr.getAddress, sockAddr.getAddress.getHostAddress, sockAddr.getPort) - val addr = ChannelAddress.get(ctx.getChannel) orElse { + val (addr, known) = ChannelAddress.get(ctx.getChannel) orElse { settings collect { case (a @ Address("akka", _, Some(`ip`), Some(`port`)), _) ⇒ a } headOption } orElse { // only if raw IP failed, try with hostname val name = ipaddr.getHostName if (name == ip) None else settings collect { case (a @ Address("akka", _, Some(`name`), Some(`port`)), _) ⇒ a } headOption - } getOrElse Address("akka", "", ip, port) + } match { + case Some(a) ⇒ (a, true) + case None ⇒ (Address("akka", "", ip, port), false) + } /* * ^- the above last resort will not match later requests directly, but be * picked up by retrieveTargetSettings, so that throttle ops are * applied to the right throttle actors, assuming that there can * be only one actor system per host:port. */ - val inj = ingestContextAddress(ctx, addr) + val inj = ingestContextAddress(ctx, addr, known) if (direction includes Direction.Send) inj.sender ! s if (direction includes Direction.Receive) inj.receiver ! 
s case null ⇒ From 54febffb283129cf84a1de3dffba5b36691f24a0 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 4 Jun 2012 17:07:44 +0200 Subject: [PATCH 35/92] #2093 - Adding support for setting the sender when using TestActorRef.receive --- .../src/main/scala/akka/testkit/TestActorRef.scala | 12 +++++++++++- .../test/scala/akka/testkit/TestActorRefSpec.scala | 9 ++++++++- 2 files changed, 19 insertions(+), 2 deletions(-) diff --git a/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala b/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala index 0a5d6163e8..279c728e80 100644 --- a/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala +++ b/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala @@ -56,7 +56,17 @@ class TestActorRef[T <: Actor]( * thrown will be available to you, while still being able to use * become/unbecome. */ - def receive(o: Any): Unit = underlying.receiveMessage(o) + def receive(o: Any): Unit = receive(o, underlying.system.deadLetters) + + /** + * Directly inject messages into actor receive behavior. Any exceptions + * thrown will be available to you, while still being able to use + * become/unbecome. 
+ */ + def receive(o: Any, sender: ActorRef): Unit = try { + underlying.currentMessage = Envelope(o, if (sender eq null) underlying.system.deadLetters else sender)(underlying.system) + underlying.receiveMessage(o) + } finally underlying.currentMessage = null /** * Retrieve reference to the underlying actor, where the static type matches the factory used inside the diff --git a/akka-testkit/src/test/scala/akka/testkit/TestActorRefSpec.scala b/akka-testkit/src/test/scala/akka/testkit/TestActorRefSpec.scala index 7c977884fc..492c44408c 100644 --- a/akka-testkit/src/test/scala/akka/testkit/TestActorRefSpec.scala +++ b/akka-testkit/src/test/scala/akka/testkit/TestActorRefSpec.scala @@ -246,11 +246,18 @@ class TestActorRefSpec extends AkkaSpec("disp1.type=Dispatcher") with BeforeAndA a.underlying.dispatcher.getClass must be(classOf[Dispatcher]) } - "proxy receive for the underlying actor" in { + "proxy receive for the underlying actor without sender" in { val ref = TestActorRef[WorkerActor] ref.receive("work") ref.isTerminated must be(true) } + "proxy receive for the underlying actor with sender" in { + val ref = TestActorRef[WorkerActor] + ref.receive("work", testActor) + ref.isTerminated must be(true) + expectMsg("workDone") + } + } } From b840624b7844ff4a8427a4e069b9cd8bdc3a5447 Mon Sep 17 00:00:00 2001 From: Roland Date: Mon, 4 Jun 2012 19:28:58 +0200 Subject: [PATCH 36/92] warn against using TestKitBase trait --- akka-docs/scala/testing.rst | 5 ++ .../src/main/scala/akka/testkit/TestKit.scala | 90 +++++++++++-------- 2 files changed, 59 insertions(+), 36 deletions(-) diff --git a/akka-docs/scala/testing.rst b/akka-docs/scala/testing.rst index d2875ed62a..d19a1ab753 100644 --- a/akka-docs/scala/testing.rst +++ b/akka-docs/scala/testing.rst @@ -685,6 +685,11 @@ The ``implicit lazy val system`` must be declared exactly like that (you can course pass arguments to the actor system factory as needed) because trait :class:`TestKitBase` needs the system during its 
construction. +.. warning:: + + Use of the trait is discouraged because of potential issues with binary + backwards compatibility in the future, use at own risk. + Specs2 ------ diff --git a/akka-testkit/src/main/scala/akka/testkit/TestKit.scala b/akka-testkit/src/main/scala/akka/testkit/TestKit.scala index 9dfa40a5ee..373f4c1fff 100644 --- a/akka-testkit/src/main/scala/akka/testkit/TestKit.scala +++ b/akka-testkit/src/main/scala/akka/testkit/TestKit.scala @@ -62,44 +62,22 @@ class TestActor(queue: BlockingDeque[TestActor.Message]) extends Actor { } /** - * Test kit for testing actors. Inheriting from this trait enables reception of - * replies from actors, which are queued by an internal actor and can be - * examined using the `expectMsg...` methods. Assertions and bounds concerning - * timing are available in the form of `within` blocks. + * Implementation trait behind the [[akka.testkit.TestKit]] class: you may use + * this if inheriting from a concrete class is not possible. * - *
- * class Test extends TestKit(ActorSystem()) {
- *     try {
- *
- *       val test = system.actorOf(Props[SomeActor]
- *
- *       within (1 second) {
- *         test ! SomeWork
- *         expectMsg(Result1) // bounded to 1 second
- *         expectMsg(Result2) // bounded to the remainder of the 1 second
- *       }
- *
- *     } finally {
- *       system.shutdown()
- *     }
+ * Use of the trait is discouraged because of potential issues with binary 
+ * backwards compatibility in the future, use at own risk.
+ * 
+ * This trait requires the concrete class mixing it in to provide an 
+ * [[akka.actor.ActorSystem]] which is available before this traits’s
+ * constructor is run. The recommended way is this:
+ * 
+ * {{{
+ * class MyTest extends TestKitBase {
+ *   implicit lazy val system = ActorSystem() // may add arguments here
+ *   ...
  * }
- * 
- * - * Beware of two points: - * - * - the ActorSystem passed into the constructor needs to be shutdown, - * otherwise thread pools and memory will be leaked - * - this trait is not thread-safe (only one actor with one queue, one stack - * of `within` blocks); it is expected that the code is executed from a - * constructor as shown above, which makes this a non-issue, otherwise take - * care not to run tests within a single test class instance in parallel. - * - * It should be noted that for CI servers and the like all maximum Durations - * are scaled using their Duration.dilated method, which uses the - * TestKitExtension.Settings.TestTimeFactor settable via akka.conf entry "akka.test.timefactor". - * - * @author Roland Kuhn - * @since 1.1 + * }}} */ trait TestKitBase { @@ -579,6 +557,46 @@ trait TestKitBase { private def format(u: TimeUnit, d: Duration) = "%.3f %s".format(d.toUnit(u), u.toString.toLowerCase) } +/** + * Test kit for testing actors. Inheriting from this trait enables reception of + * replies from actors, which are queued by an internal actor and can be + * examined using the `expectMsg...` methods. Assertions and bounds concerning + * timing are available in the form of `within` blocks. + * + *
+ * class Test extends TestKit(ActorSystem()) {
+ *     try {
+ *
+ *       val test = system.actorOf(Props[SomeActor]
+ *
+ *       within (1 second) {
+ *         test ! SomeWork
+ *         expectMsg(Result1) // bounded to 1 second
+ *         expectMsg(Result2) // bounded to the remainder of the 1 second
+ *       }
+ *
+ *     } finally {
+ *       system.shutdown()
+ *     }
+ * }
+ * 
+ * + * Beware of two points: + * + * - the ActorSystem passed into the constructor needs to be shutdown, + * otherwise thread pools and memory will be leaked + * - this trait is not thread-safe (only one actor with one queue, one stack + * of `within` blocks); it is expected that the code is executed from a + * constructor as shown above, which makes this a non-issue, otherwise take + * care not to run tests within a single test class instance in parallel. + * + * It should be noted that for CI servers and the like all maximum Durations + * are scaled using their Duration.dilated method, which uses the + * TestKitExtension.Settings.TestTimeFactor settable via akka.conf entry "akka.test.timefactor". + * + * @author Roland Kuhn + * @since 1.1 + */ class TestKit(_system: ActorSystem) extends { implicit val system = _system } with TestKitBase object TestKit { From b98fb0e37a132b2b2a29278f3d5ae47abf2919dd Mon Sep 17 00:00:00 2001 From: Roland Date: Mon, 4 Jun 2012 23:10:03 +0200 Subject: [PATCH 37/92] clarify deployment using anonymous factories --- akka-docs/java/remoting.rst | 8 ++++++++ akka-docs/scala/remoting.rst | 8 ++++++++ 2 files changed, 16 insertions(+) diff --git a/akka-docs/java/remoting.rst b/akka-docs/java/remoting.rst index ae2ac9c246..910ec5fbb2 100644 --- a/akka-docs/java/remoting.rst +++ b/akka-docs/java/remoting.rst @@ -92,6 +92,14 @@ As you can see from the example above the following pattern is used to find an ` akka://@:/ +.. note:: + + In order to ensure serializability of ``Props`` when passing constructor + arguments to the actor being created, do not make the factory a non-static + inner class: this will inherently capture a reference to its enclosing + object, which in most cases is not serializable. It is best to make a static + inner class which implements :class:`UntypedActorFactory`. 
+ Programmatic Remote Deployment ------------------------------ diff --git a/akka-docs/scala/remoting.rst b/akka-docs/scala/remoting.rst index 0f55ccdff4..0863d80b55 100644 --- a/akka-docs/scala/remoting.rst +++ b/akka-docs/scala/remoting.rst @@ -105,6 +105,14 @@ Once you have configured the properties above you would do the following in code ``SampleActor`` has to be available to the runtimes using it, i.e. the classloader of the actor systems has to have a JAR containing the class. +.. note:: + + In order to ensure serializability of ``Props`` when passing constructor + arguments to the actor being created, do not make the factory an inner class: + this will inherently capture a reference to its enclosing object, which in + most cases is not serializable. It is best to create a factory method in the + companion object of the actor’s class. + Programmatic Remote Deployment ------------------------------ From 391fed65941c29aa7d139011b0a97fb7c37f768e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Mon, 4 Jun 2012 23:21:28 +0200 Subject: [PATCH 38/92] Misc changes, fixes and improvements after review. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Renamed all 'frequency' to 'interval' - Split up NodeJoinAndUpSpec and into NodeJoinSpec and NodeUpSpec. - Split up MembershipChangeListenerJoinAndUpSpec and into MembershipChangeListenerJoinSpec and MembershipChangeListenerUpSpec. 
- Added utility method 'startClusterNode()' - Fixed race in register listener and telling node to leave - Removed 'after' blocks - Cleaned up unused code - Improved comments Signed-off-by: Jonas Bonér --- .../src/main/resources/reference.conf | 6 +- .../src/main/scala/akka/cluster/Cluster.scala | 12 +-- .../scala/akka/cluster/ClusterSettings.scala | 6 +- ...ientDowningNodeThatIsUnreachableSpec.scala | 6 +- .../ClientDowningNodeThatIsUpSpec.scala | 6 +- .../GossipingAccrualFailureDetectorSpec.scala | 2 +- .../akka/cluster/JoinTwoClustersSpec.scala | 7 +- ...aderDowningNodeThatIsUnreachableSpec.scala | 6 +- .../akka/cluster/LeaderElectionSpec.scala | 7 +- .../MembershipChangeListenerExitingSpec.scala | 28 ++++--- ...=> MembershipChangeListenerJoinSpec.scala} | 44 ++++------- .../MembershipChangeListenerLeavingSpec.scala | 28 ++++--- .../MembershipChangeListenerUpSpec.scala | 64 ++++++++++++++++ .../akka/cluster/MultiNodeClusterSpec.scala | 18 +++-- .../akka/cluster/NodeJoinAndUpSpec.scala | 76 ------------------- .../scala/akka/cluster/NodeJoinSpec.scala | 57 ++++++++++++++ ...LeavingAndExitingAndBeingRemovedSpec.scala | 2 +- .../cluster/NodeLeavingAndExitingSpec.scala | 14 ++-- .../scala/akka/cluster/NodeLeavingSpec.scala | 6 +- .../akka/cluster/NodeMembershipSpec.scala | 8 +- .../scala/akka/cluster/NodeShutdownSpec.scala | 4 +- .../scala/akka/cluster/NodeUpSpec.scala | 50 ++++++++++++ .../akka/cluster/ClusterConfigSpec.scala | 6 +- 23 files changed, 289 insertions(+), 174 deletions(-) rename akka-cluster/src/multi-jvm/scala/akka/cluster/{MembershipChangeListenerJoinAndUpSpec.scala => MembershipChangeListenerJoinSpec.scala} (52%) create mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinAndUpSpec.scala create mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinSpec.scala create mode 100644 
akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala diff --git a/akka-cluster/src/main/resources/reference.conf b/akka-cluster/src/main/resources/reference.conf index 7dd511e34a..8c905d5b29 100644 --- a/akka-cluster/src/main/resources/reference.conf +++ b/akka-cluster/src/main/resources/reference.conf @@ -25,13 +25,13 @@ akka { periodic-tasks-initial-delay = 1s # how often should the node send out gossip information? - gossip-frequency = 1s + gossip-interval = 1s # how often should the leader perform maintenance tasks? - leader-actions-frequency = 1s + leader-actions-interval = 1s # how often should the node move nodes, marked as unreachable by the failure detector, out of the membership ring? - unreachable-nodes-reaper-frequency = 1s + unreachable-nodes-reaper-interval = 1s # accrual failure detection config failure-detector { diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index c5ad773989..8beb7f4164 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -380,9 +380,9 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ private val vclockNode = VectorClock.Node(selfAddress.toString) private val periodicTasksInitialDelay = clusterSettings.PeriodicTasksInitialDelay - private val gossipFrequency = clusterSettings.GossipFrequency - private val leaderActionsFrequency = clusterSettings.LeaderActionsFrequency - private val unreachableNodesReaperFrequency = clusterSettings.UnreachableNodesReaperFrequency + private val gossipInterval = clusterSettings.GossipInterval + private val leaderActionsInterval = clusterSettings.LeaderActionsInterval + private val unreachableNodesReaperInterval = clusterSettings.UnreachableNodesReaperInterval implicit private val defaultTimeout = Timeout(remoteSettings.RemoteSystemDaemonAckTimeout) @@ -424,17 +424,17 @@ class Cluster(system: 
ExtendedActorSystem) extends Extension { clusterNode ⇒ // ======================================================== // start periodic gossip to random nodes in cluster - private val gossipCanceller = system.scheduler.schedule(periodicTasksInitialDelay, gossipFrequency) { + private val gossipCanceller = system.scheduler.schedule(periodicTasksInitialDelay, gossipInterval) { gossip() } // start periodic cluster failure detector reaping (moving nodes condemned by the failure detector to unreachable list) - private val failureDetectorReaperCanceller = system.scheduler.schedule(periodicTasksInitialDelay, unreachableNodesReaperFrequency) { + private val failureDetectorReaperCanceller = system.scheduler.schedule(periodicTasksInitialDelay, unreachableNodesReaperInterval) { reapUnreachableMembers() } // start periodic leader action management (only applies for the current leader) - private val leaderActionsCanceller = system.scheduler.schedule(periodicTasksInitialDelay, leaderActionsFrequency) { + private val leaderActionsCanceller = system.scheduler.schedule(periodicTasksInitialDelay, leaderActionsInterval) { leaderActions() } diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala index 8e9b9c770d..0e7dac06ab 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala @@ -20,9 +20,9 @@ class ClusterSettings(val config: Config, val systemName: String) { case AddressFromURIString(addr) ⇒ Some(addr) } val PeriodicTasksInitialDelay = Duration(getMilliseconds("akka.cluster.periodic-tasks-initial-delay"), MILLISECONDS) - val GossipFrequency = Duration(getMilliseconds("akka.cluster.gossip-frequency"), MILLISECONDS) - val LeaderActionsFrequency = Duration(getMilliseconds("akka.cluster.leader-actions-frequency"), MILLISECONDS) - val UnreachableNodesReaperFrequency = 
Duration(getMilliseconds("akka.cluster.unreachable-nodes-reaper-frequency"), MILLISECONDS) + val GossipInterval = Duration(getMilliseconds("akka.cluster.gossip-interval"), MILLISECONDS) + val LeaderActionsInterval = Duration(getMilliseconds("akka.cluster.leader-actions-interval"), MILLISECONDS) + val UnreachableNodesReaperInterval = Duration(getMilliseconds("akka.cluster.unreachable-nodes-reaper-interval"), MILLISECONDS) val NrOfGossipDaemons = getInt("akka.cluster.nr-of-gossip-daemons") val NrOfDeputyNodes = getInt("akka.cluster.nr-of-deputy-nodes") val AutoDown = getBoolean("akka.cluster.auto-down") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala index 6ab4d1a39e..ba34c9b0be 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala @@ -26,8 +26,8 @@ class ClientDowningNodeThatIsUnreachableMultiJvmNode4 extends ClientDowningNodeT class ClientDowningNodeThatIsUnreachableSpec extends MultiNodeSpec(ClientDowningNodeThatIsUnreachableMultiJvmSpec) - with MultiNodeClusterSpec - with ImplicitSender with BeforeAndAfter { + with MultiNodeClusterSpec { + import ClientDowningNodeThatIsUnreachableMultiJvmSpec._ override def initialParticipants = 4 @@ -36,7 +36,7 @@ class ClientDowningNodeThatIsUnreachableSpec "be able to DOWN a node that is UNREACHABLE (killed)" taggedAs LongRunningTest in { runOn(first) { - cluster.self + startClusterNode() awaitUpConvergence(numberOfMembers = 4) val thirdAddress = node(third).address diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala index 6b0bbae22e..ac1d68c8af 100644 --- 
a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala @@ -26,8 +26,8 @@ class ClientDowningNodeThatIsUpMultiJvmNode4 extends ClientDowningNodeThatIsUpSp class ClientDowningNodeThatIsUpSpec extends MultiNodeSpec(ClientDowningNodeThatIsUpMultiJvmSpec) - with MultiNodeClusterSpec - with ImplicitSender with BeforeAndAfter { + with MultiNodeClusterSpec { + import ClientDowningNodeThatIsUpMultiJvmSpec._ override def initialParticipants = 4 @@ -36,7 +36,7 @@ class ClientDowningNodeThatIsUpSpec "be able to DOWN a node that is UP (healthy and available)" taggedAs LongRunningTest in { runOn(first) { - cluster.self + startClusterNode() awaitUpConvergence(numberOfMembers = 4) val thirdAddress = node(third).address diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala index 9d388622db..cec99e9af9 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala @@ -43,7 +43,7 @@ abstract class GossipingAccrualFailureDetectorSpec extends MultiNodeSpec(Gossipi "receive gossip heartbeats so that all member nodes in the cluster are marked 'available'" taggedAs LongRunningTest in { // make sure that the node-to-join is started before other join runOn(first) { - cluster.self + startClusterNode() } testConductor.enter("first-started") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala index 9f1395b5dd..7b7263bbe0 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala @@ -27,7 +27,10 @@ class 
JoinTwoClustersMultiJvmNode4 extends JoinTwoClustersSpec class JoinTwoClustersMultiJvmNode5 extends JoinTwoClustersSpec class JoinTwoClustersMultiJvmNode6 extends JoinTwoClustersSpec -abstract class JoinTwoClustersSpec extends MultiNodeSpec(JoinTwoClustersMultiJvmSpec) with MultiNodeClusterSpec with ImplicitSender { +abstract class JoinTwoClustersSpec + extends MultiNodeSpec(JoinTwoClustersMultiJvmSpec) + with MultiNodeClusterSpec { + import JoinTwoClustersMultiJvmSpec._ override def initialParticipants = 6 @@ -41,7 +44,7 @@ abstract class JoinTwoClustersSpec extends MultiNodeSpec(JoinTwoClustersMultiJvm "be able to 'elect' a single leader after joining (A -> B)" taggedAs LongRunningTest in { // make sure that the node-to-join is started before other join runOn(a1, b1, c1) { - cluster.self + startClusterNode() } testConductor.enter("first-started") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala index 63665d3c57..7b2536d9d2 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala @@ -34,8 +34,8 @@ class LeaderDowningNodeThatIsUnreachableMultiJvmNode4 extends LeaderDowningNodeT class LeaderDowningNodeThatIsUnreachableSpec extends MultiNodeSpec(LeaderDowningNodeThatIsUnreachableMultiJvmSpec) - with MultiNodeClusterSpec - with ImplicitSender with BeforeAndAfter { + with MultiNodeClusterSpec { + import LeaderDowningNodeThatIsUnreachableMultiJvmSpec._ override def initialParticipants = 4 @@ -44,7 +44,7 @@ class LeaderDowningNodeThatIsUnreachableSpec "be able to DOWN a 'last' node that is UNREACHABLE" taggedAs LongRunningTest in { runOn(first) { - cluster.self + startClusterNode() awaitUpConvergence(numberOfMembers = 4) val fourthAddress = node(fourth).address diff --git 
a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala index ba0471bedb..bf60b6b4ac 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala @@ -26,7 +26,10 @@ class LeaderElectionMultiJvmNode3 extends LeaderElectionSpec class LeaderElectionMultiJvmNode4 extends LeaderElectionSpec class LeaderElectionMultiJvmNode5 extends LeaderElectionSpec -abstract class LeaderElectionSpec extends MultiNodeSpec(LeaderElectionMultiJvmSpec) with MultiNodeClusterSpec { +abstract class LeaderElectionSpec + extends MultiNodeSpec(LeaderElectionMultiJvmSpec) + with MultiNodeClusterSpec { + import LeaderElectionMultiJvmSpec._ override def initialParticipants = 5 @@ -41,7 +44,7 @@ abstract class LeaderElectionSpec extends MultiNodeSpec(LeaderElectionMultiJvmSp "be able to 'elect' a single leader" taggedAs LongRunningTest in { // make sure that the node-to-join is started before other join runOn(first) { - cluster.self + startClusterNode() } testConductor.enter("first-started") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala index 0145628bd5..8932eed6ee 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala @@ -20,8 +20,8 @@ object MembershipChangeListenerExitingMultiJvmSpec extends MultiNodeConfig { debugConfig(on = false) .withFallback(ConfigFactory.parseString(""" akka.cluster { - leader-actions-frequency = 5000 ms # increase the leader action task frequency - unreachable-nodes-reaper-frequency = 30000 ms # turn "off" reaping to unreachable node set + leader-actions-interval = 5 s # increase the leader action task 
interval + unreachable-nodes-reaper-interval = 30 s # turn "off" reaping to unreachable node set } """) .withFallback(MultiNodeClusterSpec.clusterConfig))) @@ -31,8 +31,10 @@ class MembershipChangeListenerExitingMultiJvmNode1 extends MembershipChangeListe class MembershipChangeListenerExitingMultiJvmNode2 extends MembershipChangeListenerExitingSpec class MembershipChangeListenerExitingMultiJvmNode3 extends MembershipChangeListenerExitingSpec -abstract class MembershipChangeListenerExitingSpec extends MultiNodeSpec(MembershipChangeListenerExitingMultiJvmSpec) - with MultiNodeClusterSpec with ImplicitSender with BeforeAndAfter { +abstract class MembershipChangeListenerExitingSpec + extends MultiNodeSpec(MembershipChangeListenerExitingMultiJvmSpec) + with MultiNodeClusterSpec { + import MembershipChangeListenerExitingMultiJvmSpec._ override def initialParticipants = 3 @@ -45,7 +47,7 @@ abstract class MembershipChangeListenerExitingSpec extends MultiNodeSpec(Members "be notified when new node is EXITING" taggedAs LongRunningTest in { runOn(first) { - cluster.self + startClusterNode() } testConductor.enter("first-started") @@ -55,21 +57,27 @@ abstract class MembershipChangeListenerExitingSpec extends MultiNodeSpec(Members awaitUpConvergence(numberOfMembers = 3) testConductor.enter("rest-started") + runOn(first) { + testConductor.enter("registered-listener") + cluster.leave(secondAddress) + } + + runOn(second) { + testConductor.enter("registered-listener") + } + runOn(third) { val exitingLatch = TestLatch() cluster.registerListener(new MembershipChangeListener { def notify(members: SortedSet[Member]) { - if (members.size == 3 && members.exists(_.status == MemberStatus.Exiting)) + if (members.size == 3 && members.exists( m => m.address == secondAddress && m.status == MemberStatus.Exiting)) exitingLatch.countDown() } }) + testConductor.enter("registered-listener") exitingLatch.await } - runOn(first) { - cluster.leave(secondAddress) - } - testConductor.enter("finished") } } 
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinAndUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinSpec.scala similarity index 52% rename from akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinAndUpSpec.scala rename to akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinSpec.scala index 81e32d1491..2f82e12506 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinAndUpSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinSpec.scala @@ -11,7 +11,7 @@ import akka.remote.testkit.MultiNodeSpec import akka.testkit._ import akka.util.duration._ -object MembershipChangeListenerJoinAndUpMultiJvmSpec extends MultiNodeConfig { +object MembershipChangeListenerJoinMultiJvmSpec extends MultiNodeConfig { val first = role("first") val second = role("second") @@ -19,46 +19,39 @@ object MembershipChangeListenerJoinAndUpMultiJvmSpec extends MultiNodeConfig { debugConfig(on = false) .withFallback(ConfigFactory.parseString(""" akka.cluster { - gossip-frequency = 1000 ms - leader-actions-frequency = 5000 ms # increase the leader action task frequency + leader-actions-interval = 5 s # increase the leader action task interval to allow time checking for JOIN before leader moves it to UP } """) .withFallback(MultiNodeClusterSpec.clusterConfig))) } -class MembershipChangeListenerJoinAndUpMultiJvmNode1 extends MembershipChangeListenerJoinAndUpSpec -class MembershipChangeListenerJoinAndUpMultiJvmNode2 extends MembershipChangeListenerJoinAndUpSpec +class MembershipChangeListenerJoinMultiJvmNode1 extends MembershipChangeListenerJoinSpec +class MembershipChangeListenerJoinMultiJvmNode2 extends MembershipChangeListenerJoinSpec -abstract class MembershipChangeListenerJoinAndUpSpec - extends MultiNodeSpec(MembershipChangeListenerJoinAndUpMultiJvmSpec) - with MultiNodeClusterSpec - with ImplicitSender - with 
BeforeAndAfter { +abstract class MembershipChangeListenerJoinSpec + extends MultiNodeSpec(MembershipChangeListenerJoinMultiJvmSpec) + with MultiNodeClusterSpec { - import MembershipChangeListenerJoinAndUpMultiJvmSpec._ + import MembershipChangeListenerJoinMultiJvmSpec._ override def initialParticipants = 2 - after { - testConductor.enter("after") - } - lazy val firstAddress = node(first).address lazy val secondAddress = node(second).address "A registered MembershipChangeListener" must { - "be notified when new node is JOINING and node is marked as UP by the leader" taggedAs LongRunningTest in { + "be notified when new node is JOINING" taggedAs LongRunningTest in { runOn(first) { - cluster.self + startClusterNode() } runOn(second) { + testConductor.enter("registered-listener") cluster.join(firstAddress) } runOn(first) { - // JOINING val joinLatch = TestLatch() cluster.registerListener(new MembershipChangeListener { def notify(members: SortedSet[Member]) { @@ -66,20 +59,13 @@ abstract class MembershipChangeListenerJoinAndUpSpec joinLatch.countDown() } }) + testConductor.enter("registered-listener") + joinLatch.await cluster.convergence.isDefined must be(true) - - // UP - val upLatch = TestLatch() - cluster.registerListener(new MembershipChangeListener { - def notify(members: SortedSet[Member]) { - if (members.size == 2 && members.forall(_.status == MemberStatus.Up)) - upLatch.countDown() - } - }) - upLatch.await - awaitCond(cluster.convergence.isDefined) } + + testConductor.enter("after") } } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerLeavingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerLeavingSpec.scala index f8b083c4d8..089f241849 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerLeavingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerLeavingSpec.scala @@ -18,8 +18,8 @@ object 
MembershipChangeListenerLeavingMultiJvmSpec extends MultiNodeConfig { commonConfig( debugConfig(on = false) .withFallback(ConfigFactory.parseString(""" - akka.cluster.leader-actions-frequency = 5000 ms - akka.cluster.unreachable-nodes-reaper-frequency = 30000 ms # turn "off" reaping to unreachable node set + akka.cluster.leader-actions-interval = 5 s + akka.cluster.unreachable-nodes-reaper-interval = 30 s """)) .withFallback(MultiNodeClusterSpec.clusterConfig)) } @@ -28,8 +28,10 @@ class MembershipChangeListenerLeavingMultiJvmNode1 extends MembershipChangeListe class MembershipChangeListenerLeavingMultiJvmNode2 extends MembershipChangeListenerLeavingSpec class MembershipChangeListenerLeavingMultiJvmNode3 extends MembershipChangeListenerLeavingSpec -abstract class MembershipChangeListenerLeavingSpec extends MultiNodeSpec(MembershipChangeListenerLeavingMultiJvmSpec) - with MultiNodeClusterSpec with ImplicitSender with BeforeAndAfter { +abstract class MembershipChangeListenerLeavingSpec + extends MultiNodeSpec(MembershipChangeListenerLeavingMultiJvmSpec) + with MultiNodeClusterSpec { + import MembershipChangeListenerLeavingMultiJvmSpec._ override def initialParticipants = 3 @@ -42,7 +44,7 @@ abstract class MembershipChangeListenerLeavingSpec extends MultiNodeSpec(Members "be notified when new node is LEAVING" taggedAs LongRunningTest in { runOn(first) { - cluster.self + startClusterNode() } testConductor.enter("first-started") @@ -52,21 +54,27 @@ abstract class MembershipChangeListenerLeavingSpec extends MultiNodeSpec(Members awaitUpConvergence(numberOfMembers = 3) testConductor.enter("rest-started") + runOn(first) { + testConductor.enter("registered-listener") + cluster.leave(secondAddress) + } + + runOn(second) { + testConductor.enter("registered-listener") + } + runOn(third) { val latch = TestLatch() cluster.registerListener(new MembershipChangeListener { def notify(members: SortedSet[Member]) { - if (members.size == 3 && members.exists(_.status == 
MemberStatus.Leaving)) + if (members.size == 3 && members.exists( m => m.address == secondAddress && m.status == MemberStatus.Leaving)) latch.countDown() } }) + testConductor.enter("registered-listener") latch.await } - runOn(first) { - cluster.leave(secondAddress) - } - testConductor.enter("finished") } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala new file mode 100644 index 0000000000..3df6b876f9 --- /dev/null +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala @@ -0,0 +1,64 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ +package akka.cluster + +import scala.collection.immutable.SortedSet +import com.typesafe.config.ConfigFactory +import org.scalatest.BeforeAndAfter +import akka.remote.testkit.MultiNodeConfig +import akka.remote.testkit.MultiNodeSpec +import akka.testkit._ +import akka.util.duration._ + +object MembershipChangeListenerUpMultiJvmSpec extends MultiNodeConfig { + val first = role("first") + val second = role("second") + + commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) +} + +class MembershipChangeListenerUpMultiJvmNode1 extends MembershipChangeListenerUpSpec +class MembershipChangeListenerUpMultiJvmNode2 extends MembershipChangeListenerUpSpec + +abstract class MembershipChangeListenerUpSpec + extends MultiNodeSpec(MembershipChangeListenerUpMultiJvmSpec) + with MultiNodeClusterSpec { + + import MembershipChangeListenerUpMultiJvmSpec._ + + override def initialParticipants = 2 + + lazy val firstAddress = node(first).address + lazy val secondAddress = node(second).address + + "A registered MembershipChangeListener" must { + "be notified when new node is marked as UP by the leader" taggedAs LongRunningTest in { + + runOn(first) { + startClusterNode() + } + + runOn(second) { + testConductor.enter("registered-listener") + 
cluster.join(firstAddress) + } + + runOn(first) { + val upLatch = TestLatch() + cluster.registerListener(new MembershipChangeListener { + def notify(members: SortedSet[Member]) { + if (members.size == 2 && members.forall(_.status == MemberStatus.Up)) + upLatch.countDown() + } + }) + testConductor.enter("registered-listener") + + upLatch.await + awaitUpConvergence(numberOfMembers = 2) + } + + testConductor.enter("after") + } + } +} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala index 4d0c7f4720..dd57b4b13f 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala @@ -15,11 +15,11 @@ import akka.util.Duration object MultiNodeClusterSpec { def clusterConfig: Config = ConfigFactory.parseString(""" akka.cluster { - auto-down = off - gossip-frequency = 200 ms - leader-actions-frequency = 200 ms - unreachable-nodes-reaper-frequency = 200 ms - periodic-tasks-initial-delay = 300 ms + auto-down = off + gossip-interval = 200 ms + leader-actions-interval = 200 ms + unreachable-nodes-reaper-interval = 200 ms + periodic-tasks-initial-delay = 300 ms } akka.test { single-expect-default = 5 s @@ -29,8 +29,16 @@ object MultiNodeClusterSpec { trait MultiNodeClusterSpec { self: MultiNodeSpec ⇒ + /** + * Create a cluster node using 'Cluster(system)'. + */ def cluster: Cluster = Cluster(system) + /** + * Use this method instead of 'cluster.self'. + */ + def startClusterNode(): Unit = cluster.self + /** * Assert that the member addresses match the expected addresses in the * sort order used by the cluster. 
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinAndUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinAndUpSpec.scala deleted file mode 100644 index 5415df1b4a..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinAndUpSpec.scala +++ /dev/null @@ -1,76 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ -package akka.cluster - -import com.typesafe.config.ConfigFactory -import org.scalatest.BeforeAndAfter -import akka.remote.testkit.MultiNodeConfig -import akka.remote.testkit.MultiNodeSpec -import akka.testkit._ -import akka.util.duration._ - -object NodeJoinAndUpMultiJvmSpec extends MultiNodeConfig { - val first = role("first") - val second = role("second") - - commonConfig( - debugConfig(on = false) - .withFallback(ConfigFactory.parseString(""" - akka.cluster { - gossip-frequency = 1000 ms - leader-actions-frequency = 5000 ms # increase the leader action task frequency - } - """) - .withFallback(MultiNodeClusterSpec.clusterConfig))) -} - -class NodeJoinAndUpMultiJvmNode1 extends NodeJoinAndUpSpec -class NodeJoinAndUpMultiJvmNode2 extends NodeJoinAndUpSpec - -abstract class NodeJoinAndUpSpec - extends MultiNodeSpec(NodeJoinAndUpMultiJvmSpec) - with MultiNodeClusterSpec - with ImplicitSender - with BeforeAndAfter { - - import NodeJoinAndUpMultiJvmSpec._ - - override def initialParticipants = 2 - - after { - testConductor.enter("after") - } - - lazy val firstAddress = node(first).address - lazy val secondAddress = node(second).address - - "A first cluster node with a 'node-to-join' config set to empty string (singleton cluster)" must { - - "be a singleton cluster when started up" taggedAs LongRunningTest in { - runOn(first) { - awaitCond(cluster.isSingletonCluster) - awaitUpConvergence(numberOfMembers = 1) - cluster.isLeader must be(true) - } - } - } - - "A second cluster node" must { - "join the cluster as JOINING - when sending a 'Join' command - and then be moved to UP by the leader" taggedAs 
LongRunningTest in { - - runOn(second) { - cluster.join(firstAddress) - } - - awaitCond(cluster.latestGossip.members.exists { member ⇒ member.address == secondAddress && member.status == MemberStatus.Joining }) - - awaitCond( - cluster.latestGossip.members.exists { member ⇒ member.address == secondAddress && member.status == MemberStatus.Up }, - 30.seconds.dilated) // waiting for the leader to move from JOINING -> UP (frequency set to 5 sec in config) - - cluster.latestGossip.members.size must be(2) - awaitCond(cluster.convergence.isDefined) - } - } -} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinSpec.scala new file mode 100644 index 0000000000..99116ecb25 --- /dev/null +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinSpec.scala @@ -0,0 +1,57 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ +package akka.cluster + +import com.typesafe.config.ConfigFactory +import org.scalatest.BeforeAndAfter +import akka.remote.testkit.MultiNodeConfig +import akka.remote.testkit.MultiNodeSpec +import akka.testkit._ +import akka.util.duration._ + +object NodeJoinMultiJvmSpec extends MultiNodeConfig { + val first = role("first") + val second = role("second") + + commonConfig( + debugConfig(on = false) + .withFallback(ConfigFactory.parseString(""" + akka.cluster { + leader-actions-interval = 5 s # increase the leader action task interval + } + """) + .withFallback(MultiNodeClusterSpec.clusterConfig))) +} + +class NodeJoinMultiJvmNode1 extends NodeJoinSpec +class NodeJoinMultiJvmNode2 extends NodeJoinSpec + +abstract class NodeJoinSpec + extends MultiNodeSpec(NodeJoinMultiJvmSpec) + with MultiNodeClusterSpec { + + import NodeJoinMultiJvmSpec._ + + override def initialParticipants = 2 + + lazy val firstAddress = node(first).address + lazy val secondAddress = node(second).address + + "A cluster node" must { + "join another cluster and get status JOINING - when sending a 'Join' 
command" taggedAs LongRunningTest in { + + runOn(first) { + startClusterNode() + } + + runOn(second) { + cluster.join(firstAddress) + } + + awaitCond(cluster.latestGossip.members.exists { member ⇒ member.address == secondAddress && member.status == MemberStatus.Joining }) + + testConductor.enter("after") + } + } +} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala index 7c1037a624..da500323aa 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala @@ -40,7 +40,7 @@ abstract class NodeLeavingAndExitingAndBeingRemovedSpec extends MultiNodeSpec(No "be moved to EXITING and then to REMOVED by the reaper" taggedAs LongRunningTest in { runOn(first) { - cluster.self + startClusterNode() } testConductor.enter("first-started") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala index 3fe9e220f6..189cb4c9c6 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala @@ -20,8 +20,8 @@ object NodeLeavingAndExitingMultiJvmSpec extends MultiNodeConfig { debugConfig(on = false) .withFallback(ConfigFactory.parseString(""" akka.cluster { - leader-actions-frequency = 5 s # increase the leader action task frequency to make sure we get a chance to test the LEAVING state - unreachable-nodes-reaper-frequency = 30 s # turn "off" reaping to unreachable node set + leader-actions-interval = 5 s # increase the leader action task frequency to make sure we get a chance to test the LEAVING state + unreachable-nodes-reaper-interval = 30 s # turn "off" reaping to 
unreachable node set } """) .withFallback(MultiNodeClusterSpec.clusterConfig))) @@ -31,8 +31,10 @@ class NodeLeavingAndExitingMultiJvmNode1 extends NodeLeavingAndExitingSpec class NodeLeavingAndExitingMultiJvmNode2 extends NodeLeavingAndExitingSpec class NodeLeavingAndExitingMultiJvmNode3 extends NodeLeavingAndExitingSpec -abstract class NodeLeavingAndExitingSpec extends MultiNodeSpec(NodeLeavingAndExitingMultiJvmSpec) - with MultiNodeClusterSpec with ImplicitSender with BeforeAndAfter { +abstract class NodeLeavingAndExitingSpec + extends MultiNodeSpec(NodeLeavingAndExitingMultiJvmSpec) + with MultiNodeClusterSpec { + import NodeLeavingAndExitingMultiJvmSpec._ override def initialParticipants = 3 @@ -46,7 +48,7 @@ abstract class NodeLeavingAndExitingSpec extends MultiNodeSpec(NodeLeavingAndExi "be moved to EXITING by the leader" taggedAs LongRunningTest in { runOn(first) { - cluster.self + startClusterNode() } testConductor.enter("first-started") @@ -64,7 +66,7 @@ abstract class NodeLeavingAndExitingSpec extends MultiNodeSpec(NodeLeavingAndExi runOn(first, third) { // 1. 
Verify that 'second' node is set to LEAVING - // We have set the 'leader-actions-frequency' to 5 seconds to make sure that we get a + // We have set the 'leader-actions-interval' to 5 seconds to make sure that we get a // chance to test the LEAVING state before the leader moves the node to EXITING awaitCond(cluster.latestGossip.members.exists(_.status == MemberStatus.Leaving)) // wait on LEAVING val hasLeft = cluster.latestGossip.members.find(_.status == MemberStatus.Leaving) // verify node that left diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala index 300afdea20..ad445b4c42 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala @@ -18,8 +18,8 @@ object NodeLeavingMultiJvmSpec extends MultiNodeConfig { commonConfig( debugConfig(on = false) .withFallback(ConfigFactory.parseString(""" - akka.cluster.leader-actions-frequency = 5 s - akka.cluster.unreachable-nodes-reaper-frequency = 30 s # turn "off" reaping to unreachable node set + akka.cluster.leader-actions-interval = 5 s + akka.cluster.unreachable-nodes-reaper-interval = 30 s # turn "off" reaping to unreachable node set """)) .withFallback(MultiNodeClusterSpec.clusterConfig)) } @@ -43,7 +43,7 @@ abstract class NodeLeavingSpec extends MultiNodeSpec(NodeLeavingMultiJvmSpec) "be marked as LEAVING in the converged membership table" taggedAs LongRunningTest in { runOn(first) { - cluster.self + startClusterNode() } testConductor.enter("first-started") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala index fecb53c898..369dcf56ad 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala @@ -22,7 +22,11 @@ class 
NodeMembershipMultiJvmNode1 extends NodeMembershipSpec class NodeMembershipMultiJvmNode2 extends NodeMembershipSpec class NodeMembershipMultiJvmNode3 extends NodeMembershipSpec -abstract class NodeMembershipSpec extends MultiNodeSpec(NodeMembershipMultiJvmSpec) with MultiNodeClusterSpec with ImplicitSender with BeforeAndAfter { +abstract class NodeMembershipSpec + extends MultiNodeSpec(NodeMembershipMultiJvmSpec) + with MultiNodeClusterSpec + with ImplicitSender with BeforeAndAfter { + import NodeMembershipMultiJvmSpec._ override def initialParticipants = 3 @@ -41,7 +45,7 @@ abstract class NodeMembershipSpec extends MultiNodeSpec(NodeMembershipMultiJvmSp // make sure that the node-to-join is started before other join runOn(first) { - cluster.self + startClusterNode() } testConductor.enter("first-started") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala index c0c12f4582..a9a5ee3233 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala @@ -42,7 +42,7 @@ abstract class NodeShutdownSpec extends MultiNodeSpec(NodeShutdownMultiJvmSpec) "not be singleton cluster when joined" taggedAs LongRunningTest in { // make sure that the node-to-join is started before other join runOn(first) { - cluster.self + startClusterNode() } testConductor.enter("first-started") @@ -63,8 +63,6 @@ abstract class NodeShutdownSpec extends MultiNodeSpec(NodeShutdownMultiJvmSpec) cluster.isSingletonCluster must be(true) assertLeader(first) } - } } - } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala new file mode 100644 index 0000000000..7931ce48f1 --- /dev/null +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala @@ -0,0 +1,50 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. 
+ */ +package akka.cluster + +import com.typesafe.config.ConfigFactory +import org.scalatest.BeforeAndAfter +import akka.remote.testkit.MultiNodeConfig +import akka.remote.testkit.MultiNodeSpec +import akka.testkit._ +import akka.util.duration._ + +object NodeUpMultiJvmSpec extends MultiNodeConfig { + val first = role("first") + val second = role("second") + + commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) +} + +class NodeUpMultiJvmNode1 extends NodeUpSpec +class NodeUpMultiJvmNode2 extends NodeUpSpec + +abstract class NodeUpSpec + extends MultiNodeSpec(NodeUpMultiJvmSpec) + with MultiNodeClusterSpec { + + import NodeUpMultiJvmSpec._ + + override def initialParticipants = 2 + + lazy val firstAddress = node(first).address + lazy val secondAddress = node(second).address + + "A cluster node that is joining another cluster" must { + "be moved to UP by the leader after a convergence" taggedAs LongRunningTest in { + + runOn(first) { + startClusterNode() + } + + runOn(second) { + cluster.join(firstAddress) + } + + awaitUpConvergence(numberOfMembers = 2) + + testConductor.enter("after") + } + } +} diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala index 45b0a35521..6b2ff1962c 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala @@ -20,9 +20,9 @@ class ClusterConfigSpec extends AkkaSpec { FailureDetectorMaxSampleSize must be(1000) NodeToJoin must be(None) PeriodicTasksInitialDelay must be(1 seconds) - GossipFrequency must be(1 second) - LeaderActionsFrequency must be(1 second) - UnreachableNodesReaperFrequency must be(1 second) + GossipInterval must be(1 second) + LeaderActionsInterval must be(1 second) + UnreachableNodesReaperInterval must be(1 second) NrOfGossipDaemons must be(4) NrOfDeputyNodes must be(3) AutoDown must be(true) From 
0a011ee50ea7bd235b4c612968fad163f4f9c6b3 Mon Sep 17 00:00:00 2001 From: Roland Date: Mon, 4 Jun 2012 23:35:52 +0200 Subject: [PATCH 39/92] =?UTF-8?q?fix=20a=20few=20doubled=20the=E2=80=99s?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- akka-docs/java/fault-tolerance-sample.rst | 2 +- akka-docs/java/logging.rst | 2 +- akka-docs/java/untyped-actors.rst | 2 +- akka-docs/scala/actors.rst | 2 +- akka-docs/scala/fault-tolerance-sample.rst | 2 +- akka-docs/scala/logging.rst | 2 +- akka-docs/scala/testing.rst | 8 ++++---- 7 files changed, 10 insertions(+), 10 deletions(-) diff --git a/akka-docs/java/fault-tolerance-sample.rst b/akka-docs/java/fault-tolerance-sample.rst index cb7e1e774d..749cf7ef95 100644 --- a/akka-docs/java/fault-tolerance-sample.rst +++ b/akka-docs/java/fault-tolerance-sample.rst @@ -43,7 +43,7 @@ Step Description 9, 10, 11 and tells the ``Counter`` that there is no ``Storage``. 12 The ``CounterService`` schedules a ``Reconnect`` message to itself. 13, 14 When it receives the ``Reconnect`` message it creates a new ``Storage`` ... -15, 16 and tells the the ``Counter`` to use the new ``Storage`` +15, 16 and tells the ``Counter`` to use the new ``Storage`` =========== ================================================================================== Full Source Code of the Fault Tolerance Sample (Java) diff --git a/akka-docs/java/logging.rst b/akka-docs/java/logging.rst index 0f6f4479e5..03de58de5b 100644 --- a/akka-docs/java/logging.rst +++ b/akka-docs/java/logging.rst @@ -211,7 +211,7 @@ the first case and ``LoggerFactory.getLogger(String s)`` in the second). .. note:: - Beware that the the actor system’s name is appended to a :class:`String` log + Beware that the actor system’s name is appended to a :class:`String` log source if the LoggingAdapter was created giving an :class:`ActorSystem` to the factory. 
If this is not intended, give a :class:`LoggingBus` instead as shown below: diff --git a/akka-docs/java/untyped-actors.rst b/akka-docs/java/untyped-actors.rst index 31a0df9674..ac911fd216 100644 --- a/akka-docs/java/untyped-actors.rst +++ b/akka-docs/java/untyped-actors.rst @@ -586,7 +586,7 @@ What happens to the Message --------------------------- If an exception is thrown while a message is being processed (so taken of his -mailbox and handed over the the receive), then this message will be lost. It is +mailbox and handed over to the receive), then this message will be lost. It is important to understand that it is not put back on the mailbox. So if you want to retry processing of a message, you need to deal with it yourself by catching the exception and retry your flow. Make sure that you put a bound on the number diff --git a/akka-docs/scala/actors.rst b/akka-docs/scala/actors.rst index 4a556cf6c2..9b2cb9a7e5 100644 --- a/akka-docs/scala/actors.rst +++ b/akka-docs/scala/actors.rst @@ -651,7 +651,7 @@ What happens to the Message --------------------------- If an exception is thrown while a message is being processed (so taken of his -mailbox and handed over the the receive), then this message will be lost. It is +mailbox and handed over to the receive), then this message will be lost. It is important to understand that it is not put back on the mailbox. So if you want to retry processing of a message, you need to deal with it yourself by catching the exception and retry your flow. Make sure that you put a bound on the number diff --git a/akka-docs/scala/fault-tolerance-sample.rst b/akka-docs/scala/fault-tolerance-sample.rst index 56ac838b1f..12621e968b 100644 --- a/akka-docs/scala/fault-tolerance-sample.rst +++ b/akka-docs/scala/fault-tolerance-sample.rst @@ -45,7 +45,7 @@ Step Description 9, 10, 11 and tells the ``Counter`` that there is no ``Storage``. 12 The ``CounterService`` schedules a ``Reconnect`` message to itself. 
13, 14 When it receives the ``Reconnect`` message it creates a new ``Storage`` ... -15, 16 and tells the the ``Counter`` to use the new ``Storage`` +15, 16 and tells the ``Counter`` to use the new ``Storage`` =========== ================================================================================== Full Source Code of the Fault Tolerance Sample (Scala) diff --git a/akka-docs/scala/logging.rst b/akka-docs/scala/logging.rst index 66cc6ae398..4ea96722e5 100644 --- a/akka-docs/scala/logging.rst +++ b/akka-docs/scala/logging.rst @@ -253,7 +253,7 @@ the first case and ``LoggerFactory.getLogger(s: String)`` in the second). .. note:: - Beware that the the actor system’s name is appended to a :class:`String` log + Beware that the actor system’s name is appended to a :class:`String` log source if the LoggingAdapter was created giving an :class:`ActorSystem` to the factory. If this is not intended, give a :class:`LoggingBus` instead as shown below: diff --git a/akka-docs/scala/testing.rst b/akka-docs/scala/testing.rst index d19a1ab753..0835db18e7 100644 --- a/akka-docs/scala/testing.rst +++ b/akka-docs/scala/testing.rst @@ -194,10 +194,10 @@ is a whole set of examination methods, e.g. receiving all consecutive messages matching certain criteria, receiving a whole sequence of fixed messages or classes, receiving nothing for some time, etc. -The ActorSystem passed in to the constructor of TestKit is accessible with -the the :obj:`system` member. -Remember to shut down the actor system after the test is finished (also in case -of failure) so that all actors—including the test actor—are stopped. +The ActorSystem passed in to the constructor of TestKit is accessible via the +:obj:`system` member. Remember to shut down the actor system after the test is +finished (also in case of failure) so that all actors—including the test +actor—are stopped. 
Built-In Assertions ------------------- From c6b2572623e4ff40a58e780eb60202d2cb40070b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bj=C3=B6rn=20Antonsson?= Date: Tue, 5 Jun 2012 11:27:37 +0200 Subject: [PATCH 40/92] changed val to lazy after review --- .../akka/remote/testconductor/NetworkFailureInjector.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala index a0f53b5a9b..2d5b73216e 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala @@ -39,7 +39,7 @@ private[akka] class FailureInjector extends Actor with ActorLogging { */ def ingestContextAddress(ctx: ChannelHandlerContext, addr: Address, known: Boolean, snd: Option[ActorRef] = None, rcv: Option[ActorRef] = None): Injectors = { - val gen = generation.next + lazy val gen = generation.next val name = addr.host.get + ":" + addr.port.get val thrSend = snd getOrElse context.actorOf(Props(new ThrottleActor(ctx)), name + "-snd" + gen) val thrRecv = rcv getOrElse context.actorOf(Props(new ThrottleActor(ctx)), name + "-rcv" + gen) From 46c9cf41cd02146aa9ed6c45c8c338982100e63c Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 5 Jun 2012 11:38:41 +0200 Subject: [PATCH 41/92] Review cleanup --- .../src/main/scala/akka/actor/ActorRef.scala | 5 ++--- .../scala/akka/actor/ActorRefProvider.scala | 2 -- .../main/java/akka/remote/RemoteProtocol.java | 20 +++++++++---------- 3 files changed, 12 insertions(+), 15 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorRef.scala b/akka-actor/src/main/scala/akka/actor/ActorRef.scala index 7368ae434a..30b1ccf998 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRef.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRef.scala @@ 
-426,7 +426,6 @@ private[akka] class EmptyLocalActorRef(override val provider: ActorRefProvider, case w: Watch ⇒ if (w.watchee == this && w.watcher != this) w.watcher ! Terminated(w.watchee)(existenceConfirmed = false) - true case _: Unwatch ⇒ true // Just ignore case _ ⇒ false @@ -450,8 +449,8 @@ private[akka] class DeadLetterActorRef(_provider: ActorRefProvider, override protected def specialHandle(msg: Any): Boolean = msg match { case w: Watch ⇒ - if (w.watchee != this && w.watcher != this) w.watcher ! Terminated(w.watchee)(existenceConfirmed = false) - + if (w.watchee != this && w.watcher != this) + w.watcher ! Terminated(w.watchee)(existenceConfirmed = false) true case w: Unwatch ⇒ true // Just ignore case _ ⇒ false diff --git a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala index 6807e34c55..a985a6f8d5 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala @@ -504,8 +504,6 @@ class LocalActorRefProvider( // chain death watchers so that killing guardian stops the application systemGuardian.sendSystemMessage(Watch(guardian, systemGuardian)) rootGuardian.sendSystemMessage(Watch(systemGuardian, rootGuardian)) - //guardian.sendSystemMessage(Watch(guardian, systemGuardian)) - //rootGuardian.sendSystemMessage(Watch(systemGuardian, rootGuardian)) eventStream.startDefaultLoggers(_system) } diff --git a/akka-remote/src/main/java/akka/remote/RemoteProtocol.java b/akka-remote/src/main/java/akka/remote/RemoteProtocol.java index 8f3ab4e1fb..204a68fca5 100644 --- a/akka-remote/src/main/java/akka/remote/RemoteProtocol.java +++ b/akka-remote/src/main/java/akka/remote/RemoteProtocol.java @@ -309,7 +309,7 @@ public final class RemoteProtocol { maybeForceBuilderInitialization(); } - private Builder(BuilderParent parent) { + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); 
maybeForceBuilderInitialization(); } @@ -981,7 +981,7 @@ public final class RemoteProtocol { maybeForceBuilderInitialization(); } - private Builder(BuilderParent parent) { + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -1977,7 +1977,7 @@ public final class RemoteProtocol { maybeForceBuilderInitialization(); } - private Builder(BuilderParent parent) { + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -2527,7 +2527,7 @@ public final class RemoteProtocol { maybeForceBuilderInitialization(); } - private Builder(BuilderParent parent) { + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -2936,7 +2936,7 @@ public final class RemoteProtocol { maybeForceBuilderInitialization(); } - private Builder(BuilderParent parent) { + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -3410,7 +3410,7 @@ public final class RemoteProtocol { maybeForceBuilderInitialization(); } - private Builder(BuilderParent parent) { + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -3909,7 +3909,7 @@ public final class RemoteProtocol { maybeForceBuilderInitialization(); } - private Builder(BuilderParent parent) { + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -4487,7 +4487,7 @@ public final class RemoteProtocol { maybeForceBuilderInitialization(); } - private Builder(BuilderParent parent) { + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -5367,7 +5367,7 @@ public final class RemoteProtocol { 
maybeForceBuilderInitialization(); } - private Builder(BuilderParent parent) { + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } @@ -6067,7 +6067,7 @@ public final class RemoteProtocol { maybeForceBuilderInitialization(); } - private Builder(BuilderParent parent) { + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); } From d56c0b4b340e3402025039ccc3c65b8bf81e5f6c Mon Sep 17 00:00:00 2001 From: Roland Date: Tue, 5 Jun 2012 13:07:01 +0200 Subject: [PATCH 42/92] update ActorPath.png, see #2147 --- akka-docs/general/ActorPath.png | Bin 50836 -> 42223 bytes 1 file changed, 0 insertions(+), 0 deletions(-) diff --git a/akka-docs/general/ActorPath.png b/akka-docs/general/ActorPath.png index 08ecb64284ef8f14812f1054f1606dec3bb4dec5..988a2807dd021318737e0f83351e9ecd2368626a 100644 GIT binary patch literal 42223 zcmeAS@N?(olHy`uVBq!ia0y~yU~XYxU_8XZ#=yX^{Q`S30|NtRfk$L90|U1(2s1Lw znj^u$z#v)T8c`CQpH@{ zsB@W6Z%s(i``$o<(tFj)GxlV>>QE0?_DB-i%CxV`O|?)!RK#VUXkyyCe{0_@Pyf1X zf7-JBzyGg(^{RjG_glNe*T+@=z5aR4^Fluk#TEf4yu??J$*UYV6kRMWN`KB4UbKAL zliMdRCcE%=-w?FDrW(I}ztQI(y{ke&<}?LxD7t*@*!R$Lqq(K#zc)LVx-2)Dcw=78 znn$%NV&$){gRD~FXccgp62>r3($?48PHlR?kFQg{EeUwCQA?;}_1A1p#TEgf7AFqH zUuq2MDo5}4Jo(%qwd;?lUZP`+VtFv!nna}!XO=KLG2H35FRVI9L9h47L+)QwTLqjv z1h^Di1nw#oI0Z3myEc*Ij5?qAi6V^_0VlBYl(`R{o}^UZ)*Nfg`qC&Z^#Vc73VXpSXHWD_kauCrLEs)fA^icOJM| zg#nz^)jMolHPSl85y9yK)V7E?5 z`k*zPLuN6@qO0sLHS@wvITTwaD2O<51acYfIHGy5W~;Jz5G)kBv{)kbcbq!ux+N@P zx=5@33au7sh`;ETNZG3pwp{G0oA1G0hcboW4*lZFv#uygKjQQjEz?#3Ck{?0#TEfu z(dOwpT~q=)9Xml>aNJ0F@Wf3OX?9u!ayZxtosNouupn$XvS5M+D1HvbtTH!$xckNksZTUPM-#r}IX=f%pccb?zf{jWdo{^|Np=WP>T zZ>sLDlFk47ALlsnqhIshtT8jMxpgM4+mYqzxBq+ghs{3pWQDHIMSD;-1i8|r zPeP^ROq16f>4zpYx1QRH1>Bw{9xkBd#lB_Ev?}{Y#m3dK*WBOg@4kEfU%0M$=e3km zf{HByi^HBd<7@w4&EgyB`=0b0T6}*t|IP$8fsG7#JM-sBSDh2T z+qZn$ZrO_)WA3-FUtikZYk98r!;Qc9o^DBgulIl6`y+MH%7VW?-mWjcJ?-x6^-KSs 
zSX}e7Bl_FZKexmFTzP-E&^p}q&Hefxx8mtYBOD7a8n6HUDCfk@_zz4R(|v_w^S8ymy#A4Q-k1J=#x1d*A0*bv_wjze zEAiHP;af%X4=tYmjCJ~UG;T86m>lnv^U>nz!Rdeg753%rJ-b(!b3gC>KRx2hIzrZU z7w6xrYTo`sks=KZD3e%>*{O;e5DS_#zem@QZFe(ja7GhbLgbl3j-ZSU*zU$18$ z50rbJF8%!OhqIseBs|%=_rHeL<$cjJ6PglQFIRyQx~hxbVkeG3M^JJ}y?D9DYmW5m z_(PMv7i8HcyfEwXU=nU_P~g(-c(|`!wlro|g3}d62Fw1&9Zf!|3g6WJec`W@U-z>2 zy^p_a_kr7gW43c^tat8S7+b!qi1D>@)Aeni=UORG%zCXRSpRTi1p9B-L*_O8E7mfs zQQzbHUj&+q1M8CnST}76dhT8#`+e^3J)*tGf4u*9C;ETE+7{;M`=6Xv2;A6Sd11qr z^F9e1*9KU$WjjjTGF5ANa`m67n1jos3!C@PuzEHl|L$YfhuX&d<e936i>BeuvK3_CyL)STA~UA~l3VQH$L%MQc)xT;kUJzw*I)Q`x5Nc-w#X{n~un zja{j?uHSLGc}+MlCnJvQ8EOatERxf;Bb1?c^gtfWMm?Ak^1r{0UtbKCQJ}drxtlDFREpBl|TjQfU4k&r> zh#jtLZd>Xiu}IKjo`Qk$va??v8Ql?&SX_Rm{Jr-S>Do4nGkP4tuYc~)6S^#aW7o;u zo7<8meeI0+S3Jq7JvdkBbbg)b*Cc$lq^WU>G65u6o@isqss;(&E2|O2k+0 z;)qG8lr8UE%fPJ^xZ}~(na8zXtq^ePSaflTmVi^or%65z`yEU#7DxP^e)8?+#s~q< zL$9w2K}%?t+%Cs6Id9iFWwU2XHr<^Oy8d8hr^`dPt)SZfs6#f$Z+p&6m3U*h=CAVA zmLm(6`-UpC2rP;?;iS7p0sOTQvCB9u;U_lGfqk(7*lbw2F26 z<~@0*t>z+rx8k;Fmg~zcE4f+(oW6+e?BP;$shknLrm5)Z=hIu)E`L+^ZGTNsMe+5X z{rX}oJ&K*SJ2`k4bxyi%@OJmq%D)Pa%8##lYPbL4K93y!`DT}Uf804^!uapqrQ4T3 z2~T=`;T5~!=|V-}y!dHqMU8X1YrZG%EA`j9p4@l_)TSw(IvrL)gcx32@ORUTl}oR! 
zo4sNl$DEw^-Kx9p{d~#0?~XyjnQ#C1+*h9}Z@qeL$$fpz0{6d{O(nPI*T4DavE;8s z<=ps!{F?GA6|eJOJ>T~(Os_YbDlV8PonEr#?7r*A-{*P>Pj2tE)KfJu|NG%}T+FSX zTrAzi*R2V(vj6_2QpYbQzM;A;86o?=|9K|)zOG_Jcv9zySrMnJGAv&e z&+L$16SmEN*?0M^`|m8yHWn1&TD85dWUch~wbgeTzuzmG{qOVZI%}oeKjroBT!o%@ zPWQKrcxn`>RqU#4S{`oQ_xil6#<@2V>vZ@0dv@?})Ajai&Kb5m{hzOk$NW1m&9gm1 zm^&q}Wd7A%A&M<4^22J&axTA`CEDU?cw^c9K#q$YZ>(>;tWsvX|MTSbyTlZ~y-jOYN-%ot7oo2eGbw|KH z;i=6P$+h##Z3zBT9er|P>eW?ujOImmCnN5IW*!gX6N#V=ng>Y}1} z%jeF|n3j{t+w}Zv%+KW}@q0g>u_9+%u-l7AC#&~KH}x6uTff>dh2i$DGe6(^Nk%aG z|9E)H`N*=!|2wV;>?_veDqX*{X5C8hEqiu8f4uE+yUaTK*#b^i^zX!EM+tB#x*Yy= zW_NXUsf^x~w8CF2YWKz`r?2Zg*0+4x@}08JZckh7Gw<-%b`Cq?=#Srb6wlIL(rJ1` zOs`*0ljH276J;$g7d+Xw@F`znn**rSxI1C<{s-%Mx1S0~X%*Xcu6B3OYp$Pp^}nyB z@9A(ZNz8om`{uXC`?hEIUaM*=|LDTvRO9<-pJ%;SZFzi}$_athGg~B^W@Pttf2w`` z|B3vS^DDg9d9Kc1(|$$te&+qt^+m^PKF?djG||f9o%Q#xyLvrAP2Cr|T}E0h0-+il zzB?4vd@e}raM`qv?@^M$^=&b!N-u)eO__LtOE>Z3cMi29^4F|3&zX7S*6c026c?Pi zbz&ojvP;&EzH@hUox?+pOzqUusaV)xmE*HrVsq_}eKGR))kSKiv>$NID6C_VJY*6+ zC8W<$=2)z~uw(wT-wX5=JEFfmH(dBuFbn;j&yQfSH#&3(-edF1WhBiJ=HdDIWa47 z!PFns`JL9kyW<#ge>_VMQtGaKvnN8~tW<}=Q;DW630IvZn8eIo3!v$^7p5AhE2*{v)tuETGOrv<(=!J&!j%p zox*Ru>&~s*-(T8=q0RQIM|OR;e{;2P&1q+qgvdj$rh0Zn9GL9$NbT}Ar&A`68iVcdH#Q16zYHSF$q$8w~%GZ!MT^NDFmq< z7tb=3o>5}RySBgV?lxY>w$s|LnG2`timWMLxH{J^ah7jJdQ@}A1UHp$t~^rf-`!bn zKL^@GUZM8O$m6|F)Lx;BHpaUxleQjulyWfnQ|{?dwQJ5TGGf|m>fQd>wyayUqgLsG zj|QLZb8fo_-Wo*$c4@9}%rhdQ>>ODXuD5;_SlE{R@?XlAOU|vYmi2r+Q0X35w3RY3gvYbMpCWvlM|w!w!*^mp4k^xw^Lmn)NKS8|3VjUJ6dI572w|Co22=;R;lo}Uw)#IW2?ZSO>M1w z3nyhquQPIfEgrVjpu+c0twuq_<@N|3=A%DWERwvVCRp>PpQC&FbY1?04t~DRal5{r zWpD6|x+5@+Lo3?8^FvbX{LbyQ`u`2L<_T?_v*X(7yYAlCt1rs*Uz!zRZh3F_oU~%* zrF(pyGq2-roTZVRuHy9H&{t*ivWfS#bgZXK=iA-8fB#|Z6nFkBxv?CIF6T3}zH-*> zU)t@ekh>~N(M9>-tabDEeX2WLo3B&Du{PpFx&)j$el32xIeB)T ze(1v@Nl~W`p$oGUjJEPkGgvcGsCla6Yfx7f)V*tAdoId+{j|Z%y&A={Z-1Pq&=!82QG%Ue!J$E)L&AUfsa%EoZvDgXH z#a!g{W^!pjh{W1NP`?t?>l1ZK{157j1$uu87v)fN`CHVLU7r56`X+Dj?>m~l(su*4 zhffVsnfKo6ti8Rs?fzS)2j7-8uYRXs-;9?!Y 
z-EF>w@;!52edD$BoEmtr?A{u)I3cT9?K}1c-o7WhwHVy*D?MnKxBB<>XYF^N&ze_$ zc-FU-PF_cDz7zfbXZ^eIe?QCTrtkd!V$M9Fu0{n#|6iE(KaM+p=4(Hz+D*HkoVvDX>d#-^ukS31e!lDI{#Ezj^)ViMQD%Hv`r9+zx$c4h*i9&NnI400o2Vl1ZBC8~**x-XFiSJ}+*i{352B zefK~A`xx_l<-2Kp^`-B37?ythYnGo?US0O?1M`asThFVo?F&9PJM2^TfuK*F`)lK! zHaPLz*nhNpis3|s`xc31CKY!3pFDR-czbi};a=JMkB+(a=iDydYX7aV;`ZGQb#tYy zYFw^8&baN|)@$)(M@)fk_Wa1ZuixLUDcEjpw(4`wNFC6 ze;0m!_2?0IMU$z&SU|0W=r326rKz=Ku*mJ$efICCjsJ^;^WwwG3@hsYTi;QiZT8B2 z1yflmZwly_)w|Gj&u9xB4c3GC|cy#WWop$y^6K~-0-#0Is zZT?pH=7{Iy+Syy*CQp7l<@)`Rite$lH2ZVI+57IIC0{3W3Y<$ke)o#i;reY~ zPL{4rwiLg9Y^zQhM4-TK->dj-Oq5ki!-}`;hT+dvS`a9c>$KQT< z`}(I9a}zH*-^9ZepTBUo<*z$^?i()VJ^UWMYY)es zU&n6$$<99f<>eG({Ysg)vtJ+Jy%Hb(rPArg)}`;5Iow)aTs7ZT$(Zp}{aoM-i6cfH z($gi@$i2w_;rpWgl`JSlzu2m<=Po~w)>+xJ|EC{GnA#MeU#>8}CR1I)$2jBJ!Phyr zKl_$l_-4HG!&Igt=k)IxbW@6vd#lW8_p?S*l=s+3gc|WIrg<~DgyeS zuh}O5ni{_Jj;-aq>py?JNwtrDaC)K5gWR&hc}8s7h3?<{>zu{s+Z^I&apJHz@H}V#pEuL4I5bOU^=+@MkmYkqI39fZ zZd?Am=W#PD_uT$+c>V6az1H5JY&SS;%<~OVkKp>hGrsbAiJYZDbp^w%mtTV2s#de* zmZx1=H{Xx1WUcLI$=m6%7B_3_{j#T>oM2|JkapqRf8}>|;XWH2PG$w&Wlm%IBO)xE zd-K}6nbMhac071}wuLq9glWJ1q^#>U>wbTFerH~+_2nba&tyNZ@KzE#5%XGryHR5& z%R9lVoPYgS>O2Gu=>$4ToG`kP`b>QLyxr5*eQ1u6mN7PPGtjFnntQ+Q=+@%DXXnq% zzP9kjnXNlHTk{;{x9)%QZSNoV@0hgNDSiF^;QhVQ zx9@A0w1qHC{}vr9dOdMnf%m+K*SfkpZYAD4@b>bNN2gwFRC(Fd-YxsT@ka{#g=d9Z z4?jH@^Vmymm1)4bD!1AV%hv7d-#PJ^{7jeZ7mIF1AG@;i#tL>0li>4LVx|PvtYObw z7SOS(eUIJMd7Hbws;fPzaFbEun)%oD#m9STGb;DKxc2(5LPPCai?k)iYXvQLeKoso zQ?a9-XTI6xx6%KPPpWATo^?w*sC@sAyVdKSu4w#Mcy`hCyMH&o*0#T?TwiCexWm3S zW&UZaZJE)bHOp;1zXz~wNQ>pX`)I=i^Ey@bIVaB1+L+ua@1|FL*Ga zfpgb}-nQEt10LU#iF$rx|N4u0mh+07%0tgJKfQ1&ZBk38$TjXHTb}ovU+)~7$#Un% zy}I&)FAqzv>%IQ_NQf_oQ_bZ<_Xo!(-0l$A%DQm&$=AWFJW6U6?JhiM%M*N*bD*bs z#oFy!QyO)bS1R7nm{b32!>-?c!hKQ!<@Y{$Qw{N9JZ-*rwt_oXrYUCgFK@B72qh0A>I zeRZi&dY2YwZ7!-i{kl)z14pCke@yXajHx@s6GHFL;n6r;s`hREupRIQf5_o8#~L-~aaSmXH6;Y5GoT!Kzrdjcl7H6rZ{^(@wto z$O3__?CV@y7R}!Ct;oFS_hr-KyJAQCj=z3={bysb?t!yaU%jn^`R}c-PVT?Gal7>e 
zk3Y7*KSrt@s;x}B{~=<>tIzJs_nr@tJN{wA>yvJOWZuZ{TGsadu#u%&bZkmd?o+8Z zg8yaI<{KQbKlENi|M)u7;Ga3CzuIcEhVd^lySMX6{Vv6e4@AX-x~FkO*gVQBTH2?e z)#7V2V+Iq`F#}I``JM=ul8br1F&Fg;@28*DzAf0ScFE^WZ!hP@AL(g=>9K3qa&LcN z8gb?4oK2NyH!%rUakyIZr_M21X??+0>BSkL|N9@^NZK(ajQLAu`aFXh(HzEKD`)=6f=3=11PUHA0oE)P7tS zOPW&}a_52YMTzv#QrYwWpU(c5_IK-f`KR%Z-*qf~-S+!_!S=eZKc>v~|1s}x#WB{B zkN3`MUO&&h|NEkH_NULz?X$7}dud(B%{l6(Cw?9K{I0X!{_gwvYxk_b^R?^$oo&4G zcWO?YF=XcZHv8|=6q_nwII4E}2=gL?9+g>Uem%cZnRe}Hoq76nNz5N( z;rhai_i7B>n#%k1!@hg%ik!~$%kr1cuXnEdE7jNjKPKc+@BZI!{;4`Oo}KC+=KY(x zZ=&7g+&%xU`Y!+Vm~Zw+_VT;akNvd!{N52)nShO*+ zmLAyJT6*wjh5Y|Gnd4h;=a_yydf@1(t&F|AEgJm?yQiK{IQHsRpLycReN%;+m-EO^ zxb{w$N#oJE=sodK)2sY{n=*DRZQheBmOK4FOJIdDsERfb^C`RNewqD}`Yqdry8eIn zf7Si6{T1<9=3()#%j@-TJ=lIReDhf~ao&60JYE9J6n-kdOLgk_#ri#eP3uvWV7sjX zi@JnF6k1L!+^je24g1L*vJbx{H?l;yI~_TCja&UUd-F5J!)?o-rX|N;lPR)ce(rVH z_l66P`Ma7V0RiWT-7`L(4VgDJb^YVpRY7vhogXaunmJD$ebf=E_iom#x7O{CCcHoY zm8qFmF2;L@nt@H5V@GFj-)RvYM#s*tg54_j`~N=5=BdnoZu&aG@aK=czI(0uJb9y; zJG!Oc{#X{-8KS*;(%W0NY}OZ^bY|Ez_rJ>y+3?*Lmg@hD>)5wpCs)y~t}_>w-%gns z$&`CY|MFkO;Q!+L)j!_)<--yC#r5Y(t?7X}Z0h^VCfA87N->CC6xPW)y+iw#4vS#Q zjyF$Eu};<0vCDJhS9gB>*h+lY(T9HKPxqARE1eCJC}(_R%bd1ZZI;KM$II7y-p)TG z%JOo@z9*$!yPvq;uReKy$IOkcC!R2UJhEo(*;7fZXIS){BHleR|9)@UbndhXf=wKU zd=?q`X-HYxH}kBU&e$3GuIQkT={38SIJ1e}-NO7wmPUF#{+jqYyNmay_WhqHdGpSk zIdt29M!Uhgd(%9`FHZX!QLK8lCI5eTTD`tY{z0u?g)`6Ow6wV2eXMY4`QCE+{jb&U z#b0J~dpaI*k^H-)(=|@qC{y#4b7&;T@w~{m@P|wFYqlL;d#~<=!j$tj!miw$z1@2A zG|R%T2Yapu^2k-17v4K8Xa7Pz{qMsC`=>d*T6m@A_f$UfU!6xv1KB^mPO`o(W_RuF zt5+Al#XN0~`JB`~d;h{i*)LMElHWHik1o%&Dof81=U7 ziFwpl+;TnF_@bDNnx4x}F>CeT56w<}RXX>^x_C~kzuF38HuH}UEFM}ONINEZq~+;s z?NGy@kd`;G|J}bID_{28W*W2qjF1n7i`l=0KUd0PD)v^}l)HEB&eh^CCcjjE{GBD* z`z(vf-}$U98ynKpWX`JopSHg5yKvUE>6hy-#dp72udeg`D7ptfF+j*Xw3aQ{Hv@#MZqNKJ|X%-@T~n zW!Ja2%iE;hH&}i9k+R-)tB_I0j$*&n(Vu@P|NHwzOWGr0VP$>2`_1#&O03>u=CNWg z{?~kS(HGcRc>7V^5v9#rIi%Ng$Q*tiA+d9!a!!T(E>|P#o)w#nET-q0KV)m2cKm^Y 
z^ql6I>DrTDFZH>1ynpioy={5t)8UO!#wyXNTn!>3N9+%@J_{vB=kX7Mp6i@$!1Ic0pjN)Bb5I>zAxI@1B2GKJ0%{`mMhW3CR)SA6Go6Py4RHy(9H+ z>AK&I8vMRX?GC=*UUFB=cI(sI{ac&U_gJh>)1KsL&-C!j!gJg#!n)i2-b@!tj+pM| z@;&R@;i#0RwBwC>`gTqi+YU&`o&0+8@Fn}%1ufPt1qx^CK9+^MHMTtET9}b~d*>eo zyL#Ij9&zy1B0x##Pb-Hcy-mHE2d`o%H# z&DUHnkp3O_CFjwdZ?;8O{WX*h-&g);cmH2&#}%L7$+9j}HlJN*d`Epv_U(7<-#1om zy07(5cCF2;tmBQ&uQ%MfX`0hj{dCJwR_=pOzw3T@=gOnhGj)C4gv(vwd2)AmT>ks) zY;3CK^}lC7-@W-a?jv*bnwW=+YN9v9cf3x~bvkmvpeoDs^GfFb`H7Dnu6+9L?$YhM z&(+?2`~1lKKL=&&LK2ymO>XBexc~D?VCh$D;g(e*mA4AK-9)0M+^pHqx5eC1KV!L% zP|KmqF>k6~8y#oiZ_b&$;00$yj?Sz+!=Imb?$^(-kNM(x^1!ZlSC*XlQ(Zm9e#5;V zU*_9sy9P0f+&bp%ay$LF+1XIHfPGX5Xj zTltdq?tXUEFaFy(X3mR_H?QxRp(iCMHYfYG>)*WvN2SuP7rzYjz4z(+iE2*4rXL4< zzhoc(_L-^eyr5yc^|q6?zw)NfzI&<=@NF7DHB$~f-$e1E=Eidm}d_MLIJ zHgi7A`I`KEdC!iBYQ>jKvwXiiy*+RJAD?TE>pmYidbfVB^uJp(Mb32YTEN%let&*( zwCzmOX}ca2f17{zl78sDQemOo{b~7l+>F z7m81GW(fR_nk8&^_}9Vgm{&e`ZrVjS{kXG6S#hC&iFo$8~1jIc;Y+%`p(kIj<8n>w|5FQthwDC?P7ejzxI6mp1U(QX-KIh+&uPG^+?M8 zrtQzQ@4xEb)fw$#+&J~ROaAr~CHwXU^zB?et907^6Q^8F{{1jMYx>(hNj7oWo|ZI0 z7oKCS3mp5l7;G#v-2fVoe!cs5SJbl)nbV((u<{NW8&7oZBpxJqgchr4P)h+OMut|*FCh$wGM@L-kwm>0= zb4$pbvo}74%(^Xe%`Hy(oX!T}T;YQf@%5tfk8qxE?^vw!?{tS_+~cZVRRQj$tzY;Z zMWX!7?L8&t&QBKDv*#F7*6aN4z2~;BcM|Bhw`Y@M&}@O3;n}LKEQzv*9PYS?H@&#{ zeOFTRpL0uES6KRryz%Ke=-vNLtT~~Hd7)e7!WCLBU!Dqead4~&oYKu;%pJ@!b8kG~%N5}4j`B+u}k?~Y>=-`8D}7s_T>JE!LM=c)HEZ*A^4c=~#o zTj{Nb@x^kRryL5uwv2tL=Z;@x|F>;Dv!GV`&b!3mp!X*yyZIe3)ec{>vn{Quv}N10 z4f7;TUf$U=ZBfBnH)R3;oOtaCPD#J(@4YWuS7mY5)I|7q+`q~7BBz8+|5OXjSt-*b zAlMebeN-`f#tfT{cjkVx?~4BRrda9nBfG8zPZ|@r?-~fU*T_AKJk9c)du@f|iQ zKikZAj8){2@WZATkE0^vcRY63-rAomkg44CwW{One0P49J5RH%YaWOQ>-VeObrakk zVYE~1j%4N8^o18rrzuLy{kFNIe|sl4D`V-)i=5rjcHFY1Nnz)lwm;Y2uYS$3KDqzZ zpP!#+?ftPdZzI#KpL!l#N!1CCw#6$>NAKTyXrH5SGINXU%EqP@buYHB`Oh66{whjG zso=-1SugJF)?P0lvXog{=AfQ{hmAn9nBlb;lg|h15|`Yqcu{-1?Z>Gpi&Ud6|Lv3L zH0@@I;g}=Nm{Do|c8{8Y?#vx4Y9)j(>_2z^!pu(rtHpxadYGT?U-!e7{ruXj*WXvq zUG-AYYF4)KB{5#U(=#F&1T){}mzz7eT%37$SM!1v=JI==e||U|bIPluLt)d?H`7{5 
zxh_uYo#!UD^?_=~l$*Mp3L0sthnma_O=W$SmnZUC%SUT>2pkN(e1!Y?fxnaAAKc|& zcQ~kZcCun)gn&s$+-9F!TW4Q5S(exAr#@T2NKrH@{_*i!rpxorTr8aFuYIHUc~$bpC2A~IUmLAo{!L%PzG$WTqj!rUxGK)f+pmx;Fmu96@%XT* z7hlHR-FScRuHyQOr>t|%6o&r1eKBw5hK2fE@!?$m{B1t8EzsY3edi*f>C!&F|xiP^(f>SB___fJfo7uvayWfBNC2!TA!i|q^evIjkt=U=WhyVbsbHvUx^?$fg5>io4wPnYJkNIrk|%hJ65*M>Tu z4M)>^&pf@nrtgja-Ocv>)pj#?d;^sg^G{oy_$1x+fqjMvYd9V)@+`7Stj&E0M1|9+YJ{q;XJr|2d6hoqHi zVm1H#f1aSE5b|S}RHmHGo!(!|v;QbZ2h8lN-F;_(ew_Z;+H(kM<<*+8e^)fL#`>_D&M%N(rkKgOyvH38O_^!K1nXR+;nEjt*VNDUnKuc%+*!YTlzG{ zKI-%hiN!)Y=Wae&!It>ziRIPgRyB=9FCP9~aYem@E$i~4<(WUfSr`16XveQuc{0me zz-*^}PhOF%X_vcg^NG9yc}S{&vHcnxF6be<_|XQ?!C(Qo=Ki3w{URt+CFYfN_717&nmLI%yCWUdo>~#l)c^m9bTMCEg+_U^P z|L(qnJCzgpBrGmO^XUJ+wg1QUHHLpG>JRJfPxX)LQ)`>2R-c-1WLE~ag{h@w<&l;%{%WSbUvIen^WW~J&;GyJWAnUh zO&f>vj}?DYua_!Z+-F^u^Odnxp#SHOqSFQU-@W23n#y-$|5A-qUW+{^xtBDyFTbyB zUwbN`{L6{McWz8$PCj`%ZGG?b8KL}lYJa~95{)yhDjduxAW)%qw8fF;xGgqHw zP*W7K-t)Ep{5-#9e+}O=oW43!-#pV&(}aJacw=?>ub7q{lMh{7yM5`AB(px53vcQQ z&KZ{8^q-=|zfeQZ_Uy&B_aASdTfGF^uG3S$ z->}(g(f55?*cqjJ_b2Nd-=neS{n6;tYO`DjFz+~jla_THBFM>olFM|e2colE{XtA65_Mcv0Qod~s$nLWSb<$Qxx z1^t|x8NMv*6`y(RgfqwOV=<48<;6W?dw+YuysbS>6H0q{X1(B0VRNh!knTff~w*zR~=qVoN&@wQF_Ob%ikZEy|vYBvh-qkP}+Fg zbkW3{G2Sl^`~KhgdDdj(19#Wme#o#gO!s*Nb7Znb%`Knr*Z9|e&F1;c>GV`yWGmZi z?vwT&@_o{pVTZF~_~Y^pr7B+g9~reFCP6mY$GyPHI`5ERV##k81^d}5+`rv_CD~j* zv-oY3+>LH0?M1rl6D0%oeN;)BkZPEAGU)4^WoX_Pf+OeYN_RSLu030+&{l!^LD9@PY2}ZM~eQ7Sopd@U}@Ux886>82R{G4 zlIPk|oywxQ{Dpm2H{6<0^F;91!r}ueB6<&(%sS|-7$ZJ;#?!ZbD~`V@n2~;E&4dXY zM?&iVUVq^E{{?9Eq}_rG?)huNqlHV^Cs%jZ?>%*PUj6#>=GsEF@ijAj6$P>zIydJW zS6n0UIgwLE>QA%dDuZ7^Nh%43%5ja6|2}mc{MXq~tr+qA6Q`!UL4HK=y#@Ma2T#%z$*+ zt%OG#T;#5=6*v1WqSxUxo7HSRYfQ_ViDz|J&N~=o@$g&fy890jBfncN+IiAf($<65 z=ZcQl#1NHV`+8kHG#}noPiEX&uu$azm)_(Z{cVc`7Bd`U{1i|u^ZLan$Njzr4zITh z9~7L;wQpX`_x5}D!hZ$X_xgl-B?`AzC+|GjBCz%RvyH~xYHYlVCVpDscFwx0;PAg| 
zhbM17ey1Y$ZhXhnOGf6Z-M4toJc*OzaJf@^=%DxF!XJT;H|Nb(dc9x;vq-85}(%Lc4rE677YX84UPaP8F-WZtA=iEN`d2lu_C%ra0b}PExg3)VlIBh!8LY!JVoLEGymj+JS)Tec+E_`Nb^(b8Mp+HvSZb=t1=5^J2d zzYogzYhn~Cnrb3_mc4xUMENy^%G=j}oOmot*R)lj_tZbe9PwN&-#Woews$PIYN(PQdsb81C^^m&KJE|O>20y#Pz zP90`>q~NL~x~S^^=CjV9o{8nCoU_gfILWdwBWFrS9EYNdGshtxri~NV@;Y_gYHE;o z=ezOn-hKV;?tw8NNk>57kvITF;vUg}^&DIHt`BTJyc?c(3Nn2LiS1uOBRq zJu3b-g8gLiO^yvdPu@OW7nZy#Xv3TgN&ou`{-va@jVpM0H|+VM*rtGxo~`HC1gUnk zEeZ)cBwtpyHYKlf5r1>XI>&d@n*Rq)+9Z9{DkGusP>{wd?oY1&{~oDa)uH5ItM$L^ zYh(GL`%(*fy#>0uJ|4K6E7ZDZp~lpy0_!X*x_`$$k6iUc>*3pV7yp@8Ufn9P_l?^o zzGeI~^1toubS-Ug?XI~PQQ&%iq1;OU&O@&n?>F6l@|f|l3P-C3>-zm$^n{vM-%aM% z4NZl(>u%nL*|!Uuf8RS;|8bgTpk=|sy^kL7dnvIkytt-0)5%(>MStGLBfASaPdPA5 zx$d{`^4_{ru|mzN0bi^XR-W5@ zx4y*h`)ez2)@uhNm&>f=>X+E^z0|zwb+TQ||AeP%dOJB>T7*A-Z4P|C+UXR7<@Vz_ zu`h4$xK;l1mL+3df&Y4^)d`-$m%RnLHf^p9-aWhW8|TXJ;rA<|zXS*i_8$HJ@r`xr zN$(B|8&#p!2qzK+%G=*2GTZ!SZf{Jk925zLZO>W)&FY0=?wWRXBG%e(sUc@{yUfyZU)u2huY znDIF1ylx_U$7EL!Rm<8DvwaD*) z>XbdrKO-tm$AnJHuy|q>b$>&jf%x3tWtD{;-Y=!^&wO|9=gIAVpKsyZ*~JB3P-e=X z&Fox$VdG=<9g2r{rSDPqvoii#b@-RK3YY8Q#@8>l`$@0uT5^>b{+<;=Gi~5@~zDFC!Jg0J$!!auIF-pzqiKE z`&jdyu9n|X$vwTIaL1p0t?yn=)efm#v{2!dM)pyczZY#PDsEXFGTB&hYv()A0CVsE zcjqq$1y5*@D^CJ#U%8^ezfeOn+HC9ZC7+-B&!1RqUnCG$l)Ab;A@E9+)5VSrVL6xY zG3VX=Ex*d>RS%=?@}CVxh2Qr6QQw~bcYe(W$>!2#E;E%DfkiPVoHV?+rX8qBGB~&6 zqi&o8*Ts$v^8_3fUUfI>PUaKrV9RE=uS(uqe|Fcw{CQ?koG)4-dzf65n*bou5+GF-R-qkxKeS2NWU$ecB^8Q>2%=_YC`JMwDwoV;wtd0!^6ExN= zp8fVg%MWLkzMYS%_OCM#X?2mh`?0?Mc<-AV%TEb`Q%{RPFW*Jgbj_y@cM}{_eRvKX z5LVl{I6B|^?dNZ+|DQfP+thigDs;(TOL4Fpe~{cRtHuR}9ftz{m-ya#?jrX05wE?+ zUiJ9b-=A+;bqKZ?u)ywXgG*wq!Mo`1GM^;(ztz3})7<>;sntRA*Vepn-2Nt=J0uah z641#*`&xf(a4K|#BF995R`Yp=ZWrBO2k;!a@KAZr(t;BkTrSMC-Cefp^*)=kZ}07N zo~j1-%JLqG6;nh^yZo){Urm%ex}fmV%gJ{i#jp3;I~}~E3$zqZi8m?YNU{!ebyC~!U3jhR%nKk+a3z1k3HuyZ_=yt043xWsc=Q;(^EPv_XwH0=H= zf2>L|USoR9%~xr0*6LsD&OYwn(YpOiXeuNhIB{ImgZ{e5KocEN|^r?Rs_K8h72- zBNJ=2(!c#--FRa0<~z62!$eEoFqdyn6{)Q;d%fsn-o>?>ETFb_>|!W)xg(@5U)iFy 
z_|S~*48_;1pBO5{`B~pjzJ0u?|1n3<=G4n>|L4_;Y;fa% z>=3-dxI?w>^_GkGjFxIFdD?DuyjEQN;zj1);%j4E9>#iyEm^vFN9MJjIZkPyI-cIqjVTb1dp^n{}*+TE5b}4P`-P2xO_O`%(eau4nK$vT|({xoP?!6{9jc-kp z$}IhDp$5Xw{pS~YSarP@pU~G-D|WYP?{Vly zcG2|X1;#_!(-r+#6$#u-=j#~{CStx1R3ufR3c4b~cm__Y&uoS)|VcIEju64!W` zUJD2|Ke0}^y(f}WTXCYtV^B_6p^-aZ_IAl`mW80bj2;qU3_4BEWo>t8wIpz?R85Pu zxalry8o1-h$x^#R8F~f6t|FJ--}-g1bMmgcp1P(-KRv(2UuY=m#1UBE6ri`*iR0n| z0hZKjn(kHK-PaoWwC^&w9`I_#ts@E%S)E%-MAjOA+EpVohfO?aMrE0|%0Ij3^9#jU zb((F$(h8ixrhpeJw=9h3-~M9H zIc2Sz|I^_EucY0fwKZRv=XW)gc5ryj`}jaY>Gc9gVcC+Q(=o%7i&ZCCT+VojVo+ED z?~~p4jGku9_nu(2!ezsZdre#Ozx}BWelJ+ux@7Tz)Dr(ovrfET>c8&ux+T-rUs%(@ zWu)99@YT0Iv{AsR!y@9?zN$wa|0{~mAAdKq{QIub_w_IC-M=4bu2gVpqWg}M7n4uD zxYfU>>Ztj%UDvO#|D37OknS?Cb5~ANLDiPpYp#?3)~)e$@rkQ_oO^Nc+4qO5oQ|mG zE&DbTTy-h72=E^H=*;(>U$}k!+dJ>CxjmZjJc_CO!bwlT6W>>wd$>0?Y(B9#;unWh z^NiDMf^LG7m&|wInY8|b%M#Y8mwJI1B(RmcQhouWRCF=ziVZF_R)0CJC1@=3bzMb6E3oO%_ICY z{=vJLO-`M!TLl*J1>E9P{Ke3$!lcWg=(4qO#q7$rbFNe?^(Qf&aWymSXcaWv@#DJx z&poZe>b~FRL#mfW3lopcd2-H)Bd}M%b;{NTws#Bqziv3cEAv`UN>|tFJIv+VD_1K# z70O6^YrHz-B)E)r$vnt(T%kpy;!K{gwr|`TgUF&Cy~j7r*R^<3G0#$O&edm|H~6F- z;$`#YDVrC!^AM;NV_%UQG4svg*T+?OS_MK+y!pv}=ZI(evU`u+qE<{;+<$vh=Hh<6 zuH7$heALbKeE(DtQhZLCqZ@bO?z?AaXFa?-)1z;Xx=@2kup7rGhYK6!Z+mR%ad<2! 
z*S{cn+H8SWW)l>9tO{fVzi1q1vxxX7&}}0e(^!0d_SgMOJKYppR+MdcAsCQ$V#C9g z=U?!1zdWt8YK5(`p5?nwo1<&&ZN5Ho={sD|1lpm`vQR*kapCb>VlJ|RUwpPa+~)Eq z)h=23x59%*#t%B|wna`#V^RM7_}3BR+soZ^WOLBwr^b}pz zb~q{ZAG6inBx?>TDTQ;w_-g07>K-jlN>dSdYNrKh6pPI6G!sbZFlJNZFiI#m{6OXM zfg>#oHmbe+mwC-aE<)IRP5kt-z8?=fI25x4-#-;_kyQPZq1c#c(PlTb_TwyrK#$5D zryg4}R)n7j(tsG*6O(ji(F0%m)Fsh%_nVqdCAZahtU2TIpd~DHo4_Ky#H5}Qk%byO z*ICkDR|__L>D^;?vE5<|%eM|UwJRG$POXjOP|V_fswiU0y!m_t^WpDnvNxKx@Pz5; zi(T7S^Ugc@xWC8F8C;4kiyh{>|M!sMdhm0J=VX3Q7kTm4#Q4sRCl*JR*7n_4V!!Oa z>GnFs7J;k%IxUN|TI1Cm#g~;QTxdM4p8c=j1I8W@HiuwBf z;T%mNf!|sLwz3I2d+O!u?E3nvNm+G!oW;%W`=6+sF?OEV|qyQl?$tiRHd)2I3K_x;KmMwreZ4tWf)~ zg5z?aPS>xEPIqiKsyuS?l9y>(pmKg;r_1ZZk=NI5ZFS-Z+>5;Of0dYG$F{BK*L+() zGnJ2Bj7Mg}uZi!u#4oxSG@rjX?azTj+ggn;%Os1rL^GeLo09$XD7${!)XuH^-HUfF zH8M!*O?yNEh9nbrN$-Y{(F%NxNW8#{88n!$H=4R{PWGv@4sF

>@<=%|4@ z6%N;{hov0Zq@6evy_l2uw)-WYecUgyMNi87LV@i<*V<#7o6kq)doV~ZT?8qGt9bLg zcgA?N3OId{JtAw#%6rH5bL6)d^EVnuO?z=q=sNRxmqsyg;m{)B)S<uS|l=^fG8FFN>>*eD#;P z{Qf$Z&v2i(CC0UthWhVoL@?hl1CJ83t=v9Me>|S_QyoDYOXiYH)eFfBb8? zwobK#Fof46 zubW->G<4lT@$1+&=SzVapH@KIC<2QdXV}&|Mm@P`{rpT( z_Wa1DU4qb-q|+5^7dh1vXKs8H4w`@Wik0(AL}c|UalMb)q@*i*)OT|lvT1?}pI*dz zhcCI8e{;*lvYQki$ zAM*+ku_cf}0Z+Ddi+vD1Szx_ew~+L6V3W526tpi&wC>_uK0lQe4nY`ynXS!T=k((89Nx=&qU7Poyx z@ASUDXNr0EA4~?Phb!XB+wI@F)ooC#ihKMkTOhEg%$51s+T>-s`Q7|VW&gIXayoV8 zn0tQg%eJ>stIgurwz=p2ZVn87y{cD#PwKk)9SXc&8x#J%UKIVj%DX&Xa_?8e`|YcH zSA&KPccna9a&qN$=eFK}R*pcGHaF&W;s4)iBUw)KFq_G-&u-tL`*>YJzI^#st)(qa z1&5BgW<5?#yTYFRr`gQ!_p-UunOUyQi?zOU+3&F z*W8TweMZaPty6wF_qx_Sv$7s%J>KdPywd-}Z{67KuKu}MX-*w(4G9v}W$#{GWLOj- zbAR83-rJjE?43k6lwWZ9ek!v&RaeJo{*T+=@6K4O9V*Cs;aIu*-xsCv^PX*UsnzbME=g( z)&;jJN*kUZ&sM)5sr$Dg`bt{9e&|CkE#(#g*A!Le&gkc1DGPKABW!u@Z9h2o*Qe6D zhH(GMvNJC^8a#NWXt=^G&gYqH5`VYCYcIj%)AO^xi`m{<(0O#F`1ucqdft8((UvS^B^|4@1ww(WyoqzsL)6-{Kzwe{j|64~C-p(m{rrKX^Vb%9~L*3l- z!iUzM-Tz$jV28loU&qY!8P@(eV!2H8&)k*%TR*;HT;E^y`nW1btH7cY1_~?NYBG=S z``W-J7;2SZ_j7AysrAi+3-)|Hbn@Nr;;7)})zRsdujd(p zhEwiI$((cPYi@qYqDCx=!+5Rb8TK%bU*C3kXL2MJKHFNnM^&&pVO?oQ>$bwm ziV*!Z+9|blw(hs5wgzxqoKUbiUG2xG&Cz>GnX~1yoq0CbZE$OSYkc1BV6>Bu!>xpe zyY%bN?eB~BxH0`x#l9a;1XVYy^xfL$W}i0Ixa{+$RlDSFZQEJ-$?EpEnv)MDr+iO4 za5p$ba!sAvwMhGvyd@vq)ht zH?}Ws-T%Ep@pi@D@6M)AjZQ7{tDE5XX`|SpSBe*3-#e%ty0!bL==F2HJAZxXS&_Wp z^ygUb%j_)A=1n>6dPTyzt|`7)h|l1ZV2QKHv18xA{eFH@ohNg}``m*@Gwy8ocILqK zTPDph*Em7j)aLUf3fR0}bn@Tlig{+U85YKhE8m|b;G-3>#Q#@vjM2;f%@d+?UMfL1S+B=bz+RQ zz3Mvzn?HMZfHojlZ+K*!bw{YB)|qo!>xyPSwOtoCvUMDPw$ZqD)AgtmXOHC=FblGs zdoo>Vzvyd@?78gq-`7=tIe+_ac3S1mm5=QzkL#Q7H1?NxlU{$`zIMj$SD)Q?U)6tG zvj4~3>VKcb@7;LK&b+;*bWi=+=i+nh?%e%#^>J=YOa13h4jO)sDL$ll{q)uX--zEk*z9&c{AYgWjlS&T z{m(ytFXK1ZUvypjUAXz&SL`M={@-4D)_$Pz3i2 z)VV*0M}40D0k^dY<}ohQ&VDfJ?k?E${mvXg=LQXv?+Z4si+`T~sY7{9PV&Fs^Y>O9 z?(InSy`lK*(K@Fo$x28cCas*AXTKibr=P>MgMKT%_BQ zA+zSa`;OQzd#49AT~ND_^XaVG#~aiCU46&xbE1$(xHED7xpe>iTTV~k{k6U(N;Fn+ 
zmC*{Glj*8#ttz!=1i1|%N^;;M>?ri$hQ=K6p!zONh$4xkUeH6c@@W+nd z0lJ)SE`m(#iHCiq7am)a_u}7S-rH-^a*PEs)LIVxJ^Sbw?>u&;GimL-o43u3&}j(> zDDsWb=1)*KxbuhH!<`Z#H@+HPNT{}$@iM#igY5=4u}6BLXXnhH^Csc%KotKIZ}{N0Kd%x|U4{<=J4&^+gbg0U)b%xlY zeFuBLJh`6tsc}&kBlESICY^nDtP$)@3!HT(DKQylWUs35oxpf)!kw8sdJWyN5?A{e zO(%LD-XgrlzjMC&<@Bjc(>-JKcOIP0ovv0@mF(7GD13%ld%;<*_NTc&ZbsHU*zm)A z_TB}rITy0>A544xXyVzU9oa?;!{7bVP^%S8^=XP=2{`p|@6@Rt|MqI73NAUTzdt`Q zYf|&JbN`imzUaEFJRues5+e62aF6Ml^G8*$sNLpL4Zg3%sNpz6P^j21BR=HY=XJNQ zJ}r}ZVO7E)RC{b&lGDu9+tc+XpEr(m4@|GzR-S6>zwNcw_51@rZ`;3FmGLu2|66{X z##Ebf)dN8lIf6Ef5-t<8Cmi{-GhwdBwyp9@dMo~HmCd~PGQ#?1p+?yRb=Q_(<2Zl{^6J#HL`*Fn0)VHMLR%<7_n%1@}ozZoB6XS#xzdWv}+0eUh zvfU-Mdlh`YQ-06+d&q9?^=B6qwfsXpi|@E-3ueA-+h4XTTK!wG_>p(t_1~1`{cH34 z{>1XEtM&Coy|H=8CLi`EgnU*^dNWJh`c50S*XPNgomAT`tiJzTEM$Dt-~IXTTN{1f z&GY|OtjyGI8UW9%LWD|^dcg^Ns2e@zeMNr9{ zW3@ZGdfvT=V}JfXz3=~hox``E=hOYa=-KbtwzcKJ>w;JvJ ziO=7ocja}@yTx_$`+nQs3zVms0W|^+LiEI&XN8{=~ z-F+9u3_B94e#q}Se|oDz%WeCTmG8@MJZA6K6p^IjJ4 z`)B%bF3#ESzwiF~3!hG_o+>_fAoRnXy?@_!u}*ZqpEL9F;^Ykp0+(hxvLswRVb*b$ z&G^!e#$&<$SESY)Uvs===bG1tO;^UTovpX~{@qqeMcD6t&;o&N`zBtto3g6cxIx$5 z&-7Z9;m3rxl7%OBR^GiWqT#b4z%RcjvuFd4IKAe1{GPj_oV{uFYsGD=FT6ke-I>ELruIVp`VD#-ldTr@e2Cgy8}B0a z{DH)_nJbpc9ZFw!cir(*ecTDBWCD+b-JJYs=eg*nUr%cz?qAX0uX#Q9KJ%mfr4jxQ zf3CGT)fpeXtoZ!A3s2-K*Ps3$*ZM~+MmV?gd(7>bhlQ?-Zu`AKrdUu;?&|Ty7b`#g zEnB)so!R!z^9{3ZC@`Iny&7!D;k8V6RjK0UGWn57QW;N%^;zrP z*!B)rm&;GHt%IBdS4%UUEkASd!Efzs>)_w_1bG)dPA&Fpxuz@0cCFB`;ppijY)=Es znCs$Z&)AS1wZ(9w?q$OU0f&}N=fnR^+R$Sw7-6$tjFaI=!$G?%f=pl6*q1E1$}Q&l zd*2oL`zP1TyE0Qg#r^(87Z)`F^DF%4XD)~r++Wi z9sg3SazyfXkoh*g$yS>`2l;2deWWj&`Shdp!);wxjqdOK$s6ynF8^sw$AOsK^RKS` zH)G#@{XpB>c||#M7eyX;TmCfSR>n@rm;?3m8;!*FzTWr8l6RVGEZ4!Dx6$t|EInAX zs&!%puZt36n}Bc2sdU>Zt|41Ac5&E#JNWmk+{)@7-{ZdgmN$FMzfVV}a3_oU?_=km z|9+9OK>5^ynp0c9?i4G$zD|1&-)^zKrF*W_+EnGgjL^|4*x9P^I_Pmy!HEqIUmb5< zVZbYVul=CW>tkmo89&<4Qo*5X&GSrg-@Y?P5?-Wr9H{y<=N+TXyHYnb^WF0z*f%=m z?cHFR+GxD7aq6=+51ttX4lHNqUC@7S$hkV!$tTJ1(XZmpW#0Yow%yvh;dO!*1Mm5T 
z*2<-=Gj_arQFpQY)jH!fQExpr9{+kzaPy|X`!ip)7jJ0GJCgWLKs%|M|G`?8?u#pD*OrOK5cMx%uAjZ$NtM9P1LreQ#dq{l8tD zvu>Nat&RK^W|oW_@6O9ydnuOx#^-O`7Te#k=VfMPEZ-b$^uKIrOGD7iYl4rYytF!g zC|F)j*mZ9u`}b?}0-Doo z^E)dyOF0V&wXBR{>fyXNq3HtKk%&l%9m%4ro2s1bb9hU{mbVt^I}(%gAbdR$dqU*c9*{RdH&1kN}hKUBi^@t&;NA)=z7_j zrOKtT9bPT#1fy2Ask^eS>x}Q-8TV;k_WzG7eeY#i{pxIv`|xSk^4AK2cjEh5)V>Oc zdwO97xnJ9x?*(Pee-7Sjx5(!C0#%{X_{iH&kIJn(K4s<)TdTbW{H(J&OAUL4_|-U^6jj`&1)RbnrgXj`seN`I5a0~-+S+8A?7hSCm*Mc;@f!_c|H&7w_^jPu}tHZ(PT|3+pHTbK!X)7#NZL zh*|Pl?92)dM-j!6M}a!e)nh+vTt5C7jozUrl+Ry>8ESUylff!|v-Gi`Nymm&)vW z=Xsl1=lH{~BF0&H8*kX=c-^XI$~Y6f!$edsK18h8wr~HsXaDR<*7N_4iCL0+vSF{D z`Ih`kQy%bXo$|LVUZtofSbOutGp|{?@@Zz9o-(>RC|Oj``o4L+far9)8qlzY_X>?m zDQ-n8*-r8^x6V;1Yx&x@T+$+`o7XPC=J;Di zTjYB6vkNUx^+meNb1Womch#Kgs5xH1+rh?W>ms@Lf6tNcRsW3t{@p5Sw7ccP?iV*# z{F%1y-TN(uf_skWgmMJBtS_Fsa%<1S2Rc1@Ug~Ma(P}xHJA=5V2z901`y9)CbKd$t zF=AFtYj14bJim0`Gq=}wQ~Pth)^he-pFE$Zqwcr2u@Bo5>+QZOcdRyQUaiyj^^1Cv zRXwwAcZ=$FOVb(YhZY2A=l!@nwX6B;%=ERofA;Q_SzGhsV9#&E2NwGonS?@&?n~$L zWgGoh&Aat&S5%@?$Nw{-$CuTgGrgRz#B%uh)P_HG(;n-G|5p2zG($c(cTYd_J{`-e z9KHFLS2^UGm-25+%l>v|*Rks2-6rCJ_v6z~u55eC7uX@K9%d!nkY<_^U11mFakBa3 z&qFeECDnN+UvoOmoHfm@xOnNFqx)~ZiFMEIUBu~nE|4#wjJFmO_VF)jWMo?O;pgW|(ke~@;Vx`HI^=~T6OVWaN7VFPNH(}Xd24f` zWxDO;2=3!sAFY!r>W=rmF*EJssZyRKhCl}g!F}do{%c}6T&`Vtdic6`$UV>1!j7dn z)91TJ|2=wJ_Fn9zPk&k+vp38rjAPlp)_rlEP$g5WmWTJA;>}-+ujn^s&HJXbyZZ0q z+V7AJOHRk_%HGB;P_gDX@-O_~rI+>5o2Bkm`6cDXNBHL~_FFTLYp%cxxhrKq&7Rla zZ`KQ4fAdzH+B^Hj^Y4qlw)pBi^P!8A-lP4oE3HoOyZN5AUU6X7q}&H*q_o$b)X?2x zpLcD`&JU)!xBnIUd{Q{nCSR6O65rjG9T6{JZzjiS-jg6xmnUrdFeP5`t+Aov)u*@1 ze(ahQ@jLCW%u2OKZ-0ON`g%`+$kByDmRE0x7pABFst(^2Gw<+c#V044U#hFJn=gO5 zm#05iJAPc=kiX5*r(*5L9}3tCS~s=1ue z@~xk`$tpU?Y1A+{Y|%MthpXD`>Wf++&LYKS%Ra!ZDw6r z`e=v0vEii^M?MQERm}Kd9{BCW?z#6ipFT0I-m)&{hWRg!FZC}ye1nV|&ztT3|2Q)3 z_(lDA^M&zof>nt{MY9Y=YweyI*Ug*o#VrMuF3ZH!b$djd(!k>&2xhfiqBJoE*x9Wf6v)y z0n-RQ`_GeKdTmHBuE_9TwkFZn*!=&|leyR8TXU`Jax6}4Z1k28IK!g+JF#(l(c_;+ zxpRF@*G_-?(t+vp_wUcQRC#+X=AWj@@!DIUZL+*r1>5$E$K!8b+)yem-oo-~bDiOA 
zfo^W6*VTs~ANrfaq$lTj|CbLFUt4XdVu0Nte#f?S*+s7|EpeUj`53RnoBWH{4p%e& z{VnUcUUS>g#s7}~{_?|CVUA1sA)`0HKW;gs+;=kn+vBRWcPeb3o!%RBzG=g%rc(kD zNxx65d=+YZAgBHB%*cKX!-)HH?koIzU2p&KS)6?L?~9+F-})=1ePrPZnL7!&42OKn z(=T}(_c1X45%V{+;?3meCCfb={}}Gr(7-%>{ac@ zu2{3n86mN2|NK2tc`r-h(nJ1p37wI=KdrCZUzOn8a^sKgul4VxYl7yVO339p$RW_a zV^Y^C0WRabUt$|huXpXtJzbi^p?7_!(i5{oOC;XjUa!9=xlzs5<*w5Om6nHXUtb+w z`~LRL?u^+V|B0Eg)vj~(*dCG`A?*CH{j@q;yK%djsEp>74H}=tHrk7HADQFuL6PUN z=9xqLY(=`Pt1I6hs5On>QZ5;iQt0)WH{>9xS6mibJyCB<1_r8wqZ=t3;N>75y4#{fm|2l_}fAgIu z$y<{nFKe_2Y;9QTn9OU|7+TQWb6hmx?EMc*uJUgsFxFOlv?YMw+(qT}iMLTZ!-^JB;Uo2NA0r`&N`Szy4l zwQ!+X+|eV=rzCDJni+k6T4uDwOS@o`LT|gnx+LbWt>^pwDCe)CaJ87Dl}-4% zeK!{!X`lG_ZRxVlX}jJ(>H0QrwamBECHAYIFE9$VPxV?Lxu&)K{rxv?x$%!#A2q!A z`k+?&N;5~`QKkuB8-KI+l*;$adG)!+i{qO9nMG;4H>zKHU9z7)e?#jTv2?Bp0{?ZQ zEp2&EDYOV&b=g*D8h&3)PWZj_s!u!Pc;l{ML&ORDCFdJ{vzPS5vZFKP^!=(P}{ge@@v|Q1FoXlvg_7tjuyEY z3|iRX;>j*FwPgA3R}pt#d~RGCv*OCQJ_&DXtSQEbe9^)@d0{0l$576H>9rY|A( z(cKb(;a}CiNMB91Ung_t>bY%J`Hw+cX95pzI5*35)*`Eqj8l)D3vpf1ySle~PLlJ> zYpcX{9z-AanXUj@C9`)=*{8X60cxxGFRjs75OiO$eC4ejw<>C{KR%oFIP~`aJ2BsL zV?cXpKu1QFy}z@tOJGrd#JjMyk5}3WDo?-n$RWY<)(*BaS&zSRP273)*lO-N?pr_J zv3&@bpdd0;xaGTULAG2xNS8-*r}+7ZN89?ZL{0H(el5QLsM*@pT2<2V=BYcuR(^jx zKT1jbrB>!+jxTPRk3;9rh`XZRZ@cBnUCZ=ovn^iwZ2xkybd`W&7st=2<9Tt<+-f(t zO$~g;$g*;xiN`xj{yTS{ndN7dZ+@;{0NM#69WEVizs@X9P~hTQWl)QT(2ngNGVFHTUcN+POMbI7XwAU>(ZGgspIo`09Dw*R=k`cu~-pWHo@UWQ*e=4$z?aO=lQoetAtOp2tZD~P1B zGw%2{^Xkg)XLyVm1e`>~uZeBi=bl~TvbBjJrsVXUMO!{@51qT{-0#Azg@IaI+5)D? 
zvLx(shR%ETjoHH;mKF6 z0!~-dStcqzo#6Ot;cRie(4!oREi4OHl#AW9G8eT_5TDlV#i8g@>v;d%F303IKRCM# zbU{mi7712VJ?)xOEx_C`^ZL1un-8(QUmjO8v%WWD&&tv^Eng1BR~+(zTTI0QOFET6 z+e%m#uBfi*TN(ZQm#I>B{og6oKX#UxKksABY;O3x{QQTDPs8^8YMgERKkmOIBRgn) z6#E_aLkdQl7ymxsx!gbOD2Jkp=z=3(7A@iFY>`*mARy2QT4YpzA~%FX@fGtlrWVJP zb^AV=u}&0p>dO4}jJuRGt6SusS~{7~)0H{+w4cU0g0+ zyHb6v?|iUJAbgEd?c?SXXS!G?My^sY0*4rAvCXdZI2E?-Ik#q1^-Ej{Vt^|aJv_@^ zuC`I*V)p#VySjYhdWFH~W-mJ>>h$OTo4N55Vy>31@BjCsD7S@W*THG))_RyJZDgLZ zWQu2ABKN}s%!{-f)i{DmS35NK-Zz;f94Fw&!SRUo_@xhNNp4Rx6{NYBbG>9tn!r&i z8x#I0>Cq96Xs&Nld3Gu=ZgcLr@HT&b+*;%GU-!>^IrF8YK0Y+Oe7^nfId*%`+r8d? z{Z_X3^rUHlf`Sw6+6Da8diz%@O1wY5bUiMdr=_HHxmnH$5TgI$h6u5nunkcj85g76FlUAAY7sO*{BSgx|iP z{O#{otJj}3ySw{vx3^ zKaq4bzp`&~U`aP97)3yVcqWuDn@UT5sowDaA=W9;$N?zWMEVMv*N&?;?0H4(QMaxJ(QYvm~{4ZoH%ndQ{BbI z+W&ieeW$xiWjlIWspLwRRW!?8wDsJ~oadCn8Q+C3eryowYyR{TN zy6Svp>R~Iv;)xz?67Dy2I(g)dHK-Wz+x_bik8yEv)KF4Vy353w_~Dn~I(;Y0`)fNa z57~(8M$THhzw~p5&w`F4VUPbi@rW6BDY2c^&{Dc&ZP6Cs;Z)g{lG`c6960ZqWmB!!Mm&=om<@0a&<=bbQgP0(m4&qJpt%m*n`RmT6YQQ|oBv5xfewU8* zw_6i;&2&zde9U^%;8fAQk9=lA%~LExIP6vTh$Lz%DgABqOm%T_N#o$yC!Ke9&Jo!~ z6UvR+-5yCCk-h9(^SV_z^Hs-TeR;1(>n>UxC|@OfO?>hR1r1*%WoX{gQ*p9nUl%iT zZQO1>&V4q8XHJ>KvM3e&{xDA@>{b>}LLg65Qh>{n{{|@`E-oo-O%=&|6c+L2CoV2* z?cI@oxh=@;Vj?^Rq^LDi-2QuL;upz$#m8*jkGVjJ_;9WK?_+&A?-%!9*|&awX>i%u zUg-QAJo-f~c(~pTx3Y_w>O$p@Vmr(`@20aza!~cBlPFuR=wX0%S%yI5!^=A*d{=W20 zx4*`I@wQy57_X*U<}>T};s4L=e7SY=gju%dUY>mQH(2q! 
zZ@ypVb;Bl)B?q`2+YFEFx~u=kL^t{Oqkp^QZ%sUQ?aklo+brjz5o5Ovm{3?&&`GDx9+Mnhcg~;ew^>JWb%b|Ir9Jfx?C3YTE}Hx?|Rg0 z)^7H8>h$v^vmRRJsC?Mg@1?Z)w2BcZ0H(}5{Oi@7Xop*+Jd#z%pPxN{{IrW!**3)` zT#r|Bth^a1eoW~1BoD<)o0Q*N3*%X{Xu84KpP{JdG{w@%fgjtsNa-*v~m@7@soGYT`INyIt|f^E(Mu zYJ2(K+zuA*cRjq45fTxEOZwEV={=zn~+AEtjSqT0bXG zR%vflj`In1xqTm4+BPKLy{DFWo%!3+GmmD-2wfEk-n5;`@zoY#rAwI}U9J)8aR-Wa z)bwcQxCZht{hs3d($&S~i-6a4!AQr`^Q_h4(y}^^dbZfko}_fdOr?6ExvfoyM3DbB zf!3Vn^1=tJLNou*Sk@%>rA+a*rki+hc)`8#W(wh7bwV#j2)gB^14XcRGnGxIzwdM&LsbGuf1_!zqDR#K6qt~&4TZ9 z-)c;qr0N}fNm5X7V%m%lw_Wqn6z3}`?d6)c_h5B- z%J1**R&v@Ny7oGiGnL2WcFze7s~opLLBY&ka2rLhbz}Y8p8vP)v^vk|feLSr=ogA2 zf`TudIQWlR-{4wyK2d7kgK4+lNtuSWf*lSjzY2dJ*p+kofcWe*(bSlShxzZsg6fMC z8+4SEF7dVXbaix;C~{aINm#sQQH8)s7b%_n8Lo$0jFrH}mEc54m(3rWH9OBZXhJPL zal;T?Ft!~m5jh@j9rfGq+r^{a(<&2PAT`9L(|63Z7Qfg4YMOv6;Y+V4xTJy0!pSbn z=4NVw6Q`1rlCgx)nVkD30l)XX+GLvfsY(!1ZFuk(zuBsM^R4u`GrBu6PF~9S{XtH@ zrvp;qR?VCeSmLaSV*KYEebtGQd&)UX z%55P>9(=sscWs|Co49`DvsRef^^5N>WJQdHr&;I`I#Mp|DUi!0O z0oeLT<@3SA{5jGSU7k(5{mux~v%r=M^VaTq!=*o| z0~*zW$GFXsGG2#YmT5Vf0!oj;y^4#Kl$0jBs7%Q`(pc-iL_C=@e0}WnWpgiNLt7CO z3Jyprf;j!D)qN4>HIUHdT z*&oCs9$WqRY#%?gHRG}W0LzyO7WVfy6C+kYoS5=z#^-<5H!JgHPn@g%>`?Y?ecZ`& zRquC*rwUcnpWE{$JUxEK-qL%o-kfb+z4^aQc46*aS?ltbf^$;rKAjHV^fO=n%(<0& zp3SSe&TZtm$F@Fn)#EE)#9p>t}Bc9-lw6AoBmyy~#HZc}e^I zotG~=e_B}#sC{c3#Gz+vHKk?2vBM%ZF51^C>aIpS-j!@2A8R$Ehx_P*Cm*BpZD#6P zeTyjjww|xra2dm^@9K5m3clTo?A~O4ZoiuKzKq(ktap~dML8_jcIVIckgO2De)5p? z>)Chn4dpX#2A|(z@Lb1Y`@_1H+VivP-_5Z3Gsk;zN>H+3Q_TC%_itoBo&BtAPy7ti zo9q?ue!SeRm4ACpUeFUzLO&jFJ;`ozLgL1i*Id=kROY`sZ};_8S!;rL!I~t4|F<{! 
z+Fs8~j>z9t?w%{zSzKm6rT%@3`x4O#7W=QSYPk+(vL$vL{B!NlY4x`f@8{S|%-!>@ zD@9x3#ER-0+=2gp)croNOXY?@X^s4vHQf2~zxDQKqzaX8lneBZZ0l} z^FiOdpS}{iD?*rF{hZdQpB7!l=kK{(F6o2u|2=`uJGm8D{+8G6QLX;kqc^Lb<23r8m`2KnTHWC%~0yhK@#?@LqvL;=s@ zWs92q4R6eK*?l=PutP)njgX=Eg>z!I6#;K5ocFBV_v_XcJr>2AwaRxU|NSX*(X+SW zjkI-;r6SwIw)HuFiwthhoZhVAQlsQ!dGAB8aQ^3)SAu!U(yjm9i|pQgGI)i8h~P`L z7p!vG78gAGo5p5FLEp^pK;z*ndQc`+8fi8eZ;;P!RGDA>=vQz$w|wb@^Pg7F z%h28a_c5dX6j`~u`drnLtP4B7tW?-z-DzudDDlsw%{B#BSIv6i{33Tf$F8f{*Oz{M zA0{mGATg7b&BX27tJT++8tQQzJ1NwfBK+2U-JjFbH>KBCY^FFJ{=r?J250^KIk%iiL60`%f2k9qND1e4cMlvyHc!7T5Xe>}OX# z3h%Gkum9RY(nGXGNy(N&Nx<2xC;r&%mgoHM?oU;hPq3YTzxH+YNwg1O6|INZtwvWrZ*u?RaFru1cF(e8 zpEad5&n|XqusWH%EMays;5?~wE!ai;_JxOTw|D(nB&vDRr^D%)$dY|V_wseVvpniE zIwaipd`?lvhmK7zA1tov;F$O#;-O9V?KXwpzA0rkZ&dVVa@u_2-+!X*-@pIgmR7v^ z>M=*sxNX9ZS2Jp^T@al3Zn?$9?Cs~CHnXqic+^|i_|fu!f}}UUL8n<)a=4n+K9^(1 zw&(xv5nsr6~4_`Uo2(og$CciJme z@-FIs`g8N6TURtCobo@+lim6_fQ`v$)AVPa>1~&{h&o5_pOyYFcVb26vX0)r_BGbk zkADBxzx^wH|J`ffp8r4pK7Ieq&z9QTn+hITSS~+trqy84j59NS)~IRQ{&eAalIF5P z-K}W9ztO+v_y2fS#TS2UdUMPEPyS0|+oOW>BIEWwcw>3VA$T|ugw}qnh$ZGXHC3(rMM@SC+XGB%S6 z`~S9IpL~PsV(W*)8V?lf|Ni|ysYT{C|bERx5^@3`hnqc<|P(--R}jp)Rq2nzg1W2JGVXQ zch`Z%e|yWOew(q)l12HnK=avolHdQ`&3yV~Lx%w{+k7b?0nbAIdytKWw1aTruYzPfsx8y?q>_TIkNCl}9EJGJOWUVPohKT*>DOWa(tr`*1ucAm#2 zse&i<)yq>Al1COiW^t{W?&qI&M#BD()$FsDt${J~T&e?2*V--8e-?7vZch8Z4LW;N zQzmnF-4mX4lqXbJQiS(a9J65ChQ|+k#ox2X$IonunYmF#V8!=A>&SE`YD(q~Uz<2h#K`nT7xrKi;E^N!dpo6PpQtWkfhAt|O@dFjLD_N><{-ybd9 z{`9?)aAQ^abLVoWDISG6X0p{b{4aX4x5v(XVeI_NN8sPLiF3OgC)zCto-BFXChM@j z>G>9qst=W)-TNe6WH#|ITQ2M9C~Tc*DYCF*;-Pf4mwIY<6LdJ;td5md2)&j(X?(Zf z*}}8U^RG|b|EK@U+<<_pj>F!^pKtu~?vdjXnR|z4tVox+@HxijoQsQWs>tb8&nLby zTTx%}MIi0*LLH@*_tzdX3%vhY@|mZEUgn?J6q^et-L?f-i|-QN)0}GP9(}%Izjb{4 zq+7u@?N8I)cFvs#a6&kx*e1b=llD9gN<-7K!wtZ=NsVfW1E49O3p zx7S-$$|&!d@_kdI!}=1L_S%ZAQt3_^!cLobeizyt+Z5m@vpu>u<&bH){g&d8Yf%&9 z8u@H*6>e&AD`_hcuV6p@@avZOTduBAzB2jimbHe*WIi4*ZhBf@^}j#pw)MsIHXpso zc{}aO|9pv^x;^)8PW8k6_w1gX^ffdybesRL*>dr<*q{E(Crykpb5U11W%s*(`KH6! 
z&w7l0?f-IVWlrYNjJy;P|0@OmKY7cinAg^XSslB0s?sJ=lBaBOjX1d3n>BbL0E3oW<3R@BjBdKbE5M_-mS-w?b~M zefqySYpnd-+3)o~+HvJ1Lq2Dlv8~@+xzeZGF8O*{|Kwk<9(ZG&!k;(c{7XM>PG;rzOu4-FChyfN zzaO`5`9JYw`I-qAULM?UKh4H0*!SXR+eLlWWdSKsFQptS;-r%bUYuaRDZGBZpW@_- z<5HEKaW%icXe5iizcfAW*8-&_Mg43K^Z4cdDpbz@;PG(p0gJpRFEiDCojO)JWg_>C zJJ~08IvXyT65!$F5E+&~|Hs+*-)G*4%-a6v=J&}CkG|Lb{{B2<+WD0oM}w7F5-YfN zx_ykk%)H>>``nU!KR5Abr55eAuvYX zzkj&rr~c3LeIbYQyRUctZ^Q=InFV1dq|(4Fd1f6LFlGP)gjU1NE~|3~+JewKgw zJRm0gi@>z^Q+MopTk-t%%b)ov8vaKj-hHoRUHZFKVT&-IZZf;0TZG~JqlLb%2Sw#< zS(KHujI@vK2%jeCbvE7Ui4t>uPILH#rySciz0f%~vGe!@1FODpp4ok8`i`>5O+3=P zX20UQ4@R4EQ*w09{dBARcsuD9!?)i1LI3Vf)?_KS-=eJgs_R5wL(bW5^Uw1-_SIMPv6S-< z{eMe$wl9x;pD^d}+|Yxq3DLhE)|`=7tde*h$d;<~O6qO1;QzbVKMQ!)*&j3CFRv)@ zXsyM+zJQ=Q?%N7h`~F;7`G$S|?Ab>I_D}z7X_wUa`e3QkhAY-)sqfcIeSWij3-3g6 zrzg?-?LS$4+Fy8%L+TjAwAPx{U)P@M&yM`_U!c#`FRaBcM!T*$rdxdV^=BVu?R+^a z@Z8+cgT?uw5yf^>j&EJc_atf8&yC%;m9{C_OfJ@tPd?cG>Hn^Kh8EZDf8KcWTzSE+U%(|8m^1?Yp``M(lrk#Zr=7n_2;!! zmaONGy}4zfzHR5v$C1Goub_8Kt(FKRc?tT>2gJ zl-`fB59`(Ii+6pyEp|p%>T#q{|4S*|Ig^i-GR`|FFnQ;MIg?o=o?m)XGd;{U&{f}m zv4JBC%kzkR|2o?9!&zk>aXU`@Kka7j+WgSEKi^&XW3_kq$xMG+*&6ZiM!A^Hat^~+ zkuA%o*`}ZEJNwn9c<<`RcG}lxpXa#s;nT<2U0-)p@SKYKu+TX5^S|x(=l9MHEN7oS zSMvVxoo(Xl@6}&lp{KmbGVPAM_PLpJ_f&7KexICvEzoX#wzNycM`_QH;6HCChAflx z3H!f#v%ZWXOLsEM?7|;U#eXl_sOMLEk$29DZO(V}kDkBF_S~(u@7#eOHgk`pd^B|1 zAenRdT-mjchF3bfIJDDdxC$7}<6j?jW~(IA#y2;+pZYBE{gGjKG0>1lxbVcMqHjk7 zbJLzyxa!Ao?B~#Q?Ko!P?2=aT)oH;dzKbX7CSDa}JoP?V(#crJ-1UUke8JU|U&`wA z3+DXWKR+kiT5zUK+W9HMPZ|3Zy(S4dsec#m@nN|%JzgsFj5upF$KFK;4oSB@XG$!& zYCkut>P^@^$Cj+M?VC5rCN&t@|L%XUzEJX*&?234?aB8;Y?r6~esH%lr%&SA#1z$D z-8CE%E@^=+SBy`*YJK-qY3a|6w@tU*;BWj~BD=9HFZ4po2d57AA34^qqo>OMKOZsa zr-GKC|NgI6exEcJYifOcuxK5l{xtEu+1k&x*T4Dn_1t8}xt^bz4m>S)tzA*hv(4S( z*v}79zaB>V_FwL*Dz$7C6z@B|YtccQX~{BwzrJ{FGH;2W%7q)7io;K>)40o?dntYX z`P+*gJrBEZ&+N6r0v)SO75Ywh(=FGRSxIyqSDf|a(y5<%A;;8L z2f2^Wna-gS*WoyAso5H-MK?FCU7Zu|v-`HgwEs9{d_B*yAtJeQcR>t`!Xsmr zj0!=)R+;)$&+n^f>*fCV6>YwBLcmO!jMs~D-%R_gxqbbOwNK6V3MM`e;wuYMIg$PK 
z_Xg>WFV(7QcRjW8V>dqL+~N4)Zv2yZb}jP!-=ZEz8YcXIHYxJtjiWzwq;}_={*?4d z)aBTx^v@s88a#V%8FSx3qPyH+!I`8C?Zo!x(`i?2%nj4tv(KM>w`jh=#EO3relK1g zTe^9&go}#Nxiz9wTz6(~T6E)tt>xLiyVsh(zIge>F{^wRZ_BZh-*3m?U-hSKZ}Y_! zM+^^XpZwQxsD6J|-luIvb8pXGw&do<{(rx|tau;up5y$Js{19Lazc0B)GvPHr@A^b zWcur?{U3!=r`G?zBzAqv3t65dy)7J;w|>7+F8zM=!Hf2jJ-am-`4X=QraH3UJ<-wq zu{do`PS|$ey_;O0$J%BX$JeNTpC@h~|9j)hy={g|IX}mmt-hZUzvrR$?B9nJW_DT0 zN!oQYah{isvpk}hWN_@cDf7Wrf$ohBHS5fKJ z{>6Ps{0Z;xefjt;N2~3iLCw=;S^Ij*cGe{R)%kz7tX8BY_rvObo&1~G1(%P1zF?|# z_Fs^GVaHwxX>q0A@_p^%6W$uINz8YBs+hA`L(h_L{n}Q;xa5;MUgtS}3rBSpp2MfkSo+qduD7y7ezQ}v^p-ks5PliQyO`R!ToOJ3>3qo^X^ zBpZe6ov&pUO}O86eD%cd8za*$?qRfv<(~QEs9~hsy?|+g>nekrbv8<@FA0g+XYl?^ z#}N<1YwM>PX#X$oePwj~;jbe#&4zK`Z|}D+Q=HX5^=NVJv+bW}zt5Sp^4s3cVp~nL zJ}fganj@L~bVkq0*56HKpxV^*iQCOi=;E?m_*t`4Pa*sVsBNdVas%@>rUvIr!=y$YqO1E<2 zQ44F;*Js<;D$H@amp8FY&R@cf{ZQLHhPaR?k9-?1?%6Np6=`ziysz95%a!5M`o1k1 z^ZtKX`9#!-qh&c;v(h1k0 zuFXl8=7+tWJ@x(>=XSvpJ2&Z}l^V}imc)F^e$trxbmkK) zr8VEe5Bq)YS=iHjcG7|8RqmhWZ8G1Owxq`5|9ek97mqJZJ1aZ#t@)ci)wg%#giKdn z==-s}zWVBqMJK0+F6ZprJ`T1yDwZlK#qRrJ0TOONZ&?cb2NVwczc4gh) zM_*Q)KiVhv&i-BYtM3mk8OPOGNhjS4vumHwbFTSCPsHYe9R8A9g17oZ@*Y0)Q$Ka= zRYbH^vzgxPyP-`RGECf$vp-Jfe^afbd%R%TW#_;fk{>3C8rNLQ>B%aRaIkT0^KS{| zP&*P*@m3(x|Nm80(~C!+H@xlIpqsRDhUt`(dzv48{1L-r{&Y{)_t?auf)&^Tol)LEcpLqYgWuL0f)?fv$lNwRyEPh2iwf-6}Rld$VD|oix z=ZMcbc9EiXiSq;1w&~}pz5c{fwt4lUcg=w(rEkwouXHW#+Hz;x+5Wjl9==|Fd8hc> z;+*JP`QH+Mr%hD*GtJ{0`^3*p$scxa*e-Rp{SL>GC!trDwEgu9_x1U|GW`ANbM?vj z;a`tUXjl@*s{1{!e<{0ka>tr!e_g)ZF*iCAwKl&*`SD$8Z~w^U8D($6gg2H)$Ii=( z+m*5-??i@nS7&iD2|sm9H+}BS$F}K<`0XE6*B9K}C1E$M+~HhS5lNUg_ch@{x@u4+x_)|#Ia9|hj{Z7 zDm{3f_kA?u)6%lOGR65&&dL9?eW`q<^5;h>QLSc4LmE^?nI{dno?M`HDHf-UKL z%h#-Sa<@KsRz95N+hm0)*OETC@Bf|vn(33(Tpxefb+_QL_k2rLT#J`Ispwg&+jqEj ziPgL%(-$0eUNT*AVUK63)$L`|j5{XBFV?6#=kwcC{r9cqpXV?OW*)3!k=QBv@0W2T z*TtT~#S?jt&ZCI!-0o>il(#7$k}1n@o+&!tw>AkA%DFc%6q@h(h!+w zoK&!9;i5}-FFf{M5zzD6d5xfx$+UKx-}b-L{+c{`TKdK9bAr0e{KCBwlGAT%Dl64q zTlTD2G+kMUS!vIQmWQ>Rjg~ir>24swKbXSbpYtz&Yc_brR11 
zb2qn`S8#sbxx;LZ}2^2mz?&wm+z~tZJp^W z(3&C_Ta}_QyZTjZG|NP}hU#BexwkF(Q1H_C(}f>9w`NA`>9+(VGwA516tBL1`;3`; z{oNY|tG4S+5;92TQw*pSOZd+%`0}lp>5k2Q&n4rw3sg^RXe~3W!*uZ*xYkEth**wtr%V%MZh|H-y(eKc2e(#J@eSEotv#I{THO3){qzm1H}zagKKJ{5N_XG;bLZ+GpM10Cd*5xNjxzy@Cw@FAkMhx1 zPCm}lW89p1&M#e7{B?!+Wu4O*`i>e}3ujg^q$eqDh&yL=>ua&R!Lg3Q);*xP=S$8H z_wOm!vHl(2p6lqcOXQFfCui!VpWDwx-`%*e3A7l3?fj{@@+yO+&S(DK&5m4H-6&}) zDaW?#ai5EeTUY)^d!dOlbvVSo?@^F>z2xIvja$`{s=GQ)@H|`Fx_U3iKli=0f6smX zRcf98OFr`WTF}&^;7qxHfB$4x?R#FdthnYPZ(IxKhhsmpziuk4mW|Jm-QM?I?~nQ3 zxZ|MNCF42atAoFKd@szoSk-RjGvS1)!d=dJ!oqF7d;0!AY0{1B&};p+{QBm_+8nMf zX%_E8mVch`t|{(+ee|}I+!q;N%za+vRsQjGzfSmxl8)_Sae3DFmcFg9uQ>4hS9o{F znVL@*E*`zfy8hmC5!=`U=U3g_xU|^7-mvtKuzmH7?u)DsBwcuBx$fHh_Qy)g&zt=B z?z(HaW4YuZmwr8IpZ}ir7ruTL=<4wJedNyC?mpXPj)uG!X8+l2$Du#VZ$?wr%ZrN7 z)#j*4Y_2JJneZv2^KFGbm!nW)X`;2=%FhB?9Ih@^Q$F9jGk@;)YmH9~uT;#F;h1?i zyOr_boY}$pJoT@8)r0Srx${yz`-dv zF>TJ`jW(KpUMZh``$FJlq2mh)(0b9CA7{J_a!IL~ks9Ix2B38UO3>9jf`ZWHNgdGT zK_C_2p4?R5C(1aozL<_7zXt+cz=){+5!Re`kVj+@>|J z&wqdL`TUabnNP*no(YRBb1gg9RBT;$=Dhx_P|a%JZx0@yk4RjiU-?GbdQ;`p>esWp ztPO=Lel+GTo_2P2ecraJx4SsE7yH}o(w$9(>AL-9M#T`|E`C z@+-OX>TQl6|IBlz;;!h_=dU(z-R1Kaw9rNS{GRtqi(Jz_{Z-;}%6!gNpW*)W;3tn? 
zHK%iLr?n_NaoqZ6k?2--d4Bh4XJ_wM=-kGwANNvT`;ubc-ZB;GTfeTB)UVxFb$3b0 zu97%`XKx)h>K=ITv^?jgrA=OQ+qd-J%NkFW?l9Y({9I4F{cF`J&=RDje9yj`vn2cx zv(m0$J2&ON(mnl4efV;By`Sf%UEXVbsk#69y5;<-prrw) z^W`JV*L||cukv(VqO)$>QnN>QxBFkS5fV;Z_0IH2!WO|j-~PRhUa8FK=ziN=ZqeL3 zL4wT{eqZKAtY>~@Qd(`KxG4Sf0rAbQ-qTWd$6HLA*Ie@Cl4Ya$`wyq}({{_>dN^PE zp8g_*Dc=;IurPNTXdRmWsx|4Hh{3-{ z$v*nK`TE!CdeVZr7vAj+(_4P$+LYUKG*-`j(chYr z`|GSKhuV(k0`HIh3ap7UT`m2z=+t-jpLU&ZAMZT+vv6K^{+^u=Eh2cn6ef`Ikd>u^zUW>RZDjvih4)*wwv6r3ow$L&r&>{_|;0lF}Iy|OT>dAZfUMtPX ziaW^gcv-6awP3c?iz|3ywq~^)EZ_6%!D7P$*`5_*kL#iYd3iX~8r&K`YH^iSyJ&&z>P&CiPtWqM%?n2#nXY1UGCEKlgtfeB^ll++H_phw1}=!}79Eu`%beEc9;a4ED3;G*~n@k;i9eZFgDI=jG-$ zJ)a)^bgLG7{;uNby>-|B{J+9HuUF#mmHva(Ci3Z_+-8WzXG_ z@7Su;9&b10RoRsD+g@uwmlCuQkGoPgH+y+rx6jk!1(&~{u9(McWaguF`L%h)RcncY z&4vjxq*vcKe8M?-MUk?8)FzYOtK2&tK03C)=IyR4#wXL|=2X5owJngBm8t8V>Y>=K zr#vxRjd&;RuX%fdU1aaiw`t#wT(I8ozVk}8R2)N{veWhon+J?1-!Eb8y6@%e?0i*Y zBj3Um&EISGmYuc8uUhxQGFhhGWBozfuX#`Z@iRC4o@1zT$NH(2zlL;Js8_wfv4MaJnqd*Y1c`U@+My0`?|uaLI6erQ2%n0ZB8QN6)6+gEW$UC-yw zp11qy^TIjd8`r$3$=NQv=q@DPoZJzAtCsJ?j|4i_kYnh+f=P}f^kdt;ikaaDU) z!j`StZHJU2>lo$+`zD?-Je8PuH11E;)2AypZdXn0I^q0>_g?X{ns0x99N`d@J}%8~ z((tr+`NBtgT&o0o5|2FZ$Fvd$*K)n_~NV;-8hdhZh&w#|EpH*4?Ro z`XT7uX*V~9=OT&T+4bM|TnSsVzchGT?T<4%k4Mb-y7Zgwt$8zT)tzYaHbo^A} zjGBD&#idg9Ti^~9mM?mc#qwN&Q%=HVf9mtLwv|tk*B!0lJDh&J^j}+> zr@i3gsb%{&WKU7-na-PR{O;aPB+bS@{!=e&Lq>WPt0ue>+(kT|&G z<)e3e%||X|UsY}qP-$sA)_mXlyvA>VO=?QLogcQ$c)x~6*8R(qvf^!Li&s7V*RbW! z#61lqf4>@;ToYcHma`?UoZHY$ZlSqYen)Baub-9*Tuzw`wpJaBtL|+H4K<9A5qCdh z=2OB@HKqB*n&}&s?(kjCaWGou|HSXJv^ZR)1T~LIry3ubuq0UXn1ZjH^u|?N@)qT? 
z*q-PKP5i@oZ<54zp{Igj%L9}5>Ro%ir%vZZ#@==8r!D<1maPqJ!-S6?-V5JX#(44GAGXuqX9q-IJlJi+Wi|@(V z%iQ&fk`azATTG-B&imSh%++63;Ws5VU*VHWN8GYUGnA!fRIQx!xj6n}NuKP)eJ@S4 zmPq7Z^E23|^)8i%N$$#bQK#+~nrfmyYL^x)cKaSBlrgnzL0q)(%s%y58OoFVTs1-# zmu2RAMWw1Vezm$kPiMR9`Qrhzv_EhwAA4w8{jc!OuJ6a&O1FM}do8{v@0+&tnJby^ zR#)^zZC|>3KJ(;yP`xE}_d#h;^mNX}I#sTXYicA4m#j+Tlc-Xhsl8^-)yZ-`Hbr+% z_$Zw!ORU)V?w^-si{u9V*PvT??@d$N8}OJ>(OX}L-{V|s^2HrIVb$wS9m{B$z#CE@ zZ(^hL*TwQ^h4Q0D8Cj=0_7C@VRL`$mqVf9QL)+Z@*;|;mus_|_R@PjkCiQEYyX~5l zm63~Ly`BeWOgEU3dHTzOFXelm{ZBu^==T0s*4|}xpKDSoHwqi`&yJq(DlaEIru=kn zdG9X!zVPR_bIdi_zLx%YY4cLL$>iO{noI9?U$&JvU-5cd$>-t(t#{%tc{Z#4W;icv zUmN~3clPwER|gof)Q>C;wBYG=skjqSUfKL%t5Kwtqgbwmja&EneeZ@z=iK_i?Mk z{T6Rq7I~s-bLqvlg1U=lPgkD%e#J=qy0!ZIpa0fge^amE+g-HiVX^Kj!LPI4zPfrl z$+Gudf7xq^+fSX9KNV~Fzl*h59#*=$=FCRB^rOF1ZO%9P-O*2$+;!z}&hI~qA}2rl z77p4`*3ki4UuObZrtae6vcx|SDyRY4>I7=h4z4ya7pUEitz~}d%-p7Ao8Zda&q3Xw z7JEmJx4*B{X0_jM=3Cd@+8^My@#8M-P0i=+XWQBT`0;&H==9i!u7ZLu8QFh|z9;#;26o;6>Dz4B6h_oT$4$!}aa&Z@$c+CF>s#b##6=`=r(K(NcVA-cw^RK5m4(nn@t61)efam#_U4|S+?T$W-?y82 z*KUJe_46m3`hAe4;x1Klx%J~_-rb+=|MvIC>wU{UK8E%%CfYT$B&tm1 CqNPXZP?H>cOPT8N9O_I_6qNFo44l=wZc*H?UT-ovL<_1+U( z&eVn}Ow!w~&i|$JUx|T%VF5_Z3ylw_57}EQzIk07!8+$!+mS7K(SKL!f6`}$DOl@N zpX+oZzu@?zAKIdo9BaHanB|_|XJ=q=0Qs!pLj2DKJPc4?z{aNkrJM`v+4fF!Dt40G z(B#qtbLy$k7t^=aCpvwQck53{b#@YL2nX2@ir|J#0)LjPxBO$PS{!V0e3`1AoYIEr z@8D*3HSK4)F8Rf6`+^uo?U*~-46ZPZ3@r5`U`H^4L=Ns>B*J+6m4B=k$YikZG}<1- z&ldP2{i0WHp{1FwN~8h<0|VGfC$9hHJ1f%aH~so|e(A3!d4;`r|zYqM;3-@S%&T1=o+%W)SZ4zj^3nsLL?qshCfd;0HH9Q`V3 zaYWWUE5`HN#eetzFaN#bXXyXh7d@7$x6jF?UE205IDQGpOS{;eK;gW*^}ngD?w{_v z{x)yEc-Clo}#k}BA`rrK8zQ50$WAD8`82<0{ z+aKS;wckz(`qxord&CcH@P2K*X1NR_m+%YYgpSp67FX6=|7hC(dH+(23-4pI`;R`C_j%)e z`shXH-$|!mO`cZs_Wj*sOY=$&$G%(p{>}q7-<^l9o{7EmaK~S_-mhF-vZuG1KB-*r zxM%lUcg@efoeMu-D4!eu^?>5K`zH$RdK_-A*qgQ8hDRi9L2dNxCxr>QVZTfEIW6Q+ zPP+Yi&y;=#E|$-30ry#+m$pw@#I4g4n+z_4EXt)B7#yCjT*l47uweC(mJ9hu?b4<@ zMSuJIOnlvc&Bu2+)Bmph|M#Hq>b(N_PU|^e>w2#%P0ovu6JE9c^8|(W9p5$0I;}b}nycyX`#(4L-rsU)YkkoT 
z?yJn_{zPB@ek4%#mU?2%`Tsx5?L9N5I~@>W2;-N2d9U^bUwKWA?A+#$=f39`F8=qi zeur{(*-rE6aj#!ZTVL#~KkMl#cAk&NFMJdFclF=o|MTDfy3f^|(YN*e5$%7PWpejF zZ5ux>V;?pRm{Sy{wJKCz5EL*dAwPv0wf7Djh z7pJx4Ti3kU^n(5R_42cqHz{!0PH0k%S zk|GQY3>TUY?ce!tI?JXz-Vg5oJ$?0_=slr#Z~y;YUz5>LnDBXhl2hqBy|-Pr`=jDN zA2R&AbIRw*2J1gCXfmlx@s2PzujhPo@Y}na7oJSz`uN!QgCa-%9ka{YyiaCM;HwnZ z@Hz0yaq=(MEwWbAv>mQ;oa&I=n4QO1E(b~#3hbbeXjR>~a$ShjVu@dd=J)0+wRlgc zTl+9ZKV^D@bTzx{fs^KS*VR85YwzB}uCgP#T6Ft-9>I>a^|#8e{r)^f$oci0T~99Z z?PTH6bQU=#pz>$8%=(?hv+l)gU;FQhSe`utL&Ki~%%BR_b>+@~r`N__d$WNx&S~?S zH$t;`D_Lu<{P8;X+i&~6XOT^NV>qjnSXjS4X8t|#X#Cr$;Rh~AuH@t?t@|W5LCN8^ zXXS)-XH_5Vx_?(gB!RUYR>f$@^)9_4ar7VmJR#Y``ifD9Hcb5kR%k2nlKZoL9uyviA2iHchiDen(#% zujls^{PEOwK`o1@{Gth}eBCGBv@n4xhffUx3=9k>X1mCg#jf1q&a_qNF>l@urJ|iJ z8LHQbXz~?X)g4v;arymYx87ps^fR8GKg<&z zwVpHm`lep5wd0q|QVxFANka2#9{<&R8zS!&cyWJT(`q?`Bjr`9?SJ{U*r@w6(^e}Tcyr?mN4YMv8T3MCv+$u%R|W3h4>aqiE6u3C@@JLx zzb&1tqATwI{HGhYW9`h{Dz1_ruWipSSS&3@r4~^;UM|RwS|w4{xmFnVdP-_WTi3*=y>{3|^RLt&z6)xOuMSUR7|@ z)ysqNLrwPYl3lV43u<;GKJM#UwRvCt=lWfj_kVt}@&5k*Q>D!o?@Ks7dBS}m;jH8L zzD@XlcUM}c_?q9nGOtyjw10h~C?IlW zamzQK*C#IJU;F>SXzRwwTn)SGtJ1EQ{;xULk@hZzfk8ugmj1exSt~tQ9eQ~G>}Ozj zdT=(o-Mc*t{{H^|MJ$$cQjzj>0n1ObW*g^yKKFg!LpGluIZ@&t9(>CAJ$*rK&D|qY zYBT>b3hVjYe14DD*Yjb_v?|WWMUm52?`kQ^{rvA1`5>`8v$>CJEB^Zq$D#)f_NzvnGlG?icB#XtSiJ0c%9zxFJ!-|#bBGj~$c!702i zw+XxRzx}-K-=-(OS?W0#MsxLOyxuM-vP0B4f8%GNNu@cFZ<=3hf0n&-tLt;=tIHHV zIavIjIQ_C#+7r_Se?2+{!#;Q{n6Bjbe5pZuL1}BuHO@t;%1O@!zifHI6aDX(qPada zmz{|0sQN$Q_lC85y?;+wpdDVq`62L7eYB57Sd*px0e6m-H>aiFt`~@W)ZUi#H8MKM}psf%B%Le^bfrnF4R(cQ9)2pTcPt@07nm$uVH1VC&rU z9rkaw`)5R$AC!OJdpoV_eCN4o7E>;{Jmh~khhdq;hU8P55A2a?GW@%9P4bHSOEgZ3 zsZHXJ4g{60;3Dr4XP}Uc>Ko4$lU48Cxf|SaXT}7rg)vSAH%jepe~S4g?~?kqDXf(9 z<6r078t3AiR*CQzEi}Jg%DMm6wKdm^I{B-Vm)!O{F*o+O(ZlfH7Yeto+vgqNpI-Z3 zC+mIY{mA!zdtOz&zqkI(uepItEB5XT)41?oASB?)=DvToo!9@HZvFGv`?A|xt6!W5 z%mZ~^L8bWw?%2Y`(;U>lS6%HpKkaO6(-xgKMg09iOfI_T)BiioV+s;rS$jk1(cx+3 zy4P)5`8^8kMFeI&ouzkrFH5HC)zhWckz1b5TK_nE=e6M3Z>!CYx=KXb-wFSAd)nz= 
zdPh4%7(ba!P*RZBd82gwQ_IPMW}fH5Tl-Xl-{?KuZ@n%aTBTgb4*2$e-LC)aA3uIy z==1$lOuk1zXwQyU=Vw<}YUJ0DXs`FH(~wreO@QyH*g+65bh1u++P6@9(` zb8mFbw!bmkCl@8WF_$meJxlIG?j$D1$+uMe88>&ew>~L35$*8yhN#x*<@f(GdkU=e zwc01d7WI!Y^#=d9slS{!S(=|KyywbQ*JsVnzp&czb(^TbrwtR1H)=)}#k&7rc`N7m z#rkVEC$uWGfU2&N7A;VN?d_q-YmNzhN!c1c$#&fful@YCxwjVg{LPxpc|Wl5 zzuS(Gg;Tf=?h824c;~a~ifBm(+5b z(#ylZAh2lF4fdLSTldb*V`tFFo%VO()wdQuIAsOH1dgzTa$bl%8^nJ!@#B+=@2)r= zzbMGS5b#Q8=Zz&JtN>zDbz-?LwI7&%YX zx#z+OYDR!MpNkG=afz1NPCgIffSV>y{`U7(tk!fms=*MD!3Y^QY3Td&b+K{@Q^+(X zaMv5$M!6vP=>Nh;A`BHE$ARo`=zGA(FqKUaR3C$CMTMsC{M#>b1{D6uJnvotvH;Y+ z<7fgkham$OFL=2aY#0_WDQ*W@4Uz?!)ez?#zl(u^!DZ(W&-w9RS6S~~b(6QS{m131 z8TQ+*?<&9LzRP~+u1R^%7b?5|+|0yd@ne_tru=T}cOU%BKU^rYTGDPAWP3tZs$!Y7 zNy47n<|&)9Ut7=c(^O_V{%+~NjnymvKMb>4+DcO?g zVS68>eXYFa`*L$HpXO1=@ZSvIeSZIZ_i^#%yWDSuz1G!Q^VMB?{eST*<=?Ts*^{Mr zzUE|^DwOhrU3&8U^7$5LxAyHk_O_qXcWp49sEwj?= zdrOku{FmC9y*GS`<4Z-=hP&<>%;0vI@k)QkT!L%+_Mef7EJ z_Ma~--?6(tpRz=0!StN&lkF(na3#uK1iHt+wB`P+A1Gxhdbq?^7w zZTq(O{|o*H{S9vUZ{!>&wfgtHw|TEWKaMNCxB7kbBfWJiUm3)Uozp%q^=x|P+$5#O zr*p#di<0k0^&SXnJF%3qs+*7qZed+RpUn#P!&-21O6=E3#l*Dfxv-G2YrzrEFe3g7(W)Vsdvwse}hEl1CT zy3(9l@o#tQo_8Ng`x0NEKlOR7Nu~GR@H-ANpn;YHpTEujcU$(}<lnU!9x%chl?q*hj7ZcN#jzUH|mS^#4OPe%WW&{uQ|TY}j|@|Gt!G zIs4bGc_(z~=%3WW@Bf|}CdM)Sn7saNVRO>$%>TFM8*uM^HoGDJ(62>Pdp(b~JX>2? 
zwOU%(df&Fd+g~z|Pd)VDjKBY<%Z;_we~h*sz4m_1hxOH*2lww;nH%|(O{VJZ-={s+ z0SpC}3=CiNass|SIBzy3=C)tiZh?Q=9F@#>p4Gh(+y2*kz0Lox{bsv93ZMVn6ui;# z$9fx8hv=^g-w%H9<|^aie6058wyE~Qn=M7chu;1*v@nc+{bKK?SLa$i9b>j+)z6+i z`L*ng?T2*Z4X#&nXGqH)wqZZ?-Q%*ewpGsh+*ix5{QT|pI{x$4*1X4)U++mu{B$L4 ze{V}&uti3#@9Ou_vuxxJ)xUSEl;-;`|M;FHXT@#Iz4hxp19O_3+G7}v8OMm zmF+R#(%Vvf*L~Bz?hjSTpFT89{ND3T_?+N^YGa8!h4&1VDjtj*|4tRZ+iYLGEc*S% z%l~Wm57oCWG~m1IzE|<28W#gYgOO{qZ1MUjUtODJ3^Psyl;=O*wM0NE(OGQ9*M7T6 zcfwwu@JgB{_V)OOid5biO-*O!rO&!^?*08qe{4&R$&`QUy|4SDa@8LB$bZ_CZ?CF< zGtV+;7Z*cu^|Y;VhnmZ)`_3ANCkuP1Jjjq$T>a=w_WI&-l{=FUv%EL`Ew}GF*D{6- z)oWXY0+WnW=j(@PG>AmsWchkcR(Im*+<9-6;~VW?S9 zR`UAq(mUK5m$bq;H{>2_-uvy3Cld!lfZB<(fzxI@kG>sy`I-41RTs|*XHIE6ZP2k0 zGTL(PmwoL6T?3(zl$}$)2A)6^S1KeCia+Qp9j;kH{a#he|PT$YxqJ9!v}@!<%>WS(Zfoz@+E5cP(C=|kfZVK^@E0wcK=pS;GDWEma*XJ8$PEl z7J;L>-i8T<)7w}kms~qp@@4((2|}OaZ3{9ko_imxx`;vG`CPLJ(o3f+G^XB~x_J-x z$6M`TKYsgko@r`2b4*>n-1Kpl^%Mc)qfwfHXDv92ROSeTUEjN|xN`E$q6o3CJE!{> zrTfi!;(Si$TKs#l;110cx2}xVBTu?*ZmJx3ZE>)A<-fMpyo-1Ce?Mg&T+@Dg(S4yG zqD%|6ZGYF%&g7vf;N!NU_*KuMb$3#`*y}f5zS{Td{kuP(R#xV!FFm-SajLwk-Gd9? 
zHc2rrVkr2yLuS$)28Z+shXO9oxTaRVqrUgH=_jr0QNi&Wj(FX5ow7i1-g3u2HXosg zoClMqe=tbk5clAb{`17TU=>SU)o;ta@8+obRCqs{{CbZ<@=u1QndcUt>JZ(%Mc$!& z(WeU8cde$c_ar&o{I)bCtm91Mj0L%pUsS(%KbS7$@LuY!{H=bSZ_{e+<=5?0oVWd0 zU;52ost2@}-4`j|^}i%0RAs|9omtkm^OL!n3PKrnmzVOqP4{~v(cH#U%>6Dda7w|P zc`k7=v*y)S=EuD^tZX>@|Io?2U(4Hzninu=$cn6A7^Ly^-85!~rHx6cQ+d*!tW&=; z|NM@{OEw>JJ8G6SPc>>1gK~E#TT)T`6$W?7DT^m*Ol7`b@k9UJ>KGE@WQ4*I}Jw zr_Sf1ll32ao8RsJ?B~C(`sV(9Pv-sGUi`N_TCeWk?z`{YKOf)ixt!N)-SWRD&i;RO zp65HWvTXYMgJ%2lBfSshUikCc&ivN@xUb#idrDm<_bDdcoWlBe|F@>2q`p#q8(#bE#QOWba^Al;X!Xh(h11EpZN5~MjMmKPRFa2cdk!hWs-I0Vqbl3Vd}MMe>s=B6vQX}oK^a7 z@A8xuPgaLR&VF1)nhb|-hbUErzx-SB?`-tn&r#>UDa=~_y=&gxs{_^Kq zwaa^kvzMz+IB|YCv%P%Ew&{}`&sVjczG*EHz)+&C{;uxaTsh%?g`e3QoHUr!_Eb%- z&VKV%HCjHbdBK9{#X82TI1=(^tZK{&^Im-A(d6QzEhog~W=s*eV|rsk=%GiSn>{ql zOdK{I?Azyk!+iC&YnKzAuB`vP<^Nj8?r-U*O=oawF`8I#$-STC&+n-v{A_L2UH%@E z0-b!s0f6o4Yz4mGG;y)joGNg-7^X!UxRnr_M&iPrb#=GHqxo??`Y&_F8mcJh= zUu>WDdi%DEmG))7mrrW8<>lHcZ@sOhlJoDuWbU{B|JLs}Sh-_^y-xm9SUShD+eOYq$2Z@-xd%Op~ zmgbx_-%`JIVaOv7_Aj<(cMR7{Og$qa^r=+;WNyTFmphZyzTPvso_19J^}b70U6ZFt zmt=F^vuC(aeQ|Td`O3{7ycu)OSMJ@sM1)_f=4;&L&r$QM*1!C@_I;ImM(ySw+?pa& zq7_!8O!y%G&459KA-29yzUJ;#v-OYeGc_z)VB{TSCDycVibm9yO&7W4gZQraTb|{X zJ1CJF86YBW`Tf($I@z5eTH7pjo@DQ>aN6uW`Snz(#K!w;{_kAi*nQ`o>JGN=6a6j< zhy@)^WJpZB@olc(j`M9Tsk|GWC@$E*7#UxrdLyjX+wU7z$W?EE(SuXO#rWdaN zFYK<`aIe2;yY1P_wRS&ewA8I#ULClU`D{_-<6;&cf6H}!7p3p?F0{GX`DvBX#MXO4 zH(%YO$6-IaW*_VdQGXDM?Q@AO@jS5#rYZQh2zFLs>% zci~l;%paB1zs0ley%A(>V3R5`-*SJaiqWJG@+JP$6>^xTYAjmwjs$aSeSPnNCkEBBp0U+=!gJHL8^ zZtIi9=5dAZ&GbsQdS8FGHP(Q|;k)Y(xy!Z-s17+~k+_H@_`@x9R?N zSH0@8^4+h$r##H6?5qFQVej_j@RPS+-oDD4WuH^Nde-CPmrdR;HLL$)5Vh>ztITgT zm)`B$xK{UOT~seO?x~WKW#BRB%&e zeq>FW`4O(@Wlu^nB16ukT&a5acg}_FC2`!F0=zw@ZIBW9r&}5K>8HeoPucr+e*ayq z?QL7lxy3QOQd8i}Dmk?r-j{2RM&}wi-i!I%YIk{V_=ehp{|vYPU*LNG#Iwi+xuzOt zoEMcOB3*a)*WB1!c-tp_l~KIc%Ux}s94g(Lz6bm$w^UBuViTJ- zY4@T>6RPVjq{g3^8-G5(viRlhd5;g4cgml4)7bL0F6f6t!OU%!za1>F&^T8AH z=SSP*E^c|YeooNSSAHu_Y5bYLf92V^<`(zj{)R>C_j5kik12U`E$e4M?TWuk-ye>- 
z_Vbebt)$mmcOTuD%=I^->dLJ7x0as#<@@`Z-IvVV&aCqAA3yfW<~{$OSJ)o+H+N61@vG1Gj<0O)d)#=f z`9&0G*cuJ7u)@Pyoe^L8Vn5%UG%fPbg_8ZJ1uJVke}sRESlII`!13|V8@u)IZVrF1 zt;tYwS6gw-z0_UuCBCn1mwli3j=MJMo_KBeQ%P;!6I%?_x(1hzS9__moi)^yt;0MS*iM_lGlIaX2-l$ z<6Ze_zpBXZgRdXV_^|uwiv0PSY*(H=c>m7ro|ym9bCz?1j@*(7>+^Ua_p>DW)V7^p z67^oYXsVw3e^#*k>kGSye-oztPtwTOnmO0>>$|hNSYzcfI$gD|muN-ru;CC{uAhB& z2Gez!`d4rNzPq^p&C7}V{=KjJKjBXvvw-xH=x0@x%wMkkI?{DmfBK}Vz&{J#dcR|L zfBt08zNEk9|BJR(?|<3u5;|NnE7D`meg`FHhQwD^~KJXb%>2{F1HY<-gx|ThBk=y00ioRP{e9K2>D|L;^W;=3-u33r_Pa1u zz;ge#RnD^y9l!Xov}c6@)7k5vj^DSP_3h64dve?BynkH(HC;XC&sWXt?}wj%`zZF_ zz-qamzwF204}Z4A@7#ZT;}_c-{V&fsPj0ooGpXS1b^(^?wEe0b?X9nEIrUfG65jRy z*2Rs7rq#*nzlyL5{~G>bfAIg7!jsDt~>8_Z+)#tAOkN<|ci_51i=T_HrZ+r4n zX49KLUP59b|E31+*}U}78keAjt@VOEQ{Q=}Oyr0%I693x2{IRbjgI_6i9!q8LtEIv&DhVvHy}Fr^r%ERZ+%s%3ySKIj+xXFvT<-qgP4e#BIU6+B*1i~*S&|EE2jwm{qa-|Ne( z_j-TVt+$y~cK2s#VflN5sIoYph-n9M_O~BB_I9`4>Ti>$oxNG)qcDy8Z};!Ttbb;| zTBOg+S(tTy&Bf!>RyWba^!e`~K1;dS8ZpYQtbU)TNGe)ZkKX*(tz{N)@`&+qUn8z9D^nH%~7S}euEhskeL0GBGGQA}7W=JLLH5Z@>}eCL(vySek0-rPb9Pv>{dypsGs{n0!JJXVOG za$NW}=YN#{{;hkjU98#pCjGSc!j?=fW24q)RoS&F2kXDqde?G9ovJlSS+P4s?cINd zy`onhxw;&h#S$v4p{jDK);dSm)bZ25;zf5qss8Xhz#ldLz5ViA+_7pOJS+ZwynUSI z-iiy1!XMUKtKNDW<8=Sggx2V$du#kj3Zh5fVeDekY6TYl`A^mxVpnpaK9rzZbS&YhOM zPlP2XBgLx!<>CIf_Ba0jny1I&e2+0`N5ODQ|yRn!@&Ukuh)Ae>I zr6)TV1~hMII#?^q2Z$A6Kw#~Z}7?O2o>CRv8?_PLf zck^r5t*^g$BpwwrJ~{orye9s8ec$>uJy*4KBltwwbe`Rq!thIWi-i_n8Sw{S=F-j*Z0>S%U@i( zN@ux;>X4PH}TZd+9P{e8kiRB5oP$hYEMDt$(tr8U&Tmo5c6JPtl#)r zN$Akg*qgCk1!;#?a`xBlf1h^#kDTg*^k6;@jj07%-utJBhxc1OYfW(4p>t5EsW!vG zXN3trlcvV9oUG5cGw!Z-UmkVp?A<#p#}}O5KTT6H;lidldO41V*4z>lar$~!gk`f^ zx?O+G#wV>oC(iVLSk}?bwWKX@g?w6Xo_`=)YV`4E$Jg_(jhEHTnElm0nCY;AmD{fW z$J;w)I^UOvdS^*^#uOI&BwmW1P*U3%ARneL%gv->p6kKF50 zy}w$8Z{Mf*xEJ?#KioLy+I05@Ri-oT&Kon?rw8;4Z2956Vfxd1lVVQ&(qCvhXQPc) z;DT+NmrQGGDN$9gzHwBPAxPrc6s63aAUt(VKogUnpFC_X(f&G&p! 
zBh#0l6MSJQDms1j+s<;IENpdHbs;6}-SPX~WsLH-%=PB9e`fM92+_3fF5O>M@@#+D z>*K6{UVIHRb=PK3nRw4;>n-lfLi1D&?BoK>PE@TwQjvAIS$AdR+o_WOH=eBB^Lp1R^9pQ`RN4<4|fSo=C|#i=Qu4S_vYlh$yyZ=8T}sN zdB$BUgi>dIOZ@3s#xBD?0KDh8C!bX1K?TxOn-I?FUw`|_R35jS)9-u|-B zV|xD@36{_KthbkckFwlz`qacp-xw@{Vk4)?$DWC=eYrw>(-|$fsWbeWrk*SvHghMeDBHJV|K8^5jsX^PO_o@dt*OY|wd1Je zKA-2I)lIemx9)y>x$^jhqP5lkmp@Yd@+k7l>OBSP|7yI8UnsOm=YFN5{Cmd*rTdDy zyjQQ`P%i%^t}M4_*7Cz`FU z|NFnroML$J^O_|t^*`;}O>P7^+54{FzvpT-Q;V=p>q^P+Xfv*6XZOBcvg~%6D=vil z9ok=vGupAMtqEEf|OLKXv@BX|0|CIWl z(;`d?Y(6^-)+cK)KIq^o{x4n>tyfz2=k@nHm;aaLowDJ)zUh{?VuI7IN|uyEr+k<7 zgeKiys$Hq(y{~WSq*O7b3yEtCR_zaG3ULaZcrrNsN9MmxqU$uj*_{$&S|G!aURO1# z`Q6^>Iws-CU-r6teV@~uy8UVEn(}r#^{)YZ38y!T%Kbm(5moIZDxR4i%!|!c>qilaETv~H7 zN}IKjrG_&<@u^e51)t?I6B6XhrmyEtU3h(4Pzj6fw=g4rIVQ)`TT?ge*PFQcfp_N} z_9~5|zYkSDw3jb0Q$4ynHTsTFURWi!`i7Ruo97mrO)M~YQ@7skdgaQ!=hcLm(t>?n z%(=pMdwNU9vm0gM{p(I;%YKbFJE%OlKxnGV#dRlr{`dcinYUFCXyH5&ixyTr2rz$hsmflHGeOPuzzpg4<;Q!arhneI-nMqe?mtrw z#+-0fi#d9D!Uo-kF{cOb2+N7xn--~0EQ)%s&1P~mrm#9 z{@i5KyOO6C{PIQ6!JdisR@?p0wQZfJFsGZVY9dp@_Yaxtwxx!;+DfQ+XtEq=spG7D zc;=^5(@K`VmeGA>5;;|@j&GJt=-#&Gt<)T^7uR-8=josRHQ22D?t^Jdbk{|$_TuXF zx-sqDL50(`R!usLJ-Li>%eE3=p=X9Yn=WDi| z{XY3Z)xjslJMBzVSWasn`V}U&so>BapXZ5QJe+5y?YU?&XP4Ci!SaM?apL-j>?KuC1Q;nP3zLl?kYx}0kde{4NA&Qo1X=`IP6~+qb5R6jjxE zTWX{()3n$+SChy0VDm2ZckNa8L+3<)4mV(qD z+k@smYtmoZFon76?{udJprJzXXZ3ak+rt?$?9=vMPv7CP;@i?CjR*hUuCZ==dwE*6 zz1oBJsoXQC@GkABTzOL3;r)^Y?hX6A9ScO4GFYoR+%CSc=E}~thPL_N1K+bqMm$-1 z`_IDO`Hz#ms^_Of&*R-1o4mi|(Eh#oP04pxR6hJZOH#79GQBeO_Pa9sXID1uUHG9n zl~Fk@Bz0}hUy0Hh$sK9EYM<_E{Ce^C=e2Kp{w@W4V&Wz%XHQ=Ca-ql< z7LWG4T?t1UKG>yLp1NW-e?oq}`}Bgt$B!OoJEQe<`nuk<$a7cRf@hVq`usjwCU@;x zN$u5}+fr`D>%C6dwJxRi{5PQshL^t9Z~O0lyd#|Zh3w3KPyg*NEt;JDdiH+Sz=e#f z-^I+Bl#zMylxeE*gDPleP4kuE|K0cPr^MK6usEo?iSANelEEAHVgggZsiO~!)hy0? 
zW$4M>S3jzJiY>m1&vRp1v&Y@}VMh4NO9J-jBPJ$djVF>+6|ooBuq2 zU&b@Dw@UxU=J;c~7hH09xG${i);)H|{(#)%dFeN&J#V@vZp5@KIO;|1;=iogy{*qR z-}i)fsou-3O80ckd%q;-z0OI_s_A-uQ*PCn1$y1m%ALt{U{BQNx5qzxZ0)H!^;cBp z{eIJLRo}~Uv=pbs!~}2MBEhcpQ;XqOvD%)hU{~7!HBpV|U3vdDSl5ZnY{~l1aB>s> z>a+=doQeuklw;0MBy;Z+l`Q-AocNyO0bIR9U58d|P{<5XN)TP}?8>5cLT-JOT zuVLb#H*M3VHBoymM8`Z~4bbCa&{5k2}|DEdIm((D_-0Ueg!}ycV)B~AK8|FQ|K6ldZZ$cSg zd-&~^HygeSn`9}`Cs`Ap5$L}s#D}A?M`g(~C>CVA^w|%W%vgkQ9Fa-IS z+wHv?TmL%t-*zu{r@SMp(|1lux%5-v_1=~XKB2Ro#A>b&iTwHaebcu+)5_U;@0vcH zb>rv3XB85KX8IRO-s@EJKNB{am~YQj`AYH4^o>joTdth@-dw2h)^S}-xe4pqFovfc zG5`M;9h05A?O*%ER@ZwBQY9z37sNO;ys`>23U=BTBNuw@#YBgD;*rINI!(UX@bViq zU*oNO_2pP&Z_@wl={B$U%=gbKy(_$G^_?ez%sUtul>*kyVz2+!bNh#iBE!z4$iLRJ zf1jALYv#PWLaVm>ulIY#J~`0$RI>KP>A979E!wFwZ`ayApV-arxT`#D-R<(F@@v;G zI3vm+)sjE^gMt?WgGSef_0P{PY)ltRxnR1vXOf<;tnCDDCI3ZP_On+nT9x)63K#&sgCw_4TKm7qTKg_ojPGM{lZ-D%q-26286vmHP??R)+x232Rlv8646r z@9bZeaA<#^-^Xb!j4AH#W~9|#d>$U3<-J;9f~ZTVZ_CsNA(xbn8n@|M+N;anNFu#r|}1$SC%-p>`C?cr-}!%t2tkn@yrdjtDWU-?^O0{T5z&2r$N$n z;dR?8r@p>j)=)3Nu!~hyCiF~w`Od%LT%VYJ9G(69MTbeohoy{@?g%}dl&R{{t2z7D zpN6NeT!b7#SuW&zAJg>iTciCvzboC~TEa8S_?694C+4z#uV>I;(dyJmyu9hr>&mr9 ziULE|=iR*atnFL(@qMRbW2aRgU8>DEZ?FCokF(^x_5K|$`RtaFcUQh2{qnHz;8m~fQ9pAF(--N? 
z+de_Qll%30|M!#6SWn2UTz8__;ddaX^ILZ=#_IH!f0ul|^!vrRkD+z8+Qzx3&HdMY zI@R%W>&N>-d(KM-&HZfc{a5*5e9nBi_xm4wi+y{1#pBO%zDr+73eC&UeP8zVOksQZ zF2cKmCPPQL8-pOZnr?c?8~_xatmT60!RSod%0k@A1PZNJ~TcbL`wT>bao>{h#* zenKlE%^!tg{&H^*hgWGY-?Q~rZ?A8_&Oh%CJ%8Dv1gc+r)I$tD{i(4wn<@Ys6#Tlj z#Xex#?(gnNdzl$DST?N?b7o|?5PDAP*8Xtr{|pQaHxa6J7kp1-3+$K&bn-*-Ep!9a_C9i+&ObiQFzqxbK zN1Z{WA*q6~=J$*6JMy=)U%ugb6yN`6^Y6*4wSR2YR24lkal(&Vk>7>50vt~yb*8(2 z{CQfk`_93*H(GI>Tdtga|8?p7%4nyf8-MLn)nH`caxmP(aAc2icOX;9gb)8ee|yPl z%XaO5M#!_0+b!&YOd%Z~{!cf4w?MgyLGyBn{6dx$JfM|w-|ZI4YA`I=bYp*cfAs$s z%6V`3!!JlOur82@WB7g7q5SN`d_lf|V{fwMAFQ+AwBx~{{jrlH{$JK++~${esCTE& z#81z*Zl0$6XL^TF!|xR}fgV>@Z~QOtP@a_`M8TK;C@W|kS5+zlLy7*;PfP+_>?{61 ziTnO~@$<_cQ&oQaU+~sD@7cuAcdg$zS?%W;{ZIaTUT@Eyx^H{yK6En#+a2mknzD4y z&x9YUB_5r3-9wTz<=K{rcgyMY5k$wx5o@mA?6R%vJ6@ulc!uI8~3_eyhA^!~UNyS{6n8X*QW- zI_FY-Wl(L(e(R6Ty?H10>8|_#FwFeHgV*3?h|Aa~R_|o-;hZKv$Kq^to*7G!Lf|Gj46-@E4j(?sf-7^bv1Es%1J z>JNSv_HU6U!>^*14oeq2sds$Bsj!qGj)CEB|C)8P@7cbHyIs8_J?>LpLSW39g|FX4 z{cfTkJR&wmo}-*yFu#g9|R0}ns67+97#LWcnr`f9{QH9A#EFJZ@nwIeDLpu=dVSaan$KR_e^2NCZNs=z>Adi% ze~h5TxeakA&dHtyCCrqV=Lh!fJ^87D;lf`N0WO9}37e1ySr$c_2*TuRK4c{(kRMZFU*?+sC{(>S_;h#QbLj zZB};3cUkxUG&57}*&R{kf7g~TU1MDNs>J;5smPP}Z^u_oT*bh^`sCP*hBBv8QCY@` zCl<}`bqrotki4&MzxD5l)9->e&vZ(q4ga5HSKpDh60vX=G53WgIhOdrmD&wm-f>maK3Y*LC4B*Da~e26zH zE?d4$)_C1HZDxiA1t+4cORq3kF$IY-EHC}S@WMF3}rahncMo-!=>UYWEq&?V! 
z3=9FcVycgI-mBskP?^C|@S?|ZUD?USnqCZ{j2W?;kJN`-a2A511ym|8I5P3-w`=<) zTZI^cR`_xJ7W(CW<^G$!Om5~&)C&GS=)9VDXtn>30+6G?Tj5g|u}t7LpUQZ^fWP74 z{Q1xOq)pG&hhN&iLFmEq^1qvJZ^~qrWn*9f?~y*my0hT4FoP;X$E!x?SWrI;w9UsT zo{O=Sg(YySxN0qEeKlzPdV>(dDS@5;yaff)&RzQd^z>FK_HS=Xx1XQ;_S^fVMW797 zT%bMWoD8W)A83YGO#g4NO3Lv2o5^>Y?Qc(5#FBCM;-kD@k9e#=X9j@GT)@&$q_j$8 z0q6fXM}v|XyOUEFOPf5I(SG1tcwEVMos0#b{oA6tXlO9cKkdo{RfUte-2y7qn$Xt{EU40Ij6hPmp2x%JVHkMX{7 zPygZ+BKPUlt%Z`;cfPvC7G8h2X-eg3@iR+}7~rbiWbR+DW}0KV#eVKZ?+MqBUq5eo zGS;Lj!SA*Hj_TIgm2XQmKTi=Uc;1(0B@9js3=C6*nSXsNy#Hr*pq-eE*6hi@!mC@) zKN0--_xk7QYhOgjZD{<|08W4m3=4Dub#~l2w9shc^mZpZkzW(_SMe-Z((v#UL-(_D z%aXyFih;qwQfd2#qm$jMy$wV7yuPXH^H)}vOw&*W4OxpVu~|^WW3#{B;%YVildzhAZTrjGX^qFUMcx`P)>vuK>&<=;GF5rCx zUZPXELjL>+&VOHSpMUeUT~7A$U@sBPtY`B~@BB}1ebzT)!o6LKZ$4)fSjF=o_Pe?H z?;6IJ8)BFor?@*TWcj2sq3Ga@vZp!W_Dj!cvxoEvFfc3-I>5}pz~CjfhtW$s@^9!t z$F5mP9RH7fGJS0|h5gL&r1f97>}#p76k%zt4;DMqbN2SXQ(^y7U|#b%BmLm@^Eoa{ z8-yNin#}$)qCT`h!%XPH``no83vCuW`co|WPt;6ddwt~Z#TFa*zt=-GUP$(lGd#dO zS4-c0(t&x7^CYxx{&;HarxUTJUav5OUOJUy;SY#Tzh{@o(6^hPl?Wc7nnqN0C#3fnkd7 zCcE>tzg51s-dgxhT(|n}vhREsHCY`L#a}(1SMc>bE9Xlut`$P{ZJU019|(8*ub}or z`g%?7Lh&EZGi%s5EM&R7Zq8F}CXQC=p7|ChtGPhyr*z{#%kNWBWMFXE8CY`v zyH~&|Pyjb{IWJ^9CAcB$RYd$Ezm2aaNNRddx%&T}X7Zh0VYeNd+hyaYJy-0m zKfB}Bll+~Zy*tjgU;VE8(US4WEB*e4+50M1`s--8I%!*i&Uoli1Z|iJF%egdxpCj- z!^YcR-Z|RW-f%OSrI@{cS<$91>4&DX#|Zo1Qup4g$@qk`z+GvXy|mVUbB%5CtxOq{ z$`8Ed-Mm-5L4@Ix(E=+528Johpe;KN>m4Frhi>zWV~n?dt`cv%V0#DKiEPuT*t{g3HT7tQ(T#k{bF;gqJbYtuJfedg}};qMqKL5HPGVTPEO z{HcSnZkF`#eNX>7=e>@%FNsbHR;^~!Sa|wre4YHW)_=OQ=am|-->Y}D+o5T{7XOv|8yaSFF)*BBTE*bt>L|*< z@Pdm=xFYv`;(DG7>6VKx)utbBspI_9^*lVTPi07)%Bu22+cBYH z-?!zpyi!JI_7@l3)n{OEXjl}=2+q6;jfYrFV(U&N{QvD=ZC6x(tNO-Uiyxi03m+tZ zzkWDZ^>$h0kEgfv7uLtTk*R8)YA>#`MLVzl@#OyB_fK&z_`{Gr@7}9pJ*!Xes*TxT zzbqh3>40l-`fruS&(m!m6fRHh(4j$olDYbzvf?LS-|`L%kr zbba{52EE3;bzZSvsSQuLrL0uAecE;Y3H=KdGnhLy{vN|B5reu%OV)IO>Qhi1&5+s^ z`SAQQZLcU!0UiDo8yad(F`s(0zvk>NzC(MbFoTM2VOFVAk_$F4g3hZ6@JjFkE!vN3 
zWclH$m>A#rC*+j=^xVZxY-fxe4(%^uwYh)m-L&;T`)|}q?+pS~j;ss}B~rX5#gc27 z7#!U7W?J4>f6u)rsPX*T`0FeG-nA>=rZ2rhs4=^K$02Q&5}sBukDUwdgX$&*h6TYV zPAy>xVPLpmx$fsJC;bh}wHT#7iCtT20%{sOyTA8PyWk0L2d`UmH>Piy!sG-ywl^(U;|$eaR+Te@D#Cg}MKK@kGtulE2ki za@p4SwZ%^(5C8dh=rzm!+xZL(C-^zvOkVEi|L@yW#%Jfgoqrf}^Pg8tVge`QRCxza zrmr&F)AyX>U;@{r8iFZ-({C?Z1}YtH_%39;bFO7+?CB>;THDWohT7uYPv`x5wDjb` zMQlyd^JAWGpIqws`l&;(wQps+WVu>(p0`ah^n7qGb+)5(+pcA2&z^kZC-vv--kM4BcTRex z7=90ry*fbOaRHHK9fJIyQlafG?)M;j)_iTIFeSSkH~AHP=I4Irc+^p1=l5*;PfRa-tQi;8*O_-MHZU;n#)_pm1^fh|7I=Ut|q#N z>;9ze2Ub~&Ka)vgU|_fqE5O9?ASUH+`q|Aq7rQRXi%37vEDO$?>%}%BJ5#+?=tQ!@ z{4Ch`l;=cZZ8wzI3o*JSQ^SMW%`{Yncb^{3-qG-j<)d^Pgm1n2>1qdUGh( z3eO#7S@&N~*mPhO*NY#QlYUPqJ#oh=U2U5C*6qzq53;%UMz3V4->O=p$G~u*RQ15# z>_>TjrtySzg;z2?5!x>Q)rS8W3&%93E7k`m@oNc!njMNOwZ1XV&(Ft?T`5;JoWE=8>T0T3jz#vPfsflU|`Ui zq-r=b1-8GJ!GUGdllHZrR=?Y0shaL%_K1^F|7%=)uYbNT*Z)(Z3J>Gs(zzrj6e-M) z0ktzV7#OTf56qrd@|c~0fn$-tx`Nb(&~EW(F_I2mER{1^o!PgnEjr23%A6s)`QP)X zL^cM71;zqQ45F^-pi`n4HYtk#>-D#r_eXd7-vpidbu7httZ?5>b6MyZ(FEl*-rgwJOg~F`V*U z+{Li^eY8_L6NAIWm)@Xp6NV>!YkshQd-qY5vG*zWvt5RdU)8w%&tcI9Z5kF4wgDN( z@af5{^}25YHjF3w(AcFETptwmAKHtRzSws2*c*=v=&{!BN%Wb)Q(g z9H?yv5oKL?is2Ir$8`2R>gPYfI#LV_UNWI<6Vx39LfI59!#%aaLG{3*so6o#*6z#C zJol6l+M!}#n8K;W9|7*~Y4Llcg8HlAKnh3%6?E;N8oao66uTSGWriMW$Dq~u^K@8= z4*v~@N(XRG0v$#cxWcGm64SMw)xPI~QyW~1(=S`XeROFG!}oQO(}Uy+PA^N>1bGz_ zmRh|PsSU@bRBG|>_|~rdLI!S_XN*yCS^KAk)$jJ)wB24Oi)1(FRCbl~Sk2GV^k=-@mUGMDIkd~n(BSpxSM>A}4gQMi!#SsznIPN585+DE_jR0N z0Cng=-D^-2n}K14$rKTT6)XX%3{%-z;6dTI!e)UD)dT-TH zZe*YLvX-y+(BxlniUE66V>;hyDkHcLk#O~cvng)XaM8~_~g16P!$U5 zH1H=MyZZA}1N*nPg_D<`Ti^_}K18nhdv2j~Qt6Okv6(1`!4>mz5P4; zHa<-yE&dykecgBMKmJ~EwchFcqUsj~Z~k4Z-C}R{RZXWeHGJLkoSd6m4qok9{khBd zp68y)44}L^#e$h{-@B}+>$`-@^Gj-P^IcF>bqT1rvLO5ViPbULzI(G@ThBPBX}GHY zO}TRU+H&PG+lOp&xBS|tXt=Lp-^t(R|M`1RegNB=UB+_zl=`PR6jUBm~ws*e8$ylS} zvQHAX<8RJdld$k>>ora}@8dUj)WsUzIog+e+dcBz+Z#^DxBk6hXeDi?oLZ85T5C_* zDHcsD_GAC_ZGT6-dRCpva0F3DF*11ds|z*nT)i&$#m8-PcKo(1|22QQ@`fpfoA%|* 
z{-7{zx|~bc|IKmHEeC>*olssd<4||Zr~F?@F%HwB_pv-@HcX5bt17-6zq4Xtc2&mG z^=^uZwWpV*?`3oo)$*y>zhl{{tM@l3XqI%pUBBl=T3q#9{q&VTH=Sv({E&WLg|%ew zGrt8FbgnzEo(XhG0KhsBz2(&n6NisX6te@=M* zp}YH?Ca60YPGKlsc4Y0hDt?a@)0Wo0dph~fOkwq(uY}d6tp3%a+3lNYko)Y-k^()} z299;*)rWJoe>{0S`Or^k&(*cNzV~TzG~I6RUt691E_L!B+oju9$`qi!?qGt zmWFbn_e;Ej83Nb@7#2)deBHY&$VG1&&lW9q&iO)4)yzT>hpxE^2c%5B@@Xz-`I97t z)AonzpDuX*=mAr8kgYw z%0TBfTsR$YCuL>Pwe0Pm*+U;RcpJ{xYpCA9QZHB%sK>S`;?cMGPOpaQJKoRNY0mh1 z=^n4xa{1Ofe^Y(=7w3wIEvTKrFfY^o=7fNF<8@{)6%#xv1@=r|5)#nI$SwL}olCt_ zoXK+KKjAV}X0Ie~&Mszp0IK#L&8S{#m%4NRr!|T9mIl6?lGwdyjRNm<-%qD=Wj9Y_ z`JR0LZTXLiZTvf%qw*S(ICQ53uJW1Evd&y7(e>KJB(DEXCnXksy72kqk+>-$p<;F1 zvp0G&{XY{hZ`LnqF}3UO&sF<&N}O~Kp84flx(XKqLxYg}#>1O-o!A@5)EZ+b%B*=X zj%lCbbk9h~l+r++C!k{)L|M8PPl zhYmS-DxYp^spotv#h{rvX_M0I*0`oUBFs_>nX;NpVt4i)(w`i??5KC2=M>*f2SYZr zOnPKL^@!*1?MJ+v^rp3@Jy>}C=VWe|&(-of_3!Rp>weo{LCfQdOTSEFP+mGyB!1UF z(ewG=r?v11KM+6qj(=_X=S!U4H>c#bGF|_FYMFJbn0HL3kN1u@>-j5dU)ohD1$^@N z)NS7QZ(DE6H^aFB3@zWMzO*>Fjf3<5r%jnPRoQ!l9N&~*`M2m<=&!wBe!TzpYX08| z@8({qJ}x6Vk=cRY`Rze8BQE|_|J4>X3zTdd+g1o3MX!N=dCh%a7n?F$vfod zz8c-WTk!^smuBQ`diI1V%=-nW3WMjzuMf^kO}@U5^G({>5KF_#`z*;nx9+L)nk!zj z=uw~P`Igo1dc4oMnd+UMYFqIB*#GONK14>^*uJ0Ya(+_eE2-<&DOcy3t3Uo%dui_t zYxh(7-eMZ>jVo7r8cx%{yl&3h@B00-AN)11zsXfE6RKd~x>fbomTew_3o5hc_3eMY zWKQrUUrk1zN?H43I`#8k&DGRruNC`z_ge6pUq`Li%?M<8p|WcIpMxJV->uzveRu!we$AiX+;}5Q{1utn*2qY`IFYoT zF|u5S`IJ2GGtk!c5*HSsPwc#suRdpMd~szu`JmfeR+_Aak{Sirz}e^&lNHmC1*Ua!yb)sUT5 zrEj+K*;O{#^w2l^LL{R8)U7TB4JU2CxZ97b>F368x2nD8|5{tVKtz03dh0V!%>{p7 z#C=y~KOXw6{`Z^H)pskG^Oo5E`&#|(U-R;viOj{qKSA4*@BVxmT;0mFz&v;w7lW#B zkXxcqF@wW&xrUAD9gj3k->zBtJ@VA)opC#lN^w_mM2YuR+ROazVEh}OZAV$&o&NV#^}e2Lw9Tj2<$s@-&X<0x zYZCSS)p;I&x#(sy-UFLtqkqr+-#Yp3UHh98@5IgDY*!q#$@g^Kr)Bo$ultw(e;Z@} zUz-2FYURFzxm)&K>y28Ldu7}F|9RWvkADk&=aDgCYe8zzg&D6~Mef>9I~pU$H0w;# zEZ+_$4fZ_>GL5b6sk_oew3cbkuk}{wpEW-(zAWD@QL!*_(VmA}=9fH=T6yAc{Clpw z@n2%!&wfxHe7KJc6Ex_)lgby=y3G|z2;CwrU3#3CJ{yHmQS9^AkDf`cowFy|Sy9b89udly_?aIEF?>Hl9-xBqQ_uFmdm)^X9{`JdU_ 
zs~T&si9fpC^ZyoiT*0&pa`&dQ0x4-?qvcJ}<(l1IZgyF`6Gq)0J7yeW`5FLHBZ^0v{&sX(dDbJle zEmOY#?!T3D)_Oi|dZ&F^tUJz=W!OF^@5&Fe%bNZHhqC)4IxfnF3 z9_~u>tvdI5?yBUke7a+kCzA%a-gf%I z-q5tLK*=_}%F1|+&cr~;!)MoCj3`aNb0~i&W79$gr$as#lXp&tUiR(s%q1R6J9I7` zWbEJP9dK3Q*o6nKO&eNL79Q+hyZ+~$YX8^|ngAj7bml74Iflq|Uv#Z_$OSPi7q3&a%}V+?H8=mVcY?+qEXF4T|0i z?{qJ95q@@0U%S3MO`M%ULqzD3>YUj@*Y})$^LNE}?|{bK_X`cGuD9IxuX2B}H|4$L zUhl7QtFDwYaWx1D{PX<4&d}0sNdkTA~tbp!?v@}Z)}@8ZRUY> zjqw*=35T?0d{Eau*HQl`*GR?jcy~}tNR(J6_tc4tbbnY&oX)?W&GdbmR-VSqnG37E zz9()FQD<=QPkJw_`fqY;{N#JSRsAgYGk0>li86onSL6==l$R0PWuz)Wt;avi4uMk{ z()X6_u6}vP^zJO-19v_y-t?6{@LMwH|HtZnUNhJK@Kkl3)V*n%cH~S|OQEjtyjy?T zzKaA*TKY8mn(E}4tdk8hrKA04us8Nex87Nh?PX%QfK6HI#fiD`d402V9v9va((m4P z)bLpsZ=H%ky0n4khH24DSe{va|96pp$C;x>!dc-J4C<@B?T+(lxs>G8rg=R|VRqj7 zarNqL)vk@d)Yfd-ux$!g*k|k5m;${Q_c+x~Y&r9w>;3el@y91@SbyeW+4@)ag33x# zwZkh8%xDqsb;{%5ZYXk*OY+)qeQGtQSAT)TjJ@JZRIcwnd9N{WURL56VLh*d6W;F@ z@#>!Rok3zxes}-rx0(Aku03n}>D$Eyvk?k|j1uN{}Uz_}vr;5Mjb`aMIE&cb?90k9-+IehL zTN)PdP*u@G^-RfH?;F>@Z%A13=0*0rTaTtP?em%WzgFk+4w1}T`4Kax`)y=f+PkNs z>3h_Va?8a9VwD}Pw(l42xoB)HaW3t>%3k@Y%hg`}m8e@kW5W9HOYiv}Or7R1U5gRC zf4-+SyVP^b!>UPjVjc$l+F|Bv-v!}H%AChHtmpBc{b4xbyz3^}$CfrYpF1g}%BmEi(SOa=EYcfmEw)+qie9bLHwUb-J)?vE!V% zi~E#U?`LcIwQfnBlN=+L@rLBcP}c13bF0y|AR(x~5(^;pj!vR8RBn&d~iAYLgb4?poK}X?mIK&e&kx@JLc zFaF=P+q>m!9?x*EMxhUl~=Z|za!>UdO-g6oii_VnYAXzXdM#{wp%%?Ma1W3%eyJ% z`ET}}P;F6rYw@6$*-pldYh8knF{kye_nRu2zs~+Ud&;J?hxctyzSpaxF z&T=QQM9vB!6IC9sjw#}eGfY=$?VQ`9A2~zyMhok$jPYU57JQ9AesP~%z5cr7 ziBn?^O*-d3?fr%aOaA` z%fBt(c4h8^JHCdF7B5s4p0?i7x^mvp(IPf_&H5IdYv=hN)?AeR>r{_Qr( zSNfa;?WRrPJN#>z_>RN>SO0Q;%NNIdurezC`J8vRc)mUVbL&d#u5au5zAjwH`1bif z|E2%Lf4vuDU&p(4vDrNH9a@$4E&mt$eDB(N`=9>pu>GYvxr_fhy)}61bH%Xv`hs%{ zkA9E-Q=OahJ4w^u?@vPVJuU{$e_mo~%({y=Uz=94buy!tXyv&Vc3j6^b+3#1zwk$d zUeAh4GaVCSryWfw<*U5hBFd0a->8UC3WLj=+o_|vPt!Mw!-^Vt;(|FH( z{@T%t5-lt~8{)5}ExB>%qp8Q{vN=i|4-c@rT$MQLHbGxnMP|j%Mcv8ySCyJ{mMCo7 z=5gWa>70&<1y@5@k0d-?>69aSghNovY~Dl->B)wNrZw%I&D@4VCR!%M%sn)9x1*YB5gdjDS+hfY#%{B<*Or`);2?f;j5e13M_Qqe2-r^R^W 
z?Bg$O6jBzgTYdgPNi0LIoBD5&`TS0A#O*B>FMfV%v&KH=kI@k?1p5x3ZYVci*ZM~! zaJ~(oRsD%J*Z+cEz0M}*oR#Gk2eeBhsB%T!@!f8%4)zAq7LDh}SaU@K2; zsZvN$i_L8Q`{UflH;3OJRWS7S7+$YQT(&oM_4ki>K1c@ z&z)daCiSp)8?=^%J-wpU?eHgE^kG|VcT);S9mg3(1!b1{kIlxfuYFIw*%RZzo5(4!$9&-QVw&}~8*1~@BR%_@{*D3Z_6&c?C zQjGcg{myP(@qdSXck&qZfB#%mEzBAx@XXQU*QxuK)iHVJnpM_JiTo4%?A+}6vK2Y` z|0K&IrY@GBqPX&^eNy*jmeePUB0kIOt$BXBBStRbMKC`DODW4Q`8%6;?2|vWx#TMU z6pkZm$q)ILI%;>@9WVYD{_pei&&e8-m%0Qxl+4oEBCeRzX1FpmT4_pK?UMRilTKXF zU|2Tk&eT2AgcM`8^gk23=JxRas`vY+uZ^_WoayuL+^h%UOji%xd1NUi9(P9JeBJ4D z=dHxfJhI`wSD$u*p-Iz8>Zx+b(%Xd}-A?c+b{Prm*}1PcJ)HG(M`ZtnRwGqUj`Wwd z0r9S^Zdnig-ZPq)wA>7dc>4d(F|#!{=EitWY~5*oWaj?lwa0$-itf@;lt2IR7=N!_ zyv~I;^N$@a-{hb3`_sYio6PHqfAt*cm}_%A?ESN8hp$TfShXnm%4cTnYK>4Q&BN8w z4}}+VEik@+J?vD+BjZOuHhe0-6t*wILjL)~9Uqi9cAN|Fimj_#?Cu?S@k+M9^z%gt zS5<%Bd*N{4=KUfkGo3Gwx|lX5xo`2l_j;w5S!9X-yfwcUuRS{BLZ;HylWb2V57|64 zN)Q8bgCrds{@tXe}Zx#Ps|NU>0OQVeYrBiK%Yn7JN z|6C`&DL+m8`qa$3Dw0cl`Z(V=Ta>GCzERKc)_Hwu#uYC?*$w@yRL*BJ9G+BuKc7m@OgPzXbZ(r?me(O}GhTqS3KI-aMYD+(E-O%n?SjsiU zdAojAeE8Zaf}d|+Dc!v-Xqk;xT>8cC#rM^s?{fUVwA;mgV)dJptN%aet&6otPIxDH z)PO7Om)FQ(c2>t z>U$6StWNmSdhF0imY4DCX3gELFFw6?apIr1>1odvy$;x9$mf(B)DPkrTX7g7sj0eNtH7)n%8J z>gK*)aOQNj9P|A@K392X?_qqxxM0r<6RXnGX`d>$sPxUd@3isKo2Xr_p6_n88t?WJ zyWV+qeUAJgZGl zhsNR`J%^mSf>uu6R@CN{SQgqCvgcS`D1*T1`rF^`-kg`-pCY~O{i(Z}OYJ*F{buaB zu>JAgOFBEV^_}%5r0oon()s^+Rb1HQttGkU8=lBTo+!7iGl;G`+`F%bna#UTRQKh} zpcKU=<&)T(Ya^%Wv_77t>)56`zj$L!q_M!4oqRF>FZ=Ev^JeUdZf zB6xF*!dPZ|L@u9E-m^1hb4<$FQ$NCtuGxe~Cw3dInEqhjxu5*}{IHv(kH6 zj%veS)7Lc|-4mbg*I2OO@5=Dic~9+kY`Lg)_W%EFN=rgn|E>Dad7R;zKg%tFC6`Q} zcCThQQg%v@xpDW*g6X?Tm6jCQX9n4vdTzc>|Lm{068^QjX7tOoACR2<#AIFd<3;aU z%_ludyz0O9`NZai+_&pgTRdOoMqZn-Lob$9VP3pO>Zy)h{;wxwc=t5V-7;0=(f{sBPP^ay@2}~seH<+3>uP_+OR(X!f_xD}LTl!;XZt@rE4{f( z_gKugTPYp?W^(ep*XIhlSA6Sr$?oIj-tXpq+xIDVV;9%=s8F?YXGE9mWm+AxAYk{) z*Rwy_F#ML=vH#`1-~Wzht6hG>+_6|+_?+Md!E=F34`dzw*l+wNc)UMAUhl^mYq^I^ zZ@f3{+wekphw`2K|J21z_p+_|DH9x1+hD}$tMH}4=tD?{0ZW~KICID6<;TBmYHASB 
zwo|#Bb9GvbU-60C&-GW|JFs)nJMp{ytP?L-%6d-}zNR;4*N2^D2KT-;EsAD6AW`vq zjcah(x~w^+rqZ^DWG8F3yu4mt{OokIs@Pf~{pwj6`~njMCasRVwEAgK+?vFHbL00- zKfbtKL3lMoifOh+lh`_wvuplGw9e*?;z-Lm-c-8fSzeq^Rr}HJE5qII{%_X*|NYh1 zL|HD~gg1w64HHx3x3w>7DhjA5dHl`!_=oLVH!Z4R$*D_@mUF-VDC!l{2kym^|CJr@ zOgns)kGIuPMO8&@zlVQ++c&VTlPDC_j|@~fT4 zOk=0VXFvWe{+{Fgko*_O)AH1Ae)ihE7U6sXzx$mYeBA%1MDI-f^EctetPD?GoE-m& z#eUc9UecKP^JDcKJ#)^j?-wzqr8vd=7XbyTDVSVY} zzJGbD^Jahf`Epg*QN!hTzkNGYy*NRp>-`S%V=Es0b?IB3*0yefci**Xm2MBu*!;EL zFJm@KXui_U*7)iRI9X)2VZZp zd@nP9QZ{#X{u;x1QE|oPufO;1s&8Lv)w1PE7O$X^fH&i^?0d_9yIamFPV}ufyefK` zQ9S{wbf>5{SzydEuA#& z^2x0iDmU2LsB7q3aenKq%Dwh;>GN(Y?cLM-zGptOI{M&%^@UeEm%aBXIdaE%A^*p7 zh4m_bl%LJ7)=~TF{d%)Rcj%hxbIua(n=f>nH4WczLi6LV-Rbv=&2yK&*(o|d`{L<^ zH}fC96ns9Hi%;+7u-?VzCQni;D*xc+vgT+een5z#my=6CwH>ER}q%} zC4296#8c%pQd{OnoK>Ax+1u&0=9lAqsamVm^IDh`bd_!5I)r^iS$6mr`EA>1?{z_= z|I=Zo=1bj0Gdfy}KAz7OKKVTBt=^u~N!2mQU%0n8{(8UZ$b!jd&)e4of9pQAJa|LW zr8YjBZytZmh05O6{P2|a=C=Ft`S&LNa66+NHShW7y~~rSes(6*jAOT>r^4KR&+pfZ z3|;@r6mMRno_y=L`17vHyjA`ERrZf~B_DG?ow&i;_M3;U%e0?CyWhUOQL|&0lGtOJ z#FJ-?BX8!^`EEPlu_NRE%$=K)WRmW5TPDqNR(scN)M)hKPmk!{q&Z#5U5}*-YFhja zmTmICQe1Pqz_n5$RN3d-!dW(lgD&?x?rGFuy|>~dW9GZ9kGSIQ_IU;$Sa#Yi&BwFs z+`MpxKi{tbrG-5<8O!^P=ZfZaEtug z_i5MEn@4ASx!u42c3{%WG_HmZY<_p2U%sETCu5q!hP&VVo|QLzI6Ffmj^(A%>6IJg zAM8DODn>*1(nock=8Zljh1HuYQww9de{-=qMA|Oa=nTEF!g$KPg#sS01y$WNjs~(U znRmhU@e>1WhAO+u7x%{$_#W5W5yU5TTD$oB>s4-k5962EzCSl>Hb39v#)#BA|2)sQ zoKaoPwm@XUKc%^Ajy_>rDBaK4^4;&(6_bZ*ljm5lt)Krq>txrX!};!$3Ytzwr=-Wl zaGyTZB+QvKeXf0kd0--;JWnsS;Q0>jl&<+ z4t$c6;}u+Xi?8LQ$^CGS|AFVH%CBuFyLrj2hnlq~_gvic_;UWvTD8ghBG?4D z@2J=9QJY=+YKeai&yigp_htxAy4T$IyIcN)t>vzQw=-h?pH|QmW;%1NbahYSJLgND z&byS{Oy{J!T~D2J^I6(8b?t*S;xBU*!tO25-B_X!dEPZ8C->#LyVak|T8z$q%y{$I zF}%2I)`weqo^oNIPHg#~ZeOGSJ4!L#XZPE=1(mwfg+=ZJKs}p+@Ct# zZkOVByIDP+*N@JRdgFcVxXq{47ex&p^frBMjX0WjLO5gJ?Tn=x_UG{(>Pj?Gdd#jm zt82@y&PMaW9%1&{DkY=DJ;v!vl>XeGw%0Ph zbE+cmgc^==R~=IyAEj%Bh4V_*OZD&EV0QC>=;Sr0d`za5PDr1g^QVI0i8gD_ 
zjpG)%C@9{Kc>OaYH+01-qn5dv$^m@_amBhVIstB`6Bov@L|$WG&0z9)jxhUYo$Xpo zE-8DO>`(qvyZlOj?#0!s8C=93Cb2zSkaO}>p67HuS^q`XHk5ry+H_s+%nq9gJGc%! zFuoSbkdkz4NkmnZPHpa&uWS3dug1=L`YvmGtx|_7|M68GdeK6Aq8H6OUOYe0t&sDB zob2RfIid~wO!*U*Dyux$e$R5+-2bl{V-yXRfBL`qzFkhgs**12zw&1>Gv==NqPTwU zc9ZWqLYc0+IpgeBN~9Mp%6j(b9Ke>|Q{U9%xa9p`=98Cd{~UV#xxb_1i~f^}!mZ+hf)iIh*S24yw_W^U-Wn!@6LX{< z?I~j8=<4wBZ!mY$Z!KM#EqwaN!QXCb^M#)oN@m=%%APpYptbheobo>YvwtUfw_RxZ zxa-|_-)N~M{tGkB@&yH7vT~dhn7_WKf4er*B5gI^0&A=PMcRp76{RPB={Ru*)bKZd z@Rzqd<2gT+p`*jY|HCQP)1onKKW@&ORof?O$Qp3I&6RK61*hmqD-V`E*&NEKprmAU zvZ*`r5qofk<*tOaqJ~o^-<~+NErw^s1*dn~Go@C%o^P$jEAyrI%S0^(7Z;bRl&00K z*&ABw!`@u{WxqAxzWvV|u~k=i1io}@c`TQEb;mGs!!ea##}efyhcKKtQ22aT=*`*r zd-7a6zG&M2eUW=}qvG{*{F6Ss51M_M`K#aG)hrS`59ikfF3a}`;(Rb;36sMTcgGL* z8}jqFy>Gl5*5)Dq`8@aaRQsPd!dj=>wfcOq|FbvNZ%dT}XF6DKdt{?92$}2T1y4dt)VS0arqEhX- zHRbouxb?k1saNsz;=-uDmGfEalhW>&XzucSeB-qHCe!#`O1rbK-+S}OU;o{Y9mRe! zXWaU1XUzS7i&1;yaYeVEQ@Pijd_0AB{>!t5o!d-)U-8sl!p~Cg#Qi(P=T^zh6Ft@; z3>_sWOXo+=kgflj@$diI-TUUo?yG5?s$`@v=j*lZo0XyuQ{!AG%=~=LP?X7*A$!t( zFpa}cJjf~wh#3`em?*79@HUSw=*#`JoQi5(zh4dR=s^u@h^N?{DbAP z3mqOWW?E}+@&4-aqNBHN=F6Wut}e=Sng4D3w=Djs$oyEm4G0*bHnaNW-FN(jq zKRsrb&h4KcCWrNObnvh|*>UsxfxlnhSMBcX;Nb~g#$dqSz{B(X?%tIP1a8dNW_59K zVN2}sZvDUW^RwrP3`$B$S_~;K94@#&$X_lZASn3KeBQ0?+l}U#O0`|fyLl~?k3mpS zP?W*Qz(n}VS0(|$iFMD8%FCYGwl4bWhuf!SUoVyYt*@lCh{=JC$>{&|$p6i6x3Awm z$;D;Kdxe+x*RBpvQTN{y_&@jM)$3`mx8-aua&d8CZQyzSWX0Us_BSWV|B+VrR#LhY zpRKx10L(&1#&)klNVM;bHG^?EjVBy-Vu1 zhqnALel|b*X-7u~7lWi-N4JieYZlwP$wwu;1O+F?wOrU{^4R{)*6Q`1{~=bYM|^&- z!|~;a>#_%0C#6mvY&C5}C+U{S=+ov8~^injeucKoH!-R(^$B%uj z%&~W}6JBh4{-4hFz9ljHYFxiv*zCRu6!E(IA0E3?TXg*G>(3{rc64~GFMRV<`Q|Kr zQ?6e<@z?Js8AS^U&Xh=tX1$cZVa@$JmCw_5%h}(UXsy!4V3(g)XKVf8#=Sp_rI&Ve zd=aWV_CK^&@_%5_{j^K=5TEnpO-L?mRVkeGTCKaoLtkiKE@ND_wS3*BE$d@?bEZ;w$+`WfD;rHoam@HZL{y>e;en#-KBebjSSd&B_$=J*bPj)^E;CczY^v3 z{D0!d{)0XLK}E|Bu7-||4iD~YH7~omKJ2&u|Nn$5_om%tz4?|m-)>*Oemc9#-{hV4 zlmB&goJd*B4puBISDR2Sw|al#?{#mMt*=+M{(0>6PN#y=s4j 
z1t)4c@`F95rts(fm#@BGkJNatkDGY>U4HC#tNYh{&(xts)s~J9AI>VUzo#@T{r}^% z`=*_p*>=-@&E>bBss85hr1N$vU$T$SHa*`s<*E6d8L{=JtF)tFLFFSJ@%pDs#hZya z@dxkDkEp%#jgwPhTK)OGx2=T!UGDj>@;7?-O^~k_F*&%nxTHjJdfPWyIoaj?cje%{ z@$*CZ=lyGMY%HvGK2|^JU*I-qN&|%szb0eB_xs-u{9XF)-`hvu<^Rmi{kc(->pwWB zfQ*u|{Pt<$olE@->hIs3U6i>0#k>8#RvoR(a8S3|nxDTd`Ej^(U9u^o@xN|ZG-Wa^ ztou5(Tifa1>^c%SF7rOeiFUeDsDS-?~gwVPu*|zJ$Psk+J|BcRZ#XZ+3h?KB*?+z2Lv=dHdAExkC2mpW6O#-~Dr4`xC!* zwJoVv5xTQ0d%dVGs4Q^-**kf%dfa+#rlgbmo|@l@2Ll$;swsz`SJ6$g_(^0 z-QQNb9T8k7N*Yt=^S}F}EUtUv8+b_HUF*K!t!BT;-xg}zD1QH~?Y>#aDZ`iZVJUg0 zo4_~uJ=>+G`%kjh`M%?wyBxy-<%XqyK5%3#_qkE{`&Zk24n>9hhK>%8^$9`_ptNsw z;+uW>zn!Jt_Jw~rK1O_q{q=U{09A;rwxD`3Wko`! z{hD9J&rOd#oxHUE`QiI#of!gH1isgQTrQTrTh9K9QQLlvKa;<#pL)o_#U*9e1!hnx zVH48%yn21qoamn?pUKNq{`LO(AMrkrsfYE{o$m+!^6uCC15Xf|Y^VRt zOYeW8_tV;}&UE{=$D#}>e{b)4eY;fE;mEIYE=v{-3fV z+1oI7TvtDBFIICPsb47J<<;xYI{)9&Wc;9TV6uYmukBARr9zS&q#W{L{(g6_=ItoK z8-ia>ezKSPZ9hw#LBDZ#+55BG1*I-@t!-UW4~p<@?`~zg!aenpMX~-T@9LuOHAa7w zA8bFxeL<^H{vPATDuxLk|F7%%kPj^%K?Rg+!b|pWp*!eFqK3 zS&ow&oLGX)a_;9MsSXu5-@jDeabCs$2Zy@Fp6UI1nZHtq;XK2n?}|sZZ+f&i;zr;J zUWXkFo-D5L@{pMBJh6`=+dT`uOL+sL$ zdJW!NPq_r(MQ5YJjpT*LyY|2S=vDYgpm-q8PWo!o*xWLp@-To__rA~w);x}kgto$s0WBu!0S8u-37Ju&a z+jQ$xh7SxDfBr1Yd3jYkO6`O4OL?yrkwZKd(>ppm_9t2L2w$MOXmcq6%1Ug;I;%PCE6a?rF*-``|rmt0eQ~~vDe0d*K0j=HrE$SkqcwNSx6#4zC+Abxf!%R&zSqJT>MgQDC;s4X}q;H3h+D~IS zQYBx>2=dI&7Jin>WBn31llDI~U$aVmMN*K|)8c(5^QLg>?4P{VD*oE4P4d?_Zz<9XRE|44 zX3Sd?Ic-MHCjM)xYBLUe3t5nU=;Tl573tP`EC1AenEAIdXjOsx(N9O7Oui9%ZRInW zg#iij`;ObG=AL5LS7PR`eFSQv9gnx0_5J;q!(Y~~+$82G=ihLrD*oku>%a+a|K}Y0 zTW(d?(b3@}Fk8#$q-=1Zk~4#g)|;K>)$<%@xdhe-94Tb&oe|XXsb+tA{}U$5-)rvH z`0JfmuD7!0_^$r}x%O)FeS&7jB%STQA32LF+fHn?$eQRvK06-|`90;)xlX%Q>b)_x zeEOg3zmkzoOSJu|;3*3$7?yNJYv}#`o-w;%rOnIxuZ}IL*Qgi$=jxIY5Xh(y)X16o zYSQ`t8{Zw@bunU+z`e^67YcZ7E0an*HP`qZ&AAA&aNXTKYad1&V(yw8t~R%j@7qJE zzS$rAT~yT1zVP4g^*`#&8+q^l7whBxth}c8MwY_~y`#gVY0*?Wsq*@kAB%R|cQtXG z&pI}@wdcQ}SpuxwspPSfV~m))K}hiQd_TEp$2awcVx^s-T1uA$cf5MPvpm{1Q=5yy 
zX6uFz;nNxaILS?O)d*J?ka9S5V2X!~1~@sV9Iy;~lvb~E{@VTpdVe1CI=uONYWDT# zVoGX@UQP-;o3P0%^4OHFjvn71>vz49U)kyw8+c#r-M^gs8$YhT*?nIA)Zw*Jksn{~ z{wU+2_`X|vRY%7Og9JVXNv#gi)KAA-jGi9Yx0iSSQ}1tYrAsxRN<8wsEb+JObtX6_U7_{=2onNh@1vikL^V*%QI+S4?Zl#Euq zU{r8sjr@M{%Kpy}-!Fpm>Q09KK$?C$=6^sBVy8uE>HD$ zE9Xr2%+UFMT{mULGMm1+*6;89S)aFB$!Nue`xncnMScxzTjZl!&y~nnlb`nYyz#&F zS!aIuTU^ftwG{s_H=x32r_5-$ET$2G;k zF(LoiSC=@3Usoc8woZNh|C-Y!d#63ORJlY11tm?=e7l$4ZgxhsQDT=Ks+iOi*GCn)2^}y3HScYG6dZo+zYw4= zC@AO*YCkR6Zu#NHJ)4L3PYSPpe)_xV;#ptvOAqH~o8M1fF&*4(6BHDD$?m~a@#m@X z%~P|lo8NsNk!tPDx@<$zfv?-wY-fU1#2))EXjRv@a4eDh@A<2IHmJ?yB4q$>o_;a4 zHd}X|&;EbRUu)0*QLS7Zuxjb0@w?sC!OoZV3;a9#1JXqW^?h5Ge|vlO-R|Fg;NH=+ z*gr3if;xPVYE+U1(L<^{1Zp=1g2q#nm>uSQ$uCX5w0~)M{Oq;2H=R8t2&?Ox54ClD zsQ>eH@z?h$f9@w7tA|x_Mix2yYir9s%>4WHJ-kQMv4TNBP;esS!;9=?>DBM<-urMn zcK7#LU;M#+R8a4Y>*|vF)zWp*yMNBhIk_}E!V=s=2Xz?)6kgs3cP~%p?=al`Z`xNR zcdI`!{zQyT#DK#HzuHUSC8{DLX z_8wipvC4zgdo;_rpNixkjSx@<{E_?g?CVY0c@e3gt~bOxjtxud%fnj!L;4S?uzo(c z9e85F21q9%<5CMZ)YDH$n%It)4I_uiJS zyB#3-&llmTUoFk&ZD#7fyxx;_Y5!H;{cG>m_?P|sBpNmko}$!2@jmatdXD{Te_j&h z-4E)hzxfq?EjkkxluPa_tp4BE``WwpKe!=&Ezb zL{B$xr*>vY-`<)f532)z#8-aa^=6iISY#mFMe{A=0vML)>@11gme&nV;NZ~nxL^EX z=3n3Kdw(o;u8d>&*9G@w@GZ`7I*dD>-+O!W)#_;}*K6|<*YQ|fJCqE{pb)+Mlk|RH z?|ZB-fA*X02J73q@y=;S z5B^`aKL6CZl^p++;BnsJE`MX~16XccVtuaa_0czbuJb*qf4-+!$NKl%!(pdPKD}GB z^XBdJ_&Ixx(^aeu^F6=as`TC*y!m|QjrJvV9RDvx{}Fq;{8Hb!Ra+EqU49d+zIICH zleL@0QvX{$KlEm;S9R6qOVipor~Pdyjo#*a%MvtFVe=$o(bNgg=UB`*ySi3l^0Pg~ zccxd)O4P|RRqF(`pkFfA{hsQ+ng9PEJN59rW{oR2dVOjhHidtCS|7jGXGdRrMQvFQ5!Yc^~Uzn@#D%+ldMW$UX`M=O&89_au6 zVE#}3ezW=g3S0a0SLLm@*Uh_eZd-Kt+7;CTESWp!%>M8E$9`WwYfIfNL9Wc!`r}LX zy-Q!iB&jmbuI`qXfTXhF##gPXH~-z8`1WS&@$WwZepOsDU2hav_i(ax`=Kt&RSY{8 zAAHT|nE7>T_v^P=PZqV_47s&%#)rBkN?$(PEb+g6v9tN+#n$Z2^*bNT(zH}(`Z}+A zd-%P_`A@=%kG0B5@1K?b_-++v#S1x!E^?9lOllz>%S8i+nyK@3xwt4v7 z-`m4lDp@`pulrPGw!KK^X|P;NYh7M$sz4uaV(sB?cT115?0;MJD4eBq&mv7|PovZt_ad%sqPY|fE&%L>EW0vR5 zpC2Y)u9EfJ!j{rh_$uXFr~G$X*PgURlJ1Km7)7sZz&rcckJ(PK98Nefc ztN8sk06g`~n(`_huG9 
zTyo4v{>FCm|GW1Xm@P?WW%_-oYeIa@a!a;{a{Y}Tl(gTqi|(6U*j3bJ$@Ia-<1p_B zz5~jPtVbR`pMSc{?$2G$+7d-v;T8u-9kTs3_b?IiHgjxPwtx~ zv|u_%d8*_d&g)H!t*d@X2}X6O_sw{=X~*-hk0(;Mbm!0W>GNdaV=pC4Q&6k9#7%{j7a@_O+VMP5BA+jTQBhIv-dh zBtHo_gGOblWbPOV-q^T7!Qn%_S|n(2aJPflcU3v#HEM4T7e=@&n$xk9hq3+bOBY_p z_S`9rTc7@#ecj(Pj$w_CT9}ki#Wr{GHE$!kPlLKW#n0zP_hJy`nd8USwcHElYazQDBKsg4LzmpHPr2p>bzi+bT`+D|<4v+q3 zX~RPtOzei!Ci?U|agWetZkaZ9{_nXl1?~?bzN|d@)9{GTp_5$#hwc9K{y+XbFu1_*%67uB>AW#rIZEzTPKc{9t92 zhJ&QHUToMd+q&5r+%=m&PnkQtkn5@J1pb6Y7rN654&Ep@Ctx2h%AllF%hYsg-Esb> zO-FKXm#0f_dw&YFNWxZbQs;$F(s^fCg}*s2I2cv=>4@}CI}N@YVv>`7*4OYCEI#tc z^XHVp>+kmd{K#%Kqoa4a<>d``LhU~si(dZ8-?;40_5bF-KgH@_*>C@S^Y%@}=kwk@ zIR0(%{;Kp-?-z%+22IHXRVth33zy5xJS~{wl;Hn(qI02`oYCd2Tjo2RE-;#Se)|p` zxvsSnyTzV#J2LpE@Jjn`e*XJf_u45#gE?|ZFnoY23Bo0Y-y%%+^uyjmBA&You#93K?f zc5?jwCbXGnvE-Z1-z;|VNpHIEY+e28YwL3fFa16HKL~M$|H;l;n6X8)<#X1FGynF; zx_tYacYJFt|DAaI_=*P?zID7je);Xe>g`Y8PWde4CVYKU;@7S}GbA>5t!6s%OIts8ze)AerffCS)t_8~#M;m2ALoAe(`V8QmQdZPzmM1UIkJmaYxq_4 z{draYY}zO9y611$8fV=XuX}KRPGQXEzmH`iK3dQ0Xg#_t=l-T5i~8Lsqhe+^)mw5f z@1Aclh1$Dm2R<@lpR9nQLA!pnn@NDZd`anGwv9 zrxdrbm`pLiLMl z{nPyxZP;V*^HJ5KTl;o?dt@xa{ATmd_x~)bAL-i%|3A0!9oK1R;}TPe7$8&+WzAe z7CJUx7YqOVaWLM7F=(OO!TE)~hK+??hE4yC;|#?%*?Tb`a~9z&w7D(u{$JVKMKwIh zGD$Iuc#fTyzq9#<+ft^4q63fazdp^`=~3Uveo?)Ck6QK9rl+=ZSIV5OD`YyHpJ=G) zsBk}c_0!;Bf!+7lw>b1Jn1AR9$3r`o!ZlJHUwv9;2pkl*udlOvnRvT=-}fxL!mivY z(!y$|4!PQ&O0riAINB*RSywIbkK8n=Q*IIIm!?L&4^&yuk|1PE88M$jj2}1-Jhi;j8@OZbzEw(dLQW~RIJhhcKW27J z_5e47YRM{w1fECVB{oZlOU#zYm)IS^_Cflw(C>Yp=G)(2V6pGry82Um|Iem>{%^eR z&-7^fg-JWlzgVr(AYb{?ueDxqrpX7kA4mGHKY4x8U;W6J$t^2qRLXCEViG8Hh=cQ& z+v6$yPnf({sVG!e3dSfbS6W@azFDESlttTZ$7g1yqCm}t1N{r+m%N)&!M^1FF=d!OTHh-;uqY+rZJ)X-}U637w&gH+A+CsgQ5LW3S6KK&SRxFs=j~grm~5X~%yZjP z-uq|hRHokNDTj^=HHEq!p8PdC>YfXy!>7%Ty>pM8oydQ!=X~wgoiU6J6V9n*MVigJ zpfTxpDYvk`4a+T6_eD7|`s&Un17kk%ulC!omi;JJZvBNzI&L$JZcn|fBPSWKO;j}R z@Qje)D)}iFTlOBQ)^BD$^~OYcSFTALJ$9(dGW`mD&H^5^%3 z^Y5iEZ(RTXS;@BlS1UX?K&OPqBfAUGOCOfCfnP2BU-Wc|-=Eq64hxWG9a^D!w 
z+WY4W&)JRg^KTr>Po2k*dYqr@p;%|AO_JJ7n?zmC*X*Yl)vN@M$1lm?^kmJO#JVi? z<8~dk`J7VM1+GutKYwa*+w9c9j{MtUS(8onNLj>J=}&%|YcTV{Ml{7;Vk{AH*4<~OU}&p%aHT>3M2)7y{t=TFtOu79-tceehX>9rr5 zjW-?roHl3U+vxoku{*b2p8EORETMy0&o6%8r8QXp|+vnVus|e$G>;I?j z=f3a%AMJg!(`)Ysp4(gh`~6}l>f2NH@9ATqsEB@xX^#%a##d!;a^Y(bSm&8C#jmPk z{uBM*u3`TJ1eORS&p-BJu}4SkVx2YmZw~(#JazhkOhoj*KeO%6eowBCF`L;V+W%1M zQt`6e%A1W7ULIeqC?s8!TjSsVwlrWt;!pMau3KeSGQ2SNDCjiGS=GvRD{svs!CXnk z&a#_63&k&(FKG(-SiQgT>ijtd8@0~dsr;obW6@4la!y7|-V zyM<5ozB#Y;eZz;p#zm~G3TyTU3f}v_N@0b5O?~2rbsSwstKufSY5s8Dj$_`G{)C@T zPyIY!d%ybTF<0Z=b?bt8yXM{&|GG`sUu(klThDLtpF1)CV8zT1qo4>`2K|P-sSHOu z!wUYMeJxvB#i3^V(x_TfP%6N!u=mNM?@2{V!%{`pFZa=Ke34OlZqFZ;n1vN}EpwBC z4%an&5IL0Ax4&k~Kk3e&m%g4@YTqKXru*e7skseI>vC*A`5lb%-}dRzrr8`0b9d>9 zi;Dft`r313Q)YC~M6-%1y85&8ytj)V?=0D05|d-5H?`Mx5mSPb#rv(yvTxf}zweB? z^iDuXy{kZR-;+5y;vUwCryo9Rl03wBJwha8!!xBDN(;)gk4%YGE^YcC?__t1p(Xsl zsoIm4#+zUBO^fb7d~}zOrNH%@YwTuS6+i#{&iAeR#an)TiI^hZ|7TU!SKs+>|9n6D z;zi{4ThIONO*RMp%c$L~|2+Gd*}Ydg`@YI%h0ian`MB3X^5FaXf6lB~_IBZ#Gk&+Y zQsw`aKK{0&J?*%Owfa+=?`glk1uYZE4mVG|e)9kHRO4IUMDK1f{ zpKZD|M>#;@i`|wdXWnwKMI61Zvu4-q&HDEbuj;w*Vf%c^QrF{qWnLFv+$*t=rTn+i zL7f)abMxPwuRJ$5Ry1qlv^MEqQ@4H=KmF^C{hnxX7r{^ZEAMh&|9oN6%$r-~=gf@$ zxqj`=Rqfg~hc0d}Vm-2^Mt&XZQHKBhRtw(j;5+9Vt5mb`?P>P=r(ahb{Jr}2|5w6` zzY5-COTGSS-|>5E{~xog4BJtl98&cDj(5bJZl*N74WZ|@mK{7&tu9QYB~3d^IDdA^=PHX;$aNa zQ*Q^>3;t9S_!G#`QZSF}pW+JT8ey#i|C3kx>|r?6%qE^Pg(c_sRLS*C5%qKaYRopW z5Zj&_^KEwJk1NMp_y13Q_pax5$q|?B*30A-0@ad(Hg7YLYu(bZf?-BT!Twy*V?}CeuQTGFUJGbi^BM>|J<4GcQy8N z=gIssU5;9g=BqNyi%Xr)*p}{l@Yh$3Z)RHxKddx`MYYKPhWiAiY4xyY^sBL zu4-*KgQJYA^BrfI$y!dAij^8R&wbt~aL-Gjze-nY$BNlod<*_O+jC~kI**;(iryE> zoS#w9k><6%V)}~iFO>!QkKeX;d~|=YKJ97#+aNCs2Sb$viNv2<9ep)4l9|6hiTJfi zbj!xmZfOyTl5viwKe5c)>zDPFU)5n1!;j6f?VoQvFw6i+GDjVCxSWgQ4*}%1|@YMgJt23YOo2cTa*PY3Cu&3g; zU#skz=^U{#yEiP8;b82|c9pnQ_@%_wQvTY_4S$Ms^8VhDZ_2I8-?{Z$t#MXY!^Qje zGbS3!F!Y>z`SiZG!#%AL7>-PtjMDS;Vm* zt~`t-L+6_2!!Wr@egP%rd;5RfS+|hE~$E{Z0VwoX)9HoB$MA6U99Z-)yT0oM|_)f@nfUpXshF|O;l#@O222` 
zQ+kinnEjzzg`tO0`)?~If5A1^&k9{=ZQXRH>co<+w-FNSf7MkxE}WVWruL>qplL(h z$A7QB7R$ffl#+AY>P-Gc3xO-nADj&KKIjboX}T%CR$zAQ-hX#WcZbEzo1oiy+^xdB zLtb>vDQQn1w{GFUS6@!7-@NVmCEb|x&=Yeaf5n_zd-T8inScCxYtw3;@0fB!ea%IU z%C7h9vPaVYKMZ-t+sL%=&CRV<>(*w3sek18u;75Ho!!nRwLqh(w*3KaCfuDGcFLzZ zo?rKj_O$j|H~F>a`iW&9j_p3%|B~%oyw5g?>$g^&VNJG>e=625?a$;hX+rMZt>&O< zt~*nM*s>2DtCp+P`=3AkxYE+ge?Pv{%5Hx6u_8T*7C=t>mxAyUw?tdA#HOW7Y}%<@u%S8gjRlq;57!TKhTd^tY)~ zJvAIQhgwz5EREilD!uLfxwSVhSsR>RW}oioZGX2o_ltO_5T47 z?SJ1MhgPaRn>6=CfMMTM-LF2fpI)ZfEL#(|=5yS__t*K8C$%`V1o$vn#vNE>Iw5z5 z{rbp)Up=1o?=H^fpMS1(bI5CvkC$Y+HymyE4Anjxs;zA(_G-V@N2v!@1@oV^E?>Mt zPw+?F-AA1tx?UTJoiVSQY`SCP=3f45aymA9-e?F5tP-f%xa5)Aznkd-e^$SHE8{=6 zwZSHEHN%!&)AFnZ-ff>P%C(|DE2jGOUx81T%ay>ip&2`BkmbPLqzFW?-o0z@)t^Ci@{A=d9P3L{%?_UYDIO`k# zzs-2NU55M5mVfW-r`mp;K0k9#e`rhgnr|8`P9FUI4PhJdb*@Vm#7rqXwr9b%HIg6y zo=y7Y`Kw)M!DLaZUyRefJa*2IN(67`JoqhDnx`Q)fA-39k;Llw(?htpkn^Tx+!vzcb(*(=TCjQzvO{!`j*D!c^mHk zOf}7S+MWHf?W}2b_4ck#55F6E^GjAb%!}N;lr!4)dKyQ`a3SOR>Hn%`3OMp|vI#>JX^L!d z!Y5xOOHBp;23UQ}ntp^w(Y;FgmzT)4J@tS7zgX`-eP92>RrgQ%?hlQ(7R>8>7^RS~*-!WK`>*(EyYAio4Mv>DyF8^crv?54P?BB}a z{NQ_uX~UwTMZ&4B`dgOX-uC|7?I2@U21|wrwhxEw0#v>-KaTCyv9$~Lo4V$Yh}*hN z0Zm_i_&FaBni1`0x8a2IHC~AwdHzK@hUqbX_C5RjB<-PqaN?An$_mpbk>CHPeJSAp z?S{6xmhjZ+=)~_u-070i#ogz6Y_9Eo@yaRL^~P~k<5JtsqVLx?U0=T7EVpmSRnz}_ zm*w4@&AR{&z1XIJuR5U*6XXEmrCS6Sn8`-T$gf7zs{<1-JQ$9C;weJS$wk8Z1v<7{#Iu! 
zrv0_}cjxBX*0fKSTY}s+_?yb@cva}|CwNuLizRE9baB6%C@`xcEqd1$FE`nbe-16( zb#+qH9cix#>-QF{s`Hy?dcMY{K>rHsd)Gb9SFXBzb8b+boZ_UiOS_G2fye1`zx^hXaXgb5=b7#ldT>*D=|V>Sd%Mq0-{POK?TLI-r9;Glr}O7cPyeAl z*}m{+;zW1Nh9ZVkCMWZme*LI z%JOGX(ZZ>l7ETvste*EbGBet0L;iaGmn;YV^e(yiW$T&P7q?H}xiR}-VN2Jtymu>K zTRnE~G2duAx4rP+mx^O^^Nxu<%gn6oQCBn$Fnb&wn3u$NwD#DfE0yPFnyoH9y5s0p z!EZ93A5UPtRB&UX9s9be=@a^^^MB=STeLiv3AEp*y8POmoNp^XN$9minD|XRE1f8^ z&|JzdW#)Q*EBWVh5{_I$RT^mXZK1_h6ngz#mZ zP7m^y397KkYiIBIQuANmJ8hn#h@oI?ut(3q(p<|J-p~hjKj!&;n(-%5^!_C7fcICg zhwR_m`E=`yUi-hi=4w(Ra|NCTl=uJtx59Mi-K`($oupqZ(U`oUeN!mI55FCUm05XJ zt~a%_e9*B7zan}3Lv+-Co9ubwbB(T_wv1@f53LDMlXZ!7@7x`@`nl9s?+DL5^`X~) z%`8=QSZ%e??tE>$sh-X}?^z8Se%QRIDVJHPW|+hJ{p@ zj`gx1(N_e2^4<7Wk(=6ZBKhJQIg9i?*;iceu$KnKEHnJ8`?vmM)FR&F))Mz`N4mxD zd@yU>Z#@x4e%t6+MFwu42&0(=f<_8^x!=30oL$@>|KZO@g0qm`GY4nRKrw^Zu`+)v~J3yr(BV-u$fP{pZhN^VIz-YK}Rr@&T>ft~}8@<@3V%_ovtH z_%!k0)9U&UUuN_#l|4FX%WgUQ43D!G$5bZWQ{ZJXB0*fSx{#^>;fPC>nOiqod>Nvcfa?Ee$__SpOx^OrqNyE$j; zpVhn;+{d_nD<)7Hejd>~*dCUFG}(i@C2oi(@#fb!B1fk-6z>I`6xr zyxH^R;_YLxY}dpOF+~_Q>^Lo6Uvi@BZSbL`8|E%eT{CTl-;ZVD(7Pa-=kSr)lU0|0 zH1T%!-~I5@t)f#E`)fZo_g*i4+`dY2ilCXK+%EI79Y=nI^mO<0yxd`NfvX{S^ZAo~ zdNI~h_|uEezHNK+^{>!=&CAZl;&qbN>ti<9RB!J7@xPB{#;GOUE{&J&@A)_TWpotB z^)ri%Dy-J0YdOx`_Pf*h=AECAlXHSz?^U%Zp{I9^d`;Rikzp zLj!1O!0b<<1xPpe}dB1=a%d~t$IYL*}X2-uCVH{dHTAh zrfM}N`~4piQ)jon|Gu-M!^bq@SfTxKt9e}WQ){a>?mu48{6J=PPas>e_bi54_gAgX z>+LgtIqBMz_O14^${g$w25p9nPaRjMZrpj@#^nKXx?XtiB}IR+h}G=tHqV|a5OnG4 zj#CPOinoIs=ZD_2u;2f*Q}X}Iy@ppCxfblZ z8a~HaPdIB`c;T9+lC1L_`=QhN`szwbwgu(ie6PfgVU)jH>VMnI;WQotzscZ;5Xk5C1XD5$Z z#Q*l21#3V%s;bg9nV!+BZ1!n+b@GXG#S?9Do$X3F-bQ{I?pl}GCeP=ubczBkQ@CUd zDtXqlFfyCpdaSIywnMY!%r!2(X3Z@#wv@eEp(cAj!Iej%;v&!P@^<6m zJ+JHqEDA&aU3~vG?8kdX)rPk#_TRpAku_WNUv;X_ub$kM^KVZ)^j{-*--5@NkNeNx zcFWaeN$@$#LWA88rn+m-uMKnKcV;>st~EL3(cif#U(>9AeK`OA^s~@^UhlbeRDN&P z{hAjoW%(_k>|ZwfnjSeeeG?J$iVI&nt#85hq;_;%nep}A?42KquHA0m436*GS4*Z| zViwj}^C#kHTczjHx<~Nc(j7B{fBrXEUwqG-$^LEoHy1gP>1W=|H|+eF5w}+NY~}ME 
zQ;p6Vh%M}SmM>qmU?ph1svu|s71J^w#su)XWzeQUMF;S5QBb-Ag*ZqBbZIGQcLykV zKpVk8dp$u);EQrW>Oe6HQlzv9lm@`cK%oy-2VPGKQUqBD3Q~qeRR?ia;#0Mxd}gj_ zhmY>@IJ-%p#aCuU7Y~7#WGX6MDz@30pI`Rlr}NMM(_^Y!{|QR6+^c!~_056g>dlX* zZj=zK%_{s?H{baG9ZuQn{C1yH{%!o*;jw-pdqc??(B|M+Gb82dkM8Zie%XPRqSnbD z1+BC*kN&(=`%~xpltW7%@7w+UggUoU(tgukNo|$wcGxbpFh2AU36^y{>tm&VZS=vj~l8@?|r>R`gZ>f zJ6G0*FS;B@L7O@L|36sGzIbzF(aTMs{g#XWIXc&WnECf-%7ehxV!_{Q?Otpbt9>=; z&4J|pC#Oz#1~9$3vBS>#&!6I(pQQ87eEZvfebe&(`RnGSPu9OYp>e~W_l>{VSG~R9 z_U+DY+vuvVTW_vA&j0Ma*?iDG*nD@*1sn9X`3Ev81TB7bYn#IV|Bam6CKzihK04=B zqj=-|&fJ=JxAvOf{`sMN_2dV)ulMhadDr~=;XaoXgWqd6?DOBtv|!`?TYrx%jES(X zeAIi#J7JHD{hqSV+X{EOc>munyWsBvm)RctvtCMk&Xm^JCh$-4wf?^^kA7`CJ$?O& zYk4cV0=539-ngA!G28b@g_0J-l77W)yxhWtxe>ExWbnOGIQe82-_+_WLJCRyukyFF z&igH^|Il#Z2m3n-p+|0~yu7OY>i%!hHA^3=$u`vqz7u9X@YQ_%_lMnYgW2RJGk8Ti zUEZYkN`oO$K;-qY7-SlCNl-Z4M9(&xc4W3`vA z8k2upRX+NU z|6*ADeD3V;(MOGT@70%#S@r(^#hNooGDmamzAw0W@81rS%9M|>@5|1t-(?woJimjb z`sm+Vy+8lRvnpKspZ4DH?e54$cBlMqyKnRtkZk!T*{QSGH>@}!Co7{mIdlF|mYFXr zwtRl5Ub{exVM+MHjgOMN)D6wAo2@&i>ud6A)4?fzjN3&gHm%fMwsU%-Bi)mtKCnIq~;4kAKXk6b zTYr|P7tQ=;`<{Nc?0#2~;04S3ce)A!L*2H$-+t(?@wvb<|BK}p?SuIL?9KiC&iLH@ zleH%wY?r+^y~4h7eMS3^y)%upj%|ANd()aPvG3E)JfHjQu4_Sm`Sia(FRZ&(9mp2+F95)sCe!b5_>X_-#mh$s^Z@==~D7R|r(fd=) z*Pni`Ub^?uE#-cO)FXdCXgVGDk}Zev(DJ~Et*68OZ~;` zTf7gnPdNXCZ^N@CIV)m}zq{-gdRmz8BQdjOsiNTAd((N$LbLDw<(>SuV*RYGviFqx z9`5b>*yI0eU2xv=WV_#O1&+VF%cn1y^6_1s{>jUibI$lr-~5J6q)>6q8_#2Pzl3gy z*0!Bku*LtD`&Qk$#b)mR-S*ZloAzttHveaPH2A*R{N7v@EhNftX|apY+L8x7GZt^E zxbuja zV%!+B`{z7kmUuRMwYeFeFY&1+wRafmO1JEe_3O$$FzNWuQ)&+ib>9S+yv<$yw%@d& zTexFUz`dMD_V?DTc4cIJBjNgHd(^~Tscc6Jm^j`yO`LVRe)A@mpnwUY$xkzd|FktU zGECB1@hDbb`g~OH{I&N#zWNf84Ft~v9CuiK?&U1WBc#W$rieDQtT^)Kt*T=q=+8>e#Z^1CH^ z1y^~??YGQ0a%Is=ohpHjj=$GJ1l~<+VMsWcKaGLQpCep!qTh|+)9ITNzsheH=sEvo z!OQlI^URt8`;8WI|_^-nD;oqx|v6Xd}^dt}X{H!i-tP1wmQci-iLc@y;x6}jZh4>ng9 z7iBQ&eBdU{l)svB(wl`5my?w4YRJs@-1RJ9{lZ1pH93)MWM$)y-13`t%vk6D(I={p z&oU|~eT#nlcKeR@SD(Co7pppOSj?Hculvk})Ys<8ys1sgmU=9Dd!S9s^UI7I?sE#a 
zZ2S_dFU(PKJ@UP@;-1S-YWD0doM+T%yJ>FuExAnPig#PPZtAZKmHTZH9l!Uso#T;j zntNL;8J_UG5RlM_PF>Wt>CEC1Gat8?IZB^1qVzTvZ%hfbQfyy7OD9q2y5q6eCT;7l zehLo`bU*rA`yRjWr@SOS*Cq3Rt!1^noPSiM$1`b8_wSTP;$jO$)bj2G?`m1eW4=dt z$L{SyOagKN8xsF|-~V0FUp{5eYn}NgcehQr_;$wm-!UOuE|*!@zuhYRG<40I;GF)K ze}ylJH`#taWoZ$<@vZr5VV9iw$=UObzP?~oNI$S-LT9qil-U#0ejoX2w#J8Ni^{3) zgCe>KQnP34ZDf~eU)q)G{G#Q>kryFJyzy$=H~NTwT(?;Ghet;h=KD$U)A($EoLRLe<{SU%(;?rY zW3s6aGnlGvHt8czP%ad$slIW_#Imwb@qMpU9=?pKtoh$x|FJ$TUehP(E-;T zwVc;4?w0dq#c`PrPX2zkFWKwb^SOps%fEg3czJp4 zK2X2HNH*lUyZxm9`)B`17o6ysayeDmc$v!X9g3hcag>Zq9<)B#^+d^N%?y?Ra36)_ zz6{=83cSaIR~2MW9;j!8w_^nFis4m7oRuLA5v-u#0c{dI|0dJox!USji2qfdpL7gk zxbyJy)RT|Sn#Y|FtIJC)yL{a|_4@ry_qYUKCZDVRy7G;(e(u?{`Bs^!UnJF^>7I=H z^`h-f{PUQMR3GEb_iByv-JkC-J8r;1RZJs+V9^!&o-&GgQwwxQBjS5*+K38 zwBmb~d97a>AN>E!e}Br}$e-Gi4{w~_pOU?=#I@}Dy1h4#&X2qEu@JNcjN3qqscL@t zv!0aoF$KP5H7QG(y!-n+x2TFT{#E=TzyHIpeUg*)q-RQ9jA3oy5drP3EBo_w@z4L; z-~B2B?>6i@(NJ(LT0iZ1Xv_3UQOC4%*2hCQzA)s^-MT_)P0S4cb#b=!E5-hMvgw<* z{6Bo&etP}=4YlEAXL(oe(t5mIE4SS&B-j1;=lw>1ne5+kuu92`X8dK0%hI@ZuI_h% zo!v@-8Fd-aAYl04h`qjJq+uOB0>AjXyu9JD@({7ms9UWifRsQZS{o3{BNvHX%{knU9-O9N; kEf=&+wWsrcd;=px%ASkK`qL%%f$lr?boFyt=akR{0D$o3e*gdg From 5ccfb2cfeefe7cecf5102c3ac8e0802b01b8c728 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Tue, 5 Jun 2012 14:13:28 +0200 Subject: [PATCH 43/92] Remove cluster settings vals --- .../src/main/scala/akka/cluster/Cluster.scala | 25 +++++++------------ 1 file changed, 9 insertions(+), 16 deletions(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index 0b2b3919f7..b947782a9a 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -372,24 +372,17 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ val remoteSettings = new RemoteSettings(system.settings.config, system.name) val clusterSettings = new 
ClusterSettings(system.settings.config, system.name) + import clusterSettings._ val selfAddress = remote.transport.address val failureDetector = new AccrualFailureDetector( - system, selfAddress, clusterSettings.FailureDetectorThreshold, clusterSettings.FailureDetectorMaxSampleSize) + system, selfAddress, FailureDetectorThreshold, FailureDetectorMaxSampleSize) private val vclockNode = VectorClock.Node(selfAddress.toString) - private val periodicTasksInitialDelay = clusterSettings.PeriodicTasksInitialDelay - private val gossipInterval = clusterSettings.GossipInterval - private val leaderActionsInterval = clusterSettings.LeaderActionsInterval - private val unreachableNodesReaperInterval = clusterSettings.UnreachableNodesReaperInterval - implicit private val defaultTimeout = Timeout(remoteSettings.RemoteSystemDaemonAckTimeout) - private val autoDown = clusterSettings.AutoDown - private val nrOfDeputyNodes = clusterSettings.NrOfDeputyNodes - private val nrOfGossipDaemons = clusterSettings.NrOfGossipDaemons - private val nodeToJoin: Option[Address] = clusterSettings.NodeToJoin filter (_ != selfAddress) + private val nodeToJoin: Option[Address] = NodeToJoin filter (_ != selfAddress) private val serialization = remote.serialization @@ -424,17 +417,17 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ // ======================================================== // start periodic gossip to random nodes in cluster - private val gossipCanceller = system.scheduler.schedule(periodicTasksInitialDelay, gossipInterval) { + private val gossipCanceller = system.scheduler.schedule(PeriodicTasksInitialDelay, GossipInterval) { gossip() } // start periodic cluster failure detector reaping (moving nodes condemned by the failure detector to unreachable list) - private val failureDetectorReaperCanceller = system.scheduler.schedule(periodicTasksInitialDelay, unreachableNodesReaperInterval) { + private val failureDetectorReaperCanceller = 
system.scheduler.schedule(PeriodicTasksInitialDelay, UnreachableNodesReaperInterval) { reapUnreachableMembers() } // start periodic leader action management (only applies for the current leader) - private val leaderActionsCanceller = system.scheduler.schedule(periodicTasksInitialDelay, leaderActionsInterval) { + private val leaderActionsCanceller = system.scheduler.schedule(PeriodicTasksInitialDelay, LeaderActionsInterval) { leaderActions() } @@ -983,7 +976,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ } localGossip copy (members = newMembers) // update gossip - } else if (autoDown) { + } else if (AutoDown) { // we don't have convergence - so we might have unreachable nodes // if 'auto-down' is turned on, then try to auto-down any unreachable nodes @@ -1055,7 +1048,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ val views = Set.empty[VectorClock] ++ seen.values if (views.size == 1) { - log.debug("Cluster Node [{}] - Cluster convergence reached", selfAddress) + log.debug("Cluster Node [{}] - Cluster convergence reached: [{}]", selfAddress, gossip.members.mkString(", ")) Some(gossip) } else None } else None @@ -1091,7 +1084,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ /** * Gets an Iterable with the addresses of a all the 'deputy' nodes - excluding this node if part of the group. 
*/ - private def deputyNodes: Iterable[Address] = state.get.latestGossip.members.toIterable map (_.address) drop 1 take nrOfDeputyNodes filter (_ != selfAddress) + private def deputyNodes: Iterable[Address] = state.get.latestGossip.members.toIterable map (_.address) drop 1 take NrOfDeputyNodes filter (_ != selfAddress) private def selectRandomNode(addresses: Iterable[Address]): Address = addresses.toSeq(ThreadLocalRandom.current nextInt addresses.size) From f02793ebd6d32c4a23d85d7aec0725600ccd6657 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Tue, 5 Jun 2012 14:13:44 +0200 Subject: [PATCH 44/92] Refactor cluster startup join in tests and fix barrier race * Refactored common code to MultiNodeClusterSpec.awaitClusterUp * Fixed some race conditions of barriers --- ...ientDowningNodeThatIsUnreachableSpec.scala | 24 +++------- .../ClientDowningNodeThatIsUpSpec.scala | 23 ++------- .../scala/akka/cluster/ConvergenceSpec.scala | 12 +---- .../GossipingAccrualFailureDetectorSpec.scala | 8 +--- ...aderDowningNodeThatIsUnreachableSpec.scala | 48 +++++-------------- .../akka/cluster/LeaderElectionSpec.scala | 13 ++--- .../MembershipChangeListenerExitingSpec.scala | 17 ++----- .../MembershipChangeListenerJoinSpec.scala | 18 +++---- .../MembershipChangeListenerLeavingSpec.scala | 17 ++----- .../MembershipChangeListenerSpec.scala | 10 ++-- .../MembershipChangeListenerUpSpec.scala | 14 ++---- .../akka/cluster/MultiNodeClusterSpec.scala | 28 ++++++++++- .../scala/akka/cluster/NodeJoinSpec.scala | 4 +- ...LeavingAndExitingAndBeingRemovedSpec.scala | 11 +---- .../cluster/NodeLeavingAndExitingSpec.scala | 11 +---- .../scala/akka/cluster/NodeLeavingSpec.scala | 11 +---- .../scala/akka/cluster/NodeShutdownSpec.scala | 11 +---- .../scala/akka/cluster/NodeUpSpec.scala | 13 +---- 18 files changed, 85 insertions(+), 208 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala 
b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala index b241899ad6..d78afcdeb7 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala @@ -34,13 +34,10 @@ class ClientDowningNodeThatIsUnreachableSpec "Client of a 4 node cluster" must { "be able to DOWN a node that is UNREACHABLE (killed)" taggedAs LongRunningTest in { + val thirdAddress = node(third).address + awaitClusterUp(first, second, third, fourth) + runOn(first) { - startClusterNode() - awaitUpConvergence(numberOfMembers = 4) - - val thirdAddress = node(third).address - testConductor.enter("all-up") - // kill 'third' node testConductor.shutdown(third, 0) @@ -50,28 +47,19 @@ class ClientDowningNodeThatIsUnreachableSpec awaitUpConvergence(numberOfMembers = 3, canNotBePartOfMemberRing = Seq(thirdAddress)) cluster.latestGossip.members.exists(_.address == thirdAddress) must be(false) - testConductor.enter("await-completion") } runOn(third) { - cluster.join(node(first).address) - - awaitUpConvergence(numberOfMembers = 4) - testConductor.enter("all-up") + testConductor.enter("down-third-node") } runOn(second, fourth) { - cluster.join(node(first).address) - awaitUpConvergence(numberOfMembers = 4) - - val thirdAddress = node(third).address - testConductor.enter("all-up") - testConductor.enter("down-third-node") awaitUpConvergence(numberOfMembers = 3, canNotBePartOfMemberRing = Seq(thirdAddress)) - testConductor.enter("await-completion") } + + testConductor.enter("await-completion") } } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala index ff048a2eda..5f778c25d1 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala +++ 
b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala @@ -34,42 +34,29 @@ class ClientDowningNodeThatIsUpSpec "Client of a 4 node cluster" must { "be able to DOWN a node that is UP (healthy and available)" taggedAs LongRunningTest in { + val thirdAddress = node(third).address + awaitClusterUp(first, second, third, fourth) + runOn(first) { - startClusterNode() - awaitUpConvergence(numberOfMembers = 4) - - val thirdAddress = node(third).address - testConductor.enter("all-up") - // mark 'third' node as DOWN cluster.down(thirdAddress) testConductor.enter("down-third-node") awaitUpConvergence(numberOfMembers = 3, canNotBePartOfMemberRing = Seq(thirdAddress)) cluster.latestGossip.members.exists(_.address == thirdAddress) must be(false) - testConductor.enter("await-completion") } runOn(third) { - cluster.join(node(first).address) - awaitUpConvergence(numberOfMembers = 4) - testConductor.enter("all-up") testConductor.enter("down-third-node") - testConductor.enter("await-completion") } runOn(second, fourth) { - cluster.join(node(first).address) - awaitUpConvergence(numberOfMembers = 4) - - val thirdAddress = node(third).address - testConductor.enter("all-up") - testConductor.enter("down-third-node") awaitUpConvergence(numberOfMembers = 3, canNotBePartOfMemberRing = Seq(thirdAddress)) - testConductor.enter("await-completion") } + + testConductor.enter("await-completion") } } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala index a76083b0fc..a7e5712cfa 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala @@ -40,15 +40,7 @@ abstract class ConvergenceSpec "A cluster of 3 members" must { "reach initial convergence" taggedAs LongRunningTest in { - runOn(first) { - cluster.self - awaitUpConvergence(numberOfMembers = 3) - } - - runOn(second, third) { 
- cluster.join(node(first).address) - awaitUpConvergence(numberOfMembers = 3) - } + awaitClusterUp(first, second, third) runOn(fourth) { // doesn't join immediately @@ -70,7 +62,7 @@ abstract class ConvergenceSpec val firstAddress = node(first).address val secondAddress = node(second).address - within(25 seconds) { + within(28 seconds) { // third becomes unreachable awaitCond(cluster.latestGossip.overview.unreachable.size == 1) awaitCond(cluster.latestGossip.members.size == 2) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala index 27a012d32e..9df3e20d68 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala @@ -36,13 +36,7 @@ abstract class GossipingAccrualFailureDetectorSpec extends MultiNodeSpec(Gossipi "A Gossip-driven Failure Detector" must { "receive gossip heartbeats so that all member nodes in the cluster are marked 'available'" taggedAs LongRunningTest in { - // make sure that the node-to-join is started before other join - runOn(first) { - startClusterNode() - } - testConductor.enter("first-started") - - cluster.join(firstAddress) + awaitClusterUp(first, second, third) 5.seconds.dilated.sleep // let them gossip cluster.failureDetector.isAvailable(firstAddress) must be(true) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala index e8b956e87b..ffbd4eb287 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala @@ -42,13 +42,10 @@ class LeaderDowningNodeThatIsUnreachableSpec "The Leader in a 4 node 
cluster" must { "be able to DOWN a 'last' node that is UNREACHABLE" taggedAs LongRunningTest in { + val fourthAddress = node(fourth).address + awaitClusterUp(first, second, third, fourth) + runOn(first) { - startClusterNode() - awaitUpConvergence(numberOfMembers = 4) - - val fourthAddress = node(fourth).address - testConductor.enter("all-up") - // kill 'fourth' node testConductor.shutdown(fourth, 0) testConductor.enter("down-fourth-node") @@ -56,38 +53,26 @@ class LeaderDowningNodeThatIsUnreachableSpec // --- HERE THE LEADER SHOULD DETECT FAILURE AND AUTO-DOWN THE UNREACHABLE NODE --- awaitUpConvergence(numberOfMembers = 3, canNotBePartOfMemberRing = Seq(fourthAddress), 30.seconds) - testConductor.enter("await-completion") } runOn(fourth) { - cluster.join(node(first).address) - - awaitUpConvergence(numberOfMembers = 4) - testConductor.enter("all-up") + testConductor.enter("down-fourth-node") } runOn(second, third) { - cluster.join(node(first).address) - awaitUpConvergence(numberOfMembers = 4) - - val fourthAddress = node(fourth).address - testConductor.enter("all-up") - testConductor.enter("down-fourth-node") awaitUpConvergence(numberOfMembers = 3, canNotBePartOfMemberRing = Seq(fourthAddress), 30.seconds) - testConductor.enter("await-completion") } + + testConductor.enter("await-completion-1") } "be able to DOWN a 'middle' node that is UNREACHABLE" taggedAs LongRunningTest in { + val secondAddress = node(second).address + testConductor.enter("before-down-second-node") + runOn(first) { - cluster.self - awaitUpConvergence(numberOfMembers = 3) - - val secondAddress = node(second).address - testConductor.enter("all-up") - // kill 'second' node testConductor.shutdown(second, 0) testConductor.enter("down-second-node") @@ -95,28 +80,19 @@ class LeaderDowningNodeThatIsUnreachableSpec // --- HERE THE LEADER SHOULD DETECT FAILURE AND AUTO-DOWN THE UNREACHABLE NODE --- awaitUpConvergence(numberOfMembers = 2, canNotBePartOfMemberRing = Seq(secondAddress), 30.seconds) - 
testConductor.enter("await-completion") } runOn(second) { - cluster.join(node(first).address) - - awaitUpConvergence(numberOfMembers = 3) - testConductor.enter("all-up") + testConductor.enter("down-second-node") } runOn(third) { - cluster.join(node(first).address) - awaitUpConvergence(numberOfMembers = 3) - - val secondAddress = node(second).address - testConductor.enter("all-up") - testConductor.enter("down-second-node") awaitUpConvergence(numberOfMembers = 2, canNotBePartOfMemberRing = Seq(secondAddress), 30 seconds) - testConductor.enter("await-completion") } + + testConductor.enter("await-completion-2") } } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala index 5a155fc195..ce4d5a8042 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala @@ -33,26 +33,19 @@ abstract class LeaderElectionSpec override def initialParticipants = 5 - lazy val firstAddress = node(first).address - // sorted in the order used by the cluster lazy val roles = Seq(first, second, third, fourth).sorted "A cluster of four nodes" must { "be able to 'elect' a single leader" taggedAs LongRunningTest in { - // make sure that the node-to-join is started before other join - runOn(first) { - startClusterNode() - } - testConductor.enter("first-started") + awaitClusterUp(first, second, third, fourth) if (myself != controller) { - cluster.join(firstAddress) - awaitUpConvergence(numberOfMembers = roles.size) cluster.isLeader must be(myself == roles.head) assertLeaderIn(roles) } + testConductor.enter("after") } @@ -71,7 +64,7 @@ abstract class LeaderElectionSpec testConductor.enter("after-shutdown", "after-down", "completed") case `leader` ⇒ - testConductor.enter("before-shutdown") + testConductor.enter("before-shutdown", "after-shutdown") // this node will be shutdown by the controller and 
doesn't participate in more barriers case `aUser` ⇒ diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala index 8932eed6ee..cdf809187a 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala @@ -18,13 +18,13 @@ object MembershipChangeListenerExitingMultiJvmSpec extends MultiNodeConfig { commonConfig( debugConfig(on = false) - .withFallback(ConfigFactory.parseString(""" + .withFallback(ConfigFactory.parseString(""" akka.cluster { leader-actions-interval = 5 s # increase the leader action task interval unreachable-nodes-reaper-interval = 30 s # turn "off" reaping to unreachable node set } """) - .withFallback(MultiNodeClusterSpec.clusterConfig))) + .withFallback(MultiNodeClusterSpec.clusterConfig))) } class MembershipChangeListenerExitingMultiJvmNode1 extends MembershipChangeListenerExitingSpec @@ -46,16 +46,7 @@ abstract class MembershipChangeListenerExitingSpec "A registered MembershipChangeListener" must { "be notified when new node is EXITING" taggedAs LongRunningTest in { - runOn(first) { - startClusterNode() - } - testConductor.enter("first-started") - - runOn(second, third) { - cluster.join(firstAddress) - } - awaitUpConvergence(numberOfMembers = 3) - testConductor.enter("rest-started") + awaitClusterUp(first, second, third) runOn(first) { testConductor.enter("registered-listener") @@ -70,7 +61,7 @@ abstract class MembershipChangeListenerExitingSpec val exitingLatch = TestLatch() cluster.registerListener(new MembershipChangeListener { def notify(members: SortedSet[Member]) { - if (members.size == 3 && members.exists( m => m.address == secondAddress && m.status == MemberStatus.Exiting)) + if (members.size == 3 && members.exists(m ⇒ m.address == secondAddress && m.status == 
MemberStatus.Exiting)) exitingLatch.countDown() } }) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinSpec.scala index 2f82e12506..c07ec19f77 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinSpec.scala @@ -17,12 +17,12 @@ object MembershipChangeListenerJoinMultiJvmSpec extends MultiNodeConfig { commonConfig( debugConfig(on = false) - .withFallback(ConfigFactory.parseString(""" + .withFallback(ConfigFactory.parseString(""" akka.cluster { leader-actions-interval = 5 s # increase the leader action task interval to allow time checking for JOIN before leader moves it to UP } """) - .withFallback(MultiNodeClusterSpec.clusterConfig))) + .withFallback(MultiNodeClusterSpec.clusterConfig))) } class MembershipChangeListenerJoinMultiJvmNode1 extends MembershipChangeListenerJoinSpec @@ -42,15 +42,6 @@ abstract class MembershipChangeListenerJoinSpec "A registered MembershipChangeListener" must { "be notified when new node is JOINING" taggedAs LongRunningTest in { - runOn(first) { - startClusterNode() - } - - runOn(second) { - testConductor.enter("registered-listener") - cluster.join(firstAddress) - } - runOn(first) { val joinLatch = TestLatch() cluster.registerListener(new MembershipChangeListener { @@ -65,6 +56,11 @@ abstract class MembershipChangeListenerJoinSpec cluster.convergence.isDefined must be(true) } + runOn(second) { + testConductor.enter("registered-listener") + cluster.join(firstAddress) + } + testConductor.enter("after") } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerLeavingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerLeavingSpec.scala index 089f241849..41b69ce7b4 100644 --- 
a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerLeavingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerLeavingSpec.scala @@ -17,11 +17,11 @@ object MembershipChangeListenerLeavingMultiJvmSpec extends MultiNodeConfig { commonConfig( debugConfig(on = false) - .withFallback(ConfigFactory.parseString(""" + .withFallback(ConfigFactory.parseString(""" akka.cluster.leader-actions-interval = 5 s akka.cluster.unreachable-nodes-reaper-interval = 30 s """)) - .withFallback(MultiNodeClusterSpec.clusterConfig)) + .withFallback(MultiNodeClusterSpec.clusterConfig)) } class MembershipChangeListenerLeavingMultiJvmNode1 extends MembershipChangeListenerLeavingSpec @@ -43,16 +43,7 @@ abstract class MembershipChangeListenerLeavingSpec "A registered MembershipChangeListener" must { "be notified when new node is LEAVING" taggedAs LongRunningTest in { - runOn(first) { - startClusterNode() - } - testConductor.enter("first-started") - - runOn(second, third) { - cluster.join(firstAddress) - } - awaitUpConvergence(numberOfMembers = 3) - testConductor.enter("rest-started") + awaitClusterUp(first, second, third) runOn(first) { testConductor.enter("registered-listener") @@ -67,7 +58,7 @@ abstract class MembershipChangeListenerLeavingSpec val latch = TestLatch() cluster.registerListener(new MembershipChangeListener { def notify(members: SortedSet[Member]) { - if (members.size == 3 && members.exists( m => m.address == secondAddress && m.status == MemberStatus.Leaving)) + if (members.size == 3 && members.exists(m ⇒ m.address == secondAddress && m.status == MemberStatus.Leaving)) latch.countDown() } }) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala index 352f9de1a4..c87a280e17 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala +++ 
b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala @@ -32,13 +32,9 @@ abstract class MembershipChangeListenerSpec extends MultiNodeSpec(MembershipChan "A set of connected cluster systems" must { - "(when two systems) after cluster convergence updates the membership table then all MembershipChangeListeners should be triggered" taggedAs LongRunningTest in { + "(when two nodes) after cluster convergence updates the membership table then all MembershipChangeListeners should be triggered" taggedAs LongRunningTest in { - // make sure that the node-to-join is started before other join - runOn(first) { - cluster.self - } - testConductor.enter("first-started") + awaitClusterUp(first) runOn(first, second) { cluster.join(firstAddress) @@ -56,7 +52,7 @@ abstract class MembershipChangeListenerSpec extends MultiNodeSpec(MembershipChan testConductor.enter("after-1") } - "(when three systems) after cluster convergence updates the membership table then all MembershipChangeListeners should be triggered" taggedAs LongRunningTest in { + "(when three nodes) after cluster convergence updates the membership table then all MembershipChangeListeners should be triggered" taggedAs LongRunningTest in { runOn(third) { cluster.join(firstAddress) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala index 3df6b876f9..7709e9854a 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala @@ -35,15 +35,6 @@ abstract class MembershipChangeListenerUpSpec "A registered MembershipChangeListener" must { "be notified when new node is marked as UP by the leader" taggedAs LongRunningTest in { - runOn(first) { - startClusterNode() - } - - runOn(second) { - testConductor.enter("registered-listener") - cluster.join(firstAddress) 
- } - runOn(first) { val upLatch = TestLatch() cluster.registerListener(new MembershipChangeListener { @@ -58,6 +49,11 @@ abstract class MembershipChangeListenerUpSpec awaitUpConvergence(numberOfMembers = 2) } + runOn(second) { + testConductor.enter("registered-listener") + cluster.join(firstAddress) + } + testConductor.enter("after") } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala index bf431f74f6..5fe5e7de37 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala @@ -30,15 +30,39 @@ object MultiNodeClusterSpec { trait MultiNodeClusterSpec { self: MultiNodeSpec ⇒ /** - * Create a cluster node using 'Cluster(system)'. + * Get or create a cluster node using 'Cluster(system)' extension. */ def cluster: Cluster = Cluster(system) /** - * Use this method instead of 'cluster.self'. + * Use this method instead of 'cluster.self' + * for the initial startup of the cluster node. */ def startClusterNode(): Unit = cluster.self + def startCluster(roles: RoleName*): Unit = { + awaitStartCluster(false, roles.toSeq) + } + + def awaitClusterUp(roles: RoleName*): Unit = { + awaitStartCluster(true, roles.toSeq) + } + + private def awaitStartCluster(upConvergence: Boolean = true, roles: Seq[RoleName]): Unit = { + runOn(roles.head) { + // make sure that the node-to-join is started before other join + startClusterNode() + } + testConductor.enter(roles.head.name + "-started") + if (roles.tail.contains(myself)) { + cluster.join(node(roles.head).address) + } + if (upConvergence && roles.contains(myself)) { + awaitUpConvergence(numberOfMembers = roles.length) + } + testConductor.enter(roles.map(_.name).mkString("-") + "-joined") + } + /** * Assert that the member addresses match the expected addresses in the * sort order used by the cluster. 
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinSpec.scala index 99116ecb25..0d6a50b82a 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinSpec.scala @@ -16,12 +16,12 @@ object NodeJoinMultiJvmSpec extends MultiNodeConfig { commonConfig( debugConfig(on = false) - .withFallback(ConfigFactory.parseString(""" + .withFallback(ConfigFactory.parseString(""" akka.cluster { leader-actions-interval = 5 s # increase the leader action task interval } """) - .withFallback(MultiNodeClusterSpec.clusterConfig))) + .withFallback(MultiNodeClusterSpec.clusterConfig))) } class NodeJoinMultiJvmNode1 extends NodeJoinSpec diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala index ebab4f6ba3..a974930d0a 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala @@ -40,16 +40,7 @@ abstract class NodeLeavingAndExitingAndBeingRemovedSpec "be moved to EXITING and then to REMOVED by the reaper" taggedAs LongRunningTest in { - runOn(first) { - startClusterNode() - } - testConductor.enter("first-started") - - runOn(second, third) { - cluster.join(firstAddress) - } - awaitUpConvergence(numberOfMembers = 3) - testConductor.enter("rest-started") + awaitClusterUp(first, second, third) runOn(first) { cluster.leave(secondAddress) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala index 31630f934c..3773ccbd5d 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala +++ 
b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala @@ -46,16 +46,7 @@ abstract class NodeLeavingAndExitingSpec "be moved to EXITING by the leader" taggedAs LongRunningTest in { - runOn(first) { - startClusterNode() - } - testConductor.enter("first-started") - - runOn(second, third) { - cluster.join(firstAddress) - } - awaitUpConvergence(numberOfMembers = 3) - testConductor.enter("rest-started") + awaitClusterUp(first, second, third) runOn(first) { cluster.leave(secondAddress) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala index 17db90c880..96876cf4cb 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala @@ -40,16 +40,7 @@ abstract class NodeLeavingSpec extends MultiNodeSpec(NodeLeavingMultiJvmSpec) "be marked as LEAVING in the converged membership table" taggedAs LongRunningTest in { - runOn(first) { - startClusterNode() - } - testConductor.enter("first-started") - - runOn(second, third) { - cluster.join(firstAddress) - } - awaitUpConvergence(numberOfMembers = 3) - testConductor.enter("rest-started") + awaitClusterUp(first, second, third) runOn(first) { cluster.leave(secondAddress) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala index c0ac1ee22b..b54c0c1b39 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala @@ -35,16 +35,7 @@ abstract class NodeShutdownSpec extends MultiNodeSpec(NodeShutdownMultiJvmSpec) "A cluster of 2 nodes" must { "not be singleton cluster when joined" taggedAs LongRunningTest in { - // make sure that the node-to-join is started before other join - runOn(first) { - startClusterNode() - } - 
testConductor.enter("first-started") - - runOn(second) { - cluster.join(node(first).address) - } - awaitUpConvergence(numberOfMembers = 2) + awaitClusterUp(first, second) cluster.isSingletonCluster must be(false) assertLeader(first, second) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala index 7931ce48f1..eafdf2fffd 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala @@ -28,21 +28,10 @@ abstract class NodeUpSpec override def initialParticipants = 2 - lazy val firstAddress = node(first).address - lazy val secondAddress = node(second).address - "A cluster node that is joining another cluster" must { "be moved to UP by the leader after a convergence" taggedAs LongRunningTest in { - runOn(first) { - startClusterNode() - } - - runOn(second) { - cluster.join(firstAddress) - } - - awaitUpConvergence(numberOfMembers = 2) + awaitClusterUp(first, second) testConductor.enter("after") } From ac98dddfe8432bd6610bceea20c6efcbf1f1e423 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Tue, 5 Jun 2012 15:53:30 +0200 Subject: [PATCH 45/92] ScalaDoc of awaitClusterUp --- .../scala/akka/cluster/MultiNodeClusterSpec.scala | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala index 5fe5e7de37..113064e13c 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala @@ -40,10 +40,21 @@ trait MultiNodeClusterSpec { self: MultiNodeSpec ⇒ */ def startClusterNode(): Unit = cluster.self + /** + * Initialize the cluster with the specified member + * nodes (roles). First node will be started first + * and others will join the first. 
+ */ def startCluster(roles: RoleName*): Unit = { awaitStartCluster(false, roles.toSeq) } + /** + * Initialize the cluster of the specified member + * nodes (roles) and wait until all joined and `Up`. + * First node will be started first and others will join + * the first. + */ def awaitClusterUp(roles: RoleName*): Unit = { awaitStartCluster(true, roles.toSeq) } From 4d3e9f19fe93db77d184b0fbf2d23baa75d058df Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 5 Jun 2012 18:19:46 +0200 Subject: [PATCH 46/92] Fixing ScalaDoc messup --- akka-cluster/src/main/scala/akka/cluster/Cluster.scala | 2 +- .../src/main/scala/akka/serialization/ProtobufSerializer.scala | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index b947782a9a..c16a34a2ca 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -481,7 +481,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ /** * Checks if we have a cluster convergence. * - * @returns Some(convergedGossip) if convergence have been reached and None if not + * @return Some(convergedGossip) if convergence have been reached and None if not */ def convergence: Option[Gossip] = convergence(latestGossip) diff --git a/akka-remote/src/main/scala/akka/serialization/ProtobufSerializer.scala b/akka-remote/src/main/scala/akka/serialization/ProtobufSerializer.scala index d9a5c7b0c4..77f6702a77 100644 --- a/akka-remote/src/main/scala/akka/serialization/ProtobufSerializer.scala +++ b/akka-remote/src/main/scala/akka/serialization/ProtobufSerializer.scala @@ -27,7 +27,7 @@ object ProtobufSerializer { /** * Helper to materialize (lookup) an [[akka.actor.ActorRef]] * from Akka's protobuf representation in the supplied - * [[akka.actor.ActorSystem]. + * [[akka.actor.ActorSystem]]. 
*/ def deserializeActorRef(system: ActorSystem, refProtocol: ActorRefProtocol): ActorRef = system.actorFor(refProtocol.getPath) From 211435048a7c4c2678ac1abdaeaed9b3f9c42067 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 6 Jun 2012 13:56:46 +0200 Subject: [PATCH 47/92] #2189 - Removing RemoteClientWriteFailed and log it as a RemoteClientError and send the message to DeadLetters --- akka-docs/java/remoting.rst | 4 ---- akka-docs/scala/remoting.rst | 4 ---- .../main/scala/akka/remote/RemoteTransport.scala | 15 --------------- .../src/main/scala/akka/remote/netty/Client.scala | 14 ++++++++------ 4 files changed, 8 insertions(+), 29 deletions(-) diff --git a/akka-docs/java/remoting.rst b/akka-docs/java/remoting.rst index 910ec5fbb2..82a736973f 100644 --- a/akka-docs/java/remoting.rst +++ b/akka-docs/java/remoting.rst @@ -280,10 +280,6 @@ which holds the transport used (RemoteTransport) and the outbound address that i To intercept when an outbound client is shut down you listen to ``RemoteClientShutdown`` which holds the transport used (RemoteTransport) and the outbound address that it was connected to (Address). -To intercept when an outbound message cannot be sent, you listen to ``RemoteClientWriteFailed`` which holds -the payload that was not written (AnyRef), the cause of the failed send (Throwable), -the transport used (RemoteTransport) and the outbound address that was the destination (Address). - For general outbound-related errors, that do not classify as any of the others, you can listen to ``RemoteClientError``, which holds the cause (Throwable), the transport used (RemoteTransport) and the outbound address (Address). 
diff --git a/akka-docs/scala/remoting.rst b/akka-docs/scala/remoting.rst index 0863d80b55..ab49765fad 100644 --- a/akka-docs/scala/remoting.rst +++ b/akka-docs/scala/remoting.rst @@ -288,10 +288,6 @@ which holds the transport used (RemoteTransport) and the outbound address that i To intercept when an outbound client is shut down you listen to ``RemoteClientShutdown`` which holds the transport used (RemoteTransport) and the outbound address that it was connected to (Address). -To intercept when an outbound message cannot be sent, you listen to ``RemoteClientWriteFailed`` which holds -the payload that was not written (AnyRef), the cause of the failed send (Throwable), -the transport used (RemoteTransport) and the outbound address that was the destination (Address). - For general outbound-related errors, that do not classify as any of the others, you can listen to ``RemoteClientError``, which holds the cause (Throwable), the transport used (RemoteTransport) and the outbound address (Address). diff --git a/akka-remote/src/main/scala/akka/remote/RemoteTransport.scala b/akka-remote/src/main/scala/akka/remote/RemoteTransport.scala index 249c23e968..aefd34ec74 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteTransport.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteTransport.scala @@ -77,21 +77,6 @@ case class RemoteClientShutdown( override def toString: String = "RemoteClientShutdown@" + remoteAddress } -/** - * RemoteClientWriteFailed is published when a remote send of a message detectably fails (throws an exception). 
- */ -case class RemoteClientWriteFailed( - @BeanProperty request: AnyRef, - @BeanProperty cause: Throwable, - @transient @BeanProperty remote: RemoteTransport, - @BeanProperty remoteAddress: Address) extends RemoteClientLifeCycleEvent { - override def logLevel: Logging.LogLevel = Logging.WarningLevel - override def toString: String = - "RemoteClientWriteFailed@" + remoteAddress + - ": MessageClass[" + (if (request ne null) request.getClass.getName else "no message") + - "] Error[" + cause + "]" -} - /** * Life-cycle events for RemoteServer. */ diff --git a/akka-remote/src/main/scala/akka/remote/netty/Client.scala b/akka-remote/src/main/scala/akka/remote/netty/Client.scala index c6d23e71f3..76b400dd00 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Client.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Client.scala @@ -13,11 +13,11 @@ import org.jboss.netty.handler.codec.frame.{ LengthFieldPrepender, LengthFieldBa import org.jboss.netty.handler.execution.ExecutionHandler import org.jboss.netty.handler.timeout.{ IdleState, IdleStateEvent, IdleStateAwareChannelHandler, IdleStateHandler } import akka.remote.RemoteProtocol.{ RemoteControlProtocol, CommandType, AkkaRemoteProtocol } -import akka.remote.{ RemoteProtocol, RemoteMessage, RemoteLifeCycleEvent, RemoteClientStarted, RemoteClientShutdown, RemoteClientException, RemoteClientError, RemoteClientDisconnected, RemoteClientConnected, RemoteClientWriteFailed } -import akka.actor.{ Address, ActorRef } +import akka.remote.{ RemoteProtocol, RemoteMessage, RemoteLifeCycleEvent, RemoteClientStarted, RemoteClientShutdown, RemoteClientException, RemoteClientError, RemoteClientDisconnected, RemoteClientConnected } import akka.AkkaException import akka.event.Logging -import akka.util.Switch +import akka.actor.{ DeadLetter, Address, ActorRef } +import akka.util.{ NonFatal, Switch } /** * This is the abstract baseclass for netty remote clients, currently there's only an @@ -65,7 +65,9 @@ private[akka] abstract 
class RemoteClient private[akka] (val netty: NettyRemoteT new ChannelFutureListener { def operationComplete(future: ChannelFuture) { if (future.isCancelled || !future.isSuccess) { - netty.notifyListeners(RemoteClientWriteFailed(request, future.getCause, netty, remoteAddress)) + netty.notifyListeners(RemoteClientError(future.getCause, netty, remoteAddress)) + val (message, sender, recipient) = request + netty.system.deadLetters ! DeadLetter(message, sender.getOrElse(netty.system.deadLetters), recipient) } } }) @@ -75,11 +77,11 @@ private[akka] abstract class RemoteClient private[akka] (val netty: NettyRemoteT if (backoff.length > 0 && !f.await(backoff.length, backoff.unit)) f.cancel() //Waited as long as we could, now back off } } catch { - case e: Exception ⇒ netty.notifyListeners(RemoteClientError(e, netty, remoteAddress)) + case NonFatal(e) ⇒ netty.notifyListeners(RemoteClientError(e, netty, remoteAddress)) } } - override def toString = name + override def toString: String = name } /** From 1c5d0bdf42e489dc60d9f28432ef5038defec38c Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 6 Jun 2012 14:04:02 +0200 Subject: [PATCH 48/92] Adding a FIXME --- akka-remote/src/main/scala/akka/remote/netty/Client.scala | 1 + 1 file changed, 1 insertion(+) diff --git a/akka-remote/src/main/scala/akka/remote/netty/Client.scala b/akka-remote/src/main/scala/akka/remote/netty/Client.scala index 76b400dd00..2d3748fb52 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Client.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Client.scala @@ -65,6 +65,7 @@ private[akka] abstract class RemoteClient private[akka] (val netty: NettyRemoteT new ChannelFutureListener { def operationComplete(future: ChannelFuture) { if (future.isCancelled || !future.isSuccess) { + //FIXME Should we just _not_ notifyListeners here and just assume that the other error reporting is sufficient? 
netty.notifyListeners(RemoteClientError(future.getCause, netty, remoteAddress)) val (message, sender, recipient) = request netty.system.deadLetters ! DeadLetter(message, sender.getOrElse(netty.system.deadLetters), recipient) From c686622acf190867498a21b25001fd9b366fc20c Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 6 Jun 2012 14:23:34 +0200 Subject: [PATCH 49/92] Deciding not to publish errors when a message delivery fails --- akka-remote/src/main/scala/akka/remote/netty/Client.scala | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/akka-remote/src/main/scala/akka/remote/netty/Client.scala b/akka-remote/src/main/scala/akka/remote/netty/Client.scala index 2d3748fb52..c9e78902f4 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Client.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Client.scala @@ -65,8 +65,8 @@ private[akka] abstract class RemoteClient private[akka] (val netty: NettyRemoteT new ChannelFutureListener { def operationComplete(future: ChannelFuture) { if (future.isCancelled || !future.isSuccess) { - //FIXME Should we just _not_ notifyListeners here and just assume that the other error reporting is sufficient? - netty.notifyListeners(RemoteClientError(future.getCause, netty, remoteAddress)) + // We don't call notifyListeners here since we don't think failed message deliveries are errors + // If the connection goes down we'll get the error reporting done by the pipeline. val (message, sender, recipient) = request netty.system.deadLetters ! 
DeadLetter(message, sender.getOrElse(netty.system.deadLetters), recipient) } From 5ec760680afa6903e884c519321a7b1c101b413f Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 6 Jun 2012 14:28:06 +0200 Subject: [PATCH 50/92] Minor restructuring of the send-callback --- .../src/main/scala/akka/remote/netty/Client.scala | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/akka-remote/src/main/scala/akka/remote/netty/Client.scala b/akka-remote/src/main/scala/akka/remote/netty/Client.scala index c9e78902f4..86c534c418 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Client.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Client.scala @@ -63,14 +63,13 @@ private[akka] abstract class RemoteClient private[akka] (val netty: NettyRemoteT val f = channel.write(request) f.addListener( new ChannelFutureListener { - def operationComplete(future: ChannelFuture) { - if (future.isCancelled || !future.isSuccess) { + import netty.system.deadLetters + def operationComplete(future: ChannelFuture): Unit = + if (future.isCancelled || !future.isSuccess) request match { + case (msg, sender, recipient) ⇒ deadLetters ! DeadLetter(msg, sender.getOrElse(deadLetters), recipient) // We don't call notifyListeners here since we don't think failed message deliveries are errors - // If the connection goes down we'll get the error reporting done by the pipeline. - val (message, sender, recipient) = request - netty.system.deadLetters ! DeadLetter(message, sender.getOrElse(netty.system.deadLetters), recipient) + /// If the connection goes down we'll get the error reporting done by the pipeline. 
} - } }) // Check if we should back off if (!channel.isWritable) { From 82fbca9241aba4c1b0376d67bdb5a53154b519e6 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 6 Jun 2012 16:35:15 +0200 Subject: [PATCH 51/92] Clarifying semantics for ActorSystem.registerOnTermination --- akka-actor/src/main/scala/akka/actor/ActorSystem.scala | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala index af7313b41e..721375adda 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala @@ -305,8 +305,9 @@ abstract class ActorSystem extends ActorRefFactory { implicit def dispatcher: MessageDispatcher /** - * Register a block of code (callback) to run after all actors in this actor system have - * been stopped. Multiple code blocks may be registered by calling this method multiple times. + * Register a block of code (callback) to run after ActorSystem.shutdown has been issued and + * all actors in this actor system have been stopped. + * Multiple code blocks may be registered by calling this method multiple times. * The callbacks will be run sequentially in reverse order of registration, i.e. * last registration is run first. * @@ -317,8 +318,9 @@ abstract class ActorSystem extends ActorRefFactory { def registerOnTermination[T](code: ⇒ T): Unit /** - * Register a block of code (callback) to run after all actors in this actor system have - * been stopped. Multiple code blocks may be registered by calling this method multiple times. + * Register a block of code (callback) to run after ActorSystem.shutdown has been issued and + * all actors in this actor system have been stopped. + * Multiple code blocks may be registered by calling this method multiple times. * The callbacks will be run sequentially in reverse order of registration, i.e. * last registration is run first. 
* From 60c11cab7bc4e46bb230e00eeb8070f0862339bf Mon Sep 17 00:00:00 2001 From: viktorklang Date: Thu, 7 Jun 2012 00:46:01 +0300 Subject: [PATCH 52/92] Adding missing slash --- akka-docs/general/configuration.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/akka-docs/general/configuration.rst b/akka-docs/general/configuration.rst index 3be3704b22..1f3f051614 100644 --- a/akka-docs/general/configuration.rst +++ b/akka-docs/general/configuration.rst @@ -65,7 +65,7 @@ When using JarJar, OneJar, Assembly or any jar-bundler Akka's configuration approach relies heavily on the notion of every module/jar having its own reference.conf file, all of these will be discovered by the configuration and loaded. Unfortunately this also means - that if you put merge multiple jars into the same jar, you need to merge all the + that if you put/merge multiple jars into the same jar, you need to merge all the reference.confs as well. Otherwise all defaults will be lost and Akka will not function. 
Custom application.conf From bc289df018150c7139bc81f3625220a792d306ce Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Tue, 5 Jun 2012 22:16:15 +0200 Subject: [PATCH 53/92] Unit tests of Cluster, see 2163 * ClusterSpec - Test gossiping rules for deputies and unreachable - Fix strange/wrong probabilites for gossip to unreachable and deputy nodes - Fix lost order of Members when using map (without .toSeq) on the members SortedSet * MemberSpec - Test equals, hashCode * GossipSpec - Test member merge by status prio - Fix bug in member merge (groupBy was wrong) --- .../src/main/scala/akka/cluster/Cluster.scala | 114 +++++---- .../akka/cluster/MultiNodeClusterSpec.scala | 1 + .../test/scala/akka/cluster/ClusterSpec.scala | 232 ++++++++++++++++++ .../test/scala/akka/cluster/GossipSpec.scala | 42 ++++ .../test/scala/akka/cluster/MemberSpec.scala | 14 ++ 5 files changed, 360 insertions(+), 43 deletions(-) create mode 100644 akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala create mode 100644 akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index c16a34a2ca..935df0acce 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -180,7 +180,7 @@ case class GossipOverview( */ case class Gossip( overview: GossipOverview = GossipOverview(), - members: SortedSet[Member], // sorted set of members with their status, sorted by name + members: SortedSet[Member], // sorted set of members with their status, sorted by address meta: Map[String, Array[Byte]] = Map.empty[String, Array[Byte]], version: VectorClock = VectorClock()) // vector clock version extends ClusterMessage // is a serializable cluster message @@ -214,12 +214,8 @@ case class Gossip( // 1. merge vector clocks val mergedVClock = this.version merge that.version - // 2. 
group all members by Address => Vector[Member] - var membersGroupedByAddress = Map.empty[Address, Vector[Member]] - (this.members ++ that.members) foreach { m ⇒ - val ms = membersGroupedByAddress.get(m.address).getOrElse(Vector.empty[Member]) - membersGroupedByAddress += (m.address -> (ms :+ m)) - } + // 2. group all members by Address => Seq[Member] + val membersGroupedByAddress = (this.members.toSeq ++ that.members.toSeq).groupBy(_.address) // 3. merge members by selecting the single Member with highest MemberStatus out of the Member groups val mergedMembers = @@ -252,10 +248,9 @@ case class Gossip( * Manages routing of the different cluster commands. * Instantiated as a single instance for each Cluster - e.g. commands are serialized to Cluster message after message. */ -final class ClusterCommandDaemon extends Actor { +private[akka] final class ClusterCommandDaemon(cluster: Cluster) extends Actor { import ClusterAction._ - val cluster = Cluster(context.system) val log = Logging(context.system, this) def receive = { @@ -273,9 +268,8 @@ final class ClusterCommandDaemon extends Actor { * Pooled and routed with N number of configurable instances. * Concurrent access to Cluster. */ -final class ClusterGossipDaemon extends Actor { +private[akka] final class ClusterGossipDaemon(cluster: Cluster) extends Actor { val log = Logging(context.system, this) - val cluster = Cluster(context.system) def receive = { case GossipEnvelope(sender, gossip) ⇒ cluster.receive(sender, gossip) @@ -287,13 +281,13 @@ final class ClusterGossipDaemon extends Actor { /** * Supervisor managing the different Cluster daemons. 
*/ -final class ClusterDaemonSupervisor extends Actor { +private[akka] final class ClusterDaemonSupervisor(cluster: Cluster) extends Actor { val log = Logging(context.system, this) - val cluster = Cluster(context.system) - private val commands = context.actorOf(Props[ClusterCommandDaemon], "commands") + private val commands = context.actorOf(Props(new ClusterCommandDaemon(cluster)), "commands") private val gossip = context.actorOf( - Props[ClusterGossipDaemon].withRouter(RoundRobinRouter(cluster.clusterSettings.NrOfGossipDaemons)), "gossip") + Props(new ClusterGossipDaemon(cluster)).withRouter( + RoundRobinRouter(cluster.clusterSettings.NrOfGossipDaemons)), "gossip") def receive = Actor.emptyBehavior @@ -396,7 +390,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ // create superisor for daemons under path "/system/cluster" private val clusterDaemons = { - val createChild = CreateChild(Props[ClusterDaemonSupervisor], "cluster") + val createChild = CreateChild(Props(new ClusterDaemonSupervisor(this)), "cluster") Await.result(system.systemGuardian ? createChild, defaultTimeout.duration) match { case a: ActorRef ⇒ a case e: Exception ⇒ throw e @@ -794,9 +788,11 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ } /** + * INTERNAL API + * * Gossips latest gossip to an address. */ - private def gossipTo(address: Address): Unit = { + protected def gossipTo(address: Address): Unit = { val connection = clusterGossipConnectionFor(address) log.debug("Cluster Node [{}] - Gossiping to [{}]", selfAddress, connection) connection ! GossipEnvelope(self, latestGossip) @@ -805,23 +801,43 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ /** * Gossips latest gossip to a random member in the set of members passed in as argument. * - * @return 'true' if it gossiped to a "deputy" member. 
+ * @return the used [[akka.actor.Address] if any */ - private def gossipToRandomNodeOf(addresses: Iterable[Address]): Boolean = { + private def gossipToRandomNodeOf(addresses: IndexedSeq[Address]): Option[Address] = { log.debug("Cluster Node [{}] - Selecting random node to gossip to [{}]", selfAddress, addresses.mkString(", ")) - if (addresses.isEmpty) false - else { - val peers = addresses filter (_ != selfAddress) // filter out myself - val peer = selectRandomNode(peers) - gossipTo(peer) - deputyNodes exists (peer == _) + val peers = addresses filterNot (_ == selfAddress) // filter out myself + val peer = selectRandomNode(peers) + peer foreach gossipTo + peer + } + + /** + * INTERNAL API + */ + protected[akka] def gossipToUnreachableProbablity(membersSize: Int, unreachableSize: Int): Double = + (membersSize + unreachableSize) match { + case 0 ⇒ 0.0 + case sum ⇒ unreachableSize.toDouble / sum + } + + /** + * INTERNAL API + */ + protected[akka] def gossipToDeputyProbablity(membersSize: Int, unreachableSize: Int, nrOfDeputyNodes: Int): Double = { + if (nrOfDeputyNodes > membersSize) 1.0 + else if (nrOfDeputyNodes == 0) 0.0 + else (membersSize + unreachableSize) match { + case 0 ⇒ 0.0 + case sum ⇒ (nrOfDeputyNodes + unreachableSize).toDouble / sum } } /** + * INTERNAL API + * * Initates a new round of gossip. 
*/ - private def gossip(): Unit = { + private[akka] def gossip(): Unit = { val localState = state.get if (isSingletonCluster(localState)) { @@ -833,38 +849,42 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ log.debug("Cluster Node [{}] - Initiating new round of gossip", selfAddress) val localGossip = localState.latestGossip - val localMembers = localGossip.members + // important to not accidentally use `map` of the SortedSet, since the original order is not preserved + val localMembers = localGossip.members.toIndexedSeq val localMembersSize = localMembers.size + val localMemberAddresses = localMembers map { _.address } - val localUnreachableMembers = localGossip.overview.unreachable + val localUnreachableMembers = localGossip.overview.unreachable.toIndexedSeq val localUnreachableSize = localUnreachableMembers.size // 1. gossip to alive members - val gossipedToDeputy = gossipToRandomNodeOf(localMembers map { _.address }) + val gossipedToAlive = gossipToRandomNodeOf(localMemberAddresses) // 2. gossip to unreachable members if (localUnreachableSize > 0) { - val probability: Double = localUnreachableSize / (localMembersSize + 1) - if (ThreadLocalRandom.current.nextDouble() < probability) gossipToRandomNodeOf(localUnreachableMembers.map(_.address)) + val probability = gossipToUnreachableProbablity(localMembersSize, localUnreachableSize) + if (ThreadLocalRandom.current.nextDouble() < probability) + gossipToRandomNodeOf(localUnreachableMembers.map(_.address)) } // 3. 
gossip to a deputy nodes for facilitating partition healing - val deputies = deputyNodes - if ((!gossipedToDeputy || localMembersSize < 1) && deputies.nonEmpty) { - if (localMembersSize == 0) gossipToRandomNodeOf(deputies) - else { - val probability = 1.0 / localMembersSize + localUnreachableSize - if (ThreadLocalRandom.current.nextDouble() <= probability) gossipToRandomNodeOf(deputies) - } + val deputies = deputyNodes(localMemberAddresses) + val alreadyGossipedToDeputy = gossipedToAlive.map(deputies.contains(_)).getOrElse(false) + if ((!alreadyGossipedToDeputy || localMembersSize < NrOfDeputyNodes) && deputies.nonEmpty) { + val probability = gossipToDeputyProbablity(localMembersSize, localUnreachableSize, NrOfDeputyNodes) + if (ThreadLocalRandom.current.nextDouble() < probability) + gossipToRandomNodeOf(deputies) } } } /** + * INTERNAL API + * * Reaps the unreachable members (moves them to the 'unreachable' list in the cluster overview) according to the failure detector's verdict. */ @tailrec - final private def reapUnreachableMembers(): Unit = { + final private[akka] def reapUnreachableMembers(): Unit = { val localState = state.get if (!isSingletonCluster(localState) && isAvailable(localState)) { @@ -905,10 +925,12 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ } /** + * INTERNAL API + * * Runs periodic leader actions, such as auto-downing unreachable nodes, assigning partitions etc. 
*/ @tailrec - final private def leaderActions(): Unit = { + final private[akka] def leaderActions(): Unit = { val localState = state.get val localGossip = localState.latestGossip val localMembers = localGossip.members @@ -1082,11 +1104,17 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ private def clusterGossipConnectionFor(address: Address): ActorRef = system.actorFor(RootActorPath(address) / "system" / "cluster" / "gossip") /** - * Gets an Iterable with the addresses of a all the 'deputy' nodes - excluding this node if part of the group. + * Gets the addresses of a all the 'deputy' nodes - excluding this node if part of the group. */ - private def deputyNodes: Iterable[Address] = state.get.latestGossip.members.toIterable map (_.address) drop 1 take NrOfDeputyNodes filter (_ != selfAddress) + private def deputyNodes(addresses: IndexedSeq[Address]): IndexedSeq[Address] = + addresses drop 1 take NrOfDeputyNodes filterNot (_ == selfAddress) - private def selectRandomNode(addresses: Iterable[Address]): Address = addresses.toSeq(ThreadLocalRandom.current nextInt addresses.size) + /** + * INTERNAL API + */ + protected def selectRandomNode(addresses: IndexedSeq[Address]): Option[Address] = + if (addresses.isEmpty) None + else Some(addresses(ThreadLocalRandom.current nextInt addresses.size)) private def isSingletonCluster(currentState: State): Boolean = currentState.latestGossip.members.size == 1 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala index 113064e13c..7f7d60fcdc 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala @@ -20,6 +20,7 @@ object MultiNodeClusterSpec { leader-actions-interval = 200 ms unreachable-nodes-reaper-interval = 200 ms periodic-tasks-initial-delay = 300 ms + nr-of-deputy-nodes = 2 } akka.test 
{ single-expect-default = 5 s diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala new file mode 100644 index 0000000000..fdc3095f74 --- /dev/null +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala @@ -0,0 +1,232 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ + +package akka.cluster + +import akka.testkit.AkkaSpec +import akka.util.duration._ +import akka.util.Duration +import akka.actor.ExtendedActorSystem +import akka.actor.Address +import java.util.concurrent.atomic.AtomicInteger +import org.scalatest.BeforeAndAfter + +object ClusterSpec { + val config = """ + akka.cluster { + auto-down = off + nr-of-deputy-nodes = 3 + periodic-tasks-initial-delay = 120 seconds // turn off scheduled tasks + } + akka.actor.provider = "akka.remote.RemoteActorRefProvider" + akka.remote.netty.port = 0 + akka.loglevel = DEBUG + """ + + case class GossipTo(address: Address) +} + +@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) +class ClusterSpec extends AkkaSpec(ClusterSpec.config) with BeforeAndAfter { + import ClusterSpec._ + + val deterministicRandom = new AtomicInteger + + val cluster = new Cluster(system.asInstanceOf[ExtendedActorSystem]) { + + override def selectRandomNode(addresses: IndexedSeq[Address]): Option[Address] = { + if (addresses.isEmpty) None + else Some(addresses.toSeq(deterministicRandom.getAndIncrement % addresses.size)) + } + + override def gossipTo(address: Address): Unit = { + if (address == self.address) { + super.gossipTo(address) + } + // represent the gossip with a message to be used in asserts + testActor ! 
GossipTo(address) + } + + @volatile + var _gossipToUnreachableProbablity = 0.0 + + override def gossipToUnreachableProbablity(membersSize: Int, unreachableSize: Int): Double = { + if (_gossipToUnreachableProbablity < 0.0) super.gossipToUnreachableProbablity(membersSize, unreachableSize) + else _gossipToUnreachableProbablity + } + + @volatile + var _gossipToDeputyProbablity = 0.0 + + override def gossipToDeputyProbablity(membersSize: Int, unreachableSize: Int, deputySize: Int): Double = { + if (_gossipToDeputyProbablity < 0.0) super.gossipToDeputyProbablity(membersSize, unreachableSize, deputySize) + else _gossipToDeputyProbablity + } + + @volatile + var _unavailable: Set[Address] = Set.empty + + override val failureDetector = new AccrualFailureDetector( + system, selfAddress, clusterSettings.FailureDetectorThreshold, clusterSettings.FailureDetectorMaxSampleSize) { + + override def isAvailable(connection: Address): Boolean = { + if (_unavailable.contains(connection)) false + else super.isAvailable(connection) + } + } + + } + + val selfAddress = cluster.self.address + val addresses = IndexedSeq( + selfAddress, + Address("akka", system.name, selfAddress.host.get, selfAddress.port.get + 1), + Address("akka", system.name, selfAddress.host.get, selfAddress.port.get + 2), + Address("akka", system.name, selfAddress.host.get, selfAddress.port.get + 3), + Address("akka", system.name, selfAddress.host.get, selfAddress.port.get + 4), + Address("akka", system.name, selfAddress.host.get, selfAddress.port.get + 5)) + + def memberStatus(address: Address): Option[MemberStatus] = + cluster.latestGossip.members.collectFirst { case m if m.address == address ⇒ m.status } + + before { + cluster._gossipToUnreachableProbablity = 0.0 + cluster._gossipToDeputyProbablity = 0.0 + cluster._unavailable = Set.empty + deterministicRandom.set(0) + } + + "A Cluster" must { + + "initially be singleton cluster and reach convergence after first gossip" in { + cluster.isSingletonCluster must be(true) + 
cluster.latestGossip.members.map(_.address) must be(Set(selfAddress)) + memberStatus(selfAddress) must be(Some(MemberStatus.Joining)) + cluster.convergence.isDefined must be(false) + cluster.gossip() + expectMsg(GossipTo(selfAddress)) + awaitCond(cluster.convergence.isDefined) + memberStatus(selfAddress) must be(Some(MemberStatus.Joining)) + cluster.leaderActions() + memberStatus(selfAddress) must be(Some(MemberStatus.Up)) + } + + "accept a joining node" in { + cluster.joining(addresses(1)) + cluster.latestGossip.members.map(_.address) must be(Set(selfAddress, addresses(1))) + memberStatus(addresses(1)) must be(Some(MemberStatus.Joining)) + // FIXME why is it still convergence immediately after joining? + //cluster.convergence.isDefined must be(false) + } + + "accept a few more joining nodes" in { + for (a ← addresses.drop(2)) { + cluster.joining(a) + memberStatus(a) must be(Some(MemberStatus.Joining)) + } + cluster.latestGossip.members.map(_.address) must be(addresses.toSet) + } + + "order members by host and port" in { + // note the importance of using toSeq before map, otherwise it will not preserve the order + cluster.latestGossip.members.toSeq.map(_.address) must be(addresses.toSeq) + } + + "gossip to random live node" in { + cluster.latestGossip.members + cluster.gossip() + cluster.gossip() + cluster.gossip() + cluster.gossip() + + expectMsg(GossipTo(addresses(1))) + expectMsg(GossipTo(addresses(2))) + expectMsg(GossipTo(addresses(3))) + expectMsg(GossipTo(addresses(4))) + + expectNoMsg(1 second) + } + + "use certain probability for gossiping to unreachable node depending on the number of unreachable and live nodes" in { + cluster._gossipToUnreachableProbablity = -1.0 // use real impl + cluster.gossipToUnreachableProbablity(10, 1) must be < (cluster.gossipToUnreachableProbablity(9, 1)) + cluster.gossipToUnreachableProbablity(10, 1) must be < (cluster.gossipToUnreachableProbablity(10, 2)) + cluster.gossipToUnreachableProbablity(10, 5) must be < 
(cluster.gossipToUnreachableProbablity(10, 9)) + cluster.gossipToUnreachableProbablity(0, 10) must be <= (1.0) + cluster.gossipToUnreachableProbablity(1, 10) must be <= (1.0) + cluster.gossipToUnreachableProbablity(10, 0) must be(0.0 plusOrMinus (0.0001)) + cluster.gossipToUnreachableProbablity(0, 0) must be(0.0 plusOrMinus (0.0001)) + } + + "use certain probability for gossiping to deputy node depending on the number of unreachable and live nodes" in { + cluster._gossipToDeputyProbablity = -1.0 // use real impl + cluster.gossipToDeputyProbablity(10, 1, 2) must be < (cluster.gossipToDeputyProbablity(9, 1, 2)) + cluster.gossipToDeputyProbablity(10, 1, 2) must be < (cluster.gossipToDeputyProbablity(10, 2, 2)) + cluster.gossipToDeputyProbablity(10, 1, 2) must be < (cluster.gossipToDeputyProbablity(10, 2, 3)) + cluster.gossipToDeputyProbablity(10, 5, 5) must be < (cluster.gossipToDeputyProbablity(10, 9, 5)) + cluster.gossipToDeputyProbablity(0, 10, 0) must be <= (1.0) + cluster.gossipToDeputyProbablity(1, 10, 1) must be <= (1.0) + cluster.gossipToDeputyProbablity(10, 0, 0) must be(0.0 plusOrMinus (0.0001)) + cluster.gossipToDeputyProbablity(0, 0, 0) must be(0.0 plusOrMinus (0.0001)) + cluster.gossipToDeputyProbablity(4, 0, 4) must be(1.0 plusOrMinus (0.0001)) + cluster.gossipToDeputyProbablity(3, 7, 4) must be(1.0 plusOrMinus (0.0001)) + } + + "gossip to duputy node" in { + cluster._gossipToDeputyProbablity = 1.0 // always + + // we have configured 2 deputy nodes + cluster.gossip() // 1 is deputy + cluster.gossip() // 2 is deputy + cluster.gossip() // 3 is deputy + cluster.gossip() // 4 is not deputy, and therefore a deputy is also used + + expectMsg(GossipTo(addresses(1))) + expectMsg(GossipTo(addresses(2))) + expectMsg(GossipTo(addresses(3))) + expectMsg(GossipTo(addresses(4))) + // and the extra gossip to deputy + expectMsgAnyOf(GossipTo(addresses(1)), GossipTo(addresses(2)), GossipTo(addresses(3))) + + expectNoMsg(1 second) + + } + + "gossip to random unreachable 
node" in { + val dead = Set(addresses(1)) + cluster._unavailable = dead + cluster._gossipToUnreachableProbablity = 1.0 // always + + cluster.reapUnreachableMembers() + cluster.latestGossip.overview.unreachable.map(_.address) must be(dead) + + cluster.gossip() + + expectMsg(GossipTo(addresses(2))) // first available + expectMsg(GossipTo(addresses(1))) // the unavailable + + expectNoMsg(1 second) + } + + "gossip to random deputy node if number of live nodes is less than number of deputy nodes" in { + cluster._gossipToDeputyProbablity = -1.0 // real impl + // 0 and 2 still alive + val dead = Set(addresses(1), addresses(3), addresses(4), addresses(5)) + cluster._unavailable = dead + + cluster.reapUnreachableMembers() + cluster.latestGossip.overview.unreachable.map(_.address) must be(dead) + + for (n ← 1 to 20) { + cluster.gossip() + expectMsg(GossipTo(addresses(2))) // the only available + // and always to one of the 3 deputies + expectMsgAnyOf(GossipTo(addresses(1)), GossipTo(addresses(2)), GossipTo(addresses(3))) + } + + expectNoMsg(1 second) + + } + } +} diff --git a/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala b/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala new file mode 100644 index 0000000000..77cd0c52ba --- /dev/null +++ b/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala @@ -0,0 +1,42 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. 
+ */ + +package akka.cluster + +import org.scalatest.WordSpec +import org.scalatest.matchers.MustMatchers +import akka.actor.Address +import scala.collection.immutable.SortedSet + +@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) +class GossipSpec extends WordSpec with MustMatchers { + + "A Gossip" must { + + "merge members by status priority" in { + import MemberStatus._ + val a1 = Member(Address("akka", "sys", "a", 2552), Up) + val a2 = Member(Address("akka", "sys", "a", 2552), Joining) + val b1 = Member(Address("akka", "sys", "b", 2552), Up) + val b2 = Member(Address("akka", "sys", "b", 2552), Removed) + val c1 = Member(Address("akka", "sys", "c", 2552), Leaving) + val c2 = Member(Address("akka", "sys", "c", 2552), Up) + val d1 = Member(Address("akka", "sys", "d", 2552), Leaving) + val d2 = Member(Address("akka", "sys", "d", 2552), Removed) + + val g1 = Gossip(members = SortedSet(a1, b1, c1, d1)) + val g2 = Gossip(members = SortedSet(a2, b2, c2, d2)) + + val merged1 = g1 merge g2 + merged1.members must be(SortedSet(a1, b2, c1, d2)) + merged1.members.toSeq.map(_.status) must be(Seq(Up, Removed, Leaving, Removed)) + + val merged2 = g2 merge g1 + merged2.members must be(SortedSet(a1, b2, c1, d2)) + merged2.members.toSeq.map(_.status) must be(Seq(Up, Removed, Leaving, Removed)) + + } + + } +} diff --git a/akka-cluster/src/test/scala/akka/cluster/MemberSpec.scala b/akka-cluster/src/test/scala/akka/cluster/MemberSpec.scala index 050407577e..bc1f70ae86 100644 --- a/akka-cluster/src/test/scala/akka/cluster/MemberSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/MemberSpec.scala @@ -8,6 +8,7 @@ import org.scalatest.WordSpec import org.scalatest.matchers.MustMatchers import akka.actor.Address import scala.util.Random +import scala.collection.immutable.SortedSet @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) class MemberSpec extends WordSpec with MustMatchers { @@ -26,6 +27,19 @@ class MemberSpec extends WordSpec with 
MustMatchers { val expected = IndexedSeq(m1, m2, m3, m4, m5) val shuffled = Random.shuffle(expected) shuffled.sorted must be(expected) + (SortedSet.empty[Member] ++ shuffled).toIndexedSeq must be(expected) + } + + "have stable equals and hashCode" in { + val m1 = Member(Address("akka", "sys1", "host1", 9000), MemberStatus.Joining) + val m2 = Member(Address("akka", "sys1", "host1", 9000), MemberStatus.Up) + val m3 = Member(Address("akka", "sys1", "host1", 10000), MemberStatus.Up) + + m1 must be(m2) + m1.hashCode must be(m2.hashCode) + + m3 must not be (m2) + m3 must not be (m1) } } } From 502bf5f8d59c091894ee70b8ba1a8981cc3ba93b Mon Sep 17 00:00:00 2001 From: Roland Date: Tue, 5 Jun 2012 14:19:38 +0200 Subject: [PATCH 54/92] unbreak config check in RoutedActorRef, which blocked&leaked threads --- .../src/main/scala/akka/routing/Routing.scala | 21 +++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/akka-actor/src/main/scala/akka/routing/Routing.scala b/akka-actor/src/main/scala/akka/routing/Routing.scala index 2f585a1790..21b14a6a3d 100644 --- a/akka-actor/src/main/scala/akka/routing/Routing.scala +++ b/akka-actor/src/main/scala/akka/routing/Routing.scala @@ -29,12 +29,6 @@ private[akka] class RoutedActorRef(_system: ActorSystemImpl, _props: Props, _sup _supervisor, _path) { - // verify that a BalancingDispatcher is not used with a Router - if (_props.routerConfig != NoRouter && _system.dispatchers.isBalancingDispatcher(_props.routerConfig.routerDispatcher)) - throw new ConfigurationException( - "Configuration for actor [" + _path.toString + - "] is invalid - you can not use a 'BalancingDispatcher' as a Router's dispatcher, you can however use it for the routees.") - /* * CAUTION: RoutedActorRef is PROBLEMATIC * ====================================== @@ -47,6 +41,13 @@ private[akka] class RoutedActorRef(_system: ActorSystemImpl, _props: Props, _sup * before we are done with them: lock the monitor of the actor cell (hence the * override 
of newActorCell) and use that to block the Router constructor for * as long as it takes to setup the RoutedActorRef itself. + * + * ===> I M P O R T A N T N O T I C E <=== + * + * DO NOT THROW ANY EXCEPTIONS BEFORE THE FOLLOWING TRY-BLOCK WITHOUT + * EXITING THE MONITOR OF THE actorCell! + * + * This is important, just don’t do it! No kidding. */ override def newActorCell( system: ActorSystemImpl, @@ -74,6 +75,14 @@ private[akka] class RoutedActorRef(_system: ActorSystemImpl, _props: Props, _sup val route = try { + // verify that a BalancingDispatcher is not used with a Router + if (_props.routerConfig != NoRouter && _system.dispatchers.isBalancingDispatcher(_props.routerConfig.routerDispatcher)) { + actorContext.stop(actorContext.self) + throw new ConfigurationException( + "Configuration for actor [" + _path.toString + + "] is invalid - you can not use a 'BalancingDispatcher' as a Router's dispatcher, you can however use it for the routees.") + } + _routeeProvider = routerConfig.createRouteeProvider(actorContext) val r = routerConfig.createRoute(routeeProps, routeeProvider) // initial resize, before message send From 4e9a658609c4eb3c72d4332589fd109c1be52b93 Mon Sep 17 00:00:00 2001 From: Roland Date: Tue, 5 Jun 2012 15:07:19 +0200 Subject: [PATCH 55/92] remove unused _receiveTimeout constructor args (ActorCell and LocalActorRef) --- .../src/main/scala/akka/actor/ActorCell.scala | 6 ++---- .../src/main/scala/akka/actor/ActorRef.scala | 14 ++++---------- .../main/scala/akka/actor/ActorRefProvider.scala | 6 ++++++ .../src/main/scala/akka/routing/Routing.scala | 5 ++--- .../src/main/scala/akka/testkit/TestActorRef.scala | 9 ++------- 5 files changed, 16 insertions(+), 24 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index 9cb2cb674a..51e223e73f 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -316,8 
+316,7 @@ private[akka] class ActorCell( val system: ActorSystemImpl, val self: InternalActorRef, val props: Props, - @volatile var parent: InternalActorRef, - /*no member*/ _receiveTimeout: Option[Duration]) extends UntypedActorContext { + @volatile var parent: InternalActorRef) extends UntypedActorContext { import AbstractActorCell.mailboxOffset import ActorCell._ @@ -351,8 +350,7 @@ private[akka] class ActorCell( /** * In milliseconds */ - var receiveTimeoutData: (Long, Cancellable) = - if (_receiveTimeout.isDefined) (_receiveTimeout.get.toMillis, emptyCancellable) else emptyReceiveTimeoutData + var receiveTimeoutData: (Long, Cancellable) = emptyReceiveTimeoutData @volatile var childrenRefs: ChildrenContainer = EmptyChildrenContainer diff --git a/akka-actor/src/main/scala/akka/actor/ActorRef.scala b/akka-actor/src/main/scala/akka/actor/ActorRef.scala index 30b1ccf998..861df570b7 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRef.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRef.scala @@ -228,8 +228,7 @@ private[akka] class LocalActorRef private[akka] ( _props: Props, _supervisor: InternalActorRef, override val path: ActorPath, - val systemService: Boolean = false, - _receiveTimeout: Option[Duration] = None) + val systemService: Boolean = false) extends InternalActorRef with LocalRef { /* @@ -242,16 +241,11 @@ private[akka] class LocalActorRef private[akka] ( * us to use purely factory methods for creating LocalActorRefs. 
*/ @volatile - private var actorCell = newActorCell(_system, this, _props, _supervisor, _receiveTimeout) + private var actorCell = newActorCell(_system, this, _props, _supervisor) actorCell.start() - protected def newActorCell( - system: ActorSystemImpl, - ref: InternalActorRef, - props: Props, - supervisor: InternalActorRef, - receiveTimeout: Option[Duration]): ActorCell = - new ActorCell(system, ref, props, supervisor, receiveTimeout) + protected def newActorCell(system: ActorSystemImpl, ref: InternalActorRef, props: Props, supervisor: InternalActorRef): ActorCell = + new ActorCell(system, ref, props, supervisor) protected def actorContext: ActorContext = actorCell diff --git a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala index a985a6f8d5..960e8a37e5 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala @@ -552,3 +552,9 @@ class LocalActorRefProvider( def getExternalAddressFor(addr: Address): Option[Address] = if (addr == rootPath.address) Some(addr) else None } + +private[akka] class GuardianCell(_system: ActorSystemImpl, _self: InternalActorRef, _props: Props, _parent: InternalActorRef) + extends ActorCell(_system, _self, _props, _parent) { + +} + diff --git a/akka-actor/src/main/scala/akka/routing/Routing.scala b/akka-actor/src/main/scala/akka/routing/Routing.scala index 21b14a6a3d..bcd92794da 100644 --- a/akka-actor/src/main/scala/akka/routing/Routing.scala +++ b/akka-actor/src/main/scala/akka/routing/Routing.scala @@ -53,9 +53,8 @@ private[akka] class RoutedActorRef(_system: ActorSystemImpl, _props: Props, _sup system: ActorSystemImpl, ref: InternalActorRef, props: Props, - supervisor: InternalActorRef, - receiveTimeout: Option[Duration]): ActorCell = { - val cell = super.newActorCell(system, ref, props, supervisor, receiveTimeout) + supervisor: InternalActorRef): ActorCell = { + val cell = 
super.newActorCell(system, ref, props, supervisor) Unsafe.instance.monitorEnter(cell) cell } diff --git a/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala b/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala index 279c728e80..c0442d45d6 100644 --- a/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala +++ b/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala @@ -36,13 +36,8 @@ class TestActorRef[T <: Actor]( import TestActorRef.InternalGetActor - override def newActorCell( - system: ActorSystemImpl, - ref: InternalActorRef, - props: Props, - supervisor: InternalActorRef, - receiveTimeout: Option[Duration]): ActorCell = - new ActorCell(system, ref, props, supervisor, receiveTimeout) { + override def newActorCell(system: ActorSystemImpl, ref: InternalActorRef, props: Props, supervisor: InternalActorRef): ActorCell = + new ActorCell(system, ref, props, supervisor) { override def autoReceiveMessage(msg: Envelope) { msg.message match { case InternalGetActor ⇒ sender ! 
actor From 3271fddd92cd3a0a450fec682aa15b2d2212a2c1 Mon Sep 17 00:00:00 2001 From: Roland Date: Tue, 5 Jun 2012 16:58:24 +0200 Subject: [PATCH 56/92] do not discard system messages silently after stop --- .../src/main/scala/akka/actor/ActorCell.scala | 2 +- .../main/scala/akka/dispatch/Mailbox.scala | 40 ++++++++++++++----- 2 files changed, 30 insertions(+), 12 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index 51e223e73f..9dbe610195 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -671,7 +671,7 @@ private[akka] class ActorCell( checkReceiveTimeout // Reschedule receive timeout } - private final def handleInvokeFailure(t: Throwable, message: String): Unit = try { + final def handleInvokeFailure(t: Throwable, message: String): Unit = try { dispatcher.reportFailure(new LogEventException(Error(t, self.path.toString, clazz(actor), message), t)) // prevent any further messages to be processed until the actor has been restarted dispatcher.suspend(this) diff --git a/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala b/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala index d26e7b2afc..11e58ede7e 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala @@ -209,20 +209,38 @@ private[akka] abstract class Mailbox(val actor: ActorCell, val messageQueue: Mes } final def processAllSystemMessages() { + var failure: Throwable = null var nextMessage = systemDrain(null) - try { - while ((nextMessage ne null) && !isClosed) { - if (debug) println(actor.self + " processing system message " + nextMessage + " with " + actor.childrenRefs) - actor systemInvoke nextMessage - nextMessage = nextMessage.next - // don’t ever execute normal message when system message present! 
- if (nextMessage eq null) nextMessage = systemDrain(null) + while ((nextMessage ne null) && !isClosed) { + val msg = nextMessage + nextMessage = nextMessage.next + msg.next = null + if (debug) println(actor.self + " processing system message " + msg + " with " + actor.childrenRefs) + try actor systemInvoke msg + catch { + case NonFatal(e) ⇒ + if (failure eq null) failure = e + actor.system.eventStream.publish(Error(e, actor.self.path.toString, this.getClass, "exception during processing system message " + msg + ": " + e.getMessage)) } - } catch { - case NonFatal(e) ⇒ - actor.system.eventStream.publish(Error(e, actor.self.path.toString, this.getClass, "exception during processing system messages, dropping " + SystemMessage.size(nextMessage) + " messages!")) - throw e + // don’t ever execute normal message when system message present! + if ((nextMessage eq null) && !isClosed) nextMessage = systemDrain(null) } + /* + * if we closed the mailbox, we must dump the remaining system messages + * to deadLetters (this is essential for DeathWatch) + */ + while (nextMessage ne null) { + val msg = nextMessage + nextMessage = nextMessage.next + msg.next = null + try actor.systemImpl.deadLetterMailbox.systemEnqueue(actor.self, msg) + catch { + case NonFatal(e) ⇒ actor.system.eventStream.publish( + Error(e, actor.self.path.toString, this.getClass, "error while enqueuing " + msg + " to deadLetters: " + e.getMessage)) + } + } + // if something happened while processing, fail this actor (most probable: exception in supervisorStrategy) + if (failure ne null) actor.handleInvokeFailure(failure, failure.getMessage) } @inline From bff03676feb08acdf1c3e2cc98d9d1f1ba3159ee Mon Sep 17 00:00:00 2001 From: Roland Date: Thu, 7 Jun 2012 10:45:45 +0200 Subject: [PATCH 57/92] =?UTF-8?q?add=20java=20testing=20doc=20chapter=20an?= =?UTF-8?q?d=20link=20to=20Munish=E2=80=99s=20blog?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- akka-docs/java/index.rst 
| 1 + akka-docs/java/testing.rst | 14 ++++++++++++++ 2 files changed, 15 insertions(+) create mode 100644 akka-docs/java/testing.rst diff --git a/akka-docs/java/index.rst b/akka-docs/java/index.rst index 4022092dba..669bd7c362 100644 --- a/akka-docs/java/index.rst +++ b/akka-docs/java/index.rst @@ -24,4 +24,5 @@ Java API extending-akka zeromq microkernel + testing howto diff --git a/akka-docs/java/testing.rst b/akka-docs/java/testing.rst new file mode 100644 index 0000000000..d49ba2512f --- /dev/null +++ b/akka-docs/java/testing.rst @@ -0,0 +1,14 @@ +.. _akka-testkit-java: + +############################## +Testing Actor Systems (Java) +############################## + +Due to the conciseness of test DSLs available for Scala, it may be a good idea +to write the test suite in that language even if the main project is written in +Java. If that is not desirable, you can also use :class:`TestKit` and friends +from Java, albeit with more verbose syntax Munish Gupta has `published a nice +post `_ +showing several patterns you may find useful, and for reference documentation +please refer to :ref:`akka-testkit` until that section has been ported over to +cover Java in full. From 921d900f99c326133b6ab4b3ce552efaaf389569 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Thu, 7 Jun 2012 12:03:34 +0200 Subject: [PATCH 58/92] Change protected to private[akka], see #2163 --- akka-cluster/src/main/scala/akka/cluster/Cluster.scala | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index 935df0acce..ce2e01cbca 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -792,7 +792,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ * * Gossips latest gossip to an address. 
*/ - protected def gossipTo(address: Address): Unit = { + private[akka] def gossipTo(address: Address): Unit = { val connection = clusterGossipConnectionFor(address) log.debug("Cluster Node [{}] - Gossiping to [{}]", selfAddress, connection) connection ! GossipEnvelope(self, latestGossip) @@ -814,7 +814,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ /** * INTERNAL API */ - protected[akka] def gossipToUnreachableProbablity(membersSize: Int, unreachableSize: Int): Double = + private[akka] def gossipToUnreachableProbablity(membersSize: Int, unreachableSize: Int): Double = (membersSize + unreachableSize) match { case 0 ⇒ 0.0 case sum ⇒ unreachableSize.toDouble / sum @@ -823,7 +823,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ /** * INTERNAL API */ - protected[akka] def gossipToDeputyProbablity(membersSize: Int, unreachableSize: Int, nrOfDeputyNodes: Int): Double = { + private[akka] def gossipToDeputyProbablity(membersSize: Int, unreachableSize: Int, nrOfDeputyNodes: Int): Double = { if (nrOfDeputyNodes > membersSize) 1.0 else if (nrOfDeputyNodes == 0) 0.0 else (membersSize + unreachableSize) match { @@ -1112,7 +1112,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ /** * INTERNAL API */ - protected def selectRandomNode(addresses: IndexedSeq[Address]): Option[Address] = + private[akka] def selectRandomNode(addresses: IndexedSeq[Address]): Option[Address] = if (addresses.isEmpty) None else Some(addresses(ThreadLocalRandom.current nextInt addresses.size)) From 9ca794dcc851698a920ef6bd4cd395d1910bacbb Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Thu, 7 Jun 2012 13:32:12 +0200 Subject: [PATCH 59/92] Totally skip running multi-jvm tests when long-running is excluded, see #2194 --- .../src/test/scala/akka/cluster/ClusterSpec.scala | 2 +- project/AkkaBuild.scala | 14 ++++++++++---- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git 
a/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala index fdc3095f74..d3d1d6d0a2 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala @@ -21,7 +21,7 @@ object ClusterSpec { } akka.actor.provider = "akka.remote.RemoteActorRefProvider" akka.remote.netty.port = 0 - akka.loglevel = DEBUG + # akka.loglevel = DEBUG """ case class GossipTo(address: Address) diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala index 23d51fe77c..f2535d8b93 100644 --- a/project/AkkaBuild.scala +++ b/project/AkkaBuild.scala @@ -338,6 +338,11 @@ object AkkaBuild extends Build { // for running only tests by tag use system property: -Dakka.test.tags.only= lazy val useOnlyTestTags: Set[String] = systemPropertyAsSeq("akka.test.tags.only").toSet + def executeMultiJvmTests: Boolean = { + useOnlyTestTags.contains("long-running") || + !(useExcludeTestTags -- useIncludeTestTags).contains("long-running") + } + def systemPropertyAsSeq(name: String): Seq[String] = { val prop = System.getProperty(name, "") if (prop.isEmpty) Seq.empty else prop.split(",").toSeq @@ -402,20 +407,21 @@ object AkkaBuild extends Build { lazy val multiJvmSettings = MultiJvmPlugin.settings ++ inConfig(MultiJvm)(ScalariformPlugin.scalariformSettings) ++ Seq( compileInputs in MultiJvm <<= (compileInputs in MultiJvm) dependsOn (ScalariformKeys.format in MultiJvm), - ScalariformKeys.preferences in MultiJvm := formattingPreferences, - if (multiNodeEnabled) + ScalariformKeys.preferences in MultiJvm := formattingPreferences) ++ + (if (multiNodeEnabled) executeTests in Test <<= ((executeTests in Test), (multiNodeExecuteTests in MultiJvm)) map { case (tr, mr) => val r = tr._2 ++ mr._2 (Tests.overall(r.values), r) } - else + else if (executeMultiJvmTests) executeTests in Test <<= ((executeTests in Test), (executeTests in MultiJvm)) map { case (tr, mr) => val r = tr._2 ++ mr._2 
(Tests.overall(r.values), r) } - ) + else Seq.empty) + lazy val mimaSettings = mimaDefaultSettings ++ Seq( // MiMa From 0aa81229e5ab9f408082a56d9262f9060d2def63 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Thu, 7 Jun 2012 14:12:13 +0200 Subject: [PATCH 60/92] Fix feedback, see #2194 --- project/AkkaBuild.scala | 29 +++++++++++++++-------------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala index f2535d8b93..e4a865b5a7 100644 --- a/project/AkkaBuild.scala +++ b/project/AkkaBuild.scala @@ -408,20 +408,21 @@ object AkkaBuild extends Build { lazy val multiJvmSettings = MultiJvmPlugin.settings ++ inConfig(MultiJvm)(ScalariformPlugin.scalariformSettings) ++ Seq( compileInputs in MultiJvm <<= (compileInputs in MultiJvm) dependsOn (ScalariformKeys.format in MultiJvm), ScalariformKeys.preferences in MultiJvm := formattingPreferences) ++ - (if (multiNodeEnabled) - executeTests in Test <<= ((executeTests in Test), (multiNodeExecuteTests in MultiJvm)) map { - case (tr, mr) => - val r = tr._2 ++ mr._2 - (Tests.overall(r.values), r) - } - else if (executeMultiJvmTests) - executeTests in Test <<= ((executeTests in Test), (executeTests in MultiJvm)) map { - case (tr, mr) => - val r = tr._2 ++ mr._2 - (Tests.overall(r.values), r) - } - else Seq.empty) - + ((executeMultiJvmTests, multiNodeEnabled) match { + case (true, true) => + executeTests in Test <<= ((executeTests in Test), (multiNodeExecuteTests in MultiJvm)) map { + case ((_, testResults), (_, multiNodeResults)) => + val results = testResults ++ multiNodeResults + (Tests.overall(results.values), results) + } + case (true, false) => + executeTests in Test <<= ((executeTests in Test), (executeTests in MultiJvm)) map { + case ((_, testResults), (_, multiNodeResults)) => + val results = testResults ++ multiNodeResults + (Tests.overall(results.values), results) + } + case (false, _) => Seq.empty + }) lazy val mimaSettings = mimaDefaultSettings ++ Seq( // 
MiMa From dbac17621f80d62e7cd9aa0d4adbc964ad2e82a6 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Thu, 7 Jun 2012 15:45:10 +0200 Subject: [PATCH 61/92] Node that joins again should be ignored, see #2184 --- .../src/main/scala/akka/cluster/Cluster.scala | 31 ++++++++++--------- .../scala/akka/cluster/NodeUpSpec.scala | 30 +++++++++++++++++- 2 files changed, 45 insertions(+), 16 deletions(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index ce2e01cbca..7d1222a7ab 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -571,27 +571,28 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ val localState = state.get val localGossip = localState.latestGossip val localMembers = localGossip.members - val localOverview = localGossip.overview - val localUnreachableMembers = localOverview.unreachable - // remove the node from the 'unreachable' set in case it is a DOWN node that is rejoining cluster - val newUnreachableMembers = localUnreachableMembers filterNot { _.address == node } - val newOverview = localOverview copy (unreachable = newUnreachableMembers) + if (!localMembers.exists(_.address == node)) { - val newMembers = localMembers + Member(node, MemberStatus.Joining) // add joining node as Joining - val newGossip = localGossip copy (overview = newOverview, members = newMembers) + // remove the node from the 'unreachable' set in case it is a DOWN node that is rejoining cluster + val newUnreachableMembers = localGossip.overview.unreachable filterNot { _.address == node } + val newOverview = localGossip.overview copy (unreachable = newUnreachableMembers) - val versionedGossip = newGossip + vclockNode - val seenVersionedGossip = versionedGossip seen selfAddress + val newMembers = localMembers + Member(node, MemberStatus.Joining) // add joining node as Joining + val newGossip = 
localGossip copy (overview = newOverview, members = newMembers) - val newState = localState copy (latestGossip = seenVersionedGossip) + val versionedGossip = newGossip + vclockNode + val seenVersionedGossip = versionedGossip seen selfAddress - if (!state.compareAndSet(localState, newState)) joining(node) // recur if we failed update - else { - if (node != selfAddress) failureDetector heartbeat node + val newState = localState copy (latestGossip = seenVersionedGossip) - if (convergence(newState.latestGossip).isDefined) { - newState.memberMembershipChangeListeners foreach { _ notify newMembers } + if (!state.compareAndSet(localState, newState)) joining(node) // recur if we failed update + else { + if (node != selfAddress) failureDetector heartbeat node + + if (convergence(newState.latestGossip).isDefined) { + newState.memberMembershipChangeListeners foreach { _ notify newMembers } + } } } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala index eafdf2fffd..b5fc5d626b 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala @@ -9,6 +9,8 @@ import akka.remote.testkit.MultiNodeConfig import akka.remote.testkit.MultiNodeSpec import akka.testkit._ import akka.util.duration._ +import scala.collection.immutable.SortedSet +import java.util.concurrent.atomic.AtomicReference object NodeUpMultiJvmSpec extends MultiNodeConfig { val first = role("first") @@ -33,7 +35,33 @@ abstract class NodeUpSpec awaitClusterUp(first, second) - testConductor.enter("after") + testConductor.enter("after-1") + } + + "be unaffected when joining again" taggedAs LongRunningTest in { + + val unexpected = new AtomicReference[SortedSet[Member]] + cluster.registerListener(new MembershipChangeListener { + def notify(members: SortedSet[Member]) { + if (members.size != 2 || members.exists(_.status != MemberStatus.Up)) + 
unexpected.set(members) + } + }) + testConductor.enter("listener-registered") + + runOn(second) { + cluster.join(node(first).address) + } + testConductor.enter("joined-again") + + // let it run for a while to make sure that nothing bad happens + for (n ← 1 to 20) { + 100.millis.dilated.sleep() + unexpected.get must be(null) + cluster.latestGossip.members.forall(_.status == MemberStatus.Up) + } + + testConductor.enter("after-2") } } } From 8c9d40eb00f927353eda94ccf3dcb0dae97ef302 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Thu, 7 Jun 2012 15:56:59 +0200 Subject: [PATCH 62/92] Add missing assert --- akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala index b5fc5d626b..f8d0a1f6e2 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala @@ -58,7 +58,7 @@ abstract class NodeUpSpec for (n ← 1 to 20) { 100.millis.dilated.sleep() unexpected.get must be(null) - cluster.latestGossip.members.forall(_.status == MemberStatus.Up) + cluster.latestGossip.members.forall(_.status == MemberStatus.Up) must be(true) } testConductor.enter("after-2") From fcd08ed2b95a1ed438b4dcb0009695aa1b73dbc1 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Thu, 7 Jun 2012 15:14:38 +0200 Subject: [PATCH 63/92] Test normal healthy cluster, see #2195 * Fix that membership listeners should only notified when something changed --- .../src/main/scala/akka/cluster/Cluster.scala | 32 ++++---- .../scala/akka/cluster/SunnyWeatherSpec.scala | 78 +++++++++++++++++++ .../akka/remote/testkit/MultiNodeSpec.scala | 2 +- 3 files changed, 92 insertions(+), 20 deletions(-) create mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala diff --git 
a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index 7d1222a7ab..e2b5c8e751 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -618,13 +618,16 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ if (!state.compareAndSet(localState, newState)) leaving(address) // recur if we failed update else { - failureDetector heartbeat address // update heartbeat in failure detector - if (convergence(newState.latestGossip).isDefined) { - newState.memberMembershipChangeListeners foreach { _ notify newMembers } - } + if (address != selfAddress) failureDetector heartbeat address // update heartbeat in failure detector + notifyMembershipChangeListeners(localState, newState) } } + private def notifyMembershipChangeListeners(oldState: State, newState: State): Unit = + if (newState.latestGossip != oldState.latestGossip && convergence(newState.latestGossip).isDefined) { + newState.memberMembershipChangeListeners foreach { _ notify newState.latestGossip.members } + } + /** * State transition to EXITING. 
*/ @@ -698,9 +701,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ if (!state.compareAndSet(localState, newState)) downing(address) // recur if we fail the update else { - if (convergence(newState.latestGossip).isDefined) { - newState.memberMembershipChangeListeners foreach { _ notify newState.latestGossip.members } - } + notifyMembershipChangeListeners(localState, newState) } } @@ -741,10 +742,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ log.debug("Cluster Node [{}] - Receiving gossip from [{}]", selfAddress, sender.address) if (sender.address != selfAddress) failureDetector heartbeat sender.address - - if (convergence(newState.latestGossip).isDefined) { - newState.memberMembershipChangeListeners foreach { _ notify newState.latestGossip.members } - } + notifyMembershipChangeListeners(localState, newState) } } @@ -841,14 +839,14 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ private[akka] def gossip(): Unit = { val localState = state.get + log.debug("Cluster Node [{}] - Initiating new round of gossip", selfAddress) + if (isSingletonCluster(localState)) { // gossip to myself // TODO could perhaps be optimized, no need to gossip to myself when Up? 
gossipTo(selfAddress) } else if (isAvailable(localState)) { - log.debug("Cluster Node [{}] - Initiating new round of gossip", selfAddress) - val localGossip = localState.latestGossip // important to not accidentally use `map` of the SortedSet, since the original order is not preserved val localMembers = localGossip.members.toIndexedSeq @@ -917,9 +915,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ else { log.info("Cluster Node [{}] - Marking node(s) as UNREACHABLE [{}]", selfAddress, newlyDetectedUnreachableMembers.mkString(", ")) - if (convergence(newState.latestGossip).isDefined) { - newState.memberMembershipChangeListeners foreach { _ notify newMembers } - } + notifyMembershipChangeListeners(localState, newState) } } } @@ -1040,9 +1036,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ // if we won the race then update else try again if (!state.compareAndSet(localState, newState)) leaderActions() // recur else { - if (convergence(newState.latestGossip).isDefined) { - newState.memberMembershipChangeListeners foreach { _ notify newGossip.members } - } + notifyMembershipChangeListeners(localState, newState) } } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala new file mode 100644 index 0000000000..b74fdd09db --- /dev/null +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala @@ -0,0 +1,78 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. 
+ */ +package akka.cluster + +import com.typesafe.config.ConfigFactory +import org.scalatest.BeforeAndAfter +import akka.remote.testkit.MultiNodeConfig +import akka.remote.testkit.MultiNodeSpec +import akka.testkit._ +import akka.util.duration._ +import java.util.concurrent.atomic.AtomicReference +import scala.collection.immutable.SortedSet + +object SunnyWeatherMultiJvmSpec extends MultiNodeConfig { + val first = role("first") + val second = role("second") + val third = role("third") + val fourth = role("fourth") + val fifth = role("fifth") + + commonConfig(ConfigFactory.parseString(""" + akka.cluster { + gossip-interval = 400 ms + nr-of-deputy-nodes = 0 + } + akka.loglevel = DEBUG + """)) +} + +class SunnyWeatherMultiJvmNode1 extends SunnyWeatherSpec +class SunnyWeatherMultiJvmNode2 extends SunnyWeatherSpec +class SunnyWeatherMultiJvmNode3 extends SunnyWeatherSpec +class SunnyWeatherMultiJvmNode4 extends SunnyWeatherSpec +class SunnyWeatherMultiJvmNode5 extends SunnyWeatherSpec + +abstract class SunnyWeatherSpec + extends MultiNodeSpec(SunnyWeatherMultiJvmSpec) + with MultiNodeClusterSpec { + + import SunnyWeatherMultiJvmSpec._ + + override def initialParticipants = roles.size + + "A normal cluster" must { + "be healthy" taggedAs LongRunningTest in { + + // start some + awaitClusterUp(first, second, third) + runOn(first, second, third) { + log.info("3 joined") + } + + // add a few more + awaitClusterUp(first, second, third, fourth, fifth) + log.info("5 joined") + + val unexpected = new AtomicReference[SortedSet[Member]] + cluster.registerListener(new MembershipChangeListener { + def notify(members: SortedSet[Member]) { + // we don't expected any changes to the cluster + unexpected.set(members) + } + }) + + for (n ← 1 to 40) { + testConductor.enter("period-" + n) + unexpected.get must be(null) + awaitUpConvergence(roles.size) + assertLeaderIn(roles) + if (n % 5 == 0) log.info("Passed period [{}]", n) + 1.seconds.sleep + } + + testConductor.enter("after") + } + } 
+} diff --git a/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala b/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala index 01a08da718..a0d7d5eac4 100644 --- a/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala +++ b/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala @@ -84,7 +84,7 @@ abstract class MultiNodeConfig { private[testkit] def deployments(node: RoleName): Seq[String] = (_deployments get node getOrElse Nil) ++ _allDeploy - private[testkit] def roles: Seq[RoleName] = _roles + def roles: Seq[RoleName] = _roles } From 2cbc04a5abf8ea3731d09a765e67595514f91629 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Thu, 7 Jun 2012 18:38:24 +0200 Subject: [PATCH 64/92] #2196 - Updating Logback dep to 1.0.0 --- project/AkkaBuild.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala index e4a865b5a7..b59bec9d42 100644 --- a/project/AkkaBuild.scala +++ b/project/AkkaBuild.scala @@ -484,7 +484,7 @@ object Dependency { object V { val Camel = "2.8.0" - val Logback = "0.9.28" + val Logback = "1.0.0" val Netty = "3.3.0.Final" val Protobuf = "2.4.1" val ScalaStm = "0.5" From f6365e83e74d743a9e4a223d297cdb3c1dbdeb64 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Thu, 7 Jun 2012 20:40:14 +0200 Subject: [PATCH 65/92] Change to logback 1.0.4, see #2198 --- akka-docs/java/logging.rst | 2 +- akka-docs/scala/logging.rst | 2 +- project/AkkaBuild.scala | 8 +------- 3 files changed, 3 insertions(+), 9 deletions(-) diff --git a/akka-docs/java/logging.rst b/akka-docs/java/logging.rst index 03de58de5b..647525ba76 100644 --- a/akka-docs/java/logging.rst +++ b/akka-docs/java/logging.rst @@ -187,7 +187,7 @@ It has one single dependency; the slf4j-api jar. 
In runtime you also need a SLF4 ch.qos.logback logback-classic - 1.0.0 + 1.0.4 runtime diff --git a/akka-docs/scala/logging.rst b/akka-docs/scala/logging.rst index 4ea96722e5..8f765b4f7e 100644 --- a/akka-docs/scala/logging.rst +++ b/akka-docs/scala/logging.rst @@ -230,7 +230,7 @@ It has one single dependency; the slf4j-api jar. In runtime you also need a SLF4 .. code-block:: scala - lazy val logback = "ch.qos.logback" % "logback-classic" % "1.0.0" % "runtime" + lazy val logback = "ch.qos.logback" % "logback-classic" % "1.0.4" % "runtime" You need to enable the Slf4jEventHandler in the 'event-handlers' element in diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala index b59bec9d42..736927e7c2 100644 --- a/project/AkkaBuild.scala +++ b/project/AkkaBuild.scala @@ -484,7 +484,7 @@ object Dependency { object V { val Camel = "2.8.0" - val Logback = "1.0.0" + val Logback = "1.0.4" val Netty = "3.3.0.Final" val Protobuf = "2.4.1" val ScalaStm = "0.5" @@ -501,12 +501,6 @@ object Dependency { val slf4jApi = "org.slf4j" % "slf4j-api" % V.Slf4j // MIT val zeroMQ = "org.zeromq" % "zeromq-scala-binding_2.9.1" % "0.0.6" // ApacheV2 - // Runtime - - object Runtime { - val logback = "ch.qos.logback" % "logback-classic" % V.Logback % "runtime" // MIT - } - // Test object Test { From 1b68ea7c9db76adf66887fd803f0fdabdf9d9cc7 Mon Sep 17 00:00:00 2001 From: Roland Date: Thu, 7 Jun 2012 15:19:28 +0200 Subject: [PATCH 66/92] document processAllSystemMessages semantics --- .../src/main/scala/akka/dispatch/Mailbox.scala | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala b/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala index 11e58ede7e..b6af478ac7 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala @@ -208,6 +208,13 @@ private[akka] abstract class Mailbox(val actor: ActorCell, val messageQueue: Mes } } + /** + * Will 
at least try to process all queued system messages: in case of + * failure simply drop and go on to the next, because there is nothing to + * restart here (failure is in ActorCell somewhere …). In case the mailbox + * becomes closed (because of processing a Terminate message), dump all + * already dequeued message to deadLetters. + */ final def processAllSystemMessages() { var failure: Throwable = null var nextMessage = systemDrain(null) @@ -216,8 +223,9 @@ private[akka] abstract class Mailbox(val actor: ActorCell, val messageQueue: Mes nextMessage = nextMessage.next msg.next = null if (debug) println(actor.self + " processing system message " + msg + " with " + actor.childrenRefs) - try actor systemInvoke msg - catch { + try { + actor systemInvoke msg + } catch { case NonFatal(e) ⇒ if (failure eq null) failure = e actor.system.eventStream.publish(Error(e, actor.self.path.toString, this.getClass, "exception during processing system message " + msg + ": " + e.getMessage)) From d4070d36ab7cc1fbfe9fb707e780ef0ab1f1c228 Mon Sep 17 00:00:00 2001 From: Roland Date: Thu, 7 Jun 2012 23:56:04 +0200 Subject: [PATCH 67/92] remove LocalActorRef.systemService (residue, very old) --- akka-actor/src/main/scala/akka/actor/ActorRef.scala | 3 +-- akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala | 4 ++-- akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala | 3 +-- 3 files changed, 4 insertions(+), 6 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorRef.scala b/akka-actor/src/main/scala/akka/actor/ActorRef.scala index 861df570b7..0620a73a28 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRef.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRef.scala @@ -227,8 +227,7 @@ private[akka] class LocalActorRef private[akka] ( _system: ActorSystemImpl, _props: Props, _supervisor: InternalActorRef, - override val path: ActorPath, - val systemService: Boolean = false) + override val path: ActorPath) extends InternalActorRef with LocalRef { /* diff 
--git a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala index 960e8a37e5..4c200b204c 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala @@ -473,7 +473,7 @@ class LocalActorRefProvider( private val guardianProps = Props(new Guardian) lazy val rootGuardian: InternalActorRef = - new LocalActorRef(system, guardianProps, theOneWhoWalksTheBubblesOfSpaceTime, rootPath, true) { + new LocalActorRef(system, guardianProps, theOneWhoWalksTheBubblesOfSpaceTime, rootPath) { override def getParent: InternalActorRef = this override def getSingleChild(name: String): InternalActorRef = name match { case "temp" ⇒ tempContainer @@ -541,7 +541,7 @@ class LocalActorRefProvider( def actorOf(system: ActorSystemImpl, props: Props, supervisor: InternalActorRef, path: ActorPath, systemService: Boolean, deploy: Option[Deploy], lookupDeploy: Boolean): InternalActorRef = { props.routerConfig match { - case NoRouter ⇒ new LocalActorRef(system, props, supervisor, path, systemService) // create a local actor + case NoRouter ⇒ new LocalActorRef(system, props, supervisor, path) // create a local actor case router ⇒ val lookup = if (lookupDeploy) deployer.lookup(path) else None val fromProps = Iterator(props.deploy.copy(routerConfig = props.deploy.routerConfig withFallback router)) diff --git a/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala b/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala index c0442d45d6..ed151b6b12 100644 --- a/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala +++ b/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala @@ -31,8 +31,7 @@ class TestActorRef[T <: Actor]( if (_props.dispatcher == Dispatchers.DefaultDispatcherId) CallingThreadDispatcher.Id else _props.dispatcher), _supervisor, - _supervisor.path / name, - false) { + _supervisor.path / name) { import 
TestActorRef.InternalGetActor From 6a380550f90362c1391ef19086fdb0b78024d26d Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Fri, 8 Jun 2012 08:59:05 +0200 Subject: [PATCH 68/92] Notify MembershipChangeListeners when 'members' change --- .../src/main/scala/akka/cluster/Cluster.scala | 13 ++++++------- .../MembershipChangeListenerSpec.scala | 19 ++++++++++++------- .../scala/akka/cluster/SunnyWeatherSpec.scala | 2 +- 3 files changed, 19 insertions(+), 15 deletions(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index e2b5c8e751..4ea43d50e4 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -589,10 +589,7 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ if (!state.compareAndSet(localState, newState)) joining(node) // recur if we failed update else { if (node != selfAddress) failureDetector heartbeat node - - if (convergence(newState.latestGossip).isDefined) { - newState.memberMembershipChangeListeners foreach { _ notify newMembers } - } + notifyMembershipChangeListeners(localState, newState) } } } @@ -623,10 +620,12 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ } } - private def notifyMembershipChangeListeners(oldState: State, newState: State): Unit = - if (newState.latestGossip != oldState.latestGossip && convergence(newState.latestGossip).isDefined) { + private def notifyMembershipChangeListeners(oldState: State, newState: State): Unit = { + val oldMembersStatus = oldState.latestGossip.members.toSeq.map(m ⇒ (m.address, m.status)) + val newMembersStatus = newState.latestGossip.members.toSeq.map(m ⇒ (m.address, m.status)) + if (newMembersStatus != oldMembersStatus) newState.memberMembershipChangeListeners foreach { _ notify newState.latestGossip.members } - } + } /** * State transition to EXITING. 
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala index c87a280e17..9e190050f9 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala @@ -37,7 +37,6 @@ abstract class MembershipChangeListenerSpec extends MultiNodeSpec(MembershipChan awaitClusterUp(first) runOn(first, second) { - cluster.join(firstAddress) val latch = TestLatch() cluster.registerListener(new MembershipChangeListener { def notify(members: SortedSet[Member]) { @@ -45,8 +44,13 @@ abstract class MembershipChangeListenerSpec extends MultiNodeSpec(MembershipChan latch.countDown() } }) + testConductor.enter("listener-1-registered") + cluster.join(firstAddress) latch.await - cluster.convergence.isDefined must be(true) + } + + runOn(third) { + testConductor.enter("listener-1-registered") } testConductor.enter("after-1") @@ -54,10 +58,6 @@ abstract class MembershipChangeListenerSpec extends MultiNodeSpec(MembershipChan "(when three nodes) after cluster convergence updates the membership table then all MembershipChangeListeners should be triggered" taggedAs LongRunningTest in { - runOn(third) { - cluster.join(firstAddress) - } - val latch = TestLatch() cluster.registerListener(new MembershipChangeListener { def notify(members: SortedSet[Member]) { @@ -65,8 +65,13 @@ abstract class MembershipChangeListenerSpec extends MultiNodeSpec(MembershipChan latch.countDown() } }) + testConductor.enter("listener-2-registered") + + runOn(third) { + cluster.join(firstAddress) + } + latch.await - cluster.convergence.isDefined must be(true) testConductor.enter("after-2") } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala index b74fdd09db..f4f42f0117 100644 --- 
a/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala @@ -52,7 +52,7 @@ abstract class SunnyWeatherSpec } // add a few more - awaitClusterUp(first, second, third, fourth, fifth) + awaitClusterUp(roles: _*) log.info("5 joined") val unexpected = new AtomicReference[SortedSet[Member]] From 56735477b8758c51ed762629ca1afac7dcbbb96d Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Fri, 8 Jun 2012 09:23:36 +0200 Subject: [PATCH 69/92] initialParticipants default as roles.size in cluster tests --- .../ClientDowningNodeThatIsUnreachableSpec.scala | 2 -- .../akka/cluster/ClientDowningNodeThatIsUpSpec.scala | 2 -- .../multi-jvm/scala/akka/cluster/ConvergenceSpec.scala | 2 -- .../cluster/GossipingAccrualFailureDetectorSpec.scala | 2 -- .../scala/akka/cluster/JoinTwoClustersSpec.scala | 2 -- .../LeaderDowningNodeThatIsUnreachableSpec.scala | 2 -- .../scala/akka/cluster/LeaderElectionSpec.scala | 10 ++++------ .../cluster/MembershipChangeListenerExitingSpec.scala | 2 -- .../cluster/MembershipChangeListenerJoinSpec.scala | 2 -- .../cluster/MembershipChangeListenerLeavingSpec.scala | 2 -- .../akka/cluster/MembershipChangeListenerSpec.scala | 2 -- .../akka/cluster/MembershipChangeListenerUpSpec.scala | 2 -- .../scala/akka/cluster/MultiNodeClusterSpec.scala | 2 ++ .../multi-jvm/scala/akka/cluster/NodeJoinSpec.scala | 2 -- .../NodeLeavingAndExitingAndBeingRemovedSpec.scala | 2 -- .../scala/akka/cluster/NodeLeavingAndExitingSpec.scala | 2 -- .../multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala | 2 -- .../scala/akka/cluster/NodeMembershipSpec.scala | 2 -- .../scala/akka/cluster/NodeShutdownSpec.scala | 2 -- .../src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala | 2 -- .../scala/akka/cluster/SunnyWeatherSpec.scala | 2 -- .../test/scala/akka/remote/testkit/MultiNodeSpec.scala | 9 +++++++-- 22 files changed, 13 insertions(+), 46 deletions(-) diff --git 
a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala index d78afcdeb7..6d4d09f7cb 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala @@ -29,8 +29,6 @@ class ClientDowningNodeThatIsUnreachableSpec import ClientDowningNodeThatIsUnreachableMultiJvmSpec._ - override def initialParticipants = 4 - "Client of a 4 node cluster" must { "be able to DOWN a node that is UNREACHABLE (killed)" taggedAs LongRunningTest in { diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala index 5f778c25d1..db00438c9e 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala @@ -29,8 +29,6 @@ class ClientDowningNodeThatIsUpSpec import ClientDowningNodeThatIsUpMultiJvmSpec._ - override def initialParticipants = 4 - "Client of a 4 node cluster" must { "be able to DOWN a node that is UP (healthy and available)" taggedAs LongRunningTest in { diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala index a7e5712cfa..9963903b90 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala @@ -35,8 +35,6 @@ abstract class ConvergenceSpec with MultiNodeClusterSpec { import ConvergenceMultiJvmSpec._ - override def initialParticipants = 4 - "A cluster of 3 members" must { "reach initial convergence" taggedAs LongRunningTest in { diff --git 
a/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala index 9df3e20d68..f75ca3b058 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala @@ -27,8 +27,6 @@ abstract class GossipingAccrualFailureDetectorSpec extends MultiNodeSpec(Gossipi with MultiNodeClusterSpec { import GossipingAccrualFailureDetectorMultiJvmSpec._ - override def initialParticipants = 3 - lazy val firstAddress = node(first).address lazy val secondAddress = node(second).address lazy val thirdAddress = node(third).address diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala index e01839684a..e86602949f 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala @@ -33,8 +33,6 @@ abstract class JoinTwoClustersSpec import JoinTwoClustersMultiJvmSpec._ - override def initialParticipants = 6 - lazy val a1Address = node(a1).address lazy val b1Address = node(b1).address lazy val c1Address = node(c1).address diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala index ffbd4eb287..616c412556 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala @@ -37,8 +37,6 @@ class LeaderDowningNodeThatIsUnreachableSpec import LeaderDowningNodeThatIsUnreachableMultiJvmSpec._ - override def initialParticipants = 4 - "The Leader in a 4 node cluster" must { "be able to DOWN 
a 'last' node that is UNREACHABLE" taggedAs LongRunningTest in { diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala index ce4d5a8042..43f0fc19eb 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala @@ -31,10 +31,8 @@ abstract class LeaderElectionSpec import LeaderElectionMultiJvmSpec._ - override def initialParticipants = 5 - // sorted in the order used by the cluster - lazy val roles = Seq(first, second, third, fourth).sorted + lazy val sortedRoles = Seq(first, second, third, fourth).sorted "A cluster of four nodes" must { @@ -42,15 +40,15 @@ abstract class LeaderElectionSpec awaitClusterUp(first, second, third, fourth) if (myself != controller) { - cluster.isLeader must be(myself == roles.head) - assertLeaderIn(roles) + cluster.isLeader must be(myself == sortedRoles.head) + assertLeaderIn(sortedRoles) } testConductor.enter("after") } def shutdownLeaderAndVerifyNewLeader(alreadyShutdown: Int): Unit = { - val currentRoles = roles.drop(alreadyShutdown) + val currentRoles = sortedRoles.drop(alreadyShutdown) currentRoles.size must be >= (2) val leader = currentRoles.head val aUser = currentRoles.last diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala index cdf809187a..d76c3cf689 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala @@ -37,8 +37,6 @@ abstract class MembershipChangeListenerExitingSpec import MembershipChangeListenerExitingMultiJvmSpec._ - override def initialParticipants = 3 - lazy val firstAddress = node(first).address lazy val secondAddress = node(second).address 
lazy val thirdAddress = node(third).address diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinSpec.scala index c07ec19f77..bdf8f7d44d 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinSpec.scala @@ -34,8 +34,6 @@ abstract class MembershipChangeListenerJoinSpec import MembershipChangeListenerJoinMultiJvmSpec._ - override def initialParticipants = 2 - lazy val firstAddress = node(first).address lazy val secondAddress = node(second).address diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerLeavingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerLeavingSpec.scala index 41b69ce7b4..1ff11465bb 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerLeavingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerLeavingSpec.scala @@ -34,8 +34,6 @@ abstract class MembershipChangeListenerLeavingSpec import MembershipChangeListenerLeavingMultiJvmSpec._ - override def initialParticipants = 3 - lazy val firstAddress = node(first).address lazy val secondAddress = node(second).address lazy val thirdAddress = node(third).address diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala index 9e190050f9..c48727b1cd 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala @@ -25,8 +25,6 @@ abstract class MembershipChangeListenerSpec extends MultiNodeSpec(MembershipChan with MultiNodeClusterSpec { import MembershipChangeListenerMultiJvmSpec._ - override def 
initialParticipants = 3 - lazy val firstAddress = node(first).address lazy val secondAddress = node(second).address diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala index 7709e9854a..3e22dd456d 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala @@ -27,8 +27,6 @@ abstract class MembershipChangeListenerUpSpec import MembershipChangeListenerUpMultiJvmSpec._ - override def initialParticipants = 2 - lazy val firstAddress = node(first).address lazy val secondAddress = node(second).address diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala index 7f7d60fcdc..b185067ab0 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala @@ -30,6 +30,8 @@ object MultiNodeClusterSpec { trait MultiNodeClusterSpec { self: MultiNodeSpec ⇒ + override def initialParticipants = roles.size + /** * Get or create a cluster node using 'Cluster(system)' extension. 
*/ diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinSpec.scala index 0d6a50b82a..066e86aae6 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinSpec.scala @@ -33,8 +33,6 @@ abstract class NodeJoinSpec import NodeJoinMultiJvmSpec._ - override def initialParticipants = 2 - lazy val firstAddress = node(first).address lazy val secondAddress = node(second).address diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala index a974930d0a..8e274be311 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala @@ -28,8 +28,6 @@ abstract class NodeLeavingAndExitingAndBeingRemovedSpec import NodeLeavingAndExitingAndBeingRemovedMultiJvmSpec._ - override def initialParticipants = 3 - lazy val firstAddress = node(first).address lazy val secondAddress = node(second).address lazy val thirdAddress = node(third).address diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala index 3773ccbd5d..79fff4770f 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala @@ -36,8 +36,6 @@ abstract class NodeLeavingAndExitingSpec import NodeLeavingAndExitingMultiJvmSpec._ - override def initialParticipants = 3 - lazy val firstAddress = node(first).address lazy val secondAddress = node(second).address lazy val thirdAddress = node(third).address diff --git 
a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala index 96876cf4cb..b834492045 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala @@ -30,8 +30,6 @@ abstract class NodeLeavingSpec extends MultiNodeSpec(NodeLeavingMultiJvmSpec) with MultiNodeClusterSpec { import NodeLeavingMultiJvmSpec._ - override def initialParticipants = 3 - lazy val firstAddress = node(first).address lazy val secondAddress = node(second).address lazy val thirdAddress = node(third).address diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala index edd3e44121..ef65cefd0f 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala @@ -26,8 +26,6 @@ abstract class NodeMembershipSpec import NodeMembershipMultiJvmSpec._ - override def initialParticipants = 3 - lazy val firstAddress = node(first).address lazy val secondAddress = node(second).address lazy val thirdAddress = node(third).address diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala index b54c0c1b39..4dc90a5b89 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala @@ -30,8 +30,6 @@ class NodeShutdownMultiJvmNode2 extends NodeShutdownSpec abstract class NodeShutdownSpec extends MultiNodeSpec(NodeShutdownMultiJvmSpec) with MultiNodeClusterSpec { import NodeShutdownMultiJvmSpec._ - override def initialParticipants = 2 - "A cluster of 2 nodes" must { "not be singleton cluster when joined" taggedAs LongRunningTest in { diff --git 
a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala index f8d0a1f6e2..6cb8bf9e07 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala @@ -28,8 +28,6 @@ abstract class NodeUpSpec import NodeUpMultiJvmSpec._ - override def initialParticipants = 2 - "A cluster node that is joining another cluster" must { "be moved to UP by the leader after a convergence" taggedAs LongRunningTest in { diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala index f4f42f0117..c2f8e8d3f5 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala @@ -40,8 +40,6 @@ abstract class SunnyWeatherSpec import SunnyWeatherMultiJvmSpec._ - override def initialParticipants = roles.size - "A normal cluster" must { "be healthy" taggedAs LongRunningTest in { diff --git a/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala b/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala index a0d7d5eac4..faaab5cdc4 100644 --- a/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala +++ b/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala @@ -84,7 +84,7 @@ abstract class MultiNodeConfig { private[testkit] def deployments(node: RoleName): Seq[String] = (_deployments get node getOrElse Nil) ++ _allDeploy - def roles: Seq[RoleName] = _roles + private[testkit] def roles: Seq[RoleName] = _roles } @@ -131,7 +131,7 @@ object MultiNodeSpec { * `AskTimeoutException: sending to terminated ref breaks promises`. Using lazy * val is fine. 
*/ -abstract class MultiNodeSpec(val myself: RoleName, _system: ActorSystem, roles: Seq[RoleName], deployments: RoleName ⇒ Seq[String]) +abstract class MultiNodeSpec(val myself: RoleName, _system: ActorSystem, _roles: Seq[RoleName], deployments: RoleName ⇒ Seq[String]) extends AkkaSpec(_system) { import MultiNodeSpec._ @@ -143,6 +143,11 @@ abstract class MultiNodeSpec(val myself: RoleName, _system: ActorSystem, roles: * Test Class Interface */ + /** + * All registered roles + */ + def roles: Seq[RoleName] = _roles + /** * TO BE DEFINED BY USER: Defines the number of participants required for starting the test. This * might not be equals to the number of nodes available to the test. From 233b9a6291a9a06e91907f8fd3291d456d7e846f Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Fri, 8 Jun 2012 09:41:42 +0200 Subject: [PATCH 70/92] Change loglevel to info, gossiping verification done, see #2195 --- .../src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala index c2f8e8d3f5..fcb1393f8a 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala @@ -24,7 +24,7 @@ object SunnyWeatherMultiJvmSpec extends MultiNodeConfig { gossip-interval = 400 ms nr-of-deputy-nodes = 0 } - akka.loglevel = DEBUG + akka.loglevel = INFO """)) } @@ -61,7 +61,7 @@ abstract class SunnyWeatherSpec } }) - for (n ← 1 to 40) { + for (n ← 1 to 30) { testConductor.enter("period-" + n) unexpected.get must be(null) awaitUpConvergence(roles.size) From 531e675ef9020859c96b130b07ed5301173d2c46 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Fri, 8 Jun 2012 11:20:19 +0200 Subject: [PATCH 71/92] Ignore the leaving/exit failing tests --- .../cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala | 
3 ++- .../scala/akka/cluster/NodeLeavingAndExitingSpec.scala | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala index 8e274be311..d85016c714 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala @@ -36,7 +36,8 @@ abstract class NodeLeavingAndExitingAndBeingRemovedSpec "A node that is LEAVING a non-singleton cluster" must { - "be moved to EXITING and then to REMOVED by the reaper" taggedAs LongRunningTest in { + // FIXME make it work and remove ignore + "be moved to EXITING and then to REMOVED by the reaper" taggedAs LongRunningTest ignore { awaitClusterUp(first, second, third) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala index 79fff4770f..2909362fa7 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala @@ -42,7 +42,8 @@ abstract class NodeLeavingAndExitingSpec "A node that is LEAVING a non-singleton cluster" must { - "be moved to EXITING by the leader" taggedAs LongRunningTest in { + // FIXME make it work and remove ignore + "be moved to EXITING by the leader" taggedAs LongRunningTest ignore { awaitClusterUp(first, second, third) From c7af802dc8c9586c549d9a6fd13814c6c70e53bd Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Fri, 8 Jun 2012 11:30:44 +0200 Subject: [PATCH 72/92] Turn on debug logging due to failures --- .../src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala index 4dc90a5b89..37d4b4571e 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala @@ -13,7 +13,7 @@ object NodeShutdownMultiJvmSpec extends MultiNodeConfig { val first = role("first") val second = role("second") - commonConfig(debugConfig(on = false). + commonConfig(debugConfig(on = true). withFallback(ConfigFactory.parseString(""" akka.cluster { auto-down = on From dcae863f7fbe4de0b57c0634daeb4e99de0416a9 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Fri, 8 Jun 2012 13:44:40 +0200 Subject: [PATCH 73/92] Use all heartbeats in failure detector, see #2182 * Failure detector didn't use heartbeat 1 and 2 * Included heartbeat 2 in ordinary stats * For heartbeat 1 use guess stats, important so that connections with only one heartbeat become unavailable, the guess corresponds to 1 second interval which results in phi > 8 after 18 seconds * Improved AccrualFailureDetectorSpec --- .../akka/cluster/AccrualFailureDetector.scala | 60 ++++++------- ...LeavingAndExitingAndBeingRemovedSpec.scala | 3 +- .../cluster/NodeLeavingAndExitingSpec.scala | 3 +- .../scala/akka/cluster/NodeShutdownSpec.scala | 2 +- .../cluster/AccrualFailureDetectorSpec.scala | 84 ++++++++++--------- 5 files changed, 78 insertions(+), 74 deletions(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala b/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala index 3caece392c..c86eb3361e 100644 --- a/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala +++ b/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala @@ -36,7 +36,11 @@ class AccrualFailureDetector( /** * Holds the failure statistics for a specific node Address. 
*/ - private case class FailureStats(mean: Double = 0.0D, variance: Double = 0.0D, deviation: Double = 0.0D) + private case class FailureStats(mean: Double = 0.0, variance: Double = 0.0, deviation: Double = 0.0) + + // guess statistics for first heartbeat, + // important so that connections with only one heartbeat becomes unavailble + private val failureStatsFirstHeartbeat = FailureStats(mean = 1000.0) /** * Implement using optimistic lockless concurrency, all state is represented @@ -72,7 +76,7 @@ class AccrualFailureDetector( // add starter records for this new connection val newState = oldState copy ( version = oldState.version + 1, - failureStats = oldState.failureStats + (connection -> FailureStats()), + failureStats = oldState.failureStats + (connection -> failureStatsFirstHeartbeat), intervalHistory = oldState.intervalHistory + (connection -> IndexedSeq.empty[Long]), timestamps = oldState.timestamps + (connection -> timeMachine()), explicitRemovals = oldState.explicitRemovals - connection) @@ -93,30 +97,25 @@ class AccrualFailureDetector( case _ ⇒ IndexedSeq.empty[Long] }) :+ interval - val newFailureStats = - if (newIntervalsForConnection.size > 1) { + val newFailureStats = { + val newMean: Double = newIntervalsForConnection.sum.toDouble / newIntervalsForConnection.size - val newMean: Double = newIntervalsForConnection.sum / newIntervalsForConnection.size.toDouble - - val oldConnectionFailureStats = oldState.failureStats.get(connection).getOrElse { - throw new IllegalStateException("Can't calculate new failure statistics due to missing heartbeat history") - } - - val deviationSum = - newIntervalsForConnection - .map(_.toDouble) - .foldLeft(0.0D)((x, y) ⇒ x + (y - newMean)) - - val newVariance: Double = deviationSum / newIntervalsForConnection.size.toDouble - val newDeviation: Double = math.sqrt(newVariance) - - val newFailureStats = oldConnectionFailureStats copy (mean = newMean, deviation = newDeviation, variance = newVariance) - oldState.failureStats + 
(connection -> newFailureStats) - - } else { - oldState.failureStats + val oldConnectionFailureStats = oldState.failureStats.get(connection).getOrElse { + throw new IllegalStateException("Can't calculate new failure statistics due to missing heartbeat history") } + val deviationSum = + newIntervalsForConnection + .map(_.toDouble) + .foldLeft(0.0)((x, y) ⇒ x + (y - newMean)) + + val newVariance: Double = deviationSum / newIntervalsForConnection.size + val newDeviation: Double = math.sqrt(newVariance) + + val newFailureStats = oldConnectionFailureStats copy (mean = newMean, deviation = newDeviation, variance = newVariance) + oldState.failureStats + (connection -> newFailureStats) + } + val newState = oldState copy (version = oldState.version + 1, failureStats = newFailureStats, intervalHistory = oldState.intervalHistory + (connection -> newIntervalsForConnection), @@ -132,8 +131,7 @@ class AccrualFailureDetector( * Calculates how likely it is that the connection has failed. *

* If a connection does not have any records in failure detector then it is - * considered dead. This is true either if the heartbeat have not started - * yet or the connection have been explicitly removed. + * considered healthy. *

* Implementations of 'Cumulative Distribution Function' for Exponential Distribution. * For a discussion on the math read [https://issues.apache.org/jira/browse/CASSANDRA-2597]. @@ -145,21 +143,22 @@ class AccrualFailureDetector( val phi = // if connection has been removed explicitly if (oldState.explicitRemovals.contains(connection)) Double.MaxValue - else if (oldTimestamp.isEmpty) 0.0D // treat unmanaged connections, e.g. with zero heartbeats, as healthy connections + else if (oldTimestamp.isEmpty) 0.0 // treat unmanaged connections, e.g. with zero heartbeats, as healthy connections else { val timestampDiff = timeMachine() - oldTimestamp.get val mean = oldState.failureStats.get(connection) match { case Some(FailureStats(mean, _, _)) ⇒ mean - case _ ⇒ throw new IllegalStateException("Can't calculate Failure Detector Phi value for a node that have no heartbeat history") + case _ ⇒ + if (!oldState.intervalHistory.contains(connection)) 1000.0 + else throw new IllegalStateException("Can't calculate Failure Detector Phi value for a node that have no heartbeat history") } - if (mean == 0.0D) 0.0D + if (mean == 0.0) 0.0 else PhiFactor * timestampDiff / mean } - // only log if PHI value is starting to get interesting - if (phi > 0.0D) log.debug("Node [{}] - Phi value [{}] and threshold [{}] for connection [{}] ", address, phi, threshold, connection) + log.debug("Node [{}] - Phi value [{}] and threshold [{}] for connection [{}] ", address, phi, threshold, connection) phi } @@ -168,6 +167,7 @@ class AccrualFailureDetector( */ @tailrec final def remove(connection: Address) { + log.debug("Node [{}] - Remove connection [{}] ", address, connection) val oldState = state.get if (oldState.failureStats.contains(connection)) { diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala index d85016c714..8e274be311 100644 --- 
a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala @@ -36,8 +36,7 @@ abstract class NodeLeavingAndExitingAndBeingRemovedSpec "A node that is LEAVING a non-singleton cluster" must { - // FIXME make it work and remove ignore - "be moved to EXITING and then to REMOVED by the reaper" taggedAs LongRunningTest ignore { + "be moved to EXITING and then to REMOVED by the reaper" taggedAs LongRunningTest in { awaitClusterUp(first, second, third) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala index 2909362fa7..79fff4770f 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala @@ -42,8 +42,7 @@ abstract class NodeLeavingAndExitingSpec "A node that is LEAVING a non-singleton cluster" must { - // FIXME make it work and remove ignore - "be moved to EXITING by the leader" taggedAs LongRunningTest ignore { + "be moved to EXITING by the leader" taggedAs LongRunningTest in { awaitClusterUp(first, second, third) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala index 37d4b4571e..4dc90a5b89 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala @@ -13,7 +13,7 @@ object NodeShutdownMultiJvmSpec extends MultiNodeConfig { val first = role("first") val second = role("second") - commonConfig(debugConfig(on = true). + commonConfig(debugConfig(on = false). 
withFallback(ConfigFactory.parseString(""" akka.cluster { auto-down = on diff --git a/akka-cluster/src/test/scala/akka/cluster/AccrualFailureDetectorSpec.scala b/akka-cluster/src/test/scala/akka/cluster/AccrualFailureDetectorSpec.scala index 173ce799f8..1cf62daf1c 100644 --- a/akka-cluster/src/test/scala/akka/cluster/AccrualFailureDetectorSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/AccrualFailureDetectorSpec.scala @@ -17,7 +17,7 @@ class AccrualFailureDetectorSpec extends AkkaSpec(""" val conn = Address("akka", "", "localhost", 2552) val conn2 = Address("akka", "", "localhost", 2553) - def fakeTimeGenerator(timeIntervals: List[Long]): () ⇒ Long = { + def fakeTimeGenerator(timeIntervals: Seq[Long]): () ⇒ Long = { var times = timeIntervals.tail.foldLeft(List[Long](timeIntervals.head))((acc, c) ⇒ acc ::: List[Long](acc.last + c)) def timeGenerator(): Long = { val currentTime = times.head @@ -27,22 +27,47 @@ class AccrualFailureDetectorSpec extends AkkaSpec(""" timeGenerator } - "return phi value of 0.0D on startup for each address" in { + "return phi value of 0.0 on startup for each address, when no heartbeats" in { val fd = new AccrualFailureDetector(system, conn) - fd.phi(conn) must be(0.0D) - fd.phi(conn2) must be(0.0D) + fd.phi(conn) must be(0.0) + fd.phi(conn2) must be(0.0) + } + + "return phi based on guess when only one heartbeat" in { + // 1 second ticks + val timeInterval = Vector.fill(30)(1000L) + val fd = new AccrualFailureDetector(system, conn, + timeMachine = fakeTimeGenerator(timeInterval)) + + fd.heartbeat(conn) + fd.phi(conn) must be > (0.0) + // let time go + for (n ← 2 to 8) + fd.phi(conn) must be < (4.0) + for (n ← 9 to 18) + fd.phi(conn) must be < (8.0) + + fd.phi(conn) must be > (8.0) + } + + "return phi value using first interval after second heartbeat" in { + val timeInterval = List[Long](0, 100, 100, 100) + val fd = new AccrualFailureDetector(system, conn, + timeMachine = fakeTimeGenerator(timeInterval)) + + fd.heartbeat(conn) + 
fd.phi(conn) must be > (0.0) + fd.heartbeat(conn) + fd.phi(conn) must be > (0.0) } "mark node as available after a series of successful heartbeats" in { val timeInterval = List[Long](0, 1000, 100, 100) - val ft = fakeTimeGenerator(timeInterval) - - val fd = new AccrualFailureDetector(system, conn, timeMachine = ft) + val fd = new AccrualFailureDetector(system, conn, + timeMachine = fakeTimeGenerator(timeInterval)) fd.heartbeat(conn) - fd.heartbeat(conn) - fd.heartbeat(conn) fd.isAvailable(conn) must be(true) @@ -50,18 +75,13 @@ class AccrualFailureDetectorSpec extends AkkaSpec(""" "mark node as dead after explicit removal of connection" in { val timeInterval = List[Long](0, 1000, 100, 100, 100) - val ft = fakeTimeGenerator(timeInterval) - - val fd = new AccrualFailureDetector(system, conn, timeMachine = ft) + val fd = new AccrualFailureDetector(system, conn, + timeMachine = fakeTimeGenerator(timeInterval)) fd.heartbeat(conn) - fd.heartbeat(conn) - fd.heartbeat(conn) - fd.isAvailable(conn) must be(true) - fd.remove(conn) fd.isAvailable(conn) must be(false) @@ -69,14 +89,12 @@ class AccrualFailureDetectorSpec extends AkkaSpec(""" "mark node as available after explicit removal of connection and receiving heartbeat again" in { val timeInterval = List[Long](0, 1000, 100, 1100, 1100, 1100, 1100, 1100, 100) - val ft = fakeTimeGenerator(timeInterval) - - val fd = new AccrualFailureDetector(system, conn, timeMachine = ft) + val fd = new AccrualFailureDetector(system, conn, + timeMachine = fakeTimeGenerator(timeInterval)) fd.heartbeat(conn) //0 fd.heartbeat(conn) //1000 - fd.heartbeat(conn) //1100 fd.isAvailable(conn) must be(true) //2200 @@ -87,9 +105,7 @@ class AccrualFailureDetectorSpec extends AkkaSpec(""" // it receives heartbeat from an explicitly removed node fd.heartbeat(conn) //4400 - fd.heartbeat(conn) //5500 - fd.heartbeat(conn) //6600 fd.isAvailable(conn) must be(true) //6700 @@ -98,40 +114,29 @@ class AccrualFailureDetectorSpec extends AkkaSpec(""" "mark node as 
dead if heartbeat are missed" in { val timeInterval = List[Long](0, 1000, 100, 100, 5000) val ft = fakeTimeGenerator(timeInterval) - - val fd = new AccrualFailureDetector(system, conn, threshold = 3, timeMachine = ft) + val fd = new AccrualFailureDetector(system, conn, threshold = 3, + timeMachine = fakeTimeGenerator(timeInterval)) fd.heartbeat(conn) //0 - fd.heartbeat(conn) //1000 - fd.heartbeat(conn) //1100 fd.isAvailable(conn) must be(true) //1200 - fd.isAvailable(conn) must be(false) //6200 } "mark node as available if it starts heartbeat again after being marked dead due to detection of failure" in { val timeInterval = List[Long](0, 1000, 100, 1100, 5000, 100, 1000, 100, 100) - val ft = fakeTimeGenerator(timeInterval) - - val fd = new AccrualFailureDetector(system, conn, threshold = 3, timeMachine = ft) + val fd = new AccrualFailureDetector(system, conn, threshold = 3, + timeMachine = fakeTimeGenerator(timeInterval)) fd.heartbeat(conn) //0 - fd.heartbeat(conn) //1000 - fd.heartbeat(conn) //1100 - fd.isAvailable(conn) must be(true) //1200 - fd.isAvailable(conn) must be(false) //6200 - fd.heartbeat(conn) //6300 - fd.heartbeat(conn) //7300 - fd.heartbeat(conn) //7400 fd.isAvailable(conn) must be(true) //7500 @@ -139,8 +144,8 @@ class AccrualFailureDetectorSpec extends AkkaSpec(""" "use maxSampleSize heartbeats" in { val timeInterval = List[Long](0, 100, 100, 100, 100, 600, 1000, 1000, 1000, 1000, 1000) - val ft = fakeTimeGenerator(timeInterval) - val fd = new AccrualFailureDetector(system, conn, maxSampleSize = 3, timeMachine = ft) + val fd = new AccrualFailureDetector(system, conn, maxSampleSize = 3, + timeMachine = fakeTimeGenerator(timeInterval)) // 100 ms interval fd.heartbeat(conn) //0 @@ -156,5 +161,6 @@ class AccrualFailureDetectorSpec extends AkkaSpec(""" val phi2 = fd.phi(conn) //5000 phi2 must be(phi1.plusOrMinus(0.001)) } + } } From 18260a3b7bcda1055572eef472cb654d62c25604 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Fri, 8 Jun 2012 13:56:53 +0200 
Subject: [PATCH 74/92] #2203 - publish failed message deliveries to DeadLetters when bounded or durable mailbox enqueues fail --- .../actor/ActorWithBoundedStashSpec.scala | 51 ++++++------------- .../akka/dispatch/MailboxConfigSpec.scala | 10 ++-- .../main/scala/akka/dispatch/Mailbox.scala | 35 +++++-------- 3 files changed, 32 insertions(+), 64 deletions(-) diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorWithBoundedStashSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorWithBoundedStashSpec.scala index 9d411268aa..33283b18cf 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ActorWithBoundedStashSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ActorWithBoundedStashSpec.scala @@ -6,7 +6,7 @@ package akka.actor import akka.testkit._ import akka.testkit.DefaultTimeout import akka.testkit.TestEvent._ -import akka.dispatch.{ Await, MessageQueueAppendFailedException, BoundedDequeBasedMailbox } +import akka.dispatch.{ Await, BoundedDequeBasedMailbox } import akka.pattern.ask import akka.util.duration._ import akka.actor.ActorSystem.Settings @@ -17,16 +17,8 @@ object ActorWithBoundedStashSpec { class StashingActor(implicit sys: ActorSystem) extends Actor with Stash { def receive = { - case "hello" ⇒ - stash() - sender ! "OK" - case "world" ⇒ - try { - unstashAll() - } catch { - case e: MessageQueueAppendFailedException ⇒ - expectedException.open() - } + case "hello" ⇒ stash() + case "world" ⇒ unstashAll() } } @@ -36,18 +28,10 @@ object ActorWithBoundedStashSpec { def receive = { case "hello" ⇒ numStashed += 1 - try { - stash() - } catch { - case e: StashOverflowException ⇒ - if (numStashed == 21) stashOverflow.open() - } + try stash() catch { case e: StashOverflowException ⇒ if (numStashed == 21) sender ! 
"STASHOVERFLOW" } } } - @volatile var expectedException: TestLatch = null - @volatile var stashOverflow: TestLatch = null - val testConf: Config = ConfigFactory.parseString(""" my-dispatcher { mailbox-type = "akka.actor.ActorWithBoundedStashSpec$Bounded" @@ -56,47 +40,42 @@ object ActorWithBoundedStashSpec { """) // bounded deque-based mailbox with capacity 10 - class Bounded(settings: Settings, config: Config) extends BoundedDequeBasedMailbox(10, 5 seconds) - + class Bounded(settings: Settings, config: Config) extends BoundedDequeBasedMailbox(10, 1 seconds) } @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) -class ActorWithBoundedStashSpec extends AkkaSpec(ActorWithBoundedStashSpec.testConf) with DefaultTimeout with BeforeAndAfterEach { +class ActorWithBoundedStashSpec extends AkkaSpec(ActorWithBoundedStashSpec.testConf) with DefaultTimeout with BeforeAndAfterEach with ImplicitSender { import ActorWithBoundedStashSpec._ implicit val sys = system - override def atStartup { - system.eventStream.publish(Mute(EventFilter[Exception]("Crashing..."))) - } + override def atStartup { system.eventStream.publish(Mute(EventFilter[Exception]("Crashing..."))) } def myProps(creator: ⇒ Actor): Props = Props(creator).withDispatcher("my-dispatcher") "An Actor with Stash and BoundedDequeBasedMailbox" must { - "throw a MessageQueueAppendFailedException in case of a capacity violation" in { - ActorWithBoundedStashSpec.expectedException = new TestLatch + "end up in DeadLetters in case of a capacity violation" in { + system.eventStream.subscribe(testActor, classOf[DeadLetter]) + val stasher = system.actorOf(myProps(new StashingActor)) // fill up stash - val futures = for (_ ← 1 to 11) yield { stasher ? "hello" } - futures foreach { Await.ready(_, 10 seconds) } + (1 to 11) foreach { _ ⇒ stasher ! "hello" } // cause unstashAll with capacity violation stasher ! 
"world" - Await.ready(ActorWithBoundedStashSpec.expectedException, 10 seconds) + expectMsg(DeadLetter("hello", testActor, stasher)) + system.eventStream.unsubscribe(testActor, classOf[DeadLetter]) } - } "An Actor with bounded Stash" must { "throw a StashOverflowException in case of a stash capacity violation" in { - ActorWithBoundedStashSpec.stashOverflow = new TestLatch val stasher = system.actorOf(myProps(new StashingActorWithOverflow)) // fill up stash - for (_ ← 1 to 21) { stasher ! "hello" } - Await.ready(ActorWithBoundedStashSpec.stashOverflow, 10 seconds) + (1 to 21) foreach { _ ⇒ stasher ! "hello" } + expectMsg("STASHOVERFLOW") } - } } diff --git a/akka-actor-tests/src/test/scala/akka/dispatch/MailboxConfigSpec.scala b/akka-actor-tests/src/test/scala/akka/dispatch/MailboxConfigSpec.scala index 8759f1aad9..4f2d61de65 100644 --- a/akka-actor-tests/src/test/scala/akka/dispatch/MailboxConfigSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/dispatch/MailboxConfigSpec.scala @@ -6,9 +6,8 @@ import java.util.concurrent.ConcurrentLinkedQueue import akka.util._ import akka.util.duration._ import akka.testkit.AkkaSpec -import akka.actor.{ ActorRef, ActorContext, Props, LocalActorRef } import com.typesafe.config.Config -import akka.actor.ActorSystem +import akka.actor._ @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) abstract class MailboxSpec extends AkkaSpec with BeforeAndAfterAll with BeforeAndAfterEach { @@ -39,9 +38,10 @@ abstract class MailboxSpec extends AkkaSpec with BeforeAndAfterAll with BeforeAn q.numberOfMessages must be === config.capacity q.hasMessages must be === true - intercept[MessageQueueAppendFailedException] { - q.enqueue(null, exampleMessage) - } + system.eventStream.subscribe(testActor, classOf[DeadLetter]) + q.enqueue(testActor, exampleMessage) + expectMsg(DeadLetter(exampleMessage.message, system.deadLetters, testActor)) + system.eventStream.unsubscribe(testActor, classOf[DeadLetter]) q.dequeue must be === exampleMessage 
q.numberOfMessages must be(config.capacity - 1) diff --git a/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala b/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala index b6af478ac7..25fc0250af 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala @@ -6,18 +6,11 @@ package akka.dispatch import akka.AkkaException import java.util.{ Comparator, PriorityQueue, Queue, Deque } import akka.util._ -import akka.actor.{ ActorCell, ActorRef } import java.util.concurrent._ import annotation.tailrec import akka.event.Logging.Error -import akka.actor.ActorContext import com.typesafe.config.Config -import akka.actor.ActorSystem - -/** - * This exception normally is thrown when a bounded mailbox is over capacity - */ -class MessageQueueAppendFailedException(message: String, cause: Throwable = null) extends AkkaException(message, cause) +import akka.actor._ /** * INTERNAL API @@ -401,13 +394,11 @@ trait BoundedMessageQueueSemantics extends QueueBasedMessageQueue { def pushTimeOut: Duration override def queue: BlockingQueue[Envelope] - def enqueue(receiver: ActorRef, handle: Envelope) { + def enqueue(receiver: ActorRef, handle: Envelope): Unit = if (pushTimeOut.length > 0) { - queue.offer(handle, pushTimeOut.length, pushTimeOut.unit) || { - throw new MessageQueueAppendFailedException("Couldn't enqueue message " + handle + " to " + receiver) - } + if (!queue.offer(handle, pushTimeOut.length, pushTimeOut.unit)) + receiver.asInstanceOf[InternalActorRef].provider.deadLetters ! 
DeadLetter(handle.message, handle.sender, receiver) } else queue put handle - } def dequeue(): Envelope = queue.poll() } @@ -439,18 +430,16 @@ trait BoundedDequeBasedMessageQueueSemantics extends DequeBasedMessageQueue { override def queue: BlockingDeque[Envelope] def enqueue(receiver: ActorRef, handle: Envelope): Unit = - if (pushTimeOut.length > 0) - queue.offer(handle, pushTimeOut.length, pushTimeOut.unit) || { - throw new MessageQueueAppendFailedException("Couldn't enqueue message " + handle + " to " + receiver) - } - else queue put handle + if (pushTimeOut.length > 0) { + if (!queue.offer(handle, pushTimeOut.length, pushTimeOut.unit)) + receiver.asInstanceOf[InternalActorRef].provider.deadLetters ! DeadLetter(handle.message, handle.sender, receiver) + } else queue put handle def enqueueFirst(receiver: ActorRef, handle: Envelope): Unit = - if (pushTimeOut.length > 0) - queue.offerFirst(handle, pushTimeOut.length, pushTimeOut.unit) || { - throw new MessageQueueAppendFailedException("Couldn't enqueue message " + handle + " to " + receiver) - } - else queue putFirst handle + if (pushTimeOut.length > 0) { + if (!queue.offerFirst(handle, pushTimeOut.length, pushTimeOut.unit)) + receiver.asInstanceOf[InternalActorRef].provider.deadLetters ! 
DeadLetter(handle.message, handle.sender, receiver) + } else queue putFirst handle def dequeue(): Envelope = queue.poll() } From f6fb742fcf2ef9200c4589812e6fd09f53955d66 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Fri, 8 Jun 2012 14:13:08 +0200 Subject: [PATCH 75/92] Incorparated improvements/cleanup based on feedback, see #2182 --- .../scala/akka/cluster/AccrualFailureDetector.scala | 11 ++++------- 1 file changed, 4 insertions(+), 7 deletions(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala b/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala index c86eb3361e..c7aaf12fcf 100644 --- a/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala +++ b/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala @@ -104,10 +104,9 @@ class AccrualFailureDetector( throw new IllegalStateException("Can't calculate new failure statistics due to missing heartbeat history") } - val deviationSum = - newIntervalsForConnection - .map(_.toDouble) - .foldLeft(0.0)((x, y) ⇒ x + (y - newMean)) + val deviationSum = (0.0d /: newIntervalsForConnection) { (mean, interval) ⇒ + mean + interval.toDouble - newMean + } val newVariance: Double = deviationSum / newIntervalsForConnection.size val newDeviation: Double = math.sqrt(newVariance) @@ -149,9 +148,7 @@ class AccrualFailureDetector( val mean = oldState.failureStats.get(connection) match { case Some(FailureStats(mean, _, _)) ⇒ mean - case _ ⇒ - if (!oldState.intervalHistory.contains(connection)) 1000.0 - else throw new IllegalStateException("Can't calculate Failure Detector Phi value for a node that have no heartbeat history") + case _ ⇒ throw new IllegalStateException("Can't calculate Failure Detector Phi value for a node that have no heartbeat history") } if (mean == 0.0) 0.0 From c1d68ecfbf3ca67f3eeceff5879d92e4807c6bfc Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Fri, 8 Jun 2012 14:26:10 +0200 Subject: [PATCH 76/92] Minor formatting --- 
.../src/test/scala/akka/actor/SupervisorSpec.scala | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/akka-actor-tests/src/test/scala/akka/actor/SupervisorSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/SupervisorSpec.scala index d295e6db4f..9e14a510e2 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/SupervisorSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/SupervisorSpec.scala @@ -339,9 +339,7 @@ class SupervisorSpec extends AkkaSpec with BeforeAndAfterEach with ImplicitSende OneForOneStrategy(maxNrOfRetries = 3, withinTimeRange = 10 seconds)(classOf[Exception] :: Nil)))) val dyingProps = Props(new Actor { - inits.incrementAndGet - - if (inits.get % 2 == 0) throw new IllegalStateException("Don't wanna!") + if (inits.incrementAndGet % 2 == 0) throw new IllegalStateException("Don't wanna!") def receive = { case Ping ⇒ sender ! PongMessage From fb62311f49f4e0155d080fcbcf788e932bb75757 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Fri, 8 Jun 2012 15:03:11 +0200 Subject: [PATCH 77/92] Rename NodeShutdownSpec to SingletonClusterSpec, see #2182 --- ...deShutdownSpec.scala => SingletonClusterSpec.scala} | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) rename akka-cluster/src/multi-jvm/scala/akka/cluster/{NodeShutdownSpec.scala => SingletonClusterSpec.scala} (78%) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/SingletonClusterSpec.scala similarity index 78% rename from akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala rename to akka-cluster/src/multi-jvm/scala/akka/cluster/SingletonClusterSpec.scala index 4dc90a5b89..68d20012f5 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/SingletonClusterSpec.scala @@ -9,7 +9,7 @@ import akka.remote.testkit.MultiNodeSpec import akka.testkit._ import akka.util.duration._ -object 
NodeShutdownMultiJvmSpec extends MultiNodeConfig { +object SingletonClusterMultiJvmSpec extends MultiNodeConfig { val first = role("first") val second = role("second") @@ -24,11 +24,11 @@ object NodeShutdownMultiJvmSpec extends MultiNodeConfig { } -class NodeShutdownMultiJvmNode1 extends NodeShutdownSpec -class NodeShutdownMultiJvmNode2 extends NodeShutdownSpec +class SingletonClusterMultiJvmNode1 extends SingletonClusterSpec +class SingletonClusterMultiJvmNode2 extends SingletonClusterSpec -abstract class NodeShutdownSpec extends MultiNodeSpec(NodeShutdownMultiJvmSpec) with MultiNodeClusterSpec { - import NodeShutdownMultiJvmSpec._ +abstract class SingletonClusterSpec extends MultiNodeSpec(SingletonClusterMultiJvmSpec) with MultiNodeClusterSpec { + import SingletonClusterMultiJvmSpec._ "A cluster of 2 nodes" must { From a1dd4bc23560778fc48cb7440cb3c4f36753a83a Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Fri, 8 Jun 2012 15:28:03 +0200 Subject: [PATCH 78/92] Remove jenkins color codes in LogRoleReplace script --- .../scala/akka/remote/testkit/LogRoleReplace.scala | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/akka-remote-tests/src/test/scala/akka/remote/testkit/LogRoleReplace.scala b/akka-remote-tests/src/test/scala/akka/remote/testkit/LogRoleReplace.scala index 3b3527240e..1e5a53d82e 100644 --- a/akka-remote-tests/src/test/scala/akka/remote/testkit/LogRoleReplace.scala +++ b/akka-remote-tests/src/test/scala/akka/remote/testkit/LogRoleReplace.scala @@ -90,6 +90,7 @@ class LogRoleReplace { private val RoleStarted = """\[([\w\-]+)\].*Role \[([\w]+)\] started""".r private val RemoteServerStarted = """\[([\w\-]+)\].*RemoteServerStarted@akka://.*@([\w\-\.]+):([0-9]+)""".r + private val ColorCode = """\[[0-9]+m""" private var replacements: Map[String, String] = Map.empty private var jvmToAddress: Map[String, String] = Map.empty @@ -106,12 +107,16 @@ class LogRoleReplace { } def processLine(line: String): String = { - if 
(updateReplacements(line)) - replaceLine(line) + val cleanLine = removeColorCodes(line) + if (updateReplacements(cleanLine)) + replaceLine(cleanLine) else - line + cleanLine } + private def removeColorCodes(line: String): String = + line.replaceAll(ColorCode, "") + private def updateReplacements(line: String): Boolean = { if (line.startsWith("[info] * ")) { // reset when new test begins From 8479db7e75dea006658869addc02d22cbe644c52 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Fri, 8 Jun 2012 15:30:17 +0200 Subject: [PATCH 79/92] #2186 - Adding test to verify that parent is restarted if decider throws an exception --- .../scala/akka/actor/SupervisorSpec.scala | 34 +++++++++++++++++++ 1 file changed, 34 insertions(+) diff --git a/akka-actor-tests/src/test/scala/akka/actor/SupervisorSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/SupervisorSpec.scala index 9e14a510e2..3db5b5b5dc 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/SupervisorSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/SupervisorSpec.scala @@ -364,5 +364,39 @@ class SupervisorSpec extends AkkaSpec with BeforeAndAfterEach with ImplicitSende system.stop(supervisor) } + + "must not lose system messages when a NonFatal exception occurs when processing a system message" in { + val parent = system.actorOf(Props(new Actor { + override val supervisorStrategy = OneForOneStrategy()({ + case e: IllegalStateException if e.getMessage == "OHNOES" ⇒ throw e + case _ ⇒ SupervisorStrategy.Restart + }) + val child = context.watch(context.actorOf(Props(new Actor { + override def postRestart(reason: Throwable): Unit = testActor ! "child restarted" + def receive = { + case "die" ⇒ throw new IllegalStateException("OHNOES") + case "test" ⇒ sender ! "child green" + } + }), "child")) + + override def postRestart(reason: Throwable): Unit = testActor ! "parent restarted" + + def receive = { + case t @ Terminated(`child`) ⇒ testActor ! "child terminated" + case "die" ⇒ child ! 
"die" + case "test" ⇒ sender ! "green" + case "testchild" ⇒ child forward "test" + } + })) + + parent ! "die" + parent ! "testchild" + expectMsg("parent restarted") + expectMsg("child terminated") + parent ! "test" + expectMsg("green") + parent ! "testchild" + expectMsg("child green") + } } } From c9e259d56992a785de70361734c62a3b35b591d0 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Fri, 8 Jun 2012 16:51:27 +0200 Subject: [PATCH 80/92] Turn on debug logging due to failures --- .../src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala | 2 +- .../src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala index e86602949f..2e27f4c3bd 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala @@ -17,7 +17,7 @@ object JoinTwoClustersMultiJvmSpec extends MultiNodeConfig { val c1 = role("c1") val c2 = role("c2") - commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) + commonConfig(debugConfig(on = true).withFallback(MultiNodeClusterSpec.clusterConfig)) } class JoinTwoClustersMultiJvmNode1 extends JoinTwoClustersSpec diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala index fcb1393f8a..e36980d859 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala @@ -24,7 +24,7 @@ object SunnyWeatherMultiJvmSpec extends MultiNodeConfig { gossip-interval = 400 ms nr-of-deputy-nodes = 0 } - akka.loglevel = INFO + akka.loglevel = DEBUG """)) } From 2b69f67777fa3b7d73a97a3afc23ac45182e14b4 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall 
Date: Fri, 8 Jun 2012 16:54:40 +0200 Subject: [PATCH 81/92] Ignore due to failures, see #2180 --- .../cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala | 3 ++- .../scala/akka/cluster/NodeLeavingAndExitingSpec.scala | 3 ++- .../src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala | 3 ++- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala index 8e274be311..d85016c714 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala @@ -36,7 +36,8 @@ abstract class NodeLeavingAndExitingAndBeingRemovedSpec "A node that is LEAVING a non-singleton cluster" must { - "be moved to EXITING and then to REMOVED by the reaper" taggedAs LongRunningTest in { + // FIXME make it work and remove ignore + "be moved to EXITING and then to REMOVED by the reaper" taggedAs LongRunningTest ignore { awaitClusterUp(first, second, third) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala index 79fff4770f..2909362fa7 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala @@ -42,7 +42,8 @@ abstract class NodeLeavingAndExitingSpec "A node that is LEAVING a non-singleton cluster" must { - "be moved to EXITING by the leader" taggedAs LongRunningTest in { + // FIXME make it work and remove ignore + "be moved to EXITING by the leader" taggedAs LongRunningTest ignore { awaitClusterUp(first, second, third) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala 
b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala index b834492045..27bc36a3bf 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala @@ -36,7 +36,8 @@ abstract class NodeLeavingSpec extends MultiNodeSpec(NodeLeavingMultiJvmSpec) "A node that is LEAVING a non-singleton cluster" must { - "be marked as LEAVING in the converged membership table" taggedAs LongRunningTest in { + // FIXME make it work and remove ignore + "be marked as LEAVING in the converged membership table" taggedAs LongRunningTest ignore { awaitClusterUp(first, second, third) From fd455d14bff465d51a5abc1cb98a781cd65147c3 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Fri, 8 Jun 2012 21:57:39 +0200 Subject: [PATCH 82/92] #2208 - Upgrading to Netty 3.5.0 - remove StaticChannelPipeline since it's deprecated. --- .../akka/remote/testconductor/RemoteConnection.scala | 7 +++++-- .../src/main/scala/akka/remote/netty/Client.scala | 2 +- .../scala/akka/remote/netty/NettyRemoteSupport.scala | 9 ++++++--- project/AkkaBuild.scala | 2 +- 4 files changed, 13 insertions(+), 7 deletions(-) diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/RemoteConnection.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/RemoteConnection.scala index 5aeb484c42..1979857bf0 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/RemoteConnection.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/RemoteConnection.scala @@ -3,7 +3,7 @@ */ package akka.remote.testconductor -import org.jboss.netty.channel.{ Channel, ChannelPipeline, ChannelPipelineFactory, ChannelUpstreamHandler, SimpleChannelUpstreamHandler, StaticChannelPipeline } +import org.jboss.netty.channel.{ Channel, ChannelPipeline, ChannelPipelineFactory, ChannelUpstreamHandler, SimpleChannelUpstreamHandler, DefaultChannelPipeline } import org.jboss.netty.channel.socket.nio.{ 
NioClientSocketChannelFactory, NioServerSocketChannelFactory } import org.jboss.netty.bootstrap.{ ClientBootstrap, ServerBootstrap } import org.jboss.netty.handler.codec.frame.{ LengthFieldBasedFrameDecoder, LengthFieldPrepender } @@ -12,6 +12,7 @@ import org.jboss.netty.handler.codec.protobuf.{ ProtobufDecoder, ProtobufEncoder import org.jboss.netty.handler.timeout.{ ReadTimeoutHandler, ReadTimeoutException } import java.net.InetSocketAddress import java.util.concurrent.Executors +import akka.event.Logging /** * INTERNAL API. @@ -21,7 +22,9 @@ private[akka] class TestConductorPipelineFactory(handler: ChannelUpstreamHandler val encap = List(new LengthFieldPrepender(4), new LengthFieldBasedFrameDecoder(10000, 0, 4, 0, 4)) val proto = List(new ProtobufEncoder, new ProtobufDecoder(TestConductorProtocol.Wrapper.getDefaultInstance)) val msg = List(new MsgEncoder, new MsgDecoder) - new StaticChannelPipeline(encap ::: proto ::: msg ::: handler :: Nil: _*) + (encap ::: proto ::: msg ::: handler :: Nil).foldLeft(new DefaultChannelPipeline) { + (pipe, handler) ⇒ pipe.addLast(Logging.simpleName(handler.getClass), handler); pipe + } } } diff --git a/akka-remote/src/main/scala/akka/remote/netty/Client.scala b/akka-remote/src/main/scala/akka/remote/netty/Client.scala index 86c534c418..e3a2cea9a7 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Client.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Client.scala @@ -8,7 +8,7 @@ import java.net.{ InetAddress, InetSocketAddress } import org.jboss.netty.util.{ Timeout, TimerTask, HashedWheelTimer } import org.jboss.netty.bootstrap.ClientBootstrap import org.jboss.netty.channel.group.DefaultChannelGroup -import org.jboss.netty.channel.{ ChannelFutureListener, ChannelHandler, StaticChannelPipeline, MessageEvent, ExceptionEvent, ChannelStateEvent, ChannelPipelineFactory, ChannelPipeline, ChannelHandlerContext, ChannelFuture, Channel } +import org.jboss.netty.channel.{ ChannelFutureListener, ChannelHandler, 
DefaultChannelPipeline, MessageEvent, ExceptionEvent, ChannelStateEvent, ChannelPipelineFactory, ChannelPipeline, ChannelHandlerContext, ChannelFuture, Channel } import org.jboss.netty.handler.codec.frame.{ LengthFieldPrepender, LengthFieldBasedFrameDecoder } import org.jboss.netty.handler.execution.ExecutionHandler import org.jboss.netty.handler.timeout.{ IdleState, IdleStateEvent, IdleStateAwareChannelHandler, IdleStateHandler } diff --git a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala index b42239f470..61124cfecb 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala @@ -12,7 +12,7 @@ import java.util.concurrent.Executors import scala.collection.mutable.HashMap import org.jboss.netty.channel.group.{ DefaultChannelGroup, ChannelGroupFuture } import org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory -import org.jboss.netty.channel.{ ChannelHandlerContext, Channel, StaticChannelPipeline, ChannelHandler, ChannelPipelineFactory, ChannelLocal } +import org.jboss.netty.channel.{ ChannelHandlerContext, Channel, DefaultChannelPipeline, ChannelHandler, ChannelPipelineFactory, ChannelLocal } import org.jboss.netty.handler.codec.frame.{ LengthFieldPrepender, LengthFieldBasedFrameDecoder } import org.jboss.netty.handler.codec.protobuf.{ ProtobufEncoder, ProtobufDecoder } import org.jboss.netty.handler.execution.{ ExecutionHandler, OrderedMemoryAwareThreadPoolExecutor } @@ -50,10 +50,13 @@ private[akka] class NettyRemoteTransport(_system: ExtendedActorSystem, _provider */ object PipelineFactory { /** - * Construct a StaticChannelPipeline from a sequence of handlers; to be used + * Construct a DefaultChannelPipeline from a sequence of handlers; to be used * in implementations of ChannelPipelineFactory. 
*/ - def apply(handlers: Seq[ChannelHandler]): StaticChannelPipeline = new StaticChannelPipeline(handlers: _*) + def apply(handlers: Seq[ChannelHandler]): DefaultChannelPipeline = + handlers.foldLeft(new DefaultChannelPipeline) { + (pipe, handler) ⇒ pipe.addLast(Logging.simpleName(handler.getClass), handler); pipe + } /** * Constructs the NettyRemoteTransport default pipeline with the give “head” handler, which diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala index 736927e7c2..d6d23eb56b 100644 --- a/project/AkkaBuild.scala +++ b/project/AkkaBuild.scala @@ -485,7 +485,7 @@ object Dependency { object V { val Camel = "2.8.0" val Logback = "1.0.4" - val Netty = "3.3.0.Final" + val Netty = "3.5.0.Final" val Protobuf = "2.4.1" val ScalaStm = "0.5" val Scalatest = "1.6.1" From e6ee3e2a953768c982f0150837471041dcf46060 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Sun, 10 Jun 2012 16:50:04 +0200 Subject: [PATCH 83/92] Ignoring ConvergenceSpec until fixed. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- .../src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala index 9963903b90..65571b97b3 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala @@ -37,7 +37,7 @@ abstract class ConvergenceSpec "A cluster of 3 members" must { - "reach initial convergence" taggedAs LongRunningTest in { + "reach initial convergence" taggedAs LongRunningTest ignore { awaitClusterUp(first, second, third) runOn(fourth) { @@ -47,7 +47,7 @@ abstract class ConvergenceSpec testConductor.enter("after-1") } - "not reach convergence while any nodes are unreachable" taggedAs LongRunningTest in 
{ + "not reach convergence while any nodes are unreachable" taggedAs LongRunningTest ignore { val thirdAddress = node(third).address testConductor.enter("before-shutdown") @@ -78,7 +78,7 @@ abstract class ConvergenceSpec testConductor.enter("after-2") } - "not move a new joining node to Up while there is no convergence" taggedAs LongRunningTest in { + "not move a new joining node to Up while there is no convergence" taggedAs LongRunningTest ignore { runOn(fourth) { // try to join cluster.join(node(first).address) From a4499b06bb00945bd63f6b352190e6e0a4560b26 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Sun, 10 Jun 2012 16:52:33 +0200 Subject: [PATCH 84/92] Abstracted the FailureDetector into a interface trait and added controllable failure detector mock. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Abstracted a FailureDetector trait. - Added a FailureDetectorPuppet mock that can be user controllable - Added option to define a custom failure detector - Misc minor fixes Signed-off-by: Jonas Bonér --- .../akka/cluster/AccrualFailureDetector.scala | 28 ++++-- .../src/main/scala/akka/cluster/Cluster.scala | 21 +++- .../scala/akka/cluster/ClusterSettings.scala | 4 + .../scala/akka/cluster/FailureDetector.scala | 99 +++++++++++++++++++ .../cluster/AccrualFailureDetectorSpec.scala | 18 ++-- .../akka/cluster/ClusterConfigSpec.scala | 1 + .../test/scala/akka/cluster/ClusterSpec.scala | 6 +- 7 files changed, 151 insertions(+), 26 deletions(-) create mode 100644 akka-cluster/src/main/scala/akka/cluster/FailureDetector.scala diff --git a/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala b/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala index c7aaf12fcf..cdca8c9503 100644 --- a/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala +++ b/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala @@ -4,7 +4,8 @@ package akka.cluster -import 
akka.actor.{ ActorSystem, Address } +import akka.actor.{ ActorSystem, Address, ExtendedActorSystem } +import akka.remote.RemoteActorRefProvider import akka.event.Logging import scala.collection.immutable.Map @@ -23,11 +24,20 @@ import java.util.concurrent.atomic.AtomicReference * Default threshold is 8, but can be configured in the Akka config. */ class AccrualFailureDetector( - system: ActorSystem, - address: Address, + val system: ActorSystem, val threshold: Int = 8, val maxSampleSize: Int = 1000, - val timeMachine: () ⇒ Long = System.currentTimeMillis) { + val timeMachine: () ⇒ Long = System.currentTimeMillis) extends FailureDetector { + + def this( + system: ActorSystem, + settings: ClusterSettings, + timeMachine: () ⇒ Long = System.currentTimeMillis) = + this( + system, + settings.FailureDetectorThreshold, + settings.FailureDetectorMaxSampleSize, + timeMachine) private final val PhiFactor = 1.0 / math.log(10.0) @@ -65,8 +75,8 @@ class AccrualFailureDetector( * Records a heartbeat for a connection. */ @tailrec - final def heartbeat(connection: Address) { - log.debug("Node [{}] - Heartbeat from connection [{}] ", address, connection) + final def heartbeat(connection: Address): Unit = { + log.debug("Heartbeat from connection [{}] ", connection) val oldState = state.get val latestTimestamp = oldState.timestamps.get(connection) @@ -155,7 +165,7 @@ class AccrualFailureDetector( else PhiFactor * timestampDiff / mean } - log.debug("Node [{}] - Phi value [{}] and threshold [{}] for connection [{}] ", address, phi, threshold, connection) + log.debug("Phi value [{}] and threshold [{}] for connection [{}] ", phi, threshold, connection) phi } @@ -163,8 +173,8 @@ class AccrualFailureDetector( * Removes the heartbeat management for a connection. 
*/ @tailrec - final def remove(connection: Address) { - log.debug("Node [{}] - Remove connection [{}] ", address, connection) + final def remove(connection: Address): Unit = { + log.debug("Remove connection [{}] ", connection) val oldState = state.get if (oldState.failureStats.contains(connection)) { diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index 4ea43d50e4..e788450148 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -306,7 +306,22 @@ object Cluster extends ExtensionId[Cluster] with ExtensionIdProvider { override def lookup = Cluster - override def createExtension(system: ExtendedActorSystem): Cluster = new Cluster(system) + override def createExtension(system: ExtendedActorSystem): Cluster = { + val clusterSettings = new ClusterSettings(system.settings.config, system.name) + + def createDefaultFD() = new AccrualFailureDetector(system, clusterSettings) + val failureDetector = clusterSettings.FailureDetectorImplementationClass match { + case None ⇒ createDefaultFD() + case Some(fqcn) ⇒ system.dynamicAccess.createInstanceFor[FailureDetector](fqcn, Seq((classOf[ActorSystem], system), (classOf[ClusterSettings], clusterSettings))) match { + case Right(fd) ⇒ fd + case Left(e) ⇒ + system.log.error(e, "Could not create custom failure detector - falling back to default") + createDefaultFD() + } + } + + new Cluster(system, failureDetector) + } } /** @@ -349,7 +364,7 @@ trait ClusterNodeMBean { * if (Cluster(system).isLeader) { ... } * }}} */ -class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ +class Cluster(system: ExtendedActorSystem, val failureDetector: FailureDetector) extends Extension { clusterNode ⇒ /** * Represents the state for this Cluster. Implemented using optimistic lockless concurrency. 
@@ -369,8 +384,6 @@ class Cluster(system: ExtendedActorSystem) extends Extension { clusterNode ⇒ import clusterSettings._ val selfAddress = remote.transport.address - val failureDetector = new AccrualFailureDetector( - system, selfAddress, FailureDetectorThreshold, FailureDetectorMaxSampleSize) private val vclockNode = VectorClock.Node(selfAddress.toString) diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala index 0e7dac06ab..b58775e222 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala @@ -15,6 +15,10 @@ class ClusterSettings(val config: Config, val systemName: String) { import config._ val FailureDetectorThreshold = getInt("akka.cluster.failure-detector.threshold") val FailureDetectorMaxSampleSize = getInt("akka.cluster.failure-detector.max-sample-size") + val FailureDetectorImplementationClass: Option[String] = getString("akka.cluster.failure-detector.implementation-class") match { + case "" ⇒ None + case fqcn ⇒ Some(fqcn) + } val NodeToJoin: Option[Address] = getString("akka.cluster.node-to-join") match { case "" ⇒ None case AddressFromURIString(addr) ⇒ Some(addr) diff --git a/akka-cluster/src/main/scala/akka/cluster/FailureDetector.scala b/akka-cluster/src/main/scala/akka/cluster/FailureDetector.scala new file mode 100644 index 0000000000..897d0413b5 --- /dev/null +++ b/akka-cluster/src/main/scala/akka/cluster/FailureDetector.scala @@ -0,0 +1,99 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ + +package akka.cluster + +import akka.actor.{ Address, ActorSystem } +import akka.event.{ Logging, LogSource } + +/** + * Interface for Akka failure detectors. + */ +trait FailureDetector { + + /** + * Returns true if the connection is considered to be up and healthy + * and returns false otherwise. 
+ */ + def isAvailable(connection: Address): Boolean + + /** + * Records a heartbeat for a connection. + */ + def heartbeat(connection: Address): Unit + + /** + * Calculates how likely it is that the connection has failed. + *

+ * If a connection does not have any records in failure detector then it is + * considered healthy. + */ + def phi(connection: Address): Double + + /** + * Removes the heartbeat management for a connection. + */ + def remove(connection: Address): Unit +} + +/** + * User controllable "puppet" failure detector. + */ +class FailureDetectorPuppet(system: ActorSystem, connectionsToStartWith: Address*) extends FailureDetector { + import java.util.concurrent.ConcurrentHashMap + + trait Status + object Up extends Status + object Down extends Status + + implicit val logSource: LogSource[AnyRef] = new LogSource[AnyRef] { + def genString(o: AnyRef): String = o.getClass.getName + override def getClazz(o: AnyRef): Class[_] = o.getClass + } + + val log = Logging(system, this) + + private val connections = { + val cs = new ConcurrentHashMap[Address, Status] + connectionsToStartWith foreach { cs put (_, Up) } + cs + } + + def +(connection: Address): this.type = { + log.debug("Adding cluster node [{}]", connection) + connections.put(connection, Up) + this + } + + def markAsDown(connection: Address): this.type = { + connections.put(connection, Down) + this + } + + def markAsUp(connection: Address): this.type = { + connections.put(connection, Up) + this + } + + def isAvailable(connection: Address): Boolean = connections.get(connection) match { + case null ⇒ + this + connection + true + case Up ⇒ + log.debug("isAvailable: Cluster node IS NOT available [{}]", connection) + true + case Down ⇒ + log.debug("isAvailable: Cluster node IS available [{}]", connection) + false + } + + def heartbeat(connection: Address): Unit = log.debug("Heart beat from cluster node[{}]", connection) + + def phi(connection: Address): Double = 0.1D + + def remove(connection: Address): Unit = { + log.debug("Removing cluster node [{}]", connection) + connections.remove(connection) + } +} diff --git a/akka-cluster/src/test/scala/akka/cluster/AccrualFailureDetectorSpec.scala 
b/akka-cluster/src/test/scala/akka/cluster/AccrualFailureDetectorSpec.scala index 1cf62daf1c..bd4d5d2c52 100644 --- a/akka-cluster/src/test/scala/akka/cluster/AccrualFailureDetectorSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/AccrualFailureDetectorSpec.scala @@ -28,7 +28,7 @@ class AccrualFailureDetectorSpec extends AkkaSpec(""" } "return phi value of 0.0 on startup for each address, when no heartbeats" in { - val fd = new AccrualFailureDetector(system, conn) + val fd = new AccrualFailureDetector(system) fd.phi(conn) must be(0.0) fd.phi(conn2) must be(0.0) } @@ -36,7 +36,7 @@ class AccrualFailureDetectorSpec extends AkkaSpec(""" "return phi based on guess when only one heartbeat" in { // 1 second ticks val timeInterval = Vector.fill(30)(1000L) - val fd = new AccrualFailureDetector(system, conn, + val fd = new AccrualFailureDetector(system, timeMachine = fakeTimeGenerator(timeInterval)) fd.heartbeat(conn) @@ -52,7 +52,7 @@ class AccrualFailureDetectorSpec extends AkkaSpec(""" "return phi value using first interval after second heartbeat" in { val timeInterval = List[Long](0, 100, 100, 100) - val fd = new AccrualFailureDetector(system, conn, + val fd = new AccrualFailureDetector(system, timeMachine = fakeTimeGenerator(timeInterval)) fd.heartbeat(conn) @@ -63,7 +63,7 @@ class AccrualFailureDetectorSpec extends AkkaSpec(""" "mark node as available after a series of successful heartbeats" in { val timeInterval = List[Long](0, 1000, 100, 100) - val fd = new AccrualFailureDetector(system, conn, + val fd = new AccrualFailureDetector(system, timeMachine = fakeTimeGenerator(timeInterval)) fd.heartbeat(conn) @@ -75,7 +75,7 @@ class AccrualFailureDetectorSpec extends AkkaSpec(""" "mark node as dead after explicit removal of connection" in { val timeInterval = List[Long](0, 1000, 100, 100, 100) - val fd = new AccrualFailureDetector(system, conn, + val fd = new AccrualFailureDetector(system, timeMachine = fakeTimeGenerator(timeInterval)) fd.heartbeat(conn) @@ -89,7 
+89,7 @@ class AccrualFailureDetectorSpec extends AkkaSpec(""" "mark node as available after explicit removal of connection and receiving heartbeat again" in { val timeInterval = List[Long](0, 1000, 100, 1100, 1100, 1100, 1100, 1100, 100) - val fd = new AccrualFailureDetector(system, conn, + val fd = new AccrualFailureDetector(system, timeMachine = fakeTimeGenerator(timeInterval)) fd.heartbeat(conn) //0 @@ -114,7 +114,7 @@ class AccrualFailureDetectorSpec extends AkkaSpec(""" "mark node as dead if heartbeat are missed" in { val timeInterval = List[Long](0, 1000, 100, 100, 5000) val ft = fakeTimeGenerator(timeInterval) - val fd = new AccrualFailureDetector(system, conn, threshold = 3, + val fd = new AccrualFailureDetector(system, threshold = 3, timeMachine = fakeTimeGenerator(timeInterval)) fd.heartbeat(conn) //0 @@ -127,7 +127,7 @@ class AccrualFailureDetectorSpec extends AkkaSpec(""" "mark node as available if it starts heartbeat again after being marked dead due to detection of failure" in { val timeInterval = List[Long](0, 1000, 100, 1100, 5000, 100, 1000, 100, 100) - val fd = new AccrualFailureDetector(system, conn, threshold = 3, + val fd = new AccrualFailureDetector(system, threshold = 3, timeMachine = fakeTimeGenerator(timeInterval)) fd.heartbeat(conn) //0 @@ -144,7 +144,7 @@ class AccrualFailureDetectorSpec extends AkkaSpec(""" "use maxSampleSize heartbeats" in { val timeInterval = List[Long](0, 100, 100, 100, 100, 600, 1000, 1000, 1000, 1000, 1000) - val fd = new AccrualFailureDetector(system, conn, maxSampleSize = 3, + val fd = new AccrualFailureDetector(system, maxSampleSize = 3, timeMachine = fakeTimeGenerator(timeInterval)) // 100 ms interval diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala index 6b2ff1962c..9bce41a831 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala +++ 
b/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala @@ -18,6 +18,7 @@ class ClusterConfigSpec extends AkkaSpec { import settings._ FailureDetectorThreshold must be(8) FailureDetectorMaxSampleSize must be(1000) + FailureDetectorImplementationClass must be(None) NodeToJoin must be(None) PeriodicTasksInitialDelay must be(1 seconds) GossipInterval must be(1 second) diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala index d3d1d6d0a2..5b4bca3379 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala @@ -33,7 +33,7 @@ class ClusterSpec extends AkkaSpec(ClusterSpec.config) with BeforeAndAfter { val deterministicRandom = new AtomicInteger - val cluster = new Cluster(system.asInstanceOf[ExtendedActorSystem]) { + val cluster = new Cluster(system.asInstanceOf[ExtendedActorSystem], new FailureDetectorPuppet(system)) { override def selectRandomNode(addresses: IndexedSeq[Address]): Option[Address] = { if (addresses.isEmpty) None @@ -67,9 +67,7 @@ class ClusterSpec extends AkkaSpec(ClusterSpec.config) with BeforeAndAfter { @volatile var _unavailable: Set[Address] = Set.empty - override val failureDetector = new AccrualFailureDetector( - system, selfAddress, clusterSettings.FailureDetectorThreshold, clusterSettings.FailureDetectorMaxSampleSize) { - + override val failureDetector = new AccrualFailureDetector(system, clusterSettings) { override def isAvailable(connection: Address): Boolean = { if (_unavailable.contains(connection)) false else super.isAvailable(connection) From 0030fa1b528bdac181a34e7d211b4cc26b09c678 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Sun, 10 Jun 2012 16:53:17 +0200 Subject: [PATCH 85/92] Made LeaderDowningNodeThatIsUnreachableSpec make use of the new FailureDetectorPuppet as a sample of how to use it. 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- ...aderDowningNodeThatIsUnreachableSpec.scala | 28 +++++++++++++------ 1 file changed, 20 insertions(+), 8 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala index 616c412556..f3f8015ced 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala @@ -7,7 +7,7 @@ import com.typesafe.config.ConfigFactory import akka.remote.testkit.MultiNodeConfig import akka.remote.testkit.MultiNodeSpec import akka.testkit._ -import akka.actor.Address +import akka.actor._ import akka.util.duration._ object LeaderDowningNodeThatIsUnreachableMultiJvmSpec extends MultiNodeConfig { @@ -16,12 +16,9 @@ object LeaderDowningNodeThatIsUnreachableMultiJvmSpec extends MultiNodeConfig { val third = role("third") val fourth = role("fourth") - commonConfig(debugConfig(on = true). + commonConfig(debugConfig(on = false). withFallback(ConfigFactory.parseString(""" - akka.cluster { - auto-down = on - failure-detector.threshold = 4 - } + akka.cluster.auto-down = on """)). 
withFallback(MultiNodeClusterSpec.clusterConfig)) } @@ -37,10 +34,20 @@ class LeaderDowningNodeThatIsUnreachableSpec import LeaderDowningNodeThatIsUnreachableMultiJvmSpec._ + // Set up the puppet failure detector + lazy val failureDetector = new FailureDetectorPuppet(system = system) + lazy val clusterNode = new Cluster(system.asInstanceOf[ExtendedActorSystem], failureDetector) + + override def cluster = clusterNode + + lazy val firstAddress = node(first).address + lazy val secondAddress = node(second).address + lazy val thirdAddress = node(third).address + lazy val fourthAddress = node(fourth).address + "The Leader in a 4 node cluster" must { "be able to DOWN a 'last' node that is UNREACHABLE" taggedAs LongRunningTest in { - val fourthAddress = node(fourth).address awaitClusterUp(first, second, third, fourth) runOn(first) { @@ -48,6 +55,9 @@ class LeaderDowningNodeThatIsUnreachableSpec testConductor.shutdown(fourth, 0) testConductor.enter("down-fourth-node") + // mark the node as unreachable in the failure detector + failureDetector markAsDown fourthAddress + // --- HERE THE LEADER SHOULD DETECT FAILURE AND AUTO-DOWN THE UNREACHABLE NODE --- awaitUpConvergence(numberOfMembers = 3, canNotBePartOfMemberRing = Seq(fourthAddress), 30.seconds) @@ -67,7 +77,6 @@ class LeaderDowningNodeThatIsUnreachableSpec } "be able to DOWN a 'middle' node that is UNREACHABLE" taggedAs LongRunningTest in { - val secondAddress = node(second).address testConductor.enter("before-down-second-node") runOn(first) { @@ -75,6 +84,9 @@ class LeaderDowningNodeThatIsUnreachableSpec testConductor.shutdown(second, 0) testConductor.enter("down-second-node") + // mark the node as unreachable in the failure detector + failureDetector markAsDown secondAddress + // --- HERE THE LEADER SHOULD DETECT FAILURE AND AUTO-DOWN THE UNREACHABLE NODE --- awaitUpConvergence(numberOfMembers = 2, canNotBePartOfMemberRing = Seq(secondAddress), 30.seconds) From ec7177be740fc070c2a5fe483dbc76a49b35d6fc Mon Sep 17 
00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Mon, 11 Jun 2012 10:06:53 +0200 Subject: [PATCH 86/92] Misc fixes after FailureDetectorPuppet and abstraction review MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Moved FailureDetectorPuppet to its own file in src/test. - Removed 'phi' method from FailureDetector public API. - Throwing exception instead of falling back to default if we can't load the custom FD. - Removed add-connection method in FailureDetectorPuppet. Signed-off-by: Jonas Bonér --- .../src/main/scala/akka/cluster/Cluster.scala | 15 ++-- .../scala/akka/cluster/FailureDetector.scala | 75 +------------------ .../test/scala/akka/cluster/ClusterSpec.scala | 2 +- .../akka/cluster/FailureDetectorPuppet.scala | 60 +++++++++++++++ 4 files changed, 70 insertions(+), 82 deletions(-) create mode 100644 akka-cluster/src/test/scala/akka/cluster/FailureDetectorPuppet.scala diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index e788450148..891c8972b0 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -309,15 +309,14 @@ object Cluster extends ExtensionId[Cluster] with ExtensionIdProvider { override def createExtension(system: ExtendedActorSystem): Cluster = { val clusterSettings = new ClusterSettings(system.settings.config, system.name) - def createDefaultFD() = new AccrualFailureDetector(system, clusterSettings) val failureDetector = clusterSettings.FailureDetectorImplementationClass match { - case None ⇒ createDefaultFD() - case Some(fqcn) ⇒ system.dynamicAccess.createInstanceFor[FailureDetector](fqcn, Seq((classOf[ActorSystem], system), (classOf[ClusterSettings], clusterSettings))) match { - case Right(fd) ⇒ fd - case Left(e) ⇒ - system.log.error(e, "Could not create custom failure detector - falling back to default") - createDefaultFD() - } + case None ⇒ 
new AccrualFailureDetector(system, clusterSettings) + case Some(fqcn) ⇒ + system.dynamicAccess.createInstanceFor[FailureDetector]( + fqcn, Seq((classOf[ActorSystem], system), (classOf[ClusterSettings], clusterSettings))) match { + case Right(fd) ⇒ fd + case Left(e) ⇒ throw new ConfigurationException("Could not create custom failure detector [" + fqcn + "] due to:" + e.toString) + } } new Cluster(system, failureDetector) diff --git a/akka-cluster/src/main/scala/akka/cluster/FailureDetector.scala b/akka-cluster/src/main/scala/akka/cluster/FailureDetector.scala index 897d0413b5..60af0a1c41 100644 --- a/akka-cluster/src/main/scala/akka/cluster/FailureDetector.scala +++ b/akka-cluster/src/main/scala/akka/cluster/FailureDetector.scala @@ -4,8 +4,7 @@ package akka.cluster -import akka.actor.{ Address, ActorSystem } -import akka.event.{ Logging, LogSource } +import akka.actor.Address /** * Interface for Akka failure detectors. @@ -13,8 +12,7 @@ import akka.event.{ Logging, LogSource } trait FailureDetector { /** - * Returns true if the connection is considered to be up and healthy - * and returns false otherwise. + * Returns true if the connection is considered to be up and healthy and returns false otherwise. */ def isAvailable(connection: Address): Boolean @@ -23,77 +21,8 @@ trait FailureDetector { */ def heartbeat(connection: Address): Unit - /** - * Calculates how likely it is that the connection has failed. - *

- * If a connection does not have any records in failure detector then it is - * considered healthy. - */ - def phi(connection: Address): Double - /** * Removes the heartbeat management for a connection. */ def remove(connection: Address): Unit } - -/** - * User controllable "puppet" failure detector. - */ -class FailureDetectorPuppet(system: ActorSystem, connectionsToStartWith: Address*) extends FailureDetector { - import java.util.concurrent.ConcurrentHashMap - - trait Status - object Up extends Status - object Down extends Status - - implicit val logSource: LogSource[AnyRef] = new LogSource[AnyRef] { - def genString(o: AnyRef): String = o.getClass.getName - override def getClazz(o: AnyRef): Class[_] = o.getClass - } - - val log = Logging(system, this) - - private val connections = { - val cs = new ConcurrentHashMap[Address, Status] - connectionsToStartWith foreach { cs put (_, Up) } - cs - } - - def +(connection: Address): this.type = { - log.debug("Adding cluster node [{}]", connection) - connections.put(connection, Up) - this - } - - def markAsDown(connection: Address): this.type = { - connections.put(connection, Down) - this - } - - def markAsUp(connection: Address): this.type = { - connections.put(connection, Up) - this - } - - def isAvailable(connection: Address): Boolean = connections.get(connection) match { - case null ⇒ - this + connection - true - case Up ⇒ - log.debug("isAvailable: Cluster node IS NOT available [{}]", connection) - true - case Down ⇒ - log.debug("isAvailable: Cluster node IS available [{}]", connection) - false - } - - def heartbeat(connection: Address): Unit = log.debug("Heart beat from cluster node[{}]", connection) - - def phi(connection: Address): Double = 0.1D - - def remove(connection: Address): Unit = { - log.debug("Removing cluster node [{}]", connection) - connections.remove(connection) - } -} diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala 
index 5b4bca3379..f60e6fa7dc 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala @@ -67,7 +67,7 @@ class ClusterSpec extends AkkaSpec(ClusterSpec.config) with BeforeAndAfter { @volatile var _unavailable: Set[Address] = Set.empty - override val failureDetector = new AccrualFailureDetector(system, clusterSettings) { + override val failureDetector = new FailureDetectorPuppet(system) { override def isAvailable(connection: Address): Boolean = { if (_unavailable.contains(connection)) false else super.isAvailable(connection) diff --git a/akka-cluster/src/test/scala/akka/cluster/FailureDetectorPuppet.scala b/akka-cluster/src/test/scala/akka/cluster/FailureDetectorPuppet.scala new file mode 100644 index 0000000000..3245a15f97 --- /dev/null +++ b/akka-cluster/src/test/scala/akka/cluster/FailureDetectorPuppet.scala @@ -0,0 +1,60 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ + +package akka.cluster + +import akka.actor.{ Address, ActorSystem } +import akka.event.{ Logging, LogSource } + +/** + * User controllable "puppet" failure detector. 
+ */ +class FailureDetectorPuppet(system: ActorSystem, settings: ClusterSettings) extends FailureDetector { + import java.util.concurrent.ConcurrentHashMap + + def this(system: ActorSystem) = this(system, new ClusterSettings(system.settings.config, system.name)) + + trait Status + object Up extends Status + object Down extends Status + + implicit private val logSource: LogSource[AnyRef] = new LogSource[AnyRef] { + def genString(o: AnyRef): String = o.getClass.getName + override def getClazz(o: AnyRef): Class[_] = o.getClass + } + + private val log = Logging(system, this) + + private val connections = new ConcurrentHashMap[Address, Status] + + def markAsDown(connection: Address): this.type = { + connections.put(connection, Down) + this + } + + def markAsUp(connection: Address): this.type = { + connections.put(connection, Up) + this + } + + def isAvailable(connection: Address): Boolean = connections.get(connection) match { + case null ⇒ + log.debug("Adding cluster node [{}]", connection) + connections.put(connection, Up) + true + case Up ⇒ + log.debug("isAvailable: Cluster node IS available [{}]", connection) + true + case Down ⇒ + log.debug("isAvailable: Cluster node IS NOT available [{}]", connection) + false + } + + def heartbeat(connection: Address): Unit = log.debug("Heart beat from cluster node[{}]", connection) + + def remove(connection: Address): Unit = { + log.debug("Removing cluster node [{}]", connection) + connections.remove(connection) + } +} From 44fefb9b55dc682fac79fb0337bd0045b37ec728 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 11 Jun 2012 11:05:19 +0200 Subject: [PATCH 87/92] #2187 - Making Warning sections in docs yellow so they aren't overlooked --- akka-docs/_sphinx/themes/akka/static/docs.css | 36 +++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/akka-docs/_sphinx/themes/akka/static/docs.css b/akka-docs/_sphinx/themes/akka/static/docs.css index 7b6d3dbf52..3d37718c68 100644 --- 
a/akka-docs/_sphinx/themes/akka/static/docs.css +++ b/akka-docs/_sphinx/themes/akka/static/docs.css @@ -90,6 +90,42 @@ strong {color: #1d3c52; } box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.25); } +.warning { + background-image: none; + background-color: #fdf5d9; + filter: progid:DXImageTransform.Microsoft.gradient(enabled = false); + padding: 14px; + border-color: #ffffc4; + -webkit-box-shadow: none; + -moz-box-shadow: none; + box-shadow: none; + margin-bottom: 18px; + position: relative; + padding: 7px 15px; + color: #404040; + background-repeat: repeat-x; + background-image: -khtml-gradient(linear, left top, left bottom, from(#ffffc4), to(#ffff00)); + background-image: -moz-linear-gradient(top, #ffffc4, #ffff00); + background-image: -ms-linear-gradient(top, #ffffc4, #ffff00); + background-image: -webkit-gradient(linear, left top, left bottom, color-stop(0%, #ffffc4), color-stop(100%, #ffff00)); + background-image: -webkit-linear-gradient(top, #ffffc4, #ffff00); + background-image: -o-linear-gradient(top, #ffffc4, #ffff00); + background-image: linear-gradient(top, #ffffc4, #ffff00); + filter: progid:DXImageTransform.Microsoft.gradient(startColorstr='#ffffc4', endColorstr='#ffff00', GradientType=0); + text-shadow: 0 -1px 0 rgba(0, 0, 0, 0.25); + border-color: #dff69a #ffff00 #E4C652; + border-color: rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.1) rgba(0, 0, 0, 0.25); + text-shadow: 0 1px 0 rgba(255, 255, 255, 0.5); + border-width: 1px; + border-style: solid; + -webkit-border-radius: 4px; + -moz-border-radius: 4px; + border-radius: 4px; + -webkit-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.25); + -moz-box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.25); + box-shadow: inset 0 1px 0 rgba(255, 255, 255, 0.25); +} + .admonition p.admonition-title { color: rgba(0, 0, 0, 0.6); text-shadow: 0 1px 0 rgba(255, 255, 255, .7); From edc0c0d888d88adf90cc9c3201f9707d6fa1c4b5 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 11 Jun 2012 11:26:28 +0200 Subject: [PATCH 88/92] 
#2119 - enforce Java6 for releases --- project/scripts/release | 2 ++ 1 file changed, 2 insertions(+) diff --git a/project/scripts/release b/project/scripts/release index 886e6629b1..13795b3d53 100755 --- a/project/scripts/release +++ b/project/scripts/release @@ -93,6 +93,8 @@ fi declare -r version=$1 declare -r publish_path="${release_server}:${release_path}" +[[ `java -version 2>&1 | grep "java version" | awk '{print $3}' | tr -d \" | awk '{split($0, array, ".")} END{print array[2]}'` -eq 6 ]] || fail "Java version is not 1.6" + # check for a git command type -P git &> /dev/null || fail "git command not found" From 523f433e4b328ab6976d0b4f037432c9772a4825 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Mon, 11 Jun 2012 11:36:29 +0200 Subject: [PATCH 89/92] Fixed potential problem in test --- .../cluster/LeaderDowningNodeThatIsUnreachableSpec.scala | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala index f3f8015ced..dc383dca43 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala @@ -40,16 +40,12 @@ class LeaderDowningNodeThatIsUnreachableSpec override def cluster = clusterNode - lazy val firstAddress = node(first).address - lazy val secondAddress = node(second).address - lazy val thirdAddress = node(third).address - lazy val fourthAddress = node(fourth).address - "The Leader in a 4 node cluster" must { "be able to DOWN a 'last' node that is UNREACHABLE" taggedAs LongRunningTest in { awaitClusterUp(first, second, third, fourth) + val fourthAddress = node(fourth).address runOn(first) { // kill 'fourth' node testConductor.shutdown(fourth, 0) @@ -77,8 +73,9 @@ class 
LeaderDowningNodeThatIsUnreachableSpec } "be able to DOWN a 'middle' node that is UNREACHABLE" taggedAs LongRunningTest in { - testConductor.enter("before-down-second-node") + val secondAddress = node(second).address + testConductor.enter("before-down-second-node") runOn(first) { // kill 'second' node testConductor.shutdown(second, 0) From b65cf5c2ec233a9f7952485aee9081498f4fa95c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Mon, 11 Jun 2012 14:32:17 +0200 Subject: [PATCH 90/92] Created FailureDetectorStrategy with two implementations: FailureDetectorPuppetStrategy and AccrualFailureDetectorStrategy. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Created FailureDetectorStrategy base trait. - Created FailureDetectorPuppetStrategy. - Created AccrualFailureDetectorStrategy. - Created two versions of LeaderDowningNodeThatIsUnreachableMultiJvmSpec - LeaderDowningNodeThatIsUnreachableWithFailureDetectorPuppet - LeaderDowningNodeThatIsUnreachableWithAccrualFailureDetector - Added AccrualFailureDetectorStrategy to all the remaining tests - will be split up into two versions shortly. 
Signed-off-by: Jonas Bonér --- ...ientDowningNodeThatIsUnreachableSpec.scala | 10 +-- .../ClientDowningNodeThatIsUpSpec.scala | 10 +-- .../scala/akka/cluster/ConvergenceSpec.scala | 8 +-- .../cluster/FailureDetectorStrategy.scala | 61 +++++++++++++++++++ .../GossipingAccrualFailureDetectorSpec.scala | 6 +- .../akka/cluster/JoinTwoClustersSpec.scala | 12 ++-- ...aderDowningNodeThatIsUnreachableSpec.scala | 25 ++++---- .../akka/cluster/LeaderElectionSpec.scala | 10 +-- .../MembershipChangeListenerExitingSpec.scala | 6 +- .../MembershipChangeListenerJoinSpec.scala | 4 +- .../MembershipChangeListenerLeavingSpec.scala | 6 +- .../MembershipChangeListenerSpec.scala | 6 +- .../MembershipChangeListenerUpSpec.scala | 4 +- .../akka/cluster/MultiNodeClusterSpec.scala | 17 +++--- .../scala/akka/cluster/NodeJoinSpec.scala | 4 +- ...LeavingAndExitingAndBeingRemovedSpec.scala | 6 +- .../cluster/NodeLeavingAndExitingSpec.scala | 6 +- .../scala/akka/cluster/NodeLeavingSpec.scala | 6 +- .../akka/cluster/NodeMembershipSpec.scala | 6 +- .../scala/akka/cluster/NodeShutdownSpec.scala | 4 +- .../scala/akka/cluster/NodeUpSpec.scala | 4 +- .../scala/akka/cluster/SunnyWeatherSpec.scala | 10 +-- .../akka/cluster/FailureDetectorPuppet.scala | 4 +- 23 files changed, 149 insertions(+), 86 deletions(-) create mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/FailureDetectorStrategy.scala diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala index 6d4d09f7cb..d1a9f756dd 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala @@ -18,12 +18,12 @@ object ClientDowningNodeThatIsUnreachableMultiJvmSpec extends MultiNodeConfig { commonConfig(debugConfig(on = 
false).withFallback(MultiNodeClusterSpec.clusterConfig)) } -class ClientDowningNodeThatIsUnreachableMultiJvmNode1 extends ClientDowningNodeThatIsUnreachableSpec -class ClientDowningNodeThatIsUnreachableMultiJvmNode2 extends ClientDowningNodeThatIsUnreachableSpec -class ClientDowningNodeThatIsUnreachableMultiJvmNode3 extends ClientDowningNodeThatIsUnreachableSpec -class ClientDowningNodeThatIsUnreachableMultiJvmNode4 extends ClientDowningNodeThatIsUnreachableSpec +class ClientDowningNodeThatIsUnreachableMultiJvmNode1 extends ClientDowningNodeThatIsUnreachableSpec with AccrualFailureDetectorStrategy +class ClientDowningNodeThatIsUnreachableMultiJvmNode2 extends ClientDowningNodeThatIsUnreachableSpec with AccrualFailureDetectorStrategy +class ClientDowningNodeThatIsUnreachableMultiJvmNode3 extends ClientDowningNodeThatIsUnreachableSpec with AccrualFailureDetectorStrategy +class ClientDowningNodeThatIsUnreachableMultiJvmNode4 extends ClientDowningNodeThatIsUnreachableSpec with AccrualFailureDetectorStrategy -class ClientDowningNodeThatIsUnreachableSpec +abstract class ClientDowningNodeThatIsUnreachableSpec extends MultiNodeSpec(ClientDowningNodeThatIsUnreachableMultiJvmSpec) with MultiNodeClusterSpec { diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala index db00438c9e..687596745b 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala @@ -18,12 +18,12 @@ object ClientDowningNodeThatIsUpMultiJvmSpec extends MultiNodeConfig { commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) } -class ClientDowningNodeThatIsUpMultiJvmNode1 extends ClientDowningNodeThatIsUpSpec -class ClientDowningNodeThatIsUpMultiJvmNode2 extends ClientDowningNodeThatIsUpSpec -class 
ClientDowningNodeThatIsUpMultiJvmNode3 extends ClientDowningNodeThatIsUpSpec -class ClientDowningNodeThatIsUpMultiJvmNode4 extends ClientDowningNodeThatIsUpSpec +class ClientDowningNodeThatIsUpMultiJvmNode1 extends ClientDowningNodeThatIsUpSpec with AccrualFailureDetectorStrategy +class ClientDowningNodeThatIsUpMultiJvmNode2 extends ClientDowningNodeThatIsUpSpec with AccrualFailureDetectorStrategy +class ClientDowningNodeThatIsUpMultiJvmNode3 extends ClientDowningNodeThatIsUpSpec with AccrualFailureDetectorStrategy +class ClientDowningNodeThatIsUpMultiJvmNode4 extends ClientDowningNodeThatIsUpSpec with AccrualFailureDetectorStrategy -class ClientDowningNodeThatIsUpSpec +abstract class ClientDowningNodeThatIsUpSpec extends MultiNodeSpec(ClientDowningNodeThatIsUpMultiJvmSpec) with MultiNodeClusterSpec { diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala index 65571b97b3..df47e19bec 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala @@ -25,10 +25,10 @@ object ConvergenceMultiJvmSpec extends MultiNodeConfig { withFallback(MultiNodeClusterSpec.clusterConfig)) } -class ConvergenceMultiJvmNode1 extends ConvergenceSpec -class ConvergenceMultiJvmNode2 extends ConvergenceSpec -class ConvergenceMultiJvmNode3 extends ConvergenceSpec -class ConvergenceMultiJvmNode4 extends ConvergenceSpec +class ConvergenceMultiJvmNode1 extends ConvergenceSpec with AccrualFailureDetectorStrategy +class ConvergenceMultiJvmNode2 extends ConvergenceSpec with AccrualFailureDetectorStrategy +class ConvergenceMultiJvmNode3 extends ConvergenceSpec with AccrualFailureDetectorStrategy +class ConvergenceMultiJvmNode4 extends ConvergenceSpec with AccrualFailureDetectorStrategy abstract class ConvergenceSpec extends MultiNodeSpec(ConvergenceMultiJvmSpec) diff --git 
a/akka-cluster/src/multi-jvm/scala/akka/cluster/FailureDetectorStrategy.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/FailureDetectorStrategy.scala new file mode 100644 index 0000000000..dcbb65d0f1 --- /dev/null +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/FailureDetectorStrategy.scala @@ -0,0 +1,61 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ +package akka.cluster + +import akka.actor.Address +import akka.remote.testkit.MultiNodeSpec +import akka.testkit._ + +/** + * Base trait for all failure detector strategies. + */ +trait FailureDetectorStrategy { + + /** + * Get or create the FailureDetector to be used in the cluster node. + * To be defined by subclass. + */ + def failureDetector: FailureDetector + + /** + * Marks a node as available in the failure detector. + * To be defined by subclass. + */ + def markNodeAsAvailable(address: Address): Unit + + /** + * Marks a node as unavailable in the failure detector. + * To be defined by subclass. + */ + def markNodeAsUnavailable(address: Address): Unit +} + +/** + * Defines a FailureDetectorPuppet-based FailureDetectorStrategy. + */ +trait FailureDetectorPuppetStrategy extends FailureDetectorStrategy { self: MultiNodeSpec ⇒ + + /** + * The puppet instance. Separated from 'failureDetector' field so we don't have to cast when using the puppet specific methods. + */ + private val puppet = new FailureDetectorPuppet(system) + + override def failureDetector: FailureDetector = puppet + + override def markNodeAsAvailable(address: Address): Unit = puppet markNodeAsAvailable address + + override def markNodeAsUnavailable(address: Address): Unit = puppet markNodeAsUnavailable address +} + +/** + * Defines a AccrualFailureDetector-based FailureDetectorStrategy. 
+ */ +trait AccrualFailureDetectorStrategy extends FailureDetectorStrategy { self: MultiNodeSpec ⇒ + + override val failureDetector: FailureDetector = new AccrualFailureDetector(system, new ClusterSettings(system.settings.config, system.name)) + + override def markNodeAsAvailable(address: Address): Unit = { /* no-op */ } + + override def markNodeAsUnavailable(address: Address): Unit = { /* no-op */ } +} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala index f75ca3b058..63090b7a1f 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala @@ -19,9 +19,9 @@ object GossipingAccrualFailureDetectorMultiJvmSpec extends MultiNodeConfig { withFallback(MultiNodeClusterSpec.clusterConfig)) } -class GossipingAccrualFailureDetectorMultiJvmNode1 extends GossipingAccrualFailureDetectorSpec -class GossipingAccrualFailureDetectorMultiJvmNode2 extends GossipingAccrualFailureDetectorSpec -class GossipingAccrualFailureDetectorMultiJvmNode3 extends GossipingAccrualFailureDetectorSpec +class GossipingAccrualFailureDetectorMultiJvmNode1 extends GossipingAccrualFailureDetectorSpec with AccrualFailureDetectorStrategy +class GossipingAccrualFailureDetectorMultiJvmNode2 extends GossipingAccrualFailureDetectorSpec with AccrualFailureDetectorStrategy +class GossipingAccrualFailureDetectorMultiJvmNode3 extends GossipingAccrualFailureDetectorSpec with AccrualFailureDetectorStrategy abstract class GossipingAccrualFailureDetectorSpec extends MultiNodeSpec(GossipingAccrualFailureDetectorMultiJvmSpec) with MultiNodeClusterSpec { diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala index e86602949f..2000e63253 100644 --- 
a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala @@ -20,12 +20,12 @@ object JoinTwoClustersMultiJvmSpec extends MultiNodeConfig { commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) } -class JoinTwoClustersMultiJvmNode1 extends JoinTwoClustersSpec -class JoinTwoClustersMultiJvmNode2 extends JoinTwoClustersSpec -class JoinTwoClustersMultiJvmNode3 extends JoinTwoClustersSpec -class JoinTwoClustersMultiJvmNode4 extends JoinTwoClustersSpec -class JoinTwoClustersMultiJvmNode5 extends JoinTwoClustersSpec -class JoinTwoClustersMultiJvmNode6 extends JoinTwoClustersSpec +class JoinTwoClustersMultiJvmNode1 extends JoinTwoClustersSpec with AccrualFailureDetectorStrategy +class JoinTwoClustersMultiJvmNode2 extends JoinTwoClustersSpec with AccrualFailureDetectorStrategy +class JoinTwoClustersMultiJvmNode3 extends JoinTwoClustersSpec with AccrualFailureDetectorStrategy +class JoinTwoClustersMultiJvmNode4 extends JoinTwoClustersSpec with AccrualFailureDetectorStrategy +class JoinTwoClustersMultiJvmNode5 extends JoinTwoClustersSpec with AccrualFailureDetectorStrategy +class JoinTwoClustersMultiJvmNode6 extends JoinTwoClustersSpec with AccrualFailureDetectorStrategy abstract class JoinTwoClustersSpec extends MultiNodeSpec(JoinTwoClustersMultiJvmSpec) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala index dc383dca43..7dcb6b20f6 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala @@ -23,23 +23,22 @@ object LeaderDowningNodeThatIsUnreachableMultiJvmSpec extends MultiNodeConfig { withFallback(MultiNodeClusterSpec.clusterConfig)) } -class 
LeaderDowningNodeThatIsUnreachableMultiJvmNode1 extends LeaderDowningNodeThatIsUnreachableSpec -class LeaderDowningNodeThatIsUnreachableMultiJvmNode2 extends LeaderDowningNodeThatIsUnreachableSpec -class LeaderDowningNodeThatIsUnreachableMultiJvmNode3 extends LeaderDowningNodeThatIsUnreachableSpec -class LeaderDowningNodeThatIsUnreachableMultiJvmNode4 extends LeaderDowningNodeThatIsUnreachableSpec +class LeaderDowningNodeThatIsUnreachableWithFailureDetectorPuppetMultiJvmNode1 extends LeaderDowningNodeThatIsUnreachableSpec with FailureDetectorPuppetStrategy +class LeaderDowningNodeThatIsUnreachableWithFailureDetectorPuppetMultiJvmNode2 extends LeaderDowningNodeThatIsUnreachableSpec with FailureDetectorPuppetStrategy +class LeaderDowningNodeThatIsUnreachableWithFailureDetectorPuppetMultiJvmNode3 extends LeaderDowningNodeThatIsUnreachableSpec with FailureDetectorPuppetStrategy +class LeaderDowningNodeThatIsUnreachableWithFailureDetectorPuppetMultiJvmNode4 extends LeaderDowningNodeThatIsUnreachableSpec with FailureDetectorPuppetStrategy -class LeaderDowningNodeThatIsUnreachableSpec +class LeaderDowningNodeThatIsUnreachableWithAccrualFailureDetectorMultiJvmNode1 extends LeaderDowningNodeThatIsUnreachableSpec with AccrualFailureDetectorStrategy +class LeaderDowningNodeThatIsUnreachableWithAccrualFailureDetectorMultiJvmNode2 extends LeaderDowningNodeThatIsUnreachableSpec with AccrualFailureDetectorStrategy +class LeaderDowningNodeThatIsUnreachableWithAccrualFailureDetectorMultiJvmNode3 extends LeaderDowningNodeThatIsUnreachableSpec with AccrualFailureDetectorStrategy +class LeaderDowningNodeThatIsUnreachableWithAccrualFailureDetectorMultiJvmNode4 extends LeaderDowningNodeThatIsUnreachableSpec with AccrualFailureDetectorStrategy + +abstract class LeaderDowningNodeThatIsUnreachableSpec extends MultiNodeSpec(LeaderDowningNodeThatIsUnreachableMultiJvmSpec) with MultiNodeClusterSpec { import LeaderDowningNodeThatIsUnreachableMultiJvmSpec._ - // Set up the puppet failure 
detector - lazy val failureDetector = new FailureDetectorPuppet(system = system) - lazy val clusterNode = new Cluster(system.asInstanceOf[ExtendedActorSystem], failureDetector) - - override def cluster = clusterNode - "The Leader in a 4 node cluster" must { "be able to DOWN a 'last' node that is UNREACHABLE" taggedAs LongRunningTest in { @@ -52,7 +51,7 @@ class LeaderDowningNodeThatIsUnreachableSpec testConductor.enter("down-fourth-node") // mark the node as unreachable in the failure detector - failureDetector markAsDown fourthAddress + markNodeAsUnavailable(fourthAddress) // --- HERE THE LEADER SHOULD DETECT FAILURE AND AUTO-DOWN THE UNREACHABLE NODE --- @@ -82,7 +81,7 @@ class LeaderDowningNodeThatIsUnreachableSpec testConductor.enter("down-second-node") // mark the node as unreachable in the failure detector - failureDetector markAsDown secondAddress + markNodeAsUnavailable(secondAddress) // --- HERE THE LEADER SHOULD DETECT FAILURE AND AUTO-DOWN THE UNREACHABLE NODE --- diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala index 43f0fc19eb..f44b494917 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala @@ -19,11 +19,11 @@ object LeaderElectionMultiJvmSpec extends MultiNodeConfig { commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) } -class LeaderElectionMultiJvmNode1 extends LeaderElectionSpec -class LeaderElectionMultiJvmNode2 extends LeaderElectionSpec -class LeaderElectionMultiJvmNode3 extends LeaderElectionSpec -class LeaderElectionMultiJvmNode4 extends LeaderElectionSpec -class LeaderElectionMultiJvmNode5 extends LeaderElectionSpec +class LeaderElectionMultiJvmNode1 extends LeaderElectionSpec with AccrualFailureDetectorStrategy +class LeaderElectionMultiJvmNode2 extends LeaderElectionSpec with 
AccrualFailureDetectorStrategy +class LeaderElectionMultiJvmNode3 extends LeaderElectionSpec with AccrualFailureDetectorStrategy +class LeaderElectionMultiJvmNode4 extends LeaderElectionSpec with AccrualFailureDetectorStrategy +class LeaderElectionMultiJvmNode5 extends LeaderElectionSpec with AccrualFailureDetectorStrategy abstract class LeaderElectionSpec extends MultiNodeSpec(LeaderElectionMultiJvmSpec) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala index d76c3cf689..7389a01ffc 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala @@ -27,9 +27,9 @@ object MembershipChangeListenerExitingMultiJvmSpec extends MultiNodeConfig { .withFallback(MultiNodeClusterSpec.clusterConfig))) } -class MembershipChangeListenerExitingMultiJvmNode1 extends MembershipChangeListenerExitingSpec -class MembershipChangeListenerExitingMultiJvmNode2 extends MembershipChangeListenerExitingSpec -class MembershipChangeListenerExitingMultiJvmNode3 extends MembershipChangeListenerExitingSpec +class MembershipChangeListenerExitingMultiJvmNode1 extends MembershipChangeListenerExitingSpec with AccrualFailureDetectorStrategy +class MembershipChangeListenerExitingMultiJvmNode2 extends MembershipChangeListenerExitingSpec with AccrualFailureDetectorStrategy +class MembershipChangeListenerExitingMultiJvmNode3 extends MembershipChangeListenerExitingSpec with AccrualFailureDetectorStrategy abstract class MembershipChangeListenerExitingSpec extends MultiNodeSpec(MembershipChangeListenerExitingMultiJvmSpec) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinSpec.scala index bdf8f7d44d..8a940375ef 100644 --- 
a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinSpec.scala @@ -25,8 +25,8 @@ object MembershipChangeListenerJoinMultiJvmSpec extends MultiNodeConfig { .withFallback(MultiNodeClusterSpec.clusterConfig))) } -class MembershipChangeListenerJoinMultiJvmNode1 extends MembershipChangeListenerJoinSpec -class MembershipChangeListenerJoinMultiJvmNode2 extends MembershipChangeListenerJoinSpec +class MembershipChangeListenerJoinMultiJvmNode1 extends MembershipChangeListenerJoinSpec with AccrualFailureDetectorStrategy +class MembershipChangeListenerJoinMultiJvmNode2 extends MembershipChangeListenerJoinSpec with AccrualFailureDetectorStrategy abstract class MembershipChangeListenerJoinSpec extends MultiNodeSpec(MembershipChangeListenerJoinMultiJvmSpec) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerLeavingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerLeavingSpec.scala index 1ff11465bb..d7c79407a2 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerLeavingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerLeavingSpec.scala @@ -24,9 +24,9 @@ object MembershipChangeListenerLeavingMultiJvmSpec extends MultiNodeConfig { .withFallback(MultiNodeClusterSpec.clusterConfig)) } -class MembershipChangeListenerLeavingMultiJvmNode1 extends MembershipChangeListenerLeavingSpec -class MembershipChangeListenerLeavingMultiJvmNode2 extends MembershipChangeListenerLeavingSpec -class MembershipChangeListenerLeavingMultiJvmNode3 extends MembershipChangeListenerLeavingSpec +class MembershipChangeListenerLeavingMultiJvmNode1 extends MembershipChangeListenerLeavingSpec with AccrualFailureDetectorStrategy +class MembershipChangeListenerLeavingMultiJvmNode2 extends MembershipChangeListenerLeavingSpec with AccrualFailureDetectorStrategy +class 
MembershipChangeListenerLeavingMultiJvmNode3 extends MembershipChangeListenerLeavingSpec with AccrualFailureDetectorStrategy abstract class MembershipChangeListenerLeavingSpec extends MultiNodeSpec(MembershipChangeListenerLeavingMultiJvmSpec) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala index c48727b1cd..914db94acb 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala @@ -17,9 +17,9 @@ object MembershipChangeListenerMultiJvmSpec extends MultiNodeConfig { commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) } -class MembershipChangeListenerMultiJvmNode1 extends MembershipChangeListenerSpec -class MembershipChangeListenerMultiJvmNode2 extends MembershipChangeListenerSpec -class MembershipChangeListenerMultiJvmNode3 extends MembershipChangeListenerSpec +class MembershipChangeListenerMultiJvmNode1 extends MembershipChangeListenerSpec with AccrualFailureDetectorStrategy +class MembershipChangeListenerMultiJvmNode2 extends MembershipChangeListenerSpec with AccrualFailureDetectorStrategy +class MembershipChangeListenerMultiJvmNode3 extends MembershipChangeListenerSpec with AccrualFailureDetectorStrategy abstract class MembershipChangeListenerSpec extends MultiNodeSpec(MembershipChangeListenerMultiJvmSpec) with MultiNodeClusterSpec { diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala index 3e22dd456d..4cd81cd0e7 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala @@ -18,8 +18,8 @@ object MembershipChangeListenerUpMultiJvmSpec 
extends MultiNodeConfig { commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) } -class MembershipChangeListenerUpMultiJvmNode1 extends MembershipChangeListenerUpSpec -class MembershipChangeListenerUpMultiJvmNode2 extends MembershipChangeListenerUpSpec +class MembershipChangeListenerUpMultiJvmNode1 extends MembershipChangeListenerUpSpec with AccrualFailureDetectorStrategy +class MembershipChangeListenerUpMultiJvmNode2 extends MembershipChangeListenerUpSpec with AccrualFailureDetectorStrategy abstract class MembershipChangeListenerUpSpec extends MultiNodeSpec(MembershipChangeListenerUpMultiJvmSpec) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala index b185067ab0..39ecd8b0dc 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala @@ -5,7 +5,7 @@ package akka.cluster import com.typesafe.config.Config import com.typesafe.config.ConfigFactory -import akka.actor.Address +import akka.actor.{Address, ExtendedActorSystem} import akka.remote.testconductor.RoleName import akka.remote.testkit.MultiNodeSpec import akka.testkit._ @@ -28,14 +28,19 @@ object MultiNodeClusterSpec { """) } -trait MultiNodeClusterSpec { self: MultiNodeSpec ⇒ +trait MultiNodeClusterSpec extends FailureDetectorStrategy { self: MultiNodeSpec ⇒ override def initialParticipants = roles.size /** - * Get or create a cluster node using 'Cluster(system)' extension. + * The cluster node instance. Needs to be lazily created. */ - def cluster: Cluster = Cluster(system) + private lazy val clusterNode = new Cluster(system.asInstanceOf[ExtendedActorSystem], failureDetector) + + /** + * Get the cluster node to use. 
+ */ + def cluster: Cluster = clusterNode /** * Use this method instead of 'cluster.self' @@ -48,9 +53,7 @@ trait MultiNodeClusterSpec { self: MultiNodeSpec ⇒ * nodes (roles). First node will be started first * and others will join the first. */ - def startCluster(roles: RoleName*): Unit = { - awaitStartCluster(false, roles.toSeq) - } + def startCluster(roles: RoleName*): Unit = awaitStartCluster(false, roles.toSeq) /** * Initialize the cluster of the specified member diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinSpec.scala index 066e86aae6..58ed162af7 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinSpec.scala @@ -24,8 +24,8 @@ object NodeJoinMultiJvmSpec extends MultiNodeConfig { .withFallback(MultiNodeClusterSpec.clusterConfig))) } -class NodeJoinMultiJvmNode1 extends NodeJoinSpec -class NodeJoinMultiJvmNode2 extends NodeJoinSpec +class NodeJoinMultiJvmNode1 extends NodeJoinSpec with AccrualFailureDetectorStrategy +class NodeJoinMultiJvmNode2 extends NodeJoinSpec with AccrualFailureDetectorStrategy abstract class NodeJoinSpec extends MultiNodeSpec(NodeJoinMultiJvmSpec) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala index 8e274be311..a16ae055f0 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala @@ -18,9 +18,9 @@ object NodeLeavingAndExitingAndBeingRemovedMultiJvmSpec extends MultiNodeConfig commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) } -class NodeLeavingAndExitingAndBeingRemovedMultiJvmNode1 extends NodeLeavingAndExitingAndBeingRemovedSpec 
-class NodeLeavingAndExitingAndBeingRemovedMultiJvmNode2 extends NodeLeavingAndExitingAndBeingRemovedSpec -class NodeLeavingAndExitingAndBeingRemovedMultiJvmNode3 extends NodeLeavingAndExitingAndBeingRemovedSpec +class NodeLeavingAndExitingAndBeingRemovedMultiJvmNode1 extends NodeLeavingAndExitingAndBeingRemovedSpec with AccrualFailureDetectorStrategy +class NodeLeavingAndExitingAndBeingRemovedMultiJvmNode2 extends NodeLeavingAndExitingAndBeingRemovedSpec with AccrualFailureDetectorStrategy +class NodeLeavingAndExitingAndBeingRemovedMultiJvmNode3 extends NodeLeavingAndExitingAndBeingRemovedSpec with AccrualFailureDetectorStrategy abstract class NodeLeavingAndExitingAndBeingRemovedSpec extends MultiNodeSpec(NodeLeavingAndExitingAndBeingRemovedMultiJvmSpec) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala index 79fff4770f..bb32d8641f 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala @@ -26,9 +26,9 @@ object NodeLeavingAndExitingMultiJvmSpec extends MultiNodeConfig { .withFallback(MultiNodeClusterSpec.clusterConfig))) } -class NodeLeavingAndExitingMultiJvmNode1 extends NodeLeavingAndExitingSpec -class NodeLeavingAndExitingMultiJvmNode2 extends NodeLeavingAndExitingSpec -class NodeLeavingAndExitingMultiJvmNode3 extends NodeLeavingAndExitingSpec +class NodeLeavingAndExitingMultiJvmNode1 extends NodeLeavingAndExitingSpec with AccrualFailureDetectorStrategy +class NodeLeavingAndExitingMultiJvmNode2 extends NodeLeavingAndExitingSpec with AccrualFailureDetectorStrategy +class NodeLeavingAndExitingMultiJvmNode3 extends NodeLeavingAndExitingSpec with AccrualFailureDetectorStrategy abstract class NodeLeavingAndExitingSpec extends MultiNodeSpec(NodeLeavingAndExitingMultiJvmSpec) diff --git 
a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala index b834492045..eccba596f2 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala @@ -22,9 +22,9 @@ object NodeLeavingMultiJvmSpec extends MultiNodeConfig { .withFallback(MultiNodeClusterSpec.clusterConfig)) } -class NodeLeavingMultiJvmNode1 extends NodeLeavingSpec -class NodeLeavingMultiJvmNode2 extends NodeLeavingSpec -class NodeLeavingMultiJvmNode3 extends NodeLeavingSpec +class NodeLeavingMultiJvmNode1 extends NodeLeavingSpec with AccrualFailureDetectorStrategy +class NodeLeavingMultiJvmNode2 extends NodeLeavingSpec with AccrualFailureDetectorStrategy +class NodeLeavingMultiJvmNode3 extends NodeLeavingSpec with AccrualFailureDetectorStrategy abstract class NodeLeavingSpec extends MultiNodeSpec(NodeLeavingMultiJvmSpec) with MultiNodeClusterSpec { diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala index ef65cefd0f..c7fa1569f2 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala @@ -16,9 +16,9 @@ object NodeMembershipMultiJvmSpec extends MultiNodeConfig { commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) } -class NodeMembershipMultiJvmNode1 extends NodeMembershipSpec -class NodeMembershipMultiJvmNode2 extends NodeMembershipSpec -class NodeMembershipMultiJvmNode3 extends NodeMembershipSpec +class NodeMembershipMultiJvmNode1 extends NodeMembershipSpec with AccrualFailureDetectorStrategy +class NodeMembershipMultiJvmNode2 extends NodeMembershipSpec with AccrualFailureDetectorStrategy +class NodeMembershipMultiJvmNode3 extends NodeMembershipSpec with AccrualFailureDetectorStrategy 
abstract class NodeMembershipSpec extends MultiNodeSpec(NodeMembershipMultiJvmSpec) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala index 4dc90a5b89..7417ae06d5 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala @@ -24,8 +24,8 @@ object NodeShutdownMultiJvmSpec extends MultiNodeConfig { } -class NodeShutdownMultiJvmNode1 extends NodeShutdownSpec -class NodeShutdownMultiJvmNode2 extends NodeShutdownSpec +class NodeShutdownMultiJvmNode1 extends NodeShutdownSpec with AccrualFailureDetectorStrategy +class NodeShutdownMultiJvmNode2 extends NodeShutdownSpec with AccrualFailureDetectorStrategy abstract class NodeShutdownSpec extends MultiNodeSpec(NodeShutdownMultiJvmSpec) with MultiNodeClusterSpec { import NodeShutdownMultiJvmSpec._ diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala index 6cb8bf9e07..4a2342fca1 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala @@ -19,8 +19,8 @@ object NodeUpMultiJvmSpec extends MultiNodeConfig { commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) } -class NodeUpMultiJvmNode1 extends NodeUpSpec -class NodeUpMultiJvmNode2 extends NodeUpSpec +class NodeUpMultiJvmNode1 extends NodeUpSpec with AccrualFailureDetectorStrategy +class NodeUpMultiJvmNode2 extends NodeUpSpec with AccrualFailureDetectorStrategy abstract class NodeUpSpec extends MultiNodeSpec(NodeUpMultiJvmSpec) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala index fcb1393f8a..cabaf21ab1 100644 --- 
a/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala @@ -28,11 +28,11 @@ object SunnyWeatherMultiJvmSpec extends MultiNodeConfig { """)) } -class SunnyWeatherMultiJvmNode1 extends SunnyWeatherSpec -class SunnyWeatherMultiJvmNode2 extends SunnyWeatherSpec -class SunnyWeatherMultiJvmNode3 extends SunnyWeatherSpec -class SunnyWeatherMultiJvmNode4 extends SunnyWeatherSpec -class SunnyWeatherMultiJvmNode5 extends SunnyWeatherSpec +class SunnyWeatherMultiJvmNode1 extends SunnyWeatherSpec with AccrualFailureDetectorStrategy +class SunnyWeatherMultiJvmNode2 extends SunnyWeatherSpec with AccrualFailureDetectorStrategy +class SunnyWeatherMultiJvmNode3 extends SunnyWeatherSpec with AccrualFailureDetectorStrategy +class SunnyWeatherMultiJvmNode4 extends SunnyWeatherSpec with AccrualFailureDetectorStrategy +class SunnyWeatherMultiJvmNode5 extends SunnyWeatherSpec with AccrualFailureDetectorStrategy abstract class SunnyWeatherSpec extends MultiNodeSpec(SunnyWeatherMultiJvmSpec) diff --git a/akka-cluster/src/test/scala/akka/cluster/FailureDetectorPuppet.scala b/akka-cluster/src/test/scala/akka/cluster/FailureDetectorPuppet.scala index 3245a15f97..f35bca381d 100644 --- a/akka-cluster/src/test/scala/akka/cluster/FailureDetectorPuppet.scala +++ b/akka-cluster/src/test/scala/akka/cluster/FailureDetectorPuppet.scala @@ -28,12 +28,12 @@ class FailureDetectorPuppet(system: ActorSystem, settings: ClusterSettings) exte private val connections = new ConcurrentHashMap[Address, Status] - def markAsDown(connection: Address): this.type = { + def markNodeAsUnavailable(connection: Address): this.type = { connections.put(connection, Down) this } - def markAsUp(connection: Address): this.type = { + def markNodeAsAvailable(connection: Address): this.type = { connections.put(connection, Up) this } From 2dcceb58ce688b9fec6174126bfe6d3b774d0f74 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= 
Date: Mon, 11 Jun 2012 16:48:19 +0200 Subject: [PATCH 91/92] Split up all tests that are related to failure detection into two versions: Accrual FD and FD Puppet. Also moved all tests that are not failure detection tests to use FD Puppet. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- ...ientDowningNodeThatIsUnreachableSpec.scala | 14 +++- .../ClientDowningNodeThatIsUpSpec.scala | 15 +++- .../scala/akka/cluster/ConvergenceSpec.scala | 21 ++--- .../GossipingAccrualFailureDetectorSpec.scala | 10 ++- .../akka/cluster/JoinTwoClustersSpec.scala | 12 +-- ...aderDowningNodeThatIsUnreachableSpec.scala | 4 +- .../akka/cluster/LeaderElectionSpec.scala | 19 +++-- .../MembershipChangeListenerExitingSpec.scala | 6 +- .../MembershipChangeListenerJoinSpec.scala | 12 +-- .../MembershipChangeListenerLeavingSpec.scala | 6 +- .../MembershipChangeListenerSpec.scala | 77 ------------------- .../MembershipChangeListenerUpSpec.scala | 53 +++++++++---- .../scala/akka/cluster/NodeJoinSpec.scala | 12 +-- .../cluster/NodeLeavingAndExitingSpec.scala | 6 +- .../scala/akka/cluster/NodeLeavingSpec.scala | 14 ++-- .../akka/cluster/NodeMembershipSpec.scala | 6 +- .../scala/akka/cluster/NodeShutdownSpec.scala | 17 +++- .../scala/akka/cluster/NodeUpSpec.scala | 4 +- .../scala/akka/cluster/SunnyWeatherSpec.scala | 12 +-- 19 files changed, 145 insertions(+), 175 deletions(-) delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala index d1a9f756dd..343f0c7c17 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala @@ -18,10 +18,15 @@ object 
ClientDowningNodeThatIsUnreachableMultiJvmSpec extends MultiNodeConfig { commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) } -class ClientDowningNodeThatIsUnreachableMultiJvmNode1 extends ClientDowningNodeThatIsUnreachableSpec with AccrualFailureDetectorStrategy -class ClientDowningNodeThatIsUnreachableMultiJvmNode2 extends ClientDowningNodeThatIsUnreachableSpec with AccrualFailureDetectorStrategy -class ClientDowningNodeThatIsUnreachableMultiJvmNode3 extends ClientDowningNodeThatIsUnreachableSpec with AccrualFailureDetectorStrategy -class ClientDowningNodeThatIsUnreachableMultiJvmNode4 extends ClientDowningNodeThatIsUnreachableSpec with AccrualFailureDetectorStrategy +class ClientDowningNodeThatIsUnreachableWithFailureDetectorPuppetMultiJvmNode1 extends ClientDowningNodeThatIsUnreachableSpec with FailureDetectorPuppetStrategy +class ClientDowningNodeThatIsUnreachableWithFailureDetectorPuppetMultiJvmNode2 extends ClientDowningNodeThatIsUnreachableSpec with FailureDetectorPuppetStrategy +class ClientDowningNodeThatIsUnreachableWithFailureDetectorPuppetMultiJvmNode3 extends ClientDowningNodeThatIsUnreachableSpec with FailureDetectorPuppetStrategy +class ClientDowningNodeThatIsUnreachableWithFailureDetectorPuppetMultiJvmNode4 extends ClientDowningNodeThatIsUnreachableSpec with FailureDetectorPuppetStrategy + +class ClientDowningNodeThatIsUnreachableWithAccrualFailureDetectorMultiJvmNode1 extends ClientDowningNodeThatIsUnreachableSpec with AccrualFailureDetectorStrategy +class ClientDowningNodeThatIsUnreachableWithAccrualFailureDetectorMultiJvmNode2 extends ClientDowningNodeThatIsUnreachableSpec with AccrualFailureDetectorStrategy +class ClientDowningNodeThatIsUnreachableWithAccrualFailureDetectorMultiJvmNode3 extends ClientDowningNodeThatIsUnreachableSpec with AccrualFailureDetectorStrategy +class ClientDowningNodeThatIsUnreachableWithAccrualFailureDetectorMultiJvmNode4 extends ClientDowningNodeThatIsUnreachableSpec with 
AccrualFailureDetectorStrategy abstract class ClientDowningNodeThatIsUnreachableSpec extends MultiNodeSpec(ClientDowningNodeThatIsUnreachableMultiJvmSpec) @@ -38,6 +43,7 @@ abstract class ClientDowningNodeThatIsUnreachableSpec runOn(first) { // kill 'third' node testConductor.shutdown(third, 0) + markNodeAsUnavailable(thirdAddress) // mark 'third' node as DOWN cluster.down(thirdAddress) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala index 687596745b..95eeefd982 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala @@ -18,10 +18,15 @@ object ClientDowningNodeThatIsUpMultiJvmSpec extends MultiNodeConfig { commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) } -class ClientDowningNodeThatIsUpMultiJvmNode1 extends ClientDowningNodeThatIsUpSpec with AccrualFailureDetectorStrategy -class ClientDowningNodeThatIsUpMultiJvmNode2 extends ClientDowningNodeThatIsUpSpec with AccrualFailureDetectorStrategy -class ClientDowningNodeThatIsUpMultiJvmNode3 extends ClientDowningNodeThatIsUpSpec with AccrualFailureDetectorStrategy -class ClientDowningNodeThatIsUpMultiJvmNode4 extends ClientDowningNodeThatIsUpSpec with AccrualFailureDetectorStrategy +class ClientDowningNodeThatIsUpWithFailureDetectorPuppetMultiJvmNode1 extends ClientDowningNodeThatIsUpSpec with FailureDetectorPuppetStrategy +class ClientDowningNodeThatIsUpWithFailureDetectorPuppetMultiJvmNode2 extends ClientDowningNodeThatIsUpSpec with FailureDetectorPuppetStrategy +class ClientDowningNodeThatIsUpWithFailureDetectorPuppetMultiJvmNode3 extends ClientDowningNodeThatIsUpSpec with FailureDetectorPuppetStrategy +class ClientDowningNodeThatIsUpWithFailureDetectorPuppetMultiJvmNode4 extends ClientDowningNodeThatIsUpSpec with 
FailureDetectorPuppetStrategy + +class ClientDowningNodeThatIsUpWithAccrualFailureDetectorMultiJvmNode1 extends ClientDowningNodeThatIsUpSpec with AccrualFailureDetectorStrategy +class ClientDowningNodeThatIsUpWithAccrualFailureDetectorMultiJvmNode2 extends ClientDowningNodeThatIsUpSpec with AccrualFailureDetectorStrategy +class ClientDowningNodeThatIsUpWithAccrualFailureDetectorMultiJvmNode3 extends ClientDowningNodeThatIsUpSpec with AccrualFailureDetectorStrategy +class ClientDowningNodeThatIsUpWithAccrualFailureDetectorMultiJvmNode4 extends ClientDowningNodeThatIsUpSpec with AccrualFailureDetectorStrategy abstract class ClientDowningNodeThatIsUpSpec extends MultiNodeSpec(ClientDowningNodeThatIsUpMultiJvmSpec) @@ -40,6 +45,8 @@ abstract class ClientDowningNodeThatIsUpSpec cluster.down(thirdAddress) testConductor.enter("down-third-node") + markNodeAsUnavailable(thirdAddress) + awaitUpConvergence(numberOfMembers = 3, canNotBePartOfMemberRing = Seq(thirdAddress)) cluster.latestGossip.members.exists(_.address == thirdAddress) must be(false) } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala index df47e19bec..bdc0a1ae8b 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala @@ -17,22 +17,24 @@ object ConvergenceMultiJvmSpec extends MultiNodeConfig { val fourth = role("fourth") commonConfig(debugConfig(on = false). - withFallback(ConfigFactory.parseString(""" - akka.cluster { - failure-detector.threshold = 4 - } - """)). + withFallback(ConfigFactory.parseString("akka.cluster.failure-detector.threshold = 4")). 
withFallback(MultiNodeClusterSpec.clusterConfig)) } -class ConvergenceMultiJvmNode1 extends ConvergenceSpec with AccrualFailureDetectorStrategy -class ConvergenceMultiJvmNode2 extends ConvergenceSpec with AccrualFailureDetectorStrategy -class ConvergenceMultiJvmNode3 extends ConvergenceSpec with AccrualFailureDetectorStrategy -class ConvergenceMultiJvmNode4 extends ConvergenceSpec with AccrualFailureDetectorStrategy +class ConvergenceWithFailureDetectorPuppetMultiJvmNode1 extends ConvergenceSpec with FailureDetectorPuppetStrategy +class ConvergenceWithFailureDetectorPuppetMultiJvmNode2 extends ConvergenceSpec with FailureDetectorPuppetStrategy +class ConvergenceWithFailureDetectorPuppetMultiJvmNode3 extends ConvergenceSpec with FailureDetectorPuppetStrategy +class ConvergenceWithFailureDetectorPuppetMultiJvmNode4 extends ConvergenceSpec with FailureDetectorPuppetStrategy + +class ConvergenceWithAccrualFailureDetectorMultiJvmNode1 extends ConvergenceSpec with AccrualFailureDetectorStrategy +class ConvergenceWithAccrualFailureDetectorMultiJvmNode2 extends ConvergenceSpec with AccrualFailureDetectorStrategy +class ConvergenceWithAccrualFailureDetectorMultiJvmNode3 extends ConvergenceSpec with AccrualFailureDetectorStrategy +class ConvergenceWithAccrualFailureDetectorMultiJvmNode4 extends ConvergenceSpec with AccrualFailureDetectorStrategy abstract class ConvergenceSpec extends MultiNodeSpec(ConvergenceMultiJvmSpec) with MultiNodeClusterSpec { + import ConvergenceMultiJvmSpec._ "A cluster of 3 members" must { @@ -54,6 +56,7 @@ abstract class ConvergenceSpec runOn(first) { // kill 'third' node testConductor.shutdown(third, 0) + markNodeAsUnavailable(thirdAddress) } runOn(first, second) { diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala index 63090b7a1f..b14c0d927c 100644 --- 
a/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala @@ -19,12 +19,14 @@ object GossipingAccrualFailureDetectorMultiJvmSpec extends MultiNodeConfig { withFallback(MultiNodeClusterSpec.clusterConfig)) } -class GossipingAccrualFailureDetectorMultiJvmNode1 extends GossipingAccrualFailureDetectorSpec with AccrualFailureDetectorStrategy -class GossipingAccrualFailureDetectorMultiJvmNode2 extends GossipingAccrualFailureDetectorSpec with AccrualFailureDetectorStrategy -class GossipingAccrualFailureDetectorMultiJvmNode3 extends GossipingAccrualFailureDetectorSpec with AccrualFailureDetectorStrategy +class GossipingWithAccrualFailureDetectorMultiJvmNode1 extends GossipingAccrualFailureDetectorSpec with AccrualFailureDetectorStrategy +class GossipingWithAccrualFailureDetectorMultiJvmNode2 extends GossipingAccrualFailureDetectorSpec with AccrualFailureDetectorStrategy +class GossipingWithAccrualFailureDetectorMultiJvmNode3 extends GossipingAccrualFailureDetectorSpec with AccrualFailureDetectorStrategy -abstract class GossipingAccrualFailureDetectorSpec extends MultiNodeSpec(GossipingAccrualFailureDetectorMultiJvmSpec) +abstract class GossipingAccrualFailureDetectorSpec + extends MultiNodeSpec(GossipingAccrualFailureDetectorMultiJvmSpec) with MultiNodeClusterSpec { + import GossipingAccrualFailureDetectorMultiJvmSpec._ lazy val firstAddress = node(first).address diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala index 2000e63253..4b64bb6e58 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala @@ -20,12 +20,12 @@ object JoinTwoClustersMultiJvmSpec extends MultiNodeConfig { commonConfig(debugConfig(on = 
false).withFallback(MultiNodeClusterSpec.clusterConfig)) } -class JoinTwoClustersMultiJvmNode1 extends JoinTwoClustersSpec with AccrualFailureDetectorStrategy -class JoinTwoClustersMultiJvmNode2 extends JoinTwoClustersSpec with AccrualFailureDetectorStrategy -class JoinTwoClustersMultiJvmNode3 extends JoinTwoClustersSpec with AccrualFailureDetectorStrategy -class JoinTwoClustersMultiJvmNode4 extends JoinTwoClustersSpec with AccrualFailureDetectorStrategy -class JoinTwoClustersMultiJvmNode5 extends JoinTwoClustersSpec with AccrualFailureDetectorStrategy -class JoinTwoClustersMultiJvmNode6 extends JoinTwoClustersSpec with AccrualFailureDetectorStrategy +class JoinTwoClustersMultiJvmNode1 extends JoinTwoClustersSpec with FailureDetectorPuppetStrategy +class JoinTwoClustersMultiJvmNode2 extends JoinTwoClustersSpec with FailureDetectorPuppetStrategy +class JoinTwoClustersMultiJvmNode3 extends JoinTwoClustersSpec with FailureDetectorPuppetStrategy +class JoinTwoClustersMultiJvmNode4 extends JoinTwoClustersSpec with FailureDetectorPuppetStrategy +class JoinTwoClustersMultiJvmNode5 extends JoinTwoClustersSpec with FailureDetectorPuppetStrategy +class JoinTwoClustersMultiJvmNode6 extends JoinTwoClustersSpec with FailureDetectorPuppetStrategy abstract class JoinTwoClustersSpec extends MultiNodeSpec(JoinTwoClustersMultiJvmSpec) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala index 7dcb6b20f6..5e2545394d 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala @@ -17,9 +17,7 @@ object LeaderDowningNodeThatIsUnreachableMultiJvmSpec extends MultiNodeConfig { val fourth = role("fourth") commonConfig(debugConfig(on = false). 
- withFallback(ConfigFactory.parseString(""" - akka.cluster.auto-down = on - """)). + withFallback(ConfigFactory.parseString("akka.cluster.auto-down = on")). withFallback(MultiNodeClusterSpec.clusterConfig)) } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala index f44b494917..e161206ba0 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala @@ -19,11 +19,17 @@ object LeaderElectionMultiJvmSpec extends MultiNodeConfig { commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) } -class LeaderElectionMultiJvmNode1 extends LeaderElectionSpec with AccrualFailureDetectorStrategy -class LeaderElectionMultiJvmNode2 extends LeaderElectionSpec with AccrualFailureDetectorStrategy -class LeaderElectionMultiJvmNode3 extends LeaderElectionSpec with AccrualFailureDetectorStrategy -class LeaderElectionMultiJvmNode4 extends LeaderElectionSpec with AccrualFailureDetectorStrategy -class LeaderElectionMultiJvmNode5 extends LeaderElectionSpec with AccrualFailureDetectorStrategy +class LeaderElectionWithFailureDetectorPuppetMultiJvmNode1 extends LeaderElectionSpec with FailureDetectorPuppetStrategy +class LeaderElectionWithFailureDetectorPuppetMultiJvmNode2 extends LeaderElectionSpec with FailureDetectorPuppetStrategy +class LeaderElectionWithFailureDetectorPuppetMultiJvmNode3 extends LeaderElectionSpec with FailureDetectorPuppetStrategy +class LeaderElectionWithFailureDetectorPuppetMultiJvmNode4 extends LeaderElectionSpec with FailureDetectorPuppetStrategy +class LeaderElectionWithFailureDetectorPuppetMultiJvmNode5 extends LeaderElectionSpec with FailureDetectorPuppetStrategy + +class LeaderElectionWithAccrualFailureDetectorMultiJvmNode1 extends LeaderElectionSpec with AccrualFailureDetectorStrategy +class 
LeaderElectionWithAccrualFailureDetectorMultiJvmNode2 extends LeaderElectionSpec with AccrualFailureDetectorStrategy +class LeaderElectionWithAccrualFailureDetectorMultiJvmNode3 extends LeaderElectionSpec with AccrualFailureDetectorStrategy +class LeaderElectionWithAccrualFailureDetectorMultiJvmNode4 extends LeaderElectionSpec with AccrualFailureDetectorStrategy +class LeaderElectionWithAccrualFailureDetectorMultiJvmNode5 extends LeaderElectionSpec with AccrualFailureDetectorStrategy abstract class LeaderElectionSpec extends MultiNodeSpec(LeaderElectionMultiJvmSpec) @@ -57,9 +63,11 @@ abstract class LeaderElectionSpec myself match { case `controller` ⇒ + val leaderAddress = node(leader).address testConductor.enter("before-shutdown") testConductor.shutdown(leader, 0) testConductor.enter("after-shutdown", "after-down", "completed") + markNodeAsUnavailable(leaderAddress) case `leader` ⇒ testConductor.enter("before-shutdown", "after-shutdown") @@ -71,6 +79,7 @@ abstract class LeaderElectionSpec // user marks the shutdown leader as DOWN cluster.down(leaderAddress) testConductor.enter("after-down", "completed") + markNodeAsUnavailable(leaderAddress) case _ if remainingRoles.contains(myself) ⇒ // remaining cluster nodes, not shutdown diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala index 7389a01ffc..d9b2c7b876 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala @@ -27,9 +27,9 @@ object MembershipChangeListenerExitingMultiJvmSpec extends MultiNodeConfig { .withFallback(MultiNodeClusterSpec.clusterConfig))) } -class MembershipChangeListenerExitingMultiJvmNode1 extends MembershipChangeListenerExitingSpec with AccrualFailureDetectorStrategy -class MembershipChangeListenerExitingMultiJvmNode2 
extends MembershipChangeListenerExitingSpec with AccrualFailureDetectorStrategy -class MembershipChangeListenerExitingMultiJvmNode3 extends MembershipChangeListenerExitingSpec with AccrualFailureDetectorStrategy +class MembershipChangeListenerExitingMultiJvmNode1 extends MembershipChangeListenerExitingSpec with FailureDetectorPuppetStrategy +class MembershipChangeListenerExitingMultiJvmNode2 extends MembershipChangeListenerExitingSpec with FailureDetectorPuppetStrategy +class MembershipChangeListenerExitingMultiJvmNode3 extends MembershipChangeListenerExitingSpec with FailureDetectorPuppetStrategy abstract class MembershipChangeListenerExitingSpec extends MultiNodeSpec(MembershipChangeListenerExitingMultiJvmSpec) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinSpec.scala index 8a940375ef..2809ae820b 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinSpec.scala @@ -17,16 +17,12 @@ object MembershipChangeListenerJoinMultiJvmSpec extends MultiNodeConfig { commonConfig( debugConfig(on = false) - .withFallback(ConfigFactory.parseString(""" - akka.cluster { - leader-actions-interval = 5 s # increase the leader action task interval to allow time checking for JOIN before leader moves it to UP - } - """) - .withFallback(MultiNodeClusterSpec.clusterConfig))) + .withFallback(ConfigFactory.parseString("akka.cluster.leader-actions-interval = 5 s") // increase the leader action task interval to allow time checking for JOIN before leader moves it to UP + .withFallback(MultiNodeClusterSpec.clusterConfig))) } -class MembershipChangeListenerJoinMultiJvmNode1 extends MembershipChangeListenerJoinSpec with AccrualFailureDetectorStrategy -class MembershipChangeListenerJoinMultiJvmNode2 extends MembershipChangeListenerJoinSpec with 
AccrualFailureDetectorStrategy +class MembershipChangeListenerJoinMultiJvmNode1 extends MembershipChangeListenerJoinSpec with FailureDetectorPuppetStrategy +class MembershipChangeListenerJoinMultiJvmNode2 extends MembershipChangeListenerJoinSpec with FailureDetectorPuppetStrategy abstract class MembershipChangeListenerJoinSpec extends MultiNodeSpec(MembershipChangeListenerJoinMultiJvmSpec) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerLeavingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerLeavingSpec.scala index d7c79407a2..57cec4f389 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerLeavingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerLeavingSpec.scala @@ -24,9 +24,9 @@ object MembershipChangeListenerLeavingMultiJvmSpec extends MultiNodeConfig { .withFallback(MultiNodeClusterSpec.clusterConfig)) } -class MembershipChangeListenerLeavingMultiJvmNode1 extends MembershipChangeListenerLeavingSpec with AccrualFailureDetectorStrategy -class MembershipChangeListenerLeavingMultiJvmNode2 extends MembershipChangeListenerLeavingSpec with AccrualFailureDetectorStrategy -class MembershipChangeListenerLeavingMultiJvmNode3 extends MembershipChangeListenerLeavingSpec with AccrualFailureDetectorStrategy +class MembershipChangeListenerLeavingMultiJvmNode1 extends MembershipChangeListenerLeavingSpec with FailureDetectorPuppetStrategy +class MembershipChangeListenerLeavingMultiJvmNode2 extends MembershipChangeListenerLeavingSpec with FailureDetectorPuppetStrategy +class MembershipChangeListenerLeavingMultiJvmNode3 extends MembershipChangeListenerLeavingSpec with FailureDetectorPuppetStrategy abstract class MembershipChangeListenerLeavingSpec extends MultiNodeSpec(MembershipChangeListenerLeavingMultiJvmSpec) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala 
b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala deleted file mode 100644 index 914db94acb..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerSpec.scala +++ /dev/null @@ -1,77 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ -package akka.cluster - -import scala.collection.immutable.SortedSet -import com.typesafe.config.ConfigFactory -import akka.remote.testkit.MultiNodeConfig -import akka.remote.testkit.MultiNodeSpec -import akka.testkit._ - -object MembershipChangeListenerMultiJvmSpec extends MultiNodeConfig { - val first = role("first") - val second = role("second") - val third = role("third") - - commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) -} - -class MembershipChangeListenerMultiJvmNode1 extends MembershipChangeListenerSpec with AccrualFailureDetectorStrategy -class MembershipChangeListenerMultiJvmNode2 extends MembershipChangeListenerSpec with AccrualFailureDetectorStrategy -class MembershipChangeListenerMultiJvmNode3 extends MembershipChangeListenerSpec with AccrualFailureDetectorStrategy - -abstract class MembershipChangeListenerSpec extends MultiNodeSpec(MembershipChangeListenerMultiJvmSpec) - with MultiNodeClusterSpec { - import MembershipChangeListenerMultiJvmSpec._ - - lazy val firstAddress = node(first).address - lazy val secondAddress = node(second).address - - "A set of connected cluster systems" must { - - "(when two nodes) after cluster convergence updates the membership table then all MembershipChangeListeners should be triggered" taggedAs LongRunningTest in { - - awaitClusterUp(first) - - runOn(first, second) { - val latch = TestLatch() - cluster.registerListener(new MembershipChangeListener { - def notify(members: SortedSet[Member]) { - if (members.size == 2 && members.forall(_.status == MemberStatus.Up)) - latch.countDown() - } - }) - testConductor.enter("listener-1-registered") - cluster.join(firstAddress) - latch.await 
- } - - runOn(third) { - testConductor.enter("listener-1-registered") - } - - testConductor.enter("after-1") - } - - "(when three nodes) after cluster convergence updates the membership table then all MembershipChangeListeners should be triggered" taggedAs LongRunningTest in { - - val latch = TestLatch() - cluster.registerListener(new MembershipChangeListener { - def notify(members: SortedSet[Member]) { - if (members.size == 3 && members.forall(_.status == MemberStatus.Up)) - latch.countDown() - } - }) - testConductor.enter("listener-2-registered") - - runOn(third) { - cluster.join(firstAddress) - } - - latch.await - - testConductor.enter("after-2") - } - } -} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala index 4cd81cd0e7..c89bbe1f0a 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala @@ -5,21 +5,21 @@ package akka.cluster import scala.collection.immutable.SortedSet import com.typesafe.config.ConfigFactory -import org.scalatest.BeforeAndAfter import akka.remote.testkit.MultiNodeConfig import akka.remote.testkit.MultiNodeSpec import akka.testkit._ -import akka.util.duration._ object MembershipChangeListenerUpMultiJvmSpec extends MultiNodeConfig { val first = role("first") val second = role("second") + val third = role("third") commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) } -class MembershipChangeListenerUpMultiJvmNode1 extends MembershipChangeListenerUpSpec with AccrualFailureDetectorStrategy -class MembershipChangeListenerUpMultiJvmNode2 extends MembershipChangeListenerUpSpec with AccrualFailureDetectorStrategy +class MembershipChangeListenerUpMultiJvmNode1 extends MembershipChangeListenerUpSpec with FailureDetectorPuppetStrategy +class 
MembershipChangeListenerUpMultiJvmNode2 extends MembershipChangeListenerUpSpec with FailureDetectorPuppetStrategy +class MembershipChangeListenerUpMultiJvmNode3 extends MembershipChangeListenerUpSpec with FailureDetectorPuppetStrategy abstract class MembershipChangeListenerUpSpec extends MultiNodeSpec(MembershipChangeListenerUpMultiJvmSpec) @@ -30,29 +30,50 @@ abstract class MembershipChangeListenerUpSpec lazy val firstAddress = node(first).address lazy val secondAddress = node(second).address - "A registered MembershipChangeListener" must { - "be notified when new node is marked as UP by the leader" taggedAs LongRunningTest in { + "A set of connected cluster systems" must { - runOn(first) { - val upLatch = TestLatch() + "(when two nodes) after cluster convergence updates the membership table then all MembershipChangeListeners should be triggered" taggedAs LongRunningTest in { + + awaitClusterUp(first) + + runOn(first, second) { + val latch = TestLatch() cluster.registerListener(new MembershipChangeListener { def notify(members: SortedSet[Member]) { if (members.size == 2 && members.forall(_.status == MemberStatus.Up)) - upLatch.countDown() + latch.countDown() } }) - testConductor.enter("registered-listener") - - upLatch.await - awaitUpConvergence(numberOfMembers = 2) + testConductor.enter("listener-1-registered") + cluster.join(firstAddress) + latch.await } - runOn(second) { - testConductor.enter("registered-listener") + runOn(third) { + testConductor.enter("listener-1-registered") + } + + testConductor.enter("after-1") + } + + "(when three nodes) after cluster convergence updates the membership table then all MembershipChangeListeners should be triggered" taggedAs LongRunningTest in { + + val latch = TestLatch() + cluster.registerListener(new MembershipChangeListener { + def notify(members: SortedSet[Member]) { + if (members.size == 3 && members.forall(_.status == MemberStatus.Up)) + latch.countDown() + } + }) + testConductor.enter("listener-2-registered") + + 
runOn(third) { cluster.join(firstAddress) } - testConductor.enter("after") + latch.await + + testConductor.enter("after-2") } } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinSpec.scala index 58ed162af7..6cf5fc220d 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinSpec.scala @@ -16,16 +16,12 @@ object NodeJoinMultiJvmSpec extends MultiNodeConfig { commonConfig( debugConfig(on = false) - .withFallback(ConfigFactory.parseString(""" - akka.cluster { - leader-actions-interval = 5 s # increase the leader action task interval - } - """) - .withFallback(MultiNodeClusterSpec.clusterConfig))) + .withFallback(ConfigFactory.parseString("akka.cluster.leader-actions-interval = 5 s") // increase the leader action task interval + .withFallback(MultiNodeClusterSpec.clusterConfig))) } -class NodeJoinMultiJvmNode1 extends NodeJoinSpec with AccrualFailureDetectorStrategy -class NodeJoinMultiJvmNode2 extends NodeJoinSpec with AccrualFailureDetectorStrategy +class NodeJoinMultiJvmNode1 extends NodeJoinSpec with FailureDetectorPuppetStrategy +class NodeJoinMultiJvmNode2 extends NodeJoinSpec with FailureDetectorPuppetStrategy abstract class NodeJoinSpec extends MultiNodeSpec(NodeJoinMultiJvmSpec) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala index bb32d8641f..ef285b5070 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala @@ -26,9 +26,9 @@ object NodeLeavingAndExitingMultiJvmSpec extends MultiNodeConfig { .withFallback(MultiNodeClusterSpec.clusterConfig))) } -class NodeLeavingAndExitingMultiJvmNode1 extends NodeLeavingAndExitingSpec with AccrualFailureDetectorStrategy 
-class NodeLeavingAndExitingMultiJvmNode2 extends NodeLeavingAndExitingSpec with AccrualFailureDetectorStrategy -class NodeLeavingAndExitingMultiJvmNode3 extends NodeLeavingAndExitingSpec with AccrualFailureDetectorStrategy +class NodeLeavingAndExitingMultiJvmNode1 extends NodeLeavingAndExitingSpec with FailureDetectorPuppetStrategy +class NodeLeavingAndExitingMultiJvmNode2 extends NodeLeavingAndExitingSpec with FailureDetectorPuppetStrategy +class NodeLeavingAndExitingMultiJvmNode3 extends NodeLeavingAndExitingSpec with FailureDetectorPuppetStrategy abstract class NodeLeavingAndExitingSpec extends MultiNodeSpec(NodeLeavingAndExitingMultiJvmSpec) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala index eccba596f2..8f637d87e5 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingSpec.scala @@ -16,18 +16,18 @@ object NodeLeavingMultiJvmSpec extends MultiNodeConfig { commonConfig( debugConfig(on = false) - .withFallback(ConfigFactory.parseString(""" - akka.cluster.unreachable-nodes-reaper-frequency = 30 s - """)) + .withFallback(ConfigFactory.parseString("akka.cluster.unreachable-nodes-reaper-frequency = 30 s")) .withFallback(MultiNodeClusterSpec.clusterConfig)) } -class NodeLeavingMultiJvmNode1 extends NodeLeavingSpec with AccrualFailureDetectorStrategy -class NodeLeavingMultiJvmNode2 extends NodeLeavingSpec with AccrualFailureDetectorStrategy -class NodeLeavingMultiJvmNode3 extends NodeLeavingSpec with AccrualFailureDetectorStrategy +class NodeLeavingMultiJvmNode1 extends NodeLeavingSpec with FailureDetectorPuppetStrategy +class NodeLeavingMultiJvmNode2 extends NodeLeavingSpec with FailureDetectorPuppetStrategy +class NodeLeavingMultiJvmNode3 extends NodeLeavingSpec with FailureDetectorPuppetStrategy -abstract class NodeLeavingSpec extends MultiNodeSpec(NodeLeavingMultiJvmSpec) 
+abstract class NodeLeavingSpec + extends MultiNodeSpec(NodeLeavingMultiJvmSpec) with MultiNodeClusterSpec { + import NodeLeavingMultiJvmSpec._ lazy val firstAddress = node(first).address diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala index c7fa1569f2..fb0573f77f 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala @@ -16,9 +16,9 @@ object NodeMembershipMultiJvmSpec extends MultiNodeConfig { commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) } -class NodeMembershipMultiJvmNode1 extends NodeMembershipSpec with AccrualFailureDetectorStrategy -class NodeMembershipMultiJvmNode2 extends NodeMembershipSpec with AccrualFailureDetectorStrategy -class NodeMembershipMultiJvmNode3 extends NodeMembershipSpec with AccrualFailureDetectorStrategy +class NodeMembershipMultiJvmNode1 extends NodeMembershipSpec with FailureDetectorPuppetStrategy +class NodeMembershipMultiJvmNode2 extends NodeMembershipSpec with FailureDetectorPuppetStrategy +class NodeMembershipMultiJvmNode3 extends NodeMembershipSpec with FailureDetectorPuppetStrategy abstract class NodeMembershipSpec extends MultiNodeSpec(NodeMembershipMultiJvmSpec) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala index 7417ae06d5..69b0a43a20 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeShutdownSpec.scala @@ -16,7 +16,7 @@ object NodeShutdownMultiJvmSpec extends MultiNodeConfig { commonConfig(debugConfig(on = false). withFallback(ConfigFactory.parseString(""" akka.cluster { - auto-down = on + auto-down = on failure-detector.threshold = 4 } """)). 
@@ -24,10 +24,16 @@ object NodeShutdownMultiJvmSpec extends MultiNodeConfig { } -class NodeShutdownMultiJvmNode1 extends NodeShutdownSpec with AccrualFailureDetectorStrategy -class NodeShutdownMultiJvmNode2 extends NodeShutdownSpec with AccrualFailureDetectorStrategy +class NodeShutdownWithFailureDetectorPuppetMultiJvmNode1 extends NodeShutdownSpec with FailureDetectorPuppetStrategy +class NodeShutdownWithFailureDetectorPuppetMultiJvmNode2 extends NodeShutdownSpec with FailureDetectorPuppetStrategy + +class NodeShutdownWithAccrualFailureDetectorMultiJvmNode1 extends NodeShutdownSpec with AccrualFailureDetectorStrategy +class NodeShutdownWithAccrualFailureDetectorMultiJvmNode2 extends NodeShutdownSpec with AccrualFailureDetectorStrategy + +abstract class NodeShutdownSpec + extends MultiNodeSpec(NodeShutdownMultiJvmSpec) + with MultiNodeClusterSpec { -abstract class NodeShutdownSpec extends MultiNodeSpec(NodeShutdownMultiJvmSpec) with MultiNodeClusterSpec { import NodeShutdownMultiJvmSpec._ "A cluster of 2 nodes" must { @@ -44,6 +50,9 @@ abstract class NodeShutdownSpec extends MultiNodeSpec(NodeShutdownMultiJvmSpec) runOn(first) { val secondAddress = node(second).address testConductor.shutdown(second, 0) + + markNodeAsUnavailable(secondAddress) + awaitUpConvergence(numberOfMembers = 1, canNotBePartOfMemberRing = Seq(secondAddress), 30.seconds) cluster.isSingletonCluster must be(true) assertLeader(first) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala index 4a2342fca1..0fdc3c89b8 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala @@ -19,8 +19,8 @@ object NodeUpMultiJvmSpec extends MultiNodeConfig { commonConfig(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig)) } -class NodeUpMultiJvmNode1 extends NodeUpSpec with AccrualFailureDetectorStrategy -class 
NodeUpMultiJvmNode2 extends NodeUpSpec with AccrualFailureDetectorStrategy +class NodeUpMultiJvmNode1 extends NodeUpSpec with FailureDetectorPuppetStrategy +class NodeUpMultiJvmNode2 extends NodeUpSpec with FailureDetectorPuppetStrategy abstract class NodeUpSpec extends MultiNodeSpec(NodeUpMultiJvmSpec) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala index cabaf21ab1..b8486841c6 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala @@ -21,18 +21,18 @@ object SunnyWeatherMultiJvmSpec extends MultiNodeConfig { commonConfig(ConfigFactory.parseString(""" akka.cluster { - gossip-interval = 400 ms + gossip-interval = 400 ms nr-of-deputy-nodes = 0 } akka.loglevel = INFO """)) } -class SunnyWeatherMultiJvmNode1 extends SunnyWeatherSpec with AccrualFailureDetectorStrategy -class SunnyWeatherMultiJvmNode2 extends SunnyWeatherSpec with AccrualFailureDetectorStrategy -class SunnyWeatherMultiJvmNode3 extends SunnyWeatherSpec with AccrualFailureDetectorStrategy -class SunnyWeatherMultiJvmNode4 extends SunnyWeatherSpec with AccrualFailureDetectorStrategy -class SunnyWeatherMultiJvmNode5 extends SunnyWeatherSpec with AccrualFailureDetectorStrategy +class SunnyWeatherMultiJvmNode1 extends SunnyWeatherSpec with FailureDetectorPuppetStrategy +class SunnyWeatherMultiJvmNode2 extends SunnyWeatherSpec with FailureDetectorPuppetStrategy +class SunnyWeatherMultiJvmNode3 extends SunnyWeatherSpec with FailureDetectorPuppetStrategy +class SunnyWeatherMultiJvmNode4 extends SunnyWeatherSpec with FailureDetectorPuppetStrategy +class SunnyWeatherMultiJvmNode5 extends SunnyWeatherSpec with FailureDetectorPuppetStrategy abstract class SunnyWeatherSpec extends MultiNodeSpec(SunnyWeatherMultiJvmSpec) From 36b040cfab7164079af75f6cf5ebbd00a279a245 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: 
Mon, 11 Jun 2012 18:11:02 +0200 Subject: [PATCH 92/92] Unbreaking master --- akka-cluster/src/main/resources/reference.conf | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/akka-cluster/src/main/resources/reference.conf b/akka-cluster/src/main/resources/reference.conf index 8c905d5b29..cdaf8c729c 100644 --- a/akka-cluster/src/main/resources/reference.conf +++ b/akka-cluster/src/main/resources/reference.conf @@ -33,7 +33,6 @@ akka { # how often should the node move nodes, marked as unreachable by the failure detector, out of the membership ring? unreachable-nodes-reaper-interval = 1s - # accrual failure detection config failure-detector { # defines the failure detector threshold @@ -43,6 +42,8 @@ akka { # actual crashes threshold = 8 + implementation-class = "" + max-sample-size = 1000 } }