diff --git a/akka-actor-tests/src/test/java/akka/dispatch/JavaFutureTests.java b/akka-actor-tests/src/test/java/akka/dispatch/JavaFutureTests.java index a87b7933d8..f494fd7d81 100644 --- a/akka-actor-tests/src/test/java/akka/dispatch/JavaFutureTests.java +++ b/akka-actor-tests/src/test/java/akka/dispatch/JavaFutureTests.java @@ -14,6 +14,7 @@ import java.util.LinkedList; import java.lang.Iterable; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import static akka.japi.Util.manifest; import akka.testkit.AkkaSpec; @@ -45,7 +46,7 @@ public class JavaFutureTests { } }, system.dispatcher()); - Future f2 = f1.map(new Function() { + Future f2 = f1.map(new Mapper() { public String apply(String s) { return s + " World"; } @@ -59,8 +60,8 @@ public class JavaFutureTests { final CountDownLatch latch = new CountDownLatch(1); Promise cf = Futures.promise(system.dispatcher()); Future f = cf; - f.onSuccess(new Procedure() { - public void apply(String result) { + f.onSuccess(new OnSuccess() { + public void onSuccess(String result) { if (result.equals("foo")) latch.countDown(); } @@ -76,8 +77,8 @@ public class JavaFutureTests { final CountDownLatch latch = new CountDownLatch(1); Promise cf = Futures.promise(system.dispatcher()); Future f = cf; - f.onFailure(new Procedure() { - public void apply(Throwable t) { + f.onFailure(new OnFailure() { + public void onFailure(Throwable t) { if (t instanceof NullPointerException) latch.countDown(); } @@ -94,8 +95,8 @@ public class JavaFutureTests { final CountDownLatch latch = new CountDownLatch(1); Promise cf = Futures.promise(system.dispatcher()); Future f = cf; - f.onComplete(new Procedure2() { - public void apply(Throwable t, String r) { + f.onComplete(new OnComplete() { + public void onComplete(Throwable t, String r) { latch.countDown(); } }); @@ -110,8 +111,8 @@ public class JavaFutureTests { final CountDownLatch latch = new CountDownLatch(1); Promise cf = Futures.promise(system.dispatcher()); Future f = cf; - f.foreach(new Procedure() { - public void apply(String future) { + f.foreach(new Foreach() { + public void each(String future) { latch.countDown(); } }); @@ -127,7 +128,7 @@ public class JavaFutureTests { Promise cf = Futures.promise(system.dispatcher()); cf.success("1000"); Future f = cf; - Future r = f.flatMap(new Function>() { + Future r = f.flatMap(new Mapper>() { public Future apply(String r) { latch.countDown(); Promise cf = Futures.promise(system.dispatcher()); @@ -146,8 +147,8 @@ public class JavaFutureTests { final CountDownLatch latch = new CountDownLatch(1); Promise cf = Futures.promise(system.dispatcher()); Future f = cf; - Future r = f.filter(new Function() { - public Boolean apply(String r) { + Future r = f.filter(new Filter() { + public boolean filter(String r) { latch.countDown(); return r.equals("foo"); } @@ -267,15 +268,55 @@ public class JavaFutureTests { } }, system.dispatcher()); - assertEquals(expect, Await.result(f, timeout)); + assertEquals(expect, Await.result(f, timeout).get()); } @Test - public void BlockMustBeCallable() { + public void blockMustBeCallable() { Promise p = Futures.promise(system.dispatcher()); Duration d = Duration.create(1, TimeUnit.SECONDS); p.success("foo"); Await.ready(p, d); assertEquals(Await.result(p, d), "foo"); } + + @Test + public void mapToMustBeCallable() { + Promise p = Futures.promise(system.dispatcher()); + Future f = p.future().mapTo(manifest(String.class)); + Duration d = Duration.create(1, TimeUnit.SECONDS); + p.success("foo"); + Await.ready(p, d); + 
assertEquals(Await.result(p, d), "foo"); + } + + @Test + public void recoverToMustBeCallable() { + final IllegalStateException fail = new IllegalStateException("OHNOES"); + Promise p = Futures.promise(system.dispatcher()); + Future f = p.future().recover(new Recover() { + public Object recover(Throwable t) throws Throwable { + if (t == fail) return "foo"; + else throw t; + } + }); + Duration d = Duration.create(1, TimeUnit.SECONDS); + p.failure(fail); + assertEquals(Await.result(f, d), "foo"); + } + + @Test + public void recoverWithToMustBeCallable() { + final IllegalStateException fail = new IllegalStateException("OHNOES"); + Promise p = Futures.promise(system.dispatcher()); + Future f = p.future().recoverWith(new Recover>() { + public Future recover(Throwable t) throws Throwable { + if (t == fail) return Futures.successful("foo", system.dispatcher()).future(); + else throw t; + } + }); + Duration d = Duration.create(1, TimeUnit.SECONDS); + p.failure(fail); + assertEquals(Await.result(f, d), "foo"); + } } diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorTimeoutSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorTimeoutSpec.scala index c8df739b48..6800033d4c 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ActorTimeoutSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ActorTimeoutSpec.scala @@ -3,53 +3,30 @@ */ package akka.actor -import org.scalatest.BeforeAndAfterAll import akka.util.duration._ -import akka.testkit.AkkaSpec -import akka.testkit.DefaultTimeout -import java.util.concurrent.TimeoutException +import akka.testkit._ import akka.dispatch.Await import akka.util.Timeout import akka.pattern.{ ask, AskTimeoutException } @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) -class ActorTimeoutSpec extends AkkaSpec with BeforeAndAfterAll with DefaultTimeout { +class ActorTimeoutSpec extends AkkaSpec { - val defaultTimeout = system.settings.ActorTimeout.duration - val testTimeout = if (system.settings.ActorTimeout.duration < 400.millis) 500 millis else 100 millis + val testTimeout = 200.millis.dilated "An Actor-based Future" must { - "use the global default timeout if no implicit in scope" in { - within(defaultTimeout - 100.millis, defaultTimeout + 400.millis) { - val echo = system.actorOf(Props.empty) - try { - val d = system.settings.ActorTimeout.duration - val f = echo ? "hallo" - intercept[AskTimeoutException] { Await.result(f, d + d) } - } finally { system.stop(echo) } - } - } - "use implicitly supplied timeout" in { implicit val timeout = Timeout(testTimeout) - within(testTimeout - 100.millis, testTimeout + 300.millis) { - val echo = system.actorOf(Props.empty) - try { - val f = (echo ? "hallo").mapTo[String] - intercept[AskTimeoutException] { Await.result(f, testTimeout + testTimeout) } - } finally { system.stop(echo) } - } + val echo = system.actorOf(Props.empty) + val f = (echo ? 
"hallo") + intercept[AskTimeoutException] { Await.result(f, testTimeout * 2) } } "use explicitly supplied timeout" in { - within(testTimeout - 100.millis, testTimeout + 300.millis) { - val echo = system.actorOf(Props.empty) - val f = echo.?("hallo")(testTimeout) - try { - intercept[AskTimeoutException] { Await.result(f, testTimeout + 300.millis) } - } finally { system.stop(echo) } - } + val echo = system.actorOf(Props.empty) + val f = echo.?("hallo")(testTimeout) + intercept[AskTimeoutException] { Await.result(f, testTimeout * 2) } } } } diff --git a/akka-actor-tests/src/test/scala/akka/actor/ConsistencySpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ConsistencySpec.scala index 981ce89ef6..6f8639f4a4 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ConsistencySpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ConsistencySpec.scala @@ -9,12 +9,15 @@ object ConsistencySpec { consistency-dispatcher { throughput = 1 keep-alive-time = 1 ms - core-pool-size-min = 10 - core-pool-size-max = 10 - max-pool-size-min = 10 - max-pool-size-max = 10 - task-queue-type = array - task-queue-size = 7 + executor = "thread-pool-executor" + thread-pool-executor { + core-pool-size-min = 10 + core-pool-size-max = 10 + max-pool-size-min = 10 + max-pool-size-max = 10 + task-queue-type = array + task-queue-size = 7 + } } """ class CacheMisaligned(var value: Long, var padding1: Long, var padding2: Long, var padding3: Int) //Vars, no final fences diff --git a/akka-actor-tests/src/test/scala/akka/actor/FSMActorSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/FSMActorSpec.scala index ca6d90e721..3a2c1bb627 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/FSMActorSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/FSMActorSpec.scala @@ -7,7 +7,6 @@ package akka.actor import org.scalatest.{ BeforeAndAfterAll, BeforeAndAfterEach } import akka.testkit._ import TestEvent.Mute -import FSM._ import akka.util.duration._ import akka.event._ import com.typesafe.config.ConfigFactory @@ -52,7 +51,7 @@ object FSMActorSpec { } } case Event("hello", _) ⇒ stay replying "world" - case Event("bye", _) ⇒ stop(Shutdown) + case Event("bye", _) ⇒ stop(FSM.Shutdown) } when(Open) { @@ -63,7 +62,7 @@ object FSMActorSpec { } whenUnhandled { - case Ev(msg) ⇒ { + case Event(msg, _) ⇒ { log.warning("unhandled event " + msg + " in state " + stateName + " with data " + stateData) unhandledLatch.open stay @@ -82,7 +81,7 @@ object FSMActorSpec { } onTermination { - case StopEvent(Shutdown, Locked, _) ⇒ + case StopEvent(FSM.Shutdown, Locked, _) ⇒ // stop is called from lockstate with shutdown as reason... terminatedLatch.open } @@ -110,6 +109,8 @@ class FSMActorSpec extends AkkaSpec(Map("akka.actor.debug.fsm" -> true)) with Im "unlock the lock" in { + import FSM.{ Transition, CurrentState, SubscribeTransitionCallBack } + val latches = new Latches import latches._ @@ -163,7 +164,7 @@ class FSMActorSpec extends AkkaSpec(Map("akka.actor.debug.fsm" -> true)) with Im val fsm = TestActorRef(new Actor with FSM[Int, Null] { startWith(1, null) when(1) { - case Ev("go") ⇒ goto(2) + case Event("go", _) ⇒ goto(2) } }) val name = fsm.path.toString @@ -182,7 +183,7 @@ class FSMActorSpec extends AkkaSpec(Map("akka.actor.debug.fsm" -> true)) with Im lazy val fsm = new Actor with FSM[Int, Null] { override def preStart = { started.countDown } startWith(1, null) - when(1) { NullFunction } + when(1) { FSM.NullFunction } onTermination { case x ⇒ testActor ! 
x } @@ -190,7 +191,7 @@ class FSMActorSpec extends AkkaSpec(Map("akka.actor.debug.fsm" -> true)) with Im val ref = system.actorOf(Props(fsm)) Await.ready(started, timeout.duration) system.stop(ref) - expectMsg(1 second, fsm.StopEvent(Shutdown, 1, null)) + expectMsg(1 second, fsm.StopEvent(FSM.Shutdown, 1, null)) } "log events and transitions if asked to do so" in { @@ -204,12 +205,12 @@ class FSMActorSpec extends AkkaSpec(Map("akka.actor.debug.fsm" -> true)) with Im val fsm = TestActorRef(new Actor with LoggingFSM[Int, Null] { startWith(1, null) when(1) { - case Ev("go") ⇒ - setTimer("t", Shutdown, 1.5 seconds, false) + case Event("go", _) ⇒ + setTimer("t", FSM.Shutdown, 1.5 seconds, false) goto(2) } when(2) { - case Ev("stop") ⇒ + case Event("stop", _) ⇒ cancelTimer("t") stop } @@ -230,7 +231,7 @@ class FSMActorSpec extends AkkaSpec(Map("akka.actor.debug.fsm" -> true)) with Im expectMsgPF(1 second, hint = "processing Event(stop,null)") { case Logging.Debug(`name`, `fsmClass`, s: String) if s.startsWith("processing Event(stop,null) from Actor[") ⇒ true } - expectMsgAllOf(1 second, Logging.Debug(name, fsmClass, "canceling timer 't'"), Normal) + expectMsgAllOf(1 second, Logging.Debug(name, fsmClass, "canceling timer 't'"), FSM.Normal) expectNoMsg(1 second) system.eventStream.unsubscribe(testActor) } @@ -251,6 +252,7 @@ class FSMActorSpec extends AkkaSpec(Map("akka.actor.debug.fsm" -> true)) with Im }) fsmref ! "log" val fsm = fsmref.underlyingActor + import FSM.LogEntry expectMsg(1 second, IndexedSeq(LogEntry(1, 0, "log"))) fsmref ! "count" fsmref ! "log" diff --git a/akka-actor-tests/src/test/scala/akka/actor/FSMTimingSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/FSMTimingSpec.scala index bf5a1974ee..59468125eb 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/FSMTimingSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/FSMTimingSpec.scala @@ -160,37 +160,37 @@ object FSMTimingSpec { startWith(Initial, 0) when(Initial) { - case Ev(TestSingleTimer) ⇒ + case Event(TestSingleTimer, _) ⇒ setTimer("tester", Tick, 500 millis, false) goto(TestSingleTimer) - case Ev(TestRepeatedTimer) ⇒ + case Event(TestRepeatedTimer, _) ⇒ setTimer("tester", Tick, 100 millis, true) goto(TestRepeatedTimer) using 4 - case Ev(TestStateTimeoutOverride) ⇒ + case Event(TestStateTimeoutOverride, _) ⇒ goto(TestStateTimeout) forMax (Duration.Inf) - case Ev(x: FSMTimingSpec.State) ⇒ goto(x) + case Event(x: FSMTimingSpec.State, _) ⇒ goto(x) } when(TestStateTimeout, stateTimeout = 500 millis) { - case Ev(StateTimeout) ⇒ goto(Initial) - case Ev(Cancel) ⇒ goto(Initial) replying (Cancel) + case Event(StateTimeout, _) ⇒ goto(Initial) + case Event(Cancel, _) ⇒ goto(Initial) replying (Cancel) } when(TestSingleTimer) { - case Ev(Tick) ⇒ + case Event(Tick, _) ⇒ tester ! Tick goto(Initial) } when(TestCancelTimer) { - case Ev(Tick) ⇒ + case Event(Tick, _) ⇒ setTimer("hallo", Tock, 1 milli, false) TestKit.awaitCond(context.asInstanceOf[ActorCell].mailbox.hasMessages, 1 second) cancelTimer("hallo") sender ! Tick setTimer("hallo", Tock, 500 millis, false) stay - case Ev(Tock) ⇒ + case Event(Tock, _) ⇒ tester ! 
Tock stay - case Ev(Cancel) ⇒ + case Event(Cancel, _) ⇒ cancelTimer("hallo") goto(Initial) } @@ -206,29 +206,29 @@ object FSMTimingSpec { } when(TestCancelStateTimerInNamedTimerMessage) { // FSM is suspended after processing this message and resumed 500ms later - case Ev(Tick) ⇒ + case Event(Tick, _) ⇒ suspend(self) setTimer("named", Tock, 1 millis, false) TestKit.awaitCond(context.asInstanceOf[ActorCell].mailbox.hasMessages, 1 second) stay forMax (1 millis) replying Tick - case Ev(Tock) ⇒ + case Event(Tock, _) ⇒ goto(TestCancelStateTimerInNamedTimerMessage2) } when(TestCancelStateTimerInNamedTimerMessage2) { - case Ev(StateTimeout) ⇒ + case Event(StateTimeout, _) ⇒ goto(Initial) - case Ev(Cancel) ⇒ + case Event(Cancel, _) ⇒ goto(Initial) replying Cancel } when(TestUnhandled) { - case Ev(SetHandler) ⇒ + case Event(SetHandler, _) ⇒ whenUnhandled { - case Ev(Tick) ⇒ + case Event(Tick, _) ⇒ tester ! Unhandled(Tick) stay } stay - case Ev(Cancel) ⇒ + case Event(Cancel, _) ⇒ whenUnhandled(NullFunction) goto(Initial) } diff --git a/akka-actor-tests/src/test/scala/akka/actor/FSMTransitionSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/FSMTransitionSpec.scala index 8d8fc5e725..691be63a0b 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/FSMTransitionSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/FSMTransitionSpec.scala @@ -5,7 +5,6 @@ package akka.actor import akka.testkit._ import akka.util.duration._ -import FSM._ import akka.util.Duration object FSMTransitionSpec { @@ -17,13 +16,13 @@ object FSMTransitionSpec { class MyFSM(target: ActorRef) extends Actor with FSM[Int, Unit] { startWith(0, Unit) when(0) { - case Ev("tick") ⇒ goto(1) + case Event("tick", _) ⇒ goto(1) } when(1) { - case Ev("tick") ⇒ goto(0) + case Event("tick", _) ⇒ goto(0) } whenUnhandled { - case Ev("reply") ⇒ stay replying "reply" + case Event("reply", _) ⇒ stay replying "reply" } initialize override def preRestart(reason: Throwable, msg: Option[Any]) { target ! "restarted" } @@ -32,10 +31,10 @@ object FSMTransitionSpec { class OtherFSM(target: ActorRef) extends Actor with FSM[Int, Int] { startWith(0, 0) when(0) { - case Ev("tick") ⇒ goto(1) using (1) + case Event("tick", _) ⇒ goto(1) using (1) } when(1) { - case Ev(_) ⇒ stay + case _ ⇒ stay } onTransition { case 0 -> 1 ⇒ target ! ((stateData, nextStateData)) @@ -56,6 +55,8 @@ class FSMTransitionSpec extends AkkaSpec with ImplicitSender { "A FSM transition notifier" must { "notify listeners" in { + import FSM.{ SubscribeTransitionCallBack, CurrentState, Transition } + val fsm = system.actorOf(Props(new MyFSM(testActor))) within(1 second) { fsm ! SubscribeTransitionCallBack(testActor) @@ -77,8 +78,8 @@ class FSMTransitionSpec extends AkkaSpec with ImplicitSender { })) within(300 millis) { - fsm ! SubscribeTransitionCallBack(forward) - expectMsg(CurrentState(fsm, 0)) + fsm ! FSM.SubscribeTransitionCallBack(forward) + expectMsg(FSM.CurrentState(fsm, 0)) system.stop(forward) fsm ! 
"tick" expectNoMsg diff --git a/akka-actor-tests/src/test/scala/akka/actor/IOActor.scala b/akka-actor-tests/src/test/scala/akka/actor/IOActor.scala index 13ed9d8c7e..915a8d5fc8 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/IOActor.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/IOActor.scala @@ -4,7 +4,7 @@ package akka.actor -import akka.util.{ ByteString, Duration, Timer } +import akka.util.{ ByteString, Duration, Deadline } import akka.util.duration._ import scala.util.continuations._ import akka.testkit._ @@ -244,13 +244,13 @@ class IOActorSpec extends AkkaSpec with DefaultTimeout { val promise = Promise[T]()(executor) - val timer = timeout match { - case Some(duration) ⇒ Some(Timer(duration)) + val timer: Option[Deadline] = timeout match { + case Some(duration) ⇒ Some(duration fromNow) case None ⇒ None } def check(n: Int, e: Throwable): Boolean = - (count.isEmpty || (n < count.get)) && (timer.isEmpty || timer.get.isTicking) && (filter.isEmpty || filter.get(e)) + (count.isEmpty || (n < count.get)) && (timer.isEmpty || timer.get.hasTimeLeft()) && (filter.isEmpty || filter.get(e)) def run(n: Int) { future onComplete { diff --git a/akka-actor-tests/src/test/scala/akka/actor/LocalActorRefProviderSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/LocalActorRefProviderSpec.scala index 82cd08fa77..5ebd8ff565 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/LocalActorRefProviderSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/LocalActorRefProviderSpec.scala @@ -14,8 +14,11 @@ object LocalActorRefProviderSpec { akka { actor { default-dispatcher { - core-pool-size-min = 16 - core-pool-size-max = 16 + executor = "thread-pool-executor" + thread-pool-executor { + core-pool-size-min = 16 + core-pool-size-max = 16 + } } } } diff --git a/akka-actor-tests/src/test/scala/akka/actor/TypedActorSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/TypedActorSpec.scala index 49b37cc506..b83fe78338 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/TypedActorSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/TypedActorSpec.scala @@ -25,10 +25,13 @@ object TypedActorSpec { val config = """ pooled-dispatcher { type = BalancingDispatcher - core-pool-size-min = 60 - core-pool-size-max = 60 - max-pool-size-min = 60 - max-pool-size-max = 60 + executor = "thread-pool-executor" + thread-pool-executor { + core-pool-size-min = 60 + core-pool-size-max = 60 + max-pool-size-min = 60 + max-pool-size-max = 60 + } } """ diff --git a/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala index e2b697a08f..45e1954486 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala @@ -448,16 +448,14 @@ object DispatcherModelSpec { class MessageDispatcherInterceptorConfigurator(config: Config, prerequisites: DispatcherPrerequisites) extends MessageDispatcherConfigurator(config, prerequisites) { - private val instance: MessageDispatcher = { - configureThreadPool(config, - threadPoolConfig ⇒ new Dispatcher(prerequisites, - config.getString("id"), - config.getInt("throughput"), - Duration(config.getNanoseconds("throughput-deadline-time"), TimeUnit.NANOSECONDS), - mailboxType, - threadPoolConfig, - Duration(config.getMilliseconds("shutdown-timeout"), TimeUnit.MILLISECONDS)) with MessageDispatcherInterceptor).build - } + private val instance: MessageDispatcher = + new 
Dispatcher(prerequisites, + config.getString("id"), + config.getInt("throughput"), + Duration(config.getNanoseconds("throughput-deadline-time"), TimeUnit.NANOSECONDS), + mailboxType, + configureExecutor(), + Duration(config.getMilliseconds("shutdown-timeout"), TimeUnit.MILLISECONDS)) with MessageDispatcherInterceptor override def dispatcher(): MessageDispatcher = instance } @@ -522,16 +520,14 @@ object BalancingDispatcherModelSpec { class BalancingMessageDispatcherInterceptorConfigurator(config: Config, prerequisites: DispatcherPrerequisites) extends MessageDispatcherConfigurator(config, prerequisites) { - private val instance: MessageDispatcher = { - configureThreadPool(config, - threadPoolConfig ⇒ new BalancingDispatcher(prerequisites, - config.getString("id"), - config.getInt("throughput"), - Duration(config.getNanoseconds("throughput-deadline-time"), TimeUnit.NANOSECONDS), - mailboxType, - threadPoolConfig, - Duration(config.getMilliseconds("shutdown-timeout"), TimeUnit.MILLISECONDS)) with MessageDispatcherInterceptor).build - } + private val instance: MessageDispatcher = + new BalancingDispatcher(prerequisites, + config.getString("id"), + config.getInt("throughput"), + Duration(config.getNanoseconds("throughput-deadline-time"), TimeUnit.NANOSECONDS), + mailboxType, + configureExecutor(), + Duration(config.getMilliseconds("shutdown-timeout"), TimeUnit.MILLISECONDS)) with MessageDispatcherInterceptor override def dispatcher(): MessageDispatcher = instance } diff --git a/akka-actor-tests/src/test/scala/akka/actor/dispatch/DispatcherActorSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/dispatch/DispatcherActorSpec.scala index 2dce8346db..4b3dd4a5b3 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/dispatch/DispatcherActorSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/dispatch/DispatcherActorSpec.scala @@ -16,14 +16,20 @@ object DispatcherActorSpec { } test-throughput-dispatcher { throughput = 101 - core-pool-size-min = 1 - core-pool-size-max = 1 + executor = "thread-pool-executor" + thread-pool-executor { + core-pool-size-min = 1 + core-pool-size-max = 1 + } } test-throughput-deadline-dispatcher { throughput = 2 throughput-deadline-time = 100 milliseconds - core-pool-size-min = 1 - core-pool-size-max = 1 + executor = "thread-pool-executor" + thread-pool-executor { + core-pool-size-min = 1 + core-pool-size-max = 1 + } } """ diff --git a/akka-actor-tests/src/test/scala/akka/config/ConfigSpec.scala b/akka-actor-tests/src/test/scala/akka/config/ConfigSpec.scala index 67c7a51b60..ad39057d1d 100644 --- a/akka-actor-tests/src/test/scala/akka/config/ConfigSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/config/ConfigSpec.scala @@ -18,35 +18,62 @@ class ConfigSpec extends AkkaSpec(ConfigFactory.defaultReference) { val settings = system.settings val config = settings.config - import config._ - getString("akka.version") must equal("2.0-SNAPSHOT") - settings.ConfigVersion must equal("2.0-SNAPSHOT") + { + import config._ - getBoolean("akka.daemonic") must equal(false) + getString("akka.version") must equal("2.0-SNAPSHOT") + settings.ConfigVersion must equal("2.0-SNAPSHOT") - getString("akka.actor.default-dispatcher.type") must equal("Dispatcher") - getMilliseconds("akka.actor.default-dispatcher.keep-alive-time") must equal(60 * 1000) - getDouble("akka.actor.default-dispatcher.core-pool-size-factor") must equal(3.0) - getDouble("akka.actor.default-dispatcher.max-pool-size-factor") must equal(3.0) - getInt("akka.actor.default-dispatcher.task-queue-size") must equal(-1) - 
getString("akka.actor.default-dispatcher.task-queue-type") must equal("linked") - getBoolean("akka.actor.default-dispatcher.allow-core-timeout") must equal(true) - getInt("akka.actor.default-dispatcher.mailbox-capacity") must equal(-1) - getMilliseconds("akka.actor.default-dispatcher.mailbox-push-timeout-time") must equal(10 * 1000) - getString("akka.actor.default-dispatcher.mailboxType") must be("") - getMilliseconds("akka.actor.default-dispatcher.shutdown-timeout") must equal(1 * 1000) - getInt("akka.actor.default-dispatcher.throughput") must equal(5) - getMilliseconds("akka.actor.default-dispatcher.throughput-deadline-time") must equal(0) + getBoolean("akka.daemonic") must equal(false) + getBoolean("akka.actor.serialize-messages") must equal(false) + settings.SerializeAllMessages must equal(false) - getBoolean("akka.actor.serialize-messages") must equal(false) - settings.SerializeAllMessages must equal(false) + getInt("akka.scheduler.ticksPerWheel") must equal(512) + settings.SchedulerTicksPerWheel must equal(512) - getInt("akka.scheduler.ticksPerWheel") must equal(512) - settings.SchedulerTicksPerWheel must equal(512) + getMilliseconds("akka.scheduler.tickDuration") must equal(100) + settings.SchedulerTickDuration must equal(100 millis) + } - getMilliseconds("akka.scheduler.tickDuration") must equal(100) - settings.SchedulerTickDuration must equal(100 millis) + { + val c = config.getConfig("akka.actor.default-dispatcher") + + //General dispatcher config + + { + c.getString("type") must equal("Dispatcher") + c.getString("executor") must equal("fork-join-executor") + c.getInt("mailbox-capacity") must equal(-1) + c.getMilliseconds("mailbox-push-timeout-time") must equal(10 * 1000) + c.getString("mailboxType") must be("") + c.getMilliseconds("shutdown-timeout") must equal(1 * 1000) + c.getInt("throughput") must equal(5) + c.getMilliseconds("throughput-deadline-time") must equal(0) + } + + //Fork join executor config + + { + val pool = c.getConfig("fork-join-executor") + pool.getInt("parallelism-min") must equal(8) + pool.getDouble("parallelism-factor") must equal(3.0) + pool.getInt("parallelism-max") must equal(64) + } + + //Thread pool executor config + + { + val pool = c.getConfig("thread-pool-executor") + import pool._ + getMilliseconds("keep-alive-time") must equal(60 * 1000) + getDouble("core-pool-size-factor") must equal(3.0) + getDouble("max-pool-size-factor") must equal(3.0) + getInt("task-queue-size") must equal(-1) + getString("task-queue-type") must equal("linked") + getBoolean("allow-core-timeout") must equal(true) + } + } } } } diff --git a/akka-actor-tests/src/test/scala/akka/dataflow/Future2Actor.scala b/akka-actor-tests/src/test/scala/akka/dataflow/Future2Actor.scala index 09fba90fc8..26f92d8a6d 100644 --- a/akka-actor-tests/src/test/scala/akka/dataflow/Future2Actor.scala +++ b/akka-actor-tests/src/test/scala/akka/dataflow/Future2Actor.scala @@ -5,11 +5,10 @@ package akka.dataflow import akka.actor.{ Actor, Props } import akka.dispatch.{ Future, Await } -import akka.actor.future2actor import akka.util.duration._ import akka.testkit.AkkaSpec import akka.testkit.DefaultTimeout -import akka.pattern.ask +import akka.pattern.{ ask, pipe } class Future2ActorSpec extends AkkaSpec with DefaultTimeout { diff --git a/akka-actor-tests/src/test/scala/akka/dispatch/FutureSpec.scala b/akka-actor-tests/src/test/scala/akka/dispatch/FutureSpec.scala index 71db22cd9a..e058218f2d 100644 --- a/akka-actor-tests/src/test/scala/akka/dispatch/FutureSpec.scala +++ 
b/akka-actor-tests/src/test/scala/akka/dispatch/FutureSpec.scala @@ -13,10 +13,10 @@ import akka.testkit.AkkaSpec import org.scalatest.junit.JUnitSuite import akka.testkit.DefaultTimeout import akka.testkit.TestLatch -import java.util.concurrent.{ TimeoutException, TimeUnit, CountDownLatch } import scala.runtime.NonLocalReturnControl import akka.pattern.ask import java.lang.{ IllegalStateException, ArithmeticException } +import java.util.concurrent._ object FutureSpec { class TestActor extends Actor { @@ -39,7 +39,6 @@ object FutureSpec { } } -@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) class JavaFutureSpec extends JavaFutureTests with JUnitSuite @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) @@ -55,11 +54,11 @@ class FutureSpec extends AkkaSpec with Checkers with BeforeAndAfterAll with Defa val empty = Promise[String]() val timedOut = Promise.successful[String]("Timedout") - Await.result(failure or timedOut, timeout.duration) must be("Timedout") - Await.result(timedOut or empty, timeout.duration) must be("Timedout") - Await.result(failure or failure or timedOut, timeout.duration) must be("Timedout") + Await.result(failure fallbackTo timedOut, timeout.duration) must be("Timedout") + Await.result(timedOut fallbackTo empty, timeout.duration) must be("Timedout") + Await.result(failure fallbackTo failure fallbackTo timedOut, timeout.duration) must be("Timedout") intercept[RuntimeException] { - Await.result(failure or otherFailure, timeout.duration) + Await.result(failure fallbackTo otherFailure, timeout.duration) }.getMessage must be("last") } } @@ -303,6 +302,32 @@ class FutureSpec extends AkkaSpec with Checkers with BeforeAndAfterAll with Defa } } + "recoverWith from exceptions" in { + val o = new IllegalStateException("original") + val r = new IllegalStateException("recovered") + + intercept[IllegalStateException] { + Await.result(Promise.failed[String](o) recoverWith { case _ if false == true ⇒ Promise.successful("yay!") }, timeout.duration) + } must be(o) + + Await.result(Promise.failed[String](o) recoverWith { case _ ⇒ Promise.successful("yay!") }, timeout.duration) must equal("yay!") + + intercept[IllegalStateException] { + Await.result(Promise.failed[String](o) recoverWith { case _ ⇒ Promise.failed[String](r) }, timeout.duration) + } must be(r) + } + + "andThen like a boss" in { + val q = new LinkedBlockingQueue[Int] + for (i ← 1 to 1000) { + Await.result(Future { q.add(1); 3 } andThen { case _ ⇒ q.add(2) } andThen { case Right(0) ⇒ q.add(Int.MaxValue) } andThen { case _ ⇒ q.add(3); }, timeout.duration) must be(3) + q.poll() must be(1) + q.poll() must be(2) + q.poll() must be(3) + q.clear() + } + } + "firstCompletedOf" in { val futures = Vector.fill[Future[Int]](10)(Promise[Int]()) :+ Promise.successful[Int](5) Await.result(Future.firstCompletedOf(futures), timeout.duration) must be(5) @@ -856,7 +881,7 @@ class FutureSpec extends AkkaSpec with Checkers with BeforeAndAfterAll with Defa "be completed" in { f((future, _) ⇒ future must be('completed)) } "contain a value" in { f((future, result) ⇒ future.value must be(Some(Right(result)))) } "return result with 'get'" in { f((future, result) ⇒ Await.result(future, timeout.duration) must be(result)) } - "return result with 'Await.sync'" in { f((future, result) ⇒ Await.result(future, timeout.duration) must be(result)) } + "return result with 'Await.result'" in { f((future, result) ⇒ Await.result(future, timeout.duration) must be(result)) } "not timeout" in { f((future, _) ⇒ Await.ready(future, 0 
millis)) } "filter result" in { f { (future, result) ⇒ @@ -907,7 +932,7 @@ class FutureSpec extends AkkaSpec with Checkers with BeforeAndAfterAll with Defa }) } "throw exception with 'get'" in { f((future, message) ⇒ (evaluating { Await.result(future, timeout.duration) } must produce[E]).getMessage must be(message)) } - "throw exception with 'Await.sync'" in { f((future, message) ⇒ (evaluating { Await.result(future, timeout.duration) } must produce[E]).getMessage must be(message)) } + "throw exception with 'Await.result'" in { f((future, message) ⇒ (evaluating { Await.result(future, timeout.duration) } must produce[E]).getMessage must be(message)) } "retain exception with filter" in { f { (future, message) ⇒ (evaluating { Await.result(future filter (_ ⇒ true), timeout.duration) } must produce[E]).getMessage must be(message) diff --git a/akka-actor-tests/src/test/scala/akka/performance/microbench/TellThroughput10000PerformanceSpec.scala b/akka-actor-tests/src/test/scala/akka/performance/microbench/TellThroughput10000PerformanceSpec.scala deleted file mode 100644 index 1ef92549c2..0000000000 --- a/akka-actor-tests/src/test/scala/akka/performance/microbench/TellThroughput10000PerformanceSpec.scala +++ /dev/null @@ -1,169 +0,0 @@ -package akka.performance.microbench - -import akka.performance.workbench.PerformanceSpec -import org.apache.commons.math.stat.descriptive.DescriptiveStatistics -import akka.actor._ -import java.util.concurrent.{ ThreadPoolExecutor, CountDownLatch, TimeUnit } -import akka.dispatch._ -import java.util.concurrent.ThreadPoolExecutor.AbortPolicy -import java.util.concurrent.BlockingQueue -import java.util.concurrent.LinkedBlockingQueue -import akka.util.Duration -import akka.util.duration._ - -// -server -Xms512M -Xmx1024M -XX:+UseParallelGC -Dbenchmark=true -Dbenchmark.repeatFactor=500 -@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) -class TellThroughput10000PerformanceSpec extends PerformanceSpec { - import TellThroughput10000PerformanceSpec._ - - val repeat = 30000L * repeatFactor - - "Tell" must { - "warmup" in { - runScenario(4, warmup = true) - } - "warmup more" in { - runScenario(4, warmup = true) - } - "perform with load 1" in { - runScenario(1) - } - "perform with load 2" in { - runScenario(2) - } - "perform with load 4" in { - runScenario(4) - } - "perform with load 6" in { - runScenario(6) - } - "perform with load 8" in { - runScenario(8) - } - "perform with load 10" in { - runScenario(10) - } - "perform with load 12" in { - runScenario(12) - } - "perform with load 14" in { - runScenario(14) - } - "perform with load 16" in { - runScenario(16) - } - "perform with load 18" in { - runScenario(18) - } - "perform with load 20" in { - runScenario(20) - } - "perform with load 22" in { - runScenario(22) - } - "perform with load 24" in { - runScenario(24) - } - "perform with load 26" in { - runScenario(26) - } - "perform with load 28" in { - runScenario(28) - } - "perform with load 30" in { - runScenario(30) - } - "perform with load 32" in { - runScenario(32) - } - "perform with load 34" in { - runScenario(34) - } - "perform with load 36" in { - runScenario(36) - } - "perform with load 38" in { - runScenario(38) - } - "perform with load 40" in { - runScenario(40) - } - "perform with load 42" in { - runScenario(42) - } - "perform with load 44" in { - runScenario(44) - } - "perform with load 46" in { - runScenario(46) - } - "perform with load 48" in { - runScenario(48) - } - - def runScenario(numberOfClients: Int, warmup: Boolean = false) { - if 
(acceptClients(numberOfClients)) { - - val dispatcherKey = "benchmark.high-throughput-dispatcher" - val latch = new CountDownLatch(numberOfClients) - val repeatsPerClient = repeat / numberOfClients - val destinations = for (i ← 0 until numberOfClients) - yield system.actorOf(Props(new Destination).withDispatcher(dispatcherKey)) - val clients = for ((dest, j) ← destinations.zipWithIndex) - yield system.actorOf(Props(new Client(dest, latch, repeatsPerClient)).withDispatcher(dispatcherKey)) - - val start = System.nanoTime - clients.foreach(_ ! Run) - val ok = latch.await(maxRunDuration.toMillis, TimeUnit.MILLISECONDS) - val durationNs = (System.nanoTime - start) - - if (!warmup) { - ok must be(true) - logMeasurement(numberOfClients, durationNs, repeat) - } - clients.foreach(system.stop(_)) - destinations.foreach(system.stop(_)) - - } - } - } -} - -object TellThroughput10000PerformanceSpec { - - case object Run - case object Msg - - class Destination extends Actor { - def receive = { - case Msg ⇒ sender ! Msg - } - } - - class Client( - actor: ActorRef, - latch: CountDownLatch, - repeat: Long) extends Actor { - - var sent = 0L - var received = 0L - - def receive = { - case Msg ⇒ - received += 1 - if (sent < repeat) { - actor ! Msg - sent += 1 - } else if (received >= repeat) { - latch.countDown() - } - case Run ⇒ - for (i ← 0L until math.min(20000L, repeat)) { - actor ! Msg - sent += 1 - } - } - - } - -} diff --git a/akka-actor-tests/src/test/scala/akka/performance/microbench/TellThroughputComputationPerformanceSpec.scala b/akka-actor-tests/src/test/scala/akka/performance/microbench/TellThroughputComputationPerformanceSpec.scala index 0b47a1f722..4bee0c8655 100644 --- a/akka-actor-tests/src/test/scala/akka/performance/microbench/TellThroughputComputationPerformanceSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/performance/microbench/TellThroughputComputationPerformanceSpec.scala @@ -100,15 +100,14 @@ class TellThroughputComputationPerformanceSpec extends PerformanceSpec { def runScenario(numberOfClients: Int, warmup: Boolean = false) { if (acceptClients(numberOfClients)) { - val clientDispatcher = "benchmark.client-dispatcher" - val destinationDispatcher = "benchmark.destination-dispatcher" + val throughputDispatcher = "benchmark.throughput-dispatcher" val latch = new CountDownLatch(numberOfClients) val repeatsPerClient = repeat / numberOfClients val destinations = for (i ← 0 until numberOfClients) - yield system.actorOf(Props(new Destination).withDispatcher(destinationDispatcher)) + yield system.actorOf(Props(new Destination).withDispatcher(throughputDispatcher)) val clients = for (dest ← destinations) - yield system.actorOf(Props(new Client(dest, latch, repeatsPerClient)).withDispatcher(clientDispatcher)) + yield system.actorOf(Props(new Client(dest, latch, repeatsPerClient)).withDispatcher(throughputDispatcher)) val start = System.nanoTime clients.foreach(_ ! 
Run) diff --git a/akka-actor-tests/src/test/scala/akka/performance/microbench/TellThroughputPerformanceSpec.scala b/akka-actor-tests/src/test/scala/akka/performance/microbench/TellThroughputPerformanceSpec.scala index 552dbf62e9..f028fec6b0 100644 --- a/akka-actor-tests/src/test/scala/akka/performance/microbench/TellThroughputPerformanceSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/performance/microbench/TellThroughputPerformanceSpec.scala @@ -16,10 +16,10 @@ class TellThroughputPerformanceSpec extends PerformanceSpec { "Tell" must { "warmup" in { - runScenario(4, warmup = true) + runScenario(8, warmup = true) } "warmup more" in { - runScenario(4, warmup = true) + runScenario(8, warmup = true) } "perform with load 1" in { runScenario(1) @@ -48,19 +48,66 @@ class TellThroughputPerformanceSpec extends PerformanceSpec { "perform with load 16" in { runScenario(16) } + "perform with load 18" in { + runScenario(18) + } + "perform with load 20" in { + runScenario(20) + } + "perform with load 22" in { + runScenario(22) + } + "perform with load 24" in { + runScenario(24) + } + "perform with load 26" in { + runScenario(26) + } + "perform with load 28" in { + runScenario(28) + } + "perform with load 30" in { + runScenario(30) + } + "perform with load 32" in { + runScenario(32) + } + "perform with load 34" in { + runScenario(34) + } + "perform with load 36" in { + runScenario(36) + } + "perform with load 38" in { + runScenario(38) + } + "perform with load 40" in { + runScenario(40) + } + "perform with load 42" in { + runScenario(42) + } + "perform with load 44" in { + runScenario(44) + } + "perform with load 46" in { + runScenario(46) + } + "perform with load 48" in { + runScenario(48) + } def runScenario(numberOfClients: Int, warmup: Boolean = false) { if (acceptClients(numberOfClients)) { - val clientDispatcher = "benchmark.client-dispatcher" - val destinationDispatcher = "benchmark.destination-dispatcher" + val throughputDispatcher = "benchmark.throughput-dispatcher" val latch = new CountDownLatch(numberOfClients) val repeatsPerClient = repeat / numberOfClients val destinations = for (i ← 0 until numberOfClients) - yield system.actorOf(Props(new Destination).withDispatcher(destinationDispatcher)) + yield system.actorOf(Props(new Destination).withDispatcher(throughputDispatcher)) val clients = for (dest ← destinations) - yield system.actorOf(Props(new Client(dest, latch, repeatsPerClient)).withDispatcher(clientDispatcher)) + yield system.actorOf(Props(new Client(dest, latch, repeatsPerClient)).withDispatcher(throughputDispatcher)) val start = System.nanoTime clients.foreach(_ ! 
Run) diff --git a/akka-actor-tests/src/test/scala/akka/performance/microbench/TellThroughputPinnedDispatchersPerformanceSpec.scala b/akka-actor-tests/src/test/scala/akka/performance/microbench/TellThroughputPinnedDispatchersPerformanceSpec.scala deleted file mode 100644 index 4d9ad3eef1..0000000000 --- a/akka-actor-tests/src/test/scala/akka/performance/microbench/TellThroughputPinnedDispatchersPerformanceSpec.scala +++ /dev/null @@ -1,171 +0,0 @@ -package akka.performance.microbench - -import akka.performance.workbench.PerformanceSpec -import org.apache.commons.math.stat.descriptive.DescriptiveStatistics -import akka.actor._ -import java.util.concurrent.{ ThreadPoolExecutor, CountDownLatch, TimeUnit } -import akka.dispatch._ -import java.util.concurrent.ThreadPoolExecutor.AbortPolicy -import java.util.concurrent.BlockingQueue -import java.util.concurrent.LinkedBlockingQueue -import akka.util.Duration -import akka.util.duration._ - -// -server -Xms512M -Xmx1024M -XX:+UseParallelGC -Dbenchmark=true -Dbenchmark.repeatFactor=500 -@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) -class TellThroughputPinnedDispatchersPerformanceSpec extends PerformanceSpec { - import TellThroughputPinnedDispatchersPerformanceSpec._ - - val repeat = 30000L * repeatFactor - - "Tell" must { - "warmup" in { - runScenario(4, warmup = true) - } - "warmup more" in { - runScenario(4, warmup = true) - } - "perform with load 1" in { - runScenario(1) - } - "perform with load 2" in { - runScenario(2) - } - "perform with load 4" in { - runScenario(4) - } - "perform with load 6" in { - runScenario(6) - } - "perform with load 8" in { - runScenario(8) - } - "perform with load 10" in { - runScenario(10) - } - "perform with load 12" in { - runScenario(12) - } - "perform with load 14" in { - runScenario(14) - } - "perform with load 16" in { - runScenario(16) - } - "perform with load 18" in { - runScenario(18) - } - "perform with load 20" in { - runScenario(20) - } - "perform with load 22" in { - runScenario(22) - } - "perform with load 24" in { - runScenario(24) - } - "perform with load 26" in { - runScenario(26) - } - "perform with load 28" in { - runScenario(28) - } - "perform with load 30" in { - runScenario(30) - } - "perform with load 32" in { - runScenario(32) - } - "perform with load 34" in { - runScenario(34) - } - "perform with load 36" in { - runScenario(36) - } - "perform with load 38" in { - runScenario(38) - } - "perform with load 40" in { - runScenario(40) - } - "perform with load 42" in { - runScenario(42) - } - "perform with load 44" in { - runScenario(44) - } - "perform with load 46" in { - runScenario(46) - } - "perform with load 48" in { - runScenario(48) - } - - def runScenario(numberOfClients: Int, warmup: Boolean = false) { - if (acceptClients(numberOfClients)) { - - val pinnedDispatcher = "benchmark.pinned-dispatcher" - - val latch = new CountDownLatch(numberOfClients) - val repeatsPerClient = repeat / numberOfClients - - val destinations = for (i ← 0 until numberOfClients) - yield system.actorOf(Props(new Destination).withDispatcher(pinnedDispatcher)) - val clients = for ((dest, j) ← destinations.zipWithIndex) - yield system.actorOf(Props(new Client(dest, latch, repeatsPerClient)).withDispatcher(pinnedDispatcher)) - - val start = System.nanoTime - clients.foreach(_ ! 
Run) - val ok = latch.await(maxRunDuration.toMillis, TimeUnit.MILLISECONDS) - val durationNs = (System.nanoTime - start) - - if (!warmup) { - ok must be(true) - logMeasurement(numberOfClients, durationNs, repeat) - } - clients.foreach(system.stop(_)) - destinations.foreach(system.stop(_)) - - } - } - } -} - -object TellThroughputPinnedDispatchersPerformanceSpec { - - case object Run - case object Msg - - class Destination extends Actor { - def receive = { - case Msg ⇒ sender ! Msg - } - } - - class Client( - actor: ActorRef, - latch: CountDownLatch, - repeat: Long) extends Actor { - - var sent = 0L - var received = 0L - - def receive = { - case Msg ⇒ - received += 1 - if (sent < repeat) { - actor ! Msg - sent += 1 - } else if (received >= repeat) { - latch.countDown() - } - case Run ⇒ - for (i ← 0L until math.min(1000L, repeat)) { - actor ! Msg - sent += 1 - } - } - - } - -} diff --git a/akka-actor-tests/src/test/scala/akka/performance/trading/system/TradingLatencyPerformanceSpec.scala b/akka-actor-tests/src/test/scala/akka/performance/trading/system/TradingLatencyPerformanceSpec.scala index 9ba77e71e8..58b2e7e315 100644 --- a/akka-actor-tests/src/test/scala/akka/performance/trading/system/TradingLatencyPerformanceSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/performance/trading/system/TradingLatencyPerformanceSpec.scala @@ -84,7 +84,7 @@ class TradingLatencyPerformanceSpec extends PerformanceSpec { } yield Bid(s + i, 100 - i, 1000) val orders = askOrders.zip(bidOrders).map(x ⇒ Seq(x._1, x._2)).flatten - val clientDispatcher = "benchmark.client-dispatcher" + val latencyDispatcher = "benchmark.trading-dispatcher" val ordersPerClient = repeat * orders.size / numberOfClients val totalNumberOfOrders = ordersPerClient * numberOfClients @@ -93,7 +93,7 @@ class TradingLatencyPerformanceSpec extends PerformanceSpec { val start = System.nanoTime val clients = (for (i ← 0 until numberOfClients) yield { val receiver = receivers(i % receivers.size) - val props = Props(new Client(receiver, orders, latch, ordersPerClient, clientDelay.toMicros.toInt)).withDispatcher(clientDispatcher) + val props = Props(new Client(receiver, orders, latch, ordersPerClient, clientDelay.toMicros.toInt)).withDispatcher(latencyDispatcher) system.actorOf(props) }) diff --git a/akka-actor-tests/src/test/scala/akka/performance/trading/system/TradingSystem.scala b/akka-actor-tests/src/test/scala/akka/performance/trading/system/TradingSystem.scala index 7fe2783a9a..1adb2ecbc7 100644 --- a/akka-actor-tests/src/test/scala/akka/performance/trading/system/TradingSystem.scala +++ b/akka-actor-tests/src/test/scala/akka/performance/trading/system/TradingSystem.scala @@ -39,11 +39,9 @@ class AkkaTradingSystem(val system: ActorSystem) extends TradingSystem { val orDispatcher = orderReceiverDispatcher val meDispatcher = matchingEngineDispatcher - // by default we use default-dispatcher - def orderReceiverDispatcher: Option[String] = None + def orderReceiverDispatcher: Option[String] = Some("benchmark.trading-dispatcher") - // by default we use default-dispatcher - def matchingEngineDispatcher: Option[String] = None + def matchingEngineDispatcher: Option[String] = Some("benchmark.trading-dispatcher") override val orderbooksGroupedByMatchingEngine: List[List[Orderbook]] = for (groupOfSymbols: List[String] ← OrderbookRepository.orderbookSymbolsGroupedByMatchingEngine) diff --git a/akka-actor-tests/src/test/scala/akka/performance/trading/system/TradingThroughputPerformanceSpec.scala 
b/akka-actor-tests/src/test/scala/akka/performance/trading/system/TradingThroughputPerformanceSpec.scala index 7092f87666..a1033d7682 100644 --- a/akka-actor-tests/src/test/scala/akka/performance/trading/system/TradingThroughputPerformanceSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/performance/trading/system/TradingThroughputPerformanceSpec.scala @@ -81,7 +81,7 @@ class TradingThroughputPerformanceSpec extends PerformanceSpec { } yield Bid(s + i, 100 - i, 1000) val orders = askOrders.zip(bidOrders).map(x ⇒ Seq(x._1, x._2)).flatten - val clientDispatcher = "benchmark.client-dispatcher" + val throughputDispatcher = "benchmark.trading-dispatcher" val ordersPerClient = repeat * orders.size / numberOfClients val totalNumberOfOrders = ordersPerClient * numberOfClients @@ -90,7 +90,7 @@ class TradingThroughputPerformanceSpec extends PerformanceSpec { val start = System.nanoTime val clients = (for (i ← 0 until numberOfClients) yield { val receiver = receivers(i % receivers.size) - val props = Props(new Client(receiver, orders, latch, ordersPerClient)).withDispatcher(clientDispatcher) + val props = Props(new Client(receiver, orders, latch, ordersPerClient)).withDispatcher(throughputDispatcher) system.actorOf(props) }) diff --git a/akka-actor-tests/src/test/scala/akka/performance/workbench/BenchmarkConfig.scala b/akka-actor-tests/src/test/scala/akka/performance/workbench/BenchmarkConfig.scala index 11ed21c9aa..e31e667678 100644 --- a/akka-actor-tests/src/test/scala/akka/performance/workbench/BenchmarkConfig.scala +++ b/akka-actor-tests/src/test/scala/akka/performance/workbench/BenchmarkConfig.scala @@ -20,38 +20,40 @@ object BenchmarkConfig { resultDir = "target/benchmark" useDummyOrderbook = false - client-dispatcher { - core-pool-size-min = ${benchmark.maxClients} - core-pool-size-max = ${benchmark.maxClients} - } - - destination-dispatcher { - core-pool-size-min = ${benchmark.maxClients} - core-pool-size-max = ${benchmark.maxClients} - } - - high-throughput-dispatcher { - throughput = 10000 - core-pool-size-min = ${benchmark.maxClients} - core-pool-size-max = ${benchmark.maxClients} - } - - pinned-dispatcher { - type = PinnedDispatcher + throughput-dispatcher { + throughput = 5 + executor = "fork-join-executor" + fork-join-executor { + parallelism-min = ${benchmark.maxClients} + parallelism-max = ${benchmark.maxClients} + } } latency-dispatcher { throughput = 1 - core-pool-size-min = ${benchmark.maxClients} - core-pool-size-max = ${benchmark.maxClients} + executor = "fork-join-executor" + fork-join-executor { + parallelism-min = ${benchmark.maxClients} + parallelism-max = ${benchmark.maxClients} + } + } + + trading-dispatcher { + throughput = 5 + executor = "fork-join-executor" + fork-join-executor { + parallelism-min = ${benchmark.maxClients} + parallelism-max = ${benchmark.maxClients} + } } } """) private val longRunningBenchmarkConfig = ConfigFactory.parseString(""" benchmark { longRunning = true + minClients = 4 maxClients = 48 - repeatFactor = 150 + repeatFactor = 2000 maxRunDuration = 120 seconds useDummyOrderbook = true } diff --git a/akka-actor-tests/src/test/scala/akka/performance/workbench/PerformanceSpec.scala b/akka-actor-tests/src/test/scala/akka/performance/workbench/PerformanceSpec.scala index 3d27f8a303..ca6e42d67f 100644 --- a/akka-actor-tests/src/test/scala/akka/performance/workbench/PerformanceSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/performance/workbench/PerformanceSpec.scala @@ -31,7 +31,8 @@ abstract class PerformanceSpec(cfg: Config = 
BenchmarkConfig.config) extends Akk def compareResultWith: Option[String] = None def acceptClients(numberOfClients: Int): Boolean = { - (minClients <= numberOfClients && numberOfClients <= maxClients) + (minClients <= numberOfClients && numberOfClients <= maxClients && + (maxClients <= 16 || numberOfClients % 4 == 0)) } def logMeasurement(numberOfClients: Int, durationNs: Long, n: Long) { diff --git a/akka-actor-tests/src/test/scala/akka/routing/ConfiguredLocalRoutingSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/ConfiguredLocalRoutingSpec.scala index cc6fd9b852..8a9f3dc5c9 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/ConfiguredLocalRoutingSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/ConfiguredLocalRoutingSpec.scala @@ -20,8 +20,11 @@ object ConfiguredLocalRoutingSpec { akka { actor { default-dispatcher { - core-pool-size-min = 8 - core-pool-size-max = 16 + executor = "thread-pool-executor" + thread-pool-executor { + core-pool-size-min = 8 + core-pool-size-max = 16 + } } deployment { /config { diff --git a/akka-actor/src/main/java/akka/jsr166y/ForkJoinPool.java b/akka-actor/src/main/java/akka/jsr166y/ForkJoinPool.java new file mode 100644 index 0000000000..f92e5541f4 --- /dev/null +++ b/akka-actor/src/main/java/akka/jsr166y/ForkJoinPool.java @@ -0,0 +1,2674 @@ +/* + * Written by Doug Lea with assistance from members of JCP JSR-166 + * Expert Group and released to the public domain, as explained at + * http://creativecommons.org/publicdomain/zero/1.0/ + */ + +package akka.jsr166y; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Random; +import java.util.concurrent.AbstractExecutorService; +import java.util.concurrent.Callable; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Future; +import java.util.concurrent.RejectedExecutionException; +import java.util.concurrent.RunnableFuture; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.concurrent.atomic.AtomicLong; +import java.util.concurrent.locks.AbstractQueuedSynchronizer; +import java.util.concurrent.locks.Condition; +import akka.util.Unsafe; + +/** + * An {@link ExecutorService} for running {@link ForkJoinTask}s. + * A {@code ForkJoinPool} provides the entry point for submissions + * from non-{@code ForkJoinTask} clients, as well as management and + * monitoring operations. + * + *

A {@code ForkJoinPool} differs from other kinds of {@link + * ExecutorService} mainly by virtue of employing + * work-stealing: all threads in the pool attempt to find and + * execute tasks submitted to the pool and/or created by other active + * tasks (eventually blocking waiting for work if none exist). This + * enables efficient processing when most tasks spawn other subtasks + * (as do most {@code ForkJoinTask}s), as well as when many small + * tasks are submitted to the pool from external clients. Especially + * when setting asyncMode to true in constructors, {@code + * ForkJoinPool}s may also be appropriate for use with event-style + * tasks that are never joined. + * + *

A {@code ForkJoinPool} is constructed with a given target + * parallelism level; by default, equal to the number of available + * processors. The pool attempts to maintain enough active (or + * available) threads by dynamically adding, suspending, or resuming + * internal worker threads, even if some tasks are stalled waiting to + * join others. However, no such adjustments are guaranteed in the + * face of blocked IO or other unmanaged synchronization. The nested + * {@link ManagedBlocker} interface enables extension of the kinds of + * synchronization accommodated. + * + *

In addition to execution and lifecycle control methods, this + * class provides status check methods (for example + * {@link #getStealCount}) that are intended to aid in developing, + * tuning, and monitoring fork/join applications. Also, method + * {@link #toString} returns indications of pool state in a + * convenient form for informal monitoring. + * + *

As is the case with other ExecutorServices, there are three + * main task execution methods summarized in the following table. + * These are designed to be used primarily by clients not already + * engaged in fork/join computations in the current pool. The main + * forms of these methods accept instances of {@code ForkJoinTask}, + * but overloaded forms also allow mixed execution of plain {@code + * Runnable}- or {@code Callable}- based activities as well. However, + * tasks that are already executing in a pool should normally instead + * use the within-computation forms listed in the table unless using + * async event-style tasks that are not usually joined, in which case + * there is little difference among choice of methods. + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + * + *
+ * <table BORDER CELLPADDING=3 CELLSPACING=1>
+ *  <tr>
+ *    <td></td>
+ *    <td ALIGN=CENTER> <b>Call from non-fork/join clients</b></td>
+ *    <td ALIGN=CENTER> <b>Call from within fork/join computations</b></td>
+ *  </tr>
+ *  <tr>
+ *    <td> <b>Arrange async execution</b></td>
+ *    <td> {@link #execute(ForkJoinTask)}</td>
+ *    <td> {@link ForkJoinTask#fork}</td>
+ *  </tr>
+ *  <tr>
+ *    <td> <b>Await and obtain result</b></td>
+ *    <td> {@link #invoke(ForkJoinTask)}</td>
+ *    <td> {@link ForkJoinTask#invoke}</td>
+ *  </tr>
+ *  <tr>
+ *    <td> <b>Arrange exec and obtain Future</b></td>
+ *    <td> {@link #submit(ForkJoinTask)}</td>
+ *    <td> {@link ForkJoinTask#fork} (ForkJoinTasks <em>are</em> Futures)</td>
+ *  </tr>
+ * </table>
+ *

Sample Usage. Normally a single {@code ForkJoinPool} is + * used for all parallel task execution in a program or subsystem. + * Otherwise, use would not usually outweigh the construction and + * bookkeeping overhead of creating a large set of threads. For + * example, a common pool could be used for the {@code SortTasks} + * illustrated in {@link RecursiveAction}. Because {@code + * ForkJoinPool} uses threads in {@linkplain java.lang.Thread#isDaemon + * daemon} mode, there is typically no need to explicitly {@link + * #shutdown} such a pool upon program exit. + * + *

 {@code
+ * static final ForkJoinPool mainPool = new ForkJoinPool();
+ * ...
+ * public void sort(long[] array) {
+ *   mainPool.invoke(new SortTask(array, 0, array.length));
+ * }}</pre>
+ *
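+ * <p><i>(Editorial sketch, not part of the upstream Javadoc.)</i> For
+ * the event-style, never-joined tasks mentioned above, a pool can be
+ * constructed with {@code asyncMode} set to {@code true}, making
+ * workers process their own queues in FIFO order. A minimal sketch
+ * using the four-argument constructor of this class:
+ *
+ * <pre> {@code
+ * ForkJoinPool asyncPool = new ForkJoinPool(
+ *     Runtime.getRuntime().availableProcessors(),
+ *     ForkJoinPool.defaultForkJoinWorkerThreadFactory,
+ *     null,   // no custom UncaughtExceptionHandler
+ *     true);  // asyncMode: local tasks are processed FIFO
+ * asyncPool.execute(new Runnable() {
+ *   public void run() { System.out.println("event-style task"); }
+ * });}</pre>
+ *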

Implementation notes: This implementation restricts the + * maximum number of running threads to 32767. Attempts to create + * pools with greater than the maximum number result in + * {@code IllegalArgumentException}. + * + *

This implementation rejects submitted tasks (that is, by throwing + * {@link RejectedExecutionException}) only when the pool is shut down + * or internal resources have been exhausted. + * + * @since 1.7 + * @author Doug Lea + */ +public class ForkJoinPool extends AbstractExecutorService { + + /* + * Implementation Overview + * + * This class and its nested classes provide the main + * functionality and control for a set of worker threads: + * Submissions from non-FJ threads enter into submission queues. + * Workers take these tasks and typically split them into subtasks + * that may be stolen by other workers. Preference rules give + * first priority to processing tasks from their own queues (LIFO + * or FIFO, depending on mode), then to randomized FIFO steals of + * tasks in other queues. + * + * WorkQueues + * ========== + * + * Most operations occur within work-stealing queues (in nested + * class WorkQueue). These are special forms of Deques that + * support only three of the four possible end-operations -- push, + * pop, and poll (aka steal), under the further constraints that + * push and pop are called only from the owning thread (or, as + * extended here, under a lock), while poll may be called from + * other threads. (If you are unfamiliar with them, you probably + * want to read Herlihy and Shavit's book "The Art of + * Multiprocessor programming", chapter 16 describing these in + * more detail before proceeding.) The main work-stealing queue + * design is roughly similar to those in the papers "Dynamic + * Circular Work-Stealing Deque" by Chase and Lev, SPAA 2005 + * (http://research.sun.com/scalable/pubs/index.html) and + * "Idempotent work stealing" by Michael, Saraswat, and Vechev, + * PPoPP 2009 (http://portal.acm.org/citation.cfm?id=1504186). + * The main differences ultimately stem from GC requirements that + * we null out taken slots as soon as we can, to maintain as small + * a footprint as possible even in programs generating huge + * numbers of tasks. To accomplish this, we shift the CAS + * arbitrating pop vs poll (steal) from being on the indices + * ("base" and "top") to the slots themselves. So, both a + * successful pop and poll mainly entail a CAS of a slot from + * non-null to null. Because we rely on CASes of references, we + * do not need tag bits on base or top. They are simple ints as + * used in any circular array-based queue (see for example + * ArrayDeque). Updates to the indices must still be ordered in a + * way that guarantees that top == base means the queue is empty, + * but otherwise may err on the side of possibly making the queue + * appear nonempty when a push, pop, or poll have not fully + * committed. Note that this means that the poll operation, + * considered individually, is not wait-free. One thief cannot + * successfully continue until another in-progress one (or, if + * previously empty, a push) completes. However, in the + * aggregate, we ensure at least probabilistic non-blockingness. + * If an attempted steal fails, a thief always chooses a different + * random victim target to try next. So, in order for one thief to + * progress, it suffices for any in-progress poll or new push on + * any empty queue to complete. + * + * This approach also enables support of a user mode in which local + * task processing is in FIFO, not LIFO order, simply by using + * poll rather than pop. This can be useful in message-passing + * frameworks in which tasks are never joined. 
However neither + * mode considers affinities, loads, cache localities, etc, so + * rarely provide the best possible performance on a given + * machine, but portably provide good throughput by averaging over + * these factors. (Further, even if we did try to use such + * information, we do not usually have a basis for exploiting it. + * For example, some sets of tasks profit from cache affinities, + * but others are harmed by cache pollution effects.) + * + * WorkQueues are also used in a similar way for tasks submitted + * to the pool. We cannot mix these tasks in the same queues used + * for work-stealing (this would contaminate lifo/fifo + * processing). Instead, we loosely associate submission queues + * with submitting threads, using a form of hashing. The + * ThreadLocal Submitter class contains a value initially used as + * a hash code for choosing existing queues, but may be randomly + * repositioned upon contention with other submitters. In + * essence, submitters act like workers except that they never + * take tasks, and they are multiplexed on to a finite number of + * shared work queues. However, classes are set up so that future + * extensions could allow submitters to optionally help perform + * tasks as well. Insertion of tasks in shared mode requires a + * lock (mainly to protect in the case of resizing) but we use + * only a simple spinlock (using bits in field runState), because + * submitters encountering a busy queue move on to try or create + * other queues, so never block. + * + * Management + * ========== + * + * The main throughput advantages of work-stealing stem from + * decentralized control -- workers mostly take tasks from + * themselves or each other. We cannot negate this in the + * implementation of other management responsibilities. The main + * tactic for avoiding bottlenecks is packing nearly all + * essentially atomic control state into two volatile variables + * that are by far most often read (not written) as status and + * consistency checks. + * + * Field "ctl" contains 64 bits holding all the information needed + * to atomically decide to add, inactivate, enqueue (on an event + * queue), dequeue, and/or re-activate workers. To enable this + * packing, we restrict maximum parallelism to (1<<15)-1 (which is + * far in excess of normal operating range) to allow ids, counts, + * and their negations (used for thresholding) to fit into 16bit + * fields. + * + * Field "runState" contains 32 bits needed to register and + * deregister WorkQueues, as well as to enable shutdown. It is + * only modified under a lock (normally briefly held, but + * occasionally protecting allocations and resizings) but even + * when locked remains available to check consistency. An + * auxiliary field "growHints", also only modified under lock, + * contains a candidate index for the next WorkQueue and + * a mask for submission queue indices. + * + * Recording WorkQueues. WorkQueues are recorded in the + * "workQueues" array that is created upon pool construction and + * expanded if necessary. Updates to the array while recording + * new workers and unrecording terminated ones are protected from + * each other by a lock but the array is otherwise concurrently + * readable, and accessed directly. To simplify index-based + * operations, the array size is always a power of two, and all + * readers must tolerate null slots. Shared (submission) queues + * are at even indices, worker queues at odd indices. 
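// Sketch: the even/odd split just described. A per-thread probe value picks
// among even indices for external submissions (workers sit at odd indices)
// and is re-randomized by xorshift on contention so submitters spread out
// instead of blocking. The names probe/slotFor are illustrative; SQMASK
// mirrors the constant defined later in this file.
final class SubmitIndexing {
    static final int SQMASK = 0xfffe;            // clears bit 0: even indices only

    static int slotFor(int probe, int mask) {
        return probe & mask & SQMASK;
    }

    static int reposition(int probe) {           // move away from a busy queue
        probe ^= probe << 13;
        probe ^= probe >>> 17;
        probe ^= probe << 5;
        return probe;
    }

    public static void main(String[] args) {
        int probe = (int) Thread.currentThread().getId() | 1;
        System.out.println("slot  = " + slotFor(probe, 0xf));             // 0,2,...,14
        System.out.println("moved = " + slotFor(reposition(probe), 0xf)); // after contention
    }
}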
Grouping + * them together in this way simplifies and speeds up task + * scanning. To avoid flailing during start-up, the array is + * presized to hold twice #parallelism workers (which is unlikely + * to need further resizing during execution). But to avoid + * dealing with so many null slots, variable runState includes a + * mask for the nearest power of two that contains all currently + * used indices. + * + * All worker thread creation is on-demand, triggered by task + * submissions, replacement of terminated workers, and/or + * compensation for blocked workers. However, all other support + * code is set up to work with other policies. To ensure that we + * do not hold on to worker references that would prevent GC, ALL + * accesses to workQueues are via indices into the workQueues + * array (which is one source of some of the messy code + * constructions here). In essence, the workQueues array serves as + * a weak reference mechanism. Thus for example the wait queue + * field of ctl stores indices, not references. Access to the + * workQueues in associated methods (for example signalWork) must + * both index-check and null-check the IDs. All such accesses + * ignore bad IDs by returning out early from what they are doing, + * since this can only be associated with termination, in which + * case it is OK to give up. All uses of the workQueues array + * also check that it is non-null (even if previously + * non-null). This allows nulling during termination, which is + * currently not necessary, but remains an option for + * resource-revocation-based shutdown schemes. It also helps + * reduce JIT issuance of uncommon-trap code, which tends to + * unnecessarily complicate control flow in some methods. + * + * Event Queuing. Unlike HPC work-stealing frameworks, we cannot + * let workers spin indefinitely scanning for tasks when none can + * be found immediately, and we cannot start/resume workers unless + * there appear to be tasks available. On the other hand, we must + * quickly prod them into action when new tasks are submitted or + * generated. In many usages, ramp-up time to activate workers is + * the main limiting factor in overall performance (this is + * compounded at program start-up by JIT compilation and + * allocation). So we try to streamline this as much as possible. + * We park/unpark workers after placing in an event wait queue + * when they cannot find work. This "queue" is actually a simple + * Treiber stack, headed by the "id" field of ctl, plus a 15bit + * counter value (that reflects the number of times a worker has + * been inactivated) to avoid ABA effects (we need only as many + * version numbers as worker threads). Successors are held in + * field WorkQueue.nextWait. Queuing deals with several intrinsic + * races, mainly that a task-producing thread can miss seeing (and + * signalling) another thread that gave up looking for work but + * has not yet entered the wait queue. We solve this by requiring + * a full sweep of all workers (via repeated calls to method + * scan()) both before and after a newly waiting worker is added + * to the wait queue. During a rescan, the worker might release + * some other queued worker rather than itself, which has the same + * net effect. Because enqueued workers may actually be rescanning + * rather than waiting, we set and clear the "parker" field of + * WorkQueues to reduce unnecessary calls to unpark. (This + * requires a secondary recheck to avoid missed signals.) 
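// Sketch: the defensive access shape implied above. Every id pulled out of
// ctl goes through the same re-read / bounds-check / null-check sequence and
// gives up quietly on any mismatch, since that can only mean termination.
// Object stands in schematically for WorkQueue.
final class IdLookup {
    static final int SMASK = 0xffff;

    static Object lookup(Object[] workQueues, int e) {
        Object[] ws; int i; Object w;
        if ((ws = workQueues) != null &&      // array may be nulled on termination
            (i = e & SMASK) < ws.length &&    // id must be in range
            (w = ws[i]) != null)              // slot may have been cleared
            return w;
        return null;                          // bad id: caller returns out early
    }
}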
Note + * the unusual conventions about Thread.interrupts surrounding + * parking and other blocking: Because interrupts are used solely + * to alert threads to check termination, which is checked anyway + * upon blocking, we clear status (using Thread.interrupted) + * before any call to park, so that park does not immediately + * return due to status being set via some other unrelated call to + * interrupt in user code. + * + * Signalling. We create or wake up workers only when there + * appears to be at least one task they might be able to find and + * execute. When a submission is added or another worker adds a + * task to a queue that previously had fewer than two tasks, they + * signal waiting workers (or trigger creation of new ones if + * fewer than the given parallelism level -- see signalWork). + * These primary signals are buttressed by signals during rescans; + * together these cover the signals needed in cases when more + * tasks are pushed but untaken, and improve performance compared + * to having one thread wake up all workers. + * + * Trimming workers. To release resources after periods of lack of + * use, a worker starting to wait when the pool is quiescent will + * time out and terminate if the pool has remained quiescent for + * SHRINK_RATE nanosecs. This will slowly propagate, eventually + * terminating all workers after long periods of non-use. + * + * Shutdown and Termination. A call to shutdownNow atomically sets + * a runState bit and then (non-atomically) sets each worker's + * runState status, cancels all unprocessed tasks, and wakes up + * all waiting workers. Detecting whether termination should + * commence after a non-abrupt shutdown() call requires more work + * and bookkeeping. We need consensus about quiescence (i.e., that + * there is no more work). The active count provides a primary + * indication but non-abrupt shutdown still requires a rechecking + * scan for any workers that are inactive but not queued. + * + * Joining Tasks + * ============= + * + * Any of several actions may be taken when one worker is waiting + * to join a task stolen (or always held) by another. Because we + * are multiplexing many tasks on to a pool of workers, we can't + * just let them block (as in Thread.join). We also cannot just + * reassign the joiner's run-time stack with another and replace + * it later, which would be a form of "continuation", that even if + * possible is not necessarily a good idea since we sometimes need + * both an unblocked task and its continuation to progress. + * Instead we combine two tactics: + * + * Helping: Arranging for the joiner to execute some task that it + * would be running if the steal had not occurred. + * + * Compensating: Unless there are already enough live threads, + * method tryCompensate() may create or re-activate a spare + * thread to compensate for blocked joiners until they unblock. + * + * A third form (implemented in tryRemoveAndExec and + * tryPollForAndExec) amounts to helping a hypothetical + * compensator: If we can readily tell that a possible action of a + * compensator is to steal and execute the task being joined, the + * joining thread can do so directly, without the need for a + * compensation thread (although at the expense of larger run-time + * stacks, but the tradeoff is typically worthwhile). + * + * The ManagedBlocker extension API can't use helping so relies + * only on compensation in method awaitBlocker. 
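// Sketch: the interrupt convention above as a free-standing idiom -- clear
// pending interrupt status first, re-check the wait condition, then park, so
// an unrelated user-code interrupt cannot turn park into a busy spin.
// LockSupport.park stands in for the raw Unsafe.park call used by the pool.
import java.util.concurrent.locks.LockSupport;

final class ParkIdiom {
    volatile int eventCount;

    void awaitEvent(int ec) {
        Thread.interrupted();           // clear status; termination is checked anyway
        if (eventCount == ec)           // secondary recheck avoids a missed signal
            LockSupport.park(this);     // may return spuriously; caller rescans
    }
}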
+ * + * The algorithm in tryHelpStealer entails a form of "linear" + * helping: Each worker records (in field currentSteal) the most + * recent task it stole from some other worker. Plus, it records + * (in field currentJoin) the task it is currently actively + * joining. Method tryHelpStealer uses these markers to try to + * find a worker to help (i.e., steal back a task from and execute + * it) that could hasten completion of the actively joined task. + * In essence, the joiner executes a task that would be on its own + * local deque had the to-be-joined task not been stolen. This may + * be seen as a conservative variant of the approach in Wagner & + * Calder "Leapfrogging: a portable technique for implementing + * efficient futures" SIGPLAN Notices, 1993 + * (http://portal.acm.org/citation.cfm?id=155354). It differs in + * that: (1) We only maintain dependency links across workers upon + * steals, rather than use per-task bookkeeping. This sometimes + * requires a linear scan of workQueues array to locate stealers, but + * often doesn't because stealers leave hints (that may become + * stale/wrong) of where to locate them. A stealHint is only a + * hint because a worker might have had multiple steals and the + * hint records only one of them (usually the most current). + * Hinting isolates cost to when it is needed, rather than adding + * to per-task overhead. (2) It is "shallow", ignoring nesting + * and potentially cyclic mutual steals. (3) It is intentionally + * racy: field currentJoin is updated only while actively joining, + * which means that we miss links in the chain during long-lived + * tasks, GC stalls etc (which is OK since blocking in such cases + * is usually a good idea). (4) We bound the number of attempts + * to find work (see MAX_HELP_DEPTH) and fall back to suspending + * the worker and if necessary replacing it with another. + * + * It is impossible to keep exactly the target parallelism number + * of threads running at any given time. Determining the + * existence of conservatively safe helping targets, the + * availability of already-created spares, and the apparent need + * to create new spares are all racy, so we rely on multiple + * retries of each. Currently, in keeping with on-demand + * signalling policy, we compensate only if blocking would leave + * less than one active (non-waiting, non-blocked) worker. + * Additionally, to avoid some false alarms due to GC, lagging + * counters, system activity, etc, compensated blocking for joins + * is only attempted after rechecks stabilize in + * ForkJoinTask.awaitJoin. (Retries are interspersed with + * Thread.yield, for good citizenship.) + * + * Style notes: There is a lot of representation-level coupling + * among classes ForkJoinPool, ForkJoinWorkerThread, and + * ForkJoinTask. The fields of WorkQueue maintain data structures + * managed by ForkJoinPool, so are directly accessed. There is + * little point trying to reduce this, since any associated future + * changes in representations will need to be accompanied by + * algorithmic changes anyway. Several methods intrinsically + * sprawl because they must accumulate sets of consistent reads of + * volatiles held in local variables. Methods signalWork() and + * scan() are the main bottlenecks, so are especially heavily + * micro-optimized/mangled. There are lots of inline assignments + * (of form "while ((local = field) != 0)") which are usually the + * simplest way to ensure the required read orderings (which are + * sometimes critical). 
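// Sketch: the two style idioms named above, in isolation. AtomicLong stands
// in for the Unsafe CAS on field ctl used throughout this file.
import java.util.concurrent.atomic.AtomicLong;

final class StyleIdioms {
    final AtomicLong ctl = new AtomicLong();
    volatile int status;

    void addUnit(long unit) {
        long c;                         // "C"-like: locals declared at block head
        // "do {} while (!cas...)": the simplest way to force a CAS'ed update.
        do {} while (!ctl.compareAndSet(c = ctl.get(), c + unit));
    }

    void spinWhileNonzero() {
        int s;
        // inline assignment pins exactly one volatile read per iteration.
        while ((s = status) != 0) {
            if (s < 0)
                break;                  // act on the consistent snapshot, not a re-read
            Thread.yield();
        }
    }
}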
This leads to a "C"-like style of listing + * declarations of these locals at the heads of methods or blocks. + * There are several occurrences of the unusual "do {} while + * (!cas...)" which is the simplest way to force an update of a + * CAS'ed variable. There are also other coding oddities that help + * some methods perform reasonably even when interpreted (not + * compiled). + * + * The order of declarations in this file is: + * (1) Static utility functions + * (2) Nested (static) classes + * (3) Static fields + * (4) Fields, along with constants used when unpacking some of them + * (5) Internal control methods + * (6) Callbacks and other support for ForkJoinTask methods + * (7) Exported methods + * (8) Static block initializing statics in minimally dependent order + */ + + // Static utilities + + /** + * Computes an initial hash code (also serving as a non-zero + * random seed) for a thread id. This method is expected to + * provide higher-quality hash codes than using method hashCode(). + */ + static final int hashId(long id) { + int h = (int)id ^ (int)(id >>> 32); // Use MurmurHash of thread id + h ^= h >>> 16; h *= 0x85ebca6b; + h ^= h >>> 13; h *= 0xc2b2ae35; + h ^= h >>> 16; + return (h == 0) ? 1 : h; // ensure nonzero + } + + /** + * If there is a security manager, makes sure caller has + * permission to modify threads. + */ + private static void checkPermission() { + SecurityManager security = System.getSecurityManager(); + if (security != null) + security.checkPermission(modifyThreadPermission); + } + + // Nested classes + + /** + * Factory for creating new {@link ForkJoinWorkerThread}s. + * A {@code ForkJoinWorkerThreadFactory} must be defined and used + * for {@code ForkJoinWorkerThread} subclasses that extend base + * functionality or initialize threads with different contexts. + */ + public static interface ForkJoinWorkerThreadFactory { + /** + * Returns a new worker thread operating in the given pool. + * + * @param pool the pool this thread works in + * @throws NullPointerException if the pool is null + */ + public ForkJoinWorkerThread newThread(ForkJoinPool pool); + } + + /** + * Default ForkJoinWorkerThreadFactory implementation; creates a + * new ForkJoinWorkerThread. + */ + static class DefaultForkJoinWorkerThreadFactory + implements ForkJoinWorkerThreadFactory { + public ForkJoinWorkerThread newThread(ForkJoinPool pool) { + return new ForkJoinWorkerThread(pool); + } + } + + /** + * A simple non-reentrant lock used for exclusion when managing + * queues and workers. We use a custom lock so that we can readily + * probe lock state in constructions that check among alternative + * actions. The lock is normally only very briefly held, and + * sometimes treated as a spinlock, but other usages block to + * reduce overall contention in those cases where locked code + * bodies perform allocation/resizing. + */ + static final class Mutex extends AbstractQueuedSynchronizer { + public final boolean tryAcquire(int ignore) { + return compareAndSetState(0, 1); + } + public final boolean tryRelease(int ignore) { + setState(0); + return true; + } + public final void lock() { acquire(0); } + public final void unlock() { release(0); } + public final boolean isHeldExclusively() { return getState() == 1; } + public final Condition newCondition() { return new ConditionObject(); } + } + + /** + * Class for artificial tasks that are used to replace the target + * of local joins if they are removed from an interior queue slot + * in WorkQueue.tryRemoveAndExec. 
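// Sketch: why a probe-able lock matters. tryAcquire(0) on the Mutex above is
// a single CAS that never blocks, letting callers pick an alternative action
// when the lock is busy (as doSubmit does below). Outside this file the same
// shape can be written against ReentrantLock.tryLock(), used here as a
// stand-in:
import java.util.concurrent.locks.ReentrantLock;

final class ProbeLock {
    final ReentrantLock lock = new ReentrantLock();

    boolean tryInstall(Runnable briefCriticalSection) {
        if (lock.tryLock()) {               // probe: one CAS, never blocks
            try {
                briefCriticalSection.run(); // e.g. install or resize a queue
            } finally {
                lock.unlock();
            }
            return true;
        }
        return false;                       // busy: caller moves on to another queue
    }
}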
We don't need the proxy to + * actually do anything beyond having a unique identity. + */ + static final class EmptyTask extends ForkJoinTask { + EmptyTask() { status = ForkJoinTask.NORMAL; } // force done + public final Void getRawResult() { return null; } + public final void setRawResult(Void x) {} + public final boolean exec() { return true; } + } + + /** + * Queues supporting work-stealing as well as external task + * submission. See above for main rationale and algorithms. + * Implementation relies heavily on "Unsafe" intrinsics + * and selective use of "volatile": + * + * Field "base" is the index (mod array.length) of the least valid + * queue slot, which is always the next position to steal (poll) + * from if nonempty. Reads and writes require volatile orderings + * but not CAS, because updates are only performed after slot + * CASes. + * + * Field "top" is the index (mod array.length) of the next queue + * slot to push to or pop from. It is written only by owner thread + * for push, or under lock for trySharedPush, and accessed by + * other threads only after reading (volatile) base. Both top and + * base are allowed to wrap around on overflow, but (top - base) + * (or more commonly -(base - top) to force volatile read of base + * before top) still estimates size. + * + * The array slots are read and written using the emulation of + * volatiles/atomics provided by Unsafe. Insertions must in + * general use putOrderedObject as a form of releasing store to + * ensure that all writes to the task object are ordered before + * its publication in the queue. (Although we can avoid one case + * of this when locked in trySharedPush.) All removals entail a + * CAS to null. The array is always a power of two. To ensure + * safety of Unsafe array operations, all accesses perform + * explicit null checks and implicit bounds checks via + * power-of-two masking. + * + * In addition to basic queuing support, this class contains + * fields described elsewhere to control execution. It turns out + * to work better memory-layout-wise to include them in this + * class rather than a separate class. + * + * Performance on most platforms is very sensitive to placement of + * instances of both WorkQueues and their arrays -- we absolutely + * do not want multiple WorkQueue instances or multiple queue + * arrays sharing cache lines. (It would be best for queue objects + * and their arrays to share, but there is nothing available to + * help arrange that). Unfortunately, because they are recorded + * in a common array, WorkQueue instances are often moved to be + * adjacent by garbage collectors. To reduce impact, we use field + * padding that works OK on common platforms; this effectively + * trades off slightly slower average field access for the sake of + * avoiding really bad worst-case access. (Until better JVM + * support is in place, this padding is dependent on transient + * properties of JVM field layout rules.) We also take care in + * allocating, sizing and resizing the array. Non-shared queue + * arrays are initialized (via method growArray) by workers before + * use. Others are allocated on first use. + */ + static final class WorkQueue { + /** + * Capacity of work-stealing queue array upon initialization. + * Must be a power of two; at least 4, but set larger to + * reduce cacheline sharing among queues. + */ + static final int INITIAL_QUEUE_CAPACITY = 1 << 8; + + /** + * Maximum size for queue arrays. 
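// Sketch: the (top - base) size estimate above survives index wraparound
// because Java int arithmetic is modular; the difference stays exact as long
// as fewer than 2^31 tasks are queued. Reading volatile base before top is
// what motivates the -(base - top) form for non-owners.
final class IndexWrap {
    public static void main(String[] args) {
        int base = Integer.MAX_VALUE;    // about to wrap
        int top  = base + 5;             // wraps to a negative int
        System.out.println("size = " + (top - base));   // prints 5 regardless
    }
}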
Must be a power of two less + * than or equal to 1 << (31 - width of array entry) to ensure + * lack of wraparound of index calculations, but defined to a + * value a bit less than this to help users trap runaway + * programs before saturating systems. + */ + static final int MAXIMUM_QUEUE_CAPACITY = 1 << 26; // 64M + + volatile long totalSteals; // cumulative number of steals + int seed; // for random scanning; initialize nonzero + volatile int eventCount; // encoded inactivation count; < 0 if inactive + int nextWait; // encoded record of next event waiter + int rescans; // remaining scans until block + int nsteals; // top-level task executions since last idle + final int mode; // lifo, fifo, or shared + int poolIndex; // index of this queue in pool (or 0) + int stealHint; // index of most recent known stealer + volatile int runState; // 1: locked, -1: terminate; else 0 + volatile int base; // index of next slot for poll + int top; // index of next slot for push + ForkJoinTask[] array; // the elements (initially unallocated) + final ForkJoinWorkerThread owner; // owning thread or null if shared + volatile Thread parker; // == owner during call to park; else null + ForkJoinTask currentJoin; // task being joined in awaitJoin + ForkJoinTask currentSteal; // current non-local task being executed + // Heuristic padding to ameliorate unfortunate memory placements + Object p00, p01, p02, p03, p04, p05, p06, p07, p08, p09, p0a; + + WorkQueue(ForkJoinWorkerThread owner, int mode) { + this.owner = owner; + this.mode = mode; + // Place indices in the center of array (that is not yet allocated) + base = top = INITIAL_QUEUE_CAPACITY >>> 1; + } + + /** + * Returns number of tasks in the queue. + */ + final int queueSize() { + int n = base - top; // non-owner callers must read base first + return (n >= 0) ? 0 : -n; + } + + /** + * Pushes a task. Call only by owner in unshared queues. + * + * @param task the task. Caller must ensure non-null. + * @param p if non-null, pool to signal if necessary + * @throw RejectedExecutionException if array cannot be resized + */ + final void push(ForkJoinTask task, ForkJoinPool p) { + ForkJoinTask[] a; + int s = top, m, n; + if ((a = array) != null) { // ignore if queue removed + U.putOrderedObject + (a, (((m = a.length - 1) & s) << ASHIFT) + ABASE, task); + if ((n = (top = s + 1) - base) <= 2) { + if (p != null) + p.signalWork(); + } + else if (n >= m) + growArray(true); + } + } + + /** + * Pushes a task if lock is free and array is either big + * enough or can be resized to be big enough. + * + * @param task the task. Caller must ensure non-null. + * @return true if submitted + */ + final boolean trySharedPush(ForkJoinTask task) { + boolean submitted = false; + if (runState == 0 && U.compareAndSwapInt(this, RUNSTATE, 0, 1)) { + ForkJoinTask[] a = array; + int s = top; + try { + if ((a != null && a.length > s + 1 - base) || + (a = growArray(false)) != null) { // must presize + int j = (((a.length - 1) & s) << ASHIFT) + ABASE; + U.putObject(a, (long)j, task); // don't need "ordered" + top = s + 1; + submitted = true; + } + } finally { + runState = 0; // unlock + } + } + return submitted; + } + + /** + * Takes next task, if one exists, in FIFO order. 
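// Sketch: the ((index & mask) << ASHIFT) + ABASE expressions in push and
// friends compute raw byte offsets into the array for Unsafe. ASHIFT is just
// log2 of the per-element scale, derived exactly as in the static initializer
// later in this file; the scale and base values below are illustrative (they
// vary by JVM and oop compression).
final class OffsetMath {
    public static void main(String[] args) {
        int scale = 4;                     // bytes per array element (assumed)
        int abase = 16;                    // array header size (assumed)
        if ((scale & (scale - 1)) != 0)
            throw new AssertionError("data type scale not a power of two");
        int ashift = 31 - Integer.numberOfLeadingZeros(scale);   // log2(scale)
        int slot = 5, mask = 63;
        long offset = ((long) ((slot & mask) << ashift)) + abase;
        System.out.println("byte offset of slot 5 = " + offset); // 16 + 5*4 = 36
    }
}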
+ */ + final ForkJoinTask poll() { + ForkJoinTask[] a; int b; ForkJoinTask t; + while ((b = base) - top < 0 && (a = array) != null) { + int j = (((a.length - 1) & b) << ASHIFT) + ABASE; + if ((t = (ForkJoinTask)U.getObjectVolatile(a, j)) != null && + base == b && + U.compareAndSwapObject(a, j, t, null)) { + base = b + 1; + return t; + } + } + return null; + } + + /** + * Takes next task, if one exists, in LIFO order. Call only + * by owner in unshared queues. (We do not have a shared + * version of this method because it is never needed.) + */ + final ForkJoinTask pop() { + ForkJoinTask t; int m; + ForkJoinTask[] a = array; + if (a != null && (m = a.length - 1) >= 0) { + for (int s; (s = top - 1) - base >= 0;) { + int j = ((m & s) << ASHIFT) + ABASE; + if ((t = (ForkJoinTask)U.getObjectVolatile(a, j)) == null) + break; + if (U.compareAndSwapObject(a, j, t, null)) { + top = s; + return t; + } + } + } + return null; + } + + /** + * Takes next task, if one exists, in order specified by mode. + */ + final ForkJoinTask nextLocalTask() { + return mode == 0 ? pop() : poll(); + } + + /** + * Returns next task, if one exists, in order specified by mode. + */ + final ForkJoinTask peek() { + ForkJoinTask[] a = array; int m; + if (a == null || (m = a.length - 1) < 0) + return null; + int i = mode == 0 ? top - 1 : base; + int j = ((i & m) << ASHIFT) + ABASE; + return (ForkJoinTask)U.getObjectVolatile(a, j); + } + + /** + * Returns task at index b if b is current base of queue. + */ + final ForkJoinTask pollAt(int b) { + ForkJoinTask t; ForkJoinTask[] a; + if ((a = array) != null) { + int j = (((a.length - 1) & b) << ASHIFT) + ABASE; + if ((t = (ForkJoinTask)U.getObjectVolatile(a, j)) != null && + base == b && + U.compareAndSwapObject(a, j, t, null)) { + base = b + 1; + return t; + } + } + return null; + } + + /** + * Pops the given task only if it is at the current top. + */ + final boolean tryUnpush(ForkJoinTask t) { + ForkJoinTask[] a; int s; + if ((a = array) != null && (s = top) != base && + U.compareAndSwapObject + (a, (((a.length - 1) & --s) << ASHIFT) + ABASE, t, null)) { + top = s; + return true; + } + return false; + } + + /** + * Polls the given task only if it is at the current base. + */ + final boolean pollFor(ForkJoinTask task) { + ForkJoinTask[] a; int b; + if ((b = base) - top < 0 && (a = array) != null) { + int j = (((a.length - 1) & b) << ASHIFT) + ABASE; + if (U.getObjectVolatile(a, j) == task && base == b && + U.compareAndSwapObject(a, j, task, null)) { + base = b + 1; + return true; + } + } + return false; + } + + /** + * If present, removes from queue and executes the given task, or + * any other cancelled task. Returns (true) immediately on any CAS + * or consistency check failure so caller can retry. 
+ * + * @return false if no progress can be made + */ + final boolean tryRemoveAndExec(ForkJoinTask task) { + boolean removed = false, empty = true, progress = true; + ForkJoinTask[] a; int m, s, b, n; + if ((a = array) != null && (m = a.length - 1) >= 0 && + (n = (s = top) - (b = base)) > 0) { + for (ForkJoinTask t;;) { // traverse from s to b + int j = ((--s & m) << ASHIFT) + ABASE; + t = (ForkJoinTask)U.getObjectVolatile(a, j); + if (t == null) // inconsistent length + break; + else if (t == task) { + if (s + 1 == top) { // pop + if (!U.compareAndSwapObject(a, j, task, null)) + break; + top = s; + removed = true; + } + else if (base == b) // replace with proxy + removed = U.compareAndSwapObject(a, j, task, + new EmptyTask()); + break; + } + else if (t.status >= 0) + empty = false; + else if (s + 1 == top) { // pop and throw away + if (U.compareAndSwapObject(a, j, t, null)) + top = s; + break; + } + if (--n == 0) { + if (!empty && base == b) + progress = false; + break; + } + } + } + if (removed) + task.doExec(); + return progress; + } + + /** + * Initializes or doubles the capacity of array. Call either + * by owner or with lock held -- it is OK for base, but not + * top, to move while resizings are in progress. + * + * @param rejectOnFailure if true, throw exception if capacity + * exceeded (relayed ultimately to user); else return null. + */ + final ForkJoinTask[] growArray(boolean rejectOnFailure) { + ForkJoinTask[] oldA = array; + int size = oldA != null ? oldA.length << 1 : INITIAL_QUEUE_CAPACITY; + if (size <= MAXIMUM_QUEUE_CAPACITY) { + int oldMask, t, b; + ForkJoinTask[] a = array = new ForkJoinTask[size]; + if (oldA != null && (oldMask = oldA.length - 1) >= 0 && + (t = top) - (b = base) > 0) { + int mask = size - 1; + do { + ForkJoinTask x; + int oldj = ((b & oldMask) << ASHIFT) + ABASE; + int j = ((b & mask) << ASHIFT) + ABASE; + x = (ForkJoinTask)U.getObjectVolatile(oldA, oldj); + if (x != null && + U.compareAndSwapObject(oldA, oldj, x, null)) + U.putObjectVolatile(a, j, x); + } while (++b != t); + } + return a; + } + else if (!rejectOnFailure) + return null; + else + throw new RejectedExecutionException("Queue capacity exceeded"); + } + + /** + * Removes and cancels all known tasks, ignoring any exceptions. + */ + final void cancelAll() { + ForkJoinTask.cancelIgnoringExceptions(currentJoin); + ForkJoinTask.cancelIgnoringExceptions(currentSteal); + for (ForkJoinTask t; (t = poll()) != null; ) + ForkJoinTask.cancelIgnoringExceptions(t); + } + + /** + * Computes next value for random probes. Scans don't require + * a very high quality generator, but also not a crummy one. + * Marsaglia xor-shift is cheap and works well enough. Note: + * This is manually inlined in several usages in ForkJoinPool + * to avoid writes inside busy scan loops. + */ + final int nextSeed() { + int r = seed; + r ^= r << 13; + r ^= r >>> 17; + return seed = r ^= r << 5; + } + + // Execution methods + + /** + * Removes and runs tasks until empty, using local mode + * ordering. + */ + final void runLocalTasks() { + if (base - top < 0) { + for (ForkJoinTask t; (t = nextLocalTask()) != null; ) + t.doExec(); + } + } + + /** + * Executes a top-level task and any local tasks remaining + * after execution. 
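// Sketch: nextSeed above is Marsaglia's 13/17/5 xorshift, a full-period
// (2^32 - 1) generator over the nonzero ints -- which is why seeds must be
// initialized nonzero. Stand-alone:
final class XorShift32 {
    static int next(int r) {
        r ^= r << 13;
        r ^= r >>> 17;
        r ^= r << 5;
        return r;                          // never 0 when the input is nonzero
    }

    public static void main(String[] args) {
        int r = 1;
        for (int i = 0; i < 4; i++)
            System.out.println(r = next(r));
    }
}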
+ * + * @return true unless terminating + */ + final boolean runTask(ForkJoinTask t) { + boolean alive = true; + if (t != null) { + currentSteal = t; + t.doExec(); + runLocalTasks(); + ++nsteals; + currentSteal = null; + } + else if (runState < 0) // terminating + alive = false; + return alive; + } + + /** + * Executes a non-top-level (stolen) task. + */ + final void runSubtask(ForkJoinTask t) { + if (t != null) { + ForkJoinTask ps = currentSteal; + currentSteal = t; + t.doExec(); + currentSteal = ps; + } + } + + /** + * Returns true if owned and not known to be blocked. + */ + final boolean isApparentlyUnblocked() { + Thread wt; Thread.State s; + return (eventCount >= 0 && + (wt = owner) != null && + (s = wt.getState()) != Thread.State.BLOCKED && + s != Thread.State.WAITING && + s != Thread.State.TIMED_WAITING); + } + + /** + * If this owned and is not already interrupted, try to + * interrupt and/or unpark, ignoring exceptions. + */ + final void interruptOwner() { + Thread wt, p; + if ((wt = owner) != null && !wt.isInterrupted()) { + try { + wt.interrupt(); + } catch (SecurityException ignore) { + } + } + if ((p = parker) != null) + U.unpark(p); + } + + // Unsafe mechanics + private static final sun.misc.Unsafe U; + private static final long RUNSTATE; + private static final int ABASE; + private static final int ASHIFT; + static { + int s; + try { + U = getUnsafe(); + Class k = WorkQueue.class; + Class ak = ForkJoinTask[].class; + RUNSTATE = U.objectFieldOffset + (k.getDeclaredField("runState")); + ABASE = U.arrayBaseOffset(ak); + s = U.arrayIndexScale(ak); + } catch (Exception e) { + throw new Error(e); + } + if ((s & (s-1)) != 0) + throw new Error("data type scale not a power of two"); + ASHIFT = 31 - Integer.numberOfLeadingZeros(s); + } + } + + /** + * Per-thread records for threads that submit to pools. Currently + * holds only pseudo-random seed / index that is used to choose + * submission queues in method doSubmit. In the future, this may + * also incorporate a means to implement different task rejection + * and resubmission policies. + */ + static final class Submitter { + int seed; + Submitter() { seed = hashId(Thread.currentThread().getId()); } + } + + /** ThreadLocal class for Submitters */ + static final class ThreadSubmitter extends ThreadLocal { + public Submitter initialValue() { return new Submitter(); } + } + + // static fields (initialized in static initializer below) + + /** + * Creates a new ForkJoinWorkerThread. This factory is used unless + * overridden in ForkJoinPool constructors. + */ + public static final ForkJoinWorkerThreadFactory + defaultForkJoinWorkerThreadFactory; + + /** + * Generator for assigning sequence numbers as pool names. + */ + private static final AtomicInteger poolNumberGenerator; + + /** + * Permission required for callers of methods that may start or + * kill threads. + */ + private static final RuntimePermission modifyThreadPermission; + + /** + * Per-thread submission bookeeping. Shared across all pools + * to reduce ThreadLocal pollution and because random motion + * to avoid contention in one pool is likely to hold for others. + */ + private static final ThreadSubmitter submitters; + + // static constants + + /** + * The wakeup interval (in nanoseconds) for a worker waiting for a + * task when the pool is quiescent to instead try to shrink the + * number of workers. The exact value does not matter too + * much. 
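// Sketch: the shrink mechanism pairs a timed park with a ctl recheck, much as
// idleAwaitWork does below -- park for SHRINK_RATE nanoseconds, and only let
// the worker terminate if nothing changed across the whole window, with
// SHRINK_TIMEOUT absorbing timer slop and spurious wakeups.
import java.util.concurrent.locks.LockSupport;

final class TimedShrink {
    static final long SHRINK_RATE    = 4L * 1000L * 1000L * 1000L;      // 4 seconds
    static final long SHRINK_TIMEOUT = SHRINK_RATE - (SHRINK_RATE / 10);

    volatile long ctl;

    boolean mayTerminate(long c) {         // c: snapshot of ctl before parking
        long start = System.nanoTime();
        LockSupport.parkNanos(this, SHRINK_RATE);
        return ctl == c                                     // pool still quiescent
            && System.nanoTime() - start >= SHRINK_TIMEOUT; // not an early wakeup
    }
}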
It must be short enough to release resources during + * sustained periods of idleness, but not so short that threads + * are continually re-created. + */ + private static final long SHRINK_RATE = + 4L * 1000L * 1000L * 1000L; // 4 seconds + + /** + * The timeout value for attempted shrinkage, includes + * some slop to cope with system timer imprecision. + */ + private static final long SHRINK_TIMEOUT = SHRINK_RATE - (SHRINK_RATE / 10); + + /** + * The maximum stolen->joining link depth allowed in tryHelpStealer. + * Depths for legitimate chains are unbounded, but we use a fixed + * constant to avoid (otherwise unchecked) cycles and to bound + * staleness of traversal parameters at the expense of sometimes + * blocking when we could be helping. + */ + private static final int MAX_HELP_DEPTH = 16; + + /** + * Bits and masks for control variables + * + * Field ctl is a long packed with: + * AC: Number of active running workers minus target parallelism (16 bits) + * TC: Number of total workers minus target parallelism (16 bits) + * ST: true if pool is terminating (1 bit) + * EC: the wait count of top waiting thread (15 bits) + * ID: poolIndex of top of Treiber stack of waiters (16 bits) + * + * When convenient, we can extract the upper 32 bits of counts and + * the lower 32 bits of queue state, u = (int)(ctl >>> 32) and e = + * (int)ctl. The ec field is never accessed alone, but always + * together with id and st. The offsets of counts by the target + * parallelism and the positionings of fields makes it possible to + * perform the most common checks via sign tests of fields: When + * ac is negative, there are not enough active workers, when tc is + * negative, there are not enough total workers, and when e is + * negative, the pool is terminating. To deal with these possibly + * negative fields, we use casts in and out of "short" and/or + * signed shifts to maintain signedness. + * + * When a thread is queued (inactivated), its eventCount field is + * set negative, which is the only way to tell if a worker is + * prevented from executing tasks, even though it must continue to + * scan for them to avoid queuing races. Note however that + * eventCount updates lag releases so usage requires care. + * + * Field runState is an int packed with: + * SHUTDOWN: true if shutdown is enabled (1 bit) + * SEQ: a sequence number updated upon (de)registering workers (15 bits) + * MASK: mask (power of 2 - 1) covering all registered poolIndexes (16 bits) + * + * The combination of mask and sequence number enables simple + * consistency checks: Staleness of read-only operations on the + * workQueues array can be checked by comparing runState before vs + * after the reads. The low 16 bits (i.e, anding with SMASK) hold + * the smallest power of two covering all indices, minus + * one. 
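// Sketch: exercising the ctl packing described above. Counts are stored
// offset by -parallelism so "not enough workers" collapses to a sign test;
// casts through short keep the 16-bit fields signed. Constants mirror those
// defined just below.
final class CtlMath {
    static final int  AC_SHIFT = 48, TC_SHIFT = 32;
    static final long SMASK_L  = 0xffffL;
    static final long AC_MASK  = SMASK_L << AC_SHIFT;
    static final long TC_MASK  = SMASK_L << TC_SHIFT;
    static final long AC_UNIT  = 1L << AC_SHIFT;

    public static void main(String[] args) {
        int parallelism = 4;
        long np = (long) (-parallelism);    // offset counts, as the constructor does
        long ctl = ((np << AC_SHIFT) & AC_MASK) | ((np << TC_SHIFT) & TC_MASK);

        int ac = (int)   (ctl >> AC_SHIFT);   // -4: four active workers short
        int tc = (short) (ctl >>> TC_SHIFT);  // -4: four total workers short
        System.out.println("ac = " + ac + ", tc = " + tc);

        ctl += AC_UNIT;                       // one worker became active
        System.out.println("too few active? " + ((int) (ctl >> AC_SHIFT) < 0)); // true
    }
}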
+ */ + + // bit positions/shifts for fields + private static final int AC_SHIFT = 48; + private static final int TC_SHIFT = 32; + private static final int ST_SHIFT = 31; + private static final int EC_SHIFT = 16; + + // bounds + private static final int POOL_MAX = 0x7fff; // max #workers - 1 + private static final int SMASK = 0xffff; // short bits + private static final int SQMASK = 0xfffe; // even short bits + private static final int SHORT_SIGN = 1 << 15; + private static final int INT_SIGN = 1 << 31; + + // masks + private static final long STOP_BIT = 0x0001L << ST_SHIFT; + private static final long AC_MASK = ((long)SMASK) << AC_SHIFT; + private static final long TC_MASK = ((long)SMASK) << TC_SHIFT; + + // units for incrementing and decrementing + private static final long TC_UNIT = 1L << TC_SHIFT; + private static final long AC_UNIT = 1L << AC_SHIFT; + + // masks and units for dealing with u = (int)(ctl >>> 32) + private static final int UAC_SHIFT = AC_SHIFT - 32; + private static final int UTC_SHIFT = TC_SHIFT - 32; + private static final int UAC_MASK = SMASK << UAC_SHIFT; + private static final int UTC_MASK = SMASK << UTC_SHIFT; + private static final int UAC_UNIT = 1 << UAC_SHIFT; + private static final int UTC_UNIT = 1 << UTC_SHIFT; + + // masks and units for dealing with e = (int)ctl + private static final int E_MASK = 0x7fffffff; // no STOP_BIT + private static final int E_SEQ = 1 << EC_SHIFT; + + // runState bits + private static final int SHUTDOWN = 1 << 31; + private static final int RS_SEQ = 1 << 16; + private static final int RS_SEQ_MASK = 0x7fff0000; + + // access mode for WorkQueue + static final int LIFO_QUEUE = 0; + static final int FIFO_QUEUE = 1; + static final int SHARED_QUEUE = -1; + + // Instance fields + + /* + * Field layout order in this class tends to matter more than one + * would like. Runtime layout order is only loosely related to + * declaration order and may differ across JVMs, but the following + * empirically works OK on current JVMs. + */ + + volatile long ctl; // main pool control + final int parallelism; // parallelism level + final int localMode; // per-worker scheduling mode + int growHints; // for expanding indices/ranges + volatile int runState; // shutdown status, seq, and mask + WorkQueue[] workQueues; // main registry + final Mutex lock; // for registration + final Condition termination; // for awaitTermination + final ForkJoinWorkerThreadFactory factory; // factory for new workers + final Thread.UncaughtExceptionHandler ueh; // per-worker UEH + final AtomicLong stealCount; // collect counts when terminated + final AtomicInteger nextWorkerNumber; // to create worker name string + final String workerNamePrefix; // to create worker name string + + // Creating, registering, deregistering and running workers + + /** + * Tries to create and start a worker + */ + private void addWorker() { + Throwable ex = null; + ForkJoinWorkerThread wt = null; + try { + if ((wt = factory.newThread(this)) != null) { + wt.start(); + return; + } + } catch (Throwable e) { + ex = e; + } + deregisterWorker(wt, ex); // adjust counts etc on failure + } + + /** + * Callback from ForkJoinWorkerThread constructor to assign a + * public name. This must be separate from registerWorker because + * it is called during the "super" constructor call in + * ForkJoinWorkerThread. 
+ */ + final String nextWorkerName() { + return workerNamePrefix.concat + (Integer.toString(nextWorkerNumber.addAndGet(1))); + } + + /** + * Callback from ForkJoinWorkerThread constructor to establish and + * record its WorkQueue. + * + * @param wt the worker thread + */ + final void registerWorker(ForkJoinWorkerThread wt) { + WorkQueue w = wt.workQueue; + Mutex lock = this.lock; + lock.lock(); + try { + int g = growHints, k = g & SMASK; + WorkQueue[] ws = workQueues; + if (ws != null) { // ignore on shutdown + int n = ws.length; + if ((k & 1) == 0 || k >= n || ws[k] != null) { + for (k = 1; k < n && ws[k] != null; k += 2) + ; // workers are at odd indices + if (k >= n) // resize + workQueues = ws = Arrays.copyOf(ws, n << 1); + } + w.eventCount = w.poolIndex = k; // establish before recording + ws[k] = w; + growHints = (g & ~SMASK) | ((k + 2) & SMASK); + int rs = runState; + int m = rs & SMASK; // recalculate runState mask + if (k > m) + m = (m << 1) + 1; + runState = (rs & SHUTDOWN) | ((rs + RS_SEQ) & RS_SEQ_MASK) | m; + } + } finally { + lock.unlock(); + } + } + + /** + * Final callback from terminating worker, as well as upon failure + * to construct or start a worker in addWorker. Removes record of + * worker from array, and adjusts counts. If pool is shutting + * down, tries to complete termination. + * + * @param wt the worker thread or null if addWorker failed + * @param ex the exception causing failure, or null if none + */ + final void deregisterWorker(ForkJoinWorkerThread wt, Throwable ex) { + WorkQueue w = null; + if (wt != null && (w = wt.workQueue) != null) { + w.runState = -1; // ensure runState is set + stealCount.getAndAdd(w.totalSteals + w.nsteals); + int idx = w.poolIndex; + Mutex lock = this.lock; + lock.lock(); + try { // remove record from array + WorkQueue[] ws = workQueues; + if (ws != null && idx >= 0 && idx < ws.length && ws[idx] == w) { + ws[idx] = null; + growHints = (growHints & ~SMASK) | idx; + } + } finally { + lock.unlock(); + } + } + + long c; // adjust ctl counts + do {} while (!U.compareAndSwapLong + (this, CTL, c = ctl, (((c - AC_UNIT) & AC_MASK) | + ((c - TC_UNIT) & TC_MASK) | + (c & ~(AC_MASK|TC_MASK))))); + + if (!tryTerminate(false, false) && w != null) { + w.cancelAll(); // cancel remaining tasks + if (w.array != null) // suppress signal if never ran + signalWork(); // wake up or create replacement + if (ex == null) // help clean refs on way out + ForkJoinTask.helpExpungeStaleExceptions(); + } + + if (ex != null) // rethrow + U.throwException(ex); + } + + /** + * Top-level runloop for workers, called by ForkJoinWorkerThread.run. + */ + final void runWorker(ForkJoinWorkerThread wt) { + // Initialize queue array and seed in this thread + WorkQueue w = wt.workQueue; + w.growArray(false); + w.seed = hashId(Thread.currentThread().getId()); + + do {} while (w.runTask(scan(w))); + } + + // Submissions + + /** + * Unless shutting down, adds the given task to a submission queue + * at submitter's current queue index (modulo submission + * range). If no queue exists at the index, one is created unless + * pool lock is busy. If the queue and/or lock are busy, another + * index is randomly chosen. The mask in growHints controls the + * effective index range of queues considered. The mask is + * expanded, up to the current workerQueue mask, upon any detected + * contention but otherwise remains small to avoid needlessly + * creating queues when there is no contention. 
+ */ + private void doSubmit(ForkJoinTask task) { + if (task == null) + throw new NullPointerException(); + Submitter s = submitters.get(); + for (int r = s.seed, m = growHints >>> 16;;) { + WorkQueue[] ws; WorkQueue q; Mutex lk; + int k = r & m & SQMASK; // use only even indices + if (runState < 0 || (ws = workQueues) == null || ws.length <= k) + throw new RejectedExecutionException(); // shutting down + if ((q = ws[k]) == null && (lk = lock).tryAcquire(0)) { + try { // try to create new queue + if (ws == workQueues && (q = ws[k]) == null) { + int rs; // update runState seq + ws[k] = q = new WorkQueue(null, SHARED_QUEUE); + runState = (((rs = runState) & SHUTDOWN) | + ((rs + RS_SEQ) & ~SHUTDOWN)); + } + } finally { + lk.unlock(); + } + } + if (q != null) { + if (q.trySharedPush(task)) { + signalWork(); + return; + } + else if (m < parallelism - 1 && m < (runState & SMASK)) { + Mutex lock = this.lock; + lock.lock(); // block until lock free + int g = growHints; + if (g >>> 16 == m) // expand range + growHints = (((m << 1) + 1) << 16) | (g & SMASK); + lock.unlock(); // no need for try/finally + } + else if ((r & m) == 0) + Thread.yield(); // occasionally yield if busy + } + if (m == (m = growHints >>> 16)) { + r ^= r << 13; // update seed unless new range + r ^= r >>> 17; // same xorshift as WorkQueues + s.seed = r ^= r << 5; + } + } + } + + // Maintaining ctl counts + + /** + * Increments active count; mainly called upon return from blocking. + */ + final void incrementActiveCount() { + long c; + do {} while (!U.compareAndSwapLong(this, CTL, c = ctl, c + AC_UNIT)); + } + + /** + * Tries to activate or create a worker if too few are active. + */ + final void signalWork() { + long c; int u; + while ((u = (int)((c = ctl) >>> 32)) < 0) { // too few active + WorkQueue[] ws = workQueues; int e, i; WorkQueue w; Thread p; + if ((e = (int)c) > 0) { // at least one waiting + if (ws != null && (i = e & SMASK) < ws.length && + (w = ws[i]) != null && w.eventCount == (e | INT_SIGN)) { + long nc = (((long)(w.nextWait & E_MASK)) | + ((long)(u + UAC_UNIT) << 32)); + if (U.compareAndSwapLong(this, CTL, c, nc)) { + w.eventCount = (e + E_SEQ) & E_MASK; + if ((p = w.parker) != null) + U.unpark(p); // activate and release + break; + } + } + else + break; + } + else if (e == 0 && (u & SHORT_SIGN) != 0) { // too few total + long nc = (long)(((u + UTC_UNIT) & UTC_MASK) | + ((u + UAC_UNIT) & UAC_MASK)) << 32; + if (U.compareAndSwapLong(this, CTL, c, nc)) { + addWorker(); + break; + } + } + else + break; + } + } + + /** + * Tries to decrement active count (sometimes implicitly) and + * possibly release or create a compensating worker in preparation + * for blocking. Fails on contention or termination. 
+ * + * @return true if the caller can block, else should recheck and retry + */ + final boolean tryCompensate() { + WorkQueue w; Thread p; + int pc = parallelism, e, u, ac, tc, i; + long c = ctl; + WorkQueue[] ws = workQueues; + if ((e = (int)c) >= 0) { + if ((ac = ((u = (int)(c >>> 32)) >> UAC_SHIFT)) <= 0 && + e != 0 && ws != null && (i = e & SMASK) < ws.length && + (w = ws[i]) != null) { + long nc = (long)(w.nextWait & E_MASK) | (c & (AC_MASK|TC_MASK)); + if (w.eventCount == (e | INT_SIGN) && + U.compareAndSwapLong(this, CTL, c, nc)) { + w.eventCount = (e + E_SEQ) & E_MASK; + if ((p = w.parker) != null) + U.unpark(p); + return true; // release an idle worker + } + } + else if ((tc = (short)(u >>> UTC_SHIFT)) >= 0 && ac + pc > 1) { + long nc = ((c - AC_UNIT) & AC_MASK) | (c & ~AC_MASK); + if (U.compareAndSwapLong(this, CTL, c, nc)) + return true; // no compensation needed + } + else if (tc + pc < POOL_MAX) { + long nc = ((c + TC_UNIT) & TC_MASK) | (c & ~TC_MASK); + if (U.compareAndSwapLong(this, CTL, c, nc)) { + addWorker(); + return true; // create replacement + } + } + } + return false; + } + + // Scanning for tasks + + /** + * Scans for and, if found, returns one task, else possibly + * inactivates the worker. This method operates on single reads of + * volatile state and is designed to be re-invoked continuously in + * part because it returns upon detecting inconsistencies, + * contention, or state changes that indicate possible success on + * re-invocation. + * + * The scan searches for tasks across queues, randomly selecting + * the first #queues probes, favoring steals over submissions + * (by exploiting even/odd indexing), and then performing a + * circular sweep of all queues. The scan terminates upon either + * finding a non-empty queue, or completing a full sweep. If the + * worker is not inactivated, it takes and returns a task from + * this queue. On failure to find a task, we take one of the + * following actions, after which the caller will retry calling + * this method unless terminated. + * + * * If pool is terminating, terminate the worker. + * + * * If not a complete sweep, try to release a waiting worker. If + * the scan terminated because the worker is inactivated, then the + * released worker will often be the calling worker, and it can + * succeed obtaining a task on the next call. Or maybe it is + * another worker, but with same net effect. Releasing in other + * cases as well ensures that we have enough workers running. + * + * * If the caller has run a task since the last empty scan, + * return (to allow rescan) if other workers are not also yet + * enqueued. Field WorkQueue.rescans counts down on each scan to + * ensure eventual inactivation and blocking. + * + * * If not already enqueued, try to inactivate and enqueue the + * worker on wait queue. + * + * * If already enqueued and none of the above apply, either park + * awaiting signal, or if this is the most recent waiter and pool + * is quiescent, relay to idleAwaitWork to check for termination + * and possibly shrink pool. 
+ * + * @param w the worker (via its WorkQueue) + * @return a task or null of none found + */ + private final ForkJoinTask scan(WorkQueue w) { + boolean swept = false; // true after full empty scan + WorkQueue[] ws; // volatile read order matters + int r = w.seed, ec = w.eventCount; // ec is negative if inactive + int rs = runState, m = rs & SMASK; + if ((ws = workQueues) != null && ws.length > m) { // consistency check + for (int k = 0, j = -1 - m; ; ++j) { + WorkQueue q; int b; + if (j < 0) { // random probes while j negative + r ^= r << 13; r ^= r >>> 17; k = (r ^= r << 5) | (j & 1); + } // worker (not submit) for odd j + else // cyclic scan when j >= 0 + k += 7; // step 7 reduces array packing bias + if ((q = ws[k & m]) != null && (b = q.base) - q.top < 0) { + ForkJoinTask t = (ec >= 0) ? q.pollAt(b) : null; + w.seed = r; // save seed for next scan + if (t != null) + return t; + break; + } + else if (j - m > m) { + if (rs == runState) // staleness check + swept = true; + break; + } + } + + // Decode ctl on empty scan + long c = ctl; int e = (int)c, a = (int)(c >> AC_SHIFT), nr, ns; + if (e < 0) // pool is terminating + w.runState = -1; + else if (!swept) { // try to release a waiter + WorkQueue v; Thread p; + if (e > 0 && a < 0 && (v = ws[e & m]) != null && + v.eventCount == (e | INT_SIGN)) { + long nc = ((long)(v.nextWait & E_MASK) | + ((c + AC_UNIT) & (AC_MASK|TC_MASK))); + if (U.compareAndSwapLong(this, CTL, c, nc)) { + v.eventCount = (e + E_SEQ) & E_MASK; + if ((p = v.parker) != null) + U.unpark(p); + } + } + } + else if ((nr = w.rescans) > 0) { // continue rescanning + int ac = a + parallelism; + if (((w.rescans = (ac < nr) ? ac : nr - 1) & 3) == 0 && + w.eventCount == ec) + Thread.yield(); // occasionally yield + } + else if (ec >= 0) { // try to enqueue + long nc = (long)ec | ((c - AC_UNIT) & (AC_MASK|TC_MASK)); + w.nextWait = e; + w.eventCount = ec | INT_SIGN;// mark as inactive + if (!U.compareAndSwapLong(this, CTL, c, nc)) + w.eventCount = ec; // unmark on CAS failure + else if ((ns = w.nsteals) != 0) { + w.nsteals = 0; // set rescans if ran task + w.rescans = a + parallelism; + w.totalSteals += ns; + } + } + else { // already queued + if (parallelism == -a) + idleAwaitWork(w); // quiescent + if (w.eventCount == ec) { + Thread.interrupted(); // clear status + ForkJoinWorkerThread wt = w.owner; + U.putObject(wt, PARKBLOCKER, this); + w.parker = wt; // emulate LockSupport.park + if (w.eventCount == ec) // recheck + U.park(false, 0L); // block + w.parker = null; + U.putObject(wt, PARKBLOCKER, null); + } + } + } + return null; + } + + /** + * If inactivating worker w has caused pool to become quiescent, + * checks for pool termination, and, so long as this is not the + * only worker, waits for event for up to SHRINK_RATE nanosecs. + * On timeout, if ctl has not changed, terminates the worker, + * which will in turn wake up another worker to possibly repeat + * this process. 
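// Sketch: the probe order used by scan above -- a burst of random probes
// while j is negative, then a cyclic sweep stepping by 7, which is coprime
// with every power-of-two table size and so visits every slot while breaking
// up array packing bias. Demonstrated over an 8-slot table:
final class ProbeOrder {
    public static void main(String[] args) {
        int m = 7;                          // mask for a table of size 8
        int r = 0x9e3779b9, k = 0;          // any nonzero seed
        for (int j = -1 - m; j - m <= m; ++j) {
            if (j < 0) {                    // random probes first
                r ^= r << 13; r ^= r >>> 17; r ^= r << 5;
                k = r;
            }
            else                            // then the full sweep: gcd(7, 8) == 1
                k += 7;
            System.out.print((k & m) + " ");
        }
        System.out.println();
    }
}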
+ * + * @param w the calling worker + */ + private void idleAwaitWork(WorkQueue w) { + long c; int nw, ec; + if (!tryTerminate(false, false) && + (int)((c = ctl) >> AC_SHIFT) + parallelism == 0 && + (ec = w.eventCount) == ((int)c | INT_SIGN) && + (nw = w.nextWait) != 0) { + long nc = ((long)(nw & E_MASK) | // ctl to restore on timeout + ((c + AC_UNIT) & AC_MASK) | (c & TC_MASK)); + ForkJoinWorkerThread wt = w.owner; + while (ctl == c) { + long startTime = System.nanoTime(); + Thread.interrupted(); // timed variant of version in scan() + U.putObject(wt, PARKBLOCKER, this); + w.parker = wt; + if (ctl == c) + U.park(false, SHRINK_RATE); + w.parker = null; + U.putObject(wt, PARKBLOCKER, null); + if (ctl != c) + break; + if (System.nanoTime() - startTime >= SHRINK_TIMEOUT && + U.compareAndSwapLong(this, CTL, c, nc)) { + w.eventCount = (ec + E_SEQ) | E_MASK; + w.runState = -1; // shrink + break; + } + } + } + } + + /** + * Tries to locate and execute tasks for a stealer of the given + * task, or in turn one of its stealers, Traces currentSteal -> + * currentJoin links looking for a thread working on a descendant + * of the given task and with a non-empty queue to steal back and + * execute tasks from. The first call to this method upon a + * waiting join will often entail scanning/search, (which is OK + * because the joiner has nothing better to do), but this method + * leaves hints in workers to speed up subsequent calls. The + * implementation is very branchy to cope with potential + * inconsistencies or loops encountering chains that are stale, + * unknown, or of length greater than MAX_HELP_DEPTH links. All + * of these cases are dealt with by just retrying by caller. + * + * @param joiner the joining worker + * @param task the task to join + * @return true if found or ran a task (and so is immediately retryable) + */ + final boolean tryHelpStealer(WorkQueue joiner, ForkJoinTask task) { + ForkJoinTask subtask; // current target + boolean progress = false; + int depth = 0; // current chain depth + int m = runState & SMASK; + WorkQueue[] ws = workQueues; + + if (ws != null && ws.length > m && (subtask = task).status >= 0) { + outer:for (WorkQueue j = joiner;;) { + // Try to find the stealer of subtask, by first using hint + WorkQueue stealer = null; + WorkQueue v = ws[j.stealHint & m]; + if (v != null && v.currentSteal == subtask) + stealer = v; + else { + for (int i = 1; i <= m; i += 2) { + if ((v = ws[i]) != null && v.currentSteal == subtask) { + stealer = v; + j.stealHint = i; // save hint + break; + } + } + if (stealer == null) + break; + } + + for (WorkQueue q = stealer;;) { // Try to help stealer + ForkJoinTask t; int b; + if (task.status < 0) + break outer; + if ((b = q.base) - q.top < 0) { + progress = true; + if (subtask.status < 0) + break outer; // stale + if ((t = q.pollAt(b)) != null) { + stealer.stealHint = joiner.poolIndex; + joiner.runSubtask(t); + } + } + else { // empty - try to descend to find stealer's stealer + ForkJoinTask next = stealer.currentJoin; + if (++depth == MAX_HELP_DEPTH || subtask.status < 0 || + next == null || next == subtask) + break outer; // max depth, stale, dead-end, cyclic + subtask = next; + j = stealer; + break; + } + } + } + } + return progress; + } + + /** + * If task is at base of some steal queue, steals and executes it. 
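// Sketch (schematic, speculative types): the shape of the linear helping walk
// in tryHelpStealer above -- find the stealer of the joined task, try to
// steal a task back from it, and otherwise descend via its currentJoin,
// bounded by MAX_HELP_DEPTH. W stands in for WorkQueue; the two elided
// methods hide the hint-guided scan and the pollAt/runSubtask step.
final class HelpChain {
    static final int MAX_HELP_DEPTH = 16;

    static final class W {                          // stand-in for WorkQueue
        volatile Object currentJoin;                // task this worker is joining
        W findStealerOf(Object task)   { return null; }         // hint-guided scan (elided)
        boolean stealBackAndRun(Object task) { return false; }  // pollAt + run (elided)
    }

    static boolean help(W joiner, Object task) {
        Object subtask = task;
        W j = joiner;
        for (int depth = 0; depth < MAX_HELP_DEPTH; ++depth) {
            W stealer = j.findStealerOf(subtask);
            if (stealer == null)
                return false;                       // chain stale or unknown: caller retries
            if (stealer.stealBackAndRun(subtask))
                return true;                        // progress: immediately retryable
            Object next = stealer.currentJoin;      // descend to the stealer's stealer
            if (next == null || next == subtask)
                return false;                       // dead end or cycle
            subtask = next;
            j = stealer;
        }
        return false;                               // depth bound hit: fall back to blocking
    }
}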
+ * + * @param joiner the joining worker + * @param task the task + */ + final void tryPollForAndExec(WorkQueue joiner, ForkJoinTask task) { + WorkQueue[] ws; + int m = runState & SMASK; + if ((ws = workQueues) != null && ws.length > m) { + for (int j = 1; j <= m && task.status >= 0; j += 2) { + WorkQueue q = ws[j]; + if (q != null && q.pollFor(task)) { + joiner.runSubtask(task); + break; + } + } + } + } + + /** + * Returns a non-empty steal queue, if one is found during a random, + * then cyclic scan, else null. This method must be retried by + * caller if, by the time it tries to use the queue, it is empty. + */ + private WorkQueue findNonEmptyStealQueue(WorkQueue w) { + int r = w.seed; // Same idea as scan(), but ignoring submissions + for (WorkQueue[] ws;;) { + int m = runState & SMASK; + if ((ws = workQueues) == null) + return null; + if (ws.length > m) { + WorkQueue q; + for (int k = 0, j = -1 - m;; ++j) { + if (j < 0) { + r ^= r << 13; r ^= r >>> 17; k = r ^= r << 5; + } + else + k += 7; + if ((q = ws[(k | 1) & m]) != null && q.base - q.top < 0) { + w.seed = r; + return q; + } + else if (j - m > m) + return null; + } + } + } + } + + /** + * Runs tasks until {@code isQuiescent()}. We piggyback on + * active count ctl maintenance, but rather than blocking + * when tasks cannot be found, we rescan until all others cannot + * find tasks either. + */ + final void helpQuiescePool(WorkQueue w) { + for (boolean active = true;;) { + w.runLocalTasks(); // exhaust local queue + WorkQueue q = findNonEmptyStealQueue(w); + if (q != null) { + ForkJoinTask t; + if (!active) { // re-establish active count + long c; + active = true; + do {} while (!U.compareAndSwapLong + (this, CTL, c = ctl, c + AC_UNIT)); + } + if ((t = q.poll()) != null) + w.runSubtask(t); + } + else { + long c; + if (active) { // decrement active count without queuing + active = false; + do {} while (!U.compareAndSwapLong + (this, CTL, c = ctl, c -= AC_UNIT)); + } + else + c = ctl; // re-increment on exit + if ((int)(c >> AC_SHIFT) + parallelism == 0) { + do {} while (!U.compareAndSwapLong + (this, CTL, c = ctl, c + AC_UNIT)); + break; + } + } + } + } + + /** + * Gets and removes a local or stolen task for the given worker. + * + * @return a task, if available + */ + final ForkJoinTask nextTaskFor(WorkQueue w) { + for (ForkJoinTask t;;) { + WorkQueue q; + if ((t = w.nextLocalTask()) != null) + return t; + if ((q = findNonEmptyStealQueue(w)) == null) + return null; + if ((t = q.poll()) != null) + return t; + } + } + + /** + * Returns the approximate (non-atomic) number of idle threads per + * active thread to offset steal queue size for method + * ForkJoinTask.getSurplusQueuedTaskCount(). + */ + final int idlePerActive() { + // Approximate at powers of two for small values, saturate past 4 + int p = parallelism; + int a = p + (int)(ctl >> AC_SHIFT); + return (a > (p >>>= 1) ? 0 : + a > (p >>>= 1) ? 1 : + a > (p >>>= 1) ? 2 : + a > (p >>>= 1) ? 4 : + 8); + } + + // Termination + + /** + * Possibly initiates and/or completes termination. The caller + * triggering termination runs three passes through workQueues: + * (0) Setting termination status, followed by wakeups of queued + * workers; (1) cancelling all tasks; (2) interrupting lagging + * threads (likely in external tasks, but possibly also blocked in + * joins). Each pass repeats previous steps because of potential + * lagging thread creation. 
+ * + * @param now if true, unconditionally terminate, else only + * if no work and no active workers + * @param enable if true, enable shutdown when next possible + * @return true if now terminating or terminated + */ + private boolean tryTerminate(boolean now, boolean enable) { + Mutex lock = this.lock; + for (long c;;) { + if (((c = ctl) & STOP_BIT) != 0) { // already terminating + if ((short)(c >>> TC_SHIFT) == -parallelism) { + lock.lock(); // don't need try/finally + termination.signalAll(); // signal when 0 workers + lock.unlock(); + } + return true; + } + if (runState >= 0) { // not yet enabled + if (!enable) + return false; + lock.lock(); + runState |= SHUTDOWN; + lock.unlock(); + } + if (!now) { // check if idle & no tasks + if ((int)(c >> AC_SHIFT) != -parallelism || + hasQueuedSubmissions()) + return false; + // Check for unqueued inactive workers. One pass suffices. + WorkQueue[] ws = workQueues; WorkQueue w; + if (ws != null) { + for (int i = 1; i < ws.length; i += 2) { + if ((w = ws[i]) != null && w.eventCount >= 0) + return false; + } + } + } + if (U.compareAndSwapLong(this, CTL, c, c | STOP_BIT)) { + for (int pass = 0; pass < 3; ++pass) { + WorkQueue[] ws = workQueues; + if (ws != null) { + WorkQueue w; + int n = ws.length; + for (int i = 0; i < n; ++i) { + if ((w = ws[i]) != null) { + w.runState = -1; + if (pass > 0) { + w.cancelAll(); + if (pass > 1) + w.interruptOwner(); + } + } + } + // Wake up workers parked on event queue + int i, e; long cc; Thread p; + while ((e = (int)(cc = ctl) & E_MASK) != 0 && + (i = e & SMASK) < n && + (w = ws[i]) != null) { + long nc = ((long)(w.nextWait & E_MASK) | + ((cc + AC_UNIT) & AC_MASK) | + (cc & (TC_MASK|STOP_BIT))); + if (w.eventCount == (e | INT_SIGN) && + U.compareAndSwapLong(this, CTL, cc, nc)) { + w.eventCount = (e + E_SEQ) & E_MASK; + w.runState = -1; + if ((p = w.parker) != null) + U.unpark(p); + } + } + } + } + } + } + } + + // Exported methods + + // Constructors + + /** + * Creates a {@code ForkJoinPool} with parallelism equal to {@link + * java.lang.Runtime#availableProcessors}, using the {@linkplain + * #defaultForkJoinWorkerThreadFactory default thread factory}, + * no UncaughtExceptionHandler, and non-async LIFO processing mode. + * + * @throws SecurityException if a security manager exists and + * the caller is not permitted to modify threads + * because it does not hold {@link + * java.lang.RuntimePermission}{@code ("modifyThread")} + */ + public ForkJoinPool() { + this(Runtime.getRuntime().availableProcessors(), + defaultForkJoinWorkerThreadFactory, null, false); + } + + /** + * Creates a {@code ForkJoinPool} with the indicated parallelism + * level, the {@linkplain + * #defaultForkJoinWorkerThreadFactory default thread factory}, + * no UncaughtExceptionHandler, and non-async LIFO processing mode. + * + * @param parallelism the parallelism level + * @throws IllegalArgumentException if parallelism less than or + * equal to zero, or greater than implementation limit + * @throws SecurityException if a security manager exists and + * the caller is not permitted to modify threads + * because it does not hold {@link + * java.lang.RuntimePermission}{@code ("modifyThread")} + */ + public ForkJoinPool(int parallelism) { + this(parallelism, defaultForkJoinWorkerThreadFactory, null, false); + } + + /** + * Creates a {@code ForkJoinPool} with the given parameters. + * + * @param parallelism the parallelism level. For default value, + * use {@link java.lang.Runtime#availableProcessors}. 
+     * @param factory the factory for creating new threads. For default value,
+     * use {@link #defaultForkJoinWorkerThreadFactory}.
+     * @param handler the handler for internal worker threads that
+     * terminate due to unrecoverable errors encountered while executing
+     * tasks. For default value, use {@code null}.
+     * @param asyncMode if true,
+     * establishes local first-in-first-out scheduling mode for forked
+     * tasks that are never joined. This mode may be more appropriate
+     * than default locally stack-based mode in applications in which
+     * worker threads only process event-style asynchronous tasks.
+     * For default value, use {@code false}.
+     * @throws IllegalArgumentException if parallelism less than or
+     *         equal to zero, or greater than implementation limit
+     * @throws NullPointerException if the factory is null
+     * @throws SecurityException if a security manager exists and
+     *         the caller is not permitted to modify threads
+     *         because it does not hold {@link
+     *         java.lang.RuntimePermission}{@code ("modifyThread")}
+     */
+    public ForkJoinPool(int parallelism,
+                        ForkJoinWorkerThreadFactory factory,
+                        Thread.UncaughtExceptionHandler handler,
+                        boolean asyncMode) {
+        checkPermission();
+        if (factory == null)
+            throw new NullPointerException();
+        if (parallelism <= 0 || parallelism > POOL_MAX)
+            throw new IllegalArgumentException();
+        this.parallelism = parallelism;
+        this.factory = factory;
+        this.ueh = handler;
+        this.localMode = asyncMode ? FIFO_QUEUE : LIFO_QUEUE;
+        this.growHints = 1;
+        long np = (long)(-parallelism); // offset ctl counts
+        this.ctl = ((np << AC_SHIFT) & AC_MASK) | ((np << TC_SHIFT) & TC_MASK);
+        // initialize workQueues array with room for 2*parallelism if possible
+        int n = parallelism << 1;
+        if (n >= POOL_MAX)
+            n = POOL_MAX;
+        else { // See Hackers Delight, sec 3.2, where n < (1 << 16)
+            n |= n >>> 1; n |= n >>> 2; n |= n >>> 4; n |= n >>> 8;
+        }
+        this.workQueues = new WorkQueue[(n + 1) << 1]; // #slots = 2 * #workers
+        this.termination = (this.lock = new Mutex()).newCondition();
+        this.stealCount = new AtomicLong();
+        this.nextWorkerNumber = new AtomicInteger();
+        StringBuilder sb = new StringBuilder("ForkJoinPool-");
+        sb.append(poolNumberGenerator.incrementAndGet());
+        sb.append("-worker-");
+        this.workerNamePrefix = sb.toString();
+    }
+
+    // Execution methods
+
+    /**
+     * Performs the given task, returning its result upon completion.
+     * If the computation encounters an unchecked Exception or Error,
+     * it is rethrown as the outcome of this invocation.  Rethrown
+     * exceptions behave in the same way as regular exceptions, but,
+     * when possible, contain stack traces (as displayed for example
+     * using {@code ex.printStackTrace()}) of both the current thread
+     * as well as the thread actually encountering the exception;
+     * minimally only the latter.
+     *
+     * @param task the task
+     * @return the task's result
+     * @throws NullPointerException if the task is null
+     * @throws RejectedExecutionException if the task cannot be
+     *         scheduled for execution
+     */
+    public <T> T invoke(ForkJoinTask<T> task) {
+        doSubmit(task);
+        return task.join();
+    }
+
+    /**
+     * Arranges for (asynchronous) execution of the given task.
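+     *
+     * <p>(Editorial sketch, not part of the original javadoc: a typical
+     * submit-then-join pattern; {@code SortTask} and {@code array} are
+     * hypothetical.)
+     *  <pre> {@code
+     * ForkJoinPool pool = new ForkJoinPool();
+     * SortTask task = new SortTask(array);  // some ForkJoinTask subclass
+     * pool.execute(task);                   // returns without waiting
+     * // ... do other work ...
+     * task.join();                          // later, wait for completion}</pre>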
+     *
+     * @param task the task
+     * @throws NullPointerException if the task is null
+     * @throws RejectedExecutionException if the task cannot be
+     *         scheduled for execution
+     */
+    public void execute(ForkJoinTask<?> task) {
+        doSubmit(task);
+    }
+
+    // AbstractExecutorService methods
+
+    /**
+     * @throws NullPointerException if the task is null
+     * @throws RejectedExecutionException if the task cannot be
+     *         scheduled for execution
+     */
+    public void execute(Runnable task) {
+        if (task == null)
+            throw new NullPointerException();
+        ForkJoinTask<?> job;
+        if (task instanceof ForkJoinTask<?>) // avoid re-wrap
+            job = (ForkJoinTask<?>) task;
+        else
+            job = ForkJoinTask.adapt(task, null);
+        doSubmit(job);
+    }
+
+    /**
+     * Submits a ForkJoinTask for execution.
+     *
+     * @param task the task to submit
+     * @return the task
+     * @throws NullPointerException if the task is null
+     * @throws RejectedExecutionException if the task cannot be
+     *         scheduled for execution
+     */
+    public <T> ForkJoinTask<T> submit(ForkJoinTask<T> task) {
+        doSubmit(task);
+        return task;
+    }
+
+    /**
+     * @throws NullPointerException if the task is null
+     * @throws RejectedExecutionException if the task cannot be
+     *         scheduled for execution
+     */
+    public <T> ForkJoinTask<T> submit(Callable<T> task) {
+        if (task == null)
+            throw new NullPointerException();
+        ForkJoinTask<T> job = ForkJoinTask.adapt(task);
+        doSubmit(job);
+        return job;
+    }
+
+    /**
+     * @throws NullPointerException if the task is null
+     * @throws RejectedExecutionException if the task cannot be
+     *         scheduled for execution
+     */
+    public <T> ForkJoinTask<T> submit(Runnable task, T result) {
+        if (task == null)
+            throw new NullPointerException();
+        ForkJoinTask<T> job = ForkJoinTask.adapt(task, result);
+        doSubmit(job);
+        return job;
+    }
+
+    /**
+     * @throws NullPointerException if the task is null
+     * @throws RejectedExecutionException if the task cannot be
+     *         scheduled for execution
+     */
+    public ForkJoinTask<?> submit(Runnable task) {
+        if (task == null)
+            throw new NullPointerException();
+        ForkJoinTask<?> job;
+        if (task instanceof ForkJoinTask<?>) // avoid re-wrap
+            job = (ForkJoinTask<?>) task;
+        else
+            job = ForkJoinTask.adapt(task, null);
+        doSubmit(job);
+        return job;
+    }
+
+    /**
+     * @throws NullPointerException {@inheritDoc}
+     * @throws RejectedExecutionException {@inheritDoc}
+     */
+    public <T> List<Future<T>> invokeAll(Collection<? extends Callable<T>> tasks) {
+        // In previous versions of this class, this method constructed
+        // a task to run ForkJoinTask.invokeAll, but now external
+        // invocation of multiple tasks is at least as efficient.
+        List<ForkJoinTask<T>> fs = new ArrayList<ForkJoinTask<T>>(tasks.size());
+        // Workaround needed because method wasn't declared with
+        // wildcards in return type but should have been.
+        @SuppressWarnings({"unchecked", "rawtypes"})
+        List<Future<T>> futures = (List<Future<T>>) (List) fs;
+
+        boolean done = false;
+        try {
+            for (Callable<T> t : tasks) {
+                ForkJoinTask<T> f = ForkJoinTask.adapt(t);
+                doSubmit(f);
+                fs.add(f);
+            }
+            for (ForkJoinTask<T> f : fs)
+                f.quietlyJoin();
+            done = true;
+            return futures;
+        } finally {
+            if (!done)
+                for (ForkJoinTask<T> f : fs)
+                    f.cancel(false);
+        }
+    }
+
+    /**
+     * Returns the factory used for constructing new workers.
+     *
+     * @return the factory used for constructing new workers
+     */
+    public ForkJoinWorkerThreadFactory getFactory() {
+        return factory;
+    }
+
+    /**
+     * Returns the handler for internal worker threads that terminate
+     * due to unrecoverable errors encountered while executing tasks.
+ * + * @return the handler, or {@code null} if none + */ + public Thread.UncaughtExceptionHandler getUncaughtExceptionHandler() { + return ueh; + } + + /** + * Returns the targeted parallelism level of this pool. + * + * @return the targeted parallelism level of this pool + */ + public int getParallelism() { + return parallelism; + } + + /** + * Returns the number of worker threads that have started but not + * yet terminated. The result returned by this method may differ + * from {@link #getParallelism} when threads are created to + * maintain parallelism when others are cooperatively blocked. + * + * @return the number of worker threads + */ + public int getPoolSize() { + return parallelism + (short)(ctl >>> TC_SHIFT); + } + + /** + * Returns {@code true} if this pool uses local first-in-first-out + * scheduling mode for forked tasks that are never joined. + * + * @return {@code true} if this pool uses async mode + */ + public boolean getAsyncMode() { + return localMode != 0; + } + + /** + * Returns an estimate of the number of worker threads that are + * not blocked waiting to join tasks or for other managed + * synchronization. This method may overestimate the + * number of running threads. + * + * @return the number of worker threads + */ + public int getRunningThreadCount() { + int rc = 0; + WorkQueue[] ws; WorkQueue w; + if ((ws = workQueues) != null) { + for (int i = 1; i < ws.length; i += 2) { + if ((w = ws[i]) != null && w.isApparentlyUnblocked()) + ++rc; + } + } + return rc; + } + + /** + * Returns an estimate of the number of threads that are currently + * stealing or executing tasks. This method may overestimate the + * number of active threads. + * + * @return the number of active threads + */ + public int getActiveThreadCount() { + int r = parallelism + (int)(ctl >> AC_SHIFT); + return (r <= 0) ? 0 : r; // suppress momentarily negative values + } + + /** + * Returns {@code true} if all worker threads are currently idle. + * An idle worker is one that cannot obtain a task to execute + * because none are available to steal from other threads, and + * there are no pending submissions to the pool. This method is + * conservative; it might not return {@code true} immediately upon + * idleness of all threads, but will eventually become true if + * threads remain inactive. + * + * @return {@code true} if all threads are currently idle + */ + public boolean isQuiescent() { + return (int)(ctl >> AC_SHIFT) + parallelism == 0; + } + + /** + * Returns an estimate of the total number of tasks stolen from + * one thread's work queue by another. The reported value + * underestimates the actual total number of steals when the pool + * is not quiescent. This value may be useful for monitoring and + * tuning fork/join programs: in general, steal counts should be + * high enough to keep threads busy, but low enough to avoid + * overhead and contention across threads. + * + * @return the number of steals + */ + public long getStealCount() { + long count = stealCount.get(); + WorkQueue[] ws; WorkQueue w; + if ((ws = workQueues) != null) { + for (int i = 1; i < ws.length; i += 2) { + if ((w = ws[i]) != null) + count += w.totalSteals; + } + } + return count; + } + + /** + * Returns an estimate of the total number of tasks currently held + * in queues by worker threads (but not including tasks submitted + * to the pool that have not begun executing). This value is only + * an approximation, obtained by iterating across all threads in + * the pool. 
This method may be useful for tuning task + * granularities. + * + * @return the number of queued tasks + */ + public long getQueuedTaskCount() { + long count = 0; + WorkQueue[] ws; WorkQueue w; + if ((ws = workQueues) != null) { + for (int i = 1; i < ws.length; i += 2) { + if ((w = ws[i]) != null) + count += w.queueSize(); + } + } + return count; + } + + /** + * Returns an estimate of the number of tasks submitted to this + * pool that have not yet begun executing. This method may take + * time proportional to the number of submissions. + * + * @return the number of queued submissions + */ + public int getQueuedSubmissionCount() { + int count = 0; + WorkQueue[] ws; WorkQueue w; + if ((ws = workQueues) != null) { + for (int i = 0; i < ws.length; i += 2) { + if ((w = ws[i]) != null) + count += w.queueSize(); + } + } + return count; + } + + /** + * Returns {@code true} if there are any tasks submitted to this + * pool that have not yet begun executing. + * + * @return {@code true} if there are any queued submissions + */ + public boolean hasQueuedSubmissions() { + WorkQueue[] ws; WorkQueue w; + if ((ws = workQueues) != null) { + for (int i = 0; i < ws.length; i += 2) { + if ((w = ws[i]) != null && w.queueSize() != 0) + return true; + } + } + return false; + } + + /** + * Removes and returns the next unexecuted submission if one is + * available. This method may be useful in extensions to this + * class that re-assign work in systems with multiple pools. + * + * @return the next submission, or {@code null} if none + */ + protected ForkJoinTask pollSubmission() { + WorkQueue[] ws; WorkQueue w; ForkJoinTask t; + if ((ws = workQueues) != null) { + for (int i = 0; i < ws.length; i += 2) { + if ((w = ws[i]) != null && (t = w.poll()) != null) + return t; + } + } + return null; + } + + /** + * Removes all available unexecuted submitted and forked tasks + * from scheduling queues and adds them to the given collection, + * without altering their execution status. These may include + * artificially generated or wrapped tasks. This method is + * designed to be invoked only when the pool is known to be + * quiescent. Invocations at other times may not remove all + * tasks. A failure encountered while attempting to add elements + * to collection {@code c} may result in elements being in + * neither, either or both collections when the associated + * exception is thrown. The behavior of this operation is + * undefined if the specified collection is modified while the + * operation is in progress. + * + * @param c the collection to transfer elements into + * @return the number of elements transferred + */ + protected int drainTasksTo(Collection> c) { + int count = 0; + WorkQueue[] ws; WorkQueue w; ForkJoinTask t; + if ((ws = workQueues) != null) { + for (int i = 0; i < ws.length; ++i) { + if ((w = ws[i]) != null) { + while ((t = w.poll()) != null) { + c.add(t); + ++count; + } + } + } + } + return count; + } + + /** + * Returns a string identifying this pool, as well as its state, + * including indications of run state, parallelism level, and + * worker and task counts. 
+ * + * @return a string identifying this pool, as well as its state + */ + public String toString() { + // Use a single pass through workQueues to collect counts + long qt = 0L, qs = 0L; int rc = 0; + long st = stealCount.get(); + long c = ctl; + WorkQueue[] ws; WorkQueue w; + if ((ws = workQueues) != null) { + for (int i = 0; i < ws.length; ++i) { + if ((w = ws[i]) != null) { + int size = w.queueSize(); + if ((i & 1) == 0) + qs += size; + else { + qt += size; + st += w.totalSteals; + if (w.isApparentlyUnblocked()) + ++rc; + } + } + } + } + int pc = parallelism; + int tc = pc + (short)(c >>> TC_SHIFT); + int ac = pc + (int)(c >> AC_SHIFT); + if (ac < 0) // ignore transient negative + ac = 0; + String level; + if ((c & STOP_BIT) != 0) + level = (tc == 0) ? "Terminated" : "Terminating"; + else + level = runState < 0 ? "Shutting down" : "Running"; + return super.toString() + + "[" + level + + ", parallelism = " + pc + + ", size = " + tc + + ", active = " + ac + + ", running = " + rc + + ", steals = " + st + + ", tasks = " + qt + + ", submissions = " + qs + + "]"; + } + + /** + * Initiates an orderly shutdown in which previously submitted + * tasks are executed, but no new tasks will be accepted. + * Invocation has no additional effect if already shut down. + * Tasks that are in the process of being submitted concurrently + * during the course of this method may or may not be rejected. + * + * @throws SecurityException if a security manager exists and + * the caller is not permitted to modify threads + * because it does not hold {@link + * java.lang.RuntimePermission}{@code ("modifyThread")} + */ + public void shutdown() { + checkPermission(); + tryTerminate(false, true); + } + + /** + * Attempts to cancel and/or stop all tasks, and reject all + * subsequently submitted tasks. Tasks that are in the process of + * being submitted or executed concurrently during the course of + * this method may or may not be rejected. This method cancels + * both existing and unexecuted tasks, in order to permit + * termination in the presence of task dependencies. So the method + * always returns an empty list (unlike the case for some other + * Executors). + * + * @return an empty list + * @throws SecurityException if a security manager exists and + * the caller is not permitted to modify threads + * because it does not hold {@link + * java.lang.RuntimePermission}{@code ("modifyThread")} + */ + public List shutdownNow() { + checkPermission(); + tryTerminate(true, true); + return Collections.emptyList(); + } + + /** + * Returns {@code true} if all tasks have completed following shut down. + * + * @return {@code true} if all tasks have completed following shut down + */ + public boolean isTerminated() { + long c = ctl; + return ((c & STOP_BIT) != 0L && + (short)(c >>> TC_SHIFT) == -parallelism); + } + + /** + * Returns {@code true} if the process of termination has + * commenced but not yet completed. This method may be useful for + * debugging. A return of {@code true} reported a sufficient + * period after shutdown may indicate that submitted tasks have + * ignored or suppressed interruption, or are waiting for IO, + * causing this executor not to properly terminate. (See the + * advisory notes for class {@link ForkJoinTask} stating that + * tasks should not normally entail blocking operations. But if + * they do, they must abort them on interrupt.) 
+ * + * @return {@code true} if terminating but not yet terminated + */ + public boolean isTerminating() { + long c = ctl; + return ((c & STOP_BIT) != 0L && + (short)(c >>> TC_SHIFT) != -parallelism); + } + + /** + * Returns {@code true} if this pool has been shut down. + * + * @return {@code true} if this pool has been shut down + */ + public boolean isShutdown() { + return runState < 0; + } + + /** + * Blocks until all tasks have completed execution after a shutdown + * request, or the timeout occurs, or the current thread is + * interrupted, whichever happens first. + * + * @param timeout the maximum time to wait + * @param unit the time unit of the timeout argument + * @return {@code true} if this executor terminated and + * {@code false} if the timeout elapsed before termination + * @throws InterruptedException if interrupted while waiting + */ + public boolean awaitTermination(long timeout, TimeUnit unit) + throws InterruptedException { + long nanos = unit.toNanos(timeout); + final Mutex lock = this.lock; + lock.lock(); + try { + for (;;) { + if (isTerminated()) + return true; + if (nanos <= 0) + return false; + nanos = termination.awaitNanos(nanos); + } + } finally { + lock.unlock(); + } + } + + /** + * Interface for extending managed parallelism for tasks running + * in {@link ForkJoinPool}s. + * + *

+     * <p>A {@code ManagedBlocker} provides two methods.  Method
+     * {@code isReleasable} must return {@code true} if blocking is
+     * not necessary. Method {@code block} blocks the current thread
+     * if necessary (perhaps internally invoking {@code isReleasable}
+     * before actually blocking). These actions are performed by any
+     * thread invoking {@link ForkJoinPool#managedBlock}.  The
+     * unusual methods in this API accommodate synchronizers that may,
+     * but don't usually, block for long periods. Similarly, they
+     * allow more efficient internal handling of cases in which
+     * additional workers may be, but usually are not, needed to
+     * ensure sufficient parallelism.  Toward this end,
+     * implementations of method {@code isReleasable} must be amenable
+     * to repeated invocation.
+     *
+     * <p>For example, here is a ManagedBlocker based on a
+     * ReentrantLock:
+     *  <pre> {@code
+     * class ManagedLocker implements ManagedBlocker {
+     *   final ReentrantLock lock;
+     *   boolean hasLock = false;
+     *   ManagedLocker(ReentrantLock lock) { this.lock = lock; }
+     *   public boolean block() {
+     *     if (!hasLock)
+     *       lock.lock();
+     *     return true;
+     *   }
+     *   public boolean isReleasable() {
+     *     return hasLock || (hasLock = lock.tryLock());
+     *   }
+     * }}</pre>
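+     *
+     * <p>(Editorial sketch, not part of the original javadoc: using
+     * the {@code ManagedLocker} above; {@code managedBlock} may throw
+     * InterruptedException.)
+     *  <pre> {@code
+     * ManagedLocker locker = new ManagedLocker(lock);
+     * ForkJoinPool.managedBlock(locker); // cooperative, may block
+     * try {
+     *   // ... lock is now held by this thread ...
+     * } finally {
+     *   lock.unlock();
+     * }}</pre>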
+     *
+     * <p>Here is a class that possibly blocks waiting for an
+     * item on a given queue:
+     *  <pre> {@code
+     * class QueueTaker<E> implements ManagedBlocker {
+     *   final BlockingQueue<E> queue;
+     *   volatile E item = null;
+     *   QueueTaker(BlockingQueue<E> q) { this.queue = q; }
+     *   public boolean block() throws InterruptedException {
+     *     if (item == null)
+     *       item = queue.take();
+     *     return true;
+     *   }
+     *   public boolean isReleasable() {
+     *     return item != null || (item = queue.poll()) != null;
+     *   }
+     *   public E getItem() { // call after pool.managedBlock completes
+     *     return item;
+     *   }
+     * }}</pre>
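+     *
+     * <p>(Editorial sketch, not part of the original javadoc: using the
+     * {@code QueueTaker} above with a hypothetical {@code queue}.)
+     *  <pre> {@code
+     * QueueTaker<String> taker = new QueueTaker<String>(queue);
+     * ForkJoinPool.managedBlock(taker);  // may block the caller
+     * String item = taker.getItem();     // safe after managedBlock returns}</pre>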
+ */ + public static interface ManagedBlocker { + /** + * Possibly blocks the current thread, for example waiting for + * a lock or condition. + * + * @return {@code true} if no additional blocking is necessary + * (i.e., if isReleasable would return true) + * @throws InterruptedException if interrupted while waiting + * (the method is not required to do so, but is allowed to) + */ + boolean block() throws InterruptedException; + + /** + * Returns {@code true} if blocking is unnecessary. + */ + boolean isReleasable(); + } + + /** + * Blocks in accord with the given blocker. If the current thread + * is a {@link ForkJoinWorkerThread}, this method possibly + * arranges for a spare thread to be activated if necessary to + * ensure sufficient parallelism while the current thread is blocked. + * + *

+     * <p>If the caller is not a {@link ForkJoinTask}, this method is
+     * behaviorally equivalent to
+     *  <pre> {@code
+     * while (!blocker.isReleasable())
+     *   if (blocker.block())
+     *     return;
+     * }</pre>
+ * + * If the caller is a {@code ForkJoinTask}, then the pool may + * first be expanded to ensure parallelism, and later adjusted. + * + * @param blocker the blocker + * @throws InterruptedException if blocker.block did so + */ + public static void managedBlock(ManagedBlocker blocker) + throws InterruptedException { + Thread t = Thread.currentThread(); + ForkJoinPool p = ((t instanceof ForkJoinWorkerThread) ? + ((ForkJoinWorkerThread)t).pool : null); + while (!blocker.isReleasable()) { + if (p == null || p.tryCompensate()) { + try { + do {} while (!blocker.isReleasable() && !blocker.block()); + } finally { + if (p != null) + p.incrementActiveCount(); + } + break; + } + } + } + + // AbstractExecutorService overrides. These rely on undocumented + // fact that ForkJoinTask.adapt returns ForkJoinTasks that also + // implement RunnableFuture. + + protected RunnableFuture newTaskFor(Runnable runnable, T value) { + return (RunnableFuture) ForkJoinTask.adapt(runnable, value); + } + + protected RunnableFuture newTaskFor(Callable callable) { + return (RunnableFuture) ForkJoinTask.adapt(callable); + } + + // Unsafe mechanics + private static final sun.misc.Unsafe U; + private static final long CTL; + private static final long PARKBLOCKER; + + static { + poolNumberGenerator = new AtomicInteger(); + modifyThreadPermission = new RuntimePermission("modifyThread"); + defaultForkJoinWorkerThreadFactory = + new DefaultForkJoinWorkerThreadFactory(); + submitters = new ThreadSubmitter(); + try { + U = getUnsafe(); + Class k = ForkJoinPool.class; + CTL = U.objectFieldOffset + (k.getDeclaredField("ctl")); + Class tk = Thread.class; + PARKBLOCKER = U.objectFieldOffset + (tk.getDeclaredField("parkBlocker")); + } catch (Exception e) { + throw new Error(e); + } + } + + /** + * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package. + * Replace with a simple call to Unsafe.getUnsafe when integrating + * into a jdk. + * + * @return a sun.misc.Unsafe + */ + private static sun.misc.Unsafe getUnsafe() { + return Unsafe.instance; + } +} diff --git a/akka-actor/src/main/java/akka/jsr166y/ForkJoinTask.java b/akka-actor/src/main/java/akka/jsr166y/ForkJoinTask.java new file mode 100644 index 0000000000..996d05e647 --- /dev/null +++ b/akka-actor/src/main/java/akka/jsr166y/ForkJoinTask.java @@ -0,0 +1,1543 @@ +/* + * Written by Doug Lea with assistance from members of JCP JSR-166 + * Expert Group and released to the public domain, as explained at + * http://creativecommons.org/publicdomain/zero/1.0/ + */ + +package akka.jsr166y; +import java.io.Serializable; +import java.util.Collection; +import java.util.List; +import java.util.RandomAccess; +import java.lang.ref.WeakReference; +import java.lang.ref.ReferenceQueue; +import java.util.concurrent.Callable; +import java.util.concurrent.CancellationException; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; +import java.util.concurrent.RejectedExecutionException; +import java.util.concurrent.RunnableFuture; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.locks.ReentrantLock; +import java.lang.reflect.Constructor; + +/** + * Abstract base class for tasks that run within a {@link ForkJoinPool}. + * A {@code ForkJoinTask} is a thread-like entity that is much + * lighter weight than a normal thread. Huge numbers of tasks and + * subtasks may be hosted by a small number of actual threads in a + * ForkJoinPool, at the price of some usage limitations. + * + *

A "main" {@code ForkJoinTask} begins execution when submitted + * to a {@link ForkJoinPool}. Once started, it will usually in turn + * start other subtasks. As indicated by the name of this class, + * many programs using {@code ForkJoinTask} employ only methods + * {@link #fork} and {@link #join}, or derivatives such as {@link + * #invokeAll(ForkJoinTask...) invokeAll}. However, this class also + * provides a number of other methods that can come into play in + * advanced usages, as well as extension mechanics that allow + * support of new forms of fork/join processing. + * + *

+ * <p>A {@code ForkJoinTask} is a lightweight form of {@link Future}.
+ * The efficiency of {@code ForkJoinTask}s stems from a set of
+ * restrictions (that are only partially statically enforceable)
+ * reflecting their main use as computational tasks calculating pure
+ * functions or operating on purely isolated objects.  The primary
+ * coordination mechanisms are {@link #fork}, that arranges
+ * asynchronous execution, and {@link #join}, that doesn't proceed
+ * until the task's result has been computed.  Computations should
+ * ideally avoid {@code synchronized} methods or blocks, and should
+ * minimize other blocking synchronization apart from joining other
+ * tasks or using synchronizers such as Phasers that are advertised to
+ * cooperate with fork/join scheduling. Subdividable tasks should also
+ * not perform blocking IO, and should ideally access variables that
+ * are completely independent of those accessed by other running
+ * tasks. These guidelines are loosely enforced by not permitting
+ * checked exceptions such as {@code IOExceptions} to be
+ * thrown. However, computations may still encounter unchecked
+ * exceptions, that are rethrown to callers attempting to join
+ * them. These exceptions may additionally include {@link
+ * RejectedExecutionException} stemming from internal resource
+ * exhaustion, such as failure to allocate internal task
+ * queues. Rethrown exceptions behave in the same way as regular
+ * exceptions, but, when possible, contain stack traces (as displayed
+ * for example using {@code ex.printStackTrace()}) of both the thread
+ * that initiated the computation as well as the thread actually
+ * encountering the exception; minimally only the latter.
+ *

+ * <p>It is possible to define and use ForkJoinTasks that may block,
+ * but doing so requires three further considerations: (1) Completion
+ * of few if any other tasks should be dependent on a task
+ * that blocks on external synchronization or IO. Event-style async
+ * tasks that are never joined often fall into this category.  (2) To
+ * minimize resource impact, tasks should be small; ideally performing
+ * only the (possibly) blocking action. (3) Unless the {@link
+ * ForkJoinPool.ManagedBlocker} API is used, or the number of possibly
+ * blocked tasks is known to be less than the pool's {@link
+ * ForkJoinPool#getParallelism} level, the pool cannot guarantee that
+ * enough threads will be available to ensure progress or good
+ * performance.
+ *

+ * <p>The primary method for awaiting completion and extracting
+ * results of a task is {@link #join}, but there are several variants:
+ * The {@link Future#get} methods support interruptible and/or timed
+ * waits for completion and report results using {@code Future}
+ * conventions. Method {@link #invoke} is semantically
+ * equivalent to {@code fork(); join()} but always attempts to begin
+ * execution in the current thread. The "quiet" forms of
+ * these methods do not extract results or report exceptions. These
+ * may be useful when a set of tasks are being executed, and you need
+ * to delay processing of results or exceptions until all complete.
+ * Method {@code invokeAll} (available in multiple versions)
+ * performs the most common form of parallel invocation: forking a set
+ * of tasks and joining them all.
+ *

+ * <p>In the most typical usages, a fork-join pair act like a call
+ * (fork) and return (join) from a parallel recursive function. As is
+ * the case with other forms of recursive calls, returns (joins)
+ * should be performed innermost-first. For example, {@code a.fork();
+ * b.fork(); b.join(); a.join();} is likely to be substantially more
+ * efficient than joining {@code a} before {@code b}.
+ *
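+ * <p>(Editorial sketch, not part of the original javadoc: a worked
+ * instance of the innermost-first ordering above; {@code SumTask},
+ * {@code left}, and {@code right} are hypothetical.)
+ *  <pre> {@code
+ * SumTask a = new SumTask(left);
+ * SumTask b = new SumTask(right);
+ * a.fork();                       // start a asynchronously
+ * b.fork();                       // start b asynchronously
+ * long sum = b.join() + a.join(); // join most recently forked first}</pre>
+ *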

+ * <p>The execution status of tasks may be queried at several levels
+ * of detail: {@link #isDone} is true if a task completed in any way
+ * (including the case where a task was cancelled without executing);
+ * {@link #isCompletedNormally} is true if a task completed without
+ * cancellation or encountering an exception; {@link #isCancelled} is
+ * true if the task was cancelled (in which case {@link #getException}
+ * returns a {@link java.util.concurrent.CancellationException}); and
+ * {@link #isCompletedAbnormally} is true if a task was either
+ * cancelled or encountered an exception, in which case {@link
+ * #getException} will return either the encountered exception or
+ * {@link java.util.concurrent.CancellationException}.
+ *

+ * <p>The ForkJoinTask class is not usually directly subclassed.
+ * Instead, you subclass one of the abstract classes that support a
+ * particular style of fork/join processing, typically {@link
+ * RecursiveAction} for computations that do not return results, or
+ * {@link RecursiveTask} for those that do.  Normally, a concrete
+ * ForkJoinTask subclass declares fields comprising its parameters,
+ * established in a constructor, and then defines a {@code compute}
+ * method that somehow uses the control methods supplied by this base
+ * class.  While these methods have {@code public} access (to allow
+ * instances of different task subclasses to call each other's
+ * methods), some of them may only be called from within other
+ * ForkJoinTasks (as may be determined using method {@link
+ * #inForkJoinPool}).  Attempts to invoke them in other contexts
+ * result in exceptions or errors, possibly including
+ * {@code ClassCastException}.
+ *
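+ * <p>(Editorial sketch, not part of the original javadoc: the classic
+ * Fibonacci example of the subclassing pattern just described.)
+ *  <pre> {@code
+ * class Fibonacci extends RecursiveTask<Integer> {
+ *   final int n;
+ *   Fibonacci(int n) { this.n = n; }
+ *   protected Integer compute() {
+ *     if (n <= 1)
+ *       return n;
+ *     Fibonacci f1 = new Fibonacci(n - 1);
+ *     f1.fork();                       // compute f1 asynchronously
+ *     Fibonacci f2 = new Fibonacci(n - 2);
+ *     return f2.compute() + f1.join(); // do f2 inline, then join f1
+ *   }
+ * }}</pre>
+ *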

+ * <p>Method {@link #join} and its variants are appropriate for use
+ * only when completion dependencies are acyclic; that is, the
+ * parallel computation can be described as a directed acyclic graph
+ * (DAG). Otherwise, executions may encounter a form of deadlock as
+ * tasks cyclically wait for each other.  However, this framework
+ * supports other methods and techniques (for example the use of
+ * {@link Phaser}, {@link #helpQuiesce}, and {@link #complete}) that
+ * may be of use in constructing custom subclasses for problems that
+ * are not statically structured as DAGs. To support such usages a
+ * ForkJoinTask may be atomically marked using {@link
+ * #markForkJoinTask} and checked for marking using {@link
+ * #isMarkedForkJoinTask}. The ForkJoinTask implementation does not
+ * use these {@code protected} methods or marks for any purpose, but
+ * they may be of use in the construction of specialized subclasses.
+ * For example, parallel graph traversals can use the supplied methods
+ * to avoid revisiting nodes/tasks that have already been processed.
+ * Also, completion based designs can use them to record that one
+ * subtask has completed. (Method names for marking are bulky in part
+ * to encourage definition of methods that reflect their usage
+ * patterns.)
+ *

+ * <p>Most base support methods are {@code final}, to prevent
+ * overriding of implementations that are intrinsically tied to the
+ * underlying lightweight task scheduling framework.  Developers
+ * creating new basic styles of fork/join processing should minimally
+ * implement {@code protected} methods {@link #exec}, {@link
+ * #setRawResult}, and {@link #getRawResult}, while also introducing
+ * an abstract computational method that can be implemented in its
+ * subclasses, possibly relying on other {@code protected} methods
+ * provided by this class.
+ *
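+ * <p>(Editorial sketch, not part of the original javadoc: a minimal
+ * subclass implementing only the three {@code protected} extension
+ * methods named above; the {@code RunTask} name is hypothetical.)
+ *  <pre> {@code
+ * class RunTask extends ForkJoinTask<Void> {
+ *   final Runnable action;
+ *   RunTask(Runnable action) { this.action = action; }
+ *   public Void getRawResult() { return null; }
+ *   protected void setRawResult(Void v) { }
+ *   protected boolean exec() {
+ *     action.run();
+ *     return true;                   // true means: completed normally
+ *   }
+ * }}</pre>
+ *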

+ * <p>ForkJoinTasks should perform relatively small amounts of
+ * computation. Large tasks should be split into smaller subtasks,
+ * usually via recursive decomposition. As a very rough rule of thumb,
+ * a task should perform more than 100 and less than 10000 basic
+ * computational steps, and should avoid indefinite looping. If tasks
+ * are too big, then parallelism cannot improve throughput. If too
+ * small, then memory and internal task maintenance overhead may
+ * overwhelm processing.
+ *

+ * <p>This class provides {@code adapt} methods for {@link Runnable}
+ * and {@link Callable}, that may be of use when mixing execution of
+ * {@code ForkJoinTasks} with other kinds of tasks. When all tasks are
+ * of this form, consider using a pool constructed in asyncMode.
+ *
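+ * <p>(Editorial sketch, not part of the original javadoc: adapting a
+ * plain {@code Callable}; {@code pool} is an existing pool.)
+ *  <pre> {@code
+ * ForkJoinTask<String> task = ForkJoinTask.adapt(
+ *   new Callable<String>() {
+ *     public String call() { return "done"; }
+ *   });
+ * String s = pool.invoke(task);        // runs and joins}</pre>
+ *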

+ * <p>ForkJoinTasks are {@code Serializable}, which enables them to be
+ * used in extensions such as remote execution frameworks. It is
+ * sensible to serialize tasks only before or after, but not during,
+ * execution. Serialization is not relied on during execution itself.
+ *
+ * @since 1.7
+ * @author Doug Lea
+ */
+public abstract class ForkJoinTask<V> implements Future<V>, Serializable {
+
+    /*
+     * See the internal documentation of class ForkJoinPool for a
+     * general implementation overview.  ForkJoinTasks are mainly
+     * responsible for maintaining their "status" field amidst relays
+     * to methods in ForkJoinWorkerThread and ForkJoinPool.
+     *
+     * The methods of this class are more-or-less layered into
+     * (1) basic status maintenance
+     * (2) execution and awaiting completion
+     * (3) user-level methods that additionally report results.
+     * This is sometimes hard to see because this file orders exported
+     * methods in a way that flows well in javadocs.
+     */
+
+    /**
+     * The number of times to try to help join a task without any
+     * apparent progress before giving up and blocking. The value is
+     * arbitrary but should be large enough to cope with transient
+     * stalls (due to GC etc) that can cause helping methods not to be
+     * able to proceed because other workers have not progressed to
+     * the point where subtasks can be found or taken.
+     */
+    private static final int HELP_RETRIES = 32;
+
+    /*
+     * The status field holds run control status bits packed into a
+     * single int to minimize footprint and to ensure atomicity (via
+     * CAS).  Status is initially zero, and takes on nonnegative
+     * values until completed, upon which status holds value
+     * NORMAL, CANCELLED, or EXCEPTIONAL. Tasks undergoing blocking
+     * waits by other threads have the SIGNAL bit set.  Completion of
+     * a stolen task with SIGNAL set awakens any waiters via
+     * notifyAll. Even though suboptimal for some purposes, we use
+     * basic builtin wait/notify to take advantage of "monitor
+     * inflation" in JVMs that we would otherwise need to emulate to
+     * avoid adding further per-task bookkeeping overhead.  We want
+     * these monitors to be "fat", i.e., not use biasing or thin-lock
+     * techniques, so use some odd coding idioms that tend to avoid
+     * them.
+     */
+
+    /** The run status of this task */
+    volatile int status; // accessed directly by pool and workers
+    static final int NORMAL      = 0xfffffffc;  // negative with low 2 bits 0
+    static final int CANCELLED   = 0xfffffff8;  // must be < NORMAL
+    static final int EXCEPTIONAL = 0xfffffff4;  // must be < CANCELLED
+    static final int SIGNAL      = 0x00000001;
+    static final int MARKED      = 0x00000002;
+
+    /**
+     * Marks completion and wakes up threads waiting to join this
+     * task, also clearing signal request bits. A specialization for
+     * NORMAL completion is in method doExec.
+     *
+     * @param completion one of NORMAL, CANCELLED, EXCEPTIONAL
+     * @return completion status on exit
+     */
+    private int setCompletion(int completion) {
+        for (int s;;) {
+            if ((s = status) < 0)
+                return s;
+            if (U.compareAndSwapInt(this, STATUS, s, (s & ~SIGNAL)|completion)) {
+                if ((s & SIGNAL) != 0)
+                    synchronized (this) { notifyAll(); }
+                return completion;
+            }
+        }
+    }
+
+    /**
+     * Primary execution method for stolen tasks. Unless done, calls
+     * exec and records status if completed, but doesn't wait for
+     * completion otherwise.
+ * + * @return status on exit from this method + */ + final int doExec() { + int s; boolean completed; + if ((s = status) >= 0) { + try { + completed = exec(); + } catch (Throwable rex) { + return setExceptionalCompletion(rex); + } + while ((s = status) >= 0 && completed) { + if (U.compareAndSwapInt(this, STATUS, s, (s & ~SIGNAL)|NORMAL)) { + if ((s & SIGNAL) != 0) + synchronized (this) { notifyAll(); } + return NORMAL; + } + } + } + return s; + } + + /** + * Blocks a non-worker-thread until completion. + * @return status upon completion + */ + private int externalAwaitDone() { + int s; + if ((s = status) >= 0) { + boolean interrupted = false; + synchronized (this) { + while ((s = status) >= 0) { + if (U.compareAndSwapInt(this, STATUS, s, s | SIGNAL)) { + try { + wait(); + } catch (InterruptedException ie) { + interrupted = true; + } + } + } + } + if (interrupted) + Thread.currentThread().interrupt(); + } + return s; + } + + /** + * Blocks a non-worker-thread until completion or interruption or timeout. + */ + private int externalInterruptibleAwaitDone(long millis) + throws InterruptedException { + int s; + if (Thread.interrupted()) + throw new InterruptedException(); + if ((s = status) >= 0) { + synchronized (this) { + while ((s = status) >= 0) { + if (U.compareAndSwapInt(this, STATUS, s, s | SIGNAL)) { + wait(millis); + if (millis > 0L) + break; + } + } + } + } + return s; + } + + + /** + * Implementation for join, get, quietlyJoin. Directly handles + * only cases of already-completed, external wait, and + * unfork+exec. Others are relayed to awaitJoin. + * + * @return status upon completion + */ + private int doJoin() { + int s; Thread t; ForkJoinWorkerThread wt; ForkJoinPool.WorkQueue w; + if ((s = status) >= 0) { + if (!((t = Thread.currentThread()) instanceof ForkJoinWorkerThread)) + s = externalAwaitDone(); + else if (!(w = (wt = (ForkJoinWorkerThread)t).workQueue). + tryUnpush(this) || (s = doExec()) >= 0) + s = awaitJoin(w, wt.pool); + } + return s; + } + + /** + * Helps and/or blocks until joined. + * + * @param w the joiner + * @param p the pool + * @return status upon completion + */ + private int awaitJoin(ForkJoinPool.WorkQueue w, ForkJoinPool p) { + int s; + ForkJoinTask prevJoin = w.currentJoin; + w.currentJoin = this; + for (int k = HELP_RETRIES; (s = status) >= 0;) { + if ((w.queueSize() > 0) ? + w.tryRemoveAndExec(this) : // self-help + p.tryHelpStealer(w, this)) // help process tasks + k = HELP_RETRIES; // reset if made progress + else if ((s = status) < 0) // recheck + break; + else if (--k > 0) { + if ((k & 3) == 1) + Thread.yield(); // occasionally yield + } + else if (k == 0) + p.tryPollForAndExec(w, this); // uncommon self-help case + else if (p.tryCompensate()) { // true if can block + try { + int ss = status; + if (ss >= 0 && // assert need signal + U.compareAndSwapInt(this, STATUS, ss, ss | SIGNAL)) { + synchronized (this) { + if (status >= 0) // block + wait(); + } + } + } catch (InterruptedException ignore) { + } finally { + p.incrementActiveCount(); // re-activate + } + } + } + w.currentJoin = prevJoin; + return s; + } + + /** + * Implementation for invoke, quietlyInvoke. 
+ * + * @return status upon completion + */ + private int doInvoke() { + int s; Thread t; + if ((s = doExec()) >= 0) { + if (!((t = Thread.currentThread()) instanceof ForkJoinWorkerThread)) + s = externalAwaitDone(); + else { + ForkJoinWorkerThread wt = (ForkJoinWorkerThread)t; + s = awaitJoin(wt.workQueue, wt.pool); + } + } + return s; + } + + // Exception table support + + /** + * Table of exceptions thrown by tasks, to enable reporting by + * callers. Because exceptions are rare, we don't directly keep + * them with task objects, but instead use a weak ref table. Note + * that cancellation exceptions don't appear in the table, but are + * instead recorded as status values. + * + * Note: These statics are initialized below in static block. + */ + private static final ExceptionNode[] exceptionTable; + private static final ReentrantLock exceptionTableLock; + private static final ReferenceQueue exceptionTableRefQueue; + + /** + * Fixed capacity for exceptionTable. + */ + private static final int EXCEPTION_MAP_CAPACITY = 32; + + /** + * Key-value nodes for exception table. The chained hash table + * uses identity comparisons, full locking, and weak references + * for keys. The table has a fixed capacity because it only + * maintains task exceptions long enough for joiners to access + * them, so should never become very large for sustained + * periods. However, since we do not know when the last joiner + * completes, we must use weak references and expunge them. We do + * so on each operation (hence full locking). Also, some thread in + * any ForkJoinPool will call helpExpungeStaleExceptions when its + * pool becomes isQuiescent. + */ + static final class ExceptionNode extends WeakReference> { + final Throwable ex; + ExceptionNode next; + final long thrower; // use id not ref to avoid weak cycles + ExceptionNode(ForkJoinTask task, Throwable ex, ExceptionNode next) { + super(task, exceptionTableRefQueue); + this.ex = ex; + this.next = next; + this.thrower = Thread.currentThread().getId(); + } + } + + /** + * Records exception and sets exceptional completion. + * + * @return status on exit + */ + private int setExceptionalCompletion(Throwable ex) { + int h = System.identityHashCode(this); + final ReentrantLock lock = exceptionTableLock; + lock.lock(); + try { + expungeStaleExceptions(); + ExceptionNode[] t = exceptionTable; + int i = h & (t.length - 1); + for (ExceptionNode e = t[i]; ; e = e.next) { + if (e == null) { + t[i] = new ExceptionNode(this, ex, t[i]); + break; + } + if (e.get() == this) // already present + break; + } + } finally { + lock.unlock(); + } + return setCompletion(EXCEPTIONAL); + } + + /** + * Cancels, ignoring any exceptions thrown by cancel. Used during + * worker and pool shutdown. Cancel is spec'ed not to throw any + * exceptions, but if it does anyway, we have no recourse during + * shutdown, so guard against this case. 
+ */ + static final void cancelIgnoringExceptions(ForkJoinTask t) { + if (t != null && t.status >= 0) { + try { + t.cancel(false); + } catch (Throwable ignore) { + } + } + } + + /** + * Removes exception node and clears status + */ + private void clearExceptionalCompletion() { + int h = System.identityHashCode(this); + final ReentrantLock lock = exceptionTableLock; + lock.lock(); + try { + ExceptionNode[] t = exceptionTable; + int i = h & (t.length - 1); + ExceptionNode e = t[i]; + ExceptionNode pred = null; + while (e != null) { + ExceptionNode next = e.next; + if (e.get() == this) { + if (pred == null) + t[i] = next; + else + pred.next = next; + break; + } + pred = e; + e = next; + } + expungeStaleExceptions(); + status = 0; + } finally { + lock.unlock(); + } + } + + /** + * Returns a rethrowable exception for the given task, if + * available. To provide accurate stack traces, if the exception + * was not thrown by the current thread, we try to create a new + * exception of the same type as the one thrown, but with the + * recorded exception as its cause. If there is no such + * constructor, we instead try to use a no-arg constructor, + * followed by initCause, to the same effect. If none of these + * apply, or any fail due to other exceptions, we return the + * recorded exception, which is still correct, although it may + * contain a misleading stack trace. + * + * @return the exception, or null if none + */ + private Throwable getThrowableException() { + if (status != EXCEPTIONAL) + return null; + int h = System.identityHashCode(this); + ExceptionNode e; + final ReentrantLock lock = exceptionTableLock; + lock.lock(); + try { + expungeStaleExceptions(); + ExceptionNode[] t = exceptionTable; + e = t[h & (t.length - 1)]; + while (e != null && e.get() != this) + e = e.next; + } finally { + lock.unlock(); + } + Throwable ex; + if (e == null || (ex = e.ex) == null) + return null; + if (e.thrower != Thread.currentThread().getId()) { + Class ec = ex.getClass(); + try { + Constructor noArgCtor = null; + Constructor[] cs = ec.getConstructors();// public ctors only + for (int i = 0; i < cs.length; ++i) { + Constructor c = cs[i]; + Class[] ps = c.getParameterTypes(); + if (ps.length == 0) + noArgCtor = c; + else if (ps.length == 1 && ps[0] == Throwable.class) + return (Throwable)(c.newInstance(ex)); + } + if (noArgCtor != null) { + Throwable wx = (Throwable)(noArgCtor.newInstance()); + wx.initCause(ex); + return wx; + } + } catch (Exception ignore) { + } + } + return ex; + } + + /** + * Poll stale refs and remove them. Call only while holding lock. + */ + private static void expungeStaleExceptions() { + for (Object x; (x = exceptionTableRefQueue.poll()) != null;) { + if (x instanceof ExceptionNode) { + ForkJoinTask key = ((ExceptionNode)x).get(); + ExceptionNode[] t = exceptionTable; + int i = System.identityHashCode(key) & (t.length - 1); + ExceptionNode e = t[i]; + ExceptionNode pred = null; + while (e != null) { + ExceptionNode next = e.next; + if (e == x) { + if (pred == null) + t[i] = next; + else + pred.next = next; + break; + } + pred = e; + e = next; + } + } + } + } + + /** + * If lock is available, poll stale refs and remove them. + * Called from ForkJoinPool when pools become quiescent. 
+ */ + static final void helpExpungeStaleExceptions() { + final ReentrantLock lock = exceptionTableLock; + if (lock.tryLock()) { + try { + expungeStaleExceptions(); + } finally { + lock.unlock(); + } + } + } + + /** + * Report the result of invoke or join; called only upon + * non-normal return of internal versions. + */ + private V reportResult() { + int s; Throwable ex; + if ((s = status) == CANCELLED) + throw new CancellationException(); + if (s == EXCEPTIONAL && (ex = getThrowableException()) != null) + U.throwException(ex); + return getRawResult(); + } + + // public methods + + /** + * Arranges to asynchronously execute this task. While it is not + * necessarily enforced, it is a usage error to fork a task more + * than once unless it has completed and been reinitialized. + * Subsequent modifications to the state of this task or any data + * it operates on are not necessarily consistently observable by + * any thread other than the one executing it unless preceded by a + * call to {@link #join} or related methods, or a call to {@link + * #isDone} returning {@code true}. + * + *

+     * <p>This method may be invoked only from within {@code
+     * ForkJoinPool} computations (as may be determined using method
+     * {@link #inForkJoinPool}).  Attempts to invoke in other contexts
+     * result in exceptions or errors, possibly including {@code
+     * ClassCastException}.
+     *
+     * @return {@code this}, to simplify usage
+     */
+    public final ForkJoinTask<V> fork() {
+        ForkJoinWorkerThread wt;
+        (wt = (ForkJoinWorkerThread)Thread.currentThread()).
+            workQueue.push(this, wt.pool);
+        return this;
+    }
+
+    /**
+     * Returns the result of the computation when it {@link #isDone is
+     * done}.  This method differs from {@link #get()} in that
+     * abnormal completion results in {@code RuntimeException} or
+     * {@code Error}, not {@code ExecutionException}, and that
+     * interrupts of the calling thread do not cause the
+     * method to abruptly return by throwing {@code
+     * InterruptedException}.
+     *
+     * @return the computed result
+     */
+    public final V join() {
+        if (doJoin() != NORMAL)
+            return reportResult();
+        else
+            return getRawResult();
+    }
+
+    /**
+     * Commences performing this task, awaits its completion if
+     * necessary, and returns its result, or throws an (unchecked)
+     * {@code RuntimeException} or {@code Error} if the underlying
+     * computation did so.
+     *
+     * @return the computed result
+     */
+    public final V invoke() {
+        if (doInvoke() != NORMAL)
+            return reportResult();
+        else
+            return getRawResult();
+    }
+
+    /**
+     * Forks the given tasks, returning when {@code isDone} holds for
+     * each task or an (unchecked) exception is encountered, in which
+     * case the exception is rethrown. If more than one task
+     * encounters an exception, then this method throws any one of
+     * these exceptions. If any task encounters an exception, the
+     * other may be cancelled. However, the execution status of
+     * individual tasks is not guaranteed upon exceptional return. The
+     * status of each task may be obtained using {@link
+     * #getException()} and related methods to check if they have been
+     * cancelled, completed normally or exceptionally, or left
+     * unprocessed.
+     *

+     * <p>This method may be invoked only from within {@code
+     * ForkJoinPool} computations (as may be determined using method
+     * {@link #inForkJoinPool}).  Attempts to invoke in other contexts
+     * result in exceptions or errors, possibly including {@code
+     * ClassCastException}.
+     *
+     * @param t1 the first task
+     * @param t2 the second task
+     * @throws NullPointerException if any task is null
+     */
+    public static void invokeAll(ForkJoinTask<?> t1, ForkJoinTask<?> t2) {
+        t2.fork();
+        t1.invoke();
+        t2.join();
+    }
+
+    /**
+     * Forks the given tasks, returning when {@code isDone} holds for
+     * each task or an (unchecked) exception is encountered, in which
+     * case the exception is rethrown. If more than one task
+     * encounters an exception, then this method throws any one of
+     * these exceptions. If any task encounters an exception, others
+     * may be cancelled. However, the execution status of individual
+     * tasks is not guaranteed upon exceptional return. The status of
+     * each task may be obtained using {@link #getException()} and
+     * related methods to check if they have been cancelled, completed
+     * normally or exceptionally, or left unprocessed.
+     *

+     * <p>This method may be invoked only from within {@code
+     * ForkJoinPool} computations (as may be determined using method
+     * {@link #inForkJoinPool}).  Attempts to invoke in other contexts
+     * result in exceptions or errors, possibly including {@code
+     * ClassCastException}.
+     *
+     * @param tasks the tasks
+     * @throws NullPointerException if any task is null
+     */
+    public static void invokeAll(ForkJoinTask<?>... tasks) {
+        Throwable ex = null;
+        int last = tasks.length - 1;
+        for (int i = last; i >= 0; --i) {
+            ForkJoinTask<?> t = tasks[i];
+            if (t == null) {
+                if (ex == null)
+                    ex = new NullPointerException();
+            }
+            else if (i != 0)
+                t.fork();
+            else if (t.doInvoke() < NORMAL && ex == null)
+                ex = t.getException();
+        }
+        for (int i = 1; i <= last; ++i) {
+            ForkJoinTask<?> t = tasks[i];
+            if (t != null) {
+                if (ex != null)
+                    t.cancel(false);
+                else if (t.doJoin() < NORMAL)
+                    ex = t.getException();
+            }
+        }
+        if (ex != null)
+            U.throwException(ex);
+    }
+
+    /**
+     * Forks all tasks in the specified collection, returning when
+     * {@code isDone} holds for each task or an (unchecked) exception
+     * is encountered, in which case the exception is rethrown. If
+     * more than one task encounters an exception, then this method
+     * throws any one of these exceptions. If any task encounters an
+     * exception, others may be cancelled. However, the execution
+     * status of individual tasks is not guaranteed upon exceptional
+     * return. The status of each task may be obtained using {@link
+     * #getException()} and related methods to check if they have been
+     * cancelled, completed normally or exceptionally, or left
+     * unprocessed.
+     *

+     * <p>This method may be invoked only from within {@code
+     * ForkJoinPool} computations (as may be determined using method
+     * {@link #inForkJoinPool}).  Attempts to invoke in other contexts
+     * result in exceptions or errors, possibly including {@code
+     * ClassCastException}.
+     *
+     * @param tasks the collection of tasks
+     * @return the tasks argument, to simplify usage
+     * @throws NullPointerException if tasks or any element are null
+     */
+    public static <T extends ForkJoinTask<?>> Collection<T> invokeAll(Collection<T> tasks) {
+        if (!(tasks instanceof RandomAccess) || !(tasks instanceof List<?>)) {
+            invokeAll(tasks.toArray(new ForkJoinTask<?>[tasks.size()]));
+            return tasks;
+        }
+        @SuppressWarnings("unchecked")
+        List<? extends ForkJoinTask<?>> ts =
+            (List<? extends ForkJoinTask<?>>) tasks;
+        Throwable ex = null;
+        int last = ts.size() - 1;
+        for (int i = last; i >= 0; --i) {
+            ForkJoinTask<?> t = ts.get(i);
+            if (t == null) {
+                if (ex == null)
+                    ex = new NullPointerException();
+            }
+            else if (i != 0)
+                t.fork();
+            else if (t.doInvoke() < NORMAL && ex == null)
+                ex = t.getException();
+        }
+        for (int i = 1; i <= last; ++i) {
+            ForkJoinTask<?> t = ts.get(i);
+            if (t != null) {
+                if (ex != null)
+                    t.cancel(false);
+                else if (t.doJoin() < NORMAL)
+                    ex = t.getException();
+            }
+        }
+        if (ex != null)
+            U.throwException(ex);
+        return tasks;
+    }
+
+    /**
+     * Attempts to cancel execution of this task. This attempt will
+     * fail if the task has already completed or could not be
+     * cancelled for some other reason. If successful, and this task
+     * has not started when {@code cancel} is called, execution of
+     * this task is suppressed. After this method returns
+     * successfully, unless there is an intervening call to {@link
+     * #reinitialize}, subsequent calls to {@link #isCancelled},
+     * {@link #isDone}, and {@code cancel} will return {@code true}
+     * and calls to {@link #join} and related methods will result in
+     * {@code CancellationException}.
+     *

This method may be overridden in subclasses, but if so, must + * still ensure that these properties hold. In particular, the + * {@code cancel} method itself must not throw exceptions. + * + *

This method is designed to be invoked by other + * tasks. To terminate the current task, you can just return or + * throw an unchecked exception from its computation method, or + * invoke {@link #completeExceptionally}. + * + * @param mayInterruptIfRunning this value has no effect in the + * default implementation because interrupts are not used to + * control cancellation. + * + * @return {@code true} if this task is now cancelled + */ + public boolean cancel(boolean mayInterruptIfRunning) { + return setCompletion(CANCELLED) == CANCELLED; + } + + public final boolean isDone() { + return status < 0; + } + + public final boolean isCancelled() { + return status == CANCELLED; + } + + /** + * Returns {@code true} if this task threw an exception or was cancelled. + * + * @return {@code true} if this task threw an exception or was cancelled + */ + public final boolean isCompletedAbnormally() { + return status < NORMAL; + } + + /** + * Returns {@code true} if this task completed without throwing an + * exception and was not cancelled. + * + * @return {@code true} if this task completed without throwing an + * exception and was not cancelled + */ + public final boolean isCompletedNormally() { + return status == NORMAL; + } + + /** + * Returns the exception thrown by the base computation, or a + * {@code CancellationException} if cancelled, or {@code null} if + * none or if the method has not yet completed. + * + * @return the exception, or {@code null} if none + */ + public final Throwable getException() { + int s = status; + return ((s >= NORMAL) ? null : + (s == CANCELLED) ? new CancellationException() : + getThrowableException()); + } + + /** + * Completes this task abnormally, and if not already aborted or + * cancelled, causes it to throw the given exception upon + * {@code join} and related operations. This method may be used + * to induce exceptions in asynchronous tasks, or to force + * completion of tasks that would not otherwise complete. Its use + * in other situations is discouraged. This method is + * overridable, but overridden versions must invoke {@code super} + * implementation to maintain guarantees. + * + * @param ex the exception to throw. If this exception is not a + * {@code RuntimeException} or {@code Error}, the actual exception + * thrown will be a {@code RuntimeException} with cause {@code ex}. + */ + public void completeExceptionally(Throwable ex) { + setExceptionalCompletion((ex instanceof RuntimeException) || + (ex instanceof Error) ? ex : + new RuntimeException(ex)); + } + + /** + * Completes this task, and if not already aborted or cancelled, + * returning the given value as the result of subsequent + * invocations of {@code join} and related operations. This method + * may be used to provide results for asynchronous tasks, or to + * provide alternative handling for tasks that would not otherwise + * complete normally. Its use in other situations is + * discouraged. This method is overridable, but overridden + * versions must invoke {@code super} implementation to maintain + * guarantees. + * + * @param value the result value for this task + */ + public void complete(V value) { + try { + setRawResult(value); + } catch (Throwable rex) { + setExceptionalCompletion(rex); + return; + } + setCompletion(NORMAL); + } + + /** + * Waits if necessary for the computation to complete, and then + * retrieves its result. 
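The cancellation and status-query methods above compose as in this sketch (names illustrative); note that a successful cancel only suppresses execution if the task had not yet started:

    import java.util.concurrent.CancellationException;
    import akka.jsr166y.RecursiveAction;

    public class CancelSketch extends RecursiveAction {
        protected void compute() {
            RecursiveAction side = new RecursiveAction() {
                protected void compute() { /* speculative work */ }
            };
            side.fork();
            if (side.cancel(false)) {
                // the task is now cancelled; join and related methods would
                // throw CancellationException
                assert side.isCancelled() && side.isDone();
                assert side.isCompletedAbnormally();
                assert side.getException() instanceof CancellationException;
            } else {
                side.join(); // cancel lost the race: wait for the result
            }
        }
    }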
+ * + * @return the computed result + * @throws CancellationException if the computation was cancelled + * @throws ExecutionException if the computation threw an + * exception + * @throws InterruptedException if the current thread is not a + * member of a ForkJoinPool and was interrupted while waiting + */ + public final V get() throws InterruptedException, ExecutionException { + int s = (Thread.currentThread() instanceof ForkJoinWorkerThread) ? + doJoin() : externalInterruptibleAwaitDone(0L); + Throwable ex; + if (s == CANCELLED) + throw new CancellationException(); + if (s == EXCEPTIONAL && (ex = getThrowableException()) != null) + throw new ExecutionException(ex); + return getRawResult(); + } + + /** + * Waits if necessary for at most the given time for the computation + * to complete, and then retrieves its result, if available. + * + * @param timeout the maximum time to wait + * @param unit the time unit of the timeout argument + * @return the computed result + * @throws CancellationException if the computation was cancelled + * @throws ExecutionException if the computation threw an + * exception + * @throws InterruptedException if the current thread is not a + * member of a ForkJoinPool and was interrupted while waiting + * @throws TimeoutException if the wait timed out + */ + public final V get(long timeout, TimeUnit unit) + throws InterruptedException, ExecutionException, TimeoutException { + // Messy in part because we measure in nanos, but wait in millis + int s; long millis, nanos; + Thread t = Thread.currentThread(); + if (!(t instanceof ForkJoinWorkerThread)) { + if ((millis = unit.toMillis(timeout)) > 0L) + s = externalInterruptibleAwaitDone(millis); + else + s = status; + } + else if ((s = status) >= 0 && (nanos = unit.toNanos(timeout)) > 0L) { + long deadline = System.nanoTime() + nanos; + ForkJoinWorkerThread wt = (ForkJoinWorkerThread)t; + ForkJoinPool.WorkQueue w = wt.workQueue; + ForkJoinPool p = wt.pool; + if (w.tryUnpush(this)) + doExec(); + boolean blocking = false; + try { + while ((s = status) >= 0) { + if (w.runState < 0) + cancelIgnoringExceptions(this); + else if (!blocking) + blocking = p.tryCompensate(); + else { + millis = TimeUnit.NANOSECONDS.toMillis(nanos); + if (millis > 0L && + U.compareAndSwapInt(this, STATUS, s, s | SIGNAL)) { + try { + synchronized (this) { + if (status >= 0) + wait(millis); + } + } catch (InterruptedException ie) { + } + } + if ((s = status) < 0 || + (nanos = deadline - System.nanoTime()) <= 0L) + break; + } + } + } finally { + if (blocking) + p.incrementActiveCount(); + } + } + if (s != NORMAL) { + Throwable ex; + if (s == CANCELLED) + throw new CancellationException(); + if (s != EXCEPTIONAL) + throw new TimeoutException(); + if ((ex = getThrowableException()) != null) + throw new ExecutionException(ex); + } + return getRawResult(); + } + + /** + * Joins this task, without returning its result or throwing its + * exception. This method may be useful when processing + * collections of tasks when some have been cancelled or otherwise + * known to have aborted. + */ + public final void quietlyJoin() { + doJoin(); + } + + /** + * Commences performing this task and awaits its completion if + * necessary, without returning its result or throwing its + * exception. + */ + public final void quietlyInvoke() { + doInvoke(); + } + + /** + * Possibly executes tasks until the pool hosting the current task + * {@link ForkJoinPool#isQuiescent is quiescent}. 
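A sketch of the timed get variant above, waiting from outside the pool (task and timeout are illustrative):

    import java.util.concurrent.ExecutionException;
    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.TimeoutException;
    import akka.jsr166y.ForkJoinPool;
    import akka.jsr166y.ForkJoinTask;
    import akka.jsr166y.RecursiveTask;

    public class TimedGetSketch {
        public static void main(String[] args) throws InterruptedException {
            ForkJoinPool pool = new ForkJoinPool();
            ForkJoinTask<Long> task = pool.submit(new RecursiveTask<Long>() {
                protected Long compute() { return 42L; }
            });
            try {
                System.out.println(task.get(1, TimeUnit.SECONDS));
            } catch (TimeoutException e) {
                task.cancel(false); // give up; suppresses the task if unstarted
            } catch (ExecutionException e) {
                e.getCause().printStackTrace(); // the computation itself failed
            }
        }
    }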
This method may + * be of use in designs in which many tasks are forked, but none + * are explicitly joined, instead executing them until all are + * processed. + * + *

This method may be invoked only from within {@code + * ForkJoinPool} computations (as may be determined using method + * {@link #inForkJoinPool}). Attempts to invoke in other contexts + * result in exceptions or errors, possibly including {@code + * ClassCastException}. + */ + public static void helpQuiesce() { + ForkJoinWorkerThread wt = + (ForkJoinWorkerThread)Thread.currentThread(); + wt.pool.helpQuiescePool(wt.workQueue); + } + + /** + * Resets the internal bookkeeping state of this task, allowing a + * subsequent {@code fork}. This method allows repeated reuse of + * this task, but only if reuse occurs when this task has either + * never been forked, or has been forked, then completed and all + * outstanding joins of this task have also completed. Effects + * under any other usage conditions are not guaranteed. + * This method may be useful when executing + * pre-constructed trees of subtasks in loops. + * + *
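The fork-without-join style that helpQuiesce above supports looks roughly like this (loop bound and leaf work are illustrative):

    import akka.jsr166y.RecursiveAction;

    public class QuiesceSketch extends RecursiveAction {
        protected void compute() {
            // fire off leaf tasks without keeping references to join them...
            for (int i = 0; i < 100; i++) {
                new RecursiveAction() {
                    protected void compute() { /* leaf work */ }
                }.fork();
            }
            // ...then help execute tasks until the hosting pool is quiescent
            helpQuiesce();
        }
    }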

Upon completion of this method, {@code isDone()} reports + * {@code false}, and {@code getException()} reports {@code + * null}. However, the value returned by {@code getRawResult} is + * unaffected. To clear this value, you can invoke {@code + * setRawResult(null)}. + */ + public void reinitialize() { + if (status == EXCEPTIONAL) + clearExceptionalCompletion(); + else + status = 0; + } + + /** + * Returns the pool hosting the current task execution, or null + * if this task is executing outside of any ForkJoinPool. + * + * @see #inForkJoinPool + * @return the pool, or {@code null} if none + */ + public static ForkJoinPool getPool() { + Thread t = Thread.currentThread(); + return (t instanceof ForkJoinWorkerThread) ? + ((ForkJoinWorkerThread) t).pool : null; + } + + /** + * Returns {@code true} if the current thread is a {@link + * ForkJoinWorkerThread} executing as a ForkJoinPool computation. + * + * @return {@code true} if the current thread is a {@link + * ForkJoinWorkerThread} executing as a ForkJoinPool computation, + * or {@code false} otherwise + */ + public static boolean inForkJoinPool() { + return Thread.currentThread() instanceof ForkJoinWorkerThread; + } + + /** + * Tries to unschedule this task for execution. This method will + * typically succeed if this task is the most recently forked task + * by the current thread, and has not commenced executing in + * another thread. This method may be useful when arranging + * alternative local processing of tasks that could have been, but + * were not, stolen. + * + *
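A sketch of the reuse pattern that reinitialize enables; per the contract above, reuse is only safe once the task has completed and all joins of it have returned (values illustrative):

    import akka.jsr166y.ForkJoinPool;
    import akka.jsr166y.RecursiveTask;

    public class ReuseSketch {
        public static void main(String[] args) {
            ForkJoinPool pool = new ForkJoinPool();
            RecursiveTask<Integer> task = new RecursiveTask<Integer>() {
                protected Integer compute() { return 1; }
            };
            int sum = 0;
            for (int i = 0; i < 3; i++) {
                sum += pool.invoke(task); // invoke() has completed the task here
                task.reinitialize();      // reset bookkeeping so it can run again
            }
            System.out.println(sum);      // prints 3
        }
    }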

This method may be invoked only from within {@code + * ForkJoinPool} computations (as may be determined using method + * {@link #inForkJoinPool}). Attempts to invoke in other contexts + * result in exceptions or errors, possibly including {@code + * ClassCastException}. + * + * @return {@code true} if unforked + */ + public boolean tryUnfork() { + return ((ForkJoinWorkerThread)Thread.currentThread()) + .workQueue.tryUnpush(this); + } + + /** + * Returns an estimate of the number of tasks that have been + * forked by the current worker thread but not yet executed. This + * value may be useful for heuristic decisions about whether to + * fork other tasks. + * + *

This method may be invoked only from within {@code + * ForkJoinPool} computations (as may be determined using method + * {@link #inForkJoinPool}). Attempts to invoke in other contexts + * result in exceptions or errors, possibly including {@code + * ClassCastException}. + * + * @return the number of tasks + */ + public static int getQueuedTaskCount() { + return ((ForkJoinWorkerThread) Thread.currentThread()) + .workQueue.queueSize(); + } + + /** + * Returns an estimate of how many more locally queued tasks are + * held by the current worker thread than there are other worker + * threads that might steal them. This value may be useful for + * heuristic decisions about whether to fork other tasks. In many + * usages of ForkJoinTasks, at steady state, each worker should + * aim to maintain a small constant surplus (for example, 3) of + * tasks, and to process computations locally if this threshold is + * exceeded. + * + *

This method may be invoked only from within {@code + * ForkJoinPool} computations (as may be determined using method + * {@link #inForkJoinPool}). Attempts to invoke in other contexts + * result in exceptions or errors, possibly including {@code + * ClassCastException}. + * + * @return the surplus number of tasks, which may be negative + */ + public static int getSurplusQueuedTaskCount() { + /* + * The aim of this method is to return a cheap heuristic guide + * for task partitioning when programmers, frameworks, tools, + * or languages have little or no idea about task granularity. + * In essence by offering this method, we ask users only about + * tradeoffs in overhead vs expected throughput and its + * variance, rather than how finely to partition tasks. + * + * In a steady state strict (tree-structured) computation, + * each thread makes available for stealing enough tasks for + * other threads to remain active. Inductively, if all threads + * play by the same rules, each thread should make available + * only a constant number of tasks. + * + * The minimum useful constant is just 1. But using a value of + * 1 would require immediate replenishment upon each steal to + * maintain enough tasks, which is infeasible. Further, + * partitionings/granularities of offered tasks should + * minimize steal rates, which in general means that threads + * nearer the top of computation tree should generate more + * than those nearer the bottom. In perfect steady state, each + * thread is at approximately the same level of computation + * tree. However, producing extra tasks amortizes the + * uncertainty of progress and diffusion assumptions. + * + * So, users will want to use values larger, but not much + * larger than 1 to both smooth over transient shortages and + * hedge against uneven progress; as traded off against the + * cost of extra task overhead. We leave the user to pick a + * threshold value to compare with the results of this call to + * guide decisions, but recommend values such as 3. + * + * When all threads are active, it is on average OK to + * estimate surplus strictly locally. In steady-state, if one + * thread is maintaining say 2 surplus tasks, then so are + * others. So we can just use estimated queue length. + * However, this strategy alone leads to serious mis-estimates + * in some non-steady-state conditions (ramp-up, ramp-down, + * other stalls). We can detect many of these by further + * considering the number of "idle" threads, that are known to + * have zero queued tasks, so compensate by a factor of + * (#idle/#active) threads. + */ + ForkJoinWorkerThread wt = + (ForkJoinWorkerThread)Thread.currentThread(); + return wt.workQueue.queueSize() - wt.pool.idlePerActive(); + } + + // Extension methods + + /** + * Returns the result that would be returned by {@link #join}, even + * if this task completed abnormally, or {@code null} if this task + * is not known to have been completed. This method is designed + * to aid debugging, as well as to support extensions. Its use in + * any other context is discouraged. + * + * @return the result, or {@code null} if not completed + */ + public abstract V getRawResult(); + + /** + * Forces the given value to be returned as a result. This method + * is designed to support extensions, and should not in general be + * called otherwise. + * + * @param value the value + */ + protected abstract void setRawResult(V value); + + /** + * Immediately performs the base action of this task. 
This method + * is designed to support extensions, and should not in general be + * called otherwise. The return value controls whether this task + * is considered to be done normally. It may return false in + * asynchronous actions that require explicit invocations of + * {@link #complete} to become joinable. It may also throw an + * (unchecked) exception to indicate abnormal exit. + * + * @return {@code true} if completed normally + */ + protected abstract boolean exec(); + + /** + * Returns, but does not unschedule or execute, a task queued by + * the current thread but not yet executed, if one is immediately + * available. There is no guarantee that this task will actually + * be polled or executed next. Conversely, this method may return + * null even if a task exists but cannot be accessed without + * contention with other threads. This method is designed + * primarily to support extensions, and is unlikely to be useful + * otherwise. + * + *
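Pulling together the three extension hooks above (getRawResult, setRawResult, exec), here is a sketch of the asynchronous pattern mentioned in the exec contract, where a task becomes joinable only once complete or completeExceptionally is called from elsewhere; the class name is illustrative:

    import akka.jsr166y.ForkJoinTask;

    public class ExternallyCompletedTask<V> extends ForkJoinTask<V> {
        private V result;
        public V getRawResult()              { return result; }
        protected void setRawResult(V value) { result = value; }
        // returning false signals "not done yet": completion happens
        // externally via complete(value) or completeExceptionally(ex)
        protected boolean exec()             { return false; }
    }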

This method may be invoked only from within {@code + * ForkJoinPool} computations (as may be determined using method + * {@link #inForkJoinPool}). Attempts to invoke in other contexts + * result in exceptions or errors, possibly including {@code + * ClassCastException}. + * + * @return the next task, or {@code null} if none are available + */ + protected static ForkJoinTask peekNextLocalTask() { + return ((ForkJoinWorkerThread) Thread.currentThread()).workQueue.peek(); + } + + /** + * Unschedules and returns, without executing, the next task + * queued by the current thread but not yet executed. This method + * is designed primarily to support extensions, and is unlikely to + * be useful otherwise. + * + *

This method may be invoked only from within {@code + * ForkJoinPool} computations (as may be determined using method + * {@link #inForkJoinPool}). Attempts to invoke in other contexts + * result in exceptions or errors, possibly including {@code + * ClassCastException}. + * + * @return the next task, or {@code null} if none are available + */ + protected static ForkJoinTask pollNextLocalTask() { + return ((ForkJoinWorkerThread) Thread.currentThread()) + .workQueue.nextLocalTask(); + } + + /** + * Unschedules and returns, without executing, the next task + * queued by the current thread but not yet executed, if one is + * available, or if not available, a task that was forked by some + * other thread, if available. Availability may be transient, so a + * {@code null} result does not necessarily imply quiescence + * of the pool this task is operating in. This method is designed + * primarily to support extensions, and is unlikely to be useful + * otherwise. + * + *

This method may be invoked only from within {@code + * ForkJoinPool} computations (as may be determined using method + * {@link #inForkJoinPool}). Attempts to invoke in other contexts + * result in exceptions or errors, possibly including {@code + * ClassCastException}. + * + * @return a task, or {@code null} if none are available + */ + protected static ForkJoinTask pollTask() { + ForkJoinWorkerThread wt = + (ForkJoinWorkerThread)Thread.currentThread(); + return wt.pool.nextTaskFor(wt.workQueue); + } + + // Mark-bit operations + + /** + * Returns true if this task is marked. + * + * @return true if this task is marked + * @since 1.8 + */ + public final boolean isMarkedForkJoinTask() { + return (status & MARKED) != 0; + } + + /** + * Atomically sets the mark on this task. + * + * @return true if this task was previously unmarked + * @since 1.8 + */ + public final boolean markForkJoinTask() { + for (int s;;) { + if (((s = status) & MARKED) != 0) + return false; + if (U.compareAndSwapInt(this, STATUS, s, s | MARKED)) + return true; + } + } + + /** + * Atomically clears the mark on this task. + * + * @return true if this task was previously marked + * @since 1.8 + */ + public final boolean unmarkForkJoinTask() { + for (int s;;) { + if (((s = status) & MARKED) == 0) + return false; + if (U.compareAndSwapInt(this, STATUS, s, s & ~MARKED)) + return true; + } + } + + /** + * Adaptor for Runnables. This implements RunnableFuture + * to be compliant with AbstractExecutorService constraints + * when used in ForkJoinPool. + */ + static final class AdaptedRunnable extends ForkJoinTask + implements RunnableFuture { + final Runnable runnable; + final T resultOnCompletion; + T result; + AdaptedRunnable(Runnable runnable, T result) { + if (runnable == null) throw new NullPointerException(); + this.runnable = runnable; + this.resultOnCompletion = result; + } + public T getRawResult() { return result; } + public void setRawResult(T v) { result = v; } + public boolean exec() { + runnable.run(); + result = resultOnCompletion; + return true; + } + public void run() { invoke(); } + private static final long serialVersionUID = 5232453952276885070L; + } + + /** + * Adaptor for Callables + */ + static final class AdaptedCallable extends ForkJoinTask + implements RunnableFuture { + final Callable callable; + T result; + AdaptedCallable(Callable callable) { + if (callable == null) throw new NullPointerException(); + this.callable = callable; + } + public T getRawResult() { return result; } + public void setRawResult(T v) { result = v; } + public boolean exec() { + try { + result = callable.call(); + return true; + } catch (Error err) { + throw err; + } catch (RuntimeException rex) { + throw rex; + } catch (Exception ex) { + throw new RuntimeException(ex); + } + } + public void run() { invoke(); } + private static final long serialVersionUID = 2838392045355241008L; + } + + /** + * Returns a new {@code ForkJoinTask} that performs the {@code run} + * method of the given {@code Runnable} as its action, and returns + * a null result upon {@link #join}. + * + * @param runnable the runnable action + * @return the task + */ + public static ForkJoinTask adapt(Runnable runnable) { + return new AdaptedRunnable(runnable, null); + } + + /** + * Returns a new {@code ForkJoinTask} that performs the {@code run} + * method of the given {@code Runnable} as its action, and returns + * the given result upon {@link #join}. 
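A quick usage sketch for the adapt methods in this block (names illustrative):

    import akka.jsr166y.ForkJoinPool;
    import akka.jsr166y.ForkJoinTask;

    public class AdaptSketch {
        public static void main(String[] args) {
            // wrap a plain Runnable as a ForkJoinTask with a null result
            ForkJoinTask<?> task = ForkJoinTask.adapt(new Runnable() {
                public void run() { System.out.println("hello"); }
            });
            new ForkJoinPool().invoke(task); // join()/invoke() yield null
        }
    }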
+ * + * @param runnable the runnable action + * @param result the result upon completion + * @return the task + */ + public static ForkJoinTask adapt(Runnable runnable, T result) { + return new AdaptedRunnable(runnable, result); + } + + /** + * Returns a new {@code ForkJoinTask} that performs the {@code call} + * method of the given {@code Callable} as its action, and returns + * its result upon {@link #join}, translating any checked exceptions + * encountered into {@code RuntimeException}. + * + * @param callable the callable action + * @return the task + */ + public static ForkJoinTask adapt(Callable callable) { + return new AdaptedCallable(callable); + } + + // Serialization support + + private static final long serialVersionUID = -7721805057305804111L; + + /** + * Saves this task to a stream (that is, serializes it). + * + * @serialData the current run status and the exception thrown + * during execution, or {@code null} if none + */ + private void writeObject(java.io.ObjectOutputStream s) + throws java.io.IOException { + s.defaultWriteObject(); + s.writeObject(getException()); + } + + /** + * Reconstitutes this task from a stream (that is, deserializes it). + */ + private void readObject(java.io.ObjectInputStream s) + throws java.io.IOException, ClassNotFoundException { + s.defaultReadObject(); + Object ex = s.readObject(); + if (ex != null) + setExceptionalCompletion((Throwable)ex); + } + + // Unsafe mechanics + private static final sun.misc.Unsafe U; + private static final long STATUS; + static { + exceptionTableLock = new ReentrantLock(); + exceptionTableRefQueue = new ReferenceQueue(); + exceptionTable = new ExceptionNode[EXCEPTION_MAP_CAPACITY]; + try { + U = getUnsafe(); + STATUS = U.objectFieldOffset + (ForkJoinTask.class.getDeclaredField("status")); + } catch (Exception e) { + throw new Error(e); + } + } + + /** + * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package. + * Replace with a simple call to Unsafe.getUnsafe when integrating + * into a jdk. + * + * @return a sun.misc.Unsafe + */ + private static sun.misc.Unsafe getUnsafe() { + try { + return sun.misc.Unsafe.getUnsafe(); + } catch (SecurityException se) { + try { + return java.security.AccessController.doPrivileged + (new java.security + .PrivilegedExceptionAction() { + public sun.misc.Unsafe run() throws Exception { + java.lang.reflect.Field f = sun.misc + .Unsafe.class.getDeclaredField("theUnsafe"); + f.setAccessible(true); + return (sun.misc.Unsafe) f.get(null); + }}); + } catch (java.security.PrivilegedActionException e) { + throw new RuntimeException("Could not initialize intrinsics", + e.getCause()); + } + } + } +} diff --git a/akka-actor/src/main/java/akka/jsr166y/ForkJoinWorkerThread.java b/akka-actor/src/main/java/akka/jsr166y/ForkJoinWorkerThread.java new file mode 100644 index 0000000000..61b0cce979 --- /dev/null +++ b/akka-actor/src/main/java/akka/jsr166y/ForkJoinWorkerThread.java @@ -0,0 +1,119 @@ +/* + * Written by Doug Lea with assistance from members of JCP JSR-166 + * Expert Group and released to the public domain, as explained at + * http://creativecommons.org/publicdomain/zero/1.0/ + */ + +package akka.jsr166y; + +/** + * A thread managed by a {@link ForkJoinPool}, which executes + * {@link ForkJoinTask}s. + * This class is subclassable solely for the sake of adding + * functionality -- there are no overridable methods dealing with + * scheduling or execution. However, you can override initialization + * and termination methods surrounding the main task processing loop. 
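The class comment above notes that ForkJoinWorkerThread is subclassable only to add hooks such as onStart and onTermination; installing such a subclass requires the custom factory mentioned just below. A sketch (class names illustrative; the four-argument ForkJoinPool constructor is assumed from the companion ForkJoinPool class in this patch):

    import akka.jsr166y.ForkJoinPool;
    import akka.jsr166y.ForkJoinWorkerThread;

    public class LoggingWorkerThread extends ForkJoinWorkerThread {
        protected LoggingWorkerThread(ForkJoinPool pool) { super(pool); }

        protected void onStart() {
            super.onStart(); // must run first, per the contract above
            System.out.println(getName() + " starting");
        }

        protected void onTermination(Throwable exception) {
            System.out.println(getName() + " terminating: " + exception);
            super.onTermination(exception); // must run last, per the contract above
        }

        static final ForkJoinPool.ForkJoinWorkerThreadFactory FACTORY =
            new ForkJoinPool.ForkJoinWorkerThreadFactory() {
                public ForkJoinWorkerThread newThread(ForkJoinPool pool) {
                    return new LoggingWorkerThread(pool);
                }
            };

        public static ForkJoinPool newPool() {
            return new ForkJoinPool(Runtime.getRuntime().availableProcessors(),
                                    FACTORY, null, false);
        }
    }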
+ * If you do create such a subclass, you will also need to supply a + * custom {@link ForkJoinPool.ForkJoinWorkerThreadFactory} to use it + * in a {@code ForkJoinPool}. + * + * @since 1.7 + * @author Doug Lea + */ +public class ForkJoinWorkerThread extends Thread { + /* + * ForkJoinWorkerThreads are managed by ForkJoinPools and perform + * ForkJoinTasks. For explanation, see the internal documentation + * of class ForkJoinPool. + */ + + final ForkJoinPool.WorkQueue workQueue; // Work-stealing mechanics + final ForkJoinPool pool; // the pool this thread works in + + /** + * Creates a ForkJoinWorkerThread operating in the given pool. + * + * @param pool the pool this thread works in + * @throws NullPointerException if pool is null + */ + protected ForkJoinWorkerThread(ForkJoinPool pool) { + super(pool.nextWorkerName()); + setDaemon(true); + Thread.UncaughtExceptionHandler ueh = pool.ueh; + if (ueh != null) + setUncaughtExceptionHandler(ueh); + this.pool = pool; + this.workQueue = new ForkJoinPool.WorkQueue(this, pool.localMode); + pool.registerWorker(this); + } + + /** + * Returns the pool hosting this thread. + * + * @return the pool + */ + public ForkJoinPool getPool() { + return pool; + } + + /** + * Returns the index number of this thread in its pool. The + * returned value ranges from zero to the maximum number of + * threads (minus one) that have ever been created in the pool. + * This method may be useful for applications that track status or + * collect results per-worker rather than per-task. + * + * @return the index number + */ + public int getPoolIndex() { + return workQueue.poolIndex; + } + + /** + * Initializes internal state after construction but before + * processing any tasks. If you override this method, you must + * invoke {@code super.onStart()} at the beginning of the method. + * Initialization requires care: Most fields must have legal + * default values, to ensure that attempted accesses from other + * threads work correctly even before this thread starts + * processing tasks. + */ + protected void onStart() { + } + + /** + * Performs cleanup associated with termination of this worker + * thread. If you override this method, you must invoke + * {@code super.onTermination} at the end of the overridden method. + * + * @param exception the exception causing this thread to abort due + * to an unrecoverable error, or {@code null} if completed normally + */ + protected void onTermination(Throwable exception) { + } + + /** + * This method is required to be public, but should never be + * called explicitly. It performs the main run loop to execute + * {@link ForkJoinTask}s. + */ + public void run() { + Throwable exception = null; + try { + onStart(); + pool.runWorker(this); + } catch (Throwable ex) { + exception = ex; + } finally { + try { + onTermination(exception); + } catch (Throwable ex) { + if (exception == null) + exception = ex; + } finally { + pool.deregisterWorker(this, exception); + } + } + } +} + diff --git a/akka-actor/src/main/java/akka/jsr166y/RecursiveAction.java b/akka-actor/src/main/java/akka/jsr166y/RecursiveAction.java new file mode 100644 index 0000000000..c13c513171 --- /dev/null +++ b/akka-actor/src/main/java/akka/jsr166y/RecursiveAction.java @@ -0,0 +1,164 @@ +/* + * Written by Doug Lea with assistance from members of JCP JSR-166 + * Expert Group and released to the public domain, as explained at + * http://creativecommons.org/publicdomain/zero/1.0/ + */ + +package akka.jsr166y; + +/** + * A recursive resultless {@link ForkJoinTask}. 
This class + * establishes conventions to parameterize resultless actions as + * {@code Void} {@code ForkJoinTask}s. Because {@code null} is the + * only valid value of type {@code Void}, methods such as {@code join} + * always return {@code null} upon completion. + * + *

Sample Usages. Here is a simple but complete ForkJoin + * sort that sorts a given {@code long[]} array: + * + *

+ * <pre> {@code
+ * static class SortTask extends RecursiveAction {
+ *   final long[] array; final int lo, hi;
+ *   SortTask(long[] array, int lo, int hi) {
+ *     this.array = array; this.lo = lo; this.hi = hi;
+ *   }
+ *   SortTask(long[] array) { this(array, 0, array.length); }
+ *   protected void compute() {
+ *     if (hi - lo < THRESHOLD)
+ *       sortSequentially(lo, hi);
+ *     else {
+ *       int mid = (lo + hi) >>> 1;
+ *       invokeAll(new SortTask(array, lo, mid),
+ *                 new SortTask(array, mid, hi));
+ *       merge(lo, mid, hi);
+ *     }
+ *   }
+ *   // implementation details follow:
+ *   static final int THRESHOLD = 1000;
+ *   void sortSequentially(int lo, int hi) {
+ *     Arrays.sort(array, lo, hi);
+ *   }
+ *   void merge(int lo, int mid, int hi) {
+ *     long[] buf = Arrays.copyOfRange(array, lo, mid);
+ *     for (int i = 0, j = lo, k = mid; i < buf.length; j++)
+ *       array[j] = (k == hi || buf[i] < array[k]) ?
+ *         buf[i++] : array[k++];
+ *   }
+ * }}</pre>
+ * + * You could then sort {@code anArray} by creating {@code new + * SortTask(anArray)} and invoking it in a ForkJoinPool. As a more + * concrete simple example, the following task increments each element + * of an array: + *
+ * <pre> {@code
+ * class IncrementTask extends RecursiveAction {
+ *   final long[] array; final int lo, hi;
+ *   IncrementTask(long[] array, int lo, int hi) {
+ *     this.array = array; this.lo = lo; this.hi = hi;
+ *   }
+ *   protected void compute() {
+ *     if (hi - lo < THRESHOLD) {
+ *       for (int i = lo; i < hi; ++i)
+ *         array[i]++;
+ *     }
+ *     else {
+ *       int mid = (lo + hi) >>> 1;
+ *       invokeAll(new IncrementTask(array, lo, mid),
+ *                 new IncrementTask(array, mid, hi));
+ *     }
+ *   }
+ * }}</pre>
+ * + *

The following example illustrates some refinements and idioms + * that may lead to better performance: RecursiveActions need not be + * fully recursive, so long as they maintain the basic + * divide-and-conquer approach. Here is a class that sums the squares + * of each element of a double array, by subdividing out only the + * right-hand-sides of repeated divisions by two, and keeping track of + * them with a chain of {@code next} references. It uses a dynamic + * threshold based on method {@code getSurplusQueuedTaskCount}, but + * counterbalances potential excess partitioning by directly + * performing leaf actions on unstolen tasks rather than further + * subdividing. + * + *

+ * <pre> {@code
+ * double sumOfSquares(ForkJoinPool pool, double[] array) {
+ *   int n = array.length;
+ *   Applyer a = new Applyer(array, 0, n, null);
+ *   pool.invoke(a);
+ *   return a.result;
+ * }
+ *
+ * class Applyer extends RecursiveAction {
+ *   final double[] array;
+ *   final int lo, hi;
+ *   double result;
+ *   Applyer next; // keeps track of right-hand-side tasks
+ *   Applyer(double[] array, int lo, int hi, Applyer next) {
+ *     this.array = array; this.lo = lo; this.hi = hi;
+ *     this.next = next;
+ *   }
+ *
+ *   double atLeaf(int l, int h) {
+ *     double sum = 0;
+ *     for (int i = l; i < h; ++i) // perform leftmost base step
+ *       sum += array[i] * array[i];
+ *     return sum;
+ *   }
+ *
+ *   protected void compute() {
+ *     int l = lo;
+ *     int h = hi;
+ *     Applyer right = null;
+ *     while (h - l > 1 && getSurplusQueuedTaskCount() <= 3) {
+ *       int mid = (l + h) >>> 1;
+ *       right = new Applyer(array, mid, h, right);
+ *       right.fork();
+ *       h = mid;
+ *     }
+ *     double sum = atLeaf(l, h);
+ *     while (right != null) {
+ *       if (right.tryUnfork()) // directly calculate if not stolen
+ *         sum += right.atLeaf(right.lo, right.hi);
+ *       else {
+ *         right.join();
+ *         sum += right.result;
+ *       }
+ *       right = right.next;
+ *     }
+ *     result = sum;
+ *   }
+ * }}</pre>
+ * + * @since 1.7 + * @author Doug Lea + */ +public abstract class RecursiveAction extends ForkJoinTask { + private static final long serialVersionUID = 5232453952276485070L; + + /** + * The main computation performed by this task. + */ + protected abstract void compute(); + + /** + * Always returns {@code null}. + * + * @return {@code null} always + */ + public final Void getRawResult() { return null; } + + /** + * Requires null completion value. + */ + protected final void setRawResult(Void mustBeNull) { } + + /** + * Implements execution conventions for RecursiveActions. + */ + protected final boolean exec() { + compute(); + return true; + } + +} diff --git a/akka-actor/src/main/java/akka/jsr166y/RecursiveTask.java b/akka-actor/src/main/java/akka/jsr166y/RecursiveTask.java new file mode 100644 index 0000000000..12378ee6c8 --- /dev/null +++ b/akka-actor/src/main/java/akka/jsr166y/RecursiveTask.java @@ -0,0 +1,68 @@ +/* + * Written by Doug Lea with assistance from members of JCP JSR-166 + * Expert Group and released to the public domain, as explained at + * http://creativecommons.org/publicdomain/zero/1.0/ + */ + +package akka.jsr166y; + +/** + * A recursive result-bearing {@link ForkJoinTask}. + * + *

For a classic example, here is a task computing Fibonacci numbers: + * + *

+ * <pre> {@code
+ * class Fibonacci extends RecursiveTask<Integer> {
+ *   final int n;
+ *   Fibonacci(int n) { this.n = n; }
+ *   protected Integer compute() {
+ *     if (n <= 1)
+ *       return n;
+ *     Fibonacci f1 = new Fibonacci(n - 1);
+ *     f1.fork();
+ *     Fibonacci f2 = new Fibonacci(n - 2);
+ *     return f2.compute() + f1.join();
+ *   }
+ * }}</pre>
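The paragraph below recommends cutting over to a sequential method below some minimum granularity; that refinement looks roughly like this (the threshold value and the sequential helper are illustrative):

    class Fibonacci extends RecursiveTask<Integer> {
        static final int THRESHOLD = 10; // granularity cutoff, tune as needed
        final int n;
        Fibonacci(int n) { this.n = n; }
        protected Integer compute() {
            if (n <= THRESHOLD)
                return seqFib(n);           // solve small problems sequentially
            Fibonacci f1 = new Fibonacci(n - 1);
            f1.fork();
            Fibonacci f2 = new Fibonacci(n - 2);
            return f2.compute() + f1.join();
        }
        static int seqFib(int n) {          // simple iterative base case
            int a = 0, b = 1;
            for (int i = 0; i < n; i++) { int t = a + b; a = b; b = t; }
            return a;
        }
    }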
+ * + * However, besides being a dumb way to compute Fibonacci functions + * (there is a simple fast linear algorithm that you'd use in + * practice), this is likely to perform poorly because the smallest + * subtasks are too small to be worthwhile splitting up. Instead, as + * is the case for nearly all fork/join applications, you'd pick some + * minimum granularity size (for example 10 here) for which you always + * sequentially solve rather than subdividing. + * + * @since 1.7 + * @author Doug Lea + */ +public abstract class RecursiveTask extends ForkJoinTask { + private static final long serialVersionUID = 5232453952276485270L; + + /** + * The result of the computation. + */ + V result; + + /** + * The main computation performed by this task. + */ + protected abstract V compute(); + + public final V getRawResult() { + return result; + } + + protected final void setRawResult(V value) { + result = value; + } + + /** + * Implements execution conventions for RecursiveTask. + */ + protected final boolean exec() { + result = compute(); + return true; + } + +} diff --git a/akka-actor/src/main/java/com/eaio/uuid/UUID.java b/akka-actor/src/main/java/com/eaio/uuid/UUID.java index 46bc867cc0..a578a68c6d 100644 --- a/akka-actor/src/main/java/com/eaio/uuid/UUID.java +++ b/akka-actor/src/main/java/com/eaio/uuid/UUID.java @@ -32,8 +32,6 @@ import java.io.ObjectInputStream; import java.io.ObjectOutputStream; import java.io.Serializable; -import org.omg.CORBA.portable.IDLEntity; - import com.eaio.util.lang.Hex; /** diff --git a/akka-actor/src/main/java/com/eaio/uuid/UUIDHelper.java b/akka-actor/src/main/java/com/eaio/uuid/UUIDHelper.java deleted file mode 100644 index 7abbe85895..0000000000 --- a/akka-actor/src/main/java/com/eaio/uuid/UUIDHelper.java +++ /dev/null @@ -1,86 +0,0 @@ -package com.eaio.uuid; - - -/** -* com/eaio/uuid/UUIDHelper.java . -* Generated by the IDL-to-Java compiler (portable), version "3.1" -* from uuid.idl -* Sonntag, 7. März 2004 21.35 Uhr CET -*/ - - -/** - * The UUID struct. 
- */ -abstract public class UUIDHelper -{ - private static String _id = "IDL:com/eaio/uuid/UUID:1.0"; - - public static void insert (org.omg.CORBA.Any a, com.eaio.uuid.UUID that) - { - org.omg.CORBA.portable.OutputStream out = a.create_output_stream (); - a.type (type ()); - write (out, that); - a.read_value (out.create_input_stream (), type ()); - } - - public static com.eaio.uuid.UUID extract (org.omg.CORBA.Any a) - { - return read (a.create_input_stream ()); - } - - private static org.omg.CORBA.TypeCode __typeCode = null; - private static boolean __active = false; - synchronized public static org.omg.CORBA.TypeCode type () - { - if (__typeCode == null) - { - synchronized (org.omg.CORBA.TypeCode.class) - { - if (__typeCode == null) - { - if (__active) - { - return org.omg.CORBA.ORB.init().create_recursive_tc ( _id ); - } - __active = true; - org.omg.CORBA.StructMember[] _members0 = new org.omg.CORBA.StructMember [2]; - org.omg.CORBA.TypeCode _tcOf_members0 = null; - _tcOf_members0 = org.omg.CORBA.ORB.init ().get_primitive_tc (org.omg.CORBA.TCKind.tk_longlong); - _members0[0] = new org.omg.CORBA.StructMember ( - "time", - _tcOf_members0, - null); - _tcOf_members0 = org.omg.CORBA.ORB.init ().get_primitive_tc (org.omg.CORBA.TCKind.tk_longlong); - _members0[1] = new org.omg.CORBA.StructMember ( - "clockSeqAndNode", - _tcOf_members0, - null); - __typeCode = org.omg.CORBA.ORB.init ().create_struct_tc (com.eaio.uuid.UUIDHelper.id (), "UUID", _members0); - __active = false; - } - } - } - return __typeCode; - } - - public static String id () - { - return _id; - } - - public static com.eaio.uuid.UUID read (org.omg.CORBA.portable.InputStream istream) - { - com.eaio.uuid.UUID value = new com.eaio.uuid.UUID (); - value.time = istream.read_longlong (); - value.clockSeqAndNode = istream.read_longlong (); - return value; - } - - public static void write (org.omg.CORBA.portable.OutputStream ostream, com.eaio.uuid.UUID value) - { - ostream.write_longlong (value.time); - ostream.write_longlong (value.clockSeqAndNode); - } - -} diff --git a/akka-actor/src/main/java/com/eaio/uuid/UUIDHolder.java b/akka-actor/src/main/java/com/eaio/uuid/UUIDHolder.java deleted file mode 100644 index d5531f5e00..0000000000 --- a/akka-actor/src/main/java/com/eaio/uuid/UUIDHolder.java +++ /dev/null @@ -1,42 +0,0 @@ -package com.eaio.uuid; - -/** -* com/eaio/uuid/UUIDHolder.java . -* Generated by the IDL-to-Java compiler (portable), version "3.1" -* from uuid.idl -* Sonntag, 7. März 2004 21.35 Uhr CET -*/ - - -/** - * The UUID struct. 
- */ -public final class UUIDHolder implements org.omg.CORBA.portable.Streamable -{ - public com.eaio.uuid.UUID value = null; - - public UUIDHolder () - { - } - - public UUIDHolder (com.eaio.uuid.UUID initialValue) - { - value = initialValue; - } - - public void _read (org.omg.CORBA.portable.InputStream i) - { - value = com.eaio.uuid.UUIDHelper.read (i); - } - - public void _write (org.omg.CORBA.portable.OutputStream o) - { - com.eaio.uuid.UUIDHelper.write (o, value); - } - - public org.omg.CORBA.TypeCode _type () - { - return com.eaio.uuid.UUIDHelper.type (); - } - -} diff --git a/akka-actor/src/main/resources/reference.conf b/akka-actor/src/main/resources/reference.conf index 65ffac2342..cdab8e968e 100644 --- a/akka-actor/src/main/resources/reference.conf +++ b/akka-actor/src/main/resources/reference.conf @@ -159,37 +159,60 @@ akka { # parameters type = "Dispatcher" - # Keep alive time for threads - keep-alive-time = 60s + # Which kind of ExecutorService to use for this dispatcher + # Valid options: + # "fork-join-executor" requires a "fork-join-executor" section + # "thread-pool-executor" requires a "thread-pool-executor" section + # or + # A FQCN of a class extending ExecutorServiceConfigurator + executor = "fork-join-executor" - # minimum number of threads to cap factor-based core number to - core-pool-size-min = 8 + # This will be used if you have set "executor = "fork-join-executor"" + fork-join-executor { + # Min number of threads to cap factor-based parallelism number to + parallelism-min = 8 - # No of core threads ... ceil(available processors * factor) - core-pool-size-factor = 3.0 + # Parallelism (threads) ... ceil(available processors * factor) + parallelism-factor = 3.0 - # maximum number of threads to cap factor-based number to - core-pool-size-max = 64 + # Max number of threads to cap factor-based parallelism number to + parallelism-max = 64 + } - # Hint: max-pool-size is only used for bounded task queues - # minimum number of threads to cap factor-based max number to - max-pool-size-min = 8 + # This will be used if you have set "executor = "thread-pool-executor"" + thread-pool-executor { + # Keep alive time for threads + keep-alive-time = 60s - # Max no of threads ... ceil(available processors * factor) - max-pool-size-factor = 3.0 + # Min number of threads to cap factor-based core number to + core-pool-size-min = 8 - # maximum number of threads to cap factor-based max number to - max-pool-size-max = 64 + # No of core threads ... ceil(available processors * factor) + core-pool-size-factor = 3.0 - # Specifies the bounded capacity of the task queue (< 1 == unbounded) - task-queue-size = -1 + # Max number of threads to cap factor-based number to + core-pool-size-max = 64 - # Specifies which type of task queue will be used, can be "array" or - # "linked" (default) - task-queue-type = "linked" + # Hint: max-pool-size is only used for bounded task queues + # minimum number of threads to cap factor-based max number to + max-pool-size-min = 8 - # Allow core threads to time out - allow-core-timeout = on + # Max no of threads ... 
ceil(available processors * factor) + max-pool-size-factor = 3.0 + + # Max number of threads to cap factor-based max number to + max-pool-size-max = 64 + + # Specifies the bounded capacity of the task queue (< 1 == unbounded) + task-queue-size = -1 + + # Specifies which type of task queue will be used, can be "array" or + # "linked" (default) + task-queue-type = "linked" + + # Allow core threads to time out + allow-core-timeout = on + } # How long time the dispatcher will wait for new actors until it shuts down shutdown-timeout = 1s diff --git a/akka-actor/src/main/scala/akka/actor/Actor.scala b/akka-actor/src/main/scala/akka/actor/Actor.scala index 94aef4bdef..fd16ed3f39 100644 --- a/akka-actor/src/main/scala/akka/actor/Actor.scala +++ b/akka-actor/src/main/scala/akka/actor/Actor.scala @@ -7,6 +7,7 @@ package akka.actor import akka.AkkaException import scala.reflect.BeanProperty import scala.util.control.NoStackTrace +import scala.collection.immutable.Stack import java.util.regex.Pattern /** @@ -112,6 +113,7 @@ object Actor { def isDefinedAt(x: Any) = false def apply(x: Any) = throw new UnsupportedOperationException("Empty behavior apply()") } + } /** @@ -172,7 +174,7 @@ trait Actor { type Receive = Actor.Receive /** - * Stores the context for this actor, including self, sender, and hotswap. + * Stores the context for this actor, including self, and sender. * It is implicit to support operations such as `forward`. * * [[akka.actor.ActorContext]] is the Scala API. `getContext` returns a @@ -281,15 +283,37 @@ trait Actor { // ==== INTERNAL IMPLEMENTATION DETAILS ==== // ========================================= + /** + * For Akka internal use only. + */ private[akka] final def apply(msg: Any) = { - val behaviorStack = context.asInstanceOf[ActorCell].hotswap - msg match { - case msg if behaviorStack.nonEmpty && behaviorStack.head.isDefinedAt(msg) ⇒ behaviorStack.head.apply(msg) - case msg if behaviorStack.isEmpty && processingBehavior.isDefinedAt(msg) ⇒ processingBehavior.apply(msg) - case unknown ⇒ unhandled(unknown) - } + // TODO would it be more efficient to assume that most messages are matched and catch MatchError instead of using isDefinedAt? + val head = behaviorStack.head + if (head.isDefinedAt(msg)) head.apply(msg) else unhandled(msg) } - private[this] val processingBehavior = receive //ProcessingBehavior is the original behavior + /** + * For Akka internal use only. + */ + private[akka] def pushBehavior(behavior: Receive): Unit = { + behaviorStack = behaviorStack.push(behavior) + } + + /** + * For Akka internal use only. + */ + private[akka] def popBehavior(): Unit = { + val original = behaviorStack + val popped = original.pop + behaviorStack = if (popped.isEmpty) original else popped + } + + /** + * For Akka internal use only. 
+ */ + private[akka] def clearBehaviorStack(): Unit = + behaviorStack = Stack.empty[Receive].push(behaviorStack.last) + + private var behaviorStack: Stack[Receive] = Stack.empty[Receive].push(receive) } diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index 783f50f82c..858a2ecf1b 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -174,8 +174,7 @@ private[akka] class ActorCell( val self: InternalActorRef, val props: Props, @volatile var parent: InternalActorRef, - /*no member*/ _receiveTimeout: Option[Duration], - var hotswap: Stack[PartialFunction[Any, Unit]]) extends UntypedActorContext { + /*no member*/ _receiveTimeout: Option[Duration]) extends UntypedActorContext { import ActorCell._ @@ -209,10 +208,10 @@ private[akka] class ActorCell( /** * In milliseconds */ - final var receiveTimeoutData: (Long, Cancellable) = + var receiveTimeoutData: (Long, Cancellable) = if (_receiveTimeout.isDefined) (_receiveTimeout.get.toMillis, emptyCancellable) else emptyReceiveTimeoutData - final var childrenRefs: TreeMap[String, ChildRestartStats] = emptyChildrenRefs + var childrenRefs: TreeMap[String, ChildRestartStats] = emptyChildrenRefs private def _actorOf(props: Props, name: String): ActorRef = { if (system.settings.SerializeAllCreators && !props.creator.isInstanceOf[NoSerializationVerificationNeeded]) { @@ -255,16 +254,16 @@ private[akka] class ActorCell( a.stop() } - final var currentMessage: Envelope = null + var currentMessage: Envelope = null - final var actor: Actor = _ + var actor: Actor = _ - final var stopping = false + var stopping = false @volatile //This must be volatile since it isn't protected by the mailbox status var mailbox: Mailbox = _ - final var nextNameSequence: Long = 0 + var nextNameSequence: Long = 0 //Not thread safe, so should only be used inside the actor that inhabits this ActorCell final protected def randomName(): String = { @@ -389,7 +388,6 @@ private[akka] class ActorCell( } } actor = freshActor // assign it here so if preStart fails, we can null out the sef-refs next call - hotswap = Props.noHotSwap // Reset the behavior freshActor.postRestart(cause) if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, clazz(freshActor), "restarted")) @@ -509,9 +507,9 @@ private[akka] class ActorCell( } } - def become(behavior: Actor.Receive, discardOld: Boolean = true) { + def become(behavior: Actor.Receive, discardOld: Boolean = true): Unit = { if (discardOld) unbecome() - hotswap = hotswap.push(behavior) + actor.pushBehavior(behavior) } /** @@ -527,10 +525,7 @@ private[akka] class ActorCell( become(newReceive, discardOld) } - def unbecome() { - val h = hotswap - if (h.nonEmpty) hotswap = h.pop - } + def unbecome(): Unit = actor.popBehavior() def autoReceiveMessage(msg: Envelope) { if (system.settings.DebugAutoReceive) @@ -547,9 +542,9 @@ private[akka] class ActorCell( } private def doTerminate() { + val a = actor try { try { - val a = actor if (a ne null) a.postStop() } finally { dispatcher.detach(this) @@ -563,7 +558,7 @@ private[akka] class ActorCell( } finally { currentMessage = null clearActorFields() - hotswap = Props.noHotSwap + if (a ne null) a.clearBehaviorStack() } } } diff --git a/akka-actor/src/main/scala/akka/actor/ActorRef.scala b/akka-actor/src/main/scala/akka/actor/ActorRef.scala index 753adaa9fa..38e8ab679f 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRef.scala +++ 
b/akka-actor/src/main/scala/akka/actor/ActorRef.scala @@ -57,7 +57,7 @@ import akka.event.LoggingAdapter * * } else if (o instanceof Request3) { * val msg = ((Request3) o).getMsg(); - * getSender().tell(other.ask(msg, 5000)); // reply with Future for holding the other’s reply (timeout 5 seconds) + * getSender().tell(ask(other, msg, 5000)); // reply with Future for holding the other’s reply (timeout 5 seconds) * * } else { * unhandled(o); @@ -224,8 +224,7 @@ private[akka] class LocalActorRef private[akka] ( _supervisor: InternalActorRef, val path: ActorPath, val systemService: Boolean = false, - _receiveTimeout: Option[Duration] = None, - _hotswap: Stack[PartialFunction[Any, Unit]] = Props.noHotSwap) + _receiveTimeout: Option[Duration] = None) extends InternalActorRef with LocalRef { /* @@ -238,7 +237,7 @@ private[akka] class LocalActorRef private[akka] ( * us to use purely factory methods for creating LocalActorRefs. */ @volatile - private var actorCell = newActorCell(_system, this, _props, _supervisor, _receiveTimeout, _hotswap) + private var actorCell = newActorCell(_system, this, _props, _supervisor, _receiveTimeout) actorCell.start() protected def newActorCell( @@ -246,9 +245,8 @@ private[akka] class LocalActorRef private[akka] ( ref: InternalActorRef, props: Props, supervisor: InternalActorRef, - receiveTimeout: Option[Duration], - hotswap: Stack[PartialFunction[Any, Unit]]): ActorCell = - new ActorCell(system, ref, props, supervisor, receiveTimeout, hotswap) + receiveTimeout: Option[Duration]): ActorCell = + new ActorCell(system, ref, props, supervisor, receiveTimeout) protected def actorContext: ActorContext = actorCell diff --git a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala old mode 100755 new mode 100644 diff --git a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala index e3235a5cec..c7a868ffd9 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala @@ -1,6 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ + package akka.actor import akka.config.ConfigurationException diff --git a/akka-actor/src/main/scala/akka/actor/FSM.scala b/akka-actor/src/main/scala/akka/actor/FSM.scala index 5660811c00..b277142e76 100644 --- a/akka-actor/src/main/scala/akka/actor/FSM.scala +++ b/akka-actor/src/main/scala/akka/actor/FSM.scala @@ -48,6 +48,14 @@ object FSM { } } + /** + * This extractor is just convenience for matching a (S, S) pair, including a + * reminder what the new state is. + */ + object -> { + def unapply[S](in: (S, S)) = Some(in) + } + case class LogEntry[S, D](stateName: S, stateData: D, event: Any) case class State[S, D](stateName: S, stateData: D, timeout: Option[Duration] = None, stopReason: Option[Reason] = None, replies: List[Any] = Nil) { @@ -174,6 +182,10 @@ trait FSM[S, D] extends Listeners { type Timeout = Option[Duration] type TransitionHandler = PartialFunction[(S, S), Unit] + // “import” so that it is visible without an import + val -> = FSM.-> + val StateTimeout = FSM.StateTimeout + val log = Logging(context.system, this) /** @@ -284,14 +296,6 @@ trait FSM[S, D] extends Listeners { */ protected final def setStateTimeout(state: S, timeout: Timeout): Unit = stateTimeouts(state) = timeout - /** - * This extractor is just convenience for matching a (S, S) pair, including a - * reminder what the new state is. 
- */ - object -> { - def unapply[S](in: (S, S)) = Some(in) - } - /** * Set handler which is called upon each state transition, i.e. not when * staying in the same state. This may use the pair extractor defined in the @@ -533,9 +537,6 @@ trait FSM[S, D] extends Listeners { } case class Event(event: Any, stateData: D) - object Ev { - def unapply[D](e: Event): Option[Any] = Some(e.event) - } case class StopEvent[S, D](reason: Reason, currentState: S, stateData: D) } diff --git a/akka-actor/src/main/scala/akka/actor/FaultHandling.scala b/akka-actor/src/main/scala/akka/actor/FaultHandling.scala index 895268fb44..8a21f841bb 100644 --- a/akka-actor/src/main/scala/akka/actor/FaultHandling.scala +++ b/akka-actor/src/main/scala/akka/actor/FaultHandling.scala @@ -47,36 +47,36 @@ case class ChildRestartStats(val child: ActorRef, var maxNrOfRetriesCount: Int = trait SupervisorStrategyLowPriorityImplicits { this: SupervisorStrategy.type ⇒ /** - * Implicit conversion from `Seq` of Cause-Action pairs to a `Decider`. See makeDecider(causeAction). + * Implicit conversion from `Seq` of Cause-Directive pairs to a `Decider`. See makeDecider(causeDirective). */ - implicit def seqCauseAction2Decider(trapExit: Iterable[CauseAction]): Decider = makeDecider(trapExit) + implicit def seqCauseDirective2Decider(trapExit: Iterable[CauseDirective]): Decider = makeDecider(trapExit) // the above would clash with seqThrowable2Decider for empty lists } object SupervisorStrategy extends SupervisorStrategyLowPriorityImplicits { - sealed trait Action + sealed trait Directive /** * Resumes message processing for the failed Actor */ - case object Resume extends Action + case object Resume extends Directive /** * Discards the old Actor instance and replaces it with a new, * then resumes message processing. */ - case object Restart extends Action + case object Restart extends Directive /** * Stops the Actor */ - case object Stop extends Action + case object Stop extends Directive /** * Escalates the failure to the supervisor of the supervisor, * by rethrowing the cause of the failure. */ - case object Escalate extends Action + case object Escalate extends Directive /** * Resumes message processing for the failed Actor @@ -127,9 +127,9 @@ object SupervisorStrategy extends SupervisorStrategyLowPriorityImplicits { */ implicit def seqThrowable2Decider(trapExit: Seq[Class[_ <: Throwable]]): Decider = makeDecider(trapExit) - type Decider = PartialFunction[Throwable, Action] - type JDecider = akka.japi.Function[Throwable, Action] - type CauseAction = (Class[_ <: Throwable], Action) + type Decider = PartialFunction[Throwable, Directive] + type JDecider = akka.japi.Function[Throwable, Directive] + type CauseDirective = (Class[_ <: Throwable], Directive) /** * Decider builder which just checks whether one of @@ -152,14 +152,14 @@ object SupervisorStrategy extends SupervisorStrategyLowPriorityImplicits { def makeDecider(trapExit: JIterable[Class[_ <: Throwable]]): Decider = makeDecider(trapExit.toSeq) /** - * Decider builder for Iterables of cause-action pairs, e.g. a map obtained + * Decider builder for Iterables of cause-directive pairs, e.g. a map obtained * from configuration; will sort the pairs so that the most specific type is * checked before all its subtypes, allowing carving out subtrees of the * Throwable hierarchy. 
*/ - def makeDecider(flat: Iterable[CauseAction]): Decider = { - val actions = sort(flat) - return { case x ⇒ actions find (_._1 isInstance x) map (_._2) getOrElse Escalate } + def makeDecider(flat: Iterable[CauseDirective]): Decider = { + val directives = sort(flat) + return { case x ⇒ directives find (_._1 isInstance x) map (_._2) getOrElse Escalate } } def makeDecider(func: JDecider): Decider = { @@ -170,8 +170,8 @@ object SupervisorStrategy extends SupervisorStrategyLowPriorityImplicits { * Sort so that subtypes always precede their supertypes, but without * obeying any order between unrelated subtypes (insert sort). */ - def sort(in: Iterable[CauseAction]): Seq[CauseAction] = - (new ArrayBuffer[CauseAction](in.size) /: in) { (buf, ca) ⇒ + def sort(in: Iterable[CauseDirective]): Seq[CauseDirective] = + (new ArrayBuffer[CauseDirective](in.size) /: in) { (buf, ca) ⇒ buf.indexWhere(_._1 isAssignableFrom ca._1) match { case -1 ⇒ buf append ca case x ⇒ buf insert (x, ca) @@ -215,8 +215,8 @@ abstract class SupervisorStrategy { * Returns whether it processed the failure or not */ def handleFailure(context: ActorContext, child: ActorRef, cause: Throwable, stats: ChildRestartStats, children: Iterable[ChildRestartStats]): Boolean = { - val action = if (decider.isDefinedAt(cause)) decider(cause) else Escalate - action match { + val directive = if (decider.isDefinedAt(cause)) decider(cause) else Escalate + directive match { case Resume ⇒ child.asInstanceOf[InternalActorRef].resume(); true case Restart ⇒ processFailure(context, true, child, cause, stats, children); true case Stop ⇒ processFailure(context, false, child, cause, stats, children); true @@ -227,10 +227,13 @@ abstract class SupervisorStrategy { } /** - * Restart all child actors when one fails + * Applies the fault handling `Directive` (Resume, Restart, Stop) specified in the `Decider` + * to all children when one fails, as opposed to [[akka.actor.OneForOneStrategy]] that applies + * it only to the child actor that failed. + * * @param maxNrOfRetries the number of times an actor is allowed to be restarted, negative value means no limit * @param withinTimeRange duration of the time window for maxNrOfRetries, Duration.Inf means no window - * @param decider = mapping from Throwable to [[akka.actor.SupervisorStrategy.Action]], you can also use a + * @param decider = mapping from Throwable to [[akka.actor.SupervisorStrategy.Directive]], you can also use a * `Seq` of Throwables which maps the given Throwables to restarts, otherwise escalates. */ case class AllForOneStrategy(maxNrOfRetries: Int = -1, withinTimeRange: Duration = Duration.Inf)(val decider: SupervisorStrategy.Decider) @@ -270,10 +273,13 @@ case class AllForOneStrategy(maxNrOfRetries: Int = -1, withinTimeRange: Duration } /** - * Restart a child actor when it fails + * Applies the fault handling `Directive` (Resume, Restart, Stop) specified in the `Decider` + * to the child actor that failed, as opposed to [[akka.actor.AllForOneStrategy]] that applies + * it to all children. 
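For reference, the renamed Directive API as used from Java looks like this; the snippet mirrors the UntypedActor documentation updated elsewhere in this patch, with actor-specific details omitted:

    import static akka.actor.SupervisorStrategy.escalate;
    import static akka.actor.SupervisorStrategy.restart;
    import static akka.actor.SupervisorStrategy.resume;
    import akka.actor.OneForOneStrategy;
    import akka.actor.SupervisorStrategy;
    import akka.actor.SupervisorStrategy.Directive;
    import akka.japi.Function;
    import akka.util.Duration;

    public class StrategySketch {
        // map Throwables to Directives; anything unmatched escalates
        static final SupervisorStrategy strategy =
            new OneForOneStrategy(10, Duration.parse("1 minute"),
                new Function<Throwable, Directive>() {
                    public Directive apply(Throwable t) {
                        if (t instanceof ArithmeticException) return resume();
                        if (t instanceof NullPointerException) return restart();
                        return escalate();
                    }
                });
    }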
+ * * @param maxNrOfRetries the number of times an actor is allowed to be restarted, negative value means no limit * @param withinTimeRange duration of the time window for maxNrOfRetries, Duration.Inf means no window - * @param decider = mapping from Throwable to [[akka.actor.SupervisorStrategy.Action]], you can also use a + * @param decider = mapping from Throwable to [[akka.actor.SupervisorStrategy.Directive]], you can also use a * `Seq` of Throwables which maps the given Throwables to restarts, otherwise escalates. */ case class OneForOneStrategy(maxNrOfRetries: Int = -1, withinTimeRange: Duration = Duration.Inf)(val decider: SupervisorStrategy.Decider) diff --git a/akka-actor/src/main/scala/akka/actor/Props.scala b/akka-actor/src/main/scala/akka/actor/Props.scala index 260f882753..74cf45ebe1 100644 --- a/akka-actor/src/main/scala/akka/actor/Props.scala +++ b/akka-actor/src/main/scala/akka/actor/Props.scala @@ -24,7 +24,6 @@ object Props { final val defaultDeploy = Deploy() - final val noHotSwap: Stack[Actor.Receive] = Stack.empty final val empty = new Props(() ⇒ new Actor { def receive = Actor.emptyBehavior }) /** diff --git a/akka-actor/src/main/scala/akka/actor/Scheduler.scala b/akka-actor/src/main/scala/akka/actor/Scheduler.scala index 72d429b450..9ef93ef05d 100644 --- a/akka-actor/src/main/scala/akka/actor/Scheduler.scala +++ b/akka-actor/src/main/scala/akka/actor/Scheduler.scala @@ -1,15 +1,7 @@ -/* - * Copyright 2007 WorldWide Conferencing, LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. +/** + * Copyright (C) 2009-2011 Typesafe Inc. */ + package akka.actor import akka.util.Duration @@ -134,7 +126,7 @@ class DefaultScheduler(hashedWheelTimer: HashedWheelTimer, receiver ! 
message // Check if the receiver is still alive and kicking before reschedule the task if (receiver.isTerminated) { - log.warning("Could not reschedule message to be sent because receiving actor has been terminated.") + log.debug("Could not reschedule message to be sent because receiving actor has been terminated.") } else { scheduleNext(timeout, delay, continuousCancellable) } diff --git a/akka-actor/src/main/scala/akka/actor/UntypedActor.scala b/akka-actor/src/main/scala/akka/actor/UntypedActor.scala index 6dd4d8c2c5..daa7467196 100644 --- a/akka-actor/src/main/scala/akka/actor/UntypedActor.scala +++ b/akka-actor/src/main/scala/akka/actor/UntypedActor.scala @@ -37,9 +37,9 @@ import akka.japi.{ Creator } * } * * private static SupervisorStrategy strategy = new OneForOneStrategy(10, Duration.parse("1 minute"), - * new Function<Throwable, Action>() { + * new Function<Throwable, Directive>() { * @Override - * public Action apply(Throwable t) { + * public Directive apply(Throwable t) { * if (t instanceof ArithmeticException) { * return resume(); * } else if (t instanceof NullPointerException) { diff --git a/akka-actor/src/main/scala/akka/actor/package.scala b/akka-actor/src/main/scala/akka/actor/package.scala index c03d7f8689..9ec5348fee 100644 --- a/akka-actor/src/main/scala/akka/actor/package.scala +++ b/akka-actor/src/main/scala/akka/actor/package.scala @@ -27,15 +27,4 @@ package object actor { val i = n.lastIndexOf('.') n.substring(i + 1) } - - implicit def future2actor[T](f: akka.dispatch.Future[T]) = new { - def pipeTo(actor: ActorRef): this.type = { - f onComplete { - case Right(r) ⇒ actor ! r - case Left(f) ⇒ actor ! Status.Failure(f) - } - this - } - } - } diff --git a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala index 3a788c0fd7..943eeb2b33 100644 --- a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala @@ -14,6 +14,7 @@ import akka.event.EventStream import com.typesafe.config.Config import akka.util.ReflectiveAccess import akka.serialization.SerializationExtension +import akka.jsr166y.ForkJoinPool final case class Envelope(val message: Any, val sender: ActorRef)(system: ActorSystem) { if (message.isInstanceOf[AnyRef]) { @@ -292,6 +293,8 @@ abstract class MessageDispatcher(val prerequisites: DispatcherPrerequisites) ext protected[akka] def shutdown(): Unit } +abstract class ExecutorServiceConfigurator(config: Config, prerequisites: DispatcherPrerequisites) extends ExecutorServiceFactoryProvider + /** * Base class to be used for hooking in new dispatchers into Dispatchers. */ @@ -333,14 +336,30 @@ abstract class MessageDispatcherConfigurator(val config: Config, val prerequisit } } - def configureThreadPool( - config: Config, - createDispatcher: ⇒ (ThreadPoolConfig) ⇒ MessageDispatcher): ThreadPoolConfigDispatcherBuilder = { - import ThreadPoolConfigDispatcherBuilder.conf_?
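The `configureExecutor()` introduced in the next hunk selects an `ExecutorServiceConfigurator` from the `executor` config key. A sketch of the corresponding dispatcher configuration, with key names taken from that hunk; the dispatcher name `my-dispatcher` and the inline comments are illustrative only:

```scala
import com.typesafe.config.ConfigFactory

// "executor" may be "fork-join-executor" (also the default for null/""),
// "thread-pool-executor", or the FQCN of a custom ExecutorServiceConfigurator
// exposing a (Config, DispatcherPrerequisites) constructor.
val cfg = ConfigFactory.parseString("""
  my-dispatcher {
    executor = "fork-join-executor"
    fork-join-executor {
      parallelism-min = 2      # lower bound on parallelism
      parallelism-factor = 3.0 # cores * factor, clamped via scaledPoolSize
      parallelism-max = 64     # upper bound on parallelism
    }
  }
""")
```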
+ def configureExecutor(): ExecutorServiceConfigurator = { + config.getString("executor") match { + case null | "" | "fork-join-executor" ⇒ new ForkJoinExecutorConfigurator(config.getConfig("fork-join-executor"), prerequisites) + case "thread-pool-executor" ⇒ new ThreadPoolExecutorConfigurator(config.getConfig("thread-pool-executor"), prerequisites) + case fqcn ⇒ + val constructorSignature = Array[Class[_]](classOf[Config], classOf[DispatcherPrerequisites]) + ReflectiveAccess.createInstance[ExecutorServiceConfigurator](fqcn, constructorSignature, Array[AnyRef](config, prerequisites), prerequisites.classloader) match { + case Right(instance) ⇒ instance + case Left(exception) ⇒ throw new IllegalArgumentException( + ("""Cannot instantiate ExecutorServiceConfigurator ("executor = [%s]"), defined in [%s], + make sure it has an accessible constructor with a [%s,%s] signature""") + .format(fqcn, config.getString("id"), classOf[Config], classOf[DispatcherPrerequisites]), exception) + } + } + } +} - //Apply the following options to the config if they are present in the config +class ThreadPoolExecutorConfigurator(config: Config, prerequisites: DispatcherPrerequisites) extends ExecutorServiceConfigurator(config, prerequisites) { + import ThreadPoolConfigBuilder.conf_? - ThreadPoolConfigDispatcherBuilder(createDispatcher, ThreadPoolConfig()) + val threadPoolConfig: ThreadPoolConfig = createThreadPoolConfigBuilder(config, prerequisites).config + + protected def createThreadPoolConfigBuilder(config: Config, prerequisites: DispatcherPrerequisites): ThreadPoolConfigBuilder = { + ThreadPoolConfigBuilder(ThreadPoolConfig()) .setKeepAliveTime(Duration(config getMilliseconds "keep-alive-time", TimeUnit.MILLISECONDS)) .setAllowCoreThreadTimeout(config getBoolean "allow-core-timeout") .setCorePoolSizeFromFactor(config getInt "core-pool-size-min", config getDouble "core-pool-size-factor", config getInt "core-pool-size-max") @@ -356,4 +375,27 @@ abstract class MessageDispatcherConfigurator(val config: Config, val prerequisit case _ ⇒ None })(queueFactory ⇒ _.setQueueFactory(queueFactory))) } + + def createExecutorServiceFactory(name: String, threadFactory: ThreadFactory): ExecutorServiceFactory = + threadPoolConfig.createExecutorServiceFactory(name, threadFactory) +} + +class ForkJoinExecutorConfigurator(config: Config, prerequisites: DispatcherPrerequisites) extends ExecutorServiceConfigurator(config, prerequisites) { + + def validate(t: ThreadFactory): ForkJoinPool.ForkJoinWorkerThreadFactory = prerequisites.threadFactory match { + case correct: ForkJoinPool.ForkJoinWorkerThreadFactory ⇒ correct + case x ⇒ throw new IllegalStateException("The prerequisites for the ForkJoinExecutorConfigurator is a ForkJoinPool.ForkJoinWorkerThreadFactory!") + } + + class ForkJoinExecutorServiceFactory(val threadFactory: ForkJoinPool.ForkJoinWorkerThreadFactory, + val parallelism: Int) extends ExecutorServiceFactory { + def createExecutorService: ExecutorService = new ForkJoinPool(parallelism, threadFactory, MonitorableThreadFactory.doNothing, true) + } + final def createExecutorServiceFactory(name: String, threadFactory: ThreadFactory): ExecutorServiceFactory = + new ForkJoinExecutorServiceFactory( + validate(threadFactory), + ThreadPoolConfig.scaledPoolSize( + config.getInt("parallelism-min"), + config.getDouble("parallelism-factor"), + config.getInt("parallelism-max"))) } diff --git a/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala index 
c4742df81a..8542ac69c8 100644 --- a/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala @@ -31,9 +31,9 @@ class BalancingDispatcher( throughput: Int, throughputDeadlineTime: Duration, mailboxType: MailboxType, - config: ThreadPoolConfig, + _executorServiceFactoryProvider: ExecutorServiceFactoryProvider, _shutdownTimeout: Duration) - extends Dispatcher(_prerequisites, _id, throughput, throughputDeadlineTime, mailboxType, config, _shutdownTimeout) { + extends Dispatcher(_prerequisites, _id, throughput, throughputDeadlineTime, mailboxType, _executorServiceFactoryProvider, _shutdownTimeout) { val buddies = new ConcurrentSkipListSet[ActorCell](akka.util.Helpers.IdentityHashComparator) val rebalance = new AtomicBoolean(false) diff --git a/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala b/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala index 17a2410784..d71604fd1a 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala @@ -158,15 +158,14 @@ class Dispatchers(val settings: ActorSystem.Settings, val prerequisites: Dispatc class DispatcherConfigurator(config: Config, prerequisites: DispatcherPrerequisites) extends MessageDispatcherConfigurator(config, prerequisites) { - private val instance = - configureThreadPool(config, - threadPoolConfig ⇒ new Dispatcher(prerequisites, - config.getString("id"), - config.getInt("throughput"), - Duration(config.getNanoseconds("throughput-deadline-time"), TimeUnit.NANOSECONDS), - mailboxType, - threadPoolConfig, - Duration(config.getMilliseconds("shutdown-timeout"), TimeUnit.MILLISECONDS))).build + private val instance = new Dispatcher( + prerequisites, + config.getString("id"), + config.getInt("throughput"), + Duration(config.getNanoseconds("throughput-deadline-time"), TimeUnit.NANOSECONDS), + mailboxType, + configureExecutor(), + Duration(config.getMilliseconds("shutdown-timeout"), TimeUnit.MILLISECONDS)) /** * Returns the same dispatcher instance for each invocation @@ -182,14 +181,13 @@ class DispatcherConfigurator(config: Config, prerequisites: DispatcherPrerequisi class BalancingDispatcherConfigurator(config: Config, prerequisites: DispatcherPrerequisites) extends MessageDispatcherConfigurator(config, prerequisites) { - private val instance = - configureThreadPool(config, - threadPoolConfig ⇒ new BalancingDispatcher(prerequisites, - config.getString("id"), - config.getInt("throughput"), - Duration(config.getNanoseconds("throughput-deadline-time"), TimeUnit.NANOSECONDS), - mailboxType, threadPoolConfig, - Duration(config.getMilliseconds("shutdown-timeout"), TimeUnit.MILLISECONDS))).build + private val instance = new BalancingDispatcher( + prerequisites, + config.getString("id"), + config.getInt("throughput"), + Duration(config.getNanoseconds("throughput-deadline-time"), TimeUnit.NANOSECONDS), + mailboxType, configureExecutor(), + Duration(config.getMilliseconds("shutdown-timeout"), TimeUnit.MILLISECONDS)) /** * Returns the same dispatcher instance for each invocation @@ -204,13 +202,23 @@ class BalancingDispatcherConfigurator(config: Config, prerequisites: DispatcherP */ class PinnedDispatcherConfigurator(config: Config, prerequisites: DispatcherPrerequisites) extends MessageDispatcherConfigurator(config, prerequisites) { + + val threadPoolConfig: ThreadPoolConfig = configureExecutor() match { + case e: ThreadPoolExecutorConfigurator ⇒ e.threadPoolConfig + case other ⇒ + 
prerequisites.eventStream.publish( + Warning("PinnedDispatcherConfigurator", + this.getClass, + "PinnedDispatcher [%s] not configured to use ThreadPoolExecutor, falling back to default config.".format( + config.getString("id")))) + ThreadPoolConfig() + } /** * Creates new dispatcher for each invocation. */ - override def dispatcher(): MessageDispatcher = configureThreadPool(config, - threadPoolConfig ⇒ - new PinnedDispatcher(prerequisites, null, config.getString("id"), mailboxType, - Duration(config.getMilliseconds("shutdown-timeout"), TimeUnit.MILLISECONDS), - threadPoolConfig)).build + override def dispatcher(): MessageDispatcher = + new PinnedDispatcher( + prerequisites, null, config.getString("id"), mailboxType, + Duration(config.getMilliseconds("shutdown-timeout"), TimeUnit.MILLISECONDS), threadPoolConfig) } diff --git a/akka-actor/src/main/scala/akka/dispatch/Future.scala b/akka-actor/src/main/scala/akka/dispatch/Future.scala index 495faba5d6..08a73a1925 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Future.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Future.scala @@ -340,9 +340,9 @@ object Future { } } -sealed trait Future[+T] extends japi.Future[T] with Await.Awaitable[T] { +sealed trait Future[+T] extends Await.Awaitable[T] { - implicit def executor: ExecutionContext + protected implicit def executor: ExecutionContext protected final def resolve[X](source: Either[Throwable, X]): Either[Throwable, X] = source match { case Left(t: scala.runtime.NonLocalReturnControl[_]) ⇒ Right(t.value.asInstanceOf[X]) @@ -362,7 +362,7 @@ sealed trait Future[+T] extends japi.Future[T] with Await.Awaitable[T] { case Right(r) ⇒ that onSuccess { case r2 ⇒ p success ((r, r2)) } } that onFailure { case f ⇒ p failure f } - p + p.future } /** @@ -435,20 +435,20 @@ sealed trait Future[+T] extends japi.Future[T] with Await.Awaitable[T] { case Left(t) ⇒ p success t case Right(r) ⇒ p failure new NoSuchElementException("Future.failed not completed with a throwable. Instead completed with: " + r) } - p + p.future } /** * Returns a new Future that will either hold the successful value of this Future, * or, if this Future fails, it will hold the result of "that" Future. */ - def or[U >: T](that: Future[U]): Future[U] = { + def fallbackTo[U >: T](that: Future[U]): Future[U] = { val p = Promise[U]() onComplete { case r @ Right(_) ⇒ p complete r case _ ⇒ p completeWith that } - p + p.future } /** @@ -463,12 +463,59 @@ sealed trait Future[+T] extends japi.Future[T] with Await.Awaitable[T] { * */ final def recover[A >: T](pf: PartialFunction[Throwable, A]): Future[A] = { - val future = Promise[A]() + val p = Promise[A]() onComplete { - case Left(e) if pf isDefinedAt e ⇒ future.complete(try { Right(pf(e)) } catch { case x: Exception ⇒ Left(x) }) - case otherwise ⇒ future complete otherwise + case Left(e) if pf isDefinedAt e ⇒ p.complete(try { Right(pf(e)) } catch { case x: Exception ⇒ Left(x) }) + case otherwise ⇒ p complete otherwise } - future + p.future + } + + /** + * Returns a new Future that will, in case this future fails, + * be completed with the resulting Future of the given PartialFunction, + * if the given PartialFunction matches the failure of the original Future. + * + * If the PartialFunction throws, that Throwable will be propagated to the returned Future.
+ * + * Example: + * + * {{{ + * val f = Future { Int.MaxValue } + * Future (6 / 0) recoverWith { case e: ArithmeticException => f } // result: Int.MaxValue + * }}} + */ + def recoverWith[U >: T](pf: PartialFunction[Throwable, Future[U]]): Future[U] = { + val p = Promise[U]() + + onComplete { + case Left(t) if pf isDefinedAt t ⇒ + try { p completeWith pf(t) } catch { case t: Throwable ⇒ p complete resolve(Left(t)) } + case otherwise ⇒ p complete otherwise + } + + p.future + } + + /** + * Returns a new Future that will contain the completed result of this Future, + * and which will invoke the supplied PartialFunction when completed. + * + * This allows for establishing order of side-effects. + * + * {{{ + * Future { 5 } andThen { + * case something => assert(something is awesome) + * } andThen { + * case Left(t) => handleProblem(t) + * case Right(v) => dealWithSuccess(v) + * } + * }}} + */ + def andThen[U](pf: PartialFunction[Either[Throwable, T], U]): Future[T] = { + val p = Promise[T]() + onComplete { case r ⇒ try if (pf isDefinedAt r) pf(r) finally p complete r } + p.future } /** @@ -503,6 +550,10 @@ sealed trait Future[+T] extends japi.Future[T] with Await.Awaitable[T] { /** * Creates a new Future[A] which is completed with this Future's result if * that conforms to A's erased type or a ClassCastException otherwise. + * + * When used from Java, to create the Manifest, use: + * import static akka.japi.Util.manifest; + * future.mapTo(manifest(MyClass.class)); */ final def mapTo[A](implicit m: Manifest[A]): Future[A] = { val fa = Promise[A]() @@ -515,7 +566,7 @@ sealed trait Future[+T] extends japi.Future[T] with Await.Awaitable[T] { case e: ClassCastException ⇒ Left(e) }) } - fa + fa.future } /** @@ -546,13 +597,13 @@ sealed trait Future[+T] extends japi.Future[T] with Await.Awaitable[T] { logError("Future.flatMap", e) } } - p + p.future } /** * Same as onSuccess { case r => f(r) } but is also used in for-comprehensions */ - final def foreach(f: T ⇒ Unit): Unit = onComplete { + final def foreach[U](f: T ⇒ U): Unit = onComplete { case Right(r) ⇒ f(r) case _ ⇒ } @@ -586,7 +637,7 @@ sealed trait Future[+T] extends japi.Future[T] with Await.Awaitable[T] { Left(e) }) } - p + p.future } protected def logError(msg: String, problem: Throwable): Unit = { @@ -818,3 +869,158 @@ final class KeptPromise[T](suppliedValue: Either[Throwable, T])(implicit val exe case Right(r) ⇒ r } } + +/** + * This class contains bridge classes between Scala and Java. + * Internal use only. 
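Taken together with `recover`, the combinators added above chain naturally. A minimal sketch, assuming an implicit `ExecutionContext` (e.g. `system.dispatcher`) is in scope:

```scala
import akka.dispatch.{ Future, Promise }

val f: Future[Int] =
  Future(6 / 0)                                           // fails with ArithmeticException
    .recoverWith { case _: ArithmeticException ⇒ Future(Int.MaxValue) }
    .fallbackTo(Promise.successful(0).future)             // last resort if still failed
    .andThen {
      case Right(v) ⇒ println("got " + v)                 // side effects in completion order
      case Left(t)  ⇒ println("failed: " + t)
    }
```

Note that `fallbackTo` is the rename of the old `or`, so call sites keep their semantics: this Future wins if it succeeds, otherwise the result of `that` is used.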
+ */ +object japi { + @deprecated("Do not use this directly, use subclasses of this", "2.0") + class CallbackBridge[-T] extends PartialFunction[T, Unit] { + override final def isDefinedAt(t: T): Boolean = true + override final def apply(t: T): Unit = internal(t) + protected def internal(result: T): Unit = () + } + + @deprecated("Do not use this directly, use 'Recover'", "2.0") + class RecoverBridge[+T] extends PartialFunction[Throwable, T] { + override final def isDefinedAt(t: Throwable): Boolean = true + override final def apply(t: Throwable): T = internal(t) + protected def internal(result: Throwable): T = null.asInstanceOf[T] + } + + @deprecated("Do not use this directly, use subclasses of this", "2.0") + class BooleanFunctionBridge[-T] extends scala.Function1[T, Boolean] { + override final def apply(t: T): Boolean = internal(t) + protected def internal(result: T): Boolean = false + } + + @deprecated("Do not use this directly, use subclasses of this", "2.0") + class UnitFunctionBridge[-T] extends (T ⇒ Unit) { + override final def apply(t: T): Unit = internal(t) + protected def internal(result: T): Unit = () + } +} + +/** + * Callback for when a Future is completed successfully + * SAM (Single Abstract Method) class + * + * Java API + */ +abstract class OnSuccess[-T] extends japi.CallbackBridge[T] { + protected final override def internal(result: T) = onSuccess(result) + + /** + * This method will be invoked once when/if a Future that this callback is registered on + * becomes successfully completed + */ + def onSuccess(result: T): Unit +} + +/** + * Callback for when a Future is completed with a failure + * SAM (Single Abstract Method) class + * + * Java API + */ +abstract class OnFailure extends japi.CallbackBridge[Throwable] { + protected final override def internal(failure: Throwable) = onFailure(failure) + + /** + * This method will be invoked once when/if a Future that this callback is registered on + * becomes completed with a failure + */ + def onFailure(failure: Throwable): Unit +} + +/** + * Callback for when a Future is completed with either failure or a success + * SAM (Single Abstract Method) class + * + * Java API + */ +abstract class OnComplete[-T] extends japi.CallbackBridge[Either[Throwable, T]] { + protected final override def internal(value: Either[Throwable, T]): Unit = value match { + case Left(t) ⇒ onComplete(t, null.asInstanceOf[T]) + case Right(r) ⇒ onComplete(null, r) + } + + /** + * This method will be invoked once when/if a Future that this callback is registered on + * becomes completed with a failure or a success. + * In the case of success then "failure" will be null, and in the case of failure the "success" will be null. + */ + def onComplete(failure: Throwable, success: T): Unit +} + +/** + * Callback for the Future.recover operation that conditionally turns failures into successes. + * + * SAM (Single Abstract Method) class + * + * Java API + */ +abstract class Recover[+T] extends japi.RecoverBridge[T] { + protected final override def internal(result: Throwable): T = recover(result) + + /** + * This method will be invoked once when/if the Future this recover callback is registered on + * becomes completed with a failure. + * + * @returns a successful value for the passed in failure + * @throws the passed in failure to propagate it. 
+ * + * Java API + */ + @throws(classOf[Throwable]) + def recover(failure: Throwable): T +} + +/** + * Callback for the Future.filter operation that creates a new Future which will + * conditionally contain the success of another Future. + * + * SAM (Single Abstract Method) class + * Java API + */ +abstract class Filter[-T] extends japi.BooleanFunctionBridge[T] { + override final def internal(t: T): Boolean = filter(t) + + /** + * This method will be invoked once when/if a Future that this callback is registered on + * becomes completed with a success. + * + * @returns true if the successful value should be propagated to the new Future or not + */ + def filter(result: T): Boolean +} + +/** + * Callback for the Future.foreach operation that will be invoked if the Future that this callback + * is registered on becomes completed with a success. This method is essentially the same operation + * as onSuccess. + * + * SAM (Single Abstract Method) class + * Java API + */ +abstract class Foreach[-T] extends japi.UnitFunctionBridge[T] { + override final def internal(t: T): Unit = each(t) + + /** + * This method will be invoked once when/if a Future that this callback is registered on + * becomes successfully completed + */ + def each(result: T): Unit +} + +/** + * Callback for the Future.map and Future.flatMap operations that will be invoked + * if the Future that this callback is registered on becomes completed with a success. + * This callback is the equivalent of an akka.japi.Function + * + * SAM (Single Abstract Method) class + * + * Java API + */ +abstract class Mapper[-T, +R] extends scala.runtime.AbstractFunction1[T, R] diff --git a/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala b/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala index d2bc7ff01d..3097dfc05b 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala @@ -303,7 +303,7 @@ trait BoundedMessageQueueSemantics extends QueueBasedMessageQueue { final def enqueue(receiver: ActorRef, handle: Envelope) { if (pushTimeOut.length > 0) { queue.offer(handle, pushTimeOut.length, pushTimeOut.unit) || { - throw new MessageQueueAppendFailedException("Couldn't enqueue message " + handle + " to " + toString) + throw new MessageQueueAppendFailedException("Couldn't enqueue message " + handle + " to " + receiver) } } else queue put handle } diff --git a/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala b/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala index 8998ccca03..5be5f1b0e1 100644 --- a/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala +++ b/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala @@ -5,9 +5,20 @@ package akka.dispatch import java.util.Collection -import java.util.concurrent.atomic.AtomicLong import akka.util.Duration -import java.util.concurrent._ +import akka.jsr166y._ +import java.util.concurrent.atomic.AtomicLong +import java.util.concurrent.ArrayBlockingQueue +import java.util.concurrent.BlockingQueue +import java.util.concurrent.Callable +import java.util.concurrent.ExecutorService +import java.util.concurrent.LinkedBlockingQueue +import java.util.concurrent.RejectedExecutionHandler +import java.util.concurrent.RejectedExecutionException +import java.util.concurrent.SynchronousQueue +import java.util.concurrent.TimeUnit +import java.util.concurrent.ThreadFactory +import java.util.concurrent.ThreadPoolExecutor object ThreadPoolConfig { type QueueFactory = () ⇒ BlockingQueue[Runnable] @@ -86,70 
+97,65 @@ case class ThreadPoolConfig(allowCorePoolTimeout: Boolean = ThreadPoolConfig.def new ThreadPoolExecutorServiceFactory(threadFactory) } -trait DispatcherBuilder { - def build: MessageDispatcher -} - -object ThreadPoolConfigDispatcherBuilder { - def conf_?[T](opt: Option[T])(fun: (T) ⇒ ThreadPoolConfigDispatcherBuilder ⇒ ThreadPoolConfigDispatcherBuilder): Option[(ThreadPoolConfigDispatcherBuilder) ⇒ ThreadPoolConfigDispatcherBuilder] = opt map fun +object ThreadPoolConfigBuilder { + def conf_?[T](opt: Option[T])(fun: (T) ⇒ ThreadPoolConfigBuilder ⇒ ThreadPoolConfigBuilder): Option[(ThreadPoolConfigBuilder) ⇒ ThreadPoolConfigBuilder] = opt map fun } /** * A DSL to configure and create a MessageDispatcher with a ThreadPoolExecutor */ -case class ThreadPoolConfigDispatcherBuilder(dispatcherFactory: (ThreadPoolConfig) ⇒ MessageDispatcher, config: ThreadPoolConfig) extends DispatcherBuilder { +case class ThreadPoolConfigBuilder(config: ThreadPoolConfig) { import ThreadPoolConfig._ - def build: MessageDispatcher = dispatcherFactory(config) - def withNewThreadPoolWithCustomBlockingQueue(newQueueFactory: QueueFactory): ThreadPoolConfigDispatcherBuilder = + def withNewThreadPoolWithCustomBlockingQueue(newQueueFactory: QueueFactory): ThreadPoolConfigBuilder = this.copy(config = config.copy(queueFactory = newQueueFactory)) - def withNewThreadPoolWithCustomBlockingQueue(queue: BlockingQueue[Runnable]): ThreadPoolConfigDispatcherBuilder = + def withNewThreadPoolWithCustomBlockingQueue(queue: BlockingQueue[Runnable]): ThreadPoolConfigBuilder = withNewThreadPoolWithCustomBlockingQueue(reusableQueue(queue)) - def withNewThreadPoolWithLinkedBlockingQueueWithUnboundedCapacity: ThreadPoolConfigDispatcherBuilder = + def withNewThreadPoolWithLinkedBlockingQueueWithUnboundedCapacity: ThreadPoolConfigBuilder = this.copy(config = config.copy(queueFactory = linkedBlockingQueue())) - def withNewThreadPoolWithLinkedBlockingQueueWithCapacity(capacity: Int): ThreadPoolConfigDispatcherBuilder = + def withNewThreadPoolWithLinkedBlockingQueueWithCapacity(capacity: Int): ThreadPoolConfigBuilder = this.copy(config = config.copy(queueFactory = linkedBlockingQueue(capacity))) - def withNewThreadPoolWithSynchronousQueueWithFairness(fair: Boolean): ThreadPoolConfigDispatcherBuilder = + def withNewThreadPoolWithSynchronousQueueWithFairness(fair: Boolean): ThreadPoolConfigBuilder = this.copy(config = config.copy(queueFactory = synchronousQueue(fair))) - def withNewThreadPoolWithArrayBlockingQueueWithCapacityAndFairness(capacity: Int, fair: Boolean): ThreadPoolConfigDispatcherBuilder = + def withNewThreadPoolWithArrayBlockingQueueWithCapacityAndFairness(capacity: Int, fair: Boolean): ThreadPoolConfigBuilder = this.copy(config = config.copy(queueFactory = arrayBlockingQueue(capacity, fair))) - def setCorePoolSize(size: Int): ThreadPoolConfigDispatcherBuilder = + def setCorePoolSize(size: Int): ThreadPoolConfigBuilder = if (config.maxPoolSize < size) this.copy(config = config.copy(corePoolSize = size, maxPoolSize = size)) else this.copy(config = config.copy(corePoolSize = size)) - def setMaxPoolSize(size: Int): ThreadPoolConfigDispatcherBuilder = + def setMaxPoolSize(size: Int): ThreadPoolConfigBuilder = if (config.corePoolSize > size) this.copy(config = config.copy(corePoolSize = size, maxPoolSize = size)) else this.copy(config = config.copy(maxPoolSize = size)) - def setCorePoolSizeFromFactor(min: Int, multiplier: Double, max: Int): ThreadPoolConfigDispatcherBuilder = + def setCorePoolSizeFromFactor(min: Int, multiplier: 
Double, max: Int): ThreadPoolConfigBuilder = setCorePoolSize(scaledPoolSize(min, multiplier, max)) - def setMaxPoolSizeFromFactor(min: Int, multiplier: Double, max: Int): ThreadPoolConfigDispatcherBuilder = + def setMaxPoolSizeFromFactor(min: Int, multiplier: Double, max: Int): ThreadPoolConfigBuilder = setMaxPoolSize(scaledPoolSize(min, multiplier, max)) - def setKeepAliveTimeInMillis(time: Long): ThreadPoolConfigDispatcherBuilder = + def setKeepAliveTimeInMillis(time: Long): ThreadPoolConfigBuilder = setKeepAliveTime(Duration(time, TimeUnit.MILLISECONDS)) - def setKeepAliveTime(time: Duration): ThreadPoolConfigDispatcherBuilder = + def setKeepAliveTime(time: Duration): ThreadPoolConfigBuilder = this.copy(config = config.copy(threadTimeout = time)) - def setAllowCoreThreadTimeout(allow: Boolean): ThreadPoolConfigDispatcherBuilder = + def setAllowCoreThreadTimeout(allow: Boolean): ThreadPoolConfigBuilder = this.copy(config = config.copy(allowCorePoolTimeout = allow)) - def setQueueFactory(newQueueFactory: QueueFactory): ThreadPoolConfigDispatcherBuilder = + def setQueueFactory(newQueueFactory: QueueFactory): ThreadPoolConfigBuilder = this.copy(config = config.copy(queueFactory = newQueueFactory)) - def configure(fs: Option[Function[ThreadPoolConfigDispatcherBuilder, ThreadPoolConfigDispatcherBuilder]]*): ThreadPoolConfigDispatcherBuilder = fs.foldLeft(this)((c, f) ⇒ f.map(_(c)).getOrElse(c)) + def configure(fs: Option[Function[ThreadPoolConfigBuilder, ThreadPoolConfigBuilder]]*): ThreadPoolConfigBuilder = fs.foldLeft(this)((c, f) ⇒ f.map(_(c)).getOrElse(c)) } object MonitorableThreadFactory { @@ -161,11 +167,14 @@ case class MonitorableThreadFactory(name: String, daemonic: Boolean, contextClassLoader: Option[ClassLoader], exceptionHandler: Thread.UncaughtExceptionHandler = MonitorableThreadFactory.doNothing) - extends ThreadFactory { + extends ThreadFactory with ForkJoinPool.ForkJoinWorkerThreadFactory { protected val counter = new AtomicLong - def newThread(runnable: Runnable) = { - val t = new Thread(runnable, name + counter.incrementAndGet()) + def newThread(pool: ForkJoinPool): ForkJoinWorkerThread = wire(ForkJoinPool.defaultForkJoinWorkerThreadFactory.newThread(pool)) + + def newThread(runnable: Runnable): Thread = wire(new Thread(runnable, name + counter.incrementAndGet())) + + protected def wire[T <: Thread](t: T): T = { t.setUncaughtExceptionHandler(exceptionHandler) t.setDaemon(daemonic) contextClassLoader foreach (t.setContextClassLoader(_)) diff --git a/akka-actor/src/main/scala/akka/dispatch/japi/Future.scala b/akka-actor/src/main/scala/akka/dispatch/japi/Future.scala deleted file mode 100644 index a237c0c647..0000000000 --- a/akka-actor/src/main/scala/akka/dispatch/japi/Future.scala +++ /dev/null @@ -1,62 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. 
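For reference, the renamed builder from the preceding ThreadPoolBuilder.scala hunk composes exactly as before, only decoupled from dispatcher construction; a sketch using just the methods visible in that hunk (the chosen sizes are arbitrary):

```scala
import akka.dispatch.{ ThreadPoolConfig, ThreadPoolConfigBuilder }
import akka.util.duration._

val tpc: ThreadPoolConfig =
  ThreadPoolConfigBuilder(ThreadPoolConfig())
    .setCorePoolSizeFromFactor(8, 3.0, 64)   // scaledPoolSize(min, factor, max)
    .setMaxPoolSizeFromFactor(8, 3.0, 64)
    .setKeepAliveTime(1.minute)
    .setAllowCoreThreadTimeout(true)
    .withNewThreadPoolWithLinkedBlockingQueueWithCapacity(1000)
    .config                                  // unwrap the immutable ThreadPoolConfig
```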
- */ -package akka.dispatch.japi - -import akka.japi.{ Procedure2, Procedure, Function ⇒ JFunc } - -/* Java API */ -trait Future[+T] { self: akka.dispatch.Future[T] ⇒ - /** - * Asynchronously called when this Future gets a successful result - */ - private[japi] final def onSuccess[A >: T](proc: Procedure[A]): this.type = self.onSuccess({ case r ⇒ proc(r.asInstanceOf[A]) }: PartialFunction[T, Unit]) - - /** - * Asynchronously called when this Future gets a failed result - */ - private[japi] final def onFailure(proc: Procedure[Throwable]): this.type = self.onFailure({ case t: Throwable ⇒ proc(t) }: PartialFunction[Throwable, Unit]) - - /** - * Asynchronously called when this future is completed with either a failed or a successful result - * In case of a success, the first parameter (Throwable) will be null - * In case of a failure, the second parameter (T) will be null - * For no reason will both be null or neither be null - */ - private[japi] final def onComplete[A >: T](proc: Procedure2[Throwable, A]): this.type = self.onComplete(_.fold(t ⇒ proc(t, null.asInstanceOf[T]), r ⇒ proc(null, r))) - - /** - * Asynchronously applies the provided function to the (if any) successful result of this Future - * Any failure of this Future will be propagated to the Future returned by this method. - */ - private[japi] final def map[A >: T, B](f: JFunc[A, B]): akka.dispatch.Future[B] = self.map(f(_)) - - /** - * Asynchronously applies the provided function to the (if any) successful result of this Future and flattens it. - * Any failure of this Future will be propagated to the Future returned by this method. - */ - private[japi] final def flatMap[A >: T, B](f: JFunc[A, akka.dispatch.Future[B]]): akka.dispatch.Future[B] = self.flatMap(f(_)) - - /** - * Asynchronously applies the provided Procedure to the (if any) successful result of this Future - * Provided Procedure will not be called in case of no-result or in case of failed result - */ - private[japi] final def foreach[A >: T](proc: Procedure[A]): Unit = self.foreach(proc(_)) - - /** - * Returns a new Future whose successful result will be the successful result of this Future if that result conforms to the provided predicate - * Any failure of this Future will be propagated to the Future returned by this method. - */ - private[japi] final def filter[A >: T](p: JFunc[A, java.lang.Boolean]): akka.dispatch.Future[A] = - self.filter((a: Any) ⇒ p(a.asInstanceOf[A])).asInstanceOf[akka.dispatch.Future[A]] - - /** - * Returns a new Future whose value will be of the specified type if it really is - * Or a failure with a ClassCastException if it wasn't. 
- */ -private[japi] final def mapTo[A](clazz: Class[A]): akka.dispatch.Future[A] = { - implicit val manifest: Manifest[A] = Manifest.classType(clazz) - self.mapTo[A] - } -} - diff --git a/akka-actor/src/main/scala/akka/japi/JavaAPI.scala b/akka-actor/src/main/scala/akka/japi/JavaAPI.scala index e414d0fee6..47ce667759 100644 --- a/akka-actor/src/main/scala/akka/japi/JavaAPI.scala +++ b/akka-actor/src/main/scala/akka/japi/JavaAPI.scala @@ -119,3 +119,13 @@ object Option { implicit def java2ScalaOption[A](o: Option[A]): scala.Option[A] = o.asScala implicit def scala2JavaOption[A](o: scala.Option[A]): Option[A] = if (o.isDefined) some(o.get) else none } + +/** + * This object holds common utilities for Java + */ +object Util { + /** + * Given a Class returns a Scala Manifest of that Class + */ + def manifest[T](clazz: Class[T]): Manifest[T] = Manifest.classType(clazz) +} diff --git a/akka-actor/src/main/scala/akka/pattern/AskSupport.scala b/akka-actor/src/main/scala/akka/pattern/AskSupport.scala index 3e637fc81d..f0e5939f96 100644 --- a/akka-actor/src/main/scala/akka/pattern/AskSupport.scala +++ b/akka-actor/src/main/scala/akka/pattern/AskSupport.scala @@ -22,7 +22,69 @@ class AskTimeoutException(message: String, cause: Throwable) extends TimeoutExce /** * This object contains implementation details of the “ask” pattern. */ -object AskSupport { +trait AskSupport { + + /** + * Import this implicit conversion to gain `?` and `ask` methods on + * [[akka.actor.ActorRef]], which will defer to the + * `ask(actorRef, message)(timeout)` method defined here. + * + * {{{ + * import akka.pattern.ask + * + * val future = actor ? message // => ask(actor, message) + * val future = actor ask message // => ask(actor, message) + * val future = actor.ask(message)(timeout) // => ask(actor, message)(timeout) + * }}} + * + * All of the above use an implicit [[akka.actor.Timeout]]. + */ + implicit def ask(actorRef: ActorRef): AskableActorRef = new AskableActorRef(actorRef) + + /** + * Sends a message asynchronously and returns a [[akka.dispatch.Future]] + * holding the eventual reply message; this means that the target actor + * needs to send the result to the `sender` reference provided. The Future + * will be completed with an [[akka.actor.AskTimeoutException]] after the + * given timeout has expired; this is independent from any timeout applied + * while awaiting a result for this future (i.e. in + * `Await.result(..., timeout)`). + * + * Warning: + * When using future callbacks, inside actors you need to carefully avoid closing over + * the containing actor’s object, i.e. do not call methods or access mutable state + * on the enclosing actor from within the callback. This would break the actor + * encapsulation and may introduce synchronization bugs and race conditions because + * the callback will be scheduled concurrently to the enclosing actor. Unfortunately + * there is not yet a way to detect these illegal accesses at compile time.
+ * + * Recommended usage: + * + * {{{ + * val f = ask(worker, request)(timeout) + * flow { + * EnrichedRequest(request, f()) + * } pipeTo nextActor + * }}} + * + * [see [[akka.dispatch.Future]] for a description of `flow`] + */ + def ask(actorRef: ActorRef, message: Any)(implicit timeout: Timeout): Future[Any] = actorRef match { + case ref: InternalActorRef if ref.isTerminated ⇒ + actorRef.tell(message) + Promise.failed(new AskTimeoutException("sending to terminated ref breaks promises"))(ref.provider.dispatcher) + case ref: InternalActorRef ⇒ + val provider = ref.provider + if (timeout.duration.length <= 0) { + actorRef.tell(message) + Promise.failed(new AskTimeoutException("not asking with negative timeout"))(provider.dispatcher) + } else { + val a = createAsker(provider, timeout) + actorRef.tell(message, a) + a.result + } + case _ ⇒ throw new IllegalArgumentException("incompatible ActorRef " + actorRef) + } /** * Implementation detail of the “ask” pattern enrichment of ActorRef @@ -121,7 +183,10 @@ object AskSupport { } } - def createAsker(provider: ActorRefProvider, timeout: Timeout): PromiseActorRef = { + /** + * INTERNAL AKKA USE ONLY + */ + private[akka] def createAsker(provider: ActorRefProvider, timeout: Timeout): PromiseActorRef = { val path = provider.tempPath() val result = Promise[Any]()(provider.dispatcher) val a = new PromiseActorRef(provider, path, provider.tempContainer, result, provider.deathWatch) diff --git a/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala b/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala new file mode 100644 index 0000000000..d6fbd31c1e --- /dev/null +++ b/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala @@ -0,0 +1,47 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ + +package akka.pattern + +import akka.actor.{ ActorRef, Actor, ActorSystem, Props, PoisonPill, Terminated, ReceiveTimeout, ActorTimeoutException } +import akka.dispatch.{ Promise, Future } +import akka.util.Duration + +trait GracefulStopSupport { + /** + * Returns a [[akka.dispatch.Future]] that will be completed with success (value `true`) when + * existing messages of the target actor have been processed and the actor has been + * terminated. + * + * Useful when you need to wait for termination or compose ordered termination of several actors. + * + * If the target actor isn't terminated within the timeout the [[akka.dispatch.Future]] + * is completed with failure [[akka.actor.ActorTimeoutException]]. + */ + def gracefulStop(target: ActorRef, timeout: Duration)(implicit system: ActorSystem): Future[Boolean] = { + if (target.isTerminated) { + Promise.successful(true) + } else { + val result = Promise[Boolean]() + system.actorOf(Props(new Actor { + // Terminated will be received when target has been stopped + context watch target + target !
PoisonPill + // ReceiveTimeout will be received if nothing else is received within the timeout + context setReceiveTimeout timeout + + def receive = { + case Terminated(a) if a == target ⇒ + result success true + context stop self + case ReceiveTimeout ⇒ + result failure new ActorTimeoutException( + "Failed to stop [%s] within [%s]".format(target.path, context.receiveTimeout)) + context stop self + } + })) + result + } + } +} \ No newline at end of file diff --git a/akka-actor/src/main/scala/akka/pattern/Patterns.scala b/akka-actor/src/main/scala/akka/pattern/Patterns.scala index c3510d9b68..7167775b29 100644 --- a/akka-actor/src/main/scala/akka/pattern/Patterns.scala +++ b/akka-actor/src/main/scala/akka/pattern/Patterns.scala @@ -6,7 +6,7 @@ package akka.pattern object Patterns { import akka.actor.{ ActorRef, ActorSystem } import akka.dispatch.Future - import akka.pattern.{ ask ⇒ scalaAsk } + import akka.pattern.{ ask ⇒ scalaAsk, pipe ⇒ scalaPipe } import akka.util.{ Timeout, Duration } /** @@ -83,10 +83,10 @@ object Patterns { * // apply some transformation (i.e. enrich with request info) * final Future transformed = f.map(new akka.japi.Function() { ... }); * // send it on to the next stage - * Patterns.pipeTo(transformed, nextActor); + * Patterns.pipe(transformed).to(nextActor); * }}} */ - def pipeTo[T](future: Future[T], actorRef: ActorRef): Future[T] = akka.pattern.pipeTo(future, actorRef) + def pipe[T](future: Future[T]): PipeableFuture[T] = scalaPipe(future) /** * Returns a [[akka.dispatch.Future]] that will be completed with success (value `true`) when @@ -98,7 +98,6 @@ object Patterns { * If the target actor isn't terminated within the timeout the [[akka.dispatch.Future]] * is completed with failure [[akka.actor.ActorTimeoutException]]. */ - def gracefulStop(target: ActorRef, timeout: Duration, system: ActorSystem): Future[java.lang.Boolean] = { + def gracefulStop(target: ActorRef, timeout: Duration, system: ActorSystem): Future[java.lang.Boolean] = akka.pattern.gracefulStop(target, timeout)(system).asInstanceOf[Future[java.lang.Boolean]] - } } diff --git a/akka-actor/src/main/scala/akka/pattern/PipeToSupport.scala b/akka-actor/src/main/scala/akka/pattern/PipeToSupport.scala index 26f3b68e38..b611fd7128 100644 --- a/akka-actor/src/main/scala/akka/pattern/PipeToSupport.scala +++ b/akka-actor/src/main/scala/akka/pattern/PipeToSupport.scala @@ -3,13 +3,37 @@ */ package akka.pattern -import akka.actor.ActorRef import akka.dispatch.Future +import akka.actor.{ Status, ActorRef } -object PipeToSupport { +trait PipeToSupport { - class PipeableFuture[T](val future: Future[T]) { - def pipeTo(actorRef: ActorRef): Future[T] = akka.pattern.pipeTo(future, actorRef) + final class PipeableFuture[T](val future: Future[T]) { + def pipeTo(recipient: ActorRef): Future[T] = + future onComplete { + case Right(r) ⇒ recipient ! r + case Left(f) ⇒ recipient ! 
Status.Failure(f) + } + + def to(recipient: ActorRef): PipeableFuture[T] = { + pipeTo(recipient) + this + } } + /** + * Import this implicit conversion to gain the `pipeTo` method on [[akka.dispatch.Future]]: + * + * {{{ + * import akka.pattern.pipe + * + * Future { doExpensiveCalc() } pipeTo nextActor + * + * or + * + * pipe(someFuture) to nextActor + * + * }}} + */ + implicit def pipe[T](future: Future[T]): PipeableFuture[T] = new PipeableFuture(future) } \ No newline at end of file diff --git a/akka-actor/src/main/scala/akka/pattern/package.scala b/akka-actor/src/main/scala/akka/pattern/package.scala index ac8fcf2df2..ec4786a4c0 100644 --- a/akka-actor/src/main/scala/akka/pattern/package.scala +++ b/akka-actor/src/main/scala/akka/pattern/package.scala @@ -40,139 +40,6 @@ import akka.util.{ Timeout, Duration } * ask(actor, message); * }}} */ -package object pattern { +package object pattern extends PipeToSupport with AskSupport with GracefulStopSupport { - /** - * Import this implicit conversion to gain `?` and `ask` methods on - * [[akka.actor.ActorRef]], which will defer to the - * `ask(actorRef, message)(timeout)` method defined here. - * - * {{{ - * import akka.pattern.ask - * - * val future = actor ? message // => ask(actor, message) - * val future = actor ask message // => ask(actor, message) - * val future = actor.ask(message)(timeout) // => ask(actor, message)(timeout) - * }}} - * - * All of the above use an implicit [[akka.actor.Timeout]]. - */ - implicit def ask(actorRef: ActorRef): AskSupport.AskableActorRef = new AskSupport.AskableActorRef(actorRef) - - /** - * Sends a message asynchronously and returns a [[akka.dispatch.Future]] - * holding the eventual reply message; this means that the target actor - * needs to send the result to the `sender` reference provided. The Future - * will be completed with an [[akka.actor.AskTimeoutException]] after the - * given timeout has expired; this is independent from any timeout applied - * while awaiting a result for this future (i.e. in - * `Await.result(..., timeout)`). - * - * Warning: - * When using future callbacks, inside actors you need to carefully avoid closing over - * the containing actor’s object, i.e. do not call methods or access mutable state - * on the enclosing actor from within the callback. This would break the actor - * encapsulation and may introduce synchronization bugs and race conditions because - * the callback will be scheduled concurrently to the enclosing actor. Unfortunately - * there is not yet a way to detect these illegal accesses at compile time. 
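With `AskSupport`, `PipeToSupport` and `GracefulStopSupport` now mixed into `package object pattern` (above), the three patterns compose from a single import root. A sketch assuming an implicit `ActorSystem` named `system` and actors `worker` and `nextActor` (all hypothetical):

```scala
import akka.pattern.{ ask, pipe, gracefulStop }
import akka.dispatch.Await
import akka.util.Timeout
import akka.util.duration._

implicit val timeout = Timeout(5.seconds)

val reply = worker ? "request"                 // Future[Any]; AskTimeoutException on timeout
reply pipeTo nextActor                         // delivers result or Status.Failure

val stopped = gracefulStop(worker, 5.seconds)  // Future[Boolean], uses the implicit system
Await.result(stopped, 6.seconds)               // true once worker is terminated
```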
- * - * Recommended usage: - * - * {{{ - * val f = ask(worker, request)(timeout) - * flow { - * EnrichedRequest(request, f()) - * } pipeTo nextActor - * }}} - * - * [see [[akka.dispatch.Future]] for a description of `flow`] - */ - def ask(actorRef: ActorRef, message: Any)(implicit timeout: Timeout): Future[Any] = actorRef match { - case ref: InternalActorRef if ref.isTerminated ⇒ - actorRef.tell(message) - Promise.failed(new AskTimeoutException("sending to terminated ref breaks promises"))(ref.provider.dispatcher) - case ref: InternalActorRef ⇒ - val provider = ref.provider - if (timeout.duration.length <= 0) { - actorRef.tell(message) - Promise.failed(new AskTimeoutException("not asking with negative timeout"))(provider.dispatcher) - } else { - val a = AskSupport.createAsker(provider, timeout) - actorRef.tell(message, a) - a.result - } - case _ ⇒ throw new IllegalArgumentException("incompatible ActorRef " + actorRef) - } - - /** - * Import this implicit conversion to gain the `pipeTo` method on [[akka.dispatch.Future]]: - * - * {{{ - * import akka.pattern.pipeTo - * - * Future { doExpensiveCalc() } pipeTo nextActor - * }}} - */ - implicit def pipeTo[T](future: Future[T]): PipeToSupport.PipeableFuture[T] = new PipeToSupport.PipeableFuture(future) - - /** - * Register an onComplete callback on this [[akka.dispatch.Future]] to send - * the result to the given actor reference. Returns the original Future to - * allow method chaining. - * - * Recommended usage example: - * - * {{{ - * val f = ask(worker, request)(timeout) - * flow { - * EnrichedRequest(request, f()) - * } pipeTo nextActor - * }}} - * - * [see [[akka.dispatch.Future]] for a description of `flow`] - */ - def pipeTo[T](future: Future[T], actorRef: ActorRef): Future[T] = { - future onComplete { - case Right(r) ⇒ actorRef ! r - case Left(f) ⇒ actorRef ! Status.Failure(f) - } - future - } - - /** - * Returns a [[akka.dispatch.Future]] that will be completed with success (value `true`) when - * existing messages of the target actor has been processed and the actor has been - * terminated. - * - * Useful when you need to wait for termination or compose ordered termination of several actors. - * - * If the target actor isn't terminated within the timeout the [[akka.dispatch.Future]] - * is completed with failure [[akka.actor.ActorTimeoutException]]. - */ - def gracefulStop(target: ActorRef, timeout: Duration)(implicit system: ActorSystem): Future[Boolean] = { - if (target.isTerminated) { - Promise.successful(true) - } else { - val result = Promise[Boolean]() - system.actorOf(Props(new Actor { - // Terminated will be received when target has been stopped - context watch target - target ! 
PoisonPill - // ReceiveTimeout will be received if nothing else is received within the timeout - context setReceiveTimeout timeout - - def receive = { - case Terminated(a) if a == target ⇒ - result success true - context stop self - case ReceiveTimeout ⇒ - result failure new ActorTimeoutException( - "Failed to stop [%s] within [%s]".format(target.path, context.receiveTimeout)) - context stop self - } - })) - result - } - } - -} +} \ No newline at end of file diff --git a/akka-actor/src/main/scala/akka/routing/Routing.scala b/akka-actor/src/main/scala/akka/routing/Routing.scala index 39a60623d7..a6ed7259a5 100644 --- a/akka-actor/src/main/scala/akka/routing/Routing.scala +++ b/akka-actor/src/main/scala/akka/routing/Routing.scala @@ -10,7 +10,7 @@ import akka.util.Duration import akka.util.duration._ import com.typesafe.config.Config import akka.config.ConfigurationException -import akka.pattern.AskSupport +import akka.pattern.pipe import scala.collection.JavaConversions.iterableAsScalaIterable /** @@ -766,7 +766,7 @@ trait ScatterGatherFirstCompletedLike { this: RouterConfig ⇒ { case (sender, message) ⇒ val provider: ActorRefProvider = routeeProvider.context.asInstanceOf[ActorCell].systemImpl.provider - val asker = AskSupport.createAsker(provider, within) + val asker = akka.pattern.createAsker(provider, within) asker.result.pipeTo(sender) toAll(asker, routeeProvider.routees) } diff --git a/akka-actor/src/main/scala/akka/util/BoxedType.scala b/akka-actor/src/main/scala/akka/util/BoxedType.scala index d2c5092be4..f5f95096d9 100644 --- a/akka-actor/src/main/scala/akka/util/BoxedType.scala +++ b/akka-actor/src/main/scala/akka/util/BoxedType.scala @@ -3,9 +3,8 @@ */ package akka.util -import java.{ lang ⇒ jl } - object BoxedType { + import java.{ lang ⇒ jl } private val toBoxed = Map[Class[_], Class[_]]( classOf[Boolean] -> classOf[jl.Boolean], @@ -18,8 +17,5 @@ object BoxedType { classOf[Double] -> classOf[jl.Double], classOf[Unit] -> classOf[scala.runtime.BoxedUnit]) - def apply(c: Class[_]): Class[_] = { - if (c.isPrimitive) toBoxed(c) else c - } - + final def apply(c: Class[_]): Class[_] = if (c.isPrimitive) toBoxed(c) else c } diff --git a/akka-actor/src/main/scala/akka/util/Duration.scala b/akka-actor/src/main/scala/akka/util/Duration.scala index 65d6e6148c..2b6aae1eb3 100644 --- a/akka-actor/src/main/scala/akka/util/Duration.scala +++ b/akka-actor/src/main/scala/akka/util/Duration.scala @@ -8,40 +8,13 @@ import java.util.concurrent.TimeUnit import TimeUnit._ import java.lang.{ Double ⇒ JDouble } -class TimerException(message: String) extends RuntimeException(message) - -/** - * Simple timer class. - * Usage: - *
- *   import akka.util.duration._
- *   import akka.util.Timer
- *
- *   val timer = Timer(30.seconds)
- *   while (timer.isTicking) { ... }
- * 
- */ -case class Timer(duration: Duration, throwExceptionOnTimeout: Boolean = false) { - val startTimeInMillis = System.currentTimeMillis - val timeoutInMillis = duration.toMillis - - /** - * Returns true while the timer is ticking. After that it either throws and exception or - * returns false. Depending on if the 'throwExceptionOnTimeout' argument is true or false. - */ - def isTicking: Boolean = { - if (!(timeoutInMillis > (System.currentTimeMillis - startTimeInMillis))) { - if (throwExceptionOnTimeout) throw new TimerException("Time out after " + duration) - else false - } else true - } -} - -case class Deadline(d: Duration) { - def +(other: Duration): Deadline = copy(d = d + other) - def -(other: Duration): Deadline = copy(d = d - other) - def -(other: Deadline): Duration = d - other.d +case class Deadline private (time: Duration) { + def +(other: Duration): Deadline = copy(time = time + other) + def -(other: Duration): Deadline = copy(time = time - other) + def -(other: Deadline): Duration = time - other.time def timeLeft: Duration = this - Deadline.now + def hasTimeLeft(): Boolean = !isOverdue() //Code reuse FTW + def isOverdue(): Boolean = (time.toNanos - System.nanoTime()) < 0 } object Deadline { def now: Deadline = Deadline(Duration(System.nanoTime, NANOSECONDS)) diff --git a/akka-actor/src/main/scala/akka/util/JMX.scala b/akka-actor/src/main/scala/akka/util/JMX.scala deleted file mode 100644 index 44d1410d6b..0000000000 --- a/akka-actor/src/main/scala/akka/util/JMX.scala +++ /dev/null @@ -1,34 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -package akka.util - -import akka.event.Logging.Error -import java.lang.management.ManagementFactory -import javax.management.{ ObjectInstance, ObjectName, InstanceAlreadyExistsException, InstanceNotFoundException } -import akka.actor.ActorSystem - -object JMX { - private val mbeanServer = ManagementFactory.getPlatformMBeanServer - - def nameFor(hostname: String, service: String, bean: String): ObjectName = - new ObjectName("akka.%s:type=%s,name=%s".format(hostname, service, bean.replace(":", "_"))) - - def register(name: ObjectName, mbean: AnyRef)(implicit system: ActorSystem): Option[ObjectInstance] = try { - Some(mbeanServer.registerMBean(mbean, name)) - } catch { - case e: InstanceAlreadyExistsException ⇒ - Some(mbeanServer.getObjectInstance(name)) - case e: Exception ⇒ - system.eventStream.publish(Error(e, "JMX", this.getClass, "Error when registering mbean [%s]".format(mbean))) - None - } - - def unregister(mbean: ObjectName)(implicit system: ActorSystem) = try { - mbeanServer.unregisterMBean(mbean) - } catch { - case e: InstanceNotFoundException ⇒ {} - case e: Exception ⇒ system.eventStream.publish(Error(e, "JMX", this.getClass, "Error while unregistering mbean [%s]".format(mbean))) - } -} diff --git a/akka-cluster/src/main/java/akka/cluster/LocalBookKeeper.java b/akka-cluster/src/main/java/akka/cluster/LocalBookKeeper.java deleted file mode 100644 index 413b9a3154..0000000000 --- a/akka-cluster/src/main/java/akka/cluster/LocalBookKeeper.java +++ /dev/null @@ -1,187 +0,0 @@ -package akka.cluster; - -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import java.io.BufferedReader; -import java.io.File; -import java.io.IOException; -import java.io.InputStreamReader; -import java.io.OutputStream; -import java.net.InetAddress; -import java.net.InetSocketAddress; -import java.net.Socket; - -import org.apache.bookkeeper.proto.BookieServer; -import org.apache.zookeeper.CreateMode; -import org.apache.zookeeper.KeeperException; -import org.apache.zookeeper.WatchedEvent; -import org.apache.zookeeper.Watcher; -import org.apache.zookeeper.ZooKeeper; -import org.apache.zookeeper.ZooDefs.Ids; -import org.apache.zookeeper.server.NIOServerCnxnFactory; -import org.apache.zookeeper.server.ZooKeeperServer; - -public class LocalBookKeeper { - public static final int CONNECTION_TIMEOUT = 30000; - - int numberOfBookies; - - public LocalBookKeeper() { - numberOfBookies = 3; - } - - public LocalBookKeeper(int numberOfBookies) { - this(); - this.numberOfBookies = numberOfBookies; - } - - private final String HOSTPORT = "127.0.0.1:2181"; - NIOServerCnxnFactory serverFactory; - ZooKeeperServer zks; - ZooKeeper zkc; - int ZooKeeperDefaultPort = 2181; - File ZkTmpDir; - - //BookKeeper variables - File tmpDirs[]; - BookieServer bs[]; - Integer initialPort = 5000; - - /** - * @param args - */ - - public void runZookeeper(int maxCC) throws IOException{ - // create a ZooKeeper server(dataDir, dataLogDir, port) - //ServerStats.registerAsConcrete(); - //ClientBase.setupTestEnv(); - ZkTmpDir = File.createTempFile("zookeeper", "test"); - ZkTmpDir.delete(); - ZkTmpDir.mkdir(); - - try { - zks = new ZooKeeperServer(ZkTmpDir, ZkTmpDir, ZooKeeperDefaultPort); - serverFactory = new NIOServerCnxnFactory(); - serverFactory.configure(new InetSocketAddress(ZooKeeperDefaultPort), maxCC); - serverFactory.startup(zks); - } catch (Exception e) { - // TODO Auto-generated catch block - } - - boolean b = waitForServerUp(HOSTPORT, CONNECTION_TIMEOUT); - } - - public void initializeZookeper() { - //initialize the zk client with values - try { - zkc = new ZooKeeper("127.0.0.1", ZooKeeperDefaultPort, new emptyWatcher()); - zkc.create("/ledgers", new byte[0], Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); - zkc.create("/ledgers/available", new byte[0], Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); - // No need to create an entry for each requested bookie anymore as the - // BookieServers will register themselves with ZooKeeper on startup. 
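Stepping back to the Duration.scala hunk above: the reworked `Deadline` is an absolute point in time derived from `System.nanoTime`, and the new `hasTimeLeft()`/`isOverdue()` pair reads well in retry loops. A minimal sketch:

```scala
import akka.util.Deadline
import akka.util.duration._

val deadline = Deadline.now + 3.seconds  // absolute deadline, monotonic clock
while (deadline.hasTimeLeft()) {
  // poll, retry, back off ...
}
assert(deadline.isOverdue())
val late = deadline.timeLeft             // negative Duration once overdue
```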
- } catch (KeeperException e) { - } catch (InterruptedException e) { - } catch (IOException e) { - } - } - - public void runBookies() throws IOException{ - // Create Bookie Servers (B1, B2, B3) - - tmpDirs = new File[numberOfBookies]; - bs = new BookieServer[numberOfBookies]; - - for(int i = 0; i < numberOfBookies; i++) { - tmpDirs[i] = File.createTempFile("bookie" + Integer.toString(i), "test"); - tmpDirs[i].delete(); - tmpDirs[i].mkdir(); - - bs[i] = new BookieServer(initialPort + i, InetAddress.getLocalHost().getHostAddress() + ":" - + ZooKeeperDefaultPort, tmpDirs[i], new File[]{tmpDirs[i]}); - bs[i].start(); - } - } - - public static void main(String[] args) throws IOException, InterruptedException { - if(args.length < 1) { - usage(); - System.exit(-1); - } - LocalBookKeeper lb = new LocalBookKeeper(Integer.parseInt(args[0])); - lb.runZookeeper(1000); - lb.initializeZookeper(); - lb.runBookies(); - while (true) { - Thread.sleep(5000); - } - } - - private static void usage() { - System.err.println("Usage: LocalBookKeeper number-of-bookies"); - } - - /* Used for testing purposes */ - class emptyWatcher implements Watcher{ - public void process(WatchedEvent event) {} - } - - public static boolean waitForServerUp(String hp, long timeout) { - long start = System.currentTimeMillis(); - String split[] = hp.split(":"); - String host = split[0]; - int port = Integer.parseInt(split[1]); - while (true) { - try { - Socket sock = new Socket(host, port); - BufferedReader reader = null; - try { - OutputStream outstream = sock.getOutputStream(); - outstream.write("stat".getBytes()); - outstream.flush(); - - reader = - new BufferedReader( - new InputStreamReader(sock.getInputStream())); - String line = reader.readLine(); - if (line != null && line.startsWith("Zookeeper version:")) { - return true; - } - } finally { - sock.close(); - if (reader != null) { - reader.close(); - } - } - } catch (IOException e) { - // ignore as this is expected - } - - if (System.currentTimeMillis() > start + timeout) { - break; - } - try { - Thread.sleep(250); - } catch (InterruptedException e) { - // ignore - } - } - return false; - } - -} diff --git a/akka-cluster/src/main/java/akka/cluster/zookeeper/DistributedQueue.java b/akka-cluster/src/main/java/akka/cluster/zookeeper/DistributedQueue.java deleted file mode 100644 index 7bb87bc414..0000000000 --- a/akka-cluster/src/main/java/akka/cluster/zookeeper/DistributedQueue.java +++ /dev/null @@ -1,312 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License.
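(Editorial aside before the removed DistributedQueue below: the class implements the standard ZooKeeper queue recipe, where each element is a PERSISTENT_SEQUENTIAL child znode and consumers always go for the lowest sequence number, retrying when a concurrent consumer deletes it first. A hedged Scala sketch of that consume loop against the plain ZooKeeper client; zk and dir are assumed to be a connected handle and an existing queue path.)

    import org.apache.zookeeper.{ KeeperException, ZooKeeper }
    import scala.collection.JavaConversions._

    // try the smallest-numbered child until a getData/delete pair succeeds;
    // lexicographic sort works because sequential znode names are zero-padded
    def takeOnce(zk: ZooKeeper, dir: String): Option[Array[Byte]] = {
      val children = zk.getChildren(dir, false).toList.sorted // "qn-0000000000", "qn-0000000001", ...
      children foreach { child ⇒
        try {
          val path = dir + "/" + child
          val data = zk.getData(path, false, null)
          zk.delete(path, -1) // version -1 deletes regardless of znode version
          return Some(data)
        } catch {
          case e: KeeperException.NoNodeException ⇒ // another consumer won the race; try the next child
        }
      }
      None
    }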
- */ - -package akka.cluster.zookeeper; - -import java.util.List; -import java.util.NoSuchElementException; -import java.util.TreeMap; -import java.util.concurrent.CountDownLatch; - -import org.apache.log4j.Logger; -import org.apache.zookeeper.CreateMode; -import org.apache.zookeeper.KeeperException; -import org.apache.zookeeper.WatchedEvent; -import org.apache.zookeeper.Watcher; -import org.apache.zookeeper.ZooDefs; -import org.apache.zookeeper.ZooKeeper; -import org.apache.zookeeper.data.ACL; -import org.apache.zookeeper.data.Stat; - -/** - * - * A protocol to implement a distributed queue. - * - */ - -public class DistributedQueue { - private static final Logger LOG = Logger.getLogger(DistributedQueue.class); - - private final String dir; - - private ZooKeeper zookeeper; - private List acl = ZooDefs.Ids.OPEN_ACL_UNSAFE; - - private final String prefix = "qn-"; - - - public DistributedQueue(ZooKeeper zookeeper, String dir, List acl) { - this.dir = dir; - - if(acl != null) { - this.acl = acl; - } - this.zookeeper = zookeeper; - - } - - - - /** - * Returns a Map of the children, ordered by id. - * @param watcher optional watcher on getChildren() operation. - * @return map from id to child name for all children - */ - private TreeMap orderedChildren(Watcher watcher) throws KeeperException, InterruptedException { - TreeMap orderedChildren = new TreeMap(); - - List childNames = null; - try{ - childNames = zookeeper.getChildren(dir, watcher); - }catch (KeeperException.NoNodeException e) { - throw e; - } - - for(String childName : childNames) { - try{ - //Check format - if(!childName.regionMatches(0, prefix, 0, prefix.length())) { - LOG.warn("Found child node with improper name: " + childName); - continue; - } - String suffix = childName.substring(prefix.length()); - Long childId = new Long(suffix); - orderedChildren.put(childId,childName); - }catch(NumberFormatException e) { - LOG.warn("Found child node with improper format : " + childName + " " + e,e); - } - } - - return orderedChildren; - } - - /** - * Find the smallest child node. - * @return The name of the smallest child node. - */ - private String smallestChildName() throws KeeperException, InterruptedException { - long minId = Long.MAX_VALUE; - String minName = ""; - - List childNames = null; - - try{ - childNames = zookeeper.getChildren(dir, false); - }catch(KeeperException.NoNodeException e) { - LOG.warn("Caught: " +e,e); - return null; - } - - for(String childName : childNames) { - try{ - //Check format - if(!childName.regionMatches(0, prefix, 0, prefix.length())) { - LOG.warn("Found child node with improper name: " + childName); - continue; - } - String suffix = childName.substring(prefix.length()); - long childId = Long.parseLong(suffix); - if(childId < minId) { - minId = childId; - minName = childName; - } - }catch(NumberFormatException e) { - LOG.warn("Found child node with improper format : " + childName + " " + e,e); - } - } - - - if(minId < Long.MAX_VALUE) { - return minName; - }else{ - return null; - } - } - - /** - * Return the head of the queue without modifying the queue. - * @return the data at the head of the queue. - * @throws NoSuchElementException - * @throws KeeperException - * @throws InterruptedException - */ - public byte[] element() throws NoSuchElementException, KeeperException, InterruptedException { - TreeMap orderedChildren; - - // element, take, and remove follow the same pattern. - // We want to return the child node with the smallest sequence number. 
- // Since other clients are remove()ing and take()ing nodes concurrently, - // the child with the smallest sequence number in orderedChildren might be gone by the time we check. - // We don't call getChildren again until we have tried the rest of the nodes in sequence order. - while(true) { - try{ - orderedChildren = orderedChildren(null); - }catch(KeeperException.NoNodeException e) { - throw new NoSuchElementException(); - } - if(orderedChildren.size() == 0 ) throw new NoSuchElementException(); - - for(String headNode : orderedChildren.values()) { - if(headNode != null) { - try{ - return zookeeper.getData(dir+"/"+headNode, false, null); - }catch(KeeperException.NoNodeException e) { - //Another client removed the node first, try next - } - } - } - - } - } - - - /** - * Attempts to remove the head of the queue and return it. - * @return The former head of the queue - * @throws NoSuchElementException - * @throws KeeperException - * @throws InterruptedException - */ - public byte[] remove() throws NoSuchElementException, KeeperException, InterruptedException { - TreeMap orderedChildren; - // Same as for element. Should refactor this. - while(true) { - try{ - orderedChildren = orderedChildren(null); - }catch(KeeperException.NoNodeException e) { - throw new NoSuchElementException(); - } - if(orderedChildren.size() == 0) throw new NoSuchElementException(); - - for(String headNode : orderedChildren.values()) { - String path = dir +"/"+headNode; - try{ - byte[] data = zookeeper.getData(path, false, null); - zookeeper.delete(path, -1); - return data; - }catch(KeeperException.NoNodeException e) { - // Another client deleted the node first. - } - } - - } - } - - private class LatchChildWatcher implements Watcher { - - CountDownLatch latch; - - public LatchChildWatcher() { - latch = new CountDownLatch(1); - } - - public void process(WatchedEvent event) { - LOG.debug("Watcher fired on path: " + event.getPath() + " state: " + - event.getState() + " type " + event.getType()); - latch.countDown(); - } - public void await() throws InterruptedException { - latch.await(); - } - } - - /** - * Removes the head of the queue and returns it, blocks until it succeeds. - * @return The former head of the queue - * @throws NoSuchElementException - * @throws KeeperException - * @throws InterruptedException - */ - public byte[] take() throws KeeperException, InterruptedException { - TreeMap orderedChildren; - // Same as for element. Should refactor this. - while(true) { - LatchChildWatcher childWatcher = new LatchChildWatcher(); - try{ - orderedChildren = orderedChildren(childWatcher); - }catch(KeeperException.NoNodeException e) { - zookeeper.create(dir, new byte[0], acl, CreateMode.PERSISTENT); - continue; - } - if(orderedChildren.size() == 0) { - childWatcher.await(); - continue; - } - - for(String headNode : orderedChildren.values()) { - String path = dir +"/"+headNode; - try{ - byte[] data = zookeeper.getData(path, false, null); - zookeeper.delete(path, -1); - return data; - }catch(KeeperException.NoNodeException e) { - // Another client deleted the node first. - } - } - } - } - - /** - * Inserts data into queue. 
- * @param data - * @return true if data was successfully added - */ - public boolean offer(byte[] data) throws KeeperException, InterruptedException{ - for(;;) { - try{ - zookeeper.create(dir+"/"+prefix, data, acl, CreateMode.PERSISTENT_SEQUENTIAL); - return true; - }catch(KeeperException.NoNodeException e) { - zookeeper.create(dir, new byte[0], acl, CreateMode.PERSISTENT); - } - } - - } - - /** - * Returns the data at the first element of the queue, or null if the queue is empty. - * @return data at the first element of the queue, or null. - * @throws KeeperException - * @throws InterruptedException - */ - public byte[] peek() throws KeeperException, InterruptedException{ - try{ - return element(); - }catch(NoSuchElementException e) { - return null; - } - } - - - /** - * Attempts to remove the head of the queue and return it. Returns null if the queue is empty. - * @return Head of the queue or null. - * @throws KeeperException - * @throws InterruptedException - */ - public byte[] poll() throws KeeperException, InterruptedException { - try{ - return remove(); - }catch(NoSuchElementException e) { - return null; - } - } - - - -} diff --git a/akka-cluster/src/main/java/akka/cluster/zookeeper/ZooKeeperQueue.java b/akka-cluster/src/main/java/akka/cluster/zookeeper/ZooKeeperQueue.java deleted file mode 100644 index 8867d97e00..0000000000 --- a/akka-cluster/src/main/java/akka/cluster/zookeeper/ZooKeeperQueue.java +++ /dev/null @@ -1,173 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -package akka.cluster.zookeeper; - -import java.io.Serializable; -import java.util.List; -import java.util.ArrayList; - -import org.I0Itec.zkclient.ExceptionUtil; -import org.I0Itec.zkclient.IZkChildListener; -import org.I0Itec.zkclient.ZkClient; -import org.I0Itec.zkclient.exception.ZkNoNodeException; - -public class ZooKeeperQueue { - - protected static class Element { - private String _name; - private T _data; - - public Element(String name, T data) { - _name = name; - _data = data; - } - - public String getName() { - return _name; - } - - public T getData() { - return _data; - } - } - - protected final ZkClient _zkClient; - private final String _elementsPath; - private final String _rootPath; - private final boolean _isBlocking; - - public ZooKeeperQueue(ZkClient zkClient, String rootPath, boolean isBlocking) { - _zkClient = zkClient; - _rootPath = rootPath; - _isBlocking = isBlocking; - _elementsPath = rootPath + "/queue"; - if (!_zkClient.exists(rootPath)) { - _zkClient.createPersistent(rootPath, true); - _zkClient.createPersistent(_elementsPath, true); - } - } - - public String enqueue(T element) { - try { - String sequential = _zkClient.createPersistentSequential(getElementRoughPath(), element); - String elementId = sequential.substring(sequential.lastIndexOf('/') + 1); - return elementId; - } catch (Exception e) { - throw ExceptionUtil.convertToRuntimeException(e); - } - } - - public T dequeue() throws InterruptedException { - if (_isBlocking) { - Element element = getFirstElement(); - _zkClient.delete(getElementPath(element.getName())); - return element.getData(); - } else { - throw new UnsupportedOperationException("Non-blocking ZooKeeperQueue is not yet supported"); - /* FIXME DOES NOT WORK - try { - String headName = getSmallestElement(_zkClient.getChildren(_elementsPath)); - String headPath = getElementPath(headName); - return (T) _zkClient.readData(headPath); - } catch (ZkNoNodeException e) { - return null; - } - */ - } - } - - public boolean containsElement(String elementId) { - String 
zkPath = getElementPath(elementId); - return _zkClient.exists(zkPath); - } - - public T peek() throws InterruptedException { - Element element = getFirstElement(); - if (element == null) { - return null; - } - return element.getData(); - } - - @SuppressWarnings("unchecked") - public List getElements() { - List paths =_zkClient.getChildren(_elementsPath); - List elements = new ArrayList(); - for (String path: paths) { - elements.add((T)_zkClient.readData(path)); - } - return elements; - } - - public int size() { - return _zkClient.getChildren(_elementsPath).size(); - } - - public void clear() { - _zkClient.deleteRecursive(_rootPath); - } - - public boolean isEmpty() { - return size() == 0; - } - - private String getElementRoughPath() { - return getElementPath("item" + "-"); - } - - private String getElementPath(String elementId) { - return _elementsPath + "/" + elementId; - } - - private String getSmallestElement(List list) { - String smallestElement = list.get(0); - for (String element : list) { - if (element.compareTo(smallestElement) < 0) { - smallestElement = element; - } - } - return smallestElement; - } - - @SuppressWarnings("unchecked") - protected Element getFirstElement() throws InterruptedException { - final Object mutex = new Object(); - IZkChildListener notifyListener = new IZkChildListener() { - @Override - public void handleChildChange(String parentPath, List currentChilds) throws Exception { - synchronized (mutex) { - mutex.notify(); - } - } - }; - try { - while (true) { - List elementNames; - synchronized (mutex) { - elementNames = _zkClient.subscribeChildChanges(_elementsPath, notifyListener); - while (elementNames == null || elementNames.isEmpty()) { - mutex.wait(); - elementNames = _zkClient.getChildren(_elementsPath); - } - } - String elementName = getSmallestElement(elementNames); - try { - String elementPath = getElementPath(elementName); - return new Element(elementName, (T) _zkClient.readData(elementPath)); - } catch (ZkNoNodeException e) { - // somebody else picked up the element first, so we have to - // retry with the new first element - } - } - } catch (InterruptedException e) { - throw e; - } catch (Exception e) { - throw ExceptionUtil.convertToRuntimeException(e); - } finally { - _zkClient.unsubscribeChildChanges(_elementsPath, notifyListener); - } - } - -}
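(Editorial aside ahead of the reference.conf added in the next hunk: its defaults are meant to be overridden per application. A hedged application.conf sketch; the seed-node address format is an assumption for illustration, not taken from this diff.)

    akka {
      cluster {
        seed-nodes = ["akka://MySystem@10.0.0.1:2552"]  # hypothetical seed-node address
        failure-detector {
          # raising the threshold trades slower detection for fewer false suspicions
          threshold = 12
        }
      }
    }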
diff --git a/akka-cluster/src/main/resources/reference.conf b/akka-cluster/src/main/resources/reference.conf new file mode 100644 index 0000000000..749c138a26 --- /dev/null +++ b/akka-cluster/src/main/resources/reference.conf @@ -0,0 +1,33 @@ +###################################### +# Akka Cluster Reference Config File # +###################################### + +# This is the reference config file with all the default settings. +# Make your edits/overrides in your application.conf. + +akka { + + cluster { + seed-nodes = [] + seed-node-connection-timeout = 30s + max-time-to-retry-joining-cluster = 30s + + # accrual failure detection config + failure-detector { + + # defines the failure detector threshold + # A low threshold is prone to generating many wrong suspicions but ensures + # quick detection in the event of a real crash. Conversely, a high + # threshold generates fewer mistakes but needs more time to detect + # actual crashes. + threshold = 8 + + max-sample-size = 1000 + } + + gossip { + initialDelay = 5s + frequency = 1s + } + } +} diff --git a/akka-remote/src/main/scala/akka/remote/AccrualFailureDetector.scala b/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala similarity index 88% rename from akka-remote/src/main/scala/akka/remote/AccrualFailureDetector.scala rename to akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala index 2d7a831b9d..379bf98a6b 100644 --- a/akka-remote/src/main/scala/akka/remote/AccrualFailureDetector.scala +++ b/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala @@ -2,13 +2,16 @@ * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.remote +package akka.cluster + +import akka.actor.{ ActorSystem, Address } +import akka.event.Logging -import java.util.concurrent.atomic.AtomicReference import scala.collection.immutable.Map import scala.annotation.tailrec + +import java.util.concurrent.atomic.AtomicReference import System.{ currentTimeMillis ⇒ newTimestamp } -import akka.actor.{ ActorSystem, Address } /** * Implementation of 'The Phi Accrual Failure Detector' by Hayashibara et al. as defined in their paper: @@ -20,12 +23,14 @@ import akka.actor.{ ActorSystem, Address } *

* Default threshold is 8, but can be configured in the Akka config. */ -class AccrualFailureDetector(val threshold: Int = 8, val maxSampleSize: Int = 1000) { +class AccrualFailureDetector(system: ActorSystem, val threshold: Int = 8, val maxSampleSize: Int = 1000) { private final val PhiFactor = 1.0 / math.log(10.0) private case class FailureStats(mean: Double = 0.0D, variance: Double = 0.0D, deviation: Double = 0.0D) + private val log = Logging(system, "FailureDetector") + /** * Implement using optimistic lockless concurrency, all state is represented * by this immutable case class and managed by an AtomicReference. @@ -49,6 +54,7 @@ class AccrualFailureDetector(val threshold: Int = 8, val maxSampleSize: Int = 10 */ @tailrec final def heartbeat(connection: Address) { + log.debug("Heartbeat from connection [{}] ", connection) val oldState = state.get val latestTimestamp = oldState.timestamps.get(connection) @@ -132,12 +138,15 @@ class AccrualFailureDetector(val threshold: Int = 8, val maxSampleSize: Int = 10 def phi(connection: Address): Double = { val oldState = state.get val oldTimestamp = oldState.timestamps.get(connection) - if (oldTimestamp.isEmpty) 0.0D // treat unmanaged connections, e.g. with zero heartbeats, as healthy connections - else { - val timestampDiff = newTimestamp - oldTimestamp.get - val mean = oldState.failureStats.get(connection).getOrElse(FailureStats()).mean - PhiFactor * timestampDiff / mean - } + val phi = + if (oldTimestamp.isEmpty) 0.0D // treat unmanaged connections, e.g. with zero heartbeats, as healthy connections + else { + val timestampDiff = newTimestamp - oldTimestamp.get + val mean = oldState.failureStats.get(connection).getOrElse(FailureStats()).mean + PhiFactor * timestampDiff / mean + } + log.debug("Phi value [{}] and threshold [{}] for connection [{}] ", phi, threshold, connection) + phi } /** diff --git a/akka-cluster/src/main/scala/akka/cluster/BookKeeperServer.scala b/akka-cluster/src/main/scala/akka/cluster/BookKeeperServer.scala deleted file mode 100644 index 679af24d03..0000000000 --- a/akka-cluster/src/main/scala/akka/cluster/BookKeeperServer.scala +++ /dev/null @@ -1,35 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ -package akka.cluster - -import org.apache.bookkeeper.proto.BookieServer - -import java.io.File - -/* -A simple use of BookKeeper is to implement a write-ahead transaction log. A server maintains an in-memory data structure -(with periodic snapshots for example) and logs changes to that structure before it applies the change. The system -server creates a ledger at startup and stores the ledger id and password in a well-known place (ZooKeeper maybe). When -it needs to make a change, the server adds an entry with the change information to a ledger and applies the change when -BookKeeper adds the entry successfully. The server can even use asyncAddEntry to queue up many changes for high change -throughput. BookKeeper meticulously logs the changes in order and calls the completion functions in order. - -When the system server dies, a backup server will come online, get the last snapshot and then it will open the -ledger of the old server and read all the entries from the time the snapshot was taken. (Since it doesn't know the last -entry number it will use MAX_INTEGER). Once all the entries have been processed, it will close the ledger and start a -new one for its use.
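(Editorial aside, back to the phi calculation in the renamed AccrualFailureDetector above: with PhiFactor = 1 / ln 10 ≈ 0.4343, phi grows linearly with the time since the last heartbeat, measured in mean heartbeat intervals. A small self-contained check:)

    // mirrors the expression above: PhiFactor * timestampDiff / mean
    val PhiFactor = 1.0 / math.log(10.0) // ≈ 0.4343

    def phi(millisSinceHeartbeat: Long, meanIntervalMillis: Double): Double =
      PhiFactor * millisSinceHeartbeat / meanIntervalMillis

    phi(4000, 1000.0)  // ≈ 1.74, well below the default threshold of 8
    phi(18500, 1000.0) // ≈ 8.03, crosses the default threshold, so the connection is suspected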
-*/ - -object BookKeeperServer { - val port = 3181 - val zkServers = "localhost:2181" - val journal = new File("./bk/journal") - val ledgers = Array(new File("./bk/ledger")) - val bookie = new BookieServer(port, zkServers, journal, ledgers) - - def start() { - bookie.start() - bookie.join() - } -} diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala deleted file mode 100644 index 130149b491..0000000000 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ /dev/null @@ -1,1876 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -package akka.cluster - -import org.apache.zookeeper._ -import org.apache.zookeeper.Watcher.Event._ -import org.apache.zookeeper.data.Stat -import org.apache.zookeeper.recipes.lock.{ WriteLock, LockListener } - -import org.I0Itec.zkclient._ -import org.I0Itec.zkclient.serialize._ -import org.I0Itec.zkclient.exception._ - -import java.util.{ List ⇒ JList } -import java.util.concurrent.atomic.{ AtomicBoolean, AtomicReference } -import java.util.concurrent.{ CopyOnWriteArrayList, Callable, ConcurrentHashMap } -import javax.management.StandardMBean -import java.net.InetSocketAddress - -import scala.collection.mutable.ConcurrentMap -import scala.collection.JavaConversions._ -import scala.annotation.tailrec - -import akka.util._ -import duration._ -import Helpers._ - -import akka.actor._ -import Actor._ -import Status._ -import DeploymentConfig._ - -import akka.event.EventHandler -import akka.config.Config -import akka.config.Config._ - -import akka.serialization.{ Serialization, Serializer, ActorSerialization, Compression } -import ActorSerialization._ -import Compression.LZF - -import akka.routing._ -import akka.cluster._ -import akka.cluster.metrics._ -import akka.cluster.zookeeper._ -import ChangeListener._ -import RemoteProtocol._ -import RemoteSystemDaemonMessageType._ - -import com.eaio.uuid.UUID - -import com.google.protobuf.ByteString -import akka.dispatch.{Await, Dispatchers, Future, PinnedDispatcher} - -// FIXME add watch for each node that when the entry for the node is removed then the node shuts itself down - -/** - * JMX MBean for the cluster service. 
- */ -trait ClusterNodeMBean { - - def stop() - - def disconnect() - - def reconnect() - - def resign() - - def getRemoteServerHostname: String - - def getRemoteServerPort: Int - - def getNodeName: String - - def getClusterName: String - - def getZooKeeperServerAddresses: String - - def getMemberNodes: Array[String] - - def getNodeAddress(): NodeAddress - - def getLeaderLockName: String - - def isLeader: Boolean - - def getUuidsForClusteredActors: Array[String] - - def getAddressesForClusteredActors: Array[String] - - def getUuidsForActorsInUse: Array[String] - - def getAddressesForActorsInUse: Array[String] - - def getNodesForActorInUseWithAddress(address: String): Array[String] - - def getUuidsForActorsInUseOnNode(nodeName: String): Array[String] - - def getAddressesForActorsInUseOnNode(nodeName: String): Array[String] - - def setConfigElement(key: String, value: String) - - def getConfigElement(key: String): AnyRef - - def removeConfigElement(key: String) - - def getConfigElementKeys: Array[String] - - def getMembershipPathFor(node: String): String - - def getConfigurationPathFor(key: String): String - - def getActorAddresstoNodesPathFor(actorAddress: String): String - - def getActorAddressToNodesPathForWithNodeName(actorAddress: String, nodeName: String): String - - def getNodeToUuidsPathFor(node: String): String - - // FIXME All MBean methods that take a UUID are useless, change to String - def getNodeToUuidsPathFor(node: String, uuid: UUID): String - - def getActorAddressRegistryPathFor(actorAddress: String): String - - def getActorAddressRegistrySerializerPathFor(actorAddress: String): String - - def getActorAddressRegistryUuidPathFor(actorAddress: String): String - - def getActorUuidRegistryNodePathFor(uuid: UUID): String - - def getActorUuidRegistryRemoteAddressPathFor(uuid: UUID): String - - def getActorAddressToUuidsPathFor(actorAddress: String): String - - def getActorAddressToUuidsPathForWithNodeName(actorAddress: String, uuid: UUID): String -} - -/** - * Module for the Cluster. Also holds global state such as configuration data etc. - */ -object Cluster { - val EMPTY_STRING = "".intern - - // config options - val name = Config.clusterName - val zooKeeperServers = config.getString("akka.cluster.zookeeper-server-addresses", "localhost:2181") - val remoteServerPort = config.getInt("akka.remote.server.port", 2552) - val sessionTimeout = Duration(config.getInt("akka.cluster.session-timeout", 60), TIME_UNIT).toMillis.toInt - val metricsRefreshInterval = Duration(config.getInt("akka.cluster.metrics-refresh-timeout", 2), TIME_UNIT) - val connectionTimeout = Duration(config.getInt("akka.cluster.connection-timeout", 60), TIME_UNIT).toMillis.toInt - val maxTimeToWaitUntilConnected = Duration(config.getInt("akka.cluster.max-time-to-wait-until-connected", 30), TIME_UNIT).toMillis.toInt - val shouldCompressData = config.getBool("akka.remote.use-compression", false) - val enableJMX = config.getBool("akka.enable-jmx", true) - val remoteDaemonAckTimeout = Duration(config.getInt("akka.remote.remote-daemon-ack-timeout", 30), TIME_UNIT).toMillis.toInt - val includeRefNodeInReplicaSet = config.getBool("akka.cluster.include-ref-node-in-replica-set", true) - - @volatile - private var properties = Map.empty[String, String] - - /** - * Use to override JVM options such as -Dakka.cluster.nodename=node1 etc. - * Currently supported options are: - *

-   *   Cluster setProperty ("akka.cluster.nodename", "node1")
-   *   Cluster setProperty ("akka.remote.hostname", "darkstar.lan")
-   *   Cluster setProperty ("akka.remote.port", "1234")
-   * 
- */ - def setProperty(property: (String, String)) { - properties = properties + property - } - - private def nodename: String = properties.get("akka.cluster.nodename") match { - case Some(uberride) ⇒ uberride - case None ⇒ Config.nodename - } - - private def hostname: String = properties.get("akka.remote.hostname") match { - case Some(uberride) ⇒ uberride - case None ⇒ Config.hostname - } - - private def port: Int = properties.get("akka.remote.port") match { - case Some(uberride) ⇒ uberride.toInt - case None ⇒ Config.remoteServerPort - } - - val defaultZooKeeperSerializer = new SerializableSerializer - - /** - * The node address. - */ - val nodeAddress = NodeAddress(name, nodename) - - /** - * The reference to the running ClusterNode. - */ - val node = { - if (nodeAddress eq null) throw new IllegalArgumentException("NodeAddress can't be null") - new DefaultClusterNode(nodeAddress, hostname, port, zooKeeperServers, defaultZooKeeperSerializer) - } - - /** - * Creates a new AkkaZkClient. - */ - def newZkClient(): AkkaZkClient = new AkkaZkClient(zooKeeperServers, sessionTimeout, connectionTimeout, defaultZooKeeperSerializer) - - def uuidToString(uuid: UUID): String = uuid.toString - - def stringToUuid(uuid: String): UUID = { - if (uuid eq null) throw new ClusterException("UUID is null") - if (uuid == "") throw new ClusterException("UUID is an empty string") - try { - new UUID(uuid) - } catch { - case e: StringIndexOutOfBoundsException ⇒ - val error = new ClusterException("UUID not valid [" + uuid + "]") - EventHandler.error(error, this, "") - throw error - } - } - - def uuidProtocolToUuid(uuid: UuidProtocol): UUID = new UUID(uuid.getHigh, uuid.getLow) - - def uuidToUuidProtocol(uuid: UUID): UuidProtocol = - UuidProtocol.newBuilder - .setHigh(uuid.getTime) - .setLow(uuid.getClockSeqAndNode) - .build -} - -/** - * A Cluster is made up by a bunch of jvm's, the ClusterNode. - * - * These are the path tree holding the cluster meta-data in ZooKeeper. - * - * Syntax: foo means a variable string, 'foo' means a symbol that does not change and "data" in foo[data] means the value (in bytes) for the node "foo" - * - *
- *   /clusterName/'members'/nodeName
- *   /clusterName/'config'/key[bytes]
- *
- *   /clusterName/'actor-address-to-nodes'/actorAddress/nodeName
- *   /clusterName/'actors-node-to-uuids'/nodeName/actorUuid
- *
- *   /clusterName/'actor-address-registry'/actorAddress/'serializer'[serializerName]
- *   /clusterName/'actor-address-registry'/actorAddress/'uuid'[actorUuid]
- *
- *   /clusterName/'actor-uuid-registry'/actorUuid/'node'[nodeName]
- *   /clusterName/'actor-uuid-registry'/actorUuid/'node'/ip:port
- *   /clusterName/'actor-uuid-registry'/actorUuid/'address'[actorAddress]
- *
- *   /clusterName/'actor-address-to-uuids'/actorAddress/actorUuid
- * 
- */ -class DefaultClusterNode private[akka] ( - val nodeAddress: NodeAddress, - val hostname: String = Config.hostname, - val port: Int = Config.remoteServerPort, - val zkServerAddresses: String, - val serializer: ZkSerializer) extends ErrorHandler with ClusterNode { - self ⇒ - - if ((hostname eq null) || hostname == "") throw new NullPointerException("Host name must not be null or empty string") - if (port < 1) throw new NullPointerException("Port can not be negative") - if (nodeAddress eq null) throw new IllegalArgumentException("'nodeAddress' can not be 'null'") - - val clusterJmxObjectName = JMX.nameFor(hostname, "monitoring", "cluster") - - import Cluster._ - - // private val connectToAllNewlyArrivedMembershipNodesInClusterLock = new AtomicBoolean(false) - - private[cluster] lazy val remoteClientLifeCycleHandler = actorOf(Props(new Actor { - def receive = { - case RemoteClientError(cause, client, address) ⇒ client.shutdownClientModule() - case RemoteClientDisconnected(client, address) ⇒ client.shutdownClientModule() - case _ ⇒ //ignore other - } - }), "akka.cluster.RemoteClientLifeCycleListener") - - private[cluster] lazy val remoteDaemon = new LocalActorRef(Props(new RemoteClusterDaemon(this)).copy(dispatcher = new PinnedDispatcher()), RemoteClusterDaemon.Address, systemService = true) - - private[cluster] lazy val remoteDaemonSupervisor = Supervisor( - SupervisorConfig( - OneForOneStrategy(List(classOf[Exception]), Int.MaxValue, Int.MaxValue), // is infinite restart what we want? - Supervise( - remoteDaemon, - Permanent) - :: Nil)).start() - - lazy val remoteService: RemoteSupport = { - val remote = new akka.remote.netty.NettyRemoteSupport - remote.start(hostname, port) - remote.register(RemoteClusterDaemon.Address, remoteDaemon) - remote.addListener(RemoteFailureDetector.sender) - remote.addListener(remoteClientLifeCycleHandler) - remote - } - - lazy val remoteServerAddress: InetSocketAddress = remoteService.address - - lazy val metricsManager: NodeMetricsManager = new LocalNodeMetricsManager(zkClient, Cluster.metricsRefreshInterval).start() - - // static nodes - val CLUSTER_PATH = "/" + nodeAddress.clusterName - val MEMBERSHIP_PATH = CLUSTER_PATH + "/members" - val CONFIGURATION_PATH = CLUSTER_PATH + "/config" - val PROVISIONING_PATH = CLUSTER_PATH + "/provisioning" - val ACTOR_ADDRESS_NODES_TO_PATH = CLUSTER_PATH + "/actor-address-to-nodes" - val ACTOR_ADDRESS_REGISTRY_PATH = CLUSTER_PATH + "/actor-address-registry" - val ACTOR_UUID_REGISTRY_PATH = CLUSTER_PATH + "/actor-uuid-registry" - val ACTOR_ADDRESS_TO_UUIDS_PATH = CLUSTER_PATH + "/actor-address-to-uuids" - val NODE_TO_ACTOR_UUIDS_PATH = CLUSTER_PATH + "/node-to-actors-uuids" - val NODE_METRICS = CLUSTER_PATH + "/metrics" - - val basePaths = List( - CLUSTER_PATH, - MEMBERSHIP_PATH, - ACTOR_ADDRESS_REGISTRY_PATH, - ACTOR_UUID_REGISTRY_PATH, - ACTOR_ADDRESS_NODES_TO_PATH, - NODE_TO_ACTOR_UUIDS_PATH, - ACTOR_ADDRESS_TO_UUIDS_PATH, - CONFIGURATION_PATH, - PROVISIONING_PATH, - NODE_METRICS) - - val LEADER_ELECTION_PATH = CLUSTER_PATH + "/leader" // should NOT be part of 'basePaths' only used by 'leaderLock' - - private val membershipNodePath = membershipPathFor(nodeAddress.nodeName) - - def membershipNodes: Array[String] = locallyCachedMembershipNodes.toList.toArray.asInstanceOf[Array[String]] - - // zookeeper listeners - private val stateListener = new StateListener(this) - private val membershipListener = new MembershipChildListener(this) - - // cluster node listeners - private val changeListeners = new 
CopyOnWriteArrayList[ChangeListener]() - - // Address -> ClusterActorRef - private[akka] val clusterActorRefs = new Index[InetSocketAddress, ClusterActorRef] - - case class VersionedConnectionState(version: Long, connections: Map[String, Tuple2[InetSocketAddress, ActorRef]]) - - // all the connections to other nodes - private[akka] val nodeConnections = { - var conns = Map.empty[String, Tuple2[InetSocketAddress, ActorRef]] - // add the remote connection to 'this' node as well, but as a 'local' actor - if (includeRefNodeInReplicaSet) conns += (nodeAddress.nodeName -> (remoteServerAddress, remoteDaemon)) - new AtomicReference[VersionedConnectionState](VersionedConnectionState(0, conns)) - } - - private val isShutdownFlag = new AtomicBoolean(false) - - // ZooKeeper client - private[cluster] val zkClient = new AkkaZkClient(zkServerAddresses, sessionTimeout, connectionTimeout, serializer) - - // leader election listener, registered to the 'leaderLock' below - private[cluster] val leaderElectionCallback = new LockListener { - override def lockAcquired() { - EventHandler.info(this, "Node [%s] is the new leader".format(self.nodeAddress.nodeName)) - self.publish(NewLeader(self.nodeAddress.nodeName)) - } - - override def lockReleased() { - EventHandler.info(this, "Node [%s] is *NOT* the leader anymore".format(self.nodeAddress.nodeName)) - } - } - - // leader election lock in ZooKeeper - private[cluster] val leaderLock = new WriteLock( - zkClient.connection.getZookeeper, - LEADER_ELECTION_PATH, null, - leaderElectionCallback) - - if (enableJMX) createMBean - - boot() - - // ======================================= - // Node - // ======================================= - - private[cluster] def boot() { - EventHandler.info(this, - ("\nCreating cluster node with" + - "\n\tcluster name = [%s]" + - "\n\tnode name = [%s]" + - "\n\tport = [%s]" + - "\n\tzookeeper server addresses = [%s]" + - "\n\tserializer = [%s]") - .format(nodeAddress.clusterName, nodeAddress.nodeName, port, zkServerAddresses, serializer)) - EventHandler.info(this, "Starting up remote server [%s]".format(remoteServerAddress.toString)) - createZooKeeperPathStructureIfNeeded() - registerListeners() - joinCluster() - joinLeaderElection() - fetchMembershipNodes() - EventHandler.info(this, "Cluster node [%s] started successfully".format(nodeAddress)) - } - - def isShutdown = isShutdownFlag.get - - def start() {} - - def shutdown() { - isShutdownFlag.set(true) - - def shutdownNode() { - ignore[ZkNoNodeException](zkClient.deleteRecursive(membershipNodePath)) - - locallyCachedMembershipNodes.clear() - - nodeConnections.get.connections.toList.foreach({ - case (_, (address, _)) ⇒ - Actor.remote.shutdownClientConnection(address) // shut down client connections - }) - - remoteService.shutdown() // shutdown server - - RemoteFailureDetector.sender.stop() - remoteClientLifeCycleHandler.stop() - remoteDaemon.stop() - - // for monitoring remote listener - registry.local.actors.filter(remoteService.hasListener).foreach(_.stop()) - - nodeConnections.set(VersionedConnectionState(0, Map.empty[String, Tuple2[InetSocketAddress, ActorRef]])) - - disconnect() - EventHandler.info(this, "Cluster node shut down [%s]".format(nodeAddress)) - } - - shutdownNode() - } - - def disconnect(): ClusterNode = { - zkClient.unsubscribeAll() - zkClient.close() - this - } - - def reconnect(): ClusterNode = { - zkClient.reconnect() - this - } - - // ======================================= - // Change notification - // ======================================= - - /** - * Registers a 
cluster change listener. - */ - def register(listener: ChangeListener): ClusterNode = { - changeListeners.add(listener) - this - } - - private[cluster] def publish(change: ChangeNotification) { - changeListeners.iterator.foreach(_.notify(change, this)) - } - - // ======================================= - // Leader - // ======================================= - - /** - * Returns the name of the current leader lock. - */ - def leader: String = leaderLock.getId - - /** - * Returns true if 'this' node is the current leader. - */ - def isLeader: Boolean = leaderLock.isOwner - - /** - * Explicitly resign from being a leader. If this node is not a leader then this operation is a no-op. - */ - def resign() { - if (isLeader) leaderLock.unlock() - } - - // ======================================= - // Actor - // ======================================= - - /** - * Clusters an actor of a specific type. If the actor is already clustered then the clustered version will be updated - * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly - * available durable store. - */ - def store[T <: Actor](actorAddress: String, actorClass: Class[T], serializer: Serializer): ClusterNode = - store(actorAddress, () ⇒ Actor.actorOf(actorClass, actorAddress), 0, Transient, false, serializer) - - /** - * Clusters an actor of a specific type. If the actor is already clustered then the clustered version will be updated - * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly - * available durable store. - */ - def store[T <: Actor](actorAddress: String, actorClass: Class[T], replicationScheme: ReplicationScheme, serializer: Serializer): ClusterNode = - store(actorAddress, () ⇒ Actor.actorOf(actorClass, actorAddress), 0, replicationScheme, false, serializer) - - /** - * Clusters an actor of a specific type. If the actor is already clustered then the clustered version will be updated - * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly - * available durable store. - */ - def store[T <: Actor](actorAddress: String, actorClass: Class[T], nrOfInstances: Int, serializer: Serializer): ClusterNode = - store(actorAddress, () ⇒ Actor.actorOf(actorClass, actorAddress), nrOfInstances, Transient, false, serializer) - - /** - * Clusters an actor of a specific type. If the actor is already clustered then the clustered version will be updated - * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly - * available durable store. - */ - def store[T <: Actor](actorAddress: String, actorClass: Class[T], nrOfInstances: Int, replicationScheme: ReplicationScheme, serializer: Serializer): ClusterNode = - store(actorAddress, () ⇒ Actor.actorOf(actorClass, actorAddress), nrOfInstances, replicationScheme, false, serializer) - - /** - * Clusters an actor of a specific type. If the actor is already clustered then the clustered version will be updated - * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly - * available durable store. - */ - def store[T <: Actor](actorAddress: String, actorClass: Class[T], serializeMailbox: Boolean, serializer: Serializer): ClusterNode = - store(actorAddress, () ⇒ Actor.actorOf(actorClass, actorAddress), 0, Transient, serializeMailbox, serializer) - - /** - * Clusters an actor of a specific type. 
If the actor is already clustered then the clustered version will be updated - * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly - * available durable store. - */ - def store[T <: Actor](actorAddress: String, actorClass: Class[T], replicationScheme: ReplicationScheme, serializeMailbox: Boolean, serializer: Serializer): ClusterNode = - store(actorAddress, () ⇒ Actor.actorOf(actorClass, actorAddress), 0, replicationScheme, serializeMailbox, serializer) - - /** - * Clusters an actor of a specific type. If the actor is already clustered then the clustered version will be updated - * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly - * available durable store. - */ - def store[T <: Actor](actorAddress: String, actorClass: Class[T], nrOfInstances: Int, serializeMailbox: Boolean, serializer: Serializer): ClusterNode = - store(actorAddress, () ⇒ Actor.actorOf(actorClass, actorAddress), nrOfInstances, Transient, serializeMailbox, serializer) - - /** - * Clusters an actor of a specific type. If the actor is already clustered then the clustered version will be updated - * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly - * available durable store. - */ - def store[T <: Actor](actorAddress: String, actorClass: Class[T], nrOfInstances: Int, replicationScheme: ReplicationScheme, serializeMailbox: Boolean, serializer: Serializer): ClusterNode = - store(actorAddress, () ⇒ Actor.actorOf(actorClass, actorAddress), nrOfInstances, replicationScheme, serializeMailbox, serializer) - - /** - * Clusters an actor with UUID. If the actor is already clustered then the clustered version will be updated - * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly - * available durable store. - */ - def store(actorAddress: String, actorFactory: () ⇒ ActorRef, serializer: Serializer): ClusterNode = - store(actorAddress, actorFactory, 0, Transient, false, serializer) - - /** - * Clusters an actor with UUID. If the actor is already clustered then the clustered version will be updated - * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly - * available durable store. - */ - def store(actorAddress: String, actorFactory: () ⇒ ActorRef, serializeMailbox: Boolean, serializer: Serializer): ClusterNode = - store(actorAddress, actorFactory, 0, Transient, serializeMailbox, serializer) - - /** - * Clusters an actor with UUID. If the actor is already clustered then the clustered version will be updated - * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly - * available durable store. - */ - def store(actorAddress: String, actorFactory: () ⇒ ActorRef, replicationScheme: ReplicationScheme, serializer: Serializer): ClusterNode = - store(actorAddress, actorFactory, 0, replicationScheme, false, serializer) - - /** - * Clusters an actor with UUID. If the actor is already clustered then the clustered version will be updated - * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly - * available durable store. - */ - def store(actorAddress: String, actorFactory: () ⇒ ActorRef, nrOfInstances: Int, serializer: Serializer): ClusterNode = - store(actorAddress, actorFactory, nrOfInstances, Transient, false, serializer) - - /** - * Clusters an actor with UUID. 
If the actor is already clustered then the clustered version will be updated - * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly - * available durable store. - */ - def store(actorAddress: String, actorFactory: () ⇒ ActorRef, nrOfInstances: Int, replicationScheme: ReplicationScheme, serializer: Serializer): ClusterNode = - store(actorAddress, actorFactory, nrOfInstances, replicationScheme, false, serializer) - - /** - * Clusters an actor with UUID. If the actor is already clustered then the clustered version will be updated - * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly - * available durable store. - */ - def store(actorAddress: String, actorFactory: () ⇒ ActorRef, nrOfInstances: Int, serializeMailbox: Boolean, serializer: Serializer): ClusterNode = - store(actorAddress, actorFactory, nrOfInstances, Transient, serializeMailbox, serializer) - - /** - * Clusters an actor with UUID. If the actor is already clustered then the clustered version will be updated - * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly - * available durable store. - */ - def store(actorAddress: String, actorFactory: () ⇒ ActorRef, replicationScheme: ReplicationScheme, serializeMailbox: Boolean, serializer: Serializer): ClusterNode = - store(actorAddress, actorFactory, 0, replicationScheme, serializeMailbox, serializer) - - /** - * Needed to have reflection through structural typing work. - */ - def store(actorAddress: String, actorFactory: () ⇒ ActorRef, nrOfInstances: Int, replicationScheme: ReplicationScheme, serializeMailbox: Boolean, serializer: AnyRef): ClusterNode = - store(actorAddress, actorFactory, nrOfInstances, replicationScheme, serializeMailbox, serializer.asInstanceOf[Serializer]) - - /** - * Needed to have reflection through structural typing work. - */ - def store(actorAddress: String, actorFactory: () ⇒ ActorRef, nrOfInstances: Int, serializeMailbox: Boolean, serializer: AnyRef): ClusterNode = - store(actorAddress, actorFactory, nrOfInstances, Transient, serializeMailbox, serializer) - - /** - * Clusters an actor. If the actor is already clustered then the clustered version will be updated - * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly - * available durable store. 
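(Editorial aside on the family of store overloads above, all of which funnel into the primary implementation that follows: a hedged usage sketch; node, HelloActor and javaSerializer are hypothetical names, not taken from this file.)

    // cluster three instances of an actor under a well-known address;
    // the factory closure is serialized and written into ZooKeeper
    node.store("service-hello", () ⇒ Actor.actorOf(classOf[HelloActor], "service-hello"), 3, javaSerializer)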
- */ - def store( - actorAddress: String, - actorFactory: () ⇒ ActorRef, - nrOfInstances: Int, - replicationScheme: ReplicationScheme, - serializeMailbox: Boolean, - serializer: Serializer): ClusterNode = { - - EventHandler.debug(this, - "Storing actor with address [%s] in cluster".format(actorAddress)) - - val actorFactoryBytes = - Serialization.serialize(actorFactory) match { - case Left(error) ⇒ throw error - case Right(bytes) ⇒ - if (shouldCompressData) LZF.compress(bytes) - else bytes - } - - val actorAddressRegistryPath = actorAddressRegistryPathFor(actorAddress) - - // create ADDRESS -> Array[Byte] for actor registry - try { - zkClient.writeData(actorAddressRegistryPath, actorFactoryBytes) - } catch { - case e: ZkNoNodeException ⇒ // if not stored yet, store the actor - zkClient.retryUntilConnected(new Callable[Either[String, Exception]]() { - def call: Either[String, Exception] = { - try { - Left(zkClient.connection.create(actorAddressRegistryPath, actorFactoryBytes, CreateMode.PERSISTENT)) - } catch { - case e: KeeperException.NodeExistsException ⇒ Right(e) - } - } - }) match { - case Left(path) ⇒ path - case Right(exception) ⇒ actorAddressRegistryPath - } - } - - // create ADDRESS -> SERIALIZER CLASS NAME mapping - try { - zkClient.createPersistent(actorAddressRegistrySerializerPathFor(actorAddress), serializer.identifier.toString) - } catch { - case e: ZkNodeExistsException ⇒ zkClient.writeData(actorAddressRegistrySerializerPathFor(actorAddress), serializer.identifier.toString) - } - - // create ADDRESS -> NODE mapping - ignore[ZkNodeExistsException](zkClient.createPersistent(actorAddressToNodesPathFor(actorAddress))) - - // create ADDRESS -> UUIDs mapping - ignore[ZkNodeExistsException](zkClient.createPersistent(actorAddressToUuidsPathFor(actorAddress))) - - useActorOnNodes(nodesForNrOfInstances(nrOfInstances, Some(actorAddress)).toArray, actorAddress) - - this - } - - /** - * Removes actor from the cluster. - */ - // def remove(actorRef: ActorRef) { - // remove(actorRef.address) - // } - - /** - * Removes actor with uuid from the cluster. - */ - // def remove(actorAddress: String) { - // releaseActorOnAllNodes(actorAddress) - // // warning: ordering matters here - // // FIXME remove ADDRESS to UUID mapping? - // ignore[ZkNoNodeException](zkClient.deleteRecursive(actorAddressToUuidsPathFor(actorAddress))) - // ignore[ZkNoNodeException](zkClient.deleteRecursive(actorAddressRegistryPathFor(actorAddress))) - // ignore[ZkNoNodeException](zkClient.deleteRecursive(actorAddressToNodesPathFor(actorAddress))) - // } - - /** - * Is the actor with uuid clustered or not? - */ - def isClustered(actorAddress: String): Boolean = zkClient.exists(actorAddressRegistryPathFor(actorAddress)) - - /** - * Is the actor with uuid in use on 'this' node or not? - */ - def isInUseOnNode(actorAddress: String): Boolean = isInUseOnNode(actorAddress, nodeAddress) - - /** - * Is the actor with uuid in use or not? - */ - def isInUseOnNode(actorAddress: String, node: NodeAddress): Boolean = zkClient.exists(actorAddressToNodesPathFor(actorAddress, node.nodeName)) - - /** - * Is the actor with uuid in use or not? - */ - def isInUseOnNode(actorAddress: String, nodeName: String): Boolean = zkClient.exists(actorAddressToNodesPathFor(actorAddress, nodeName)) - - /** - * Checks out an actor for use on this node, e.g. checked out as a 'LocalActorRef' but it makes it available - * for remote access through lookup by its UUID. 
- */ - def use[T <: Actor](actorAddress: String): Option[LocalActorRef] = { - val nodeName = nodeAddress.nodeName - - val actorFactoryPath = actorAddressRegistryPathFor(actorAddress) - zkClient.retryUntilConnected(new Callable[Either[Exception, () ⇒ LocalActorRef]]() { - def call: Either[Exception, () ⇒ LocalActorRef] = { - try { - - val actorFactoryBytes = - if (shouldCompressData) LZF.uncompress(zkClient.connection.readData(actorFactoryPath, new Stat, false)) - else zkClient.connection.readData(actorFactoryPath, new Stat, false) - - val actorFactory = - Serialization.deserialize(actorFactoryBytes, classOf[() ⇒ LocalActorRef], None) match { - case Left(error) ⇒ throw error - case Right(instance) ⇒ instance.asInstanceOf[() ⇒ LocalActorRef] - } - - Right(actorFactory) - } catch { - case e: KeeperException.NoNodeException ⇒ Left(e) - } - } - }) match { - case Left(exception) ⇒ throw exception - case Right(actorFactory) ⇒ - val actorRef = actorFactory() - - EventHandler.debug(this, - "Checking out actor [%s] to be used on node [%s] as local actor" - .format(actorAddress, nodeName)) - - val uuid = actorRef.uuid - - // create UUID registry - ignore[ZkNodeExistsException](zkClient.createPersistent(actorUuidRegistryPathFor(uuid))) - - // create UUID -> NODE mapping - try { - zkClient.createPersistent(actorUuidRegistryNodePathFor(uuid), nodeName) - } catch { - case e: ZkNodeExistsException ⇒ zkClient.writeData(actorUuidRegistryNodePathFor(uuid), nodeName) - } - - // create UUID -> ADDRESS - try { - zkClient.createPersistent(actorUuidRegistryAddressPathFor(uuid), actorAddress) - } catch { - case e: ZkNodeExistsException ⇒ zkClient.writeData(actorUuidRegistryAddressPathFor(uuid), actorAddress) - } - - // create UUID -> REMOTE ADDRESS (InetSocketAddress) mapping - try { - zkClient.createPersistent(actorUuidRegistryRemoteAddressPathFor(uuid), remoteServerAddress) - } catch { - case e: ZkNodeExistsException ⇒ zkClient.writeData(actorUuidRegistryRemoteAddressPathFor(uuid), remoteServerAddress) - } - - // create ADDRESS -> UUID mapping - try { - zkClient.createPersistent(actorAddressRegistryUuidPathFor(actorAddress), uuid) - } catch { - case e: ZkNodeExistsException ⇒ zkClient.writeData(actorAddressRegistryUuidPathFor(actorAddress), uuid) - } - - // create NODE -> UUID mapping - ignore[ZkNodeExistsException](zkClient.createPersistent(nodeToUuidsPathFor(nodeName, uuid), true)) - - // create ADDRESS -> UUIDs mapping - ignore[ZkNodeExistsException](zkClient.createPersistent(actorAddressToUuidsPathFor(actorAddress, uuid))) - - // create ADDRESS -> NODE mapping - ignore[ZkNodeExistsException](zkClient.createPersistent(actorAddressToNodesPathFor(actorAddress, nodeName))) - - actorRef - } - } - - /** - * Using (checking out) actor on a specific set of nodes. 
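(Editorial aside on use above and release further below: checking an actor out yields a LocalActorRef that is simultaneously registered in ZooKeeper, so other nodes can look it up by UUID. A hedged sketch of the check-out/check-in cycle, with node again a hypothetical running DefaultClusterNode.)

    node.use[Actor]("service-hello") foreach { actor ⇒
      actor ! "hallo" // runs locally, but is now also reachable remotely via its UUID registry entries
    }
    node.release("service-hello") // check the actor back in when this node is done with it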
- */ - def useActorOnNodes(nodes: Array[String], actorAddress: String, replicateFromUuid: Option[UUID] = None) { - EventHandler.debug(this, - "Sending command to nodes [%s] for checking out actor [%s]".format(nodes.mkString(", "), actorAddress)) - - val builder = RemoteSystemDaemonMessageProtocol.newBuilder - .setMessageType(USE) - .setActorAddress(actorAddress) - - // set the UUID to replicated from - if available - replicateFromUuid foreach (uuid ⇒ builder.setReplicateActorFromUuid(uuidToUuidProtocol(uuid))) - - val command = builder.build - - nodes foreach { node ⇒ - nodeConnections.get.connections(node) foreach { - case (address, connection) ⇒ - sendCommandToNode(connection, command, async = false) - } - } - } - - /** - * Using (checking out) actor on all nodes in the cluster. - */ - def useActorOnAllNodes(actorAddress: String, replicateFromUuid: Option[UUID] = None) { - useActorOnNodes(membershipNodes, actorAddress, replicateFromUuid) - } - - /** - * Using (checking out) actor on a specific node. - */ - def useActorOnNode(node: String, actorAddress: String, replicateFromUuid: Option[UUID] = None) { - useActorOnNodes(Array(node), actorAddress, replicateFromUuid) - } - - /** - * Checks in an actor after done using it on this node. - */ - def release(actorRef: ActorRef) { - release(actorRef.address) - } - - /** - * Checks in an actor after done using it on this node. - */ - def release(actorAddress: String) { - - // FIXME 'Cluster.release' needs to notify all existing ClusterActorRef's that are using the instance that it is no - // longer available. Then what to do? Should we even remove this method? - - ignore[ZkNoNodeException](zkClient.delete(actorAddressToNodesPathFor(actorAddress, nodeAddress.nodeName))) - - uuidsForActorAddress(actorAddress) foreach { uuid ⇒ - EventHandler.debug(this, - "Releasing actor [%s] with UUID [%s] after usage".format(actorAddress, uuid)) - - ignore[ZkNoNodeException](zkClient.deleteRecursive(nodeToUuidsPathFor(nodeAddress.nodeName, uuid))) - ignore[ZkNoNodeException](zkClient.delete(actorUuidRegistryRemoteAddressPathFor(uuid))) - } - } - - /** - * Releases (checking in) all actors with a specific address on all nodes in the cluster where the actor is in 'use'. - */ - private[akka] def releaseActorOnAllNodes(actorAddress: String) { - EventHandler.debug(this, - "Releasing (checking in) all actors with address [%s] on all nodes in cluster".format(actorAddress)) - - val command = RemoteSystemDaemonMessageProtocol.newBuilder - .setMessageType(RELEASE) - .setActorAddress(actorAddress) - .build - - nodesForActorsInUseWithAddress(actorAddress) foreach { node ⇒ - nodeConnections.get.connections(node) foreach { - case (_, connection) ⇒ sendCommandToNode(connection, command, async = true) - } - } - } - - /** - * Creates an ActorRef with a Router to a set of clustered actors. - */ - def ref(actorAddress: String, router: RouterType, failureDetector: FailureDetectorType): ActorRef = - ClusterActorRef.newRef(actorAddress, router, failureDetector, Actor.TIMEOUT) - - /** - * Returns the UUIDs of all actors checked out on this node. - */ - private[akka] def uuidsForActorsInUse: Array[UUID] = uuidsForActorsInUseOnNode(nodeAddress.nodeName) - - /** - * Returns the addresses of all actors checked out on this node. - */ - def addressesForActorsInUse: Array[String] = actorAddressForUuids(uuidsForActorsInUse) - - /** - * Returns the UUIDs of all actors registered in this cluster. 
- */ - private[akka] def uuidsForClusteredActors: Array[UUID] = - zkClient.getChildren(ACTOR_UUID_REGISTRY_PATH).toList.map(new UUID(_)).toArray.asInstanceOf[Array[UUID]] - - /** - * Returns the addresses of all actors registered in this cluster. - */ - def addressesForClusteredActors: Array[String] = actorAddressForUuids(uuidsForClusteredActors) - - /** - * Returns the actor id for the actor with a specific UUID. - */ - private[akka] def actorAddressForUuid(uuid: UUID): Option[String] = { - try { - Some(zkClient.readData(actorUuidRegistryAddressPathFor(uuid)).asInstanceOf[String]) - } catch { - case e: ZkNoNodeException ⇒ None - } - } - - /** - * Returns the actor ids for all the actors with a specific UUID. - */ - private[akka] def actorAddressForUuids(uuids: Array[UUID]): Array[String] = - uuids map (actorAddressForUuid(_)) filter (_.isDefined) map (_.get) - - /** - * Returns the actor UUIDs for actor ID. - */ - private[akka] def uuidsForActorAddress(actorAddress: String): Array[UUID] = { - try { - zkClient.getChildren(actorAddressToUuidsPathFor(actorAddress)).toList.toArray map { - case c: CharSequence ⇒ new UUID(c) - } filter (_ ne null) - } catch { - case e: ZkNoNodeException ⇒ Array[UUID]() - } - } - - /** - * Returns the node names of all actors in use with UUID. - */ - private[akka] def nodesForActorsInUseWithAddress(actorAddress: String): Array[String] = { - try { - zkClient.getChildren(actorAddressToNodesPathFor(actorAddress)).toList.toArray.asInstanceOf[Array[String]] - } catch { - case e: ZkNoNodeException ⇒ Array[String]() - } - } - - /** - * Returns the UUIDs of all actors in use registered on a specific node. - */ - private[akka] def uuidsForActorsInUseOnNode(nodeName: String): Array[UUID] = { - try { - zkClient.getChildren(nodeToUuidsPathFor(nodeName)).toList.toArray map { - case c: CharSequence ⇒ new UUID(c) - } filter (_ ne null) - } catch { - case e: ZkNoNodeException ⇒ Array[UUID]() - } - } - - /** - * Returns the addresses of all actors in use registered on a specific node. - */ - def addressesForActorsInUseOnNode(nodeName: String): Array[String] = { - val uuids = - try { - zkClient.getChildren(nodeToUuidsPathFor(nodeName)).toList.toArray map { - case c: CharSequence ⇒ new UUID(c) - } filter (_ ne null) - } catch { - case e: ZkNoNodeException ⇒ Array[UUID]() - } - actorAddressForUuids(uuids) - } - - /** - * Returns Serializer for actor with specific address. - */ - def serializerForActor(actorAddress: String): Serializer = try { - Serialization.serializerByIdentity(zkClient.readData(actorAddressRegistrySerializerPathFor(actorAddress), new Stat).asInstanceOf[String].toByte) - } catch { - case e: ZkNoNodeException ⇒ throw new IllegalStateException("No serializer found for actor with address [%s]".format(actorAddress)) - } - - /** - * Returns addresses for nodes that the clustered actor is in use on. 
- */ - def inetSocketAddressesForActor(actorAddress: String): Array[(UUID, InetSocketAddress)] = { - try { - for { - uuid ← uuidsForActorAddress(actorAddress) - } yield { - val remoteAddress = zkClient.readData(actorUuidRegistryRemoteAddressPathFor(uuid)).asInstanceOf[InetSocketAddress] - (uuid, remoteAddress) - } - } catch { - case e: ZkNoNodeException ⇒ - EventHandler.warning(this, - "Could not retrieve remote socket address for node hosting actor [%s] due to: %s" - .format(actorAddress, e.toString)) - Array[(UUID, InetSocketAddress)]() - } - } - - // ======================================= - // Compute Grid - // ======================================= - - /** - * Send a function 'Function0[Unit]' to be invoked on a random number of nodes (defined by 'nrOfInstances' argument). - */ - def send(f: Function0[Unit], nrOfInstances: Int) { - Serialization.serialize(f) match { - case Left(error) ⇒ throw error - case Right(bytes) ⇒ - val message = RemoteSystemDaemonMessageProtocol.newBuilder - .setMessageType(FUNCTION_FUN0_UNIT) - .setPayload(ByteString.copyFrom(bytes)) - .build - nodeConnectionsForNrOfInstances(nrOfInstances) foreach (_ ! message) - } - } - - /** - * Send a function 'Function0[Any]' to be invoked on a random number of nodes (defined by 'nrOfInstances' argument). - * Returns an 'Array' with all the 'Future's from the computation. - */ - def send(f: Function0[Any], nrOfInstances: Int): List[Future[Any]] = { - Serialization.serialize(f) match { - case Left(error) ⇒ throw error - case Right(bytes) ⇒ - val message = RemoteSystemDaemonMessageProtocol.newBuilder - .setMessageType(FUNCTION_FUN0_ANY) - .setPayload(ByteString.copyFrom(bytes)) - .build - val results = nodeConnectionsForNrOfInstances(nrOfInstances) map (_ ? message) - results.toList.asInstanceOf[List[Future[Any]]] - } - } - - /** - * Send a function 'Function1[Any, Unit]' to be invoked on a random number of nodes (defined by 'nrOfInstances' argument) - * with the argument speficied. - */ - def send(f: Function1[Any, Unit], arg: Any, nrOfInstances: Int) { - Serialization.serialize((f, arg)) match { - case Left(error) ⇒ throw error - case Right(bytes) ⇒ - val message = RemoteSystemDaemonMessageProtocol.newBuilder - .setMessageType(FUNCTION_FUN1_ARG_UNIT) - .setPayload(ByteString.copyFrom(bytes)) - .build - nodeConnectionsForNrOfInstances(nrOfInstances) foreach (_ ! message) - } - } - - /** - * Send a function 'Function1[Any, Any]' to be invoked on a random number of nodes (defined by 'nrOfInstances' argument) - * with the argument speficied. - * Returns an 'Array' with all the 'Future's from the computation. - */ - def send(f: Function1[Any, Any], arg: Any, nrOfInstances: Int): List[Future[Any]] = { - Serialization.serialize((f, arg)) match { - case Left(error) ⇒ throw error - case Right(bytes) ⇒ - val message = RemoteSystemDaemonMessageProtocol.newBuilder - .setMessageType(FUNCTION_FUN1_ARG_ANY) - .setPayload(ByteString.copyFrom(bytes)) - .build - val results = nodeConnectionsForNrOfInstances(nrOfInstances) map (_ ? message) - results.toList.asInstanceOf[List[Future[Any]]] - } - } - - // ======================================= - // Config - // ======================================= - - /** - * Stores a configuration element under a specific key. - * If the key already exists then it will be overwritten. 
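A sketch of how the config-element operations below compose (method names as in the deleted code; the key and value are invented):

    val node = Cluster.node

    // create the entry, or overwrite it if the key already exists
    node.setConfigElement("app-mode", "primary".getBytes("UTF-8"))

    // read it back: Some(bytes) if present, None otherwise
    // (the stored bytes are LZF-compressed when 'shouldCompressData' is on)
    node.getConfigElement("app-mode") foreach { bytes ⇒
      println("app-mode = " + new String(bytes, "UTF-8"))
    }

    // removal silently ignores a missing key
    node.removeConfigElement("app-mode")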
- */ - def setConfigElement(key: String, bytes: Array[Byte]) { - val compressedBytes = if (shouldCompressData) LZF.compress(bytes) else bytes - EventHandler.debug(this, - "Adding config value [%s] under key [%s] in cluster registry".format(key, compressedBytes)) - zkClient.retryUntilConnected(new Callable[Either[Unit, Exception]]() { - def call: Either[Unit, Exception] = { - try { - Left(zkClient.connection.create(configurationPathFor(key), compressedBytes, CreateMode.PERSISTENT)) - } catch { - case e: KeeperException.NodeExistsException ⇒ - try { - Left(zkClient.connection.writeData(configurationPathFor(key), compressedBytes)) - } catch { - case e: Exception ⇒ Right(e) - } - } - } - }) match { - case Left(_) ⇒ /* do nothing */ - case Right(exception) ⇒ throw exception - } - } - - /** - * Returns the config element for the key or NULL if no element exists under the key. - * Returns Some(element) if it exists else None - */ - def getConfigElement(key: String): Option[Array[Byte]] = try { - Some(zkClient.connection.readData(configurationPathFor(key), new Stat, true)) - } catch { - case e: KeeperException.NoNodeException ⇒ None - } - - /** - * Removes configuration element for a specific key. - * Does nothing if the key does not exist. - */ - def removeConfigElement(key: String) { - ignore[ZkNoNodeException] { - EventHandler.debug(this, - "Removing config element with key [%s] from cluster registry".format(key)) - zkClient.deleteRecursive(configurationPathFor(key)) - } - } - - /** - * Returns a list with all config element keys. - */ - def getConfigElementKeys: Array[String] = zkClient.getChildren(CONFIGURATION_PATH).toList.toArray.asInstanceOf[Array[String]] - - // ======================================= - // Private - // ======================================= - - private def sendCommandToNode(connection: ActorRef, command: RemoteSystemDaemonMessageProtocol, async: Boolean = true) { - if (async) { - connection ! command - } else { - try { - Await.result(connection ? 
(command, remoteDaemonAckTimeout), 10 seconds).asInstanceOf[Status] match { - case Success(status) ⇒ - EventHandler.debug(this, "Remote command sent to [%s] successfully received".format(status)) - case Failure(cause) ⇒ - EventHandler.error(cause, this, cause.toString) - throw cause - } - } catch { - case e: TimeoutException => - EventHandler.error(e, this, "Remote command to [%s] timed out".format(connection.address)) - throw e - case e: Exception ⇒ - EventHandler.error(e, this, "Could not send remote command to [%s] due to: %s".format(connection.address, e.toString)) - throw e - } - } - } - - private[cluster] def membershipPathFor(node: String): String = "%s/%s".format(MEMBERSHIP_PATH, node) - - private[cluster] def configurationPathFor(key: String): String = "%s/%s".format(CONFIGURATION_PATH, key) - - private[cluster] def actorAddressToNodesPathFor(actorAddress: String): String = "%s/%s".format(ACTOR_ADDRESS_NODES_TO_PATH, actorAddress) - - private[cluster] def actorAddressToNodesPathFor(actorAddress: String, nodeName: String): String = "%s/%s".format(actorAddressToNodesPathFor(actorAddress), nodeName) - - private[cluster] def nodeToUuidsPathFor(node: String): String = "%s/%s".format(NODE_TO_ACTOR_UUIDS_PATH, node) - - private[cluster] def nodeToUuidsPathFor(node: String, uuid: UUID): String = "%s/%s/%s".format(NODE_TO_ACTOR_UUIDS_PATH, node, uuid) - - private[cluster] def actorAddressRegistryPathFor(actorAddress: String): String = "%s/%s".format(ACTOR_ADDRESS_REGISTRY_PATH, actorAddress) - - private[cluster] def actorAddressRegistrySerializerPathFor(actorAddress: String): String = "%s/%s".format(actorAddressRegistryPathFor(actorAddress), "serializer") - - private[cluster] def actorAddressRegistryUuidPathFor(actorAddress: String): String = "%s/%s".format(actorAddressRegistryPathFor(actorAddress), "uuid") - - private[cluster] def actorUuidRegistryPathFor(uuid: UUID): String = "%s/%s".format(ACTOR_UUID_REGISTRY_PATH, uuid) - - private[cluster] def actorUuidRegistryNodePathFor(uuid: UUID): String = "%s/%s".format(actorUuidRegistryPathFor(uuid), "node") - - private[cluster] def actorUuidRegistryAddressPathFor(uuid: UUID): String = "%s/%s".format(actorUuidRegistryPathFor(uuid), "address") - - private[cluster] def actorUuidRegistryRemoteAddressPathFor(uuid: UUID): String = "%s/%s".format(actorUuidRegistryPathFor(uuid), "remote-address") - - private[cluster] def actorAddressToUuidsPathFor(actorAddress: String): String = "%s/%s".format(ACTOR_ADDRESS_TO_UUIDS_PATH, actorAddress.replace('.', '_')) - - private[cluster] def actorAddressToUuidsPathFor(actorAddress: String, uuid: UUID): String = "%s/%s".format(actorAddressToUuidsPathFor(actorAddress), uuid) - - /** - * Returns a random set with node names of size 'nrOfInstances'. - * Default nrOfInstances is 0, which returns the empty Set. 
- */ - private def nodesForNrOfInstances(nrOfInstances: Int = 0, actorAddress: Option[String] = None): Set[String] = { - var replicaNames = Set.empty[String] - val nrOfClusterNodes = nodeConnections.get.connections.size - - if (nrOfInstances < 1) return replicaNames - if (nrOfClusterNodes < nrOfInstances) throw new IllegalArgumentException( - "Replication factor [" + nrOfInstances + - "] is greater than the number of available nodeNames [" + nrOfClusterNodes + "]") - - val preferredNodes = - if (actorAddress.isDefined) { - // use 'preferred-nodes' in deployment config for the actor - Deployer.deploymentFor(actorAddress.get) match { - case Deploy(_, _, _, _, Cluster(nodes, _, _)) ⇒ - nodes map (node ⇒ DeploymentConfig.nodeNameFor(node)) take nrOfInstances - case _ ⇒ - throw new ClusterException("Actor [" + actorAddress.get + "] is not configured as clustered") - } - } else Vector.empty[String] - - for { - nodeName ← preferredNodes - key ← nodeConnections.get.connections.keys - if key == nodeName - } replicaNames = replicaNames + nodeName - - val nrOfCurrentReplicaNames = replicaNames.size - - val replicaSet = - if (nrOfCurrentReplicaNames > nrOfInstances) throw new IllegalStateException("Replica set is larger than replication factor") - else if (nrOfCurrentReplicaNames == nrOfInstances) replicaNames - else { - val random = new java.util.Random(System.currentTimeMillis) - while (replicaNames.size < nrOfInstances) { - replicaNames = replicaNames + membershipNodes(random.nextInt(nrOfClusterNodes)) - } - replicaNames - } - - EventHandler.debug(this, - "Picked out replica set [%s] for actor [%s]".format(replicaSet.mkString(", "), actorAddress)) - - replicaSet - } - - /** - * Returns a random set with replica connections of size 'nrOfInstances'. - * Default nrOfInstances is 0, which returns the empty Set. - */ - private def nodeConnectionsForNrOfInstances(nrOfInstances: Int = 0, actorAddress: Option[String] = None): Set[ActorRef] = { - for { - node ← nodesForNrOfInstances(nrOfInstances, actorAddress) - connectionOption ← nodeConnections.get.connections(node) - connection ← connectionOption - actorRef ← connection._2 - } yield actorRef - } - - /** - * Update the list of connections to other nodes in the cluster. - * Tail recursive, using lockless optimimistic concurrency. 
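The update loop that follows uses the standard compare-and-set retry idiom. Reduced to its essentials it looks like this (a generic sketch, not code from this file):

    import java.util.concurrent.atomic.AtomicReference
    import scala.annotation.tailrec

    final class Versioned[T](ref: AtomicReference[(Long, T)]) {
      @tailrec
      def update(f: T ⇒ T): T = {
        val old = ref.get
        val (version, value) = old
        val updated = (version + 1, f(value))
        if (ref.compareAndSet(old, updated)) updated._2
        else update(f) // lost the race against a concurrent writer: retry on the fresh state
      }
    }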
- * - * @return a Map with the remote socket addresses to of disconnected node connections - */ - @tailrec - final private[cluster] def connectToAllNewlyArrivedMembershipNodesInCluster( - newlyConnectedMembershipNodes: Traversable[String], - newlyDisconnectedMembershipNodes: Traversable[String]): Map[String, InetSocketAddress] = { - - var change = false - val oldState = nodeConnections.get - - var newConnections = oldState.connections //Map.empty[String, Tuple2[InetSocketAddress, ActorRef]] - - // cache the disconnected connections in a map, needed for fail-over of these connections later - var disconnectedConnections = Map.empty[String, InetSocketAddress] - newlyDisconnectedMembershipNodes foreach { node ⇒ - disconnectedConnections = disconnectedConnections + (node -> (oldState.connections(node) match { - case (address, _) ⇒ address - })) - } - - // remove connections to failed nodes - newlyDisconnectedMembershipNodes foreach { node ⇒ - newConnections = newConnections - node - change = true - } - - // add connections newly arrived nodes - newlyConnectedMembershipNodes foreach { node ⇒ - if (!newConnections.contains(node)) { - - // only connect to each replica once - remoteSocketAddressForNode(node) foreach { address ⇒ - EventHandler.debug(this, "Setting up connection to node with nodename [%s] and address [%s]".format(node, address)) - - val clusterDaemon = remoteService.actorFor( - RemoteClusterDaemon.Address, address.getHostName, address.getPort) - newConnections = newConnections + (node -> (address, clusterDaemon)) - change = true - } - } - } - - // add the remote connection to 'this' node as well, but as a 'local' actor - if (includeRefNodeInReplicaSet) - newConnections = newConnections + (nodeAddress.nodeName -> (remoteServerAddress, remoteDaemon)) - - //there was a state change, so we are now going to update the state. - val newState = new VersionedConnectionState(oldState.version + 1, newConnections) - - if (!nodeConnections.compareAndSet(oldState, newState)) { - // we failed to set the state, try again - connectToAllNewlyArrivedMembershipNodesInCluster( - newlyConnectedMembershipNodes, newlyDisconnectedMembershipNodes) - } else { - // we succeeded to set the state, return - EventHandler.info(this, "Connected to nodes [\n\t%s]".format(newConnections.mkString("\n\t"))) - disconnectedConnections - } - } - - private[cluster] def joinCluster() { - try { - EventHandler.info(this, - "Joining cluster as membership node [%s] on [%s]".format(nodeAddress, membershipNodePath)) - zkClient.createEphemeral(membershipNodePath, remoteServerAddress) - } catch { - case e: ZkNodeExistsException ⇒ - e.printStackTrace - val error = new ClusterException( - "Can't join the cluster. 
The node name [" + nodeAddress.nodeName + "] is already in use by another node.") - EventHandler.error(error, this, error.toString) - throw error - } - ignore[ZkNodeExistsException](zkClient.createPersistent(nodeToUuidsPathFor(nodeAddress.nodeName))) - } - - private[cluster] def joinLeaderElection(): Boolean = { - EventHandler.info(this, "Node [%s] is joining leader election".format(nodeAddress.nodeName)) - try { - leaderLock.lock - } catch { - case e: KeeperException.NodeExistsException ⇒ false - } - } - - private[cluster] def remoteSocketAddressForNode(node: String): Option[InetSocketAddress] = { - try { - Some(zkClient.readData(membershipPathFor(node), new Stat).asInstanceOf[InetSocketAddress]) - } catch { - case e: ZkNoNodeException ⇒ None - } - } - - private[cluster] def failOverClusterActorRefConnections(from: InetSocketAddress, to: InetSocketAddress) { - EventHandler.info(this, "Failing over ClusterActorRef from %s to %s".format(from, to)) - clusterActorRefs.valueIterator(from) foreach (_.failOver(from, to)) - } - - private[cluster] def migrateActorsOnFailedNodes( - failedNodes: List[String], - currentClusterNodes: List[String], - oldClusterNodes: List[String], - disconnectedConnections: Map[String, InetSocketAddress]) { - - failedNodes.foreach { failedNodeName ⇒ - - val failedNodeAddress = NodeAddress(nodeAddress.clusterName, failedNodeName) - - val myIndex = oldClusterNodes.indexWhere(_.endsWith(nodeAddress.nodeName)) - val failedNodeIndex = oldClusterNodes.indexWhere(_ == failedNodeName) - - // Migrate to the successor of the failed node (using a sorted circular list of the node names) - if ((failedNodeIndex == 0 && myIndex == oldClusterNodes.size - 1) || // No leftmost successor exists, check the tail - (failedNodeIndex == myIndex + 1)) { - // Am I the leftmost successor? - - // Takes the lead of migrating the actors. Not all to this node. - // All to this node except if the actor already resides here, then pick another node it is not already on. - - // Yes I am the node to migrate the actor to (can only be one in the cluster) - val actorUuidsForFailedNode = zkClient.getChildren(nodeToUuidsPathFor(failedNodeName)).toList - - actorUuidsForFailedNode.foreach { uuidAsString ⇒ - EventHandler.debug(this, - "Cluster node [%s] has failed, migrating actor with UUID [%s] to [%s]" - .format(failedNodeName, uuidAsString, nodeAddress.nodeName)) - - val uuid = uuidFrom(uuidAsString) - val actorAddress = actorAddressForUuid(uuid).getOrElse( - throw new IllegalStateException("No actor address found for UUID [" + uuidAsString + "]")) - - val migrateToNodeAddress = - if (!isShutdown && isInUseOnNode(actorAddress)) { - // already in use on this node, pick another node to instantiate the actor on - val replicaNodesForActor = nodesForActorsInUseWithAddress(actorAddress) - val nodesAvailableForMigration = (currentClusterNodes.toSet diff failedNodes.toSet) diff replicaNodesForActor.toSet - - if (nodesAvailableForMigration.isEmpty) throw new ClusterException( - "Can not migrate actor to new node since there are not any available nodes left. " + - "(However, the actor already has >1 replica in cluster, so we are ok)") - - NodeAddress(nodeAddress.clusterName, nodesAvailableForMigration.head) - } else { - // actor is not in use on this node, migrate it here - nodeAddress - } - - // if actor is replicated => pass along the UUID for the actor to replicate from (replay transaction log etc.) 
- val replicateFromUuid = - if (isReplicated(actorAddress)) Some(uuid) - else None - - migrateWithoutCheckingThatActorResidesOnItsHomeNode( - failedNodeAddress, - migrateToNodeAddress, - actorAddress, - replicateFromUuid) - } - - // notify all available nodes that they should fail-over all connections from 'from' to 'to' - val from = disconnectedConnections(failedNodeName) - val to = remoteServerAddress - - Serialization.serialize((from, to)) match { - case Left(error) ⇒ throw error - case Right(bytes) ⇒ - - val command = RemoteSystemDaemonMessageProtocol.newBuilder - .setMessageType(FAIL_OVER_CONNECTIONS) - .setPayload(ByteString.copyFrom(bytes)) - .build - - // FIXME now we are broadcasting to ALL nodes in the cluster even though a fraction might have a reference to the actors - should that be fixed? - nodeConnections.get.connections.values foreach { - case (_, connection) ⇒ sendCommandToNode(connection, command, async = true) - } - } - } - } - } - - /** - * Used when the ephemeral "home" node is already gone, so we can't check if it is available. - */ - private def migrateWithoutCheckingThatActorResidesOnItsHomeNode( - from: NodeAddress, to: NodeAddress, actorAddress: String, replicateFromUuid: Option[UUID]) { - - EventHandler.debug(this, "Migrating actor [%s] from node [%s] to node [%s]".format(actorAddress, from, to)) - if (!isInUseOnNode(actorAddress, to) && !isShutdown) { - release(actorAddress) - - val remoteAddress = remoteSocketAddressForNode(to.nodeName).getOrElse(throw new ClusterException("No remote address registered for [" + to.nodeName + "]")) - - ignore[ZkNoNodeException](zkClient.delete(actorAddressToNodesPathFor(actorAddress, from.nodeName))) - - // FIXME who takes care of this line? - //ignore[ZkNoNodeException](zkClient.delete(nodeToUuidsPathFor(from.nodeName, uuid))) - - // 'use' (check out) actor on the remote 'to' node - useActorOnNode(to.nodeName, actorAddress, replicateFromUuid) - } - } - - private def createZooKeeperPathStructureIfNeeded() { - ignore[ZkNodeExistsException] { - zkClient.create(CLUSTER_PATH, null, CreateMode.PERSISTENT) - EventHandler.info(this, "Created node [%s]".format(CLUSTER_PATH)) - } - - basePaths.foreach { path ⇒ - try { - ignore[ZkNodeExistsException](zkClient.create(path, null, CreateMode.PERSISTENT)) - EventHandler.debug(this, "Created node [%s]".format(path)) - } catch { - case e ⇒ - val error = new ClusterException(e.toString) - EventHandler.error(error, this) - throw error - } - } - } - - private def registerListeners() = { - zkClient.subscribeStateChanges(stateListener) - zkClient.subscribeChildChanges(MEMBERSHIP_PATH, membershipListener) - } - - private def unregisterListeners() = { - zkClient.unsubscribeStateChanges(stateListener) - zkClient.unsubscribeChildChanges(MEMBERSHIP_PATH, membershipListener) - } - - private def fetchMembershipNodes() { - val membershipChildren = zkClient.getChildren(MEMBERSHIP_PATH) - locallyCachedMembershipNodes.clear() - membershipChildren.iterator.foreach(locallyCachedMembershipNodes.add) - connectToAllNewlyArrivedMembershipNodesInCluster(membershipNodes, Nil) - } - - private def isReplicated(actorAddress: String): Boolean = DeploymentConfig.isReplicated(Deployer.deploymentFor(actorAddress)) - - private def createMBean = { - val clusterMBean = new StandardMBean(classOf[ClusterNodeMBean]) with ClusterNodeMBean { - - override def stop() = self.shutdown() - - override def disconnect() = self.disconnect() - - override def reconnect() = self.reconnect() - - override def resign() = self.resign() - - override 
def getNodeAddress = self.nodeAddress - - override def getRemoteServerHostname = self.hostname - - override def getRemoteServerPort = self.port - - override def getNodeName = self.nodeAddress.nodeName - - override def getClusterName = self.nodeAddress.clusterName - - override def getZooKeeperServerAddresses = self.zkServerAddresses - - override def getMemberNodes = self.locallyCachedMembershipNodes.iterator.map(_.toString).toArray - - override def getLeaderLockName = self.leader.toString - - override def isLeader = self.isLeader - - override def getUuidsForActorsInUse = self.uuidsForActorsInUse.map(_.toString).toArray - - override def getAddressesForActorsInUse = self.addressesForActorsInUse.map(_.toString).toArray - - override def getUuidsForClusteredActors = self.uuidsForClusteredActors.map(_.toString).toArray - - override def getAddressesForClusteredActors = self.addressesForClusteredActors.map(_.toString).toArray - - override def getNodesForActorInUseWithAddress(address: String) = self.nodesForActorsInUseWithAddress(address) - - override def getUuidsForActorsInUseOnNode(nodeName: String) = self.uuidsForActorsInUseOnNode(nodeName).map(_.toString).toArray - - override def getAddressesForActorsInUseOnNode(nodeName: String) = self.addressesForActorsInUseOnNode(nodeName).map(_.toString).toArray - - override def setConfigElement(key: String, value: String): Unit = self.setConfigElement(key, value.getBytes("UTF-8")) - - override def getConfigElement(key: String) = new String(self.getConfigElement(key).getOrElse(Array[Byte]()), "UTF-8") - - override def removeConfigElement(key: String): Unit = self.removeConfigElement(key) - - override def getConfigElementKeys = self.getConfigElementKeys.toArray - - override def getMembershipPathFor(node: String) = self.membershipPathFor(node) - - override def getConfigurationPathFor(key: String) = self.configurationPathFor(key) - - override def getActorAddresstoNodesPathFor(actorAddress: String) = self.actorAddressToNodesPathFor(actorAddress) - - override def getActorAddressToNodesPathForWithNodeName(actorAddress: String, nodeName: String) = self.actorAddressToNodesPathFor(actorAddress, nodeName) - - override def getNodeToUuidsPathFor(node: String) = self.nodeToUuidsPathFor(node) - - override def getNodeToUuidsPathFor(node: String, uuid: UUID) = self.nodeToUuidsPathFor(node, uuid) - - override def getActorAddressRegistryPathFor(actorAddress: String) = self.actorAddressRegistryPathFor(actorAddress) - - override def getActorAddressRegistrySerializerPathFor(actorAddress: String) = self.actorAddressRegistrySerializerPathFor(actorAddress) - - override def getActorAddressRegistryUuidPathFor(actorAddress: String) = self.actorAddressRegistryUuidPathFor(actorAddress) - - override def getActorUuidRegistryNodePathFor(uuid: UUID) = self.actorUuidRegistryNodePathFor(uuid) - - override def getActorUuidRegistryRemoteAddressPathFor(uuid: UUID) = self.actorUuidRegistryNodePathFor(uuid) - - override def getActorAddressToUuidsPathFor(actorAddress: String) = self.actorAddressToUuidsPathFor(actorAddress) - - override def getActorAddressToUuidsPathForWithNodeName(actorAddress: String, uuid: UUID) = self.actorAddressToUuidsPathFor(actorAddress, uuid) - } - - JMX.register(clusterJmxObjectName, clusterMBean) - - // FIXME need monitoring to lookup the cluster MBean dynamically - // Monitoring.registerLocalMBean(clusterJmxObjectName, clusterMBean) - } -} - -class MembershipChildListener(self: ClusterNode) extends IZkChildListener with ErrorHandler { - def handleChildChange(parentPath: 
String, currentChilds: JList[String]) { - withErrorHandler { - if (!self.isShutdown) { - if (currentChilds ne null) { - val currentClusterNodes = currentChilds.toList - if (!currentClusterNodes.isEmpty) EventHandler.debug(this, - "MembershipChildListener at [%s] has children [%s]" - .format(self.nodeAddress.nodeName, currentClusterNodes.mkString(" "))) - - // take a snapshot of the old cluster nodes and then update the list with the current connected nodes in the cluster - val oldClusterNodes = self.locallyCachedMembershipNodes.toArray.toSet.asInstanceOf[Set[String]] - self.locallyCachedMembershipNodes.clear() - currentClusterNodes foreach (self.locallyCachedMembershipNodes.add) - - val newlyConnectedMembershipNodes = (Set(currentClusterNodes: _*) diff oldClusterNodes).toList - val newlyDisconnectedMembershipNodes = (oldClusterNodes diff Set(currentClusterNodes: _*)).toList - - // update the connections with the new set of cluster nodes - val disconnectedConnections = self.connectToAllNewlyArrivedMembershipNodesInCluster(newlyConnectedMembershipNodes, newlyDisconnectedMembershipNodes) - - // if node(s) left cluster then migrate actors residing on the failed node - if (!newlyDisconnectedMembershipNodes.isEmpty) { - self.migrateActorsOnFailedNodes(newlyDisconnectedMembershipNodes, currentClusterNodes, oldClusterNodes.toList, disconnectedConnections) - } - - // publish NodeConnected and NodeDisconnect events to the listeners - newlyConnectedMembershipNodes foreach (node ⇒ self.publish(NodeConnected(node))) - newlyDisconnectedMembershipNodes foreach { node ⇒ - self.publish(NodeDisconnected(node)) - // remove metrics of a disconnected node from ZK and local cache - self.metricsManager.removeNodeMetrics(node) - } - } - } - } - } -} - -class StateListener(self: ClusterNode) extends IZkStateListener { - def handleStateChanged(state: KeeperState) { - state match { - case KeeperState.SyncConnected ⇒ - EventHandler.debug(this, "Cluster node [%s] - Connected".format(self.nodeAddress)) - self.publish(ThisNode.Connected) - case KeeperState.Disconnected ⇒ - EventHandler.debug(this, "Cluster node [%s] - Disconnected".format(self.nodeAddress)) - self.publish(ThisNode.Disconnected) - case KeeperState.Expired ⇒ - EventHandler.debug(this, "Cluster node [%s] - Expired".format(self.nodeAddress)) - self.publish(ThisNode.Expired) - } - } - - /** - * Re-initialize after the zookeeper session has expired and a new session has been created. - */ - def handleNewSession() { - EventHandler.debug(this, "Session expired re-initializing node [%s]".format(self.nodeAddress)) - self.boot() - self.publish(NewSession) - } -} - -trait ErrorHandler { - def withErrorHandler[T](body: ⇒ T) = { - try { - ignore[ZkInterruptedException](body) // FIXME Is it good to ignore ZkInterruptedException? If not, how should we handle it? - } catch { - case e: Throwable ⇒ - EventHandler.error(e, this, e.toString) - throw e - } - } -} - -object RemoteClusterDaemon { - val Address = "akka-cluster-daemon".intern - - // FIXME configure computeGridDispatcher to what? - val computeGridDispatcher = Dispatchers.newDispatcher("akka:compute-grid").build -} - -/** - * Internal "daemon" actor for cluster internal communication. - * - * It acts as the brain of the cluster that responds to cluster events (messages) and undertakes action. 
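The FUNCTION_* branches handled below were fed by the compute-grid 'send' methods removed above; driving them looked roughly like this (a sketch; the closures and instance counts are invented, and the closures must be serializable):

    import akka.dispatch.Future

    val node = Cluster.node

    // fire-and-forget: runs the closure on 3 randomly picked nodes (FUNCTION_FUN0_UNIT)
    node.send(() ⇒ println("running somewhere in the cluster"), 3)

    // with results: one Future per node the closure was shipped to (FUNCTION_FUN0_ANY)
    val results: List[Future[Any]] = node.send(() ⇒ 40 + 2, 3)

    // unary function shipped together with its argument (FUNCTION_FUN1_ARG_ANY)
    val more: List[Future[Any]] = node.send((x: Any) ⇒ x.toString.reverse, "akka", 3)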
- */ -class RemoteClusterDaemon(cluster: ClusterNode) extends Actor { - - import RemoteClusterDaemon._ - import Cluster._ - - override def preRestart(reason: Throwable, msg: Option[Any]) { - EventHandler.debug(this, "RemoteClusterDaemon failed due to [%s] restarting...".format(reason)) - } - - def receive: Receive = { - case message: RemoteSystemDaemonMessageProtocol ⇒ - EventHandler.debug(this, - "Received command [\n%s] to RemoteClusterDaemon on node [%s]".format(message, cluster.nodeAddress.nodeName)) - - message.getMessageType match { - case USE ⇒ handleUse(message) - case RELEASE ⇒ handleRelease(message) - case STOP ⇒ cluster.shutdown() - case DISCONNECT ⇒ cluster.disconnect() - case RECONNECT ⇒ cluster.reconnect() - case RESIGN ⇒ cluster.resign() - case FAIL_OVER_CONNECTIONS ⇒ handleFailover(message) - case FUNCTION_FUN0_UNIT ⇒ handle_fun0_unit(message) - case FUNCTION_FUN0_ANY ⇒ handle_fun0_any(message) - case FUNCTION_FUN1_ARG_UNIT ⇒ handle_fun1_arg_unit(message) - case FUNCTION_FUN1_ARG_ANY ⇒ handle_fun1_arg_any(message) - //TODO: should we not deal with unrecognized message types? - } - - case unknown ⇒ EventHandler.warning(this, "Unknown message [%s]".format(unknown)) - } - - def handleRelease(message: RemoteProtocol.RemoteSystemDaemonMessageProtocol) { - if (message.hasActorUuid) { - cluster.actorAddressForUuid(uuidProtocolToUuid(message.getActorUuid)) foreach { address ⇒ - cluster.release(address) - } - } else if (message.hasActorAddress) { - cluster release message.getActorAddress - } else { - EventHandler.warning(this, - "None of 'uuid' or 'actorAddress'' is specified, ignoring remote cluster daemon command [%s]".format(message)) - } - } - - def handleUse(message: RemoteProtocol.RemoteSystemDaemonMessageProtocol) { - def deserializeMessages(entriesAsBytes: Vector[Array[Byte]]): Vector[AnyRef] = { - import akka.cluster.RemoteProtocol._ - import akka.cluster.MessageSerializer - - entriesAsBytes map { bytes ⇒ - val messageBytes = - if (Cluster.shouldCompressData) LZF.uncompress(bytes) - else bytes - MessageSerializer.deserialize(MessageProtocol.parseFrom(messageBytes), None) - } - } - - def actorOfRefToUseForReplay(snapshotAsBytes: Option[Array[Byte]], actorAddress: String, newActorRef: LocalActorRef): ActorRef = { - snapshotAsBytes match { - - // we have a new actor ref - the snapshot - case Some(bytes) ⇒ - // stop the new actor ref and use the snapshot instead - //TODO: What if that actor already has been retrieved and is being used?? - //So do we have a race here? 
- cluster.remoteService.unregister(actorAddress) - - // deserialize the snapshot actor ref and register it as remote actor - val uncompressedBytes = - if (Cluster.shouldCompressData) LZF.uncompress(bytes) - else bytes - - val snapshotActorRef = fromBinary(uncompressedBytes, newActorRef.uuid) - cluster.remoteService.register(actorAddress, snapshotActorRef) - - // FIXME we should call 'stop()' here (to GC the actor), but can't since that will currently - //shut down the TransactionLog for this UUID - since both this actor and the new snapshotActorRef - //have the same UUID (which they should) - //newActorRef.stop() - - snapshotActorRef - - // we have no snapshot - use the new actor ref - case None ⇒ - newActorRef - } - } - - try { - if (message.hasActorAddress) { - val actorAddress = message.getActorAddress - cluster.serializerForActor(actorAddress) foreach { serializer ⇒ - cluster.use(actorAddress, serializer) foreach { newActorRef ⇒ - cluster.remoteService.register(actorAddress, newActorRef) - - if (message.hasReplicateActorFromUuid) { - // replication is used - fetch the messages and replay them - val replicateFromUuid = uuidProtocolToUuid(message.getReplicateActorFromUuid) - val deployment = Deployer.deploymentFor(actorAddress) - val replicationScheme = DeploymentConfig.replicationSchemeFor(deployment).getOrElse( - throw new IllegalStateException( - "Actor [" + actorAddress + "] should have been configured as a replicated actor but could not find its ReplicationScheme")) - val isWriteBehind = DeploymentConfig.isWriteBehindReplication(replicationScheme) - - try { - // get the transaction log for the actor UUID - val readonlyTxLog = TransactionLog.logFor(replicateFromUuid.toString, isWriteBehind, replicationScheme) - - // get the latest snapshot (Option[Array[Byte]]) and all the subsequent messages (Array[Byte]) - val (snapshotAsBytes, entriesAsBytes) = readonlyTxLog.latestSnapshotAndSubsequentEntries - - // deserialize and restore actor snapshot. This call will automatically recreate a transaction log. - val actorRef = actorOfRefToUseForReplay(snapshotAsBytes, actorAddress, newActorRef) - - // deserialize the messages - val messages: Vector[AnyRef] = deserializeMessages(entriesAsBytes) - - EventHandler.info(this, "Replaying [%s] messages to actor [%s]".format(messages.size, actorAddress)) - - // replay all messages - messages foreach { message ⇒ - EventHandler.debug(this, "Replaying message [%s] to actor [%s]".format(message, actorAddress)) - - // FIXME how to handle '?' messages? - // We can *not* replay them with the correct semantics. Should we: - // 1. Ignore/drop them and log warning? - // 2. Throw exception when about to log them? - // 3. Other? - actorRef ! message - } - - } catch { - case e: Throwable ⇒ - EventHandler.error(e, this, e.toString) - throw e - } - } - } - } - } else { - EventHandler.error(this, "Actor 'address' is not defined, ignoring remote cluster daemon command [%s]".format(message)) - } - - self.reply(Success(cluster.remoteServerAddress.toString)) - } catch { - case error: Throwable ⇒ - self.reply(Failure(error)) - throw error - } - } - - def handle_fun0_unit(message: RemoteProtocol.RemoteSystemDaemonMessageProtocol) { - new LocalActorRef( - Props( - self ⇒ { - case f: Function0[_] ⇒ try { f() } finally { self.stop() } - }).copy(dispatcher = computeGridDispatcher), Props.randomName, systemService = true) ! 
payloadFor(message, classOf[Function0[Unit]]) - } - - def handle_fun0_any(message: RemoteProtocol.RemoteSystemDaemonMessageProtocol) { - new LocalActorRef( - Props( - self ⇒ { - case f: Function0[_] ⇒ try { self.reply(f()) } finally { self.stop() } - }).copy(dispatcher = computeGridDispatcher), Props.randomName, systemService = true) forward payloadFor(message, classOf[Function0[Any]]) - } - - def handle_fun1_arg_unit(message: RemoteProtocol.RemoteSystemDaemonMessageProtocol) { - new LocalActorRef( - Props( - self ⇒ { - case (fun: Function[_, _], param: Any) ⇒ try { fun.asInstanceOf[Any ⇒ Unit].apply(param) } finally { self.stop() } - }).copy(dispatcher = computeGridDispatcher), Props.randomName, systemService = true) ! payloadFor(message, classOf[Tuple2[Function1[Any, Unit], Any]]) - } - - def handle_fun1_arg_any(message: RemoteProtocol.RemoteSystemDaemonMessageProtocol) { - new LocalActorRef( - Props( - self ⇒ { - case (fun: Function[_, _], param: Any) ⇒ try { self.reply(fun.asInstanceOf[Any ⇒ Any](param)) } finally { self.stop() } - }).copy(dispatcher = computeGridDispatcher), Props.randomName, systemService = true) forward payloadFor(message, classOf[Tuple2[Function1[Any, Any], Any]]) - } - - def handleFailover(message: RemoteProtocol.RemoteSystemDaemonMessageProtocol) { - val (from, to) = payloadFor(message, classOf[(InetSocketAddress, InetSocketAddress)]) - cluster.failOverClusterActorRefConnections(from, to) - } - - private def payloadFor[T](message: RemoteSystemDaemonMessageProtocol, clazz: Class[T]): T = { - Serialization.deserialize(message.getPayload.toByteArray, clazz, None) match { - case Left(error) ⇒ throw error - case Right(instance) ⇒ instance.asInstanceOf[T] - } - } -} diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterActorRef.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterActorRef.scala deleted file mode 100644 index 29f56a5966..0000000000 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterActorRef.scala +++ /dev/null @@ -1,129 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -package akka.cluster - -import akka.actor._ -import akka.util._ -import ReflectiveAccess._ -import akka.routing._ -import akka.cluster._ -import FailureDetector._ -import akka.event.EventHandler -import akka.config.ConfigurationException - -import java.net.InetSocketAddress -import java.util.concurrent.atomic.AtomicReference - -import collection.immutable.Map -import annotation.tailrec - -/** - * ClusterActorRef factory and locator. 
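The 'locator' half of the companion object below, from the caller's side (a sketch; the address is invented):

    // find a locally registered cluster ref by its logical address, if any
    ClusterActorRef.actorFor("hello-service") foreach { ref ⇒ ref ! "ping" }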
- */ -object ClusterActorRef { - import FailureDetectorType._ - import RouterType._ - - def newRef( - actorAddress: String, - routerType: RouterType, - failureDetectorType: FailureDetectorType, - timeout: Long): ClusterActorRef = { - - val routerFactory: () ⇒ Router = routerType match { - case Direct ⇒ () ⇒ new DirectRouter - case Random ⇒ () ⇒ new RandomRouter - case RoundRobin ⇒ () ⇒ new RoundRobinRouter - case LeastCPU ⇒ sys.error("Router LeastCPU not supported yet") - case LeastRAM ⇒ sys.error("Router LeastRAM not supported yet") - case LeastMessages ⇒ sys.error("Router LeastMessages not supported yet") - case Custom ⇒ sys.error("Router Custom not supported yet") - } - - val failureDetectorFactory: (Map[InetSocketAddress, ActorRef]) ⇒ FailureDetector = failureDetectorType match { - case RemoveConnectionOnFirstFailureLocalFailureDetector ⇒ - (connections: Map[InetSocketAddress, ActorRef]) ⇒ new RemoveConnectionOnFirstFailureLocalFailureDetector(connections.values) - - case RemoveConnectionOnFirstFailureRemoteFailureDetector ⇒ - (connections: Map[InetSocketAddress, ActorRef]) ⇒ new RemoveConnectionOnFirstFailureRemoteFailureDetector(connections) - - case CustomFailureDetector(implClass) ⇒ - (connections: Map[InetSocketAddress, ActorRef]) ⇒ FailureDetector.createCustomFailureDetector(implClass, connections) - } - - new ClusterActorRef( - RoutedProps() - .withTimeout(timeout) - .withRouter(routerFactory) - .withFailureDetector(failureDetectorFactory), - actorAddress) - } - - /** - * Finds the cluster actor reference that has a specific address. - */ - def actorFor(address: String): Option[ActorRef] = - Actor.registry.local.actorFor(Address.clusterActorRefPrefix + address) - - private[cluster] def createRemoteActorRef(actorAddress: String, inetSocketAddress: InetSocketAddress) = { - RemoteActorRef(inetSocketAddress, actorAddress, Actor.TIMEOUT, None) - } -} - -/** - * ActorRef representing a one or many instances of a clustered, load-balanced and sometimes replicated actor - * where the instances can reside on other nodes in the cluster. 
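Constructing such a ref through the 'newRef' factory above (a sketch; 'hello-service' is an invented address):

    val ref: ClusterActorRef = ClusterActorRef.newRef(
      actorAddress = "hello-service",
      routerType = RouterType.RoundRobin,
      failureDetectorType = FailureDetectorType.RemoveConnectionOnFirstFailureLocalFailureDetector,
      timeout = Actor.TIMEOUT)

    ref ! "hello" // routed round-robin over the clustered instances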
- */ -private[akka] class ClusterActorRef(props: RoutedProps, val address: String) extends AbstractRoutedActorRef(props) { - - import ClusterActorRef._ - - ClusterModule.ensureEnabled() - - val addresses = Cluster.node.inetSocketAddressesForActor(address) - - EventHandler.debug(this, - "Checking out cluster actor ref with address [%s] and router [%s] on [%s] connected to [\n\t%s]" - .format(address, router, Cluster.node.remoteServerAddress, addresses.map(_._2).mkString("\n\t"))) - - addresses foreach { - case (_, address) ⇒ Cluster.node.clusterActorRefs.put(address, this) - } - - val connections: FailureDetector = { - val remoteConnections = (Map[InetSocketAddress, ActorRef]() /: addresses) { - case (map, (uuid, inetSocketAddress)) ⇒ - map + (inetSocketAddress -> createRemoteActorRef(address, inetSocketAddress)) - } - props.failureDetectorFactory(remoteConnections) - } - - router.init(connections) - - def nrOfConnections: Int = connections.size - - private[akka] def failOver(from: InetSocketAddress, to: InetSocketAddress) { - connections.failOver(from, to) - } - - def stop() { - synchronized { - if (_status == ActorRefInternals.RUNNING) { - Actor.registry.local.unregisterClusterActorRef(this) - _status = ActorRefInternals.SHUTDOWN - postMessageToMailbox(Terminate, None) - - // FIXME here we need to fire off Actor.cluster.remove(address) (which needs to be properly implemented first, see ticket) - connections.stopAll() - } - } - } - - /* If you start me up */ - if (_status == ActorRefInternals.UNSTARTED) { - _status = ActorRefInternals.RUNNING - Actor.registry.local.registerClusterActorRef(this) - } -} diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterDeployer.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterDeployer.scala deleted file mode 100644 index 61a393360c..0000000000 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterDeployer.scala +++ /dev/null @@ -1,205 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -package akka.cluster - -import akka.actor.DeploymentConfig._ -import akka.actor._ -import akka.event.EventHandler -import akka.config.Config -import akka.util.Switch -import akka.util.Helpers._ -import akka.cluster.zookeeper.AkkaZkClient - -import org.apache.zookeeper.CreateMode -import org.apache.zookeeper.recipes.lock.{ WriteLock, LockListener } - -import org.I0Itec.zkclient.exception.{ ZkNoNodeException, ZkNodeExistsException } - -import scala.collection.immutable.Seq -import scala.collection.JavaConversions.collectionAsScalaIterable - -import java.util.concurrent.{ CountDownLatch, TimeUnit } - -/** - * A ClusterDeployer is responsible for deploying a Deploy. 
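Its central lookup path, seen from the consumer side (a sketch; the address is invented):

    // consult the local deployment cache first, then ZooKeeper; a cluster hit is cached locally
    ClusterDeployer.lookupDeploymentFor("hello-service") match {
      case Some(deploy) ⇒ println("deployed as: " + deploy)
      case None         ⇒ println("no deployment plan for this address")
    }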
- */ -object ClusterDeployer extends ActorDeployer { - val clusterName = Cluster.name - val nodeName = Config.nodename - val clusterPath = "/%s" format clusterName - - val deploymentPath = clusterPath + "/deployment" - val deploymentAddressPath = deploymentPath + "/%s" - - val deploymentCoordinationPath = clusterPath + "/deployment-coordination" - val deploymentInProgressLockPath = deploymentCoordinationPath + "/in-progress" - val isDeploymentCompletedInClusterLockPath = deploymentCoordinationPath + "/completed" // should not be part of basePaths - - val basePaths = List(clusterPath, deploymentPath, deploymentCoordinationPath, deploymentInProgressLockPath) - - private val isConnected = new Switch(false) - private val deploymentCompleted = new CountDownLatch(1) - - private val zkClient = new AkkaZkClient( - Cluster.zooKeeperServers, - Cluster.sessionTimeout, - Cluster.connectionTimeout, - Cluster.defaultZooKeeperSerializer) - - private val deploymentInProgressLockListener = new LockListener { - def lockAcquired() { - EventHandler.info(this, "Clustered deployment started") - } - - def lockReleased() { - EventHandler.info(this, "Clustered deployment completed") - deploymentCompleted.countDown() - } - } - - private val deploymentInProgressLock = new WriteLock( - zkClient.connection.getZookeeper, - deploymentInProgressLockPath, - null, - deploymentInProgressLockListener) - - private val systemDeployments: List[Deploy] = Nil - - def shutdown() { - isConnected switchOff { - // undeploy all - try { - for { - child ← collectionAsScalaIterable(zkClient.getChildren(deploymentPath)) - deployment ← zkClient.readData(deploymentAddressPath.format(child)).asInstanceOf[Deploy] - } zkClient.delete(deploymentAddressPath.format(deployment.address)) - - invalidateDeploymentInCluster() - } catch { - case e: Exception ⇒ - handleError(new DeploymentException("Could not undeploy all deployment data in ZooKeeper due to: " + e)) - } - - // shut down ZooKeeper client - zkClient.close() - EventHandler.info(this, "ClusterDeployer shut down successfully") - } - } - - def lookupDeploymentFor(address: String): Option[Deploy] = ensureRunning { - LocalDeployer.lookupDeploymentFor(address) match { // try local cache - case Some(deployment) ⇒ // in local cache - deployment - case None ⇒ // not in cache, check cluster - val deployment = - try { - Some(zkClient.readData(deploymentAddressPath.format(address)).asInstanceOf[Deploy]) - } catch { - case e: ZkNoNodeException ⇒ None - case e: Exception ⇒ - EventHandler.warning(this, e.toString) - None - } - deployment foreach (LocalDeployer.deploy(_)) // cache it in local cache - deployment - } - } - - def fetchDeploymentsFromCluster: List[Deploy] = ensureRunning { - val addresses = - try { - zkClient.getChildren(deploymentPath).toList - } catch { - case e: ZkNoNodeException ⇒ List[String]() - } - val deployments = addresses map { address ⇒ - zkClient.readData(deploymentAddressPath.format(address)).asInstanceOf[Deploy] - } - EventHandler.info(this, "Fetched deployment plans from cluster [\n\t%s\n]" format deployments.mkString("\n\t")) - deployments - } - - private[akka] def init(deployments: Seq[Deploy]) { - isConnected switchOn { - EventHandler.info(this, "Initializing ClusterDeployer") - - basePaths foreach { path ⇒ - try { - ignore[ZkNodeExistsException](zkClient.create(path, null, CreateMode.PERSISTENT)) - EventHandler.debug(this, "Created ZooKeeper path for deployment [%s]".format(path)) - } catch { - case e ⇒ - val error = new DeploymentException(e.toString) - 
EventHandler.error(error, this) - throw error - } - } - - val allDeployments = deployments ++ systemDeployments - - if (!isDeploymentCompletedInCluster) { - if (deploymentInProgressLock.lock()) { - // try to be the one doing the clustered deployment - EventHandler.info(this, "Pushing clustered deployment plans [\n\t" + allDeployments.mkString("\n\t") + "\n]") - allDeployments foreach (deploy(_)) // deploy - markDeploymentCompletedInCluster() - deploymentInProgressLock.unlock() // signal deployment complete - - } else { - deploymentCompleted.await(30, TimeUnit.SECONDS) // wait until deployment is completed by other "master" node - } - } - - // fetch clustered deployments and deploy them locally - fetchDeploymentsFromCluster foreach (LocalDeployer.deploy(_)) - } - } - - private[akka] def deploy(deployment: Deploy) { - ensureRunning { - LocalDeployer.deploy(deployment) - deployment match { - case Deploy(_, _, _, _, Local) | Deploy(_, _, _, _, _: Local) ⇒ //TODO LocalDeployer.deploy(deployment)?? - case Deploy(address, recipe, routing, _, _) ⇒ // cluster deployment - /*TODO recipe foreach { r ⇒ - Deployer.newClusterActorRef(() ⇒ Actor.actorOf(r.implementationClass), address, deployment) - }*/ - val path = deploymentAddressPath.format(address) - try { - ignore[ZkNodeExistsException](zkClient.create(path, null, CreateMode.PERSISTENT)) - zkClient.writeData(path, deployment) - } catch { - case e: NullPointerException ⇒ - handleError(new DeploymentException( - "Could not store deployment data [" + deployment + "] in ZooKeeper since client session is closed")) - case e: Exception ⇒ - handleError(new DeploymentException( - "Could not store deployment data [" + deployment + "] in ZooKeeper due to: " + e)) - } - } - } - } - - private def markDeploymentCompletedInCluster() { - ignore[ZkNodeExistsException](zkClient.create(isDeploymentCompletedInClusterLockPath, null, CreateMode.PERSISTENT)) - } - - private def isDeploymentCompletedInCluster = zkClient.exists(isDeploymentCompletedInClusterLockPath) - - // FIXME in future - add watch to this path to be able to trigger redeployment, and use this method to trigger redeployment - private def invalidateDeploymentInCluster() { - ignore[ZkNoNodeException](zkClient.delete(isDeploymentCompletedInClusterLockPath)) - } - - private def ensureRunning[T](body: ⇒ T): T = { - if (isConnected.isOn) body - else throw new IllegalStateException("ClusterDeployer is not running") - } - - private[akka] def handleError(e: Throwable): Nothing = { - EventHandler.error(e, this, e.toString) - throw e - } -} diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala new file mode 100644 index 0000000000..820290ea14 --- /dev/null +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala @@ -0,0 +1,26 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. 
+ */ +package akka.cluster + +import com.typesafe.config.Config +import akka.util.Duration +import java.util.concurrent.TimeUnit.MILLISECONDS +import akka.config.ConfigurationException +import scala.collection.JavaConverters._ +import akka.actor.Address +import akka.actor.AddressExtractor + +class ClusterSettings(val config: Config, val systemName: String) { + import config._ + // cluster config section + val FailureDetectorThreshold = getInt("akka.cluster.failure-detector.threshold") + val FailureDetectorMaxSampleSize = getInt("akka.cluster.failure-detector.max-sample-size") + val SeedNodeConnectionTimeout = Duration(config.getMilliseconds("akka.cluster.seed-node-connection-timeout"), MILLISECONDS) + val MaxTimeToRetryJoiningCluster = Duration(config.getMilliseconds("akka.cluster.max-time-to-retry-joining-cluster"), MILLISECONDS) + val InitialDelayForGossip = Duration(getMilliseconds("akka.cluster.gossip.initialDelay"), MILLISECONDS) + val GossipFrequency = Duration(getMilliseconds("akka.cluster.gossip.frequency"), MILLISECONDS) + val SeedNodes = Set.empty[Address] ++ getStringList("akka.cluster.seed-nodes").asScala.collect { + case AddressExtractor(addr) ⇒ addr + } +} diff --git a/akka-cluster/src/main/scala/akka/cluster/Gossiper.scala b/akka-cluster/src/main/scala/akka/cluster/Gossiper.scala new file mode 100644 index 0000000000..1b9026d082 --- /dev/null +++ b/akka-cluster/src/main/scala/akka/cluster/Gossiper.scala @@ -0,0 +1,438 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ + +package akka.cluster + +import akka.actor._ +import akka.actor.Status._ +import akka.remote._ +import akka.event.Logging +import akka.dispatch.Await +import akka.pattern.ask +import akka.util._ +import akka.config.ConfigurationException + +import java.util.concurrent.atomic.{ AtomicReference, AtomicBoolean } +import java.util.concurrent.TimeUnit._ +import java.util.concurrent.TimeoutException +import java.security.SecureRandom +import System.{ currentTimeMillis ⇒ newTimestamp } + +import scala.collection.immutable.{ Map, SortedSet } +import scala.annotation.tailrec + +import com.google.protobuf.ByteString + +/** + * Interface for member membership change listener. + */ +trait NodeMembershipChangeListener { + def memberConnected(member: Member) + def memberDisconnected(member: Member) +} + +/** + * Base trait for all cluster messages. All ClusterMessage's are serializable. + */ +sealed trait ClusterMessage extends Serializable + +/** + * Command to join the cluster. + */ +case object JoinCluster extends ClusterMessage + +/** + * Represents the state of the cluster; cluster ring membership, ring convergence, meta data - all versioned by a vector clock. + */ +case class Gossip( + version: VectorClock = VectorClock(), + member: Address, + // sorted set of members with their status, sorted by name + members: SortedSet[Member] = SortedSet.empty[Member](Ordering.fromLessThan[Member](_.address.toString > _.address.toString)), + unavailableMembers: Set[Member] = Set.empty[Member], + // for ring convergence + seen: Map[Member, VectorClock] = Map.empty[Member, VectorClock], + // for handoff + //pendingChanges: Option[Vector[PendingPartitioningChange]] = None, + meta: Option[Map[String, Array[Byte]]] = None) + extends ClusterMessage // is a serializable cluster message + with Versioned // has a vector clock as version + +/** + * Represents the address and the current status of a cluster member node. 
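The ClusterSettings class above can be exercised with an inline config like the following (key names from the code; all values, including the seed-node address, are invented):

    import com.typesafe.config.ConfigFactory

    val config = ConfigFactory.parseString("""
      akka.cluster {
        failure-detector.threshold = 8
        failure-detector.max-sample-size = 1000
        seed-node-connection-timeout = 2s
        max-time-to-retry-joining-cluster = 30s
        gossip.initialDelay = 1s
        gossip.frequency = 1s
        seed-nodes = ["akka://mysystem@192.168.0.1:2552"]
      }
    """)

    val settings = new ClusterSettings(config, "mysystem")
    // e.g. settings.GossipFrequency is 1 second and settings.SeedNodes holds one Address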
+ */ +case class Member(address: Address, status: MemberStatus) extends ClusterMessage + +/** + * Defines the current status of a cluster member node + * + * Can be one of: Joining, Up, Leaving, Exiting and Down. + */ +sealed trait MemberStatus extends ClusterMessage with Versioned +object MemberStatus { + case class Joining(version: VectorClock = VectorClock()) extends MemberStatus + case class Up(version: VectorClock = VectorClock()) extends MemberStatus + case class Leaving(version: VectorClock = VectorClock()) extends MemberStatus + case class Exiting(version: VectorClock = VectorClock()) extends MemberStatus + case class Down(version: VectorClock = VectorClock()) extends MemberStatus +} + +// sealed trait PendingPartitioningStatus +// object PendingPartitioningStatus { +// case object Complete extends PendingPartitioningStatus +// case object Awaiting extends PendingPartitioningStatus +// } + +// case class PendingPartitioningChange( +// owner: Address, +// nextOwner: Address, +// changes: Vector[VNodeMod], +// status: PendingPartitioningStatus) + +final class ClusterDaemon(system: ActorSystem, gossiper: Gossiper) extends Actor { + val log = Logging(system, "ClusterDaemon") + + def receive = { + case JoinCluster ⇒ sender ! gossiper.latestGossip + case gossip: Gossip ⇒ + gossiper.tell(gossip) + + case unknown ⇒ log.error("Unknown message sent to cluster daemon [" + unknown + "]") + } +} + +/** + * This module is responsible for Gossiping cluster information. The abstraction maintains the list of live + * and dead members. Periodically i.e. every 1 second this module chooses a random member and initiates a round + * of Gossip with it. Whenever it gets gossip updates it updates the Failure Detector with the liveness + * information. + *
+ * During each of these runs the member initiates gossip exchange according to the following rules (as defined in
+ * the Cassandra documentation [http://wiki.apache.org/cassandra/ArchitectureGossip]):
+ *
+ *   1) Gossip to a random live member (if any)
+ *   2) Gossip to a random unreachable member, with a probability that depends on the number of unreachable
+ *      and live members
+ *   3) If the member gossiped to at (1) was not a seed, or the number of live members is less than the number
+ *      of seeds, gossip to a random seed, with a probability that depends on the number of unreachable, seed
+ *      and live members
+ *
+ * (For example, with 1 unreachable and 9 live members, rule (2) is intended to fire with probability
+ * 1 / (9 + 1) = 0.1; see 'initateGossip' below.)
+ */ +case class Gossiper(remote: RemoteActorRefProvider, system: ActorSystemImpl) { + + /** + * Represents the state for this Gossiper. Implemented using optimistic lockless concurrency, + * all state is represented by this immutable case class and managed by an AtomicReference. + */ + private case class State( + currentGossip: Gossip, + memberMembershipChangeListeners: Set[NodeMembershipChangeListener] = Set.empty[NodeMembershipChangeListener]) + + val remoteSettings = new RemoteSettings(system.settings.config, system.name) + val clusterSettings = new ClusterSettings(system.settings.config, system.name) + + val protocol = "akka" // TODO should this be hardcoded? + val address = remote.transport.address + + val memberFingerprint = address.## + val initialDelayForGossip = clusterSettings.InitialDelayForGossip + val gossipFrequency = clusterSettings.GossipFrequency + implicit val seedNodeConnectionTimeout = clusterSettings.SeedNodeConnectionTimeout + implicit val defaultTimeout = Timeout(remoteSettings.RemoteSystemDaemonAckTimeout) + + // seed members + private val seeds: Set[Member] = { + if (clusterSettings.SeedNodes.isEmpty) throw new ConfigurationException( + "At least one seed member must be defined in the configuration [akka.cluster.seed-members]") + else clusterSettings.SeedNodes map (address ⇒ Member(address, MemberStatus.Up())) + } + + private val serialization = remote.serialization + private val failureDetector = new AccrualFailureDetector(system, clusterSettings.FailureDetectorThreshold, clusterSettings.FailureDetectorMaxSampleSize) + + private val isRunning = new AtomicBoolean(true) + private val log = Logging(system, "Gossiper") + private val random = SecureRandom.getInstance("SHA1PRNG") + + // Is it right to put this guy under the /system path or should we have a top-level /cluster or something else...? + private val clusterDaemon = system.systemActorOf(Props(new ClusterDaemon(system, this)), "cluster") + private val state = new AtomicReference[State](State(currentGossip = newGossip())) + + // FIXME manage connections in some other way so we can delete the RemoteConnectionManager (SINCE IT SUCKS!!!) + private val connectionManager = new RemoteConnectionManager(system, remote, failureDetector, Map.empty[Address, ActorRef]) + + log.info("Starting cluster Gossiper...") + + // join the cluster by connecting to one of the seed members and retrieve current cluster state (Gossip) + joinCluster(clusterSettings.MaxTimeToRetryJoiningCluster fromNow) + + // start periodic gossip and cluster scrutinization + val initateGossipCanceller = system.scheduler.schedule(initialDelayForGossip, gossipFrequency)(initateGossip()) + val scrutinizeCanceller = system.scheduler.schedule(initialDelayForGossip, gossipFrequency)(scrutinize()) + + /** + * Shuts down all connections to other members, the cluster daemon and the periodic gossip and cleanup tasks. + */ + def shutdown() { + if (isRunning.compareAndSet(true, false)) { + log.info("Shutting down Gossiper for [{}]...", address) + try connectionManager.shutdown() finally { + try system.stop(clusterDaemon) finally { + try initateGossipCanceller.cancel() finally { + try scrutinizeCanceller.cancel() finally { + log.info("Gossiper for [{}] is shut down", address) + } + } + } + } + } + } + + def latestGossip: Gossip = state.get.currentGossip + + /** + * Tell the gossiper some gossip. 
+ */ + //@tailrec + final def tell(newGossip: Gossip) { + val gossipingNode = newGossip.member + + failureDetector heartbeat gossipingNode // update heartbeat in failure detector + + // FIXME all below here is WRONG - redesign with cluster convergence in mind + + // val oldState = state.get + // println("-------- NEW VERSION " + newGossip) + // println("-------- OLD VERSION " + oldState.currentGossip) + // val latestGossip = VectorClock.latestVersionOf(newGossip, oldState.currentGossip) + // println("-------- WINNING VERSION " + latestGossip) + + // val latestAvailableNodes = latestGossip.members + // val latestUnavailableNodes = latestGossip.unavailableMembers + // println("=======>>> gossipingNode: " + gossipingNode) + // println("=======>>> latestAvailableNodes: " + latestAvailableNodes) + // if (!(latestAvailableNodes contains gossipingNode) && !(latestUnavailableNodes contains gossipingNode)) { + // println("-------- NEW NODE") + // // we have a new member + // val newGossip = latestGossip copy (availableNodes = latestAvailableNodes + gossipingNode) + // val newState = oldState copy (currentGossip = incrementVersionForGossip(newGossip)) + + // println("--------- new GOSSIP " + newGossip.members) + // println("--------- new STATE " + newState) + // // if we won the race then update else try again + // if (!state.compareAndSet(oldState, newState)) tell(newGossip) // recur + // else { + // println("---------- WON RACE - setting state") + // // create connections for all new members in the latest gossip + // (latestAvailableNodes + gossipingNode) foreach { member ⇒ + // setUpConnectionToNode(member) + // oldState.memberMembershipChangeListeners foreach (_ memberConnected member) // notify listeners about the new members + // } + // } + + // } else if (latestUnavailableNodes contains gossipingNode) { + // // gossip from an old former dead member + + // val newUnavailableMembers = latestUnavailableNodes - gossipingNode + // val newMembers = latestAvailableNodes + gossipingNode + + // val newGossip = latestGossip copy (availableNodes = newMembers, unavailableNodes = newUnavailableMembers) + // val newState = oldState copy (currentGossip = incrementVersionForGossip(newGossip)) + + // // if we won the race then update else try again + // if (!state.compareAndSet(oldState, newState)) tell(newGossip) // recur + // else oldState.memberMembershipChangeListeners foreach (_ memberConnected gossipingNode) // notify listeners on successful update of state + // } + } + + /** + * Registers a listener to subscribe to cluster membership changes. + */ + @tailrec + final def registerListener(listener: NodeMembershipChangeListener) { + val oldState = state.get + val newListeners = oldState.memberMembershipChangeListeners + listener + val newState = oldState copy (memberMembershipChangeListeners = newListeners) + if (!state.compareAndSet(oldState, newState)) registerListener(listener) // recur + } + + /** + * Unsubscribes to cluster membership changes. + */ + @tailrec + final def unregisterListener(listener: NodeMembershipChangeListener) { + val oldState = state.get + val newListeners = oldState.memberMembershipChangeListeners - listener + val newState = oldState copy (memberMembershipChangeListeners = newListeners) + if (!state.compareAndSet(oldState, newState)) unregisterListener(listener) // recur + } + + /** + * Sets up remote connections to all the members in the argument list. 
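Joining the cluster (below) retries within a bounded time window. The Deadline pattern it relies on, shown in isolation (a generic sketch; 'tryJoinThroughSomeSeed' is hypothetical):

    import akka.util.Deadline
    import akka.util.duration._

    def retryUntil(deadline: Deadline)(attempt: () ⇒ Boolean) {
      if (!attempt()) {
        if (deadline.timeLeft.toMillis > 0) retryUntil(deadline)(attempt) // still inside the window
        else throw new RuntimeException("gave up after trying for " + deadline.time.toSeconds + " seconds")
      }
    }

    retryUntil(30.seconds.fromNow)(() ⇒ tryJoinThroughSomeSeed()) // hypothetical attempt function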
+   */
+  private def connectToNodes(members: Seq[Member]) {
+    members foreach { member ⇒
+      setUpConnectionToNode(member)
+      state.get.memberMembershipChangeListeners foreach (_ memberConnected member) // notify listeners about the new members
+    }
+  }
+
+  // FIXME should shuffle the list randomly before traversing it, to avoid every node connecting to the same member
+  @tailrec
+  final private def connectToRandomNodeOf(members: Seq[Member]): ActorRef = {
+    members match {
+      case member :: rest ⇒
+        setUpConnectionToNode(member) match {
+          case Some(connection) ⇒ connection
+          case None             ⇒ connectToRandomNodeOf(rest) // recur, try the next member
+        }
+      case Nil ⇒
+        throw new RemoteConnectionException(
+          "Could not establish connection to any of the members in the argument list")
+    }
+  }
+
+  /**
+   * Joins the cluster by connecting to one of the seed members and retrieving the current cluster state (Gossip).
+   */
+  private def joinCluster(deadline: Deadline) {
+    val seedNodes = seedNodesWithoutMyself // filter out myself
+
+    if (!seedNodes.isEmpty) { // if we have seed members to contact
+      connectToNodes(seedNodes)
+
+      try {
+        log.info("Trying to join cluster through one of the seed members [{}]", seedNodes.mkString(", "))
+
+        Await.result(connectToRandomNodeOf(seedNodes) ? JoinCluster, seedNodeConnectionTimeout) match {
+          case initialGossip: Gossip ⇒
+            // just sets/overwrites the state/gossip regardless of what it was before,
+            // since it should be treated as the initial state
+            state.set(state.get copy (currentGossip = initialGossip))
+            log.debug("Received initial gossip [{}] from seed member", initialGossip)
+
+          case unknown ⇒
+            throw new IllegalStateException("Expected initial gossip from seed, received [" + unknown + "]")
+        }
+      } catch {
+        case e: Exception ⇒
+          log.error(
+            "Could not join cluster through any of the seed members - retrying for another {} seconds",
+            deadline.timeLeft.toSeconds)
+
+          // retry joining the cluster unless
+          //   1. the Gossiper is shut down
+          //   2. the connection time window has expired
+          if (isRunning.get) {
+            if (deadline.timeLeft.toMillis > 0) joinCluster(deadline) // recur
+            else throw new RemoteConnectionException(
+              "Could not join cluster (any of the seed members) - giving up after trying for " +
+                deadline.time.toSeconds + " seconds")
+          }
+      }
+    }
+  }
+
+  /**
+   * Initiates a new round of gossip.
+   */
+  private def initiateGossip() {
+    val oldState = state.get
+    val oldGossip = oldState.currentGossip
+
+    val oldMembers = oldGossip.members
+    val oldMembersSize = oldMembers.size
+
+    val oldUnavailableMembers = oldGossip.unavailableMembers
+    val oldUnavailableMembersSize = oldUnavailableMembers.size
+
+    // 1. gossip to a random alive member (guard on the live members, not the unavailable ones)
+    val gossipedToSeed =
+      if (oldMembersSize > 0) gossipToRandomNodeOf(oldMembers)
+      else false
+
+    // 2. gossip to a random dead member
+    if (oldUnavailableMembersSize > 0) {
+      val probability = oldUnavailableMembersSize.toDouble / (oldMembersSize + 1) // toDouble to avoid integer division
+      if (random.nextDouble() < probability) gossipToRandomNodeOf(oldUnavailableMembers)
+    }
+
+    // 3. gossip to a seed for facilitating partition healing
+    if ((!gossipedToSeed || oldMembersSize < 1) && (seeds.head.address != address)) {
+      if (oldMembersSize == 0) gossipToRandomNodeOf(seeds)
+      else {
+        val probability = 1.0 / (oldMembersSize + oldUnavailableMembersSize) // parenthesized so the probability stays below 1
+        if (random.nextDouble() <= probability) gossipToRandomNodeOf(seeds)
+      }
+    }
+  }
+
+  /**
+   * Gossips to a random member in the set of members passed in as argument.
+   *
+   * @return 'true' if it gossiped to a "seed" member.
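+   *
+   * Worked example for the probabilities used in initiateGossip above (numbers assumed): with
+   * 3 alive and 1 unavailable member, step 2 gossips to a dead member with probability
+   * 1.0 / (3 + 1) = 0.25, and step 3 (when step 1 did not reach a seed) gossips to a seed
+   * member with probability 1.0 / (3 + 1) = 0.25.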
+   */
+  private def gossipToRandomNodeOf(members: Set[Member]): Boolean = {
+    val peers = members filter (_.address != address) // filter out myself
+    if (peers.isEmpty) false // no one to gossip to
+    else {
+      val peer = selectRandomNode(peers)
+      val oldState = state.get
+      val oldGossip = oldState.currentGossip
+      // if connection can't be established/found => ignore it since the failure detector will take care of the potential problem
+      setUpConnectionToNode(peer) foreach { _ ! oldGossip } // send the current gossip, not a fresh empty one
+      seeds exists (_.address == peer.address)
+    }
+  }
+
+  /**
+   * Scrutinizes the cluster; moves members that the failure detector considers unavailable to the set of
+   * unavailable members, and notifies all listeners of the change in the cluster membership.
+   */
+  @tailrec
+  final private def scrutinize() {
+    val oldState = state.get
+    val oldGossip = oldState.currentGossip
+
+    val oldMembers = oldGossip.members
+    val oldUnavailableMembers = oldGossip.unavailableMembers
+    val newlyDetectedUnavailableMembers = oldMembers filterNot (member ⇒ failureDetector.isAvailable(member.address))
+
+    if (!newlyDetectedUnavailableMembers.isEmpty) { // we have newly detected members marked as unavailable
+      val newMembers = oldMembers diff newlyDetectedUnavailableMembers
+      val newUnavailableMembers = oldUnavailableMembers ++ newlyDetectedUnavailableMembers
+
+      val newGossip = oldGossip copy (members = newMembers, unavailableMembers = newUnavailableMembers)
+      val newState = oldState copy (currentGossip = incrementVersionForGossip(newGossip))
+
+      // if we won the race then update else try again
+      if (!state.compareAndSet(oldState, newState)) scrutinize() // recur
+      else {
+        // notify listeners, on successful update of state, about the newly detected members only
+        for {
+          deadNode ← newlyDetectedUnavailableMembers
+          listener ← oldState.memberMembershipChangeListeners
+        } listener memberDisconnected deadNode
+      }
+    }
+  }
+
+  private def setUpConnectionToNode(member: Member): Option[ActorRef] = {
+    val memberAddress = member.address
+    try {
+      Some(
+        connectionManager.putIfAbsent(
+          memberAddress,
+          // the cluster daemon is expected at /system/cluster on the member's own address
+          () ⇒ system.actorFor(RootActorPath(memberAddress) / "system" / "cluster")))
+    } catch {
+      case e: Exception ⇒ None
+    }
+  }
+
+  private def newGossip(): Gossip = Gossip(member = address)
+
+  private def incrementVersionForGossip(from: Gossip): Gossip = {
+    val newVersion = from.version.increment(memberFingerprint, newTimestamp)
+    from copy (version = newVersion)
+  }
+
+  private def seedNodesWithoutMyself: List[Member] = seeds.filter(_.address != address).toList
+
+  private def selectRandomNode(members: Set[Member]): Member = members.toList(random.nextInt(members.size))
+}
diff --git a/akka-cluster/src/main/scala/akka/cluster/LocalCluster.scala b/akka-cluster/src/main/scala/akka/cluster/LocalCluster.scala
deleted file mode 100644
index d8a0ac6027..0000000000
--- a/akka-cluster/src/main/scala/akka/cluster/LocalCluster.scala
+++ /dev/null
@@ -1,105 +0,0 @@
-/**
- * Copyright (C) 2009-2012 Typesafe Inc.
- */ -package akka.cluster - -import akka.config.Config -import Config._ -import akka.util._ -import Helpers._ -import akka.actor._ -import Actor._ -import akka.event.EventHandler -import akka.cluster.zookeeper._ - -import org.apache.zookeeper._ -import org.apache.zookeeper.Watcher.Event._ -import org.apache.zookeeper.data.Stat -import org.apache.zookeeper.recipes.lock.{ WriteLock, LockListener } - -import org.I0Itec.zkclient._ -import org.I0Itec.zkclient.serialize._ -import org.I0Itec.zkclient.exception._ - -import java.util.concurrent.atomic.{ AtomicBoolean, AtomicReference } - -object LocalCluster { - val clusterDirectory = config.getString("akka.cluster.log-directory", "_akka_cluster") - val clusterDataDirectory = clusterDirectory + "/data" - val clusterLogDirectory = clusterDirectory + "/log" - - val clusterName = Config.clusterName - val nodename = Config.nodename - val zooKeeperServers = config.getString("akka.cluster.zookeeper-server-addresses", "localhost:2181") - val sessionTimeout = Duration(config.getInt("akka.cluster.session-timeout", 60), TIME_UNIT).toMillis.toInt - val connectionTimeout = Duration(config.getInt("akka.cluster.connection-timeout", 60), TIME_UNIT).toMillis.toInt - val defaultZooKeeperSerializer = new SerializableSerializer - - val zkServer = new AtomicReference[Option[ZkServer]](None) - - lazy val zkClient = new AkkaZkClient(zooKeeperServers, sessionTimeout, connectionTimeout, defaultZooKeeperSerializer) - - /** - * Looks up the local hostname. - */ - def lookupLocalhostName = NetworkUtil.getLocalhostName - - /** - * Starts up a local ZooKeeper server. Should only be used for testing purposes. - */ - def startLocalCluster(): ZkServer = - startLocalCluster(clusterDataDirectory, clusterLogDirectory, 2181, 5000) - - /** - * Starts up a local ZooKeeper server. Should only be used for testing purposes. - */ - def startLocalCluster(port: Int, tickTime: Int): ZkServer = - startLocalCluster(clusterDataDirectory, clusterLogDirectory, port, tickTime) - - /** - * Starts up a local ZooKeeper server. Should only be used for testing purposes. - */ - def startLocalCluster(tickTime: Int): ZkServer = - startLocalCluster(clusterDataDirectory, clusterLogDirectory, 2181, tickTime) - - /** - * Starts up a local ZooKeeper server. Should only be used for testing purposes. - */ - def startLocalCluster(dataPath: String, logPath: String): ZkServer = - startLocalCluster(dataPath, logPath, 2181, 500) - - /** - * Starts up a local ZooKeeper server. Should only be used for testing purposes. - */ - def startLocalCluster(dataPath: String, logPath: String, port: Int, tickTime: Int): ZkServer = { - try { - val zk = AkkaZooKeeper.startLocalServer(dataPath, logPath, port, tickTime) - zkServer.set(Some(zk)) - zk - } catch { - case e: Throwable ⇒ - EventHandler.error(e, this, "Could not start local ZooKeeper cluster") - throw e - } - } - - /** - * Shut down the local ZooKeeper server. 
- */ - def shutdownLocalCluster() { - withPrintStackTraceOnError { - EventHandler.debug(this, "Shuts down local cluster") - zkServer.getAndSet(None).foreach(_.shutdown()) - } - } - - def createQueue(rootPath: String, blocking: Boolean = true) = - new ZooKeeperQueue(zkClient, rootPath, blocking) - - def barrier(name: String, count: Int): ZooKeeperBarrier = - ZooKeeperBarrier(zkClient, clusterName, name, nodename, count) - - def barrier(name: String, count: Int, timeout: Duration): ZooKeeperBarrier = - ZooKeeperBarrier(zkClient, clusterName, name, nodename, count, timeout) -} - diff --git a/akka-remote/src/main/scala/akka/remote/RemoteConnectionManager.scala b/akka-cluster/src/main/scala/akka/cluster/RemoteConnectionManager.scala similarity index 96% rename from akka-remote/src/main/scala/akka/remote/RemoteConnectionManager.scala rename to akka-cluster/src/main/scala/akka/cluster/RemoteConnectionManager.scala index fd2a9135d7..63020367a5 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteConnectionManager.scala +++ b/akka-cluster/src/main/scala/akka/cluster/RemoteConnectionManager.scala @@ -2,9 +2,10 @@ * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.remote +package akka.cluster import akka.actor._ +import akka.remote._ import akka.routing._ import akka.event.Logging @@ -19,6 +20,7 @@ import java.util.concurrent.atomic.AtomicReference class RemoteConnectionManager( system: ActorSystemImpl, remote: RemoteActorRefProvider, + failureDetector: AccrualFailureDetector, initialConnections: Map[Address, ActorRef] = Map.empty[Address, ActorRef]) extends ConnectionManager { @@ -30,8 +32,6 @@ class RemoteConnectionManager( def iterable: Iterable[ActorRef] = connections.values } - def failureDetector = remote.failureDetector - private val state: AtomicReference[State] = new AtomicReference[State](newState()) /** @@ -145,6 +145,6 @@ class RemoteConnectionManager( } } - private[remote] def newConnection(remoteAddress: Address, actorPath: ActorPath) = + private[cluster] def newConnection(remoteAddress: Address, actorPath: ActorPath) = new RemoteActorRef(remote, remote.transport, actorPath, Nobody) } diff --git a/akka-cluster/src/main/scala/akka/cluster/TransactionLog.scala b/akka-cluster/src/main/scala/akka/cluster/TransactionLog.scala deleted file mode 100644 index ce9eb300f5..0000000000 --- a/akka-cluster/src/main/scala/akka/cluster/TransactionLog.scala +++ /dev/null @@ -1,604 +0,0 @@ -package akka.cluster - -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -import org.apache.bookkeeper.client.{ BookKeeper, LedgerHandle, LedgerEntry, BKException, AsyncCallback } -import org.apache.zookeeper.CreateMode - -import org.I0Itec.zkclient.exception._ - -import akka.AkkaException -import akka.config._ -import Config._ -import akka.util._ -import akka.actor._ -import DeploymentConfig.ReplicationScheme -import akka.event.EventHandler -import akka.dispatch.{ DefaultPromise, Promise, MessageInvocation } -import akka.cluster.zookeeper._ -import akka.serialization.ActorSerialization._ -import akka.serialization.Compression.LZF - -import java.util.Enumeration - -// FIXME allow user to choose dynamically between 'async' and 'sync' tx logging (asyncAddEntry(byte[] data, AddCallback cb, Object ctx)) -// FIXME clean up old entries in log after doing a snapshot - -class ReplicationException(message: String, cause: Throwable = null) extends AkkaException(message) { - def this(msg: String) = this(msg, null) -} - -/** - * A TransactionLog makes chunks of data durable. 
- */ -class TransactionLog private ( - ledger: LedgerHandle, - val id: String, - val isAsync: Boolean, - replicationScheme: ReplicationScheme) { - - import TransactionLog._ - - val logId = ledger.getId - val txLogPath = transactionLogPath(id) - val snapshotPath = txLogPath + "/snapshot" - - private val isOpen = new Switch(true) - - /** - * Record an Actor message invocation. - * - * @param invocation the MessageInvocation to record - * @param actorRef the LocalActorRef that received the message. - * @throws ReplicationException if the TransactionLog already is closed. - */ - def recordEntry(invocation: MessageInvocation, actorRef: LocalActorRef) { - val entryId = ledger.getLastAddPushed + 1 - val needsSnapshot = entryId != 0 && (entryId % snapshotFrequency) == 0 - - if (needsSnapshot) { - //todo: could it be that the message is never persisted when a snapshot is added? - val bytes = toBinary(actorRef, false, replicationScheme) - recordSnapshot(bytes) - } else { - val bytes = MessageSerializer.serialize(invocation.message.asInstanceOf[AnyRef]).toByteArray - recordEntry(bytes) - } - } - - /** - * Record an entry. - * - * @param entry the entry in byte form to record. - * @throws ReplicationException if the TransactionLog already is closed. - */ - def recordEntry(entry: Array[Byte]) { - if (isOpen.isOn) { - val entryBytes = - if (shouldCompressData) LZF.compress(entry) - else entry - - try { - if (isAsync) { - ledger.asyncAddEntry( - entryBytes, - new AsyncCallback.AddCallback { - def addComplete(returnCode: Int, ledgerHandle: LedgerHandle, entryId: Long, ctx: AnyRef) { - handleReturnCode(returnCode) - EventHandler.debug(this, "Writing entry [%s] to log [%s]".format(entryId, logId)) - } - }, - null) - } else { - handleReturnCode(ledger.addEntry(entryBytes)) - val entryId = ledger.getLastAddPushed - EventHandler.debug(this, "Writing entry [%s] to log [%s]".format(entryId, logId)) - } - } catch { - case e: Throwable ⇒ handleError(e) - } - } else transactionClosedError - } - - /** - * Record a snapshot. - * - * @param snapshot the snapshot in byteform to record. - * @throws ReplicationException if the TransactionLog already is closed. - */ - def recordSnapshot(snapshot: Array[Byte]) { - if (isOpen.isOn) { - val snapshotBytes = - if (shouldCompressData) LZF.compress(snapshot) - else snapshot - - try { - if (isAsync) { - ledger.asyncAddEntry( - snapshotBytes, - new AsyncCallback.AddCallback { - def addComplete(returnCode: Int, ledgerHandle: LedgerHandle, snapshotId: Long, ctx: AnyRef) { - handleReturnCode(returnCode) - EventHandler.debug(this, "Writing snapshot to log [%s]".format(snapshotId)) - storeSnapshotMetaDataInZooKeeper(snapshotId) - } - }, - null) - } else { - //todo: could this be racy, since writing the snapshot itself and storing the snapsnot id, is not - //an atomic operation? - - //first store the snapshot. - handleReturnCode(ledger.addEntry(snapshotBytes)) - val snapshotId = ledger.getLastAddPushed - - //this is the location where all previous entries can be removed. - //TODO: how to remove data? - - EventHandler.debug(this, "Writing snapshot to log [%s]".format(snapshotId)) - //and now store the snapshot metadata. - storeSnapshotMetaDataInZooKeeper(snapshotId) - } - } catch { - case e: Throwable ⇒ handleError(e) - } - } else transactionClosedError - } - - /** - * Get all the entries for this transaction log. - * - * @throws ReplicationException if the TransactionLog already is closed. 
- */ - def entries: Vector[Array[Byte]] = entriesInRange(0, ledger.getLastAddConfirmed) - - /** - * Get the latest snapshot and all subsequent entries from this snapshot. - */ - def latestSnapshotAndSubsequentEntries: (Option[Array[Byte]], Vector[Array[Byte]]) = { - latestSnapshotId match { - case Some(snapshotId) ⇒ - EventHandler.debug(this, "Reading entries from snapshot id [%s] for log [%s]".format(snapshotId, logId)) - - val cursor = snapshotId + 1 - val lastIndex = ledger.getLastAddConfirmed - - val snapshot = Some(entriesInRange(snapshotId, snapshotId).head) - - val entries = - if ((cursor - lastIndex) == 0) Vector.empty[Array[Byte]] - else entriesInRange(cursor, lastIndex) - - (snapshot, entries) - - case None ⇒ - (None, entries) - } - } - - /** - * Get a range of entries from 'from' to 'to' for this transaction log. - * - * @param from the first element of the range - * @param the last index from the range (including). - * @return a Vector containing Byte Arrays. Each element in the vector is a record. - * @throws IllegalArgumenException if from or to is negative, or if 'from' is bigger than 'to'. - * @throws ReplicationException if the TransactionLog already is closed. - */ - def entriesInRange(from: Long, to: Long): Vector[Array[Byte]] = if (isOpen.isOn) { - try { - if (from < 0) throw new IllegalArgumentException("'from' index can't be negative [" + from + "]") - if (to < 0) throw new IllegalArgumentException("'to' index can't be negative [" + from + "]") - if (to < from) throw new IllegalArgumentException("'to' index can't be smaller than 'from' index [" + from + "," + to + "]") - EventHandler.debug(this, "Reading entries [%s -> %s] for log [%s]".format(from, to, logId)) - - if (isAsync) { - val future = Promise[Vector[Array[Byte]]]() - ledger.asyncReadEntries( - from, to, - new AsyncCallback.ReadCallback { - def readComplete(returnCode: Int, ledgerHandle: LedgerHandle, enumeration: Enumeration[LedgerEntry], ctx: AnyRef) { - val future = ctx.asInstanceOf[Promise[Vector[Array[Byte]]]] - val entries = toByteArrays(enumeration) - - if (returnCode == BKException.Code.OK) future.success(entries) - else future.failure(BKException.create(returnCode)) - } - }, - future) - await(future) - } else { - toByteArrays(ledger.readEntries(from, to)) - } - } catch { - case e: Throwable ⇒ handleError(e) - } - } else transactionClosedError - - /** - * Get the last entry written to this transaction log. - * - * Returns -1 if there has never been an entry. - */ - def latestEntryId: Long = ledger.getLastAddConfirmed - - /** - * Get the id for the last snapshot written to this transaction log. - */ - def latestSnapshotId: Option[Long] = { - try { - val snapshotId = zkClient.readData(snapshotPath).asInstanceOf[Long] - EventHandler.debug(this, "Retrieved latest snapshot id [%s] from transaction log [%s]".format(snapshotId, logId)) - Some(snapshotId) - } catch { - case e: ZkNoNodeException ⇒ None - case e: Throwable ⇒ handleError(e) - } - } - - /** - * Delete this transaction log. So all entries but also all metadata will be removed. - * - * TODO: Behavior unclear what happens when already deleted (what happens to the ledger). - * TODO: Behavior unclear what happens when already closed. 
- */ - def delete() { - if (isOpen.isOn) { - EventHandler.debug(this, "Deleting transaction log [%s]".format(logId)) - try { - if (isAsync) { - bookieClient.asyncDeleteLedger( - logId, - new AsyncCallback.DeleteCallback { - def deleteComplete(returnCode: Int, ctx: AnyRef) { - (returnCode) - } - }, - null) - } else { - bookieClient.deleteLedger(logId) - } - - //also remote everything else that belongs to this TransactionLog. - zkClient.delete(snapshotPath) - zkClient.delete(txLogPath) - } catch { - case e: Throwable ⇒ handleError(e) - } - } - } - - /** - * Close this transaction log. - * - * If already closed, the call is ignored. - */ - def close() { - isOpen switchOff { - EventHandler.debug(this, "Closing transaction log [%s]".format(logId)) - try { - if (isAsync) { - ledger.asyncClose( - new AsyncCallback.CloseCallback { - def closeComplete( - returnCode: Int, - ledgerHandle: LedgerHandle, - ctx: AnyRef) { - handleReturnCode(returnCode) - } - }, - null) - } else { - ledger.close() - } - } catch { - case e: Throwable ⇒ handleError(e) - } - } - } - - private def toByteArrays(enumeration: Enumeration[LedgerEntry]): Vector[Array[Byte]] = { - var entries = Vector[Array[Byte]]() - while (enumeration.hasMoreElements) { - val bytes = enumeration.nextElement.getEntry - val entry = - if (shouldCompressData) LZF.uncompress(bytes) - else bytes - entries = entries :+ entry - } - entries - } - - private def storeSnapshotMetaDataInZooKeeper(snapshotId: Long) { - if (isOpen.isOn) { - try { - zkClient.create(snapshotPath, null, CreateMode.PERSISTENT) - } catch { - case e: ZkNodeExistsException ⇒ {} // do nothing - case e: Throwable ⇒ handleError(e) - } - - try { - zkClient.writeData(snapshotPath, snapshotId) - } catch { - case e: Throwable ⇒ - handleError(new ReplicationException( - "Could not store transaction log snapshot meta-data in ZooKeeper for UUID [" + id + "]")) - } - EventHandler.debug(this, "Writing snapshot [%s] to log [%s]".format(snapshotId, logId)) - } else transactionClosedError - } - - private def handleReturnCode(block: ⇒ Long) { - val code = block.toInt - if (code == BKException.Code.OK) {} // all fine - else handleError(BKException.create(code)) - } - - private def transactionClosedError: Nothing = { - handleError(new ReplicationException( - "Transaction log [" + logId + - "] is closed. You need to open up new a new one with 'TransactionLog.logFor(id)'")) - } -} - -/** - * TODO: Documentation. 
- */ -object TransactionLog { - - val zooKeeperServers = config.getString("akka.cluster.zookeeper-server-addresses", "localhost:2181") - val sessionTimeout = Duration(config.getInt("akka.cluster.session-timeout", 60), TIME_UNIT).toMillis.toInt - val connectionTimeout = Duration(config.getInt("akka.cluster.connection-timeout", 60), TIME_UNIT).toMillis.toInt - - val digestType = config.getString("akka.cluster.replication.digest-type", "CRC32") match { - case "CRC32" ⇒ BookKeeper.DigestType.CRC32 - case "MAC" ⇒ BookKeeper.DigestType.MAC - case unknown ⇒ throw new ConfigurationException( - "akka.cluster.replication.digest-type is invalid [" + unknown + "], must be either 'CRC32' or 'MAC'") - } - val password = config.getString("akka.cluster.replication.password", "secret").getBytes("UTF-8") - val ensembleSize = config.getInt("akka.cluster.replication.ensemble-size", 3) - val quorumSize = config.getInt("akka.cluster.replication.quorum-size", 2) - val snapshotFrequency = config.getInt("akka.cluster.replication.snapshot-frequency", 1000) - val timeout = Duration(config.getInt("akka.cluster.replication.timeout", 30), TIME_UNIT).toMillis - val shouldCompressData = config.getBool("akka.remote.use-compression", false) - - private[akka] val transactionLogNode = "/transaction-log-ids" - - private val isConnected = new Switch(false) - - @volatile - private[akka] var bookieClient: BookKeeper = _ - - @volatile - private[akka] var zkClient: AkkaZkClient = _ - - private[akka] def apply( - ledger: LedgerHandle, - id: String, - isAsync: Boolean, - replicationScheme: ReplicationScheme) = - new TransactionLog(ledger, id, isAsync, replicationScheme) - - /** - * Starts up the transaction log. - */ - def start() { - isConnected switchOn { - bookieClient = new BookKeeper(zooKeeperServers) - zkClient = new AkkaZkClient(zooKeeperServers, sessionTimeout, connectionTimeout) - - try { - zkClient.create(transactionLogNode, null, CreateMode.PERSISTENT) - } catch { - case e: ZkNodeExistsException ⇒ {} // do nothing - case e: Throwable ⇒ handleError(e) - } - - EventHandler.info(this, - ("Transaction log service started with" + - "\n\tdigest type [%s]" + - "\n\tensemble size [%s]" + - "\n\tquorum size [%s]" + - "\n\tlogging time out [%s]").format( - digestType, - ensembleSize, - quorumSize, - timeout)) - } - } - - /** - * Shuts down the transaction log. - */ - def shutdown() { - isConnected switchOff { - try { - EventHandler.info(this, "Shutting down transaction log...") - zkClient.close() - bookieClient.halt() - EventHandler.info(this, "Transaction log shut down successfully") - } catch { - case e: Throwable ⇒ handleError(e) - } - } - } - - def transactionLogPath(id: String): String = transactionLogNode + "/" + id - - /** - * Checks if a TransactionLog for the given id already exists. - */ - def exists(id: String): Boolean = { - val txLogPath = transactionLogPath(id) - zkClient.exists(txLogPath) - } - - /** - * Creates a new transaction log for the 'id' specified. If a TransactionLog already exists for the id, - * it will be overwritten. - */ - def newLogFor(id: String, isAsync: Boolean, replicationScheme: ReplicationScheme): TransactionLog = { - val txLogPath = transactionLogPath(id) - - val ledger = try { - if (exists(id)) { - //if it exists, we need to delete it first. This gives it the overwrite semantics we are looking for. 
- try { - val ledger = bookieClient.createLedger(ensembleSize, quorumSize, digestType, password) - val txLog = TransactionLog(ledger, id, false, null) - txLog.delete() - txLog.close() - } catch { - case e: Throwable ⇒ handleError(e) - } - } - - val future = Promise[LedgerHandle]() - if (isAsync) { - bookieClient.asyncCreateLedger( - ensembleSize, quorumSize, digestType, password, - new AsyncCallback.CreateCallback { - def createComplete( - returnCode: Int, - ledgerHandle: LedgerHandle, - ctx: AnyRef) { - val future = ctx.asInstanceOf[Promise[LedgerHandle]] - if (returnCode == BKException.Code.OK) future.success(ledgerHandle) - else future.failure(BKException.create(returnCode)) - } - }, - future) - await(future) - } else { - bookieClient.createLedger(ensembleSize, quorumSize, digestType, password) - } - } catch { - case e: Throwable ⇒ handleError(e) - } - - val logId = ledger.getId - try { - zkClient.create(txLogPath, null, CreateMode.PERSISTENT) - zkClient.writeData(txLogPath, logId) - logId //TODO: does this have any effect? - } catch { - case e: Throwable ⇒ - bookieClient.deleteLedger(logId) // clean up - handleError(new ReplicationException( - "Could not store transaction log [" + logId + - "] meta-data in ZooKeeper for UUID [" + id + "]", e)) - } - - EventHandler.info(this, "Created new transaction log [%s] for UUID [%s]".format(logId, id)) - TransactionLog(ledger, id, isAsync, replicationScheme) - } - - /** - * Fetches an existing transaction log for the 'id' specified. - * - * @throws ReplicationException if the log with the given id doesn't exist. - */ - def logFor(id: String, isAsync: Boolean, replicationScheme: ReplicationScheme): TransactionLog = { - val txLogPath = transactionLogPath(id) - - val logId = try { - val logId = zkClient.readData(txLogPath).asInstanceOf[Long] - EventHandler.debug(this, - "Retrieved transaction log [%s] for UUID [%s]".format(logId, id)) - logId - } catch { - case e: ZkNoNodeException ⇒ - handleError(new ReplicationException( - "Transaction log for UUID [" + id + "] does not exist in ZooKeeper")) - case e: Throwable ⇒ handleError(e) - } - - val ledger = try { - if (isAsync) { - val future = Promise[LedgerHandle]() - bookieClient.asyncOpenLedger( - logId, digestType, password, - new AsyncCallback.OpenCallback { - def openComplete(returnCode: Int, ledgerHandle: LedgerHandle, ctx: AnyRef) { - val future = ctx.asInstanceOf[Promise[LedgerHandle]] - if (returnCode == BKException.Code.OK) future.success(ledgerHandle) - else future.failure(BKException.create(returnCode)) - } - }, - future) - await(future) - } else { - bookieClient.openLedger(logId, digestType, password) - } - } catch { - case e: Throwable ⇒ handleError(e) - } - - TransactionLog(ledger, id, isAsync, replicationScheme) - } - - private[akka] def await[T](future: Promise[T]): T = { - future.await.value.get match { - case Right(result) => result - case Left(throwable) => handleError(throwable) - } - } - - private[akka] def handleError(e: Throwable): Nothing = { - EventHandler.error(e, this, e.toString) - throw e - } -} - -/** - * TODO: Documentation. - */ -object LocalBookKeeperEnsemble { - private val isRunning = new Switch(false) - - //TODO: should probably come from the config file. - private val port = 5555 - - @volatile - private var localBookKeeper: LocalBookKeeper = _ - - /** - * Starts the LocalBookKeeperEnsemble. - * - * Call can safely be made when already started. - * - * This call will block until it is started. 
- */
-  def start() {
-    isRunning switchOn {
-      EventHandler.info(this, "Starting up LocalBookKeeperEnsemble...")
-      localBookKeeper = new LocalBookKeeper(TransactionLog.ensembleSize)
-      localBookKeeper.runZookeeper(port)
-      localBookKeeper.initializeZookeper()
-      localBookKeeper.runBookies()
-      EventHandler.info(this, "LocalBookKeeperEnsemble started up successfully")
-    }
-  }
-
-  /**
-   * Shuts down the LocalBookKeeperEnsemble.
-   *
-   * Call can safely bemade when already shutdown.
-   *
-   * This call will block until the shutdown completes.
-   */
-  def shutdown() {
-    isRunning switchOff {
-      EventHandler.info(this, "Shutting down LocalBookKeeperEnsemble...")
-      localBookKeeper.bs.foreach(_.shutdown()) // stop bookies
-      localBookKeeper.zkc.close() // stop zk client
-      localBookKeeper.zks.shutdown() // stop zk server
-      localBookKeeper.serverFactory.shutdown() // stop zk NIOServer
-      EventHandler.info(this, "LocalBookKeeperEnsemble shut down successfully")
-    }
-  }
-}
diff --git a/akka-remote/src/main/scala/akka/remote/VectorClock.scala b/akka-cluster/src/main/scala/akka/cluster/VectorClock.scala
similarity index 71%
rename from akka-remote/src/main/scala/akka/remote/VectorClock.scala
rename to akka-cluster/src/main/scala/akka/cluster/VectorClock.scala
index 9da70111e9..ef1f1be490 100644
--- a/akka-remote/src/main/scala/akka/remote/VectorClock.scala
+++ b/akka-cluster/src/main/scala/akka/cluster/VectorClock.scala
@@ -2,18 +2,39 @@
  * Copyright (C) 2009-2012 Typesafe Inc.
  */
 
-package akka.remote
+package akka.cluster
 
 import akka.AkkaException
 
 class VectorClockException(message: String) extends AkkaException(message)
 
+/**
+ * Trait to be extended by classes that want to be versioned using a VectorClock.
+ */
+trait Versioned {
+  def version: VectorClock
+}
+
+/**
+ * Utility methods for comparing Versioned instances.
+ */
+object Versioned {
+  def latestVersionOf[T <: Versioned](versioned1: T, versioned2: T): T = {
+    (versioned1.version compare versioned2.version) match {
+      case VectorClock.Before     ⇒ versioned2 // version 1 is BEFORE (older), use version 2
+      case VectorClock.After      ⇒ versioned1 // version 1 is AFTER (newer), use version 1
+      case VectorClock.Concurrent ⇒ versioned1 // can't establish a causal relationship between versions => conflict - keeping version 1
+    }
+  }
+}
+
 /**
  * Representation of a Vector-based clock (counting clock), inspired by Lamport logical clocks.
- *
+ * {{{
  * Reference:
- *    Leslie Lamport (1978). "Time, clocks, and the ordering of events in a distributed system". Communications of the ACM 21 (7): 558-565.
- *    Friedemann Mattern (1988). "Virtual Time and Global States of Distributed Systems". Workshop on Parallel and Distributed Algorithms: pp. 215-226
+ *    1) Leslie Lamport (1978). "Time, clocks, and the ordering of events in a distributed system". Communications of the ACM 21 (7): 558-565.
+ *    2) Friedemann Mattern (1988). "Virtual Time and Global States of Distributed Systems". Workshop on Parallel and Distributed Algorithms: pp. 215-226
+ * }}}
  */
 case class VectorClock(
   versions: Vector[VectorClock.Entry] = Vector.empty[VectorClock.Entry],
@@ -55,9 +76,11 @@
   /**
    * The result of comparing two vector clocks.
    * Either:
-   * 1) v1 is BEFORE v2
-   * 2) v1 is AFTER t2
-   * 3) v1 happens CONCURRENTLY to v2
+   * {{{
+   * 1) v1 is BEFORE v2
+   * 2) v1 is AFTER v2
+   * 3) v1 happens CONCURRENTLY to v2
+   * }}}
   */
  sealed trait Ordering
  case object Before extends Ordering
@@ -74,9 +97,11 @@
 /**
  * Compare two vector clocks.
The outcomes will be one of the following: *
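+ * For intuition, a worked example first (node names assumed, clocks written as node -> counter):
+ * {{{
+ * (A -> 1, B -> 2) is BEFORE        (A -> 1, B -> 3)
+ * (A -> 1, B -> 3) is CONCURRENT to (A -> 2, B -> 1)
+ * (A -> 2, B -> 3) is AFTER         (A -> 1, B -> 3)
+ * }}}
+ * The precise rules: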

- * 1. Clock 1 is BEFORE clock 2 if there exists an i such that c1(i) <= c(2) and there does not exist a j such that c1(j) > c2(j).
- * 2. Clock 1 is CONCURRENT to clock 2 if there exists an i, j such that c1(i) < c2(i) and c1(j) > c2(j).
- * 3. Clock 1 is AFTER clock 2 otherwise.
+ * {{{
+ * 1. Clock 1 is BEFORE clock 2 if there exists an i such that c1(i) <= c2(i) and there does not exist a j such that c1(j) > c2(j).
+ * 2. Clock 1 is CONCURRENT to clock 2 if there exists an i, j such that c1(i) < c2(i) and c1(j) > c2(j).
+ * 3. Clock 1 is AFTER clock 2 otherwise.
+ * }}}
 *
 * @param v1 The first VectorClock
 * @param v2 The second VectorClock
diff --git a/akka-cluster/src/main/scala/akka/cluster/metrics/LocalNodeMetricsManager.scala b/akka-cluster/src/main/scala/akka/cluster/metrics/LocalNodeMetricsManager.scala
deleted file mode 100644
index c366ed598c..0000000000
--- a/akka-cluster/src/main/scala/akka/cluster/metrics/LocalNodeMetricsManager.scala
+++ /dev/null
@@ -1,226 +0,0 @@
-/**
- * Copyright (C) 2009-2012 Typesafe Inc.
- */
-
-package akka.cluster.metrics
-
-import akka.cluster._
-import Cluster._
-import akka.cluster.zookeeper._
-import akka.actor._
-import Actor._
-import scala.collection.JavaConversions._
-import scala.collection.JavaConverters._
-import java.util.concurrent.{ ConcurrentHashMap, ConcurrentSkipListSet }
-import java.util.concurrent.atomic.AtomicReference
-import akka.util.{ Duration, Switch }
-import akka.util.Helpers._
-import akka.util.duration._
-import org.I0Itec.zkclient.exception.ZkNoNodeException
-import akka.event.EventHandler
-
-/*
- * Instance of the metrics manager running on the node. To keep the fine performance, metrics of all the
- * nodes in the cluster are cached internally, and refreshed from monitoring MBeans / Sigar (when if's local node),
- * of ZooKeeper (if it's metrics of all the nodes in the cluster) after a specified timeout -
- * metricsRefreshTimeout
- * metricsRefreshTimeout defaults to 2 seconds, and can be declaratively defined through
- * akka.conf:
- *
- * @exampl {{{
- *   akka.cluster.metrics-refresh-timeout = 2
- * }}}
- */
-class LocalNodeMetricsManager(zkClient: AkkaZkClient, private val metricsRefreshTimeout: Duration)
-  extends NodeMetricsManager {
-
-  /*
-   * Provides metrics of the system that the node is running on, through monitoring MBeans, Hyperic Sigar
-   * and other systems
-   */
-  lazy private val metricsProvider = SigarMetricsProvider(refreshTimeout.toMillis.toInt) fold ((thrw) ⇒ {
-    EventHandler.warning(this, """Hyperic Sigar library failed to load due to %s: %s.
-All the metrics will be retreived from monitoring MBeans, and may be incorrect at some platforms.
-In order to get better metrics, please put "sigar.jar" to the classpath, and add platform-specific native libary to "java.library.path"."""
-      .format(thrw.getClass.getName, thrw.getMessage))
-    new JMXMetricsProvider
-  },
-    sigar ⇒ sigar)
-
-  /*
-   * Metrics of all nodes in the cluster
-   */
-  private val localNodeMetricsCache = new ConcurrentHashMap[String, NodeMetrics]
-
-  @volatile
-  private var _refreshTimeout = metricsRefreshTimeout
-
-  /*
-   * Plugged monitors (both local and cluster-wide)
-   */
-  private val alterationMonitors = new ConcurrentSkipListSet[MetricsAlterationMonitor]
-
-  private val _isRunning = new Switch(false)
-
-  /*
-   * If the value is true, metrics manages is started and running. Stopped, otherwise
-   */
-  def isRunning = _isRunning.isOn
-
-  /*
-   * Starts metrics manager.
When metrics manager is started, it refreshes cache from ZooKeeper - * after refreshTimeout, and invokes plugged monitors - */ - def start() = { - _isRunning.switchOn { refresh() } - this - } - - private[cluster] def metricsForNode(nodeName: String): String = "%s/%s".format(node.NODE_METRICS, nodeName) - - /* - * Adds monitor that reacts, when specific conditions are satisfied - */ - def addMonitor(monitor: MetricsAlterationMonitor) = alterationMonitors add monitor - - def removeMonitor(monitor: MetricsAlterationMonitor) = alterationMonitors remove monitor - - def refreshTimeout_=(newValue: Duration) = _refreshTimeout = newValue - - /* - * Timeout after which metrics, cached in the metrics manager, will be refreshed from ZooKeeper - */ - def refreshTimeout = _refreshTimeout - - /* - * Stores metrics of the node in ZooKeeper - */ - private[akka] def storeMetricsInZK(metrics: NodeMetrics) = { - val metricsPath = metricsForNode(metrics.nodeName) - if (zkClient.exists(metricsPath)) { - zkClient.writeData(metricsPath, metrics) - } else { - ignore[ZkNoNodeException](zkClient.createEphemeral(metricsPath, metrics)) - } - } - - /* - * Gets metrics of the node from ZooKeeper - */ - private[akka] def getMetricsFromZK(nodeName: String) = { - zkClient.readData[NodeMetrics](metricsForNode(nodeName)) - } - - /* - * Removed metrics of the node from local cache and ZooKeeper - */ - def removeNodeMetrics(nodeName: String) = { - val metricsPath = metricsForNode(nodeName) - if (zkClient.exists(metricsPath)) { - ignore[ZkNoNodeException](zkClient.delete(metricsPath)) - } - - localNodeMetricsCache.remove(nodeName) - } - - /* - * Gets metrics of a local node directly from JMX monitoring beans/Hyperic Sigar - */ - def getLocalMetrics = metricsProvider.getLocalMetrics - - /* - * Gets metrics of the node, specified by the name. If useCached is true (default value), - * metrics snapshot is taken from the local cache; otherwise, it's retreived from ZooKeeper' - */ - def getMetrics(nodeName: String, useCached: Boolean = true): Option[NodeMetrics] = - if (useCached) - Option(localNodeMetricsCache.get(nodeName)) - else - try { - Some(getMetricsFromZK(nodeName)) - } catch { - case ex: ZkNoNodeException ⇒ None - } - - /* - * Return metrics of all nodes in the cluster from ZooKeeper - */ - private[akka] def getAllMetricsFromZK: Map[String, NodeMetrics] = { - val metricsPaths = zkClient.getChildren(node.NODE_METRICS).toList.toArray.asInstanceOf[Array[String]] - metricsPaths.flatMap { nodeName ⇒ getMetrics(nodeName, false).map((nodeName, _)) } toMap - } - - /* - * Gets cached metrics of all nodes in the cluster - */ - def getAllMetrics: Array[NodeMetrics] = localNodeMetricsCache.values.asScala.toArray - - /* - * Refreshes locally cached metrics from ZooKeeper, and invokes plugged monitors - */ - private[akka] def refresh() { - - storeMetricsInZK(getLocalMetrics) - refreshMetricsCacheFromZK() - - if (isRunning) { - Scheduler.schedule({ () ⇒ refresh() }, refreshTimeout.length, refreshTimeout.length, refreshTimeout.unit) - invokeMonitors() - } - } - - /* - * Refreshes metrics manager cache from ZooKeeper - */ - private def refreshMetricsCacheFromZK() { - val allMetricsFromZK = getAllMetricsFromZK - - localNodeMetricsCache.keySet.foreach { key ⇒ - if (!allMetricsFromZK.contains(key)) - localNodeMetricsCache.remove(key) - } - - // RACY: metrics for the node might have been removed both from ZK and local cache by the moment, - // but will be re-cached, since they're still present in allMetricsFromZK snapshot. 
Not important, because - // cache will be fixed soon, at the next iteration of refresh - allMetricsFromZK map { - case (node, metrics) ⇒ - localNodeMetricsCache.put(node, metrics) - } - } - - /* - * Invokes monitors with the cached metrics - */ - private def invokeMonitors(): Unit = if (!alterationMonitors.isEmpty) { - // RACY: metrics for some nodes might have been removed/added by that moment. Not important, - // because monitors will be fed with up-to-date metrics shortly, at the next iteration of refresh - val clusterNodesMetrics = getAllMetrics - val localNodeMetrics = clusterNodesMetrics.find(_.nodeName == nodeAddress.nodeName) - val iterator = alterationMonitors.iterator - - // RACY: there might be new monitors added after the iterator has been obtained. Not important, - // becuse refresh interval is meant to be very short, and all the new monitors will be called ad the - // next refresh iteration - while (iterator.hasNext) { - - val monitor = iterator.next - - monitor match { - case localMonitor: LocalMetricsAlterationMonitor ⇒ - localNodeMetrics.map { metrics ⇒ - if (localMonitor reactsOn metrics) - localMonitor react metrics - } - - case clusterMonitor: ClusterMetricsAlterationMonitor ⇒ - if (clusterMonitor reactsOn clusterNodesMetrics) - clusterMonitor react clusterNodesMetrics - } - - } - } - - def stop() = _isRunning.switchOff - -} diff --git a/akka-cluster/src/main/scala/akka/cluster/metrics/MetricsProvider.scala b/akka-cluster/src/main/scala/akka/cluster/metrics/MetricsProvider.scala deleted file mode 100644 index 0b366ef9c8..0000000000 --- a/akka-cluster/src/main/scala/akka/cluster/metrics/MetricsProvider.scala +++ /dev/null @@ -1,154 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -package akka.cluster.metrics - -import akka.cluster._ -import akka.event.EventHandler -import java.lang.management.ManagementFactory -import akka.util.ReflectiveAccess._ -import akka.util.Switch - -/* - * Snapshot of the JVM / system that's the node is running on - * - * @param nodeName name of the node, where metrics are gathered at - * @param usedHeapMemory amount of heap memory currently used - * @param committedHeapMemory amount of heap memory guaranteed to be available - * @param maxHeapMemory maximum amount of heap memory that can be used - * @param avaiableProcessors number of the processors avalable to the JVM - * @param systemLoadAverage system load average. If OS-specific Sigar's native library is plugged, - * it's used to calculate average load on the CPUs in the system. Otherwise, value is retreived from monitoring - * MBeans. Hyperic Sigar provides more precise values, and, thus, if the library is provided, it's used by default. 
- * - */ -case class DefaultNodeMetrics(nodeName: String, - usedHeapMemory: Long, - committedHeapMemory: Long, - maxHeapMemory: Long, - avaiableProcessors: Int, - systemLoadAverage: Double) extends NodeMetrics - -object MetricsProvider { - - /* - * Maximum value of system load average - */ - val MAX_SYS_LOAD_AVG = 1 - - /* - * Minimum value of system load average - */ - val MIN_SYS_LOAD_AVG = 0 - - /* - * Default value of system load average - */ - val DEF_SYS_LOAD_AVG = 0.5 - -} - -/* - * Abstracts metrics provider that returns metrics of the system the node is running at - */ -trait MetricsProvider { - - /* - * Gets metrics of the local system - */ - def getLocalMetrics: NodeMetrics - -} - -/* - * Loads JVM metrics through JMX monitoring beans - */ -class JMXMetricsProvider extends MetricsProvider { - - import MetricsProvider._ - - private val memoryMXBean = ManagementFactory.getMemoryMXBean - - private val osMXBean = ManagementFactory.getOperatingSystemMXBean - - /* - * Validates and calculates system load average - * - * @param avg system load average obtained from a specific monitoring provider (may be incorrect) - * @return system load average, or default value(0.5), if passed value was out of permitted - * bounds (0.0 to 1.0) - */ - @inline - protected final def calcSystemLoadAverage(avg: Double) = - if (avg >= MIN_SYS_LOAD_AVG && avg <= MAX_SYS_LOAD_AVG) avg else DEF_SYS_LOAD_AVG - - protected def systemLoadAverage = calcSystemLoadAverage(osMXBean.getSystemLoadAverage) - - def getLocalMetrics = - DefaultNodeMetrics(Cluster.nodeAddress.nodeName, - memoryMXBean.getHeapMemoryUsage.getUsed, - memoryMXBean.getHeapMemoryUsage.getCommitted, - memoryMXBean.getHeapMemoryUsage.getMax, - osMXBean.getAvailableProcessors, - systemLoadAverage) - -} - -/* - * Loads wider range of metrics of a better quality with Hyperic Sigar (native library) - * - * @param refreshTimeout Sigar gathers metrics during this interval - */ -class SigarMetricsProvider private (private val sigarInstance: AnyRef) extends JMXMetricsProvider { - - private val reportErrors = new Switch(true) - - private val getCpuPercMethod = sigarInstance.getClass.getMethod("getCpuPerc") - private val sigarCpuCombinedMethod = getCpuPercMethod.getReturnType.getMethod("getCombined") - - /* - * Wraps reflective calls to Hyperic Sigar - * - * @param f reflective call to Hyperic Sigar - * @param fallback function, which is invoked, if call to Sigar has been finished with exception - */ - private def callSigarMethodOrElse[T](callSigar: ⇒ T, fallback: ⇒ T): T = - try callSigar catch { - case thrw ⇒ - reportErrors.switchOff { - EventHandler.warning(this, "Failed to get metrics from Hyperic Sigar. 
%s: %s" - .format(thrw.getClass.getName, thrw.getMessage)) - } - fallback - } - - /* - * Obtains system load average from Sigar - * If the value cannot be obtained, falls back to system load average taken from JMX - */ - override def systemLoadAverage = callSigarMethodOrElse( - calcSystemLoadAverage(sigarCpuCombinedMethod - .invoke(getCpuPercMethod.invoke(sigarInstance)).asInstanceOf[Double]), - super.systemLoadAverage) - -} - -object SigarMetricsProvider { - - /* - * Instantiates Sigar metrics provider through reflections, in order to avoid creating dependencies to - * Hiperic Sigar library - */ - def apply(refreshTimeout: Int): Either[Throwable, MetricsProvider] = try { - for { - sigarInstance ← createInstance[AnyRef]("org.hyperic.sigar.Sigar", noParams, noArgs).right - sigarProxyCacheClass: Class[_] ← getClassFor("org.hyperic.sigar.SigarProxyCache").right - } yield new SigarMetricsProvider(sigarProxyCacheClass - .getMethod("newInstance", Array(sigarInstance.getClass, classOf[Int]): _*) - .invoke(null, sigarInstance, new java.lang.Integer(refreshTimeout))) - } catch { - case thrw ⇒ Left(thrw) - } - -} diff --git a/akka-cluster/src/main/scala/akka/cluster/storage/Storage.scala b/akka-cluster/src/main/scala/akka/cluster/storage/Storage.scala deleted file mode 100644 index a402f2def1..0000000000 --- a/akka-cluster/src/main/scala/akka/cluster/storage/Storage.scala +++ /dev/null @@ -1,366 +0,0 @@ -package akka.cluster.storage - -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ -import akka.cluster.zookeeper.AkkaZkClient -import akka.AkkaException -import org.apache.zookeeper.{ KeeperException, CreateMode } -import org.apache.zookeeper.data.Stat -import java.util.concurrent.ConcurrentHashMap -import annotation.tailrec -import java.lang.{ RuntimeException, UnsupportedOperationException } - -/** - * Simple abstraction to store an Array of bytes based on some String key. - * - * Nothing is being said about ACID, transactions etc. It depends on the implementation - * of this Storage interface of what is and isn't done on the lowest level. - * - * The amount of data that is allowed to be insert/updated is implementation specific. The InMemoryStorage - * has no limits, but the ZooKeeperStorage has a maximum size of 1 mb. - * - * TODO: Class is up for better names. - * TODO: Instead of a String as key, perhaps also a byte-array. - */ -trait Storage { - - /** - * Loads the VersionedData for the given key. - * - * This call doesn't care about the actual version of the data. - * - * @param key: the key of the VersionedData to load. - * @return the VersionedData for the given entry. - * @throws MissingDataException if the entry with the given key doesn't exist. - * @throws StorageException if anything goes wrong while accessing the storage - */ - def load(key: String): VersionedData - - /** - * Loads the VersionedData for the given key and expectedVersion. - * - * This call can be used for optimistic locking since the version is included. - * - * @param key: the key of the VersionedData to load - * @param expectedVersion the version the data to load should have. - * @throws MissingDataException if the data with the given key doesn't exist. - * @throws BadVersionException if the version is not the expected version. - * @throws StorageException if anything goes wrong while accessing the storage - */ - def load(key: String, expectedVersion: Long): VersionedData - - /** - * Checks if a VersionedData with the given key exists. - * - * @param key the key to check the existence for. 
- * @return true if exists, false if not. - * @throws StorageException if anything goes wrong while accessing the storage - */ - def exists(key: String): Boolean - - /** - * Inserts a byte-array based on some key. - * - * @param key the key of the Data to insert. - * @param bytes the data to insert. - * @return the version of the written data (can be used for optimistic locking). - * @throws DataExistsException when VersionedData with the given Key already exists. - * @throws StorageException if anything goes wrong while accessing the storage - */ - def insert(key: String, bytes: Array[Byte]): Long - - /** - * Inserts the data if there is no data for that key, or overwrites it if it is there. - * - * This is the method you want to call if you just want to save something and don't - * care about any lost update issues. - * - * @param key the key of the data - * @param bytes the data to insert - * @return the version of the written data (can be used for optimistic locking). - * @throws StorageException if anything goes wrong while accessing the storage - */ - def insertOrOverwrite(key: String, bytes: Array[Byte]): Long - - /** - * Overwrites the current data for the given key. This call doesn't care about the version of the existing data. - * - * @param key the key of the data to overwrite - * @param bytes the data to insert. - * @return the version of the written data (can be used for optimistic locking). - * @throws MissingDataException when the entry with the given key doesn't exist. - * @throws StorageException if anything goes wrong while accessing the storage - */ - def overwrite(key: String, bytes: Array[Byte]): Long - - /** - * Updates an existing value using an optimistic lock. So it expect the current data to have the expectedVersion - * and only then, it will do the update. - * - * @param key the key of the data to update - * @param bytes the content to write for the given key - * @param expectedVersion the version of the content that is expected to be there. - * @return the version of the written data (can be used for optimistic locking). - * @throws MissingDataException if no data for the given key exists - * @throws BadVersionException if the version if the found data doesn't match the expected version. So essentially - * if another update was already done. - * @throws StorageException if anything goes wrong while accessing the storage - */ - def update(key: String, bytes: Array[Byte], expectedVersion: Long): Long -} - -/** - * The VersionedData is a container of data (some bytes) and a version (a Long). - */ -class VersionedData(val data: Array[Byte], val version: Long) {} - -/** - * An AkkaException thrown by the Storage module. - */ -class StorageException(msg: String = null, cause: java.lang.Throwable = null) extends AkkaException(msg, cause) { - def this(msg: String) = this(msg, null); -} - -/** - * * - * A StorageException thrown when an operation is done on a non existing node. - */ -class MissingDataException(msg: String = null, cause: java.lang.Throwable = null) extends StorageException(msg, cause) { - def this(msg: String) = this(msg, null); -} - -/** - * A StorageException thrown when an operation is done on an existing node, but no node was expected. - */ -class DataExistsException(msg: String = null, cause: java.lang.Throwable = null) extends StorageException(msg, cause) { - def this(msg: String) = this(msg, null); -} - -/** - * A StorageException thrown when an operation causes an optimistic locking failure. 
- */ -class BadVersionException(msg: String = null, cause: java.lang.Throwable = null) extends StorageException(msg, cause) { - def this(msg: String) = this(msg, null); -} - -/** - * A Storage implementation based on ZooKeeper. - * - * The store method is atomic: - * - so everything is written or nothing is written - * - is isolated, so threadsafe, - * but it will not participate in any transactions. - * - */ -class ZooKeeperStorage(zkClient: AkkaZkClient, root: String = "/peter/storage") extends Storage { - - var path = "" - - //makes sure that the complete root exists on zookeeper. - root.split("/").foreach( - item ⇒ if (item.size > 0) { - - path = path + "/" + item - - if (!zkClient.exists(path)) { - //it could be that another thread is going to create this root node as well, so ignore it when it happens. - try { - zkClient.create(path, "".getBytes, CreateMode.PERSISTENT) - } catch { - case ignore: KeeperException.NodeExistsException ⇒ - } - } - }) - - def toZkPath(key: String): String = { - root + "/" + key - } - - def load(key: String) = try { - val stat = new Stat - val arrayOfBytes = zkClient.connection.readData(root + "/" + key, stat, false) - new VersionedData(arrayOfBytes, stat.getVersion) - } catch { - case e: KeeperException.NoNodeException ⇒ throw new MissingDataException( - String.format("Failed to load key [%s]: no data was found", key), e) - case e: KeeperException ⇒ throw new StorageException( - String.format("Failed to load key [%s]", key), e) - } - - def load(key: String, expectedVersion: Long) = try { - val stat = new Stat - val arrayOfBytes = zkClient.connection.readData(root + "/" + key, stat, false) - - if (stat.getVersion != expectedVersion) throw new BadVersionException( - "Failed to update key [" + key + "]: version mismatch, expected [" + expectedVersion + "]" + - " but found [" + stat.getVersion + "]") - - new VersionedData(arrayOfBytes, stat.getVersion) - } catch { - case e: KeeperException.NoNodeException ⇒ throw new MissingDataException( - String.format("Failed to load key [%s]: no data was found", key), e) - case e: KeeperException ⇒ throw new StorageException( - String.format("Failed to load key [%s]", key), e) - } - - def insertOrOverwrite(key: String, bytes: Array[Byte]) = { - try { - throw new UnsupportedOperationException() - } catch { - case e: KeeperException.NodeExistsException ⇒ throw new DataExistsException( - String.format("Failed to insert key [%s]: an entry already exists with the same key", key), e) - case e: KeeperException ⇒ throw new StorageException( - String.format("Failed to insert key [%s]", key), e) - } - } - - def insert(key: String, bytes: Array[Byte]): Long = { - try { - zkClient.connection.create(root + "/" + key, bytes, CreateMode.PERSISTENT) - //todo: how to get hold of the version. 
- val version: Long = 0 - version - } catch { - case e: KeeperException.NodeExistsException ⇒ throw new DataExistsException( - String.format("Failed to insert key [%s]: an entry already exists with the same key", key), e) - case e: KeeperException ⇒ throw new StorageException( - String.format("Failed to insert key [%s]", key), e) - } - } - - def exists(key: String) = try { - zkClient.connection.exists(toZkPath(key), false) - } catch { - case e: KeeperException ⇒ throw new StorageException( - String.format("Failed to check existance for key [%s]", key), e) - } - - def update(key: String, bytes: Array[Byte], expectedVersion: Long): Long = { - try { - zkClient.connection.writeData(root + "/" + key, bytes, expectedVersion.asInstanceOf[Int]) - throw new RuntimeException() - } catch { - case e: KeeperException.BadVersionException ⇒ throw new BadVersionException( - String.format("Failed to update key [%s]: version mismatch", key), e) - case e: KeeperException ⇒ throw new StorageException( - String.format("Failed to update key [%s]", key), e) - } - } - - def overwrite(key: String, bytes: Array[Byte]): Long = { - try { - zkClient.connection.writeData(root + "/" + key, bytes) - -1L - } catch { - case e: KeeperException.NoNodeException ⇒ throw new MissingDataException( - String.format("Failed to overwrite key [%s]: a previous entry already exists", key), e) - case e: KeeperException ⇒ throw new StorageException( - String.format("Failed to overwrite key [%s]", key), e) - } - } -} - -object InMemoryStorage { - val InitialVersion = 0; -} - -/** - * An in memory {@link RawStore} implementation. Useful for testing purposes. - */ -final class InMemoryStorage extends Storage { - - private val map = new ConcurrentHashMap[String, VersionedData]() - - def load(key: String) = { - val result = map.get(key) - - if (result == null) throw new MissingDataException( - String.format("Failed to load key [%s]: no data was found", key)) - - result - } - - def load(key: String, expectedVersion: Long) = { - val result = load(key) - - if (result.version != expectedVersion) throw new BadVersionException( - "Failed to load key [" + key + "]: version mismatch, expected [" + result.version + "] " + - "but found [" + expectedVersion + "]") - - result - } - - def exists(key: String) = map.containsKey(key) - - def insert(key: String, bytes: Array[Byte]): Long = { - val version: Long = InMemoryStorage.InitialVersion - val result = new VersionedData(bytes, version) - - val previous = map.putIfAbsent(key, result) - if (previous != null) throw new DataExistsException( - String.format("Failed to insert key [%s]: the key already has been inserted previously", key)) - - version - } - - @tailrec - def update(key: String, bytes: Array[Byte], expectedVersion: Long): Long = { - val found = map.get(key) - - if (found == null) throw new MissingDataException( - String.format("Failed to update key [%s], no previous entry exist", key)) - - if (expectedVersion != found.version) throw new BadVersionException( - "Failed to update key [" + key + "]: version mismatch, expected [" + expectedVersion + "]" + - " but found [" + found.version + "]") - - val newVersion: Long = expectedVersion + 1 - - if (map.replace(key, found, new VersionedData(bytes, newVersion))) newVersion - else update(key, bytes, expectedVersion) - } - - @tailrec - def overwrite(key: String, bytes: Array[Byte]): Long = { - val current = map.get(key) - - if (current == null) throw new MissingDataException( - String.format("Failed to overwrite key [%s], no previous entry exist", key)) - 
- val update = new VersionedData(bytes, current.version + 1)
-
- if (map.replace(key, current, update)) update.version
- else overwrite(key, bytes)
- }
-
- def insertOrOverwrite(key: String, bytes: Array[Byte]): Long = {
- val version = InMemoryStorage.InitialVersion
- val result = new VersionedData(bytes, version)
-
- val previous = map.putIfAbsent(key, result)
-
- if (previous == null) result.version
- else overwrite(key, bytes)
- }
-}
-
-//TODO: To minimize the number of dependencies, should the Storage not be placed in a separate module?
-//class VoldemortRawStorage(storeClient: StoreClient) extends Storage {
-//
-// def load(key: String) = {
-// try {
-//
-// } catch {
-// case
-// }
-// }
-//
-// override def insert(key: String, bytes: Array[Byte]) {
-// throw new UnsupportedOperationException()
-// }
-//
-// def update(key: String, bytes: Array[Byte]) {
-// throw new UnsupportedOperationException()
-// }
-//}
diff --git a/akka-cluster/src/main/scala/akka/cluster/zookeeper/AkkaZkClient.scala b/akka-cluster/src/main/scala/akka/cluster/zookeeper/AkkaZkClient.scala
deleted file mode 100644
index 9137959877..0000000000
--- a/akka-cluster/src/main/scala/akka/cluster/zookeeper/AkkaZkClient.scala
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Copyright (C) 2009-2012 Typesafe Inc.
- */
-package akka.cluster.zookeeper
-
-import org.I0Itec.zkclient._
-import org.I0Itec.zkclient.serialize._
-import org.I0Itec.zkclient.exception._
-
-/**
- * ZooKeeper client. Holds the ZooKeeper connection and manages its session.
- */
-class AkkaZkClient(zkServers: String,
- sessionTimeout: Int,
- connectionTimeout: Int,
- zkSerializer: ZkSerializer = new SerializableSerializer)
- extends ZkClient(zkServers, sessionTimeout, connectionTimeout, zkSerializer) {
-
- def connection: ZkConnection = _connection.asInstanceOf[ZkConnection]
-
- def reconnect() {
- val zkLock = getEventLock
-
- zkLock.lock()
- try {
- _connection.close()
- _connection.connect(this)
- } catch {
- case e: InterruptedException ⇒ throw new ZkInterruptedException(e)
- } finally {
- zkLock.unlock()
- }
- }
-}
diff --git a/akka-cluster/src/main/scala/akka/cluster/zookeeper/AkkaZooKeeper.scala b/akka-cluster/src/main/scala/akka/cluster/zookeeper/AkkaZooKeeper.scala
deleted file mode 100644
index b5165ffb72..0000000000
--- a/akka-cluster/src/main/scala/akka/cluster/zookeeper/AkkaZooKeeper.scala
+++ /dev/null
@@ -1,32 +0,0 @@
-/**
- * Copyright (C) 2009-2012 Typesafe Inc.
- */
-package akka.cluster.zookeeper
-
-import org.I0Itec.zkclient._
-import org.apache.commons.io.FileUtils
-import java.io.File
-
-object AkkaZooKeeper {
- /**
- * Starts up a local ZooKeeper server. Should only be used for testing purposes.
- */
- def startLocalServer(dataPath: String, logPath: String): ZkServer =
- startLocalServer(dataPath, logPath, 2181, 500)
-
- /**
- * Starts up a local ZooKeeper server. Should only be used for testing purposes.
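- * Hypothetical usage sketch (the paths, port and tick time are illustrative, not prescribed by this API):
- * {{{
- * val server = AkkaZooKeeper.startLocalServer("/tmp/zk-data", "/tmp/zk-log", 2181, 500)
- * try {
- *   // run tests against the embedded server on localhost:2181
- * } finally server.shutdown()
- * }}}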
- */
- def startLocalServer(dataPath: String, logPath: String, port: Int, tickTime: Int): ZkServer = {
- FileUtils.deleteDirectory(new File(dataPath))
- FileUtils.deleteDirectory(new File(logPath))
- val zkServer = new ZkServer(
- dataPath, logPath,
- new IDefaultNameSpace() {
- def createDefaultNameSpace(zkClient: ZkClient) {}
- },
- port, tickTime)
- zkServer.start()
- zkServer
- }
-}
diff --git a/akka-cluster/src/main/scala/akka/cluster/zookeeper/ZooKeeperBarrier.scala b/akka-cluster/src/main/scala/akka/cluster/zookeeper/ZooKeeperBarrier.scala
deleted file mode 100644
index c1f51ceb96..0000000000
--- a/akka-cluster/src/main/scala/akka/cluster/zookeeper/ZooKeeperBarrier.scala
+++ /dev/null
@@ -1,104 +0,0 @@
-/**
- * Copyright (C) 2009-2012 Typesafe Inc.
- */
-package akka.cluster.zookeeper
-
-import akka.util.Duration
-import akka.util.duration._
-
-import org.I0Itec.zkclient._
-import org.I0Itec.zkclient.exception._
-
-import java.util.{ List ⇒ JList }
-import java.util.concurrent.CountDownLatch
-
-class BarrierTimeoutException(message: String) extends RuntimeException(message)
-
-/**
- * Barrier based on the ZooKeeper barrier tutorial.
- */
-object ZooKeeperBarrier {
- val BarriersNode = "/barriers"
- val DefaultTimeout = 60 seconds
-
- def apply(zkClient: ZkClient, name: String, node: String, count: Int) =
- new ZooKeeperBarrier(zkClient, name, node, count, DefaultTimeout)
-
- def apply(zkClient: ZkClient, name: String, node: String, count: Int, timeout: Duration) =
- new ZooKeeperBarrier(zkClient, name, node, count, timeout)
-
- def apply(zkClient: ZkClient, cluster: String, name: String, node: String, count: Int) =
- new ZooKeeperBarrier(zkClient, cluster + "-" + name, node, count, DefaultTimeout)
-
- def apply(zkClient: ZkClient, cluster: String, name: String, node: String, count: Int, timeout: Duration) =
- new ZooKeeperBarrier(zkClient, cluster + "-" + name, node, count, timeout)
-
- def ignore[E: Manifest](body: ⇒ Unit) {
- try {
- body
- } catch {
- case e if manifest[E].erasure.isAssignableFrom(e.getClass) ⇒ ()
- }
- }
-}
-
-/**
- * Barrier based on the ZooKeeper barrier tutorial.
- */
-class ZooKeeperBarrier(zkClient: ZkClient, name: String, node: String, count: Int, timeout: Duration)
- extends IZkChildListener {
-
- import ZooKeeperBarrier.{ BarriersNode, ignore }
-
- val barrier = BarriersNode + "/" + name
- val entry = barrier + "/" + node
- val ready = barrier + "/ready"
-
- val exitBarrier = new CountDownLatch(1)
-
- ignore[ZkNodeExistsException](zkClient.createPersistent(BarriersNode))
- ignore[ZkNodeExistsException](zkClient.createPersistent(barrier))
-
- def apply(body: ⇒ Unit) {
- enter()
- body
- leave()
- }
-
- /**
- * An await does an enter/leave, making this barrier a 'single' barrier instead of a double barrier.
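- * Hypothetical usage sketch (the barrier name and count are illustrative); each of the two participating JVMs would call:
- * {{{
- * ZooKeeperBarrier(zkClient, "startup", myNodeName, 2).await() // blocks until both nodes arrive
- * }}}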
- */ - def await() { - enter() - leave() - } - - def enter() = { - zkClient.createEphemeral(entry) - if (zkClient.countChildren(barrier) >= count) - ignore[ZkNodeExistsException](zkClient.createPersistent(ready)) - else - zkClient.waitUntilExists(ready, timeout.unit, timeout.length) - if (!zkClient.exists(ready)) { - throw new BarrierTimeoutException("Timeout (%s) while waiting for entry barrier" format timeout) - } - zkClient.subscribeChildChanges(barrier, this) - } - - def leave() { - zkClient.delete(entry) - exitBarrier.await(timeout.length, timeout.unit) - if (zkClient.countChildren(barrier) > 0) { - zkClient.unsubscribeChildChanges(barrier, this) - throw new BarrierTimeoutException("Timeout (%s) while waiting for exit barrier" format timeout) - } - zkClient.unsubscribeChildChanges(barrier, this) - } - - def handleChildChange(path: String, children: JList[String]) { - if (children.size <= 1) { - ignore[ZkNoNodeException](zkClient.delete(ready)) - exitBarrier.countDown() - } - } -} diff --git a/akka-remote/src/multi-jvm/scala/akka/remote/GossipMembershipMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipMembershipMultiJvmSpec.scala similarity index 99% rename from akka-remote/src/multi-jvm/scala/akka/remote/GossipMembershipMultiJvmSpec.scala rename to akka-cluster/src/multi-jvm/scala/akka/cluster/GossipMembershipMultiJvmSpec.scala index 418f6f385b..c380d3e5eb 100644 --- a/akka-remote/src/multi-jvm/scala/akka/remote/GossipMembershipMultiJvmSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipMembershipMultiJvmSpec.scala @@ -1,4 +1,4 @@ -// package akka.remote +// package akka.cluster // import akka.actor.Actor // import akka.remote._ diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmNode1.conf deleted file mode 100644 index 2f642a20f0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmNode1.conf +++ /dev/null @@ -1,2 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmNode1.opts deleted file mode 100644 index dc86c1c9c0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmNode2.conf deleted file mode 100644 index 2f642a20f0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmNode2.conf +++ /dev/null @@ -1,2 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmNode2.opts deleted file mode 
100644 index bb140941a5..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmNode2.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node2 -Dakka.remote.port=9992 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmSpec.scala deleted file mode 100644 index f1b9f5a7ae..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmSpec.scala +++ /dev/null @@ -1,63 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -package akka.cluster.api.changelisteners.newleader - -import org.scalatest.WordSpec -import org.scalatest.matchers.MustMatchers -import org.scalatest.BeforeAndAfterAll - -import akka.cluster._ -import ChangeListener._ -import Cluster._ -import akka.cluster.LocalCluster._ - -import java.util.concurrent._ - -object NewLeaderChangeListenerMultiJvmSpec { - var NrOfNodes = 2 -} - -class NewLeaderChangeListenerMultiJvmNode1 extends MasterClusterTestNode { - import NewLeaderChangeListenerMultiJvmSpec._ - - val testNodes = NrOfNodes - - "A NewLeader change listener" must { - - "be invoked after leader election is completed" ignore { - barrier("start-node1", NrOfNodes) { - Cluster.node.start() - } - - barrier("start-node2", NrOfNodes).await() - - System.exit(0) - } - } -} - -class NewLeaderChangeListenerMultiJvmNode2 extends ClusterTestNode { - import NewLeaderChangeListenerMultiJvmSpec._ - - "A NewLeader change listener" must { - - "be invoked after leader election is completed" ignore { - val latch = new CountDownLatch(1) - - barrier("start-node1", NrOfNodes).await() - - barrier("start-node2", NrOfNodes) { - node.register(new ChangeListener { - override def newLeader(node: String, client: ClusterNode) { - latch.countDown - } - }) - } - latch.await(10, TimeUnit.SECONDS) must be === true - - node.shutdown() - } - } -} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmNode1.conf deleted file mode 100644 index 2f642a20f0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmNode1.conf +++ /dev/null @@ -1,2 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmNode1.opts deleted file mode 100644 index dc86c1c9c0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmNode2.conf deleted file mode 100644 index 2f642a20f0..0000000000 --- 
a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmNode2.conf +++ /dev/null @@ -1,2 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmNode2.opts deleted file mode 100644 index bb140941a5..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmNode2.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node2 -Dakka.remote.port=9992 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmSpec.scala deleted file mode 100644 index deec5c19e6..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmSpec.scala +++ /dev/null @@ -1,65 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -package akka.cluster.api.changelisteners.nodeconnected - -import org.scalatest.WordSpec -import org.scalatest.matchers.MustMatchers -import org.scalatest.BeforeAndAfterAll - -import akka.cluster._ -import ChangeListener._ -import Cluster._ -import akka.cluster.LocalCluster._ - -import java.util.concurrent._ - -object NodeConnectedChangeListenerMultiJvmSpec { - var NrOfNodes = 2 -} - -class NodeConnectedChangeListenerMultiJvmNode1 extends MasterClusterTestNode { - import NodeConnectedChangeListenerMultiJvmSpec._ - - val testNodes = NrOfNodes - - "A NodeConnected change listener" must { - - "be invoked when a new node joins the cluster" in { - val latch = new CountDownLatch(1) - node.register(new ChangeListener { - override def nodeConnected(node: String, client: ClusterNode) { - latch.countDown - } - }) - - barrier("start-node1", NrOfNodes) { - Cluster.node.start() - } - - barrier("start-node2", NrOfNodes) { - latch.await(5, TimeUnit.SECONDS) must be === true - } - - node.shutdown() - } - } -} - -class NodeConnectedChangeListenerMultiJvmNode2 extends ClusterTestNode { - import NodeConnectedChangeListenerMultiJvmSpec._ - - "A NodeConnected change listener" must { - - "be invoked when a new node joins the cluster" in { - barrier("start-node1", NrOfNodes).await() - - barrier("start-node2", NrOfNodes) { - Cluster.node.start() - } - - node.shutdown() - } - } -} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmNode1.conf deleted file mode 100644 index 2f642a20f0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmNode1.conf +++ /dev/null @@ -1,2 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmNode1.opts deleted file 
mode 100644 index dc86c1c9c0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmNode2.conf deleted file mode 100644 index 2f642a20f0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmNode2.conf +++ /dev/null @@ -1,2 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmNode2.opts deleted file mode 100644 index bb140941a5..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmNode2.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node2 -Dakka.remote.port=9992 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmSpec.scala deleted file mode 100644 index 54a327126e..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmSpec.scala +++ /dev/null @@ -1,65 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. 
- */
-
-package akka.cluster.api.changelisteners.nodedisconnected
-
-import org.scalatest.WordSpec
-import org.scalatest.matchers.MustMatchers
-import org.scalatest.BeforeAndAfterAll
-
-import akka.cluster._
-import ChangeListener._
-import Cluster._
-import akka.cluster.LocalCluster._
-
-import java.util.concurrent._
-
-object NodeDisconnectedChangeListenerMultiJvmSpec {
- var NrOfNodes = 2
-}
-
-class NodeDisconnectedChangeListenerMultiJvmNode1 extends MasterClusterTestNode {
- import NodeDisconnectedChangeListenerMultiJvmSpec._
-
- val testNodes = NrOfNodes
-
- "A NodeDisconnected change listener" must {
-
- "be invoked when a node leaves the cluster" in {
- val latch = new CountDownLatch(1)
- node.register(new ChangeListener {
- override def nodeDisconnected(node: String, client: ClusterNode) {
- latch.countDown
- }
- })
-
- barrier("start-node1", NrOfNodes) {
- Cluster.node.start()
- }
-
- barrier("start-node2", NrOfNodes).await()
-
- latch.await(10, TimeUnit.SECONDS) must be === true
-
- node.shutdown()
- }
- }
-}
-
-class NodeDisconnectedChangeListenerMultiJvmNode2 extends ClusterTestNode {
- import NodeDisconnectedChangeListenerMultiJvmSpec._
-
- "A NodeDisconnected change listener" must {
-
- "be invoked when a node leaves the cluster" in {
- barrier("start-node1", NrOfNodes).await()
-
- barrier("start-node2", NrOfNodes) {
- Cluster.node.start()
- }
-
- node.shutdown()
- }
- }
-}
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmNode1.conf
deleted file mode 100644
index 2f642a20f0..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmNode1.conf
+++ /dev/null
@@ -1,2 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handler-level = "WARNING"
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmNode1.opts
deleted file mode 100644
index dc86c1c9c0..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmNode1.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node1 -Dakka.remote.port=9991
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmNode2.conf
deleted file mode 100644
index 2f642a20f0..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmNode2.conf
+++ /dev/null
@@ -1,2 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handler-level = "WARNING"
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmNode2.opts
deleted file mode 100644
index bb140941a5..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmNode2.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node2 -Dakka.remote.port=9992
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmSpec.scala
deleted file mode 100644
index f9aabbb004..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmSpec.scala
+++ /dev/null
@@ -1,89 +0,0 @@
-/**
- * Copyright (C) 2009-2012 Typesafe Inc.
- */
-
-package akka.cluster.api.configuration
-
-import org.scalatest.WordSpec
-import org.scalatest.matchers.MustMatchers
-import org.scalatest.BeforeAndAfterAll
-
-import akka.cluster._
-import Cluster._
-import akka.cluster.LocalCluster._
-
-object ConfigurationStorageMultiJvmSpec {
- var NrOfNodes = 2
-}
-
-class ConfigurationStorageMultiJvmNode1 extends MasterClusterTestNode {
- import ConfigurationStorageMultiJvmSpec._
-
- val testNodes = NrOfNodes
-
- "A cluster" must {
-
- "be able to store, read and remove custom configuration data" in {
-
- barrier("start-node-1", NrOfNodes) {
- Cluster.node.start()
- }
-
- barrier("start-node-2", NrOfNodes).await()
-
- barrier("store-config-data-node-1", NrOfNodes) {
- node.setConfigElement("key1", "value1".getBytes)
- }
-
- barrier("read-config-data-node-2", NrOfNodes).await()
-
- barrier("remove-config-data-node-2", NrOfNodes).await()
-
- barrier("try-read-config-data-node-1", NrOfNodes) {
- val option = node.getConfigElement("key1")
- option.isDefined must be(false)
-
- val elements = node.getConfigElementKeys
- elements.size must be(0)
- }
-
- node.shutdown()
- }
- }
-}
-
-class ConfigurationStorageMultiJvmNode2 extends ClusterTestNode {
- import ConfigurationStorageMultiJvmSpec._
-
- "A cluster" must {
-
- "be able to store, read and remove custom configuration data" in {
-
- barrier("start-node-1", NrOfNodes).await()
-
- barrier("start-node-2", NrOfNodes) {
- Cluster.node.start()
- }
-
- barrier("store-config-data-node-1", NrOfNodes).await()
-
- barrier("read-config-data-node-2", NrOfNodes) {
- val option = node.getConfigElement("key1")
- option.isDefined must be(true)
- option.get must be("value1".getBytes)
-
- val elements = node.getConfigElementKeys
- elements.size must be(1)
- elements.head must be("key1")
- }
-
- barrier("remove-config-data-node-2", NrOfNodes) {
- node.removeConfigElement("key1")
- }
-
- barrier("try-read-config-data-node-1", NrOfNodes).await()
-
- node.shutdown()
- }
- }
-}
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmNode1.conf
deleted file mode 100644
index 2f642a20f0..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmNode1.conf
+++ /dev/null
@@ -1,2 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handler-level = "WARNING"
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmNode1.opts
deleted file mode 100644
index dc86c1c9c0..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmNode1.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node1 -Dakka.remote.port=9991
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmNode2.conf
deleted file mode 100644
index 2f642a20f0..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmNode2.conf
+++ /dev/null
@@ -1,2 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handler-level = "WARNING"
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmNode2.opts
deleted file mode 100644
index bb140941a5..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmNode2.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node2 -Dakka.remote.port=9992
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmSpec.scala
deleted file mode 100644
index 479f77e0d3..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmSpec.scala
+++ /dev/null
@@ -1,71 +0,0 @@
-/**
- * Copyright (C) 2009-2012 Typesafe Inc.
- */
-
-package akka.cluster.api.leader.election
-
-import org.scalatest.WordSpec
-import org.scalatest.matchers.MustMatchers
-import org.scalatest.BeforeAndAfterAll
-
-import akka.cluster._
-import ChangeListener._
-import Cluster._
-import akka.cluster.LocalCluster._
-
-import java.util.concurrent._
-
-object LeaderElectionMultiJvmSpec {
- var NrOfNodes = 2
-}
-/*
-class LeaderElectionMultiJvmNode1 extends MasterClusterTestNode {
- import LeaderElectionMultiJvmSpec._
-
- val testNodes = NrOfNodes
-
- "A cluster" must {
-
- "be able to elect a single leader in the cluster and perform re-election if leader resigns" in {
-
- barrier("start-node1", NrOfNodes) {
- Cluster.node.start()
- }
- node.isLeader must be === true
-
- barrier("start-node2", NrOfNodes) {
- }
- node.isLeader must be === true
-
- barrier("stop-node1", NrOfNodes) {
- node.resign()
- }
- }
- }
-}
-
-class LeaderElectionMultiJvmNode2 extends ClusterTestNode {
- import LeaderElectionMultiJvmSpec._
-
- "A cluster" must {
-
- "be able to elect a single leader in the cluster and perform re-election if leader resigns" in {
-
- barrier("start-node1", NrOfNodes) {
- }
- node.isLeader must be === false
-
- barrier("start-node2", NrOfNodes) {
- Cluster.node.start()
- }
- node.isLeader must be === false
-
- barrier("stop-node1", NrOfNodes) {
- }
- Thread.sleep(1000) // wait for re-election
-
- node.isLeader must be === true
- }
- }
-}
-*/
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmNode1.conf
deleted file mode 100644
index 2f642a20f0..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmNode1.conf
+++ /dev/null
@@ -1,2 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handler-level = "WARNING"
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmNode1.opts
deleted file mode 100644
index dc86c1c9c0..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmNode1.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node1 -Dakka.remote.port=9991
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmNode2.conf
deleted file mode 100644
index 2f642a20f0..0000000000
---
a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmNode2.conf +++ /dev/null @@ -1,2 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmNode2.opts deleted file mode 100644 index bb140941a5..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmNode2.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node2 -Dakka.remote.port=9992 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmSpec.scala deleted file mode 100644 index c20bf9269c..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmSpec.scala +++ /dev/null @@ -1,116 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -package akka.cluster.api.registry - -import org.scalatest.WordSpec -import org.scalatest.matchers.MustMatchers -import org.scalatest.BeforeAndAfterAll - -import akka.actor._ -import Actor._ -import akka.cluster._ -import ChangeListener._ -import Cluster._ -import akka.config.Config -import akka.serialization.Serialization -import akka.cluster.LocalCluster._ - -import java.util.concurrent._ - -object RegistryStoreMultiJvmSpec { - var NrOfNodes = 2 - - class HelloWorld1 extends Actor with Serializable { - def receive = { - case "Hello" ⇒ - reply("World from node [" + Config.nodename + "]") - } - } - - class HelloWorld2 extends Actor with Serializable { - var counter = 0 - def receive = { - case "Hello" ⇒ - Thread.sleep(1000) - counter += 1 - case "Count" ⇒ - reply(counter) - } - } -} - -class RegistryStoreMultiJvmNode1 extends MasterClusterTestNode { - import RegistryStoreMultiJvmSpec._ - - val testNodes = NrOfNodes - - "A cluster" must { - - "be able to store an ActorRef in the cluster without a replication strategy and retrieve it with 'use'" in { - - barrier("start-node-1", NrOfNodes) { - Cluster.node.start() - } - - barrier("start-node-2", NrOfNodes).await() - - barrier("store-1-in-node-1", NrOfNodes) { - node.store("hello-world-1", classOf[HelloWorld1], Serialization.serializerFor(classOf[HelloWorld1])) - } - - barrier("use-1-in-node-2", NrOfNodes).await() - - barrier("store-2-in-node-1", NrOfNodes) { - node.store("hello-world-2", classOf[HelloWorld1], false, Serialization.serializerFor(classOf[HelloWorld1])) - } - - barrier("use-2-in-node-2", NrOfNodes).await() - - node.shutdown() - } - } -} - -class RegistryStoreMultiJvmNode2 extends ClusterTestNode { - import RegistryStoreMultiJvmSpec._ - - "A cluster" must { - - "be able to store an actor in the cluster with 'store' and retrieve it with 'use'" in { - - barrier("start-node-1", NrOfNodes).await() - - barrier("start-node-2", NrOfNodes) { - Cluster.node.start() - } - - barrier("store-1-in-node-1", NrOfNodes).await() - - barrier("use-1-in-node-2", NrOfNodes) { - val actorOrOption = node.use("hello-world-1") - if (actorOrOption.isEmpty) fail("Actor could not be retrieved") - - val actorRef = actorOrOption.get - actorRef.address must be("hello-world-1") - - (actorRef ? 
"Hello").as[String].get must be("World from node [node2]") - } - - barrier("store-2-in-node-1", NrOfNodes).await() - - barrier("use-2-in-node-2", NrOfNodes) { - val actorOrOption = node.use("hello-world-2") - if (actorOrOption.isEmpty) fail("Actor could not be retrieved") - - val actorRef = actorOrOption.get - actorRef.address must be("hello-world-2") - - (actorRef ? "Hello").as[String].get must be("World from node [node2]") - } - - node.shutdown() - } - } -} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmNode1.conf deleted file mode 100644 index 88df1a6421..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmNode1.conf +++ /dev/null @@ -1,4 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "round-robin" -akka.actor.deployment.service-hello.nr-of-instances = 1 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmNode1.opts deleted file mode 100644 index dc86c1c9c0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmNode2.conf deleted file mode 100644 index 88df1a6421..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmNode2.conf +++ /dev/null @@ -1,4 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "round-robin" -akka.actor.deployment.service-hello.nr-of-instances = 1 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmNode2.opts deleted file mode 100644 index bb140941a5..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmNode2.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node2 -Dakka.remote.port=9992 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmSpec.scala deleted file mode 100644 index ef0b79b4a7..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmSpec.scala +++ /dev/null @@ -1,75 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. 
- */
-
-package akka.cluster.deployment
-
-import org.scalatest.WordSpec
-import org.scalatest.matchers.MustMatchers
-import org.scalatest.BeforeAndAfterAll
-
-import akka.actor._
-import Actor._
-import akka.cluster._
-import Cluster._
-import akka.cluster.LocalCluster._
-
-object DeploymentMultiJvmSpec {
- var NrOfNodes = 2
-}
-
-class DeploymentMultiJvmNode1 extends MasterClusterTestNode {
- import DeploymentMultiJvmSpec._
-
- val testNodes = NrOfNodes
-
- "A ClusterDeployer" must {
-
- "be able to deploy deployments in akka.conf and lookup the deployments by 'address'" in {
-
- barrier("start-node-1", NrOfNodes) {
- Cluster.node.start()
- }
-
- barrier("start-node-2", NrOfNodes).await()
-
- barrier("perform-deployment-on-node-1", NrOfNodes) {
- Deployer.start()
- }
-
- barrier("lookup-deployment-node-2", NrOfNodes).await()
-
- node.shutdown()
- }
- }
-}
-
-class DeploymentMultiJvmNode2 extends ClusterTestNode {
- import DeploymentMultiJvmSpec._
-
- "A ClusterDeployer" must {
-
- "be able to deploy deployments in akka.conf and lookup the deployments by 'address'" in {
-
- barrier("start-node-1", NrOfNodes).await()
-
- barrier("start-node-2", NrOfNodes) {
- Cluster.node.start()
- }
-
- barrier("perform-deployment-on-node-1", NrOfNodes).await()
-
- barrier("lookup-deployment-node-2", NrOfNodes) {
- Deployer.start()
- val deployments = Deployer.deploymentsInConfig
- deployments map { oldDeployment ⇒
- val newDeployment = ClusterDeployer.lookupDeploymentFor(oldDeployment.address)
- newDeployment must be('defined)
- oldDeployment must equal(newDeployment.get)
- }
- }
-
- node.shutdown()
- }
- }
-}
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/local/LocalMetricsMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/local/LocalMetricsMultiJvmNode1.conf
deleted file mode 100644
index 8d5284be46..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/local/LocalMetricsMultiJvmNode1.conf
+++ /dev/null
@@ -1,4 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handlers = ["akka.testkit.TestEventListener"]
-akka.event-handler-level = "WARNING"
-akka.cluster.metrics-refresh-timeout = 1
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/local/LocalMetricsMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/local/LocalMetricsMultiJvmNode1.opts
deleted file mode 100644
index dc86c1c9c0..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/local/LocalMetricsMultiJvmNode1.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node1 -Dakka.remote.port=9991
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/local/LocalMetricsMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/local/LocalMetricsMultiJvmSpec.scala
deleted file mode 100644
index 380d68d8ef..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/local/LocalMetricsMultiJvmSpec.scala
+++ /dev/null
@@ -1,134 +0,0 @@
-/**
- * Copyright (C) 2009-2012 Typesafe Inc.
- */
-
-package akka.cluster.metrics.local
-
-import akka.cluster._
-import akka.actor._
-import Actor._
-import Cluster._
-import akka.dispatch._
-import akka.util.Duration
-import akka.util.duration._
-import akka.cluster.metrics._
-import java.util.concurrent.atomic.AtomicInteger
-
-object LocalMetricsMultiJvmSpec {
- val NrOfNodes = 1
-}
-
-class LocalMetricsMultiJvmNode1 extends MasterClusterTestNode {
-
- import LocalMetricsMultiJvmSpec._
-
- val testNodes = NrOfNodes
-
- override def beforeAll = {
- super.beforeAll()
- node
- }
-
- override def afterAll = {
- node.shutdown()
- super.afterAll()
- }
-
- "Metrics manager" must {
-
- def timeout = node.metricsManager.refreshTimeout
-
- "be initialized with the refresh timeout value specified in akka.conf" in {
- timeout must be(1.second)
- }
-
- "return up-to-date local node metrics straight from MBeans/Sigar" in {
- node.metricsManager.getLocalMetrics must not be (null)
-
- node.metricsManager.getLocalMetrics.systemLoadAverage must be(0.5 plusOrMinus 0.5)
- }
-
- "return metrics cached in the MetricsManager" in {
- node.metricsManager.getMetrics(nodeAddress.nodeName) must not be (null)
- }
-
- "return local node metrics from ZNode" in {
- node.metricsManager.getMetrics(nodeAddress.nodeName, false) must not be (null)
- }
-
- "return cached metrics of all nodes in the cluster" in {
- node.metricsManager.getAllMetrics.size must be(1)
- node.metricsManager.getAllMetrics.find(_.nodeName == "node1") must not be (null)
- }
-
- "throw no exceptions when a user attempts to get metrics of a non-existing node" in {
- node.metricsManager.getMetrics("nonexisting") must be(None)
- node.metricsManager.getMetrics("nonexisting", false) must be(None)
- }
-
- "regularly update cached metrics" in {
- val oldMetrics = node.metricsManager.getLocalMetrics
- Thread sleep timeout.toMillis
- node.metricsManager.getLocalMetrics must not be (oldMetrics)
- }
-
- "allow tracking JVM state and binding handlers through MetricsAlterationMonitors" in {
- val monitorResponse = Promise[String]()
-
- node.metricsManager.addMonitor(new LocalMetricsAlterationMonitor {
-
- val id = "heapMemoryThresholdMonitor"
-
- def reactsOn(metrics: NodeMetrics) = metrics.usedHeapMemory > 1
-
- def react(metrics: NodeMetrics) = monitorResponse.success("Too much memory is used!")
-
- })
-
- Await.result(monitorResponse, 5 seconds) must be("Too much memory is used!")
-
- }
-
- class FooMonitor(monitorWorked: AtomicInteger) extends LocalMetricsAlterationMonitor {
- val id = "fooMonitor"
- def reactsOn(metrics: NodeMetrics) = true
- def react(metrics: NodeMetrics) = monitorWorked.incrementAndGet()
- }
-
- "allow unregistering the monitor" in {
-
- val monitorWorked = new AtomicInteger(0)
- val fooMonitor = new FooMonitor(monitorWorked)
-
- node.metricsManager.addMonitor(fooMonitor)
- node.metricsManager.removeMonitor(fooMonitor)
-
- val oldValue = monitorWorked.get
- Thread sleep timeout.toMillis
- monitorWorked.get must be(oldValue)
-
- }
-
- "stop notifying monitors when stopped" in {
-
- node.metricsManager.stop()
-
- val monitorWorked = new AtomicInteger(0)
-
- node.metricsManager.addMonitor(new LocalMetricsAlterationMonitor {
- val id = "fooMonitor"
- def reactsOn(metrics: NodeMetrics) = true
- def react(metrics: NodeMetrics) = monitorWorked.incrementAndGet()
- })
-
- monitorWorked.get must be(0)
-
- node.metricsManager.start()
- Thread sleep (timeout.toMillis * 2)
- monitorWorked.get must be > (1)
-
- }
-
- }
-
-}
diff --git
a/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmNode1.conf deleted file mode 100644 index 172e980612..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmNode1.conf +++ /dev/null @@ -1,3 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handlers = ["akka.testkit.TestEventListener"] -akka.event-handler-level = "WARNING" diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmNode1.opts deleted file mode 100644 index dc86c1c9c0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmNode2.conf deleted file mode 100644 index 172e980612..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmNode2.conf +++ /dev/null @@ -1,3 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handlers = ["akka.testkit.TestEventListener"] -akka.event-handler-level = "WARNING" diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmNode2.opts deleted file mode 100644 index bb140941a5..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmNode2.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node2 -Dakka.remote.port=9992 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmSpec.scala deleted file mode 100644 index 8c4730dc90..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmSpec.scala +++ /dev/null @@ -1,133 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. 
- */
-
-package akka.cluster.metrics.remote
-
-import akka.cluster._
-import akka.actor._
-import Actor._
-import Cluster._
-import akka.dispatch._
-import akka.util.Duration
-import akka.util.duration._
-import akka.cluster.metrics._
-import java.util.concurrent._
-import atomic.AtomicInteger
-
-object RemoteMetricsMultiJvmSpec {
- val NrOfNodes = 2
-
- val MetricsRefreshTimeout = 100.millis
-}
-
-class AllMetricsAvailableMonitor(_id: String, completionLatch: CountDownLatch, clusterSize: Int) extends ClusterMetricsAlterationMonitor {
-
- val id = _id
-
- def reactsOn(allMetrics: Array[NodeMetrics]) = allMetrics.size == clusterSize
-
- def react(allMetrics: Array[NodeMetrics]) = completionLatch.countDown
-
-}
-
-class RemoteMetricsMultiJvmNode1 extends MasterClusterTestNode {
-
- import RemoteMetricsMultiJvmSpec._
-
- val testNodes = NrOfNodes
-
- "Metrics manager" must {
- "provide metrics of all nodes in the cluster" in {
-
- val allMetricsAvailable = new CountDownLatch(1)
-
- node.metricsManager.refreshTimeout = MetricsRefreshTimeout
- node.metricsManager.addMonitor(new AllMetricsAvailableMonitor("all-metrics-available", allMetricsAvailable, NrOfNodes))
-
- LocalCluster.barrier("node-start", NrOfNodes).await()
-
- allMetricsAvailable.await()
-
- LocalCluster.barrier("check-all-remote-metrics", NrOfNodes) {
- node.metricsManager.getAllMetrics.size must be(2)
- }
-
- val cachedMetrics = node.metricsManager.getMetrics("node2")
- val metricsFromZnode = node.metricsManager.getMetrics("node2", false)
-
- LocalCluster.barrier("check-single-remote-metrics", NrOfNodes) {
- cachedMetrics must not be (null)
- metricsFromZnode must not be (null)
- }
-
- Thread sleep MetricsRefreshTimeout.toMillis
-
- LocalCluster.barrier("remote-metrics-is-updated", NrOfNodes) {
- node.metricsManager.getMetrics("node2") must not be (cachedMetrics)
- node.metricsManager.getMetrics("node2", false) must not be (metricsFromZnode)
- }
-
- val someMetricsGone = new CountDownLatch(1)
- node.metricsManager.addMonitor(new AllMetricsAvailableMonitor("some-metrics-gone", someMetricsGone, 1))
-
- LocalCluster.barrier("some-nodes-leave", NrOfNodes).await()
-
- someMetricsGone.await(10, TimeUnit.SECONDS) must be(true)
-
- node.metricsManager.getMetrics("node2") must be(None)
- node.metricsManager.getMetrics("node2", false) must be(None)
- node.metricsManager.getAllMetrics.size must be(1)
-
- node.shutdown()
-
- }
- }
-
-}
-
-class RemoteMetricsMultiJvmNode2 extends ClusterTestNode {
-
- import RemoteMetricsMultiJvmSpec._
-
- val testNodes = NrOfNodes
-
- "Metrics manager" must {
- "provide metrics of all nodes in the cluster" in {
-
- val allMetricsAvailable = new CountDownLatch(1)
-
- node.metricsManager.refreshTimeout = MetricsRefreshTimeout
- node.metricsManager.addMonitor(new AllMetricsAvailableMonitor("all-metrics-available", allMetricsAvailable, NrOfNodes))
-
- LocalCluster.barrier("node-start", NrOfNodes).await()
-
- allMetricsAvailable.await()
-
- LocalCluster.barrier("check-all-remote-metrics", NrOfNodes) {
- node.metricsManager.getAllMetrics.size must be(2)
- }
-
- val cachedMetrics = node.metricsManager.getMetrics("node1")
- val metricsFromZnode = node.metricsManager.getMetrics("node1", false)
-
- LocalCluster.barrier("check-single-remote-metrics", NrOfNodes) {
- cachedMetrics must not be (null)
- metricsFromZnode must not be (null)
- }
-
- Thread sleep MetricsRefreshTimeout.toMillis
-
- LocalCluster.barrier("remote-metrics-is-updated", NrOfNodes) {
- node.metricsManager.getMetrics("node1") must not be (cachedMetrics)
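- // the second lookup passes false, which (per the "return local node metrics
- // from ZNode" test in the LocalMetrics spec above) bypasses the cache and
- // reads the metrics ZNode directly
-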
node.metricsManager.getMetrics("node1", false) must not be (metricsFromZnode) - } - - LocalCluster.barrier("some-nodes-leave", NrOfNodes) { - node.shutdown() - } - } - } - -} - diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmNode1.conf deleted file mode 100644 index 2f642a20f0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmNode1.conf +++ /dev/null @@ -1,2 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmNode1.opts deleted file mode 100644 index dc86c1c9c0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmNode2.conf deleted file mode 100644 index 2f642a20f0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmNode2.conf +++ /dev/null @@ -1,2 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmNode2.opts deleted file mode 100644 index bb140941a5..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmNode2.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node2 -Dakka.remote.port=9992 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmSpec.scala deleted file mode 100644 index 7dfdec2f7c..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmSpec.scala +++ /dev/null @@ -1,112 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. 
- * - * - * package akka.cluster.migration - * - * import org.scalatest.WordSpec - * import org.scalatest.matchers.MustMatchers - * import org.scalatest.BeforeAndAfterAll - * - * import akka.actor._ - * import Actor._ - * import akka.cluster._ - * import ChangeListener._ - * import Cluster._ - * import akka.config.Config - * import akka.serialization.Serialization - * import akka.cluster.LocalCluster._ - * - * import java.util.concurrent._ - * - * object MigrationExplicitMultiJvmSpec { - * var NrOfNodes = 2 - * - * class HelloWorld extends Actor with Serializable { - * def receive = { - * case "Hello" ⇒ - * reply("World from node [" + Config.nodename + "]") - * } - * } - * } - * - * class MigrationExplicitMultiJvmNode1 extends MasterClusterTestNode { - * import MigrationExplicitMultiJvmSpec._ - * - * val testNodes = NrOfNodes - * - * "A cluster" must { - * - * "be able to migrate an actor from one node to another" in { - * - * barrier("start-node-1", NrOfNodes) { - * Cluster.node.start() - * } - * - * barrier("start-node-2", NrOfNodes) { - * } - * - * barrier("store-1-in-node-1", NrOfNodes) { - * val serializer = Serialization.serializerFor(classOf[HelloWorld]).fold(x ⇒ fail("No serializer found"), s ⇒ s) - * node.store("hello-world", classOf[HelloWorld], serializer) - * } - * - * barrier("use-1-in-node-2", NrOfNodes) { - * } - * - * barrier("migrate-from-node2-to-node1", NrOfNodes) { - * } - * - * barrier("check-actor-is-moved-to-node1", NrOfNodes) { - * node.isInUseOnNode("hello-world") must be(true) - * - * val actorRef = Actor.registry.local.actorFor("hello-world").getOrElse(fail("Actor should have been in the local actor registry")) - * actorRef.address must be("hello-world") - * (actorRef ? "Hello").as[String].get must be("World from node [node1]") - * } - * - * node.shutdown() - * } - * } - * } - * - * class MigrationExplicitMultiJvmNode2 extends ClusterTestNode { - * import MigrationExplicitMultiJvmSpec._ - * - * "A cluster" must { - * - * "be able to migrate an actor from one node to another" in { - * - * barrier("start-node-1", NrOfNodes) { - * } - * - * barrier("start-node-2", NrOfNodes) { - * Cluster.node.start() - * } - * - * barrier("store-1-in-node-1", NrOfNodes) { - * } - * - * barrier("use-1-in-node-2", NrOfNodes) { - * val actorOrOption = node.use("hello-world") - * if (actorOrOption.isEmpty) fail("Actor could not be retrieved") - * - * val actorRef = actorOrOption.get - * actorRef.address must be("hello-world") - * - * (actorRef ? 
"Hello").as[String].get must be("World from node [node2]") - * } - * - * barrier("migrate-from-node2-to-node1", NrOfNodes) { - * node.migrate(NodeAddress(node.nodeAddress.clusterName, "node1"), "hello-world") - * Thread.sleep(2000) - * } - * - * barrier("check-actor-is-moved-to-node1", NrOfNodes) { - * } - * - * node.shutdown() - * } - * } - * } - */ diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode1.conf deleted file mode 100644 index f510c5253c..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode1.conf +++ /dev/null @@ -1,6 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handlers = ["akka.testkit.TestEventListener"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-test.router = "round-robin" -akka.actor.deployment.service-test.cluster.preferred-nodes = ["node:node2","node:node3"] -akka.actor.deployment.service-test.nr-of-instances = 2 \ No newline at end of file diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode1.opts deleted file mode 100644 index dc86c1c9c0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode2.conf deleted file mode 100644 index b7c3e53e6f..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode2.conf +++ /dev/null @@ -1,5 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-test.router = "round-robin" -akka.actor.deployment.service-test.cluster.preferred-nodes = ["node:node2","node:node3"] -akka.actor.deployment.service-test.nr-of-instances = 2 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode2.opts deleted file mode 100644 index bb140941a5..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode2.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node2 -Dakka.remote.port=9992 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode3.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode3.conf deleted file mode 100644 index b7c3e53e6f..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode3.conf +++ /dev/null @@ -1,5 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-test.router = "round-robin" -akka.actor.deployment.service-test.cluster.preferred-nodes = ["node:node2","node:node3"] -akka.actor.deployment.service-test.nr-of-instances = 2 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode3.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode3.opts 
deleted file mode 100644
index 089e3b7776..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode3.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node3 -Dakka.remote.port=9993
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmSpec.scala
deleted file mode 100644
index 98d2aaf394..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmSpec.scala
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- * Copyright (C) 2009-2012 Typesafe Inc.
- */
-
-package akka.cluster.reflogic
-
-import akka.cluster._
-import akka.cluster.Cluster._
-import akka.actor.Actor
-import akka.event.EventHandler
-import akka.testkit.{ EventFilter, TestEvent }
-import akka.routing.RoutingException
-import java.net.ConnectException
-import java.nio.channels.{ ClosedChannelException, NotYetConnectedException }
-import akka.cluster.LocalCluster._
-
-object ClusterActorRefCleanupMultiJvmSpec {
-
- val NrOfNodes = 3
-
- class TestActor extends Actor with Serializable {
- def receive = {
- case _ ⇒ {}
- }
- }
-
-}
-
-class ClusterActorRefCleanupMultiJvmNode1 extends MasterClusterTestNode {
-
- import ClusterActorRefCleanupMultiJvmSpec._
-
- val testNodes = NrOfNodes
-
- "ClusterActorRef" must {
- "cleanup itself" ignore {
- Cluster.node.start()
- barrier("awaitStarted", NrOfNodes).await()
-
- val ref = Actor.actorOf(Props[ClusterActorRefCleanupMultiJvmSpec.TestActor]("service-test"))
-
- ref.isInstanceOf[ClusterActorRef] must be(true)
-
- val clusteredRef = ref.asInstanceOf[ClusterActorRef]
-
- barrier("awaitActorCreated", NrOfNodes).await()
-
- //verify that all remote actors are there.
- clusteredRef.nrOfConnections must be(2)
-
- // ignore exceptions from killing nodes
- val ignoreExceptions = Seq(
- EventFilter[ClosedChannelException],
- EventFilter[NotYetConnectedException],
- EventFilter[RoutingException],
- EventFilter[ConnectException])
-
- EventHandler.notify(TestEvent.Mute(ignoreExceptions))
-
- //just some waiting to make sure that the node has died.
- Thread.sleep(5000)
-
- //send some request, this should trigger the cleanup
- try {
- clusteredRef ! "hello"
- clusteredRef ! "hello"
- } catch {
- case e: ClosedChannelException ⇒
- case e: NotYetConnectedException ⇒
- case e: RoutingException ⇒
- }
-
- barrier("node-3-dead", NrOfNodes - 1).await()
-
- //since the call to the node failed, the node must have been removed from the list.
- clusteredRef.nrOfConnections must be(1)
-
- //just some waiting to make sure that the node has died.
- Thread.sleep(5000)
-
- //trigger the cleanup.
- try {
- clusteredRef ! "hello"
- clusteredRef ! "hello"
- } catch {
- case e: ClosedChannelException ⇒
- case e: NotYetConnectedException ⇒
- case e: RoutingException ⇒
- }
-
- //now there must not be any remaining connections after the death of the last actor.
- clusteredRef.nrOfConnections must be(0)
-
- //and let's make sure we now get the correct exception if we try to use the ref.
- intercept[RoutingException] {
- clusteredRef ! "Hello"
- }
-
- node.shutdown()
- }
- }
-}
-
-class ClusterActorRefCleanupMultiJvmNode2 extends ClusterTestNode {
-
- import ClusterActorRefCleanupMultiJvmSpec._
-
- val testNodes = NrOfNodes
-
- //we are only using the nodes for their capacity, not for testing on this node itself.
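- // The shutdown hook below (as far as can be inferred from this spec) lets
- // ClusterTestNode record this JVM's exit before System.exit(0) terminates the
- // process, so the master node can treat the disappearance as a node death.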
- "___" must { - "___" ignore { - Runtime.getRuntime.addShutdownHook(new Thread() { - override def run() { - ClusterTestNode.exit(classOf[ClusterActorRefCleanupMultiJvmNode2].getName) - } - }) - - Cluster.node.start() - barrier("awaitStarted", NrOfNodes).await() - - barrier("awaitActorCreated", NrOfNodes).await() - - barrier("node-3-dead", NrOfNodes - 1).await() - - System.exit(0) - } - } -} - -class ClusterActorRefCleanupMultiJvmNode3 extends ClusterTestNode { - - import ClusterActorRefCleanupMultiJvmSpec._ - - val testNodes = NrOfNodes - - //we are only using the nodes for their capacity, not for testing on this node itself. - "___" must { - "___" ignore { - Runtime.getRuntime.addShutdownHook(new Thread() { - override def run() { - ClusterTestNode.exit(classOf[ClusterActorRefCleanupMultiJvmNode3].getName) - } - }) - - Cluster.node.start() - barrier("awaitStarted", NrOfNodes).await() - - barrier("awaitActorCreated", NrOfNodes).await() - - System.exit(0) - } - } -} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmNode1.conf deleted file mode 100644 index dca432f404..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmNode1.conf +++ /dev/null @@ -1,7 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.hello-world-write-behind-nosnapshot.router = "direct" -akka.actor.deployment.hello-world-write-behind-nosnapshot.nr-of-instances = 1 -akka.actor.deployment.hello-world-write-behind-nosnapshot.cluster.replication.storage = "transaction-log" -akka.actor.deployment.hello-world-write-behind-nosnapshot.cluster.replication.strategy = "write-behind" -akka.cluster.replication.snapshot-frequency = 1000 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmNode1.opts deleted file mode 100644 index dc86c1c9c0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmNode2.conf deleted file mode 100644 index dca432f404..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmNode2.conf +++ /dev/null @@ -1,7 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.hello-world-write-behind-nosnapshot.router = "direct" -akka.actor.deployment.hello-world-write-behind-nosnapshot.nr-of-instances = 1 
-akka.actor.deployment.hello-world-write-behind-nosnapshot.cluster.replication.storage = "transaction-log" -akka.actor.deployment.hello-world-write-behind-nosnapshot.cluster.replication.strategy = "write-behind" -akka.cluster.replication.snapshot-frequency = 1000 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmNode2.opts deleted file mode 100644 index bb140941a5..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmNode2.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node2 -Dakka.remote.port=9992 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmSpec.scala deleted file mode 100644 index a90d26ad8d..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmSpec.scala +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -// package akka.cluster.replication.transactionlog.writebehind.nosnapshot - -// import akka.actor._ -// import akka.cluster._ -// import Cluster._ -// import akka.config.Config -// import akka.cluster.LocalCluster._ - -// object ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmSpec { -// var NrOfNodes = 2 - -// sealed trait TransactionLogMessage extends Serializable -// case class Count(nr: Int) extends TransactionLogMessage -// case class Log(full: String) extends TransactionLogMessage -// case object GetLog extends TransactionLogMessage - -// class HelloWorld extends Actor with Serializable { -// var log = "" -// def receive = { -// case Count(nr) ⇒ -// log += nr.toString -// reply("World from node [" + Config.nodename + "]") -// case GetLog ⇒ -// reply(Log(log)) -// } -// } -// } - -// class ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmNode1 extends ClusterTestNode { -// import ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmSpec._ - -// "A cluster" must { - -// "be able to replicate an actor with a transaction log and replay transaction log after actor migration" ignore { - -// barrier("start-node1", NrOfNodes) { -// Cluster.node.start() -// } - -// barrier("create-actor-on-node1", NrOfNodes) { -// val actorRef = Actor.actorOf(Props[HelloWorld]("hello-world-write-behind-nosnapshot") -// // node.isInUseOnNode("hello-world") must be(true) -// actorRef.address must be("hello-world-write-behind-nosnapshot") -// for (i ← 0 until 10) { -// (actorRef ? 
Count(i)).as[String] must be(Some("World from node [node1]")) -// } -// } - -// barrier("start-node2", NrOfNodes).await() - -// node.shutdown() -// } -// } -// } - -// class ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmNode2 extends MasterClusterTestNode { -// import ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmSpec._ - -// val testNodes = NrOfNodes - -// "A cluster" must { - -// "be able to replicate an actor with a transaction log and replay transaction log after actor migration" ignore { - -// barrier("start-node1", NrOfNodes).await() - -// barrier("create-actor-on-node1", NrOfNodes).await() - -// barrier("start-node2", NrOfNodes) { -// Cluster.node.start() -// } - -// Thread.sleep(5000) // wait for fail-over from node1 to node2 - -// barrier("check-fail-over-to-node2", NrOfNodes - 1) { -// // both remaining nodes should now have the replica -// node.isInUseOnNode("hello-world-write-behind-nosnapshot") must be(true) -// val actorRef = Actor.registry.local.actorFor("hello-world-write-behind-nosnapshot").getOrElse(fail("Actor should have been in the local actor registry")) -// actorRef.address must be("hello-world-write-behind-nosnapshot") -// (actorRef ? GetLog).as[Log].get must be(Log("0123456789")) -// } - -// node.shutdown() -// } -// } - -// override def onReady() { -// LocalBookKeeperEnsemble.start() -// } - -// override def onShutdown() { -// TransactionLog.shutdown() -// LocalBookKeeperEnsemble.shutdown() -// } -// } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmNode1.conf deleted file mode 100644 index a3ec6ec2c3..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmNode1.conf +++ /dev/null @@ -1,7 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.hello-world.router = "direct" -akka.actor.deployment.hello-world-write-behind-snapshot.nr-of-instances = 1 -akka.actor.deployment.hello-world-write-behind-snapshot.cluster.replication.storage = "transaction-log" -akka.actor.deployment.hello-world-write-behind-snapshot.cluster.replication.strategy = "write-behind" -akka.cluster.replication.snapshot-frequency = 7 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmNode1.opts deleted file mode 100644 index dc86c1c9c0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmNode2.conf deleted file mode 100644 index a3ec6ec2c3..0000000000 --- 
a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmNode2.conf +++ /dev/null @@ -1,7 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.hello-world.router = "direct" -akka.actor.deployment.hello-world-write-behind-snapshot.nr-of-instances = 1 -akka.actor.deployment.hello-world-write-behind-snapshot.cluster.replication.storage = "transaction-log" -akka.actor.deployment.hello-world-write-behind-snapshot.cluster.replication.strategy = "write-behind" -akka.cluster.replication.snapshot-frequency = 7 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmNode2.opts deleted file mode 100644 index bb140941a5..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmNode2.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node2 -Dakka.remote.port=9992 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmSpec.scala deleted file mode 100644 index fde113080e..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmSpec.scala +++ /dev/null @@ -1,118 +0,0 @@ -/* - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -// package akka.cluster.replication.transactionlog.writebehind.snapshot - -// import akka.actor._ -// import akka.cluster._ -// import Cluster._ -// import akka.config.Config -// import akka.cluster.LocalCluster._ - -// object ReplicationTransactionLogWriteBehindSnapshotMultiJvmSpec { -// var NrOfNodes = 2 - -// sealed trait TransactionLogMessage extends Serializable -// case class Count(nr: Int) extends TransactionLogMessage -// case class Log(full: String) extends TransactionLogMessage -// case object GetLog extends TransactionLogMessage - -// class HelloWorld extends Actor with Serializable { -// var log = "" -// //println("Creating HelloWorld log =======> " + log) -// def receive = { -// case Count(nr) ⇒ -// log += nr.toString -// //println("Message to HelloWorld log =======> " + log) -// reply("World from node [" + Config.nodename + "]") -// case GetLog ⇒ -// reply(Log(log)) -// } -// } -// } - -// class ReplicationTransactionLogWriteBehindSnapshotMultiJvmNode1 extends ClusterTestNode { -// import ReplicationTransactionLogWriteBehindSnapshotMultiJvmSpec._ - -// "A cluster" must { - -// "be able to replicate an actor with a transaction log and replay transaction log after actor migration" ignore { - -// barrier("start-node1", NrOfNodes) { -// Cluster.node.start() -// } - -// barrier("create-actor-on-node1", NrOfNodes) { -// val actorRef = Actor.actorOf(Props[HelloWorld]("hello-world-write-behind-snapshot") -// node.isInUseOnNode("hello-world-write-behind-snapshot") must be(true) -// actorRef.address must be("hello-world-write-behind-snapshot") -// var counter = 0 -// (actorRef ? 
Count(counter)).as[String].get must be("World from node [node1]") -// counter += 1 -// (actorRef ? Count(counter)).as[String].get must be("World from node [node1]") -// counter += 1 -// (actorRef ? Count(counter)).as[String].get must be("World from node [node1]") -// counter += 1 -// (actorRef ? Count(counter)).as[String].get must be("World from node [node1]") -// counter += 1 -// (actorRef ? Count(counter)).as[String].get must be("World from node [node1]") -// counter += 1 -// (actorRef ? Count(counter)).as[String].get must be("World from node [node1]") -// counter += 1 -// (actorRef ? Count(counter)).as[String].get must be("World from node [node1]") -// counter += 1 -// (actorRef ? Count(counter)).as[String].get must be("World from node [node1]") -// counter += 1 -// (actorRef ? Count(counter)).as[String].get must be("World from node [node1]") -// counter += 1 -// (actorRef ? Count(counter)).as[String].get must be("World from node [node1]") -// } - -// barrier("start-node2", NrOfNodes).await() - -// node.shutdown() -// } -// } -// } - -// class ReplicationTransactionLogWriteBehindSnapshotMultiJvmNode2 extends MasterClusterTestNode { -// import ReplicationTransactionLogWriteBehindSnapshotMultiJvmSpec._ - -// val testNodes = NrOfNodes - -// "A cluster" must { - -// "be able to replicate an actor with a transaction log and replay transaction log after actor migration" ignore { - -// barrier("start-node1", NrOfNodes).await() - -// barrier("create-actor-on-node1", NrOfNodes).await() - -// barrier("start-node2", NrOfNodes) { -// Cluster.node.start() -// } - -// Thread.sleep(5000) // wait for fail-over from node1 to node2 - -// barrier("check-fail-over-to-node2", NrOfNodes - 1) { -// // both remaining nodes should now have the replica -// node.isInUseOnNode("hello-world-write-behind-snapshot") must be(true) -// val actorRef = Actor.registry.local.actorFor("hello-world-write-behind-snapshot").getOrElse(fail("Actor should have been in the local actor registry")) -// actorRef.address must be("hello-world-write-behind-snapshot") -// (actorRef ? 
GetLog).as[Log].get must be(Log("0123456789")) -// } - -// node.shutdown() -// } -// } - -// override def onReady() { -// LocalBookKeeperEnsemble.start() -// } - -// override def onShutdown() { -// TransactionLog.shutdown() -// LocalBookKeeperEnsemble.shutdown() -// } -// } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmNode1.conf deleted file mode 100644 index 8de04a2eb1..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmNode1.conf +++ /dev/null @@ -1,7 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "DEBUG" -akka.actor.deployment.hello-world-write-through-nosnapshot.router = "direct" -akka.actor.deployment.hello-world-write-through-nosnapshot.cluster.replication.storage = "transaction-log" -akka.actor.deployment.hello-world-write-through-nosnapshot.cluster.replication.strategy = "write-through" -akka.actor.deployment.service-hello.cluster.preferred-nodes = ["node:node1"] -akka.cluster.replication.snapshot-frequency = 1000 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmNode1.opts deleted file mode 100644 index dc86c1c9c0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmNode2.conf deleted file mode 100644 index 8de04a2eb1..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmNode2.conf +++ /dev/null @@ -1,7 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "DEBUG" -akka.actor.deployment.hello-world-write-through-nosnapshot.router = "direct" -akka.actor.deployment.hello-world-write-through-nosnapshot.cluster.replication.storage = "transaction-log" -akka.actor.deployment.hello-world-write-through-nosnapshot.cluster.replication.strategy = "write-through" -akka.actor.deployment.service-hello.cluster.preferred-nodes = ["node:node1"] -akka.cluster.replication.snapshot-frequency = 1000 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmNode2.opts deleted file mode 100644 index bb140941a5..0000000000 --- 
a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmNode2.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node2 -Dakka.remote.port=9992 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmSpec.scala deleted file mode 100644 index c2e6ed678b..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmSpec.scala +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -// package akka.cluster.replication.transactionlog.writethrough.nosnapshot - -// import akka.actor._ -// import akka.cluster._ -// import Cluster._ -// import akka.config.Config -// import akka.cluster.LocalCluster._ - -// object ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmSpec { -// var NrOfNodes = 2 - -// sealed trait TransactionLogMessage extends Serializable -// case class Count(nr: Int) extends TransactionLogMessage -// case class Log(full: String) extends TransactionLogMessage -// case object GetLog extends TransactionLogMessage - -// class HelloWorld extends Actor with Serializable { -// var log = "" -// def receive = { -// case Count(nr) ⇒ -// println("Received number: " + nr + " on " + self.address) -// log += nr.toString -// reply("World from node [" + Config.nodename + "]") -// case GetLog ⇒ -// println("Received getLog on " + uuid) -// reply(Log(log)) -// } -// } -// } - -// class ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmNode1 extends ClusterTestNode { -// import ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmSpec._ - -// "A cluster" must { - -// "be able to replicate an actor with a transaction log and replay transaction log after actor migration" ignore { - -// barrier("start-node1", NrOfNodes) { -// Cluster.node.start() -// } - -// barrier("create-actor-on-node1", NrOfNodes) { -// val actorRef = Actor.actorOf(Props[HelloWorld]("hello-world-write-through-nosnapshot") -// actorRef.address must be("hello-world-write-through-nosnapshot") -// for (i ← 0 until 10) -// (actorRef ? 
Count(i)).as[String] must be(Some("World from node [node1]")) -// } - -// barrier("start-node2", NrOfNodes).await() - -// node.shutdown() -// } -// } -// } - -// class ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmNode2 extends MasterClusterTestNode { -// import ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmSpec._ - -// val testNodes = NrOfNodes - -// "A cluster" must { - -// "be able to replicate an actor with a transaction log and replay transaction log after actor migration" ignore { - -// barrier("start-node1", NrOfNodes).await() - -// barrier("create-actor-on-node1", NrOfNodes).await() - -// barrier("start-node2", NrOfNodes) { -// Cluster.node.start() -// } - -// Thread.sleep(5000) // wait for fail-over from node1 to node2 - -// barrier("check-fail-over-to-node2", NrOfNodes - 1) { -// // both remaining nodes should now have the replica -// node.isInUseOnNode("hello-world-write-through-nosnapshot") must be(true) -// val actorRef = Actor.registry.local.actorFor("hello-world-write-through-nosnapshot").getOrElse(fail("Actor should have been in the local actor registry")) -// actorRef.address must be("hello-world-write-through-nosnapshot") -// (actorRef ? GetLog).as[Log].get must be(Log("0123456789")) -// } - -// node.shutdown() -// } -// } - -// override def onReady() { -// LocalBookKeeperEnsemble.start() -// } - -// override def onShutdown() { -// TransactionLog.shutdown() -// LocalBookKeeperEnsemble.shutdown() -// } -// } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmNode1.conf deleted file mode 100644 index 82d6dc18ce..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmNode1.conf +++ /dev/null @@ -1,7 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.hello-world-write-through-snapshot.router = "direct" -akka.actor.deployment.hello-world-write-through-snapshot.nr-of-instances = 1 -akka.actor.deployment.hello-world-write-through-snapshot.cluster.replication.storage = "transaction-log" -akka.actor.deployment.hello-world-write-through-snapshot.cluster.replication.strategy = "write-through" -akka.cluster.replication.snapshot-frequency = 7 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmNode1.opts deleted file mode 100644 index dc86c1c9c0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmNode2.conf deleted file mode 100644 index 
82d6dc18ce..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmNode2.conf +++ /dev/null @@ -1,7 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.hello-world-write-through-snapshot.router = "direct" -akka.actor.deployment.hello-world-write-through-snapshot.nr-of-instances = 1 -akka.actor.deployment.hello-world-write-through-snapshot.cluster.replication.storage = "transaction-log" -akka.actor.deployment.hello-world-write-through-snapshot.cluster.replication.strategy = "write-through" -akka.cluster.replication.snapshot-frequency = 7 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmNode2.opts deleted file mode 100644 index bb140941a5..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmNode2.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node2 -Dakka.remote.port=9992 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmSpec.scala deleted file mode 100644 index 3df29dd510..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmSpec.scala +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -// package akka.cluster.replication.transactionlog.writethrough.snapshot - -// import akka.actor._ -// import akka.cluster._ -// import Cluster._ -// import akka.config.Config -// import akka.cluster.LocalCluster._ - -// object ReplicationTransactionLogWriteThroughSnapshotMultiJvmSpec { -// var NrOfNodes = 2 - -// sealed trait TransactionLogMessage extends Serializable -// case class Count(nr: Int) extends TransactionLogMessage -// case class Log(full: String) extends TransactionLogMessage -// case object GetLog extends TransactionLogMessage - -// class HelloWorld extends Actor with Serializable { -// var log = "" -// def receive = { -// case Count(nr) ⇒ -// log += nr.toString -// reply("World from node [" + Config.nodename + "]") -// case GetLog ⇒ -// reply(Log(log)) -// } -// } -// } - -// class ReplicationTransactionLogWriteThroughSnapshotMultiJvmNode1 extends ClusterTestNode { -// import ReplicationTransactionLogWriteThroughSnapshotMultiJvmSpec._ - -// "A cluster" must { - -// "be able to replicate an actor with a transaction log and replay transaction log after actor migration" ignore { - -// barrier("start-node1", NrOfNodes) { -// Cluster.node.start() -// } - -// barrier("create-actor-on-node1", NrOfNodes) { -// val actorRef = Actor.actorOf(Props[HelloWorld]("hello-world-write-through-snapshot") -// node.isInUseOnNode("hello-world-write-through-snapshot") must be(true) -// actorRef.address must be("hello-world-write-through-snapshot") -// var counter = 0 -// (actorRef ? 
Count(counter)).as[String].get must be("World from node [node1]") -// counter += 1 -// (actorRef ? Count(counter)).as[String].get must be("World from node [node1]") -// counter += 1 -// (actorRef ? Count(counter)).as[String].get must be("World from node [node1]") -// counter += 1 -// (actorRef ? Count(counter)).as[String].get must be("World from node [node1]") -// counter += 1 -// (actorRef ? Count(counter)).as[String].get must be("World from node [node1]") -// counter += 1 -// (actorRef ? Count(counter)).as[String].get must be("World from node [node1]") -// counter += 1 -// (actorRef ? Count(counter)).as[String].get must be("World from node [node1]") -// counter += 1 -// (actorRef ? Count(counter)).as[String].get must be("World from node [node1]") -// counter += 1 -// (actorRef ? Count(counter)).as[String].get must be("World from node [node1]") -// counter += 1 -// (actorRef ? Count(counter)).as[String].get must be("World from node [node1]") -// } - -// barrier("start-node2", NrOfNodes).await() - -// node.shutdown() -// } -// } -// } - -// class ReplicationTransactionLogWriteThroughSnapshotMultiJvmNode2 extends MasterClusterTestNode { -// import ReplicationTransactionLogWriteThroughSnapshotMultiJvmSpec._ - -// val testNodes = NrOfNodes - -// "A cluster" must { - -// "be able to replicate an actor with a transaction log and replay transaction log after actor migration" ignore { - -// barrier("start-node1", NrOfNodes).await() - -// barrier("create-actor-on-node1", NrOfNodes).await() - -// barrier("start-node2", NrOfNodes) { -// Cluster.node.start() -// } - -// Thread.sleep(5000) // wait for fail-over from node1 to node2 - -// barrier("check-fail-over-to-node2", NrOfNodes - 1) { -// // both remaining nodes should now have the replica -// node.isInUseOnNode("hello-world-write-through-snapshot") must be(true) -// val actorRef = Actor.registry.local.actorFor("hello-world-write-through-snapshot").getOrElse(fail("Actor should have been in the local actor registry")) -// actorRef.address must be("hello-world-write-through-snapshot") -// (actorRef ? 
GetLog).as[Log].get must be(Log("0123456789")) -// } - -// node.shutdown() -// } -// } - -// override def onReady() { -// LocalBookKeeperEnsemble.start() -// } - -// override def onShutdown() { -// TransactionLog.shutdown() -// LocalBookKeeperEnsemble.shutdown() -// } -// } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmNode1.conf deleted file mode 100644 index 7332be6934..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmNode1.conf +++ /dev/null @@ -1,5 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handlers = ["akka.testkit.TestEventListener"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "direct" -akka.actor.deployment.service-hello.cluster.preferred-nodes = ["node:node2"] diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmNode1.opts deleted file mode 100644 index dc86c1c9c0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmNode2.conf deleted file mode 100644 index 7332be6934..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmNode2.conf +++ /dev/null @@ -1,5 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handlers = ["akka.testkit.TestEventListener"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "direct" -akka.actor.deployment.service-hello.cluster.preferred-nodes = ["node:node2"] diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmNode2.opts deleted file mode 100644 index bb140941a5..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmNode2.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node2 -Dakka.remote.port=9992 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmSpec.scala deleted file mode 100644 index 6bc1653836..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmSpec.scala +++ /dev/null @@ -1,90 +0,0 @@ -package akka.cluster.routing.direct.failover - -import akka.config.Config -import scala.Predef._ -import akka.cluster.{ ClusterActorRef, Cluster, MasterClusterTestNode, ClusterTestNode } -import akka.actor.{ ActorInitializationException, Actor, ActorRef } -import akka.util.duration._ -import akka.util.{ Duration, Timer } -import akka.event.EventHandler -import akka.testkit.{ EventFilter, TestEvent } -import java.net.ConnectException -import 
java.nio.channels.NotYetConnectedException -import akka.cluster.LocalCluster -import akka.dispatch.Await - -object DirectRoutingFailoverMultiJvmSpec { - - val NrOfNodes = 2 - - class SomeActor extends Actor with Serializable { - - def receive = { - case "identify" ⇒ - reply(Config.nodename) - } - } -} - -class DirectRoutingFailoverMultiJvmNode1 extends MasterClusterTestNode { - - import DirectRoutingFailoverMultiJvmSpec._ - - val testNodes = NrOfNodes - - "Direct Router" must { - "throw exception [ActorInitializationException] upon fail-over" ignore { - - val ignoreExceptions = Seq(EventFilter[NotYetConnectedException], EventFilter[ConnectException]) - EventHandler.notify(TestEvent.Mute(ignoreExceptions)) - - var actor: ActorRef = null - - LocalCluster.barrier("node-start", NrOfNodes) { - Cluster.node.start() - } - - LocalCluster.barrier("actor-creation", NrOfNodes) { - actor = Actor.actorOf(Props[SomeActor]("service-hello") - } - - LocalCluster.barrier("verify-actor", NrOfNodes) { - Await.result(actor ? "identify", timeout.duration) must equal("node2") - } - - val timer = Timer(30.seconds, true) - while (timer.isTicking && !Cluster.node.isInUseOnNode("service-hello")) {} - - LocalCluster.barrier("verify-fail-over", NrOfNodes - 1) { - actor ! "identify" // trigger failure and removal of connection to node2 - intercept[Exception] { - actor ! "identify" // trigger exception since no more connections - } - } - - Cluster.node.shutdown() - } - } -} - -class DirectRoutingFailoverMultiJvmNode2 extends ClusterTestNode { - - import DirectRoutingFailoverMultiJvmSpec._ - - "___" must { - "___" ignore { - LocalCluster.barrier("node-start", NrOfNodes) { - Cluster.node.start() - } - - LocalCluster.barrier("actor-creation", NrOfNodes).await() - - LocalCluster.barrier("verify-actor", NrOfNodes) { - Cluster.node.isInUseOnNode("service-hello") must be(true) - } - - Cluster.node.shutdown() - } - } -} - diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNode1MultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNode1MultiJvmSpec.scala deleted file mode 100644 index 6ce2219978..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNode1MultiJvmSpec.scala +++ /dev/null @@ -1,60 +0,0 @@ -package akka.cluster.routing.direct.homenode - -import akka.config.Config -import akka.actor.Actor -import akka.cluster.{ ClusterTestNode, MasterClusterTestNode, Cluster } -import Cluster._ -import akka.cluster.LocalCluster._ - -object HomeNodeMultiJvmSpec { - - val NrOfNodes = 2 - - class SomeActor extends Actor with Serializable { - def receive = { - case "identify" ⇒ { - reply(Config.nodename) - } - } - } - -} - -class HomeNodeMultiJvmNode1 extends MasterClusterTestNode { - - import HomeNodeMultiJvmSpec._ - - val testNodes = NrOfNodes - - "___" must { - "___" in { - Cluster.node.start() - barrier("waiting-for-begin", NrOfNodes).await() - barrier("waiting-for-end", NrOfNodes).await() - node.shutdown() - } - } -} - -class HomeNodeMultiJvmNode2 extends ClusterTestNode { - - import HomeNodeMultiJvmSpec._ - - "Direct Router: A Direct Router" must { - "obey 'home-node' config option when instantiated actor in cluster" in { - Cluster.node.start() - barrier("waiting-for-begin", NrOfNodes).await() - - val actorNode1 = Actor.actorOf(Props[SomeActor]("service-node1") - val name1 = (actorNode1 ? 
"identify").get.asInstanceOf[String] - name1 must equal("node1") - - val actorNode2 = Actor.actorOf(Props[SomeActor]("service-node2") - val name2 = (actorNode2 ? "identify").get.asInstanceOf[String] - name2 must equal("node2") - - barrier("waiting-for-end", NrOfNodes).await() - node.shutdown() - } - } -} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNodeMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNodeMultiJvmNode1.conf deleted file mode 100644 index 893f798e1d..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNodeMultiJvmNode1.conf +++ /dev/null @@ -1,6 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-node1.router = "direct" -akka.actor.deployment.service-node1.cluster.preferred-nodes = ["node:node1"] -akka.actor.deployment.service-node2.router = "direct" -akka.actor.deployment.service-node2.cluster.preferred-nodes = ["node:node2"] diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNodeMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNodeMultiJvmNode1.opts deleted file mode 100644 index dc86c1c9c0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNodeMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNodeMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNodeMultiJvmNode2.conf deleted file mode 100644 index 893f798e1d..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNodeMultiJvmNode2.conf +++ /dev/null @@ -1,6 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-node1.router = "direct" -akka.actor.deployment.service-node1.cluster.preferred-nodes = ["node:node1"] -akka.actor.deployment.service-node2.router = "direct" -akka.actor.deployment.service-node2.cluster.preferred-nodes = ["node:node2"] diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNodeMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNodeMultiJvmNode2.opts deleted file mode 100644 index bb140941a5..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNodeMultiJvmNode2.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node2 -Dakka.remote.port=9992 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmNode1.conf deleted file mode 100644 index aa0d7771c8..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmNode1.conf +++ /dev/null @@ -1,4 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "direct" -akka.actor.deployment.service-hello.cluster.preferred-nodes = ["node:node1"] diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmNode1.opts 
b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmNode1.opts deleted file mode 100644 index dc86c1c9c0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmNode2.conf deleted file mode 100644 index aa0d7771c8..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmNode2.conf +++ /dev/null @@ -1,4 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "direct" -akka.actor.deployment.service-hello.cluster.preferred-nodes = ["node:node1"] diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmNode2.opts deleted file mode 100644 index bb140941a5..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmNode2.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node2 -Dakka.remote.port=9992 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmSpec.scala deleted file mode 100644 index a7b61af3e7..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmSpec.scala +++ /dev/null @@ -1,62 +0,0 @@ -package akka.cluster.routing.direct.normalusage - -import akka.actor.Actor -import akka.config.Config -import akka.cluster.{ ClusterActorRef, ClusterTestNode, MasterClusterTestNode, Cluster } -import akka.cluster.LocalCluster - -object SingleReplicaDirectRoutingMultiJvmSpec { - val NrOfNodes = 2 - - class SomeActor extends Actor with Serializable { - //println("---------------------------------------------------------------------------") - //println("SomeActor has been created on node [" + Config.nodename + "]") - //println("---------------------------------------------------------------------------") - - def receive = { - case "identify" ⇒ { - //println("The node received the 'identify' command: " + Config.nodename) - reply(Config.nodename) - } - } - } -} - -class SingleReplicaDirectRoutingMultiJvmNode1 extends MasterClusterTestNode { - - import SingleReplicaDirectRoutingMultiJvmSpec._ - - val testNodes = NrOfNodes - - "___" must { - "___" in { - Cluster.node.start() - LocalCluster.barrier("waiting-for-begin", NrOfNodes).await() - - LocalCluster.barrier("waiting-to-end", NrOfNodes).await() - Cluster.node.shutdown() - } - } -} - -class SingleReplicaDirectRoutingMultiJvmNode2 extends ClusterTestNode { - - import SingleReplicaDirectRoutingMultiJvmSpec._ - - "Direct Router: when node send message to existing node it" must { - "communicate with that node" in { - Cluster.node.start() - LocalCluster.barrier("waiting-for-begin", NrOfNodes).await() - - val actor = 
Actor.actorOf(Props[SomeActor]("service-hello").asInstanceOf[ClusterActorRef] - actor.isRunning must be(true) - - val result = (actor ? "identify").get - result must equal("node1") - - LocalCluster.barrier("waiting-to-end", NrOfNodes).await() - Cluster.node.shutdown() - } - } -} - diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode1.conf deleted file mode 100644 index 1772693874..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode1.conf +++ /dev/null @@ -1,8 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handlers = ["akka.testkit.TestEventListener"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "random" -akka.actor.deployment.service-hello.cluster.preferred-nodes = ["node:node1", "node:node3"] -akka.actor.deployment.service-hello.nr-of-instances = 2 -akka.actor.timeout = 30 -akka.cluster.session-timeout = 10 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode1.opts deleted file mode 100644 index f1306829d9..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 -Dakka.event.force-sync=true diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode2.conf deleted file mode 100644 index 1772693874..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode2.conf +++ /dev/null @@ -1,8 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handlers = ["akka.testkit.TestEventListener"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "random" -akka.actor.deployment.service-hello.cluster.preferred-nodes = ["node:node1", "node:node3"] -akka.actor.deployment.service-hello.nr-of-instances = 2 -akka.actor.timeout = 30 -akka.cluster.session-timeout = 10 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode2.opts deleted file mode 100644 index 897e69f626..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode2.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node2 -Dakka.remote.port=9992 -Dakka.event.force-sync=true diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode3.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode3.conf deleted file mode 100644 index 1772693874..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode3.conf +++ /dev/null @@ -1,8 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handlers = ["akka.testkit.TestEventListener"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "random" 
-akka.actor.deployment.service-hello.cluster.preferred-nodes = ["node:node1", "node:node3"] -akka.actor.deployment.service-hello.nr-of-instances = 2 -akka.actor.timeout = 30 -akka.cluster.session-timeout = 10 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode3.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode3.opts deleted file mode 100644 index 4127fb94fc..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode3.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node3 -Dakka.remote.port=9993 -Dakka.event.force-sync=true diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmSpec.scala deleted file mode 100644 index cbdc42dbe9..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmSpec.scala +++ /dev/null @@ -1,145 +0,0 @@ -package akka.cluster.routing.random.failover - -import akka.config.Config -import akka.cluster._ -import akka.actor.{ ActorRef, Actor } -import akka.event.EventHandler -import akka.util.duration._ -import akka.util.{ Duration, Timer } -import akka.testkit.{ EventFilter, TestEvent } -import java.util.{ Collections, Set ⇒ JSet } -import java.net.ConnectException -import java.nio.channels.NotYetConnectedException -import akka.cluster.LocalCluster._ -import akka.dispatch.Await - -object RandomFailoverMultiJvmSpec { - - val NrOfNodes = 3 - - class SomeActor extends Actor with Serializable { - - def receive = { - case "identify" ⇒ - reply(Config.nodename) - } - } - -} - -class RandomFailoverMultiJvmNode1 extends MasterClusterTestNode { - - import RandomFailoverMultiJvmSpec._ - - def testNodes = NrOfNodes - - "Random: when random router fails" must { - "jump to another replica" ignore { - val ignoreExceptions = Seq( - EventFilter[NotYetConnectedException], - EventFilter[ConnectException], - EventFilter[ClusterException], - EventFilter[java.nio.channels.ClosedChannelException]) - - var oldFoundConnections: JSet[String] = null - var actor: ActorRef = null - - barrier("node-start", NrOfNodes) { - EventHandler.notify(TestEvent.Mute(ignoreExceptions)) - Cluster.node.start() - } - - barrier("actor-creation", NrOfNodes) { - actor = Actor.actorOf(Props[SomeActor]("service-hello") - actor.isInstanceOf[ClusterActorRef] must be(true) - } - - val timer = Timer(30.seconds, true) - while (timer.isTicking && - !Cluster.node.isInUseOnNode("service-hello", "node1") && - !Cluster.node.isInUseOnNode("service-hello", "node3")) {} - - barrier("actor-usage", NrOfNodes) { - Cluster.node.isInUseOnNode("service-hello") must be(true) - oldFoundConnections = identifyConnections(actor) - - //since we have replication factor 2 - oldFoundConnections.size() must be(2) - } - - barrier("verify-fail-over", NrOfNodes - 1) { - val timer = Timer(30.seconds, true) - while (timer.isTicking && - !Cluster.node.isInUseOnNode("service-hello", "node1") && - !Cluster.node.isInUseOnNode("service-hello", "node2")) {} - - val newFoundConnections = identifyConnections(actor) - - //it still must be 2 since a different node should have been used to failover to - newFoundConnections.size() must be(2) - - //they are not disjoint since, there must be a single element that is in both - Collections.disjoint(newFoundConnections, oldFoundConnections) 
must be(false) - - //but they should not be equal since the shutdown-node has been replaced by another one. - newFoundConnections.equals(oldFoundConnections) must be(false) - } - - Cluster.node.shutdown() - } - } - - def identifyConnections(actor: ActorRef): JSet[String] = { - val set = new java.util.HashSet[String] - for (i ← 0 until 100) { // we should get hits from both nodes in 100 attempts, if not then not very random - val value = Await.result(actor ? "identify", timeout.duration).asInstanceOf[String] - set.add(value) - } - set - } -} - -class RandomFailoverMultiJvmNode2 extends ClusterTestNode { - - import RandomFailoverMultiJvmSpec._ - - "___" must { - "___" ignore { - barrier("node-start", NrOfNodes) { - Cluster.node.start() - } - - barrier("actor-creation", NrOfNodes).await() - barrier("actor-usage", NrOfNodes).await() - - Cluster.node.isInUseOnNode("service-hello") must be(false) - - Thread.sleep(5000) // wait for fail-over from node3 - - barrier("verify-fail-over", NrOfNodes - 1).await() - - Cluster.node.shutdown() - } - } -} - -class RandomFailoverMultiJvmNode3 extends ClusterTestNode { - - import RandomFailoverMultiJvmSpec._ - - "___" must { - "___" ignore { - barrier("node-start", NrOfNodes) { - Cluster.node.start() - } - - barrier("actor-creation", NrOfNodes).await() - barrier("actor-usage", NrOfNodes).await() - - Cluster.node.isInUseOnNode("service-hello") must be(true) - - Cluster.node.shutdown() - } - } -} - diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmNode1.conf deleted file mode 100644 index 012685917c..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmNode1.conf +++ /dev/null @@ -1,8 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-node1.router = "random" -akka.actor.deployment.service-node1.cluster.preferred-nodes = ["node:node1"] -akka.actor.deployment.service-node1.nr-of-instances = 1 -akka.actor.deployment.service-node2.router = "random" -akka.actor.deployment.service-node2.cluster.preferred-nodes = ["node:node2"] -akka.actor.deployment.service-node2.nr-of-instances = 1 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmNode1.opts deleted file mode 100644 index dc86c1c9c0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmNode2.conf deleted file mode 100644 index 012685917c..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmNode2.conf +++ /dev/null @@ -1,8 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-node1.router = "random" -akka.actor.deployment.service-node1.cluster.preferred-nodes = ["node:node1"] -akka.actor.deployment.service-node1.nr-of-instances = 1 -akka.actor.deployment.service-node2.router = "random" -akka.actor.deployment.service-node2.cluster.preferred-nodes = ["node:node2"] 
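The home-node configs for the random router (continuing below) bind each deployment address to a single preferred node, and the spec asserts that an actor created under that address actually answers from its home node. Resolution reduces to a lookup from deployment address to the preferred-nodes list, sketched here in plain Scala (the map literal mirrors the .conf keys; no Akka API involved):

    // Sketch of 'home node' resolution from the deployment configuration.
    object HomeNodeDemo extends App {
      // akka.actor.deployment.<address>.cluster.preferred-nodes
      val preferredNodes = Map(
        "service-node1" -> List("node:node1"),
        "service-node2" -> List("node:node2"))
      def homeNode(address: String) =
        preferredNodes(address).head.stripPrefix("node:")
      assert(homeNode("service-node1") == "node1")
      assert(homeNode("service-node2") == "node2")
    }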
-akka.actor.deployment.service-node2.nr-of-instances = 1 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmNode2.opts deleted file mode 100644 index bb140941a5..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmNode2.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node2 -Dakka.remote.port=9992 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmSpec.scala deleted file mode 100644 index a8f4887464..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmSpec.scala +++ /dev/null @@ -1,60 +0,0 @@ -package akka.cluster.routing.random.homenode - -import akka.config.Config -import akka.actor.Actor -import akka.cluster.{ ClusterTestNode, MasterClusterTestNode, Cluster } -import Cluster._ -import akka.cluster.LocalCluster._ - -object HomeNodeMultiJvmSpec { - - val NrOfNodes = 2 - - class SomeActor extends Actor with Serializable { - def receive = { - case "identify" ⇒ { - reply(Config.nodename) - } - } - } - -} - -class HomeNodeMultiJvmNode1 extends MasterClusterTestNode { - - import HomeNodeMultiJvmSpec._ - - val testNodes = NrOfNodes - - "___" must { - "___" in { - Cluster.node.start() - barrier("waiting-for-begin", NrOfNodes).await() - barrier("waiting-for-end", NrOfNodes).await() - node.shutdown() - } - } -} - -class HomeNodeMultiJvmNode2 extends ClusterTestNode { - - import HomeNodeMultiJvmSpec._ - - "Random Router: A Random Router" must { - "obey 'home-node' config option when instantiated actor in cluster" in { - Cluster.node.start() - barrier("waiting-for-begin", NrOfNodes).await() - - val actorNode1 = Actor.actorOf(Props[SomeActor]("service-node1") - val nameNode1 = (actorNode1 ? "identify").get.asInstanceOf[String] - nameNode1 must equal("node1") - - val actorNode2 = Actor.actorOf(Props[SomeActor]("service-node2") - val nameNode2 = (actorNode2 ? 
"identify").get.asInstanceOf[String] - nameNode2 must equal("node2") - - barrier("waiting-for-end", NrOfNodes).await() - node.shutdown() - } - } -} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_1/Random1ReplicaMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_1/Random1ReplicaMultiJvmNode1.conf deleted file mode 100644 index 729dc64fd6..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_1/Random1ReplicaMultiJvmNode1.conf +++ /dev/null @@ -1,4 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "random" -akka.actor.deployment.service-hello.nr-of-instances = 1 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_1/Random1ReplicaMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_1/Random1ReplicaMultiJvmNode1.opts deleted file mode 100644 index dc86c1c9c0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_1/Random1ReplicaMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_1/Random1ReplicaMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_1/Random1ReplicaMultiJvmSpec.scala deleted file mode 100644 index 525a09467a..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_1/Random1ReplicaMultiJvmSpec.scala +++ /dev/null @@ -1,51 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -package akka.cluster.routing.random.replicationfactor_1 - -import akka.cluster._ -import akka.cluster.Cluster._ -import akka.actor._ -import akka.config.Config -import akka.cluster.LocalCluster._ - -/** - * Test that if a single node is used with a random router with replication factor then the actor is instantiated - * on the single node. - */ -object Random1ReplicaMultiJvmSpec { - - class HelloWorld extends Actor with Serializable { - def receive = { - case "Hello" ⇒ - reply("World from node [" + Config.nodename + "]") - } - } - -} - -class Random1ReplicaMultiJvmNode1 extends MasterClusterTestNode { - - import Random1ReplicaMultiJvmSpec._ - - val testNodes = 1 - - "Random Router: A cluster" must { - - "create clustered actor, get a 'local' actor on 'home' node and a 'ref' to actor on remote node" in { - Cluster.node.start() - - var hello = Actor.actorOf(Props[HelloWorld]("service-hello") - hello must not equal (null) - hello.address must equal("service-hello") - hello.isInstanceOf[ClusterActorRef] must be(true) - - hello must not equal (null) - val reply = (hello ? 
"Hello").as[String].getOrElse(fail("Should have recieved reply from node1")) - reply must equal("World from node [node1]") - - node.shutdown() - } - } -} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode1.conf deleted file mode 100644 index ae344f2100..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode1.conf +++ /dev/null @@ -1,4 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "random" -akka.actor.deployment.service-hello.nr-of-instances = 3 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode1.opts deleted file mode 100644 index dc86c1c9c0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode2.conf deleted file mode 100644 index 09a37715d0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode2.conf +++ /dev/null @@ -1,4 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "random" -akka.actor.deployment.service-hello.cluster.repliction-factor = 3 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode2.opts deleted file mode 100644 index bb140941a5..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode2.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node2 -Dakka.remote.port=9992 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode3.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode3.conf deleted file mode 100644 index ae344f2100..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode3.conf +++ /dev/null @@ -1,4 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "random" -akka.actor.deployment.service-hello.nr-of-instances = 3 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode3.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode3.opts deleted file mode 100644 index 089e3b7776..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode3.opts +++ /dev/null @@ -1 +0,0 @@ 
--Dakka.cluster.nodename=node3 -Dakka.remote.port=9993 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmSpec.scala deleted file mode 100644 index c1a4175a09..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmSpec.scala +++ /dev/null @@ -1,119 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -package akka.cluster.routing.random.replicationfactor_3 - -import akka.cluster._ -import akka.actor._ -import akka.config.Config -import Cluster._ -import akka.cluster.LocalCluster._ -import akka.util.duration._ -import akka.dispatch.Await - -/** - * When a MultiJvmNode is started, will it automatically be part of the cluster (i.e. automatically eligible - * for running actors), or will it just be a 'client' talking to the cluster? - */ -object Random3ReplicasMultiJvmSpec { - val NrOfNodes = 3 - - class HelloWorld extends Actor with Serializable { - def receive = { - case "Hello" ⇒ - reply("World from node [" + Config.nodename + "]") - } - } -} - -/** - * What is the purpose of this node? Is this just a node for the cluster to make use of? - */ -class Random3ReplicasMultiJvmNode1 extends MasterClusterTestNode { - - import Random3ReplicasMultiJvmSpec._ - - def testNodes: Int = NrOfNodes - - "___" must { - "___" in { - Cluster.node.start() - - barrier("start-nodes", NrOfNodes).await() - - barrier("create-actor", NrOfNodes).await() - - barrier("end-test", NrOfNodes).await() - - node.shutdown() - } - } -} - -class Random3ReplicasMultiJvmNode2 extends ClusterTestNode { - - import Random3ReplicasMultiJvmSpec._ - import Cluster._ - - "Random: A cluster" must { - - "distribute requests randomly" in { - Cluster.node.start() - - //wait till node 1 has started. - barrier("start-nodes", NrOfNodes).await() - - //check if the actorRef is the expected remoteActorRef. - val hello = Actor.actorOf(Props[HelloWorld]("service-hello")) - hello must not equal (null) - hello.address must equal("service-hello") - hello.isInstanceOf[ClusterActorRef] must be(true) - - barrier("create-actor", NrOfNodes).await() - - val replies = collection.mutable.Map.empty[String, Int] - def count(reply: String) = { - if (replies.get(reply).isEmpty) replies.put(reply, 1) - else replies.put(reply, replies(reply) + 1) - } - - for (i ← 0 until 1000) { - count(Await.result((hello ?
"Hello").mapTo[String], 10 seconds)) - } - - val repliesNode1 = replies("World from node [node1]") - val repliesNode2 = replies("World from node [node2]") - val repliesNode3 = replies("World from node [node3]") - - assert(repliesNode1 > 100) - assert(repliesNode2 > 100) - assert(repliesNode3 > 100) - assert(repliesNode1 + repliesNode2 + repliesNode3 === 1000) - - barrier("end-test", NrOfNodes).await() - - node.shutdown() - } - } -} - -class Random3ReplicasMultiJvmNode3 extends ClusterTestNode { - - import Random3ReplicasMultiJvmSpec._ - import Cluster._ - - "___" must { - "___" in { - Cluster.node.start() - - barrier("start-nodes", NrOfNodes).await() - - barrier("create-actor", NrOfNodes).await() - - barrier("end-test", NrOfNodes).await() - - node.shutdown() - } - } -} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode1.conf deleted file mode 100644 index 0a858fb8fd..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode1.conf +++ /dev/null @@ -1,8 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handlers = ["akka.testkit.TestEventListener"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "round-robin" -akka.actor.deployment.service-hello.nr-of-instances = 2 -akka.actor.deployment.service-hello.cluster.preferred-nodes = ["node:node1","node:node3"] -akka.cluster.include-ref-node-in-replica-set = on -akka.actor.timeout = 30 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode1.opts deleted file mode 100644 index f1306829d9..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 -Dakka.event.force-sync=true diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode2.conf deleted file mode 100644 index 0a858fb8fd..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode2.conf +++ /dev/null @@ -1,8 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handlers = ["akka.testkit.TestEventListener"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "round-robin" -akka.actor.deployment.service-hello.nr-of-instances = 2 -akka.actor.deployment.service-hello.cluster.preferred-nodes = ["node:node1","node:node3"] -akka.cluster.include-ref-node-in-replica-set = on -akka.actor.timeout = 30 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode2.opts deleted file mode 100644 index 897e69f626..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode2.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node2 -Dakka.remote.port=9992 -Dakka.event.force-sync=true diff --git 
a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode3.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode3.conf deleted file mode 100644 index 0a858fb8fd..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode3.conf +++ /dev/null @@ -1,8 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handlers = ["akka.testkit.TestEventListener"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "round-robin" -akka.actor.deployment.service-hello.nr-of-instances = 2 -akka.actor.deployment.service-hello.cluster.preferred-nodes = ["node:node1","node:node3"] -akka.cluster.include-ref-node-in-replica-set = on -akka.actor.timeout = 30 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode3.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode3.opts deleted file mode 100644 index 4127fb94fc..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode3.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node3 -Dakka.remote.port=9993 -Dakka.event.force-sync=true diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmSpec.scala deleted file mode 100644 index 1b97ef1075..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmSpec.scala +++ /dev/null @@ -1,146 +0,0 @@ -package akka.cluster.routing.roundrobin.failover - -import akka.config.Config -import akka.cluster._ -import akka.actor.{ ActorRef, Actor, Props } -import akka.event.EventHandler -import akka.testkit.{ EventFilter, TestEvent } -import akka.util.duration._ -import akka.util.{ Duration, Timer } -import java.util.{ Collections, Set ⇒ JSet } -import java.net.ConnectException -import java.nio.channels.NotYetConnectedException -import java.lang.Thread -import akka.cluster.LocalCluster._ -import akka.dispatch.Await - -object RoundRobinFailoverMultiJvmSpec { - - val NrOfNodes = 3 - - class SomeActor extends Actor with Serializable { - - def receive = { - case "identify" ⇒ - reply(Config.nodename) - } - } - -} - -class RoundRobinFailoverMultiJvmNode1 extends MasterClusterTestNode { - - import RoundRobinFailoverMultiJvmSpec._ - - def testNodes = NrOfNodes - - "Round Robin: when round robin router fails" must { - "jump to another replica" ignore { - val ignoreExceptions = Seq( - EventFilter[NotYetConnectedException], - EventFilter[ConnectException], - EventFilter[ClusterException]) - - var oldFoundConnections: JSet[String] = null - var actor: ActorRef = null - - barrier("node-start", NrOfNodes) { - EventHandler.notify(TestEvent.Mute(ignoreExceptions)) - Cluster.node.start() - } - - barrier("actor-creation", NrOfNodes) { - actor = Actor.actorOf(Props[SomeActor]("service-hello")) - actor.isInstanceOf[ClusterActorRef] must be(true) - } - - val timer = Timer(30.seconds, true) - while (timer.isTicking && - !(Cluster.node.isInUseOnNode("service-hello", "node1") && - Cluster.node.isInUseOnNode("service-hello", "node3"))) {} // wait until the actor is in use on both node1 and node3 - //Thread.sleep(5000) // wait for all actors to start up on other nodes - - barrier("actor-usage", NrOfNodes) {
Cluster.node.isInUseOnNode("service-hello") must be(true) - oldFoundConnections = identifyConnections(actor) - - //since we have replication factor 2 - oldFoundConnections.size() must be(2) - } - - Thread.sleep(5000) // wait for fail-over from node3 - - barrier("verify-fail-over", NrOfNodes - 1) { - val timer = Timer(30.seconds, true) - while (timer.isTicking && - !Cluster.node.isInUseOnNode("service-hello", "node1") && - !Cluster.node.isInUseOnNode("service-hello", "node2")) {} - - val newFoundConnections = identifyConnections(actor) - - //it still must be 2 since a different node should have been used to failover to - newFoundConnections.size() must be(2) - - //they are not disjoint since, there must be a single element that is in both - Collections.disjoint(newFoundConnections, oldFoundConnections) must be(false) - - //but they should not be equal since the shutdown-node has been replaced by another one. - newFoundConnections.equals(oldFoundConnections) must be(false) - } - - Cluster.node.shutdown() - } - } - - def identifyConnections(actor: ActorRef): JSet[String] = { - val set = new java.util.HashSet[String] - for (i ← 0 until 100) { - val value = Await.result(actor ? "identify", timeout.duration).asInstanceOf[String] - set.add(value) - } - set - } -} - -class RoundRobinFailoverMultiJvmNode2 extends ClusterTestNode { - - import RoundRobinFailoverMultiJvmSpec._ - - "___" must { - "___" ignore { - barrier("node-start", NrOfNodes) { - Cluster.node.start() - } - - barrier("actor-creation", NrOfNodes).await() - barrier("actor-usage", NrOfNodes).await() - - Cluster.node.isInUseOnNode("service-hello") must be(false) - - Thread.sleep(5000) // wait for fail-over from node3 - - barrier("verify-fail-over", NrOfNodes - 1).await() - } - } -} - -class RoundRobinFailoverMultiJvmNode3 extends ClusterTestNode { - - import RoundRobinFailoverMultiJvmSpec._ - - "___" must { - "___" ignore { - barrier("node-start", NrOfNodes) { - Cluster.node.start() - } - - barrier("actor-creation", NrOfNodes).await() - barrier("actor-usage", NrOfNodes).await() - - Cluster.node.isInUseOnNode("service-hello") must be(true) - - Cluster.node.shutdown() - } - } -} - diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmNode1.conf deleted file mode 100644 index 85536cd656..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmNode1.conf +++ /dev/null @@ -1,8 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-node1.router = "round-robin" -akka.actor.deployment.service-node1.cluster.preferred-nodes = ["node:node1"] -akka.actor.deployment.service-node1.nr-of-instances = 1 -akka.actor.deployment.service-node2.router = "round-robin" -akka.actor.deployment.service-node2.cluster.preferred-nodes = ["node:node2"] -akka.actor.deployment.service-node2.nr-of-instances = 1 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmNode1.opts deleted file mode 100644 index dc86c1c9c0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 diff --git 
a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmNode2.conf deleted file mode 100644 index 99c85fd1a8..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmNode2.conf +++ /dev/null @@ -1,5 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "round-robin" -akka.actor.deployment.service-hello.cluster.preferred-nodes = ["node:node1"] -akka.actor.deployment.service-hello.nr-of-instances = 1 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmNode2.opts deleted file mode 100644 index bb140941a5..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmNode2.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node2 -Dakka.remote.port=9992 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmSpec.scala deleted file mode 100644 index 4dc9e96429..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmSpec.scala +++ /dev/null @@ -1,63 +0,0 @@ -package akka.cluster.routing.roundrobin.homenode - -import akka.config.Config -import akka.actor.{ Actor, Props } -import akka.cluster.{ ClusterTestNode, MasterClusterTestNode, Cluster } -import Cluster._ -import akka.cluster.LocalCluster._ - -object HomeNodeMultiJvmSpec { - - val NrOfNodes = 2 - - class SomeActor extends Actor with Serializable { - def receive = { - case "identify" ⇒ { - reply(Config.nodename) - } - } - } - -} - -class HomeNodeMultiJvmNode1 extends MasterClusterTestNode { - - import HomeNodeMultiJvmSpec._ - - val testNodes = NrOfNodes - - "___" must { - "___" in { - - Cluster.node.start() - barrier("waiting-for-begin", NrOfNodes).await() - barrier("waiting-for-end", NrOfNodes).await() - - node.shutdown() - } - } -} - -class HomeNodeMultiJvmNode2 extends ClusterTestNode { - - import HomeNodeMultiJvmSpec._ - - "Round Robin: A Router" must { - "obey the 'home-node' config option when instantiating an actor in the cluster" in { - - Cluster.node.start() - barrier("waiting-for-begin", NrOfNodes).await() - - val actorNode1 = Actor.actorOf(Props[SomeActor]("service-node1")) - val name1 = (actorNode1 ? "identify").get.asInstanceOf[String] - name1 must equal("node1") - - val actorNode2 = Actor.actorOf(Props[SomeActor]("service-node2")) - val name2 = (actorNode2 ?
"identify").get.asInstanceOf[String] - name2 must equal("node2") - - barrier("waiting-for-end", NrOfNodes).await() - node.shutdown() - } - } -} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_1/RoundRobin1ReplicaMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_1/RoundRobin1ReplicaMultiJvmNode1.conf deleted file mode 100644 index 88df1a6421..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_1/RoundRobin1ReplicaMultiJvmNode1.conf +++ /dev/null @@ -1,4 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "round-robin" -akka.actor.deployment.service-hello.nr-of-instances = 1 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_1/RoundRobin1ReplicaMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_1/RoundRobin1ReplicaMultiJvmNode1.opts deleted file mode 100644 index dc86c1c9c0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_1/RoundRobin1ReplicaMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_1/RoundRobin1ReplicaMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_1/RoundRobin1ReplicaMultiJvmSpec.scala deleted file mode 100644 index f8fd41b0cf..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_1/RoundRobin1ReplicaMultiJvmSpec.scala +++ /dev/null @@ -1,49 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -package akka.cluster.routing.roundrobin.replicationfactor_1 - -import akka.cluster._ -import Cluster._ -import akka.actor._ -import akka.config.Config -import akka.cluster.LocalCluster._ - -/** - * Test that if a single node is used with a round robin router with replication factor then the actor is instantiated on the single node. - */ -object RoundRobin1ReplicaMultiJvmSpec { - - class HelloWorld extends Actor with Serializable { - def receive = { - case "Hello" ⇒ reply("World from node [" + Config.nodename + "]") - } - } - -} - -class RoundRobin1ReplicaMultiJvmNode1 extends MasterClusterTestNode { - - import RoundRobin1ReplicaMultiJvmSpec._ - - val testNodes = 1 - - "Round Robin: A cluster" must { - - "create clustered actor, get a 'local' actor on 'home' node and a 'ref' to actor on remote node" in { - Cluster.node.start() - - var hello = Actor.actorOf(Props[HelloWorld]("service-hello") - hello must not equal (null) - hello.address must equal("service-hello") - hello.isInstanceOf[ClusterActorRef] must be(true) - - hello must not equal (null) - val reply = (hello ? 
"Hello").as[String].getOrElse(fail("Should have recieved reply from node1")) - reply must equal("World from node [node1]") - - node.shutdown() - } - } -} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmNode1.conf deleted file mode 100644 index a763b66792..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmNode1.conf +++ /dev/null @@ -1,4 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "round-robin" -akka.actor.deployment.service-hello.nr-of-instances = 2 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmNode1.opts deleted file mode 100644 index dc86c1c9c0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmNode2.conf deleted file mode 100644 index a763b66792..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmNode2.conf +++ /dev/null @@ -1,4 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "round-robin" -akka.actor.deployment.service-hello.nr-of-instances = 2 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmNode2.opts deleted file mode 100644 index bb140941a5..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmNode2.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node2 -Dakka.remote.port=9992 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmSpec.scala deleted file mode 100644 index b101a06f81..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmSpec.scala +++ /dev/null @@ -1,121 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. 
- */ - -package akka.cluster.routing.roundrobin.replicationfactor_2 - -import org.scalatest.WordSpec -import org.scalatest.matchers.MustMatchers -import org.scalatest.BeforeAndAfterAll - -import akka.cluster._ -import Cluster._ -import akka.cluster.LocalCluster._ -import akka.actor._ -import akka.actor.Actor._ -import akka.config.Config -import akka.util.duration._ -import akka.util.{ Duration, Timer, Timeout } - -import java.util.concurrent.atomic.AtomicInteger -import java.util.concurrent.ConcurrentHashMap -import akka.dispatch.Await - -/** - * When a MultiJvmNode is started, will it automatically be part of the cluster (i.e. automatically eligible - * for running actors), or will it just be a 'client' talking to the cluster? - */ -object RoundRobin2ReplicasMultiJvmSpec { - val NrOfNodes = 2 - - class HelloWorld extends Actor with Serializable { - def receive = { - case "Hello" ⇒ - reply("World from node [" + Config.nodename + "]") - } - } -} - -class RoundRobin2ReplicasMultiJvmNode1 extends MasterClusterTestNode { - import RoundRobin2ReplicasMultiJvmSpec._ - - val testNodes = NrOfNodes - - "Round Robin: A cluster" must { - - "create clustered actor, get a 'local' actor on 'home' node and a 'ref' to actor on remote node" in { - System.getProperty("akka.cluster.nodename", "") must be("node1") - System.getProperty("akka.remote.port", "") must be("9991") - - //wait till node 1 has started. - barrier("start-node1", NrOfNodes) { - Cluster.node.start() - } - - //wait till node 2 has started. - barrier("start-node2", NrOfNodes).await() - - //wait till an actor reference on node 2 has become available. - barrier("get-ref-to-actor-on-node2", NrOfNodes) { - val timer = Timer(30.seconds, true) - while (timer.isTicking && !node.isInUseOnNode("service-hello")) {} - } - - //wait till node 2 has sent a message to the replicas. - barrier("send-message-from-node2-to-replicas", NrOfNodes).await() - - node.shutdown() - } - } -} - -class RoundRobin2ReplicasMultiJvmNode2 extends ClusterTestNode { - import RoundRobin2ReplicasMultiJvmSpec._ - - "Round Robin: A cluster" must { - - "create clustered actor, get a 'local' actor on 'home' node and a 'ref' to actor on remote node" in { - System.getProperty("akka.cluster.nodename", "") must be("node2") - System.getProperty("akka.remote.port", "") must be("9992") - - //wait till node 1 has started. - barrier("start-node1", NrOfNodes).await() - - //wait till node 2 has started. - barrier("start-node2", NrOfNodes) { - Cluster.node.start() - } - - //check if the actorRef is the expected remoteActorRef. - var hello: ActorRef = null - barrier("get-ref-to-actor-on-node2", NrOfNodes) { - hello = Actor.actorOf(Props[HelloWorld]("service-hello")) - hello must not equal (null) - hello.address must equal("service-hello") - hello.isInstanceOf[ClusterActorRef] must be(true) - } - - barrier("send-message-from-node2-to-replicas", NrOfNodes) { - //todo: is there a reason to check for null again, since it has already been done in the previous block? - hello must not equal (null) - - val replies = new ConcurrentHashMap[String, AtomicInteger]() - def count(reply: String) = { - val counter = new AtomicInteger(0) - Option(replies.putIfAbsent(reply, counter)).getOrElse(counter).incrementAndGet() - } - - implicit val timeout = Timeout(Duration(20, "seconds")) - - for (i ← 1 to 8) - count(Await.result((hello ?
"Hello").mapTo[String], timeout.duration)) - - replies.get("World from node [node1]").get must equal(4) - replies.get("World from node [node2]").get must equal(4) - } - - node.shutdown() - } - } -} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode1.conf deleted file mode 100644 index 8592b46c85..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode1.conf +++ /dev/null @@ -1,4 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "round-robin" -akka.actor.deployment.service-hello.nr-of-instances = 3 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode1.opts deleted file mode 100644 index dc86c1c9c0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode2.conf deleted file mode 100644 index 92bafcfe8b..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode2.conf +++ /dev/null @@ -1,4 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "round-robin" -akka.actor.deployment.service-hello.cluster.repliction-factor = 3 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode2.opts deleted file mode 100644 index bb140941a5..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode2.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node2 -Dakka.remote.port=9992 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode3.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode3.conf deleted file mode 100644 index 8592b46c85..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode3.conf +++ /dev/null @@ -1,4 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "round-robin" -akka.actor.deployment.service-hello.nr-of-instances = 3 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode3.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode3.opts deleted file mode 100644 index 
089e3b7776..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode3.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node3 -Dakka.remote.port=9993 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmSpec.scala deleted file mode 100644 index f62b7d3e74..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmSpec.scala +++ /dev/null @@ -1,158 +0,0 @@ -// /** -// * Copyright (C) 2009-2012 Typesafe Inc. -// */ - -// package akka.cluster.routing.roundrobin.replicationfactor_3 - -// import org.scalatest.WordSpec -// import org.scalatest.matchers.MustMatchers -// import org.scalatest.BeforeAndAfterAll - -// import akka.cluster._ -// import akka.actor._ -// import akka.actor.Actor._ -// import akka.util.duration._ -// import akka.util.{ Duration, Timer } -// import akka.config.Config -// import akka.cluster.LocalCluster._ -// import Cluster._ - -// /** -// * When a MultiJvmNode is started, will it automatically be part of the cluster (i.e. automatically eligible -// * for running actors), or will it just be a 'client' talking to the cluster? -// */ -// object RoundRobin3ReplicasMultiJvmSpec { -// val NrOfNodes = 3 - -// class HelloWorld extends Actor with Serializable { -// def receive = { -// case "Hello" ⇒ -// reply("World from node [" + Config.nodename + "]") -// } -// } -// } - -// /** -// * What is the purpose of this node? Is this just a node for the cluster to make use of? -// */ -// class RoundRobin3ReplicasMultiJvmNode1 extends MasterClusterTestNode { -// import RoundRobin3ReplicasMultiJvmSpec._ - -// val testNodes = NrOfNodes - -// "Round Robin: A cluster" must { - -// "create clustered actor, get a 'local' actor on 'home' node and a 'ref' to actor on remote node" in { - -// //wait till node 1 has started. -// barrier("start-node1", NrOfNodes) { -// Cluster.node.start() -// } - -// //wait till node 2 has started. -// barrier("start-node2", NrOfNodes).await() - -// //wait till node 3 has started. -// barrier("start-node3", NrOfNodes).await() - -// //wait till an actor reference on node 2 has become available. -// barrier("get-ref-to-actor-on-node2", NrOfNodes) { -// val timer = Timer(30.seconds, true) -// while (timer.isTicking && !node.isInUseOnNode("service-hello")) {} -// } - -// //wait till node 2 has sent a message to the replicas. -// barrier("send-message-from-node2-to-replicas", NrOfNodes).await() - -// node.shutdown() -// } -// } -// } - -// class RoundRobin3ReplicasMultiJvmNode2 extends ClusterTestNode { -// import RoundRobin3ReplicasMultiJvmSpec._ -// import Cluster._ - -// "Round Robin: A cluster" must { - -// "create clustered actor, get a 'local' actor on 'home' node and a 'ref' to actor on remote node" in { - -// //wait till node 1 has started. -// barrier("start-node1", NrOfNodes).await() - -// //wait till node 2 has started. -// barrier("start-node2", NrOfNodes) { -// Cluster.node.start() -// } - -// //wait till node 3 has started. -// barrier("start-node3", NrOfNodes).await() - -// //check if the actorRef is the expected remoteActorRef.
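A minimal, self-contained model of the strict rotation that the 4/4/4 reply tallies below rely on; the AtomicLong-based index is an assumption for illustration, not the deleted ClusterActorRef router's actual implementation:

import java.util.concurrent.atomic.AtomicLong

// Cycles deterministically through a fixed set of connections.
class RoundRobin[T](items: IndexedSeq[T]) {
  private val counter = new AtomicLong(0)
  def next(): T = items((counter.getAndIncrement % items.size).toInt)
}

val rr = new RoundRobin(Vector("node1", "node2", "node3"))
val tally = (1 to 12).map(_ ⇒ rr.next()).groupBy(identity).mapValues(_.size)
// tally == Map("node1" -> 4, "node2" -> 4, "node3" -> 4), matching the assertions below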
-// var hello: ActorRef = null -// barrier("get-ref-to-actor-on-node2", NrOfNodes) { -// hello = Actor.actorOf(Props[HelloWorld]("service-hello")) -// hello must not equal (null) -// hello.address must equal("service-hello") -// hello.isInstanceOf[ClusterActorRef] must be(true) -// } - -// barrier("send-message-from-node2-to-replicas", NrOfNodes) { -// //todo: is there a reason to check for null again, since it has already been done in the previous block? -// hello must not equal (null) - -// val replies = collection.mutable.Map.empty[String, Int] -// def count(reply: String) = { -// if (replies.get(reply).isEmpty) replies.put(reply, 1) -// else replies.put(reply, replies(reply) + 1) -// } - -// count((hello ? "Hello").as[String].getOrElse(fail("Should have received reply from node1"))) -// count((hello ? "Hello").as[String].getOrElse(fail("Should have received reply from node2"))) -// count((hello ? "Hello").as[String].getOrElse(fail("Should have received reply from node3"))) -// count((hello ? "Hello").as[String].getOrElse(fail("Should have received reply from node1"))) -// count((hello ? "Hello").as[String].getOrElse(fail("Should have received reply from node2"))) -// count((hello ? "Hello").as[String].getOrElse(fail("Should have received reply from node3"))) -// count((hello ? "Hello").as[String].getOrElse(fail("Should have received reply from node1"))) -// count((hello ? "Hello").as[String].getOrElse(fail("Should have received reply from node2"))) -// count((hello ? "Hello").as[String].getOrElse(fail("Should have received reply from node3"))) -// count((hello ? "Hello").as[String].getOrElse(fail("Should have received reply from node1"))) -// count((hello ? "Hello").as[String].getOrElse(fail("Should have received reply from node2"))) -// count((hello ?
"Hello").as[String].getOrElse(fail("Should have recieved reply from node3"))) - -// replies("World from node [node1]") must equal(4) -// replies("World from node [node2]") must equal(4) -// replies("World from node [node3]") must equal(4) -// } - -// node.shutdown() -// } -// } -// } - -// class RoundRobin3ReplicasMultiJvmNode3 extends ClusterTestNode { -// import RoundRobin3ReplicasMultiJvmSpec._ -// import Cluster._ - -// "Round Robin: A cluster" must { - -// "create clustered actor, get a 'local' actor on 'home' node and a 'ref' to actor on remote node" in { -// barrier("start-node1", NrOfNodes).await() - -// barrier("start-node2", NrOfNodes).await() - -// barrier("start-node3", NrOfNodes) { -// Cluster.node.start() -// } - -// barrier("get-ref-to-actor-on-node2", NrOfNodes) { -// val timer = Timer(30.seconds, true) -// while (timer.isTicking && !node.isInUseOnNode("service-hello")) {} -// } - -// barrier("send-message-from-node2-to-replicas", NrOfNodes).await() - -// node.shutdown() -// } -// } -// } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmNode1.conf deleted file mode 100644 index fd2babf3a9..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmNode1.conf +++ /dev/null @@ -1,6 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handlers = ["akka.testkit.TestEventListener"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "akka.routing.ScatterGatherFirstCompletedRouter" -akka.actor.deployment.service-hello.nr-of-instances = 2 -akka.actor.timeout = 30 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmNode1.opts deleted file mode 100644 index dc86c1c9c0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmNode2.conf deleted file mode 100644 index fd2babf3a9..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmNode2.conf +++ /dev/null @@ -1,6 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handlers = ["akka.testkit.TestEventListener"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "akka.routing.ScatterGatherFirstCompletedRouter" -akka.actor.deployment.service-hello.nr-of-instances = 2 -akka.actor.timeout = 30 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmNode2.opts deleted file mode 100644 index bb140941a5..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmNode2.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node2 
-Dakka.remote.port=9992 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmSpec.scala deleted file mode 100644 index e8cc4f7d68..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmSpec.scala +++ /dev/null @@ -1,114 +0,0 @@ -package akka.cluster.routing.scattergather.failover - -import akka.config.Config -import akka.cluster._ -import akka.actor.{ ActorRef, Actor } -import akka.event.EventHandler -import akka.testkit.{ EventFilter, TestEvent } -import java.util.{ Collections, Set ⇒ JSet } -import java.net.ConnectException -import java.nio.channels.NotYetConnectedException -import java.lang.Thread -import akka.routing.Routing.Broadcast -import akka.cluster.LocalCluster._ -import akka.dispatch.Await - -object ScatterGatherFailoverMultiJvmSpec { - - val NrOfNodes = 2 - - case class Shutdown(node: Option[String] = None) - case class Sleep(node: String) - - class TestActor extends Actor with Serializable { - - def shutdownNode = new Thread() { - override def run() { - Thread.sleep(2000) - Cluster.node.shutdown() - } - }.start() - - def receive = { - case Shutdown(None) ⇒ shutdownNode - case Sleep(node) if node.equals(Config.nodename) ⇒ - Thread sleep 100 - reply(Config.nodename) - case Shutdown(Some(node)) if node.equals(Config.nodename) ⇒ shutdownNode - case _ ⇒ - Thread sleep 100 - reply(Config.nodename) - } - } - -} - -class ScatterGatherFailoverMultiJvmNode1 extends MasterClusterTestNode { - - import ScatterGatherFailoverMultiJvmSpec._ - - def testNodes = NrOfNodes - - "When the message is sent with ?, and all connections are up, the router" must { - "return the first response that arrives" ignore { - val ignoreExceptions = Seq( - EventFilter[NotYetConnectedException], - EventFilter[ConnectException], - EventFilter[ClusterException]) - - EventHandler.notify(TestEvent.Mute(ignoreExceptions)) - - Cluster.node.start() - LocalCluster.barrier("waiting-for-begin", NrOfNodes).await() - - /* - FIXME: Uncomment when custom routers are fully supported (ticket #1109) - - val actor = Actor.actorOf(Props[TestActor]("service-hello")).asInstanceOf[ClusterActorRef] - - identifyConnections(actor).size() must be(2) - - // since node1 is falling asleep, the response from node2 is gathered - (actor ? Broadcast(Sleep("node1"))).get.asInstanceOf[String] must be("node2") - - Thread sleep 100 - - // since node2 shuts down while processing the message, the response from node1 is gathered - (actor ? Broadcast(Shutdown(Some("node2")))).get.asInstanceOf[String] must be("node1") - - */ - LocalCluster.barrier("waiting-for-end", NrOfNodes).await() - Cluster.node.shutdown() - } - } - - def identifyConnections(actor: ActorRef): JSet[String] = { - val set = new java.util.HashSet[String] - for (i ← 0 until NrOfNodes * 2) { - val value = Await.result(actor ?
"foo", timeout.duration).asInstanceOf[String] - set.add(value) - } - set - } -} - -class ScatterGatherFailoverMultiJvmNode2 extends ClusterTestNode { - - import ScatterGatherFailoverMultiJvmSpec._ - - "___" must { - "___" ignore { - - Cluster.node.start() - LocalCluster.barrier("waiting-for-begin", NrOfNodes).await() - - /* - FIXME: Uncomment, when custom routers will be fully supported (ticket #1109) - Thread.sleep(30 *1000) - */ - - LocalCluster.barrier("waiting-for-end", NrOfNodes).await() - Cluster.node.shutdown() - } - } -} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/sample/PingPongMultiJvmExample.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/sample/PingPongMultiJvmExample.scala deleted file mode 100644 index c7e9aceaf1..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/sample/PingPongMultiJvmExample.scala +++ /dev/null @@ -1,227 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -package akka.cluster.sample - -import akka.cluster._ - -import akka.actor._ -import akka.util.duration._ - -object PingPongMultiJvmExample { - val PING_ADDRESS = "ping" - val PONG_ADDRESS = "pong" - - val ClusterName = "ping-pong-cluster" - val NrOfNodes = 5 - val Pause = true - val PauseTimeout = 5 minutes - - // ----------------------------------------------- - // Messages - // ----------------------------------------------- - - sealed trait PingPong extends Serializable - case object Ping extends PingPong - case object Pong extends PingPong - case object Stop extends PingPong - - case class Serve(player: ActorRef) - - // ----------------------------------------------- - // Actors - // ----------------------------------------------- - - class PingActor extends Actor with Serializable { - var pong: ActorRef = _ - var play = true - - def receive = { - case Pong ⇒ - if (play) { - println("---->> PING") - pong ! Ping - } else { - println("---->> GAME OVER") - } - case Serve(player) ⇒ - pong = player - println("---->> SERVE") - pong ! Ping - case Stop ⇒ - play = false - } - } - - class PongActor extends Actor with Serializable { - def receive = { - case Ping ⇒ - println("---->> PONG") - reply(Pong) - } - } -} - -/* -object PingPongMultiJvmNode1 { - import PingPong._ - import BinaryFormats._ - - val PingService = classOf[PingActor].getName - val PongService = classOf[PongActor].getName - - def main(args: Array[String]) { run } - - def run = { - // ----------------------------------------------- - // Start monitoring - // ----------------------------------------------- - - //MonitoringServer.start - //Monitoring.startLocalDaemons - - // ----------------------------------------------- - // Start cluster - // ----------------------------------------------- - - Cluster.startLocalCluster() - - // create node - val node = Cluster.newNode(NodeAddress(ClusterName, "node1", port = 9991)) - - def pause(name: String, message: String) = { - node.barrier("user-prompt-" + name, NrOfNodes, PauseTimeout) { - println(message) - if (Pause) { - println("Press enter to continue (timeout of %s) ..." 
format PauseTimeout) - System.in.read - } - } - } - - pause("start", "Ready to start all nodes") - println("Starting nodes ...") - - Cluster.node.start() - - node.barrier("start", NrOfNodes) { - // wait for others to start - } - - // ----------------------------------------------- - // Store pong actors in the cluster - // ----------------------------------------------- - - pause("create", "Ready to create all actors") - println("Creating actors ...") - - // store the ping actor in the cluster, but do not deploy it anywhere - node.store(classOf[PingActor], PING_ADDRESS) - - // store the pong actor in the cluster and replicate it on all nodes - node.store(classOf[PongActor], PONG_ADDRESS, NrOfNodes) - - // give some time for the deployment - Thread.sleep(3000) - - // ----------------------------------------------- - // Get actor references - // ----------------------------------------------- - - // check out a local ping actor - val ping = node.use[PingActor](PING_ADDRESS).head - - // get a reference to all the pong actors through a round-robin router actor ref - val pong = node.ref(PONG_ADDRESS, router = Router.RoundRobin) - - // ----------------------------------------------- - // Play the game - // ----------------------------------------------- - - pause("play", "Ready to play ping pong") - - ping ! Serve(pong) - - // let them play for 3 seconds - Thread.sleep(3000) - - ping ! Stop - - // give some time for the game to finish - Thread.sleep(3000) - - // ----------------------------------------------- - // Stop actors - // ----------------------------------------------- - - pause("stop", "Ready to stop actors") - println("Stopping actors ...") - - ping.stop - pong.stop - - // give remote actors time to stop - Thread.sleep(5000) - - // ----------------------------------------------- - // Stop everything - // ----------------------------------------------- - - pause("shutdown", "Ready to shutdown") - println("Stopping everything ...") - - //Monitoring.stopLocalDaemons - //MonitoringServer.stop - - Actor.remote.shutdown - Actor.registry.local.shutdownAll - - node.stop - - Cluster.shutdownLocalCluster - } -} - -object PingPongMultiJvmNode2 extends PongNode(2) -object PingPongMultiJvmNode3 extends PongNode(3) -object PingPongMultiJvmNode4 extends PongNode(4) -object PingPongMultiJvmNode5 extends PongNode(5) - -class PongNode(number: Int) { - import PingPong._ - - def main(args: Array[String]) { run } - - def run = { - val node = Cluster.newNode(NodeAddress(ClusterName, "node" + number, port = 9990 + number)) - - def pause(name: String) = { - node.barrier("user-prompt-" + name, NrOfNodes, PauseTimeout) { - // wait for user prompt - } - } - - pause("start") - - node.barrier("start", NrOfNodes) { - Cluster.node.start() - } - - pause("create") - - pause("play") - - pause("stop") - - pause("shutdown") - - // clean up and stop - - Actor.remote.shutdown - Actor.registry.local.shutdownAll - - node.stop - } -} -*/ diff --git a/akka-remote/src/test/scala/akka/remote/AccrualFailureDetectorSpec.scala b/akka-cluster/src/test/scala/akka/cluster/AccrualFailureDetectorSpec.scala similarity index 83% rename from akka-remote/src/test/scala/akka/remote/AccrualFailureDetectorSpec.scala rename to akka-cluster/src/test/scala/akka/cluster/AccrualFailureDetectorSpec.scala index 17a848b8d3..f611fc9812 100644 --- a/akka-remote/src/test/scala/akka/remote/AccrualFailureDetectorSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/AccrualFailureDetectorSpec.scala @@ -1,16 +1,18 @@ -package akka.remote +package 
akka.cluster import java.net.InetSocketAddress import akka.testkit.AkkaSpec import akka.actor.Address -class AccrualFailureDetectorSpec extends AkkaSpec { +class AccrualFailureDetectorSpec extends AkkaSpec(""" + akka.loglevel = "DEBUG" +""") { "An AccrualFailureDetector" must { val conn = Address("akka", "", Some("localhost"), Some(2552)) "mark node as available after a series of successful heartbeats" in { - val fd = new AccrualFailureDetector() + val fd = new AccrualFailureDetector(system) fd.heartbeat(conn) @@ -25,7 +27,7 @@ class AccrualFailureDetectorSpec extends AkkaSpec { // FIXME how should we deal with explicit removal of connection? - if triggered as failure then we have a problem in bootstrap - see line 142 in AccrualFailureDetector "mark node as dead after explicit removal of connection" ignore { - val fd = new AccrualFailureDetector + val fd = new AccrualFailureDetector(system) fd.heartbeat(conn) @@ -43,7 +45,7 @@ class AccrualFailureDetectorSpec extends AkkaSpec { } "mark node as dead if heartbeats are missed" in { - val fd = new AccrualFailureDetector(threshold = 3) + val fd = new AccrualFailureDetector(system, threshold = 3) fd.heartbeat(conn) @@ -61,7 +63,7 @@ class AccrualFailureDetectorSpec extends AkkaSpec { } "mark node as available if it starts heartbeating again after being marked dead due to detection of failure" in { - val fd = new AccrualFailureDetector(threshold = 3) + val fd = new AccrualFailureDetector(system, threshold = 3) fd.heartbeat(conn) diff --git a/akka-cluster/src/test/scala/akka/cluster/AsynchronousTransactionLogSpec.scala b/akka-cluster/src/test/scala/akka/cluster/AsynchronousTransactionLogSpec.scala deleted file mode 100644 index 0d26befc4e..0000000000 --- a/akka-cluster/src/test/scala/akka/cluster/AsynchronousTransactionLogSpec.scala +++ /dev/null @@ -1,230 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc.
- */ -package akka.cluster - -import org.apache.bookkeeper.client.BookKeeper -import org.scalatest.WordSpec -import org.scalatest.matchers.MustMatchers -import org.scalatest.BeforeAndAfterAll - -import akka.actor._ -import akka.event.EventHandler -import akka.testkit.{ EventFilter, TestEvent } - -import com.eaio.uuid.UUID - -class AsynchronousTransactionLogSpec extends WordSpec with MustMatchers with BeforeAndAfterAll { - private var bookKeeper: BookKeeper = _ - private var localBookKeeper: LocalBookKeeper = _ - - "An asynchronous Transaction Log" should { - "be able to record entries - asynchronous" in { - val uuid = (new UUID).toString - val txlog = TransactionLog.newLogFor(uuid, true, null) - val entry = "hello".getBytes("UTF-8") - txlog.recordEntry(entry) - Thread.sleep(200) - txlog.close - } - - "be able to be deleted - asynchronous" in { - val uuid = (new UUID).toString - val txlog = TransactionLog.newLogFor(uuid, true, null) - val entry = "hello".getBytes("UTF-8") - txlog.recordEntry(entry) - - txlog.delete() - txlog.close() - - val zkClient = TransactionLog.zkClient - assert(zkClient.readData(txlog.snapshotPath, true) == null) - assert(zkClient.readData(txlog.txLogPath, true) == null) - } - - "be able to be checked for existence - asynchronous" in { - val uuid = (new UUID).toString - TransactionLog.exists(uuid) must be(false) - - TransactionLog.newLogFor(uuid, true, null) - TransactionLog.exists(uuid) must be(true) - } - - "fail to be opened if non-existing - asynchronous" in { - EventHandler.notify(TestEvent.Mute(EventFilter[ReplicationException])) - val uuid = (new UUID).toString - intercept[ReplicationException](TransactionLog.logFor(uuid, true, null)) - EventHandler.notify(TestEvent.UnMuteAll) - } - - "be able to overwrite an existing txlog if one already exists - asynchronous" in { - val uuid = (new UUID).toString - val txlog1 = TransactionLog.newLogFor(uuid, true, null) - val entry = "hello".getBytes("UTF-8") - txlog1.recordEntry(entry) - txlog1.recordEntry(entry) - txlog1.close - - val txLog2 = TransactionLog.newLogFor(uuid, true, null) - txLog2.latestSnapshotId.isDefined must be(false) - txLog2.latestEntryId must be(-1) - } - - "be able to record and delete entries - asynchronous" in { - EventHandler.notify(TestEvent.Mute(EventFilter[ReplicationException])) - val uuid = (new UUID).toString - val txlog1 = TransactionLog.newLogFor(uuid, true, null) - Thread.sleep(200) - val entry = "hello".getBytes("UTF-8") - txlog1.recordEntry(entry) - Thread.sleep(200) - txlog1.recordEntry(entry) - Thread.sleep(200) - txlog1.delete - Thread.sleep(200) - intercept[ReplicationException](TransactionLog.logFor(uuid, true, null)) - EventHandler.notify(TestEvent.UnMuteAll) - } - - "be able to record entries and read entries with 'entriesInRange' - asynchronous" in { - val uuid = (new UUID).toString - val txlog1 = TransactionLog.newLogFor(uuid, true, null) - Thread.sleep(200) - val entry = "hello".getBytes("UTF-8") - txlog1.recordEntry(entry) - Thread.sleep(200) - txlog1.recordEntry(entry) - Thread.sleep(200) - txlog1.close - - val txlog2 = TransactionLog.logFor(uuid, true, null) - Thread.sleep(200) - val entries = txlog2.entriesInRange(0, 1).map(bytes ⇒ new String(bytes, "UTF-8")) - Thread.sleep(200) - entries.size must equal(2) - entries(0) must equal("hello") - entries(1) must equal("hello") - Thread.sleep(200) - txlog2.close - } - - "be able to record entries and read entries with 'entries' - asynchronous" in { - val uuid = (new UUID).toString - val txlog1 = TransactionLog.newLogFor(uuid,
true, null) - Thread.sleep(200) - val entry = "hello".getBytes("UTF-8") - txlog1.recordEntry(entry) - Thread.sleep(200) - txlog1.recordEntry(entry) - Thread.sleep(200) - txlog1.recordEntry(entry) - Thread.sleep(200) - txlog1.recordEntry(entry) - Thread.sleep(200) - txlog1.close - - val txlog2 = TransactionLog.logFor(uuid, true, null) - val entries = txlog2.entries.map(bytes ⇒ new String(bytes, "UTF-8")) - Thread.sleep(200) - entries.size must equal(4) - entries(0) must equal("hello") - entries(1) must equal("hello") - entries(2) must equal("hello") - entries(3) must equal("hello") - Thread.sleep(200) - txlog2.close - } - - "be able to record a snapshot - asynchronous" in { - val uuid = (new UUID).toString - val txlog1 = TransactionLog.newLogFor(uuid, true, null) - Thread.sleep(200) - val snapshot = "snapshot".getBytes("UTF-8") - txlog1.recordSnapshot(snapshot) - Thread.sleep(200) - txlog1.close - } - - "be able to record and read a snapshot and following entries - asynchronous" in { - val uuid = (new UUID).toString - val txlog1 = TransactionLog.newLogFor(uuid, true, null) - Thread.sleep(200) - val snapshot = "snapshot".getBytes("UTF-8") - txlog1.recordSnapshot(snapshot) - Thread.sleep(200) - - val entry = "hello".getBytes("UTF-8") - txlog1.recordEntry(entry) - Thread.sleep(200) - txlog1.recordEntry(entry) - Thread.sleep(200) - txlog1.recordEntry(entry) - Thread.sleep(200) - txlog1.recordEntry(entry) - Thread.sleep(200) - txlog1.close - - val txlog2 = TransactionLog.logFor(uuid, true, null) - Thread.sleep(200) - val (snapshotAsBytes, entriesAsBytes) = txlog2.latestSnapshotAndSubsequentEntries - Thread.sleep(200) - new String(snapshotAsBytes.getOrElse(fail("No snapshot")), "UTF-8") must equal("snapshot") - - val entries = entriesAsBytes.map(bytes ⇒ new String(bytes, "UTF-8")) - Thread.sleep(200) - entries.size must equal(4) - entries(0) must equal("hello") - entries(1) must equal("hello") - entries(2) must equal("hello") - entries(3) must equal("hello") - Thread.sleep(200) - txlog2.close - } - - "be able to record entries then a snapshot then more entries - and then read from the snapshot and the following entries - asynchronous" in { - val uuid = (new UUID).toString - val txlog1 = TransactionLog.newLogFor(uuid, true, null) - Thread.sleep(200) - - val entry = "hello".getBytes("UTF-8") - txlog1.recordEntry(entry) - Thread.sleep(200) - txlog1.recordEntry(entry) - Thread.sleep(200) - txlog1.recordEntry(entry) - Thread.sleep(200) - - val snapshot = "snapshot".getBytes("UTF-8") - txlog1.recordSnapshot(snapshot) - Thread.sleep(200) - txlog1.recordEntry(entry) - Thread.sleep(200) - txlog1.recordEntry(entry) - Thread.sleep(200) - txlog1.close - - val txlog2 = TransactionLog.logFor(uuid, true, null) - Thread.sleep(200) - val (snapshotAsBytes, entriesAsBytes) = txlog2.latestSnapshotAndSubsequentEntries - Thread.sleep(200) - new String(snapshotAsBytes.getOrElse(fail("No snapshot")), "UTF-8") must equal("snapshot") - val entries = entriesAsBytes.map(bytes ⇒ new String(bytes, "UTF-8")) - Thread.sleep(200) - entries.size must equal(2) - entries(0) must equal("hello") - entries(1) must equal("hello") - Thread.sleep(200) - txlog2.close - } - } - - override def beforeAll() = { - LocalBookKeeperEnsemble.start() - TransactionLog.start() - } - - override def afterAll() = { - TransactionLog.shutdown() - LocalBookKeeperEnsemble.shutdown() - } -} diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala new file mode 100644 
index 0000000000..240d1ad3ff --- /dev/null +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala @@ -0,0 +1,35 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ + +package akka.cluster + +import akka.testkit.AkkaSpec +import akka.util.duration._ +import akka.util.Duration + +@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) +class ClusterConfigSpec extends AkkaSpec( + """ + akka { + actor { + provider = "akka.remote.RemoteActorRefProvider" + } + } + """) { + + "Clustering" must { + + "be able to parse generic cluster config elements" in { + val settings = new ClusterSettings(system.settings.config, system.name) + import settings._ + FailureDetectorThreshold must be(8) + FailureDetectorMaxSampleSize must be(1000) + SeedNodeConnectionTimeout must be(30 seconds) + MaxTimeToRetryJoiningCluster must be(30 seconds) + InitialDelayForGossip must be(5 seconds) + GossipFrequency must be(1 second) + SeedNodes must be(Set()) + } + } +} diff --git a/akka-cluster/src/test/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala b/akka-cluster/src/test/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala new file mode 100644 index 0000000000..6366a9f65e --- /dev/null +++ b/akka-cluster/src/test/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala @@ -0,0 +1,95 @@ +// /** +// * Copyright (C) 2009-2011 Typesafe Inc. +// */ +// package akka.cluster + +// import java.net.InetSocketAddress + +// import akka.testkit._ +// import akka.dispatch._ +// import akka.actor._ +// import com.typesafe.config._ + +// class GossipingAccrualFailureDetectorSpec extends AkkaSpec(""" +// akka { +// loglevel = "INFO" +// actor.provider = "akka.remote.RemoteActorRefProvider" + +// remote.server.hostname = localhost +// remote.server.port = 5550 +// remote.failure-detector.threshold = 3 +// cluster.seed-nodes = ["akka://localhost:5551"] +// } +// """) with ImplicitSender { + +// val conn1 = Address("akka", system.systemName, Some("localhost"), Some(5551)) +// val node1 = ActorSystem("GossiperSpec", ConfigFactory +// .parseString("akka { remote.server.port=5551, cluster.use-cluster = on }") +// .withFallback(system.settings.config)) +// val remote1 = +// node1.asInstanceOf[ActorSystemImpl] +// .provider.asInstanceOf[RemoteActorRefProvider] +// .remote +// val gossiper1 = remote1.gossiper +// val fd1 = remote1.failureDetector +// gossiper1 must be('defined) + +// val conn2 = RemoteNettyAddress("localhost", 5552) +// val node2 = ActorSystem("GossiperSpec", ConfigFactory +// .parseString("akka { remote.server.port=5552, cluster.use-cluster = on }") +// .withFallback(system.settings.config)) +// val remote2 = +// node2.asInstanceOf[ActorSystemImpl] +// .provider.asInstanceOf[RemoteActorRefProvider] +// .remote +// val gossiper2 = remote2.gossiper +// val fd2 = remote2.failureDetector +// gossiper2 must be('defined) + +// val conn3 = RemoteNettyAddress("localhost", 5553) +// val node3 = ActorSystem("GossiperSpec", ConfigFactory +// .parseString("akka { remote.server.port=5553, cluster.use-cluster = on }") +// .withFallback(system.settings.config)) +// val remote3 = +// node3.asInstanceOf[ActorSystemImpl] +// .provider.asInstanceOf[RemoteActorRefProvider] +// .remote +// val gossiper3 = remote3.gossiper +// val fd3 = remote3.failureDetector +// gossiper3 must be('defined) + +// "A Gossip-driven Failure Detector" must { + +// "receive gossip heartbeats so that all healthy nodes in the cluster are marked 'available'" ignore { +// Thread.sleep(5000) // let them gossip for 10 
seconds +// fd1.isAvailable(conn2) must be(true) +// fd1.isAvailable(conn3) must be(true) +// fd2.isAvailable(conn1) must be(true) +// fd2.isAvailable(conn3) must be(true) +// fd3.isAvailable(conn1) must be(true) +// fd3.isAvailable(conn2) must be(true) +// } + +// "mark node as 'unavailable' if a node in the cluster is shut down and its heartbeats stops" ignore { +// // kill node 3 +// gossiper3.get.shutdown() +// node3.shutdown() +// Thread.sleep(5000) // let them gossip for 10 seconds + +// fd1.isAvailable(conn2) must be(true) +// fd1.isAvailable(conn3) must be(false) +// fd2.isAvailable(conn1) must be(true) +// fd2.isAvailable(conn3) must be(false) +// } +// } + +// override def atTermination() { +// gossiper1.get.shutdown() +// gossiper2.get.shutdown() +// gossiper3.get.shutdown() +// node1.shutdown() +// node2.shutdown() +// node3.shutdown() +// // FIXME Ordering problem - If we shut down the ActorSystem before the Gossiper then we get an IllegalStateException +// } +// } diff --git a/akka-cluster/src/test/scala/akka/cluster/SynchronousTransactionLogSpec.scala b/akka-cluster/src/test/scala/akka/cluster/SynchronousTransactionLogSpec.scala deleted file mode 100644 index 3dc58d6c9a..0000000000 --- a/akka-cluster/src/test/scala/akka/cluster/SynchronousTransactionLogSpec.scala +++ /dev/null @@ -1,190 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ -package akka.cluster - -import org.apache.bookkeeper.client.BookKeeper -import org.scalatest.WordSpec -import org.scalatest.matchers.MustMatchers -import org.scalatest.BeforeAndAfterAll - -import akka.actor._ -import akka.event.EventHandler -import akka.testkit.{ EventFilter, TestEvent } - -import com.eaio.uuid.UUID - -class SynchronousTransactionLogSpec extends WordSpec with MustMatchers with BeforeAndAfterAll { - private var bookKeeper: BookKeeper = _ - private var localBookKeeper: LocalBookKeeper = _ - - "A synchronous used Transaction Log" should { - - "be able to be deleted - synchronous" in { - val uuid = (new UUID).toString - val txlog = TransactionLog.newLogFor(uuid, false, null) - val entry = "hello".getBytes("UTF-8") - txlog.recordEntry(entry) - - txlog.delete() - txlog.close() - - val zkClient = TransactionLog.zkClient - assert(zkClient.readData(txlog.snapshotPath, true) == null) - assert(zkClient.readData(txlog.txLogPath, true) == null) - } - - "fail to be opened if non existing - synchronous" in { - EventHandler.notify(TestEvent.Mute(EventFilter[ReplicationException])) - val uuid = (new UUID).toString - intercept[ReplicationException](TransactionLog.logFor(uuid, false, null)) - EventHandler.notify(TestEvent.UnMuteAll) - } - - "be able to be checked for existence - synchronous" in { - val uuid = (new UUID).toString - TransactionLog.exists(uuid) must be(false) - - TransactionLog.newLogFor(uuid, false, null) - TransactionLog.exists(uuid) must be(true) - } - - "be able to record entries - synchronous" in { - val uuid = (new UUID).toString - val txlog = TransactionLog.newLogFor(uuid, false, null) - val entry = "hello".getBytes("UTF-8") - txlog.recordEntry(entry) - } - - "be able to overweite an existing txlog if one already exists - synchronous" in { - val uuid = (new UUID).toString - val txlog1 = TransactionLog.newLogFor(uuid, false, null) - val entry = "hello".getBytes("UTF-8") - txlog1.recordEntry(entry) - txlog1.recordEntry(entry) - txlog1.close - - val txLog2 = TransactionLog.newLogFor(uuid, false, null) - txLog2.latestSnapshotId.isDefined must be(false) - txLog2.latestEntryId must be(-1) - } - - "be able to record and 
delete entries - synchronous" in { - val uuid = (new UUID).toString - val txlog1 = TransactionLog.newLogFor(uuid, false, null) - val entry = "hello".getBytes("UTF-8") - txlog1.recordEntry(entry) - txlog1.recordEntry(entry) - txlog1.delete - txlog1.close - // intercept[ReplicationException](TransactionLog.logFor(uuid, false, null)) - } - - "be able to record entries and read entries with 'entriesInRange' - synchronous" in { - val uuid = (new UUID).toString - val txlog1 = TransactionLog.newLogFor(uuid, false, null) - val entry = "hello".getBytes("UTF-8") - txlog1.recordEntry(entry) - txlog1.recordEntry(entry) - txlog1.close - - val txlog2 = TransactionLog.logFor(uuid, false, null) - val entries = txlog2.entriesInRange(0, 1).map(bytes ⇒ new String(bytes, "UTF-8")) - entries.size must equal(2) - entries(0) must equal("hello") - entries(1) must equal("hello") - txlog2.close - } - - "be able to record entries and read entries with 'entries' - synchronous" in { - val uuid = (new UUID).toString - val txlog1 = TransactionLog.newLogFor(uuid, false, null) - val entry = "hello".getBytes("UTF-8") - txlog1.recordEntry(entry) - txlog1.recordEntry(entry) - txlog1.recordEntry(entry) - txlog1.recordEntry(entry) - txlog1.close // should work without txlog.close - - val txlog2 = TransactionLog.logFor(uuid, false, null) - val entries = txlog2.entries.map(bytes ⇒ new String(bytes, "UTF-8")) - entries.size must equal(4) - entries(0) must equal("hello") - entries(1) must equal("hello") - entries(2) must equal("hello") - entries(3) must equal("hello") - txlog2.close - } - - "be able to record a snapshot - synchronous" in { - val uuid = (new UUID).toString - val txlog1 = TransactionLog.newLogFor(uuid, false, null) - val snapshot = "snapshot".getBytes("UTF-8") - txlog1.recordSnapshot(snapshot) - txlog1.close - } - - "be able to record and read a snapshot and following entries - synchronous" in { - val uuid = (new UUID).toString - val txlog1 = TransactionLog.newLogFor(uuid, false, null) - val snapshot = "snapshot".getBytes("UTF-8") - txlog1.recordSnapshot(snapshot) - - val entry = "hello".getBytes("UTF-8") - txlog1.recordEntry(entry) - txlog1.recordEntry(entry) - txlog1.recordEntry(entry) - txlog1.recordEntry(entry) - txlog1.close - - val txlog2 = TransactionLog.logFor(uuid, false, null) - val (snapshotAsBytes, entriesAsBytes) = txlog2.latestSnapshotAndSubsequentEntries - new String(snapshotAsBytes.getOrElse(fail("No snapshot")), "UTF-8") must equal("snapshot") - - val entries = entriesAsBytes.map(bytes ⇒ new String(bytes, "UTF-8")) - entries.size must equal(4) - entries(0) must equal("hello") - entries(1) must equal("hello") - entries(2) must equal("hello") - entries(3) must equal("hello") - txlog2.close - } - - "be able to record entries then a snapshot then more entries - and then read from the snapshot and the following entries - synchronous" in { - val uuid = (new UUID).toString - val txlog1 = TransactionLog.newLogFor(uuid, false, null) - - val entry = "hello".getBytes("UTF-8") - txlog1.recordEntry(entry) - txlog1.recordEntry(entry) - txlog1.recordEntry(entry) - - val snapshot = "snapshot".getBytes("UTF-8") - txlog1.recordSnapshot(snapshot) - - txlog1.recordEntry(entry) - txlog1.recordEntry(entry) - txlog1.close - - val txlog2 = TransactionLog.logFor(uuid, false, null) - val (snapshotAsBytes, entriesAsBytes) = txlog2.latestSnapshotAndSubsequentEntries - new String(snapshotAsBytes.getOrElse(fail("No snapshot")), "UTF-8") must equal("snapshot") - - val entries = entriesAsBytes.map(bytes ⇒ new String(bytes, 
"UTF-8")) - entries.size must equal(2) - entries(0) must equal("hello") - entries(1) must equal("hello") - txlog2.close - } - } - - override def beforeAll() = { - LocalBookKeeperEnsemble.start() - TransactionLog.start() - } - - override def afterAll() = { - TransactionLog.shutdown() - LocalBookKeeperEnsemble.shutdown() - } -} diff --git a/akka-remote/src/test/scala/akka/remote/VectorClockSpec.scala b/akka-cluster/src/test/scala/akka/cluster/VectorClockSpec.scala similarity index 53% rename from akka-remote/src/test/scala/akka/remote/VectorClockSpec.scala rename to akka-cluster/src/test/scala/akka/cluster/VectorClockSpec.scala index 5bfda16666..df9cead7f8 100644 --- a/akka-remote/src/test/scala/akka/remote/VectorClockSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/VectorClockSpec.scala @@ -1,4 +1,4 @@ -package akka.remote +package akka.cluster import java.net.InetSocketAddress import akka.testkit.AkkaSpec @@ -6,7 +6,7 @@ import akka.testkit.AkkaSpec class VectorClockSpec extends AkkaSpec { import VectorClock._ - "An VectorClock" must { + "A VectorClock" must { "have zero versions when created" in { val clock = VectorClock() @@ -40,7 +40,7 @@ class VectorClockSpec extends AkkaSpec { clock1.compare(clock2) must not be (Concurrent) } - "A clock should not happen before an identical clock" in { + "not happen before an identical clock" in { val clock1_1 = VectorClock() val clock2_1 = clock1_1.increment(1, System.currentTimeMillis) val clock3_1 = clock2_1.increment(2, System.currentTimeMillis) @@ -54,7 +54,7 @@ class VectorClockSpec extends AkkaSpec { clock4_1.compare(clock4_2) must not be (Concurrent) } - "A clock should happen before an identical clock with a single additional event" in { + "happen before an identical clock with a single additional event" in { val clock1_1 = VectorClock() val clock2_1 = clock1_1.increment(1, System.currentTimeMillis) val clock3_1 = clock2_1.increment(2, System.currentTimeMillis) @@ -121,4 +121,82 @@ class VectorClockSpec extends AkkaSpec { clock5_1.compare(clock3_2) must be(After) } } + + "A Versioned" must { + class TestVersioned(val version: VectorClock = VectorClock()) extends Versioned { + def increment(v: Int, time: Long) = new TestVersioned(version.increment(v, time)) + } + + "have zero versions when created" in { + val versioned = new TestVersioned() + versioned.version.versions must be(Vector()) + } + + "happen before an identical versioned with a single additional event" in { + val versioned1_1 = new TestVersioned() + val versioned2_1 = versioned1_1.increment(1, System.currentTimeMillis) + val versioned3_1 = versioned2_1.increment(2, System.currentTimeMillis) + val versioned4_1 = versioned3_1.increment(1, System.currentTimeMillis) + + val versioned1_2 = new TestVersioned() + val versioned2_2 = versioned1_2.increment(1, System.currentTimeMillis) + val versioned3_2 = versioned2_2.increment(2, System.currentTimeMillis) + val versioned4_2 = versioned3_2.increment(1, System.currentTimeMillis) + val versioned5_2 = versioned4_2.increment(3, System.currentTimeMillis) + + Versioned.latestVersionOf[TestVersioned](versioned4_1, versioned5_2) must be(versioned5_2) + } + + "Two versioneds with different events should be concurrent: 1" in { + var versioned1_1 = new TestVersioned() + val versioned2_1 = versioned1_1.increment(1, System.currentTimeMillis) + + val versioned1_2 = new TestVersioned() + val versioned2_2 = versioned1_2.increment(2, System.currentTimeMillis) + + Versioned.latestVersionOf[TestVersioned](versioned2_1, versioned2_2) must 
be(versioned2_1) + } + + "Two versioneds with different events should be concurrent: 2" in { + val versioned1_3 = new TestVersioned() + val versioned2_3 = versioned1_3.increment(1, System.currentTimeMillis) + val versioned3_3 = versioned2_3.increment(2, System.currentTimeMillis) + val versioned4_3 = versioned3_3.increment(1, System.currentTimeMillis) + + val versioned1_4 = new TestVersioned() + val versioned2_4 = versioned1_4.increment(1, System.currentTimeMillis) + val versioned3_4 = versioned2_4.increment(1, System.currentTimeMillis) + val versioned4_4 = versioned3_4.increment(3, System.currentTimeMillis) + + Versioned.latestVersionOf[TestVersioned](versioned4_3, versioned4_4) must be(versioned4_3) + } + + "be earlier than another versioned if it has an older version" in { + val versioned1_1 = new TestVersioned() + val versioned2_1 = versioned1_1.increment(2, System.currentTimeMillis) + val versioned3_1 = versioned2_1.increment(2, System.currentTimeMillis) + + val versioned1_2 = new TestVersioned() + val versioned2_2 = versioned1_2.increment(1, System.currentTimeMillis) + val versioned3_2 = versioned2_2.increment(2, System.currentTimeMillis) + val versioned4_2 = versioned3_2.increment(2, System.currentTimeMillis) + val versioned5_2 = versioned4_2.increment(3, System.currentTimeMillis) + + Versioned.latestVersionOf[TestVersioned](versioned3_1, versioned5_2) must be(versioned5_2) + } + + "be later than another versioned if it has an newer version" in { + val versioned1_1 = new TestVersioned() + val versioned2_1 = versioned1_1.increment(1, System.currentTimeMillis) + val versioned3_1 = versioned2_1.increment(2, System.currentTimeMillis) + val versioned4_1 = versioned3_1.increment(2, System.currentTimeMillis) + val versioned5_1 = versioned4_1.increment(3, System.currentTimeMillis) + + val versioned1_2 = new TestVersioned() + val versioned2_2 = versioned1_2.increment(2, System.currentTimeMillis) + val versioned3_2 = versioned2_2.increment(2, System.currentTimeMillis) + + Versioned.latestVersionOf[TestVersioned](versioned5_1, versioned3_2) must be(versioned5_1) + } + } } diff --git a/akka-cluster/src/test/scala/akka/cluster/sample/ClusteredPingPongSample.scala b/akka-cluster/src/test/scala/akka/cluster/sample/ClusteredPingPongSample.scala deleted file mode 100644 index c242185450..0000000000 --- a/akka-cluster/src/test/scala/akka/cluster/sample/ClusteredPingPongSample.scala +++ /dev/null @@ -1,134 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -package akka.cluster.sample - -import akka.cluster._ - -import akka.actor._ -import akka.actor.Actor._ - -import java.util.concurrent.CountDownLatch - -object PingPong { - val PING_ADDRESS = "ping" - val PONG_ADDRESS = "pong" - - val NrOfPings = 5 - - // ------------------------ - // Messages - // ------------------------ - - sealed trait PingPong extends Serializable - case object Ball extends PingPong - case object Stop extends PingPong - case class Latch(latch: CountDownLatch) extends PingPong - - // ------------------------ - // Actors - // ------------------------ - - class PingActor extends Actor with Serializable { - var count = 0 - var gameOverLatch: CountDownLatch = _ - - def receive = { - case Ball ⇒ - if (count < NrOfPings) { - println("---->> PING (%s)" format count) - count += 1 - reply(Ball) - } else { - sender.foreach(s ⇒ (s ? 
Stop).await) - gameOverLatch.countDown - self.stop - } - case Latch(latch) ⇒ - gameOverLatch = latch - } - } - - class PongActor extends Actor with Serializable { - def receive = { - case Ball ⇒ - reply(Ball) - case Stop ⇒ - reply(Stop) - self.stop - } - } -} - -/* -object ClusteredPingPongSample { - import PingPong._ - import BinaryFormats._ - - val CLUSTER_NAME = "test-cluster" - - def main(args: Array[String]) = run - - def run = { - - // ------------------------ - // Start cluster of 5 nodes - // ------------------------ - - Cluster.startLocalCluster() - val localNode = Cluster.newNode(NodeAddress(CLUSTER_NAME, "node0", port = 9991)).start - val remoteNodes = Cluster.newNode(NodeAddress(CLUSTER_NAME, "node1", port = 9992)).start :: - Cluster.newNode(NodeAddress(CLUSTER_NAME, "node2", port = 9993)).start :: - Cluster.newNode(NodeAddress(CLUSTER_NAME, "node3", port = 9994)).start :: - Cluster.newNode(NodeAddress(CLUSTER_NAME, "node4", port = 9995)).start :: Nil - - // ------------------------ - // Store the actors in the cluster - // ------------------------ - - // Store the PingActor in the cluster, but do not deploy it anywhere - localNode.store(classOf[PingActor], PING_ADDRESS) - - // Store the PongActor in the cluster and deploy it - // to 5 (replication factor) nodes in the cluster - localNode.store(classOf[PongActor], PONG_ADDRESS, 5) - - Thread.sleep(1000) // let the deployment finish - - // ------------------------ - // Get the actors from the cluster - // ------------------------ - - // Check out a local PingActor instance (not reference) - val ping = localNode.use[PingActor](PING_ADDRESS).head - - // Get a reference to all the pong actors through a round-robin router ActorRef - val pong = localNode.ref(PONG_ADDRESS, router = Router.RoundRobin) - - // ------------------------ - // Play the game - // ------------------------ - - val latch = new CountDownLatch(1) - ping ! Latch(latch) // register latch for actor to know when to stop - - println("---->> SERVE") - - implicit val replyTo = Some(pong) // set the reply address to the PongActor - ping ! Ball // serve - - latch.await // wait for game to finish - - println("---->> GAME OVER") - - // ------------------------ - // Clean up - // ------------------------ - - localNode.stop - remoteNodes.foreach(_.stop) - Cluster.shutdownLocalCluster() - } -} -*/ diff --git a/akka-cluster/src/test/scala/akka/cluster/sample/ComputeGridSample.scala b/akka-cluster/src/test/scala/akka/cluster/sample/ComputeGridSample.scala deleted file mode 100644 index daf817872e..0000000000 --- a/akka-cluster/src/test/scala/akka/cluster/sample/ComputeGridSample.scala +++ /dev/null @@ -1,91 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. 
- */ - -package akka.cluster.sample - -import akka.cluster._ -import akka.dispatch.Futures - -object ComputeGridSample { - //sample.cluster.ComputeGridSample.fun2 - - // FIXME rewrite as multi-jvm test - - /* - // run all - def run { - fun1 - fun2 - fun3 - fun4 - } - - // Send Function0[Unit] - def fun1 = { - Cluster.startLocalCluster() - val node = Cluster newNode (NodeAddress("test", "local", port = 9991)) start - val remote1 = Cluster newNode (NodeAddress("test", "remote1", port = 9992)) start - - Thread.sleep(100) - val fun = () ⇒ println("=============>>> AKKA ROCKS <<<=============") - node send (fun, 2) // send and invoke function on to two cluster nodes - - node.stop - remote1.stop - Cluster.shutdownLocalCluster() - } - - // Send Function0[Any] - def fun2 = { - Cluster.startLocalCluster() - val local = Cluster newNode (NodeAddress("test", "local", port = 9991)) start - val remote1 = Cluster newNode (NodeAddress("test", "remote1", port = 9992)) start - - Thread.sleep(100) - val fun = () ⇒ "AKKA ROCKS" - val futures = local send (fun, 2) // send and invoke function on to two cluster nodes and get result - - val result = Await.sync(Futures.fold("")(futures)(_ + " - " + _), timeout) - println("===================>>> Cluster says [" + result + "]") - - local.stop - remote1.stop - Cluster.shutdownLocalCluster() - } - - // Send Function1[Any, Unit] - def fun3 = { - Cluster.startLocalCluster() - val local = Cluster newNode (NodeAddress("test", "local", port = 9991)) start - val remote1 = Cluster newNode (NodeAddress("test", "remote1", port = 9992)) start - - val fun = ((s: String) ⇒ println("=============>>> " + s + " <<<=============")).asInstanceOf[Function1[Any, Unit]] - local send (fun, "AKKA ROCKS", 2) // send and invoke function on to two cluster nodes - - local.stop - remote1.stop - Cluster.shutdownLocalCluster() - } - - // Send Function1[Any, Any] - def fun4 = { - Cluster.startLocalCluster() - val local = Cluster newNode (NodeAddress("test", "local", port = 9991)) start - val remote1 = Cluster newNode (NodeAddress("test", "remote1", port = 9992)) start - - val fun = ((i: Int) ⇒ i * i).asInstanceOf[Function1[Any, Any]] - - val future1 = local send (fun, 2, 1) head // send and invoke function on one cluster node and get result - val future2 = local send (fun, 2, 1) head // send and invoke function on one cluster node and get result - - // grab the result from the first one that returns - val result = Await.sync(Futures.firstCompletedOf(List(future1, future2)), timeout) - println("===================>>> Cluster says [" + result + "]") - - local.stop - remote1.stop - Cluster.shutdownLocalCluster() - } - */ -} diff --git a/akka-cluster/src/test/scala/akka/cluster/storage/InMemoryStorageSpec.scala b/akka-cluster/src/test/scala/akka/cluster/storage/InMemoryStorageSpec.scala deleted file mode 100644 index 762b189bd2..0000000000 --- a/akka-cluster/src/test/scala/akka/cluster/storage/InMemoryStorageSpec.scala +++ /dev/null @@ -1,241 +0,0 @@ -package akka.cluster.storage - -import org.scalatest.matchers.MustMatchers -import org.scalatest.WordSpec -import akka.cluster.storage.StorageTestUtils._ - -class InMemoryStorageSpec extends WordSpec with MustMatchers { - - "unversioned load" must { - "throw MissingDataException if non existing key" in { - val store = new InMemoryStorage() - - try { - store.load("foo") - fail() - } catch { - case e: MissingDataException ⇒ - } - } - - "return VersionedData if key existing" in { - val storage = new InMemoryStorage() - val key = "somekey" - val value = 
"somevalue".getBytes - storage.insert(key, value) - - val result = storage.load(key) - //todo: strange that the implicit store is not found - assertContent(key, value, result.version)(storage) - } - } - - "exist" must { - "return true if value exists" in { - val store = new InMemoryStorage() - val key = "somekey" - store.insert(key, "somevalue".getBytes) - store.exists(key) must be(true) - } - - "return false if value not exists" in { - val store = new InMemoryStorage() - store.exists("somekey") must be(false) - } - } - - "versioned load" must { - "throw MissingDataException if non existing key" in { - val store = new InMemoryStorage() - - try { - store.load("foo", 1) - fail() - } catch { - case e: MissingDataException ⇒ - } - } - - "return VersionedData if key existing and exact version match" in { - val storage = new InMemoryStorage() - val key = "somekey" - val value = "somevalue".getBytes - val storedVersion = storage.insert(key, value) - - val loaded = storage.load(key, storedVersion) - assert(loaded.version == storedVersion) - org.junit.Assert.assertArrayEquals(value, loaded.data) - } - - "throw BadVersionException is version too new" in { - val storage = new InMemoryStorage() - val key = "somekey" - val value = "somevalue".getBytes - val version = storage.insert(key, value) - - try { - storage.load(key, version + 1) - fail() - } catch { - case e: BadVersionException ⇒ - } - } - - "throw BadVersionException is version too old" in { - val storage = new InMemoryStorage() - val key = "somekey" - val value = "somevalue".getBytes - val version = storage.insert(key, value) - - try { - storage.load(key, version - 1) - fail() - } catch { - case e: BadVersionException ⇒ - } - } - } - - "insert" must { - - "place a new value when non previously existed" in { - val storage = new InMemoryStorage() - val key = "somekey" - val oldValue = "oldvalue".getBytes - storage.insert(key, oldValue) - - val result = storage.load(key) - assertContent(key, oldValue)(storage) - assert(InMemoryStorage.InitialVersion == result.version) - } - - "throw MissingDataException when there already exists an entry with the same key" in { - val storage = new InMemoryStorage() - val key = "somekey" - val initialValue = "oldvalue".getBytes - val initialVersion = storage.insert(key, initialValue) - - val newValue = "newValue".getBytes - - try { - storage.insert(key, newValue) - fail() - } catch { - case e: DataExistsException ⇒ - } - - assertContent(key, initialValue, initialVersion)(storage) - } - } - - "update" must { - - "throw MissingDataException when no node exists" in { - val storage = new InMemoryStorage() - - val key = "somekey" - - try { - storage.update(key, "somevalue".getBytes, 1) - fail() - } catch { - case e: MissingDataException ⇒ - } - } - - "replace if previous value exists and no other updates have been done" in { - val storage = new InMemoryStorage() - - //do the initial insert - val key = "foo" - val oldValue = "insert".getBytes - val initialVersion = storage.insert(key, oldValue) - - //do the update the will be the cause of the conflict. - val newValue: Array[Byte] = "update".getBytes - val newVersion = storage.update(key, newValue, initialVersion) - - assertContent(key, newValue, newVersion)(storage) - } - - "throw BadVersionException when already overwritten" in { - val storage = new InMemoryStorage() - - //do the initial insert - val key = "foo" - val oldValue = "insert".getBytes - val initialVersion = storage.insert(key, oldValue) - - //do the update the will be the cause of the conflict. 
- val newValue = "otherupdate".getBytes - val newVersion = storage.update(key, newValue, initialVersion) - - try { - storage.update(key, "update".getBytes, initialVersion) - fail() - } catch { - case e: BadVersionException ⇒ - } - - assertContent(key, newValue, newVersion)(storage) - } - } - - "overwrite" must { - - "throw MissingDataException when no node exists" in { - val storage = new InMemoryStorage() - val key = "somekey" - - try { - storage.overwrite(key, "somevalue".getBytes) - fail() - } catch { - case e: MissingDataException ⇒ - } - - storage.exists(key) must be(false) - } - - "succeed if previous value exist" in { - val storage = new InMemoryStorage() - val key = "somekey" - val oldValue = "oldvalue".getBytes - val newValue = "somevalue".getBytes - - val initialVersion = storage.insert(key, oldValue) - val overwriteVersion = storage.overwrite(key, newValue) - - assert(overwriteVersion == initialVersion + 1) - assertContent(key, newValue, overwriteVersion)(storage) - } - } - - "insertOrOverwrite" must { - "insert if nothing was inserted before" in { - val storage = new InMemoryStorage() - val key = "somekey" - val value = "somevalue".getBytes - - val version = storage.insertOrOverwrite(key, value) - - assert(version == InMemoryStorage.InitialVersion) - assertContent(key, value, version)(storage) - } - - "overwrite of something existed before" in { - val storage = new InMemoryStorage() - val key = "somekey" - val oldValue = "oldvalue".getBytes - val newValue = "somevalue".getBytes - - val initialVersion = storage.insert(key, oldValue) - - val overwriteVersion = storage.insertOrOverwrite(key, newValue) - - assert(overwriteVersion == initialVersion + 1) - assertContent(key, newValue, overwriteVersion)(storage) - } - } - -} diff --git a/akka-cluster/src/test/scala/akka/cluster/storage/StorageTestUtils.scala b/akka-cluster/src/test/scala/akka/cluster/storage/StorageTestUtils.scala deleted file mode 100644 index 71ad994356..0000000000 --- a/akka-cluster/src/test/scala/akka/cluster/storage/StorageTestUtils.scala +++ /dev/null @@ -1,15 +0,0 @@ -package akka.cluster.storage - -object StorageTestUtils { - - def assertContent(key: String, expectedData: Array[Byte], expectedVersion: Long)(implicit storage: Storage) { - val found = storage.load(key) - assert(found.version == expectedVersion, "versions should match, found[" + found.version + "], expected[" + expectedVersion + "]") - org.junit.Assert.assertArrayEquals(expectedData, found.data) - } - - def assertContent(key: String, expectedData: Array[Byte])(implicit storage: Storage) { - val found = storage.load(key) - org.junit.Assert.assertArrayEquals(expectedData, found.data) - } -} diff --git a/akka-cluster/src/test/scala/akka/cluster/storage/ZooKeeperStorageSpec.scala b/akka-cluster/src/test/scala/akka/cluster/storage/ZooKeeperStorageSpec.scala deleted file mode 100644 index 8767ccf88e..0000000000 --- a/akka-cluster/src/test/scala/akka/cluster/storage/ZooKeeperStorageSpec.scala +++ /dev/null @@ -1,132 +0,0 @@ -// package akka.cluster.storage - -// import org.scalatest.matchers.MustMatchers -// import akka.actor.Actor -// import org.scalatest.{ BeforeAndAfterEach, BeforeAndAfterAll, WordSpec } -// import org.I0Itec.zkclient.ZkServer -// //import zookeeper.AkkaZkClient -// import akka.cluster.storage.StorageTestUtils._ -// import java.io.File -// import java.util.concurrent.atomic.AtomicLong - -// class ZooKeeperStorageSpec extends WordSpec with MustMatchers with BeforeAndAfterAll with BeforeAndAfterEach { -// val dataPath = 
"_akka_cluster/data" -// val logPath = "_akka_cluster/log" -// var zkServer: ZkServer = _ -// //var zkClient: AkkaZkClient = _ -// val idGenerator = new AtomicLong - -// def generateKey: String = { -// "foo" + idGenerator.incrementAndGet() -// } - -// override def beforeAll() { -// /*new File(dataPath).delete() -// new File(logPath).delete() - -// try { -// zkServer = Cluster.startLocalCluster(dataPath, logPath) -// Thread.sleep(5000) -// Actor.cluster.start() -// zkClient = Cluster.newZkClient() -// } catch { -// case e ⇒ e.printStackTrace() -// }*/ -// } - -// override def afterAll() { -// /*zkClient.close() -// Actor.cluster.shutdown() -// ClusterDeployer.shutdown() -// Cluster.shutdownLocalCluster() -// Actor.registry.local.shutdownAll() */ -// } - -// /* -// "unversioned load" must { -// "throw MissingDataException if non existing key" in { -// val storage = new ZooKeeperStorage(zkClient) - -// try { -// storage.load(generateKey) -// fail() -// } catch { -// case e: MissingDataException ⇒ -// } -// } - -// "return VersionedData if key existing" in { -// val storage = new ZooKeeperStorage(zkClient) -// val key = generateKey -// val value = "somevalue".getBytes -// storage.insert(key, value) - -// val result = storage.load(key) -// //todo: strange that the implicit store is not found -// assertContent(key, value, result.version)(storage) -// } -// } */ - -// /*"overwrite" must { - -// "throw MissingDataException when there doesn't exist an entry to overwrite" in { -// val storage = new ZooKeeperStorage(zkClient) -// val key = generateKey -// val value = "value".getBytes - -// try { -// storage.overwrite(key, value) -// fail() -// } catch { -// case e: MissingDataException ⇒ -// } - -// assert(!storage.exists(key)) -// } - -// "overwrite if there is an existing value" in { -// val storage = new ZooKeeperStorage(zkClient) -// val key = generateKey -// val oldValue = "oldvalue".getBytes - -// storage.insert(key, oldValue) -// val newValue = "newValue".getBytes - -// val result = storage.overwrite(key, newValue) -// //assertContent(key, newValue, result.version)(storage) -// } -// } - -// "insert" must { - -// "place a new value when non previously existed" in { -// val storage = new ZooKeeperStorage(zkClient) -// val key = generateKey -// val oldValue = "oldvalue".getBytes -// storage.insert(key, oldValue) - -// val result = storage.load(key) -// assertContent(key, oldValue)(storage) -// assert(InMemoryStorage.InitialVersion == result.version) -// } - -// "throw DataExistsException when there already exists an entry with the same key" in { -// val storage = new ZooKeeperStorage(zkClient) -// val key = generateKey -// val oldValue = "oldvalue".getBytes - -// val initialVersion = storage.insert(key, oldValue) -// val newValue = "newValue".getBytes - -// try { -// storage.insert(key, newValue) -// fail() -// } catch { -// case e: DataExistsException ⇒ -// } - -// assertContent(key, oldValue, initialVersion)(storage) -// } -// } */ - -// } diff --git a/akka-docs/images/faulttolerancesample-failure-flow.png b/akka-docs/images/faulttolerancesample-failure-flow.png new file mode 100755 index 0000000000..6e6fcdf6c8 Binary files /dev/null and b/akka-docs/images/faulttolerancesample-failure-flow.png differ diff --git a/akka-docs/images/faulttolerancesample-normal-flow.png b/akka-docs/images/faulttolerancesample-normal-flow.png new file mode 100644 index 0000000000..c3fa85c25c Binary files /dev/null and b/akka-docs/images/faulttolerancesample-normal-flow.png differ diff --git 
a/akka-docs/images/faulttolerancesample.graffle b/akka-docs/images/faulttolerancesample.graffle new file mode 100755 index 0000000000..1dd802b7d5 Binary files /dev/null and b/akka-docs/images/faulttolerancesample.graffle differ
OZeGqs+2A6dQTaxuTZuhi3LpBDLGWVd0gbtYFIfONG94GXv9LOx/2IYjhTDUF+tnXb8V + BfH5TsjGdddJ5PYhpkT3y+1d1HKoHtJYlvWJ0FuUNMGJWOpELPWPYkrz3/OXh05xibGR + ozbeonrXLP8qSXP6vq/LmAK6iJw/tNNkFLKZVvlf50OR2aScOGuOA30L1lmX+cReXVR1 + 4WTUaLyEBYHiQkxGMtWfozWrxXx/iXL2wBtGwUHEiBa0jGUnWD5hQK7r/HSWJblg/K4n + MTQR4+mszY6EIdqDQ1TjOU5YlgvO5GVTJVk2p2E5QHWdIZiqL3vufGFKKi6IpA1vO5W3 + p3XaFihn+SY4OOhjYo20pzm28LnC4dAOwpgFxnp5mq2vG7hst97QpIJmbPzReibAjG89 + L+cO8kOAqNeAHBDtQHDIcXX48dBBHs8D4nw/Ic8eEnJ8lVrUHTdHQr5hAMz7ziIE5Bv+ + EJy4V5CLz5cV4yAh38yUZZ0CcsSFGZMJuYoF+Ybz8HSRwZ2Qq4/LabVTOSCHX1i983Gw + LXxHXlYZ5ixflNtkOF1vaLXVH8bCc8fK48/B8kBOKC9hYgt8dTydVuJKl1sF9V1zfHRi + 4PWG11caxhHZgmcfyy3/Q7oWy2RMa7Jq/6533D87pvD2XGniO0KRxJbxBIstctFcK+MM + 6TdvHioV4e7srVcPKPzmbT+YjN+TkHHAcqjg8gwR7JlWRRHWVc7O1Q+xJm7Yu0gNzgk9 + Rh6hMFsRAWfSJh6z9l4h3/+uPw9KZkYl1z9aJ8gkGqtO0Mq9GJ3YY86q6iTfvz2D+qha + dOXjv99bcBMmn7SiG9XtzbntqWX8H0Np2o4JUc8JoPyF0w4bPpNPI/XSFm8DKakUC6YA + miuntcxZDbjbFIVL1YIdPyqyZo3Avm7V9gfGM8s3n8mE0oqY5JK7FOsv/HrxbFjNS6rQ + Hs3Hs+9xS/RvSkb+j511z+UM5QvDRjaJJuhdyW+joK43tpkzO7vyZ60rf7NbPRwDbMEO + mWzHvG93CkRbvBsmeXN5O6QntGutcJ4ZLGlcBiveT5iMQLGGzTaB3v9m723sOQ2ytABt + Bv50CVT5JrqA+VNxEtCO3rVWtXxWst0bvUTd60Xdj3kjaCKePO3rzj4AECVNsVIzrYph + GbERLC58OeHGVG4U/gb5lmSC4j8V53hBLmTe0xHqBBxwaozlt3etOGTbOPD8sRK2zhoJ + UJ1ZXEK+mNxnUno4aQmLcg7OKNqnw05zhZkXNAxoSsublkHh2YWsIIsuwy0HwVeCphcU + CnQ+BikHBO/PyhkxwrziCN9WtQF/yf01r2c0j2Md1BPuXAVJYLun5cKWE+3Aqxhg8AAf + w7Rr2ldoSQe4JqF6dSrAo8X0Fna6Evck3uOOEjlx+hbew85Q6dHx9kKHtxMc0Ho3itF2 + 4m0phfN+4BIIvNHu8K0oBp947+jc8LV8XYX3jt514eQG3cBbKQ9oToCWeO9M5wpvJxTe + yplABDKtwJsERylYmxx4ifcg2sJfCaZGfL6g8QW4iVaHd5Juzv/hhsdjt7pr847JF9ST + s3xTqJFnoiK9dyqcuf+Snts6ft+X2VAh9/rRKnFqV3R9eOVcJFtVASfbDIq5MW3jbZEJ + YCI7/EyE7Exe9R+HCNQeDN7a28mXoRWRT4oAens4/kRmC1f9Tl4ihG6wVK8MzA+6LorU + +fpOMK1ptndH3ry0M+8VODib0DbpTcEieGKBqWwLHmjxSdMs2Lxk9K2NNrOgvMp6Xk5E + ubR1GwXhoba0wbMWWgCuUkMEq9nfTFqMgsVwihJ2fVhBWGmq0ciNUPMMq6PFILBW1umM + JlaDv5+itvYdlRV8vMJVc0ewIWpt74hLmfxQtA6iotXkd5Spy4VAYKMQNre2GkQIoRMx + PhClrYSoaAWRUy5t6lWlQcRui+Y7iGoQBVEN9H6KPURtkjcC4HbTIvJ8XdEaUNqxEkwK + kFiqU/YyfoVXSe4S7EN00zt9+J8s8218Uxw+BmFOuTDlj3zyOYTA8Ob9504IdIns6djW + QdySW0nJMrVuMec+Wx6bSUeyleF7aUEnhU69giL//pgZKhRRBLI9zYbLCkgCvR3V2TBy + nlm425tbPdxtjfGbY6OGQpE4G3YzmbJYcRSR5zHMaKgow0JHFqTLChYhfabQV7QBWzwv + EfApDa1nw6SNy4q2H2qogdrLvm1ZgNXKZgGrBrSXO6CdkMhtBOvNsx2EYYPTe6A35E0D + ekd4WSiu1k1WUC6qfjegiTRZYkYCrWIBXQ0VRR11+KmYzwhutAJuwuuKNuAO6JpSAe1T + TogGL14B7aRXuA7lDveOjrIwi8RtkZvaQqf37PbQ+Iy0m974rKM1uQqH3kOTM5X8RUIc + cZqmxCIflYRaGBN4eLzslsuQz5tBkS286OrN5pppZpYLW0W6XOtuA3Arh6GUz1t32cLt + idzbvNmbD/bLzuXOmpGG82beHp3LN3dp0GPRBeSqlSPH1NbyJjsF+VRe/KhTfuxFh/FV + dGDBw0MrnR97UZBZiORbrADn01V0YEGLo07r2csxMC80PzZi0qrjp21tyWkbXfFaKzz+ + GVV8sObHHmou5ceO2XbgiF264jNnOH4QvO+rjbGFBZLSwRl1EhhqFDAeFhg2fJRitQJv + 3JCGANzghFKLIJcMQZyNKDR1Ck4vKwqmgfnDDs6gCKlqywuxll2h/cR1rPp9FR+swdnm + UnDGbDtwAk5mGVjdskRv1FdYQE7jkj6dhtoCA38L6fPHGwtP54tSM80dY7edViwQXNMg + qrgSK4Jz+4m0kShpLs/Z/mpsRW5lbUr+bsiqbMlFCXvuqm6gZBEAhI/Z/hb3a7b/glSy + KL6DxdCx5pyAp98GOysljdHL32kELQIEEqJiOlXD/BKcf9VGEAhN3PbzvIlsAGZ1B6kb + 1sa9hqthFSGGVbgSDPBxEgOyV2pYSeiGlf28tLFWAvANKw3z5vYTHU7TGRv2QcSpMk4/ + lYHYuXc9GMWTu1MNBfHhqbZyhl649SAHm4vjVZe+tC444Jzzq45mbrJA2bK2FasOe+au + pXuDw/Zh1tsErE7S7BEeTtdq1fGjn72ePWadoeuxWnoR9mwzxtwfcreJJs347A45u9E2 + r+2Qu2VZaSibXZlTSJaJeYGxu0awEJHBkxmlmW3P9G+LQKCMm+LmgXjejWhQP/k24a+Z + nizLicbibV1vUttRlC6gzixapuo9R8uJN60H/28/OKIxBLAR8HOZHOSdBYlxNgKBIktC + 2vCUFUHyCEnUvdITEGQy/NZ6hYtYYr9sFPdsErKNeAXmuh3J8zwPMpzQ0YIy7pTkln0g + eVx1qlEUIQfur7SBk1yJM6sf+Kq7kN3AcWEnIbrNV7qBW7MtlGrxJdODupVdAcDBwxfx + 
4nHz1fo3Pm+uu34gkJgiwbLTjOF+ww/EeX76v0pd9xjOf+cPjzQNbyoY1Xu6mnDq5l9e + 703hWXQ48zqpsGBRf+ByB14c/cLHBDth3N8WuattS1Oso1D4PesM1gFuy+iAUK/3hopD + SLcK31NkJFmxZ5nnDkF8u7ghjlHm3EI4FNGG2MIzwpkdJPYWPqOd3K4iiW3Yo2j2WKD4 + faKMOmXhZH+KQsZ9bbwz7XVtb8W7q4sgvIcFciBBsqh+EWt0LzISfOOtEOdFmtBTN50k + ddNJUg5/0eVRDTgmwOJJI8wSTlybbKKByW+Y1XSSIFEU00kS02lzzH5Lqj0M31azhur9 + kct+W884p269jWzwe7/t5wyk1tH7qXfSvmZJLvioLxMhtg27gkwzE1IE1QnVQVOKg2gI + x6qHni6aXDeoQkQvrupJ/7hv8f701bmRNcEPV6ICX9Cyb2U7GK3rW2ZK0rLvrl7ru28x + mOXhSuQI2kz9JO7Dyy1kdiPq3uAsy5DZK75fk2qGVKCMAJZsJlnhCr1WL1AW8hhqvkJt + NdA3JKJvWnyMctXsUK6+O/RavUBZK5x9d/W6vrsWX0K5Ws799ECsN5RvpPqbTsm5Rxkl + sw/EEx3rMZGUQGm7Q3nNeg1lJd0JZUvrjz3PTKtm445nUM6aDRO7MGZ9d+hVi91ezr4x + MmrPd31nizeH9m0go1pOlMViN9p8Q/l2L+9fvJcJ55MOTMBlXyYxLK6mE58cwNmnOH09 + xbBXERuZK+lUtqfi0Ku3H4JJ4lNUwujsuiBVzEVRPiUA13XhT7suePsVzKojDe8luNiU + Q/8xiS8P+8jCEj4B10oMm1yQhKueBlxcMHVsA6567mA/hqsqOVzZRcDVnjpc2UXC1Z6/ + CldV/bFwPfTGPbYBLUGuwYV+38OVTx2udeZLFrYVE658/iJcWSngii4SrnpqcFUXBVc9 + fx2urPpj4To1ZnygNJorRh6uMFndWxBFFEd5E3CYx3EeBCKncrl0xtxjjdRf57LIZrbc + hk23E5Xyd+WrIi0jSiil5rjIuo+0Uqm8yORMy1jRAS+6ZqOkz4PcCEnooBGHIY6vTEYS + 1rACldKIYmbnvVlJuipUFLQ3nATnyzwToIlaMqHUemupUbI/NLiqpd2DBHCzdz5oiSzA + CXFrxupEAh2OdW7/zIqvQcO8JbQADU8lCQxmbhFZVPSWKJTCzsrcvKCqE4YKSa73MMYU + tC2ajAO6U4QXjRyNh1MK/XkiOUr9EbmU11NZ+6Sb0RRDoPUOKGTIRdHLBl2bHgdHAZrU + yN7AT/bQCmlv7PSFC0xKYLWiu0Czsh1rWXJvccDpUj+AJ0XDjmVptAK8Wuoo3h/KeKOx + LCTutmVBcoLqgZElH4Iti2UGnUmV7ZZlxzKcSMoj15X10bLs8B/fTCDVP5aFrRsKbbcs + Ratl4a6LMnJIss9lUX/nw3JIY1lonFQl0nA6oHDqTIq7d9AFnO/lIcpl6UB+UWtKcBdU + E+MCO6CGK92UrfVcpkekhf9rmsEZwB0zF3L48kzxe8ULtu4UL11dmMXWZBiRAuVsrRC+ + MSNnmme1mTuceohajvCgocjbHiIfN2mA9aDFh4df1cSEZbXdgFFWsPVNGoXTsA2zxZ3c + LqsHLfvu6rW++xZfXKhoGbHjMwj19rVckltFdybmfmNPPBf/WxX/NyAjWVKJ3uR3g6wR + mBqswufOkKFGUE6nnrNC5I0fSLakyHMhH09HQJXlYqTdtYNxcP4iqZK28IUPovtJmZFP + gO1jyTpi5qTlLdwiZP/Sf0nrRpz6AGNGDFA+LIuO3E7ymQ2Q1bYIhiYWODHcHVZYOKFm + vrFwyAil6Ts6nKnyinZYkDmNS/q4YAtJiBgWSWOWgYVTuA1R8646HRbeON6jxKL6Lyxi + gNczusIiJ5kb8tE1X4lgZylEyIHvxzXbth9bmgjJL8d+6DbUXZrIm5//4QecaTc70g50 + ux1/fT1eN2FIc8AhhqzmUgiKtyKnSpdNEsqH+IwsDVIcoxoUz6mdyd8/6WtYjYAU1+2S + 93az0Rpig8U3tKqWBDpf3upaxoljlar7JLCsPkhEaNRhVZWiQ8KI5QvZexZv0rCz6UaI + 7mlIMSmr4yR87NeEuxraVFmHHW4zU+/xWs4+h0N2qeOYY26IeeDwGm3O69czRHKJCJrl + l+YSoyJ1S1S0xHrnbgQWwFzrwZcq82NzVQeWQCGXVzpJJ6lC+kRdLT6fAfJK0f1QhFqi + sUjdEhUtYaumk6DvD6l7OfcC2iR9AaF7KWf2oVY2Z087sWMKoDtg+yWqWh+H5Nnnrh4r + VUnfeCI9lG3vxvv/W4aJMvR//Vu7V25Rly9N1Gf+nnJFep6vojgdHwAXUXb8ePb1wefz + 9aMqn/RCHGmjoGkCtqau3NfYO/GFnIjyPjQUN3jZhAJqsnGThkGmkTBM3mETei0+4YXh + 4XfVFPE3pFstLJn71uSQfiGfX4d8vmWZyj6ZHEMte42BJHkuqgRvxBiyFsdLG0PX2os7 + Joec87hXDzlhOQuIHNwGb3vXZWdh5nwSwAd+OtS+DYOLuxJf2GbGUgZsiHZ/gJWiZFnW + BFGsBHcjNezWodRGLwqAFxMJbM/gFlJ1tpLuLHKpmaKpL60oxxGq3XX1gHe2QzSSCW4t + Vs4cWtRx0CzWomHIucWK3cRlgtlS96Wrk0ig/a33SGtw7R1NB/QuunyQtEUBO68VLTVK + 9CfTqGrxwyzWRea55VejlJjFKl88V77oD7dyWKwLsz0IU1HTDFa84kRLT1YpDFbS05+w + 38ng43psDpOmMDr1XtCUJHagPkJxg5Xp6SwgPVPdpcGK9cwCeGK9W6wFFPq+W6xFadNj + WW6pdp7VlUMtiaXZ8VdcIa03JN7dYtXHoRzMnaxlt1hzWZThb07dWpaiAG8uS9LwTsSy + VEsdJU26tiy499xirWU5AYUsVhCPZdFnD0mPk8Way3JiWWSwQohlOQlMGay1LCdsPxu4 + AmmxLEWrZeFKiBmsOGNjWdRdGqy5LDYELNYGFEZyWKwNuppetywN5IzWezTl5lpCgSvV + qVmsqFaxfnykV86QxxYr3gn/0MzzFuuX3m3276u9dHrIR0rqus6KZrEq/OfMWJZo1ht0 + YSUtVrw9wdpVD7AetPjQYCGOH33rs9lpsWbfOyuY1mnVw3OQtOw769nJ8ajFl46Oqj9z + 7dRm5WdHsxD6JXshAeWFqIElZTUXAfeZE3AZZcqE0L9n+1thAy5Dyyx1gvzyeui2qsUD + KEpWiX/8IR/fCytVd3+R/4inpCk4y5VqXaVSLa5Q4761/tEE4j3JVadFy4yiCNGzexJ1 + CS/GdTULRDPRhiuSCa7uRC2BVdV2m3HO30vYhDl/J/gUd6XXyj7NohunUcKPL8uUc63m + 
vwdNOes+f6fIMs2pZR0BWTRDtJt/69kusM8XVDst180sruafE3tp4/GZVYkzXVgzyzQU + jM4eZYknVUCU3/hHiH7//fsMdlcKx6eIiHdf8f3uR6SclcbDOapdGepbfGam6W7YrWbT + pO6I4FS5KW34S6yBe5bvFUWdC103/h0a2slhnPAg9f0gja774fPC3sJLKGdr8a0cMyRe + Y+3beODcxSB+3GVezTeNL5LhAFWp/tK4lb+i2Wnsj3gk68FvhpK+JYKMxgTLt17xQ24I + T+soeIw8IbMR+Gqmw+rP9fFHL8N1OgTyuZbzuoUvMgKuX6lO5V2ytY3npuWy1tlpPK9O + 48uPL+eO1ABtSia2r8/XZ29nH82X8wj8ui/lqUuk8XEkxd0xZDPnjJbtOV1c5mp7gTPV + 7vSye1sLQDEjTewuF9EPu+dLMmD1Ei/FxrbD2wzT+3RAtgUOZIIyct3zMRdFeTEl+Igz + qtNOTArj0m6/IT/pUrcGOxrJVvqmRr6rYSmvymiYtGe1J1q+izMu2mt9NFobS7xLe3w4 + 98EIX05wqjcUJUJGKwRT/aHM4jaD1sYlc6rRYvz1bjcnfSaY/9pE/66datFe9sGRgfbu + 9dpY2rvdnKqX6516m0RYK7vDzkLwgTWJdjtN6IPohffSnu87d5+mjZ/2adohv8jebeS7 + 7tKDcq188qkjNk6XO3ivdtaluMqo+kv9qhzCDzm2evTv+YmLenSV+9wNtZQC7gOj1uAJ + MTcIyo/9xAjGT+/RZ0JrGGESI2gkrfAaK7J5s/JHPtilgqKIrZk8VvqKqdtf5dZfm8N4 + TqWJIjL5r1ggOsnTRdjTepYk3ghpcoWn0XiGjokMxyeNucFWTIpdIDqf+CYP3yKsWmw/ + XLo0UC2RFsO5RwSSnvK3bnPmWIJmunCj5pgi+slMHkc/6w19/OV8kHvwEVGM0hWlbk5F + q9HqEsCFT7208cua46K4LsJlLYuL4QNVnmWgI6OTJD8UHcVs4nc3p6JJnNyN8JW0FuHE + uBzZuyQgWiO4Dujy4PRZLWhY9zfVSKNtObIvn7/utJbzhpRY4grsjZlQvsWl9rwfY74l + q8PpYAe7jBALIFWd7x+0dK9Y2Zkj55da46MIug7Q9Tijs6E8IAqzzoLWRh0o2aPXgQLG + WataipOoEgev7qhl7Rjz/UGMK5KMAXJO7/xkP29f2ngZTz4qoPHunAhExZibqelK9FCR + T3bqMTzkCb6IDXzs9fD7u7cfI+h9cAdWabXsK+tDjgxl2UYf7FDPurU+2sPsI95mhB1m + d18m8Z5yhPeIodnO3EQlP/n2LBje/DzF7d+RdNn2YydU+aCXttPun+P01ptdQeLKxH/v + gc91Lq7pUkYguMPhF+2g+T77KbFeaeSf8yZ0PcoPruWDz2WVfHxO9L86yuNYCa5e7gdZ + 46hO6iyqAyutoxxQ1Bju73lnzZpvQZDvYkUR1B/f/DkR8VeGRtCtn7YUv/w/GDHqMwpl + bmRzdHJlYW0KZW5kb2JqCjIyIDAgb2JqCjg3MTQKZW5kb2JqCjIwIDAgb2JqCjw8IC9U + eXBlIC9QYWdlIC9QYXJlbnQgNCAwIFIgL1Jlc291cmNlcyAyMyAwIFIgL0NvbnRlbnRz + IDIxIDAgUiAvTWVkaWFCb3gKWzAgMCA1NzYgNzMzXSA+PgplbmRvYmoKMjMgMCBvYmoK + PDwgL1Byb2NTZXQgWyAvUERGIC9UZXh0IF0gL0NvbG9yU3BhY2UgPDwgL0NzMiA5IDAg + UiAvQ3MxIDggMCBSID4+IC9Gb250IDw8Ci9GNC4wIDI1IDAgUiAvRjMuMCAyNCAwIFIg + Pj4gPj4KZW5kb2JqCjQgMCBvYmoKPDwgL1R5cGUgL1BhZ2VzIC9NZWRpYUJveCBbMCAw + IDYxMiA3OTJdIC9Db3VudCAyIC9LaWRzIFsgMyAwIFIgMjAgMCBSIF0gPj4KZW5kb2Jq + CjI2IDAgb2JqCjw8IC9UeXBlIC9DYXRhbG9nIC9PdXRsaW5lcyAyIDAgUiAvUGFnZXMg + NCAwIFIgL1ZlcnNpb24gLzEuNCA+PgplbmRvYmoKMiAwIG9iago8PCAvTGFzdCAyNyAw + IFIgL0ZpcnN0IDI4IDAgUiA+PgplbmRvYmoKMjggMCBvYmoKPDwgL1BhcmVudCAyOSAw + IFIgL1RpdGxlIChDYW52YXMgMSkgL0NvdW50IDAgL0Rlc3QgWyAzIDAgUiAvWFlaIDAg + NTc2IDAgXQovTmV4dCAzMCAwIFIgPj4KZW5kb2JqCjMwIDAgb2JqCjw8IC9QYXJlbnQg + MzEgMCBSIC9QcmV2IDMyIDAgUiAvQ291bnQgMCAvRGVzdCBbIDIwIDAgUiAvWFlaIDAg + NzMzIDAgXSAvVGl0bGUKKENhbnZhcyAyKSA+PgplbmRvYmoKMzIgMCBvYmoKPDwgL1Bh + cmVudCAyOSAwIFIgPj4KZW5kb2JqCjMxIDAgb2JqCjw8ID4+CmVuZG9iagoyOSAwIG9i + ago8PCA+PgplbmRvYmoKMjcgMCBvYmoKPDwgL1BhcmVudCAzMSAwIFIgL1ByZXYgMzIg + MCBSIC9Db3VudCAwIC9EZXN0IFsgMjAgMCBSIC9YWVogMCA3MzMgMCBdIC9UaXRsZQoo + Q2FudmFzIDIpID4+CmVuZG9iagozMyAwIG9iago8PCAvTGVuZ3RoIDM0IDAgUiAvTGVu + Z3RoMSA3NTYwIC9GaWx0ZXIgL0ZsYXRlRGVjb2RlID4+CnN0cmVhbQp4Ab1Ze3gUVZY/ + t6r6EUgg73Se3Z1K5x1CEkgCQWlCd0h4hgRCOvJI50USCcYQMuAIExFUAkaRp+CiiLJI + BmmSLHSCIPLBqJ+O4guVRUcFdWY//dgdddcdSPf+bnUnQ/KNDn+4Vn2n7zn3dc79nVOn + 6t5ubVldS37UTiIVV9ib60i5oixEzFLdZG/2yIHfozxf3dZq8MiqRCJxRV3z8iaPrN1B + NCp6+Yq13vFBV4g0P9bX2ms87XQTZXY9Kjwym4Ayrr6pdY1HDuxFaV5xT7W3PcgJObbJ + vsarnzAfGVbam2o9/aN0KFOb71nV6pEj30dpaW6p9fZn5bDvbWKo9ad7yIcaSUMCeH9a + Asv+PCqaJLTydlz73vC5vGzslB8oQKvIy+Y8ppR/ML504ceOmwm+T/hEYrTPYH9eqpNc + SUS+DO0TfZ/QEutRhgz++DupNMVJRaCpoImglJRpOmpnh+hx0DMgkRrYFloL2gx6EiQN + cS9A6mNbuiWtuZ+tpQg20zxa0i8IDtfrRo3Wv+tk6t79+o91V0+xcHjvcxbe7Uc+00ax + Z9jTVEN69jyZ2H1USIlsb0/SCn0lml6gZlA7SFR+GXuhOyZTf4alkkliGBNPMRI7of86 + 
I03/ZYZTYN36cwlOCcUrMZDMY/Vno/frX45erj8D6vI0HUlCjxP6F6JX6LfHONnebv0T + 0U6GMds8xepoDD2hb0rapa/JUNpn73IKXd36SWgvM4/WZ+ca9ROjr+nTE5xaBjkterY+ + OeOP+jgMRDcDJjWZA/RR0dv1k9EUE21NmAw6xY6wfZTM9nWbZur7wWK5PUVJubuc7Lc9 + hYkZJie7z5xdmLgrqTDBlDRbb0oqSEgAX/aaZqPmLs00TaYmRZOoidcYNZGaYG2g1l87 + RuurHaXVajVO9vvuqXr1KdZFUwFLV49WrVU52YuolE6xo0rl0ZNaSStoSRvsdH+G4GUU + 7GRdvf6cA3NCrXBqJzuKuOBVR816iXOS0uAvcB4/+CWBaQWaSQ72qFNNm0LbpuqmBt4Z + MKnA8lM/lUrL4G/KT186Fu3YNau03HEk2ubI5Iw72jbYXTfI/GTZuhpNtfkpKbNK1va0 + NTfWWWtla6VsrQVVOra01esc7VUGw/HGZt5gcIjxlVXV9by01zqa5VqLo1G2GI63KeNG + NNfx5jbZcpzqrAvKj9eZay3dbeY2q2y32Hqq8luWDNO1eUhXS/4/0JXPJ2vhuqqUcSN0 + LeHNVVzXEq5rCddVZa5SdPHFWxtK81e1IjoN1oZZBkdiqaNofkW5w2C3WZzsECotq0l1 + lvxVpylR1U4RUjrpidwfgy7z0rXQ/ZXqVfJ3Nbn/S8yDU/s4Ca6pU+gsPUr76Bip6TD4 + RFpKe+h11ohnezH10iUWQ+OQeyVy0mx6k7nd71AdPYf+rXSOdtJx8sWYJgpBayczue+D + bAZfRRvdz1Ic5dJDdJomYdZO+tb9grsHrSW0kI5QF8a/wWThuBTkftF9jbQ0H3NuRMs7 + 7tnuYxRIqZRPxajdSGeYSbzsricd5cG6p+hpOkCv0DdsA+t117vb3BfdnyNUdRRFpbjX + sV72uXhMesj9lPs/3C4gkUjJ0FpJ2+kg5j+G+yxSq5XdzVrZdrZTMAsbhF5pkyrMNQAc + kmgG7kJk5UeAQB+dp7/S/7Lrgk70F1vFC+6J7u9oNM3CKvlKaqkN98O4O7GmU0zNxrPp + rJitYzvYTvaekCwsFMqF3whrhK/EueJica34nrRK6lZtVe1Rj3b94D7lftX9AYVRNN1F + LbQeqztHF+l7+hsTMVcUM7E8ls+W4m5n+4Q+doD1CcXsLLsoHGF/YlfZdXZDUAm+QoiQ + IrQK24Uu4Zzwltgg7hSfFP8k/iDdqRJUB1Rfqk2af3dVuTa73nLnuT93/4gUqyUjPJNP + c2kZ2bHaZppAv8MqjuI+Bq+dpwv0unJfZVH0Lf0IFIgFsgiWyebgnsvmsTrWwPazftxn + FFv+W4AjBB8hQAgTooRSoUpoEtqFD4R2MVJMFmeKFeIx3K+Jl8Qb4g1JJQVJIdIMqYi2 + Sk3SXtyHpMNSt/S2apLqTtVcVZmqXbVZtVWsVr2juqRer+5Ud6uvq/8TaXG25h7NVnjn + dcTsK4OvNKWUWBysz6SVVM0srIp2wRsHmJ06EF017BHg1UyJ7iXienGGMB7RcIZ+i2jd + S+tos7iYDrg/Eo/Qh4iUFZitnf5Vyqdo1W54ZwONRxR5b3NSclJiQrwpTo41GpDyoyIj + wnVhoSHBQYEB/n6+o0f5aDVqlSQKjFKtckGlwRFf6ZDi5cLCNC7LdlTYb6moxKNscBQM + 7+Mw8HF2NA3raUbPuhE9zZ6e5qGezN8whaakpRqsssHxR4tscLKK+eXgH7XINoPjW4Wf + o/CPK7wfeKMRAwxWXb3F4GCVBqujoK2+w1ppSUtlfWbAMSotlScOM43mEztoun0dEixN + 5z2sjgjZYnWEy+DRJpqs9hpH8fxyqyXSaLShDlUl5dCRltrggJ20xbdGrtniNFNVJefs + i8sdot3mECr5XAEpjjDZ4gi770vd38VBzrr1lkaHYCqw13YUOMyVWwAuFyu5ZN8KaVap + AdMKm2zlDrbJawS3sRGWcnM97wRTZaPB4SPny/UdjZUAl0rKuyPMEUrydVBxeXe4OVwR + 0lL7dOvzjFh9X9q0tGm8zDPq1nvKrx/01L97lpe69ec/QzmrZAgAxhGQi2Cnw1CtKJFh + bC7/qc2ljupc4ITLxrDMBtgz3SEgZkSTQ2UqsjvaSwfNqLd4jKtstHT7hEcoL6F8G/pX + dvhPhqfQ3182dPyAt3Wl/O03w2vs3hq1yf8H4o3c0UOx4mD2Qb6NvyxNWHW9Tq7n/m1T + fApZ1llvqYDMoeE2O4LxAi8uNzoMNlTgazJ1lpN8isuPM9ZpczL3JidZovvwjSouW4rm + VB5qDRboh5CWiopkI7hxqYYCaC7gsWLoMHQU1XQYCgz1CCbJpJRoqO2wpQPB0nLgRAug + 0WyLHGJrbbbJmCedz4Mh6N5hwwyN3hlQKlXpA+g0PhUvUzG+uHx+uaPdEukwW2zwAsL3 + bHG54ywi12ZDr4whS2Hxugad1+ZM2JyRjPYszyz4dmnHFLaODj5nablsdJzt6Ijs4M+b + R3YyGllh9lY4iXfhkDtZezHGopCNkYoPjLIRZtk4phMQ0oMRhW/2n0c4e8hujMyBtdkK + wrm/EMKTbgfhybeFcN6QpcMQngKb8zjCd/x6CN85DOGpP4+wechuGDkN1poVhPN/IYSn + 3w7ClttC2Dpk6TCEC2CzlSM849dDuHAYwkU/j/DMIbth5CxYO1NBePYvhPCc20F47m0h + PG/I0mEIF8PmeRzh+b8ewiXDEC79eYQXDNkNIxfC2gUKwmW/EMKLbgfh8ttC2DZk6TCE + K2CzjSN81xDC5kgH3ZqH20ekXfrFE/PiWyDHl5IqkPKFSSjL6Ji0ippB6yAfR7mRvUob + 1UdoI+dBLaBI4QhtxuZ7Jfq8DLkLUwye/fhiR3ICsoHK+Fb8//US/unsoreHRCpwapxZ + eS4t3uujsCvCgRNOe8bQWKV6AnYUjfQVy8Y+y8HeFOKEpcJe4RsxSrxfPCvyEzcB+w+S + LmLfKmKuqZ4zKW06PhxAWpxR0UUQl8GLV5wkgQi85gr1K9rLUvoxi4rKUsZnZAUYAxJA + +VKn8+YXqtN/m+6U5tzg514C9gSkZtAzioIpkuvBjtYnvR+Y+mFsP87dNMoczJgZqlFL + slGUWTx+jZnZOdnsa2Hn+VdS4lzfferKfPBVKWTKKlfrCrb1oZf1sarTn7z2e/fAduk7 + vUtsefxZ6Gp2X8Fepggo5FGeoqufEmiiokePvV4ZDtsSblkc55MvgrDQieDHgR+XPj7D + lJmTPZWNYWOZWoM7lMGUnOx4ORaSnB2XlRkWqhHVoVkwUTOGybEJ8Tm8iM+5k+Wwr5ZV + Px8XY1qZ1VybsyQ0YBnrMesDfIJb7nt0VnLk4XSmO3i6rs7woHqsyVcfGJ2aFr8kaqxq + 
xrX7d+6ONnyyb3Vq0aFtIVHqMX5R6cvnVgjB2lRd2uLS2cmlf9hXWLhnYHdUrChu8lXn + y+bCxn97ZOdzQVj3Ovdl7OBmYB8eR3rvurXAm+Mbgd0mL/m5JvcVC9WEaozceKwpawzj + C5RjhaBAysoMFbMTsEq1RnrApGLyzesxy3c/unyKfDy4Ka/6d9aS1z7KzWGLv2g5u2ZM + +Lij978liw/PXzHz2YMXlmTPyNs2rjjKn8nYPAss/27XttUFG3o6eDgynFaQWKJ6Fx6A + A5QI8AfiAkiFSOBxyz0zBh4IhAf8UAZfhKXZsChMMw7IqjVZGlmUg+ScrJxsgZX0BsSF + ypHho0syTd0xvee7z++M2RkXpWpbLAjPCWzhi503a8SnOg83Q/dGxPkzqnMUSiav7tHQ + GwIi6FZTgIKOwJ+hgMBJ0JolwtceF8tBXjwQk6d7V5dMrt7wl/ERxif6HgsP1D/cfGnc + ml7VuYEr842TDtr2D8wXDrbllO+9NPAaj3voFVZ5n6+xXs0StAqIL+iQGVbE6k5+cZLl + nkw9KSXfuKQ6/abHXvVl+DKeP30KVoFgIkEmxV7uTSepgZEfsDKiDEMZpsyq8Voedicb + WoDiT0D395Us7zbMz6u7t31aXMi8ntqP0nQxu0/tD62Y03hS3nhyR9jY8Oa611PX9Erp + e+bF3TE1rqCs9KkFnQM5wp/vLu48NLBNONWUOWv/23yV3Lcc3wOwN5zCvPaOgq062KrC + s61EXJbmFnt4gCHmEHjAYHmXofJU/bVxEbFPnHwsxD9yvTl1XkFuVuhvuPalJU8vepaj + WjWlxi80f+K9DQNvc50Ctbg/li5Kc5HzdF6t/cgvXFs//OyNc++CeaQH5ghkjBVyeIwH + ipcNUWn9z78RH1d7sOflz3JcL7n+55PzEyezsq/fviok7Vq642Z31zU2tss14HqRpdxE + HjC7vlH0RroWSu8iv4yhWMKLT/GOESsNUXT2w54oxTsSvOJ/pR/vjijy9USVYg1/1sJg + Tzacr7gqUBT405cQnyDK4meRgYb+U02TjRFBsf3r3x94/liMtaj+tyfO5cz88JG9a2ck + p7T2CjHti4+fqtl7/6JD7wmfdhYlTnH9BXY+u2vZxJiigU/gj8141tbCHwHeTABLFF/4 + KpapYJkW8aL1xAv3y9AjBn+s6EowXLhbpYtK9I/wf+Rk+Ab4oS97nyCeEYVjLQN7MP9K + 94fSbGkWTicTFAT6sfYUBfkg7+pToIPwughCqbuCaPesOQcQpLN45EpvBpUHPRQWqmeh + SKnI/sihLHt0ZMbhmq6gsdmxTQ9vyK2NMjCx769zxuuOaKPSn6xbO2bsFHnmvc5JFYYY + 141Prz8ZyYorxi9adJc1LjQ6Lja56IE9L3VW1N+ROWOuuSA5PCg6PdW647GLHz/DY4fR + y+7rwjeqCkROhteD/EELAEai14t+wI7nArz0sIIQoCWiDOMZKUvkeci7HiX1x+fwpMTe + OGHu0nft9I0NyvCLCYkxWhPWTw3dvU2/TVXh+mD7gDU3aDQTOn20DywXLmxX7OhyX5Ve + kdKREz0Z2/O6HaX4KnDIV34cP49CpOsg5I0cJV8DxSAhcnfA2MgHX6rPNTb3Xj0ij79j + 7fYvM6Oi90npA5NKJlQervgXYcyNd/bfkbzgyZLNwkd8/fjScH8PqsX56T+68B8EcMgi + C85hi5SzVn4SXILT3YX4DlpE5coghlNizzeRGjjS3NIZJWXzUwprV7TVtjZU29PmVa1o + uBf/HnK8B69CMAtANaBWEPIG7QA9B+oFnQe9D7oG+h4DJVAwKA40we290EZDPMPzNVwu + HiHbR8jKmm8Zr/wneYtcP6J/wwhZ+U/zlv4rR7TfM0JuHiG3jJBXjZBbR8irufx/pT/6 + bQplbmRzdHJlYW0KZW5kb2JqCjM0IDAgb2JqCjQ0NDgKZW5kb2JqCjM1IDAgb2JqCjw8 + IC9UeXBlIC9Gb250RGVzY3JpcHRvciAvQXNjZW50IDc3MCAvQ2FwSGVpZ2h0IDcxNyAv + RGVzY2VudCAtMjMwIC9GbGFncyA5NgovRm9udEJCb3ggWy05MzMgLTQ4MSAxNTcxIDEx + MzhdIC9Gb250TmFtZSAvTlNHUlZRK0hlbHZldGljYS1PYmxpcXVlIC9JdGFsaWNBbmds + ZQotNiAvU3RlbVYgMCAvTWF4V2lkdGggMTUwMCAvWEhlaWdodCA2MzcgL0ZvbnRGaWxl + MiAzMyAwIFIgPj4KZW5kb2JqCjM2IDAgb2JqClsgMjc4IDAgMCAwIDAgMCAwIDAgMCAw + IDAgMCAwIDAgMCAwIDAgMCAwIDAgMCAwIDAgMCAwIDAgMCAwIDAgMCAwIDAgMCAwIDAK + MCAwIDAgMCAwIDAgMCAwIDAgMCAwIDAgMCA2NjcgMCAwIDAgMCAwIDAgMCAwIDAgMCAw + IDAgMCAwIDAgMCA1NTYgMCAwIDAgNTU2CjI3OCAwIDU1NiAyMjIgMCAwIDAgODMzIDU1 + NiA1NTYgNTU2IDAgMzMzIDUwMCAyNzggNTU2IF0KZW5kb2JqCjI1IDAgb2JqCjw8IC9U + eXBlIC9Gb250IC9TdWJ0eXBlIC9UcnVlVHlwZSAvQmFzZUZvbnQgL05TR1JWUStIZWx2 + ZXRpY2EtT2JsaXF1ZSAvRm9udERlc2NyaXB0b3IKMzUgMCBSIC9XaWR0aHMgMzYgMCBS + IC9GaXJzdENoYXIgMzIgL0xhc3RDaGFyIDExNyAvRW5jb2RpbmcgL01hY1JvbWFuRW5j + b2RpbmcKPj4KZW5kb2JqCjM3IDAgb2JqCjw8IC9MZW5ndGggMzggMCBSIC9MZW5ndGgx + IDEzMjIwIC9GaWx0ZXIgL0ZsYXRlRGVjb2RlID4+CnN0cmVhbQp4Ab17d3xUx7XwzO3b + e9W2q9UW9YaEGmgRahTJgGyQMMKSQCCaaUIGHmBhU2VMbGMLDO6FjrUIbAQYhzgiQJ4L + xHEjzosTA3HyrDjJAycxaPc7c1fIoC/Jz3/4l706M3Nm7p07c+bMaXPVumRZM1KhdkSj + CVMbF81C0i+zEiFq2owFjYtiuCEF8ldmtLV6YjgbRIieP2vR7AUxXHgSIblz9vwVA88b + tQg5jrY0N86MtaObkOe2QEUMx8MgT2hZ0Lo8huuPQr58/sIZA+2GC4CPXNC4fOD96DPA + Pfc3LmiO3Z+ZAHnCooVLW2N4Ri/kDYuWNA/cj2thfO8jDLUutBDJ0DzEIwpp4apHiP9S + 7kQMtJJ2+M1MUWy7T1N0HekECb+v6kdS/jPxzY/+3nwzoHhc+AdUyG7dT3IuMZKIkBJD + e5/i8cEW6TlIXD2oJrkHjQEoBsgBSE4eZUXteDd6DOAFABrNwY+gFQCbAZ4GYAZL+wA7 + 
jh/pZoTQCbwC2fHYkIJx3220ua1yhfsXPZg7+pz7U+sXJ7ENVu+32NatQrJRcvwCfh7N + RG78KvLhlagSBfHOI4nz3Q3QtA8tAmgHoKUU433driz3WzgF+RgMz/iRi8FvuH+fmeq+ + ktlD4W7324EeBrKfuAALadynnc+5f+yc7X4L4ECsaX8i3PGGe59zvnubqwfv7HY/4ezB + 8MzjsWyZEx59w70gsdM9M1NqH9/ZQx3odudD++SQwp2bJ7pznJfd6YEeAQOe6hzvTsp8 + 150AD8JtHujUF9K5Hc5t7gJocjnLAgUAJ/F+vAsl4V3dvrHuE1CE6R4Zk5jX2YP/60hl + MNPXg1eGciuDnYmVAV/ieLcvsTwQgPLkc/w6/l5+FJ/FJ/NB3s+LfBxvFPSCVlALSkEu + CALfgw92F7u5k/gAKgayHDgicALbg1+DSuYkPiRVHjomMAIlIMHYE/0cmBcjYw8+cBTY + HiMovMFJJa4HHzoSqzoUcjOkxEgNWoqUIYEUUVig0FgUxo/2cGi9ua3YWqwfqcsvL/1X + SYPUcitN/tc/K3aGO8fV1Ib3O+vCWaQQddbdut16q/Av89Zl0NRckpw8btKKI22L5s4q + a/aWNXjLmgEawo+0tVjD7U0ez+G5i0iDJ0z7G5pmtJC8sTm8yNtcGp7rLfUcbpOeG9I8 + izS3eUsPo1lld9cenhVqLu1uC7WVeRtL6440lSypv+NdmwfftaTkn7yrhHS2hLyrSXpu + yLvqSXMTeVc9eVc9eVdTqEl6F5l82ZyakqWtwJ2esjnjPOFgTXjMxKm1YU9jXWkP3g2V + pcsQexpp2VMoyLYjO5OO3AhFPwW4RPLIPdGr7FmkjSyI/oUuhEU9ToCKFBeh0+hRtAt1 + IQ7thXIQTUc70Hk8F/b2NHQUfYRdKA21w77vQePROzgavYhmoVfg/lb0NnoKHUZKeGYB + MkHrVuyLrgQ8BOUmtC76EkpAeWgDOoXyodetqC+6L3oEWiehe9B+dACe/2/spQ4zhuhr + 0ctIQBOhz3XQcjE6PtqF9CgFlaAJULsOvYV99KVoC7KiQhjdM+h59CL6CfoKP4SPRlui + bdEL0d8Cq1qRA9XAtRofxb+lu5gN0Weif4xGgBJBlARvbUDb0MvQfxdcp0G0luF5uBVv + w09RIeoh6iiznrVE+oEOiagCrkqQypuAAsdRL/or+gf+mrLSWrqVPhPNif4fUqBxMEsy + k2bUBtdGuLbCnE5iDmfg0XgCXo2fxE/hD6gk6h6qlnqAWk5dpavpafQK+gNmKdPNbmF3 + cIrI9ejJ6Nnoh8iCnOhetAStgdm9jS6ga+hbTENfDuzDhbgET4erHe+ijuMX8XFqAj6N + L1D78W/wF/hrfINiKSVlopKpVmobdYB6m3qPnkM/RT9N/4a+zoxkKfZF9grn438VaYps + jrwXLYz+Nvp3ELECEmFlSlA1ug81wmwXoWHoQZjFIbi6YNV60Rl0Xrq+wA7Uh/4OVEBY + j+04C1fBVY3vwrPwHPwcPgHXW9JYvqFgISgZpaMslIOqoZqoBVQ79SHVTsfRSfRYeird + Bdc5+iP6Bn2DYRkDY2IqmDFoC7OA2QnXbmYv0828z+azI9lqdjLbzm5mt9Az2IvsR9wa + bivXzX3N/RnE4nh+Ib8FVuc88OxPgJe/+zE4AUafhe5HM3ApbkKdsBov4kbUAdw1E28C + ei1CwWg9vYauoDKAG95C/wXcuhOtRpvpaejF6Cf0fvQxcMp86LId7WFKkJPdDqvzEMoA + Lhq4QolJicGA35fgjRc9IPIdcXab1WI2GQ16nValVMhlAs+xDE1hlFLmLW/whP0NYcbv + raxMJbi3ESoab6togK3sCZffeU/YQ55rhKY77gzBnbOG3BmK3RkavBNrPUWoKDXFU+b1 + hN8t9Xp68NSJtVB+tNRb5wn3SeUqqfyYVFZBWRThAU+ZtaXUE8YNnrJweVtLR1lDaWoK + Ph4CcshTU4jgCCEF6TiMRjeuBgGLRpM7ysJ2b2lZ2OaFMrTRvrLGmeEJE2vLSuNEsQ7q + oGpSLbwjNWVOGMaJHlHO9M58pCeEmhpIqXFabZhurAtTDaQvXXLY4i0NW1ZesX6H3iqV + bbmtMUz5yhubO8rDoYZHgLgEbSBY4xbAxtV4oFtqfV1tGK8fGAQZ41wYKRluTCf4GuZ6 + wjJvibelY24DEBdNqu22h+yS8A2jCbXdtpBNQlJTjlvXFIow++Opo1JHkbxQtK6J5b9/ + OFb/i9Mkt67p/RzycZMGCYAJBbxjYJxhzwzpJV4YbB5JmvNQx4w8oBP86jBMcw6MZ3SY + Ap6hfWHWN6Yx3F5zaxgtpbHBNcwt7ZbZ7JISKqmD+xs6tAWwUnC/1uvpuA7ausHb99Wd + NY0DNZxPex2RRrLQg7wSxo23ym1EWfpg1i1WbwtZ3zZpTQH3WstuqwCckIaMOWwEBT6h + Vgx76qACrMmUcT1INqH2MMZb63pwdH0PKnUeBxuVvm86NKcQVptTCu8HJDUFKpJEKKWl + eMrhzeWEVzwdno4xMzs85Z4WYCbGJ+XQ0NxRlw4UrKkFOqG74Y2hurjBYnNdXQH0k076 + gUfg9o466GHuQA+QS1Xp/XBTRgooU9o/oXZibbi9NC4cKq2DVQD2PT2hNnwaOLeuDu7K + HBwpjHj1HOvAmLNgzJlJ0J4d6wVsl3booq6jg/RZU+sVw6c7OuI6yH6L4T0YDa0IDVT0 + IHILIXkPbp8Az0LmFeOkNRC9IgyrjtB0GLD0LY4Cm/3fUzh3cNzw5HAYba5E4bwfiML5 + 34fCBd+LwoWDI72DwkUw5kJC4RH/OQqPvIPCxf+ewqHBccMgR8FoQxKFS34gCo/+PhQu + /V4ULhsc6R0ULocxlxEKV/znKFx5B4XH/HsKjx0cNwxyHIx2rETh8T8Qhau+D4WrvxeF + 7xoc6R0UngBjvotQeOJ/jsKT7qBwzb+n8N2D44ZB3gOjvVui8OQfiMJTvg+Fa78XhesG + R3oHhafCmOsIhe8dpHAoLoxul8PtQ8Qu+sEF87TbSA6WEqtHJcxS1AsQAsgEGAZA6qoA + xgE8A/7YVIBXuXw0g8pH56W6yaiBfxSiHUvRJHDGCyGvBBiBz6J1gLdz+yFfKkEhtR9t + hroSeNYCde1QVsCrb8WMlODJdAKeicolR55GDGKhjoe623+xgBKJHcmlatKHUiqpbr8N + qQcwzR21MQQiBkgHoAcwABgBTABm8GuskCNkQ3YUB/6ZU8KGoanoKn4Cf0610nL6bvoj + ZhZzjl3EbmLD3Cq+EoIdT8qmyzn5MPkrCo9ileKaskBlV81V9aqnwvMU+CyIuQC+Lg2z + KY7FsYR0MDYABG0PQhcACA5l+rMexAAgKPOfoRPS/Ccnn4BeWDQ5OSMzWyfqAgAlzNae + 
m79jT307uoepugExEaBaL7zsQ/YJ8JS8hwXcg7NDSobhlQzfySJ5hUzb1mvt/bA/HxUX + X3s3M8OQMxIPz9Z5db0/3enfepr+psNQt/vb++lvpL5CMGYX+yyKR7tD1blMOTOFnee8 + 37XStQ5vpIQkYaptnm2VbZXjdRuL4rGGcahtIu+wQdyLdWs08QZ5joH1uJeJ8UrxQT7P + vDBeHdCsdefFJ1R4yTh+2XetT3u97zIqLuovKu7T6fPT9ZZ8DLk+P18HCarPzBi9IuRg + bEqfzq/Qq4NIZuSD2MaotPIgFkyQQARQq8Uk4LAW1efqi3Hu8NycYX5vPM/xXiiLWXqT + kec0mIMK0SSOXf+T02uHTepcfbzCzxyjS5bh4DdfrCh/fXNT3kw7rb6ZeBzrFy0cl1Mz + b/W2LePWn2y7EPnm5YMrK5rH52ZOmbtfoktm9FPGzu4EPu0Nuccoa1KbE2ekLktclsp1 + +vE4IVluTTaq6H9kGnNU4MR4Q0ZdjvZBlSozLieB5XMyVdbOQKmuB6KPGnle2kLKnehZ + Sweo7Iqs26gCtEHFfcV9QJRr/Ve1fVpCH0IbiSS56Rk2P5Kxfqcv3s8hOogYWsgAcji8 + 7iCy+6xBzGAeyJUOiUuMA5r5IQFiJScTYmmLSGntWqAZrmeonGyzxZydFSMcx+e4cHbW + bWQcRsgIniJQ0IVNRuTF5itvKoPlx7YefP1Fvc/g8JubRy3Z0Xy0zM92h+7Hpl/9uSKl + fPGDkb/+PYAt5x4pXrxj+ZNtGD9PU568x+a1Li9Z+cKicz89vm5SttN9uP3dSATICtw9 + DPgtnX0GSio0LRQvo+SCClPUW3qO4ykOs7wA8QVeTi1TsF/TSp6he7DlddypEg7Ke3Dt + EVZToZYoeP1aUT9wVTFkRbp8iWpAuPyNacnMau0ZTWYG1smwTszB2bpsk1dHvRrJwe/1 + b6Ee2/HBBxCa2Nz/QITF08P01pv3PRt5iYwNo5LoZ4wD4lQedDKUXKnf5KbyleWGKYbZ + BqZAUKp4pJRr1OpleoNBr9Z49AYeGSxySw4MLD5kVz2oVjv1BRqGyfGcdap0fJ59Icrz + xFeIsRW/3tcLi91X3A+rffnarZUm2wDGDENGsaWHtbfCfgha3VhG+WkXBIkgxOxhHbAn + ZFZIsJsJIi4OEsEW2xtktbVFZLnJWtcb7ljngAE2BA2bJDuLMRkpMT4h0K9fHbr7hZ3H + 2uvXpz+zgPqy//kRWakT5pzB+huRvq7I/2nxgp2FrndWdb5SGZLR9GuRJX6DGPnpf0d+ + fuYdaQ2ror9ivOxzIDkDaF8o/wE7tgg+IWCrtW1AG/EmGV8hyMWAmKNWG+mzfE4cG8iB + vZJIrXXl6RZa5FSRPCHTklgRlAjTn79q3KTlK9OtICYG9gPZEIRA0jYY5vM7PBoz4li/ + R+MKYr8pIYgcBiiRPYEZ2q0Vg9hnDgSRUw8J2ROSrMCxDUB2wFpcD8cXZpPXHwCpQX1H + Dm880mmH60X9wLYwGc3ZdMWpbq131Lrt3fKR0yfPPYqVkf89H/ls1Go8fu2ja3a3dj3/ + KPvcP9bdkzE18ofIzXtTg1cv/zTyAc6EkJPiBJ757a9//ND9Z3fu2kRimRjicYTf25EG + 1YRyWYWNylMUKPNVY1X3UJOZJuoYL1+lOqo6o6IpGVapC5CGkSkpFWi/hWohT3ZQravQ + SmQCMXqFMDiwPHA8sA0GwVmPTRwFOxbEoN6QO1zMYdLLrtROSXWmnS39cvP2m1+y7c+O + jhw9fXLnjM/wTtz5p0Ovkz34DIzJwl4APeVBz4QqgvpKQ62hWbVMxc5RrlBSfkGjVZk0 + CpnVpFcpGI92Ck6i5Z6fxyVwWK/J1LrxTJqWeax5Mnu8O9NjE+M/EGdUQlS8+lpVX7X2 + mypJzPddIxqIMPxV3XcSXy+tqd3mYgSnz8G6RyE7bx2FXUzcKGwTIAHxRVYvJrV8sHWR + PpvIJY5XY5N3WO6dwh/3nT0b6br24Zm+Kesa8rtLl05IMAeXbdwTSmC7L1xgzmP+t11z + 17XXr13zo67Fd8X7RpU3Pbaq7CGgwdToJXYxewU0vwsdDhXGsdtxJ0u7YXc9hDeymw1s + jUBvcOp0Jq7ASSsLTDIX5XLZ6EyqUJups3tkmTab2/OiOHfW7fO+RjgXVigm07V90mQL + kMPiM/jVvji/wizLQiqjNgvrdRot7wCMRXQWxhRDy63KLKTRQyLYuSwQ7ZAQAQ5sTBg5 + lpKKtSDOBWzxpmGJe4E8w3OHZwMTiJ6AH7g5V/QyLjxM97Z4pvvTyPW/fP3Z0hGut+1P + dEU+jqLXrhw8gSuC7JXIpZNbd0fej5yJRCI/3lf3+JfPntr1Lj6Iyy78Ttrjr4K1OANs + GRVYSrND7o26Tj2VJShcGgq5LIKQabDbVT61zWb/SGzbHKNBv7T2qLi/uF+auB+bdT6T + n+NZnuFpnuJZTq4VYLZmSGR6RRbmjRARlRY8iczLR2ZCdLuW8oo6WvRYzDojTyVi6kLz + qNaxhXbNp3+JPH+OqsHpe56q3RXZ0N+13xRYWPdITQXW4bQbO1jDx29HLv7xVKRbmsOM + 6Kckxgy2jRddDI2M4zbg9RTtxG52A97seMPDhgQNYzLT2gXmNWZKY9apmA3xWp3LoNeb + +IJ42iSoCuwyL+X10i59Dx4X0tJMJl2o9RnsPnmmy5YAh2Wzj4hzF93BA/2SGo+xgaTb + Yb9KVfn1RLfD1pWokxInIqXD5/FjnzJODqsuQsIhJgtTNMsonKosJHPzWZilICFKfYAR + JBYgPIAsZsMgE4hZCQYxR9R5A8AI3luMEKCvbv9V5pmE3x18J/KHq5g5i1k6Moxa357R + XP3wzyM33nz33Fs4TWS/mLA08vmL2yLvRS5Gvo0c+z2mXr35p1MLk8fu+yVeghdfukBJ + suw8CLQnJPvWQk5lT4B4QygtGc5giFlLpWdkGsDOPH/+PDFXoZHImXFwP4syQwZE0ZSL + YQXazmPKxyIbB+eFNUfEtukS9Yqq+ouqtVfBToRCMVitYACbxGfOUl/enAjd/bUL9mxD + 9EPmG1jPdIh5R0LTEzUBr9+fq84RK/xN/pXqBxJk8wSr2uKj6tQt6v3xtFxdEJ8QL6cZ + h3WDMT092VFgpJmCZFkGJVcLuoR4dzAjQ2f1WcYIvqA9y+3TjUG+dFtm1gvi3AGWBo0k + MbW0sfVgZxC4bYPrwJhN68+uXywtaVUwTedGAuWn/Kk+zmf30ykoGaWmSRmbJCRjp8Gd + jOJM1mRss+JUJhnJAopk7FPgNCjziZC49A5oNENC1hzsXmnhpaJkzxHRSCw6SckT+y3g + T8eg1nKGJRANH9NwnMloMbvJPSYjQ7hhOMYuftiMbxdN6x43/qWzP524BZT97/Hok5rM + 
ey+Fd04tvPDeUxO3RJ7938ifdu2iqSp8aXX1E56RLyzPzvKlpuRMO/azyG+utxUvfbJp + fpYnIz2+cHbvtV9seeRPDLhlGPngy4DFsM5KOJh+SpDh5fwK2XLFRryBYSvwOKqUrmSq + hBL5ZmGj/Bx1FkyCcwplrWI236LYTG2gN/CbFU9TnfRT/E7FPmo3/Sq/X6GB02S5oLAJ + ZvkUnlMIjJwaGSwLsj6wEpFPqVTIGEwrYKNwShZRglxB84LaYrGDiNkQEmjmmpySXWtX + ILxBaVNtBQ6zVWuvWav68/PtAAOZrbqsufSqFRVbiuCPuCUbq9L6Nqb1wantURmclYBh + tzOk0RPxzNIMx8sEmVwgdXI9w9BQjZSKjau1wpmNaVY2WQAjbqMAcAsZN3HFEQyH1fDE + G9AdA51IHcpkQqw/8Ikp6EHQnpZAy67stwq91o2ksFroBRt2SX39YlS/xCDD2fCHvTLs + xf3YhMd/gsdj06XImouRQ5EDFyPt7Kkb9zAHCHw7mnn7xkhpl04Ce5acYmngfLII/TqU + l5SB5VpFnNIRyK7UzpHN1fL5gl4po+Oy+ASZU6t0FiZTaYmFxwqpwqwkn17Ls4IjEG9x + 9OCOkNfidPMBZ5qCcuYoiviiIoeRT0zam2AfGZfoGKsJ5NlGjHwTb4fDu+O4Ew1IxNj2 + udzfO6gXwSLQ55NtUw+yMK0vrY/IRDAQpA0UzB1uikfY5sO5GhFZXSAizR6jiMV4NJwS + kd1pEbFJhGRAHsZkoeTgJIBvMzx3BFZjyQ003eEjjgRfB/aCDqy7LHiFGjRnwB8gGeyb + 3OEGrF5SfV9dp9iStaApswYfHWlSPrzy0UJRvpf928un2pZZfEqXLinFX59klg1/b9VT + p05s73h/asqY3Y+bHJxa5UifjecLKdbUaTXjk2p+tquyckf/dkc8Ta9XciXeUOXc1zc9 + 9YoBXya2F5xy0heYainusCeUvseGd1j3Cvut9FhBt8tI00bOaedVTqMijo+Ls2gDegxu + o87ulAcsNgd8+sEfEZes/k7fFFX15ef/M6tjGLIJPqVJ7kdqg9YfszdsgIG9IUr2hsKs + 8oO9AYnMyvmJvSH+E3tDoi0yx6wNHuSKRMFsQjoqR4uyeeqjLyxd2iVrDo7N2PTEoodt + Xa4/n/zFt1j/SwdTHf54xsN7F7zw4mebH/jwDM6+Cke0BfB+VBm9xNiBLx1wEu/DytCK + 7cLT9j1umlVTGtZoUus1JmNIGTIKiXY8TvEGfRb/jD4b94nwqewj9yfeLy1fehVndWf1 + 1DSBFRM0O83OhHyO582i08HLnWaFj9/u2OM45vjYwfjMGrAybXIlr4O4hDPA2gMJaXzA + ZvMHfinuro/Rsf+yJON/2U+csZiGTq8fFPK3tLfEoOXIC1sXjq4xy3BusLb0WoPWqGU4 + pS8+LsEP9rTTj11OmYX3I4VJ7QeT3msXoYqFRLAC/SGq4UeSaJf8E8lHSUpOWosX16PF + 9eCkWIjUFmPe+XAgNDHvJUMPZRM5743nQH8e/SgvV6+9+TX72PZH784wHubvypy0YtSk + c5E/YuvvsFsRHHto1V4We5mKefdMnD/2pZfP1OdWFD6eNsGhBQnCYQqXRPzLyh860oHJ + B2KwJiMihfSXsCZulApfVhwLVeUaxwhjZLVCnWyTcl/cXue+wO7k43EKkK7m+ER1rzwe + xAHDJTptcr1Trknj09JYB51mTktNZO0ZSnVANdIfcNjSMzaKS0oGpUE+oXT/5etA5QE7 + GYzlmHEUM4q8QbtLoUvwaf1el9+PgnZIdAq1iDRqpcrnjPfjQFwi8K1SL0pU/M4oktQi + 2ESWnGwwGjkx3h+IRTyG50o7PUEH7IqAfgNcDLoTU6umZ+fsLloUOX/oK/UxVWDEw++H + /HTujtWvRW5g/gQufeXBt8p921a9fVdK5CJTMtI7euPNrHfaLu16tTJQ9MTkX0+a8Dfs + xCqcFnnxdPd9O18/1TVjHZUK9KTg6xCECyUpzKPckIO/woCi5Gi5DLQU0D+Rp2GDyvaL + TTHiFFX19hf1SgqJeHjFVeA7EGOKBC/WHYMfk3TjI/YU8b8xaocF+zv0rUB7QjPrKFwg + YBsFjGLhprCz2RXccn4je5w+T1+i5SzLgSqV0dQ66knqFTDB8vUyGcPCRwDcAj3PQxt8 + DsByMoElLgjYazQn5zk5Z1dBZCYRKWxKVbfYdBybYxKdGGdFoEuJ1iwqgnESSY4BQHMS + BfgTBrRfcj27WntaKxQJRaDCCFsvgakQDebFvM7bfgi/dzUyCx++Gunefog9dfMAPhtZ + 2N9EOToi90vzA9pxPqYC+dH6UCEv8GpOYxEsaosmIARgG1faJitmK5Ren9zu9NrkFGPx + iU6LUwWmARfn8NEGeRCIrEuED8twtz2RfE8XkiOc5gPGsQWCPVh15Duy91/WXgNXdYDy + YAmAAdoHDuwtlzUzA9Ubsk1SAAWY65Ym8epI/JEzwQqBBRYrresODatb3F6dklD0UvMn + 1Ukn51XNffqYPXHRrD1HmfQddyWMKE4on1zzzN1b+4dTX86bsHV3/+PUyQVZ4557v//c + AM/QfbAHSVR6eijzGHeWoxjOyAWMbVwrzxqVlNGqdbIwTatCbuftdqRMlNkdOM2aaEO2 + OFDT3B0zk8RabKfBvPpImIw4HyQeCFO6bSpkBiBnwMPWefG6A+P3t1yekHLMmbEmlDg2 + LzXuKN4D458+6fkpL/VPpF5uKpqpMpfkLJ7T/z4MFmRHIcRKRdBpSvAVbeixUPYOoVP7 + tPlVZq+wW7vP3COcEz5mrqj/YFQWCJzTyiudeoWNt9lMVEBjj5MFTDZ7XA+WgWYbkMgx + o3tQRsREAxwn+BUGGUhPHeXHvAVKrApKcqPSj7AWEsEMioxWQ0IMAykhEeMEPbGSyRqB + 9tJDwAeiYCimvD5fnzH+xKudnS/DB2U3I3/7deQm1v+ea8Wa3Z3Tn7zZfeAyfSnyVeRa + pD/yGk6+CcZFiIX9txkW6wngTx26K+QP0H7VcLqCYdSCllLLdDJlQCBLpJMLdgNO0ybq + kE1v6MFlsDRrJEEImwg0Drg6xVXFvf29ROPEgr4Sp5FlMVtMxLsn7LX5gOmVeazVqY3T + bnoC2Oh47i6Kfoumupb07yA8UxL9mH6DGQcyOx2nhX6UJ9vBduqfNu4w7Ujiggm+QK5Y + LlYkVAQmJ0wJzEqY7V+hXKFaoW7ztia0+lr9u117Uww0qCo2lUkzILspzuKwmlKNaUGN + Yo7g9+X6KF+8Ss4kG6w/czgNPONM25msSOdlai3Fo3Qx3e62mq0By8ignw8E7Zlqd0A7 + EgXSbBmZ3YP6lUSCJLmfr4USmW5+OqQDnhTESqTtFnOhxuNUym8C10lUu0Ukg89MMXhP + 
ImKToOTUQ12c0SpijyZeRGK8WiUE5CL2+2Ry8KZE+LYYEpfOIRIPKmYlxgInUvQkxhCS + zQhhUtgDkg91uwsFQVKLmf//fSgODMUA/lrwle6duWNEYOmPNo9q/dXxv84bTe1n/SOf + njWnLFj9wNslcz79n6/P8vgYnjA1Y8qUe8sSwDKJTxqzdsebW6e2jMiqqA6VJ9kMzvSU + sid/dOHTF6h/AC9Zol9TMnYq7JxJr6vS5KfVcJ5THPIx5nwLzanlOjuIMoiIJyKT2qSh + 3TRF3zRD1OWmOHvACuyvz+9NJ0ZKTISlEwHWX9Sn7b8saRAS/AZncNDu9efovDnZe984 + cMBvylS5jO7RgTVTH3+cnRr5cFt/WZ5BgamtMmHtbOrMNkmPtUe/oP8Htjk5MZseKugx + njNSMoNgtBlsxiD3AP0xKBHEquWIU8lZ2NdW3moF0zJNnqhU2O04kQz2F7fErRQeJOw/ + qP+LiwhDxGQSjg0UFLMONN9wye4Bx1bnw3n2jIffLPUd3U95h83edqUmFXcx6f35k4Y1 + 7J36LKW+cfG5EUl3Pz1pM/WJnehHcErpPzLpCPRsKK0EnwFHbTZqoVro2dxGZhO7B+2l + BPiSkipjxrIbmM3sWTjlE8YElwZ58O5ADM0m+xScwJ7ooqNg6HmYHvzwMZpeoKcwBd8z + PxxycaA94U0sR9xAlqI5GuLTjFwgi9VFncBE+687grs4m62a+J2ff94/6G1KzubAERgP + alNbfbmKj2XJ4DKGfFSinqYZlAhHImBn3tE5KOkukECD/Uq+7JCeWV6bDH+gesGkrF9M + fEdQvJ9hF04+E5l/OrKMSb+5g265cREoROI68Is2w/ei/+zngkojnJnGTkyJXooHrRyA + 7w1TIAqTiwpQKZzjVsJH2OPhe03yVSz59nQymoJqUZ3UIYbTVyyVOPgfAjSmrK5izPjk + yub5bc2tc2Y0QkusldzSDrAFAOJG6CDAmwDE4iEm6lcAN+FmJYADIAWgCGA8wDSA+QCr + ALYAPANwEOBNgHcAPgP4CuAmCEwlgAMgBaAIYHx04Af9o8EyhnO/O/GsIXj2EBzOte54 + PmcInjsEHz4ELx6Cjx6Clw7BK4bgoIjveP/4IXjNEHzKEJysxO3znzEEl/jjNvrMHtI+ + Zwg+bwgu/b/Lbc8TS+/29y0cgi8Zgi8dgrcOwZcNwdsI/v8AjCJchAplbmRzdHJlYW0K + ZW5kb2JqCjM4IDAgb2JqCjg5MjcKZW5kb2JqCjM5IDAgb2JqCjw8IC9UeXBlIC9Gb250 + RGVzY3JpcHRvciAvQXNjZW50IDc3MCAvQ2FwSGVpZ2h0IDcxNyAvRGVzY2VudCAtMjMw + IC9GbGFncyAzMgovRm9udEJCb3ggWy05NTEgLTQ4MSAxNDQ1IDExMjJdIC9Gb250TmFt + ZSAvSUVZR0lMK0hlbHZldGljYSAvSXRhbGljQW5nbGUgMAovU3RlbVYgMCAvTWF4V2lk + dGggMTUwMCAvWEhlaWdodCA2MzcgL0ZvbnRGaWxlMiAzNyAwIFIgPj4KZW5kb2JqCjQw + IDAgb2JqClsgNTU2IDU1NiA1NTYgNTU2IDU1NiA1NTYgNTU2IDAgMCAwIDAgMCAwIDAg + NTU2IDAgMCAwIDcyMiA3MjIgMCAwIDc3OCAwIDI3OAowIDAgNTU2IDAgMCAwIDAgMCAw + IDY2NyAwIDAgMCA5NDQgMCAwIDAgMCAwIDAgMCAwIDAgNTU2IDAgNTAwIDAgNTU2IDAg + NTU2CjAgMjIyIDAgNTAwIDAgODMzIDU1NiA1NTYgMCAwIDMzMyA1MDAgMjc4IDU1NiA1 + MDAgXQplbmRvYmoKMTAgMCBvYmoKPDwgL1R5cGUgL0ZvbnQgL1N1YnR5cGUgL1RydWVU + eXBlIC9CYXNlRm9udCAvSUVZR0lMK0hlbHZldGljYSAvRm9udERlc2NyaXB0b3IKMzkg + MCBSIC9XaWR0aHMgNDAgMCBSIC9GaXJzdENoYXIgNDkgL0xhc3RDaGFyIDExOCAvRW5j + b2RpbmcgL01hY1JvbWFuRW5jb2RpbmcKPj4KZW5kb2JqCjQxIDAgb2JqCjw8IC9MZW5n + dGggNDIgMCBSIC9MZW5ndGgxIDU4NjQgL0ZpbHRlciAvRmxhdGVEZWNvZGUgPj4Kc3Ry + ZWFtCngBvVh7dBTVGf/uPHY3EDAJIWwem5ll2LwjJJEQSApL2A15gYEA7iLIbpINSZqU + FEIqKJhasLIgVSmo0GMLbW0BkUnCgU0oGKkWrdai1keptT5qbXvqsa2lx4rs9DezyZZw + qid/cJx77nzfd7977/e7v3vnztzpWr8xQBOoh3iqW+nvbCbjSmqHuLOxw98ZsRNegFzZ + 2N0lR2wxi4hvb+5c2xGxLd8lGmdb275puP2kEiLujZaAvynip88gi1tQELHZTZDTWjq6 + bo/YCScgre3rGof9k9A/Tezw3z4cn96ELX/N3xGI1E9yQuZ1rtvQNWzHQc7oXB8Yrs88 + wPdrYig10TqKoTYyE0dxSKuJzH8eZyMBXt2P68DzMRfX3FB2ieIthr1m0XcM+Qv7z575 + JPhZZuyDMaloHTNSX5em7HA2USyDf2bsgxZi/UaTkZspRPW5IapCnoc8Ezk3d76Vethj + dD/yD5B5amU7aRPyDuRHkIWodhjWANvZJ1icg2wTpbBq53hBWpaYLFnHjZdeDjHTiUel + 31rfO82SMXvvsOS+CRQzfxz7Afs+NZHEfkwOtpkqKYvt789ul3xwHaZO5B5k3rgzdrgv + vVA6y/LIITC0yaB0gZ2UPijIl94vCHGsTzqXGRIgnkqH5bxBGrI9Kj1pWyudRT4acR3J + Ro2T0mFbu7QnPcT290kP2kIMbR6IiI02ND0pdWTvk5oKDH/tvhB3tE+aDf8K53ipuMQu + zbT9UZqeGbIw2Pm2Wimn4FfSNDRENRmdOpzxUpptjzQHrnSbO3MO8ml2hB2gHHagz1Et + DULFcPurskv2hdgd/ZVZBY4Q2+wsrszal12Z6ciulRzZFZmZ0Fc8a95mvtU831xozjVn + mTPMdnOqOdGSYImzTLTEWsZZLBZziD3eN08ynWZHaR5oOdpvMVnEEHsChcJpdswoPHbK + Ilg4C1kSQ9rbWLyMEkPs6AksQ0ZQTpoMzRRix7Au9KJjTknQNcFwxHG6jhvuxDELR9Wk + svtCJtqe1D3POi9hbvzsCtfn3XyGZ+Se+/mXldnUfTX1HvWIzasW6opm845Ut44onyu7 + NsIVKM/NrVm6qb+7s63ZHVDcPsUdQPapO7tbrGpPgyz3tnXqDlnlM3wNjS269AfUTiXg + UtsUl9zbbbS7xt2su7sVVy81u5d5epudAVdft7Pbrfhd3v6G8vWrR8XaEY21vvz/xCrX + 
O1uvx2ow2l0Ta7XubtBjrdZjrdZjNTgbjFj64N2t9eUburA6ZXdrjaxm1atVS1Z6VNnv + dYXYYyh0bSRxiOLEM5Ql9lCKMJ0kIu23yBd1GV6u/Uk8T3HhDu0ffCkmdUDPXHheGQ3R + fXSAjmMX+in0LLqNHqbnWBue7VV0gl5j6XQj9l6BQlRLLzBNe4ma6Ueo30XnaC/1Uiza + dNBkeHczh7YZthN6A23TDtE0KqF76AzNRq+76UPtsNYP71JaTkfoKNo/zxSuV5ikPaH9 + kSy0BH1ug+clrVY7TgmUR+VUh9JtdJY5+ItaC1mpFOi+R9+ng/QU/Y3dzU5oLVq3dkF7 + B0vVSmlUj7SFnWDv8MeFe7TvaX/VwmAii3IQ1Ud76Ifo/zjSELZWN/sq62J72F7Oyd3N + nRC2i1PCV8BDNi1EqsSufC8YGKCn6Z/0H/YRZ+Xj+C7+GW2m9jGNpxqMUh9JgLqRvo20 + G2M6zUxsBlvA6tgW9l22l73C5XDLOQ/3De527k/8Yn4Vv4l/Rdgg9Im7xIdN48OXtNPa + ee1VmkI2upXW01aM7hxdoH/Rp4xHX2nMwUpZObsNqYcd4AbYQTbA1bEhdoE7wv7A3mMf + scucyMVyk7lcrovbwx3lznEv8q38Xv4R/g/8JWGuyIkHxfdNDvPvwg3hHeEXtVLtHe0T + bLEWsmNmymkxrSE/RttJN9FdGMUxpOOYtafpGXrOSO+xNPqQPgELxBJYCitki5AWs5tZ + M2tlj7JBpLMGln9zmAguhovnpnBpXD3XwHVwPdyrXA+fyufw1fxK/jjSs/xr/GX+siAK + k4TJwkKhinYJHcJ+pMeEnwp9wq/F2eJccbG4QuwRd4i7+EbxJfE101bTblOf6SPT37Et + 1prXmXdhdp7Dmn1q5JVmSIFNA/pC+ho1MhdroH2YjYPMT0GsriZ2L/jqpCxtNb+VX8jN + wGo4S3dgte6nLbSDX0UHtTf4I/Q6Vor+fdFDPxHKySY+hNm5m2ZgFQ0nZ3ZOdlZmhmOa + MtUuY8tPS01Jtk5Jmpw4KSE+bkLs+HExFrNJFHiOUZ5bqfDJaoZPFTKUysp83Vb8KPBf + VeDDoyyrFaPrqLLezg/XqJpO1Gy+pqYzUtMZrcni5DIqy8+T3Yqs/sqlyCG2cokH+n0u + xSurHxr6IkO/39AnQLfb0UB2W1tcssp8slut6G4Jun2u/Dw24AQd4/Lz9I3DSeP1jlVa + 4N+CDZYW6DXcaoricqvJCnT4eIfb36TWLfG4Xal2uxdlKFrqQYz8vFYVOGlnbJPStDPk + pAafrvlXeVTe71U5n95XfK46RXGpUza/b/2fOaK5d13lVDlHhT8QrFCdvp0gVzd9uuXf + BaumXka33HavR2Xbh0HoGNuAVIcbeSc4fG2yGqOUKy3BNh/IpaWevhRnirH5qlTn6Ut2 + JhtGft6AdWupHaMfyJ+fP1+XpXbr1oj84FuR8peHdGnd+vTbkDVLowQwnQGlCjhVudEI + ogBsiX4LlFCwsQQ84fIyDLMVeBaoHNYM71BFR5Vf7akfgdHiioDztbn6YpJTjJdQuRf1 + fcG4OZgp1I9T5OAlvK19yod/G13iHy4xOeIuke7UJzq6VlTmH9G79ZelA6NusSot+vx2 + G3MKW7G6ryqArVOjY1YT8QKv89hV2YsCfE3m1YQops7Ty9hub4hp20Pksg3gG5Vfcxvc + efpSa3UhPoz8PBTk2KHdmCdXIHKFvlbkoBysagrKFXILFpPgMCQcgaB3Ohis94AnWoaI + Tm9qVA14vXPQz3S9HzRB9aAXPbQN9wBpFE2/gkoz8vAy5TPqPEs8ao8rVXW6vJgFLN+h + Oo86hJXr9aJWQRQpEG9ptQ5jLgTmghz4iyK94NulB114g0G9z3qPYleHgsHUoP68RewQ + o2sLnMMFIdKr6JSHWE8d2kIo9lRjDuyKHbC8Oqc3YUmPrCh8s38xw8VR3Gg5C2iLDYZL + rhPDs8fC8JwxMVwaRTqK4TJgLtUZ/sqXx/DcUQzP+2KGnVHcADkfaJ0Gw+XXieEFY2HY + NSaG3VGkoxiuAGa3zvDCL4/hylEMV30xw9VR3ABZA7TVBsO114nhRWNhePGYGL45inQU + w3XAfLPO8JIvj+Gloxiu/2KGl0VxA+RyoF1mMLziOjF8y1gY9oyJYW8U6SiGVwKzV2f4 + 1ijDzlSVrt6He67Zdum6b8yrrqIcX0piApWLK+i4sIHWIW9h52kbZCpcI/90YnHSqIVd + h69wHLHHdHH4ao9cOKlfdYnD+k34i/QEhfGFrn+9cvi2J+ECzoQ8/i3Ni/zvsUzHSxnZ + EhciuoCs29D5N0MkIBN085s0iBZEK3IH0YsIOaOgKN4en4lcLuwOffaueObTBSFh0WX9 + nxKH720yMcQZR4mUqsfBaTFm+iDGNcHoIw696X0we2GS2SQodl5hGbjbC4tnFbMPuL1P + P5U7LfzxW+HCb50XJpdtCHe1s133PClNFc/8/tnHtSt7hI+lML/+/kOItU67KGaI53AC + s0Vj3YBYRElGrDTwqsdy6H1nKFNNZlMSS5oyqYgpsyYyZSo3q5iKCrnyLS/m5t65+d5X + cx1+bmJO/oZj4fOHpGVPruv/RXEBK3nrjm+8/tzzP7/wy8f/zD+UXsPZPp27t9HD8j/9 + K7sFoYBji3YRp5WFOHNOI8kY8yBOVIkGhhTMqc6d/g/PGHeSOclsNyUVFc4qnlU0kZkB + CkgmJQBJEl+cacAUvukQmfLZR+lrH7pvbZnSm9hR2niXe+mzb5TMYqveXT90+8TkG4/d + +aLCf3tJe/WhHz6zunhh6QM31qXFMQUHRY6VfzX8wMaKu/uDEXzbAHLD8NzfMDwnAiab + m455KMIEFJkV1nzq3VOs5FTeKSHn8mvimRcwrtTwcuFlnMgm0lTCQ2XMpR38TjbGMojz + fRrGhMWChRL35iDJsGPjE2ajV2Mg+timYKTFCIAZKCpM4Dl9tJkZmbzCv52aIA+e7phj + T5k0dXDrb678+Hi6u6rljpPnZlW/fu/+TQtzcrtOcOk9q3pPN+2/85bHXuHe2l2VVRb+ + S/hK+NC+NTPTq678HhhJ+xdyAGfv/3eZUFhknOBr8TdAvxj+JkSeMRP+ElD1Le76Wk9u + ZaC9O9DV2ujPv7mhvfXr+Mus1xy5ZCh5yGXI+nO6CrldG76gU1RneIZH203X2AbOq+q3 + XuPv1O3/Agh6PzUKZW5kc3RyZWFtCmVuZG9iago0MiAwIG9iagozMzIyCmVuZG9iago0 + MyAwIG9iago8PCAvVHlwZSAvRm9udERlc2NyaXB0b3IgL0FzY2VudCA3NzAgL0NhcEhl + aWdodCA2ODQgL0Rlc2NlbnQgLTIzMCAvRmxhZ3MgOTYKL0ZvbnRCQm94IFstOTMzIC00 + 
ODEgMTU3MSAxMTM4XSAvRm9udE5hbWUgL0pXRVNMWCtIZWx2ZXRpY2EtT2JsaXF1ZSAv + SXRhbGljQW5nbGUKLTYgL1N0ZW1WIDAgL01heFdpZHRoIDE1MDAgL1hIZWlnaHQgNTEz + IC9Gb250RmlsZTIgNDEgMCBSID4+CmVuZG9iago0NCAwIG9iagpbIDY2NyAwIDAgMCAw + IDAgMCAwIDAgMCAwIDAgMCAwIDAgMCAwIDAgMCAwIDU1NiA1NTYgMCAwIDAgMjIyIDAg + MCAwIDAgMCAwCjU1NiBdCmVuZG9iagoxMSAwIG9iago8PCAvVHlwZSAvRm9udCAvU3Vi + dHlwZSAvVHJ1ZVR5cGUgL0Jhc2VGb250IC9KV0VTTFgrSGVsdmV0aWNhLU9ibGlxdWUg + L0ZvbnREZXNjcmlwdG9yCjQzIDAgUiAvV2lkdGhzIDQ0IDAgUiAvRmlyc3RDaGFyIDgw + IC9MYXN0Q2hhciAxMTIgL0VuY29kaW5nIC9NYWNSb21hbkVuY29kaW5nCj4+CmVuZG9i + ago0NSAwIG9iago8PCAvTGVuZ3RoIDQ2IDAgUiAvTGVuZ3RoMSAxNjQ2MCAvRmlsdGVy + IC9GbGF0ZURlY29kZSA+PgpzdHJlYW0KeAG9e3d8VUX2+Mztr/fe8/JaeiEhIYE8QgoB + gnQSJBhK6EgLERAwKF1kEaQIqNiQIvIIEQKIyyIsuO4qdsWyuoLLlqzufpFV4d38ztwX + IuS3ux//2M/e+87MnJl7586cOXPmnDPzGuctaEAq1IxoNGTM+DmTkXQV70KIjk2cNX5O + AjeQ+DcTmxq9CZwNQ/nMyXOmzErgwmMIyV1TZi7qfN8I74fGTm0YPylRjm5CnD8VMhI4 + 7gFx8tRZjQsTuL4V4ldmzp7YWW4sBrx+1viFnd9HnwLuvXf8rIbE88XkveQ5s+c3JvCi + 7yFunjOvofN5XAPtewthyM1Es5EMzUA8opAW7jqE+KtyF2KglJTDNSlNsfkeTfF3SCdI + +D3Vv5DiX/te+eD7hpshxaPCD5Ahu/U8ibmIGEFIiaG8XfFoV4n0HgSZbWh4ahuqAigB + yANITe1rRc14D9oIsBuARtPww2gRwFqAxwGYrtQ+wI7jh1sYIXoCL0J2PCCqYDwjjDaP + Va7wvNOGudYnPR9bvzqJbTB6X2JbiwrJ+srxbvwUmoQ8+HkUwItRfxTGO45EZnrqoWgf + mgPQDEBLIcb7Wtw5nldxGgowGN4JIjeDj3r+mJ3uuZLdRuEWz5lQGwPRr9yARTWe064n + Pb90TfG8CnAgUbQ/Ak8c9exzzfRsdrfhHS2eTa42DO88mogWuODVo55Zka2eSdlS+aCt + bdSBFk8hlI+KKjz5BT5PnuuyJzPUJmDA012DPCnZv/Mkw4vwmBcqDUR1Hqdrs6cXFLld + 5aFeACfxfrwTpeCdLYEBnhOQhO4eqYoUbG3D9x/pH84OtOHF0fz+4a2R/qFAZJAnEKkI + hSA96gK/gr+b78vn8Kl8mA/yPt7BGwW9oBXUglKQC4LAt+EXW0o83El8AJUAWQ4cETiB + bcMvQSZzEh+UMg8eExiBEpBgbOv4ApgXI2MbPtCqJSlIHOWkFNeGDx5JZB2MehiSYqQC + LUXSEECIKCxQaACK4UfaOLTS3FRiLdH30RVWlP27oF4quRWm/vvLil2xrQOH18T2u2pj + OSTR4aq99bj1VuLfxo0LoKihNDV14LBFR5rmTJ9c3uAvr/eXNwDUxx5ummqNNU/weg9P + n0MKvDE6WD9h4lQSj2+IzfE3lMWm+8u8h5uk97oVTybFTf6yw2hy+Yiaw5OjDWUtTdGm + cv/4stojE0rn1d3xrbVd35pX+i++VUoqm0e+NUF6r9u36kjxBPKtOvKtOvKtCdEJ0rdI + 58unDS+d3wjc6S2fNtAbCw+PVQ0dUxPzjq8ta8N7ILNsAWJPIy17CoXZZmRnMpEHoY6P + AS6RWBzZ8TV7HmnFWR1/p4tgUI8ToMSSYnQaPYJ2okOIQ3shHUbj0Hb0Op4Oc3ssakUf + YDfKANnLoDY0CP0Wd3S8jSaj5+D5RnQGbUGHkRLemYVMULoBBzoWAx6F9AS0ouMZlIwK + 0Cp0ChVCrRtQe8e+jiNQOgyNRPvRAXj/DeynDjOGjpc6LiMBDYU6V0DJ2x2DOg4hPUpD + pWgI5K5Ar+IAfaljKrKiImjdLvQUehr9Cv0VP4hbO6Z2NHVc7PgSWNWKnGg43EtxK/6S + PsSs6tjV8ecOESgRRinw1Xq0GT0L9R+C+zSI1nI8AzfizXgLFaUepFqZlaxFjAMdIqgS + 7v4gldcABY6js+gf6Af8DWWltXQjfa4jr+P/kAINhF6SnjSgJrhXw70B+nQSczgL98ND + 8FL8GN6C36VSqJFUDXUftZD6mh5Mj6UX0e8y85kWdj27nVOI33Wc7Djf8T6yIBe6G81D + y6B3Z9BFdA39iGmoy4kDuAiX4nFwN+Od1HH8ND5ODcGn8UVqP/49/gp/g29QLKWkTFQq + 1Uhtpg5QZ6g36Wn0Fvpx+vf0d0wflmKfZq9wAf4TcYK4Vnyzo6jjy47vQcQKyAcjU4oG + o3vQeOjtHNQDPQC9OAj3IRi1s+gcel26v8JO1I6+ByogrMd2nIOr4R6M78KT8TT8JD4B + 96tSW65TMBCUjNJRFspJDacmULOoZup9qpl20Cn0AHoMfQjuC/QH9A36BsMyBsbEVDJV + aD0zi9kB9x5mL9PCvMUWsn3Ywewotpldy66nJ7Jvsx9wy7gNXAv3DfctiMVB/Gx+PYzO + 68CzvwJe/ulicDK0PgfdiybiMjwBbYXReBqPR+uAuybhNUCvOSjcUUcvoyupLOCGV9H9 + wK070FK0lh6Lnu74iN6PPgROmQlVNqMXmFLkYrfB6DyIsoCLOu9oJCUSDgUDyf4knxdE + vtNht1ktZpPRoNdpVUqFXCbwHMvQFEZp5f6Kem8sWB9jgv7+/dMJ7h8PGeNvy6iHqeyN + Vdz5TMxL3hsPRXc8GYUnJ3d7Mpp4Mtr1JNZ6i1Fxepq33O+N/a7M723DY4bWQPqRMn+t + N9Yupaul9EYprYK0zwcveMutU8u8MVzvLY9VNE1dV15flp6Gj0eBHPL0NCI4okhBKo6h + fuOXgoBF/cgT5TG7v6w8ZvNDGsroQPn4SbEhQ2vKyxw+Xy3kQdawGvhGetq0GLQTPayc + 5J/0cFsUTagnqfFja2L0+NoYVU/q0qXGLP6ymGXxFetP6K1U+frbCmNUoGJ8w7qKWLT+ + YSAuQesJNn49YAOHe6FaamVtTQyv7GwEaeN0aClpbmJNCNRP98Zk/lL/1HXT64G4aFhN + iz1ql4RvDA2pabFFbRKSnnbcuqzIB70/nt43vS+Ji3zWZYn4jw8l8t85TWLrsrNfQDxw + 
WBcBMKGAvwraGfNOlD7ih8YWkKChAK2bWAB0gqsWQzenQXv6xSjgGToQYwNV42PNw281 + Y2pZonH108taZDa7tAiV1sLz9eu0vWCk4Hmt37vuO1it6/3tf70zZ3xnDhfQfodIIRno + Ll6J4fG30k1ksQxAr6da/VPJ+DZJYwq431p+WwbghDSkzTEjLOBDanwxby1kgDaZNrAN + yYbUHMZ4Q20b7ljZhspcx0FHpe8ZB8VphNWmlcH3AUlPg4wUH6Qy0rwV8OUKwivedd51 + VZPWeSu8U4GZmIAUQ0HDutpMoODwGqATGgFfjNY6upINtbW9oJ5MUg+8Ao+vq4UapnfW + ALGUlRmHh7LSYDGlg0NqhtbEmsscsWhZLYwCsO/pITWx08C5tbXwVHZXS6HFS6dZO9uc + A23OToHy3EQtoLs0QxW169aROofX+H2x0+vWOdaR+ZbA2zDqnhHtzGhD5BFC8jbcPATe + hcjvc0hj4PP7oFm1hKY9gKVvcRTo7P+Zwvld7YY3e0Jr8yUKF/yXKFz4cyjc62dRuKir + pXdQuBjaXEQo3Pt/R+E+d1C45D9TONrVbmhkX2htVKJw6X+Jwv1+DoXLfhaFy7taegeF + K6DN5YTClf87Cve/g8JV/5nCA7raDY0cCK0dIFF40H+JwtU/h8KDfxaF7+pq6R0UHgJt + votQeOj/jsLD7qDw8P9M4RFd7YZGjoTWjpAoPOq/ROHRP4fCNT+LwrVdLb2DwmOgzbWE + wnd3UTjqiKHb5XBzN7GL/uuCeextJAdNidWjUqoQDOf9aANAGfMiGsvMR1GAs51xNsQ9 + AEoBqgEGdqar2FHIDbbaGIgPQR2vQ/4uwHcRHOJWiOuZr5AP0vv5R8AzMh8NA2gC470I + 4gKA/vCeE+Le+DxaAfnNEK/l9kN6vgTkuSZo11ooI+20AN4MaQUBaP4tv5MSrKFXAPei + WuIGuOOiwIJACF7ovFh4lgebQobkUIcSfD7qW0WdsQZi8C0gXSeul2IDhEYAE4AZ7CAr + skHKDtD9coB950JusOe8YLkglATgBxszgIIoBDYeAsstBaWC3ZgO6QwAcvWAewlo+f8A + RWcVpadepZX0biafaWZz2AvcCO4yWBNRfir/qmAUnpRNl22XfSF/UdGk+EI5Qfmmarvq + snq9+oQmRTNIc0GbrN2lU+tG6HvoV+g/NzQaXgJjpxSIcBHsbhp6X5LwqQngW5MBCNo2 + hC4CEBzS9KdtiAFAkOY/RSfgDYRGpZ6AWliIs7JzdT5dCKCU2dB28w/sqR/7tTHVN8A/ + A9TfII6jxrPvA636RGVGncxgtljsspN4F9DciHdF1VEYwkFam8n8T9/MYdY2Pmdlaurg + a9Xt9s/s7e+1Dy5vKPsalZRkZ2GK53Rai9ngz8ChYCiYp+2Zb6DGPZFZOTRn86JNFZEC + s6Ku6CT7vvjWxk/EL8XPv31M/PPlZTMf2zv6Lhz+42YckNpTBu2xQHsMKD+qFHTIYIL2 + MIM0BtIkcEVCk2SCzWj6p6/kfnDkkJa81/7Zbe0w6Hvm67ShIJ3rxhY3Nml5jq58KqOC + tGJH32BWZFzRCXEczt/wIfZh37ePYfP1+Q1Lr80VP7q6RfxcasNYoP1sxgT81jPqpBez + lFdQLJbLVdASbjEj89LyxcimLBlmTR2svVZ9rThefK2TDgkkO8uQ59MB1U0+nV83Frce + wK3ioAP46F58TKzaKw7AR+E7FIp2fMw42e1IAxw4N2pZzeIKwZSnYZ15vEpfQM+2Fijc + lS5t01nre+3xdlTSXtKendVvUbQHcqiCOGAPygJs0Ky2hmH09GHsECCl5SBlUZrC2EBB + YJM7w0jHQED8PZgE0rUc3MEWs07LUz5vKKjr0VPv0+frelD+JEpntJhz6eiS+tHLxD+I + 4rJpJU04b92ehQef2pzZ/yV2+5XD4m/FT38p/u2Lk7jo2iFc8eOV7/Gwa7hIfF/87JOV + b0DXgK/OQgffZzcBF/kPC7gN50aVDMMrGX4ri+SVMtKps+/HC4F1rv2O0KsP7pkLxDr7 + 2o7ghtP09XWG2j0/3ktfl+qKwni42SdgZu6JDs5nKpjR7AzXve7F7hV4NSWkCGNsM2xL + bEucL9tYlIQ1jFNt8/FOG/h2WY9Gk2SQ5xlYr2eBL0npe4AvMM9OUoc0yz0FScmV/gRx + r7Vrv2u/jEpgJEvadfrCTL2lEEOsLyzUQYDqJLI7GZsyoAsq9Oowkhl5IC6j0srDWDBB + APTVaiX6Amnz9SU4v2d+Xo+gP4nneD+kfTl6k5HnNJiDDGCMASt/dXp5j2Fblx6vDDLH + 6NIFOHz9q0UVL6+dUDDJTqtvRo5j/ZzZA/OGz1i6ef3AlSebLorXn31xcWXDoPzs0dP3 + S3TJBv6xsztQNjob9VQph6c3RCamL4gsSOe2BvFAIVVuTTWq6B+yjXkqMNT9UaMuT/uA + SpXtyEtm+bxslXVrqEzXBh52jbwgYzbliXiX0yEqtzLnNqq0X0swHhDlWvxrbbuW0IfQ + RiJJfmaWLYhkbNAVSApyiA4jhhaygBxOvyeM7AFrGDOYB3JlQuD2OYBmQQi6mFFbTLhx + +XKgGa5jqLxcM/BeToJwHJ/nxrk5t5GxByEjeEOAgjCxjciPzVdeUYYrjm148eWn9QGD + M2hu6Dtve0NreZBtid6LTZ98W5lWMfcB8R/fh7DlwsMlc7cvfKwJ46doyluwcUbjwtLF + u+dceO34imG5Ls/h5t+JIpAV5mUP4LdMdhekVGhsNElGyQUVpqhX9RzHUxxmeQF8aLyc + WqBgv6GVPEO3YcvLeKtKeFHehmuOsJpKtUTB70A0AFeVEAmhK5SoBoQrXJ2RyizVntOA + zNTJsM6Xh3N1uSa/jnpezMNvxtdTG7e/+y6439bG7xNZPC5Gb7h5zxPiM6RtGJV2fAoy + oxnWqpPR1P76NR6qUFlhGG2YYmB6CUoVj5RyjVq9QG8w6NUar97AI4NFbsmDhiVF7aoH + 1GqXvpeGYfK8510qHV9gn40KvEmVvsSIf9d+FqRMe0kcRvvytVsjTaYBtBmajBJDD2Nv + BTEUtnqwjArSblgkYRvFyzphTsisEGAPE0acAwLBlpgbRPRoi8lwk7Gug1XmtnEOGWBC + 0DBJcnMYk5HyJSWH4vql0RG7dxxrrluZuWsWdTX+VO+c9CHTzmH9DbH9kPh/WjxrR5H7 + t0u2Ptc/KqPpl8R5QYNPfO0N8TfnfiuNYXXHJ4yffRI5YA3fFy28z44tQkAI2Wpsq9Bq + vEbGVwpyX8iXp1Yb6fN8noMN5cFciVDL3QW62RY5VSxPzrZEKsMSYeKFSwYOW7g40wpi + onM+tAOJCIESAjkQdHo1ZsSxQa/GHcZBU3IYOQ2QInMCM7RH6wvjgDkURi49BGROSLIC + 
JyYAmQHLcR3IZLPJD2snSOGfyOFPQjqtJJ8lecKZjCCeK0+1aP19V2xrkfcZN2p6K1aK + f3ld/LTvUjxo+SPL9jQeeuoR9skfVozMGiP+Sbx5d3r468uvie/ibHCrKk7gST9+9ssH + 7z2/Y+ca4q/H4HMm/N4M69DwaD6rsFEFil7KQtUA1UhqFDOBOsbLl6haVedUNCXDKnUv + pGFkSkoFW4az1UKB7EW1rlIrkQnE6BXC4MDywPHANhgEZx02caAaEDGoN+T39OUxmeVX + akanuzLOl11du+3mVbb5iX5i6+mTOyZ+infgrX87+DJsYQKffwiybReswRbwur4RrRyF + R8vGaGoNk3CDbIZmmuG+gKxKe7+tyT8vMD+0JHtJzhrbau/q0JqMNdnbbapKIUcIqKlA + jiJPp0tj89ysJS9NRRWAA2TVMXVBZHamUOCA9MvGgswelbkJ9peWgZ/kXXtiynaOcV5K + htOrN9Mqc7oxjJSp6jCW6wVgcxcEjIcKY1OGJYxUKRDwTjaMaS8EXZJOknKJMe4Ub2Qc + 9belEehMPWAKJAQcTBCYCv6kZMjrST23qvmhBxu3Tl7z/P6Vy5/dskt8OeWuq++/+eey + 4JDa3HvEq2+Lv1+ymI6uHDtk1aoxDfPiRatXPbxx84NznqV2pw5p3v31x4+uGp6ZHsmb + tPuU+MNXHz1wPJvIuaqOjxgdrB9kjhyIZtjYVDZs7s/VsFPZtbY19u12WYXA+0KhPLnc + 6svTskye47xVxVPFvDsbdv1GRhUqFHEsTy5Q3ZooMDu038ULlyZmiyRL7pwonqDNrjBg + Wh+ggkkamCVeHcwS2gYiJKgA1K+GieIxQIDtID4CSpgtZIno1FwS8wQnpIdBjWGi5PXQ + 53oNZtDzQHAE89BtFMVaYUbP8uVHg8WHJ7/1979dxYX3ld71kHj+nUtUzuGn7l+xc80W + PGZLoftDXHVPNabeeA2Hxa93/kn84Q3xpU/34OAjsSd3Hn5s/fOEH91gffUGfZyS7JBL + 0SH9cQ2eiuk19DZmu3yfvE3WJufCcox4jsOUIJNBIEc8i9djmvEa5fKAHvKMLBuAVQQr + FCwtkzMcixUUphHl5oU2XBuVgWufk8lpFrC9Ub2KaJzsk/hJuU2petq3fhwovLbB16zV + 8bhNUjgryqyoxFJcUlwdl5aYErIwJ7SWTEliDwQPJHPaEWPO1q7OsIIIJxk0ZNBna1M7 + n12tLS7mAWA5qgMRhBXYkIv9tI/2Y3rD79tXfkmZLm2Jn3zqt9RGagxZlOiJP/bDbWJ/ + iRpjOi6xc9krkgV1OFrkYLfhrSztgRXgQbyaXWtghwv0KpdOZ+J6uWhlL5PMTbndNjqb + KtJm6+xeWbbN5vE+7Zs+uVOZH6y9Xg1cBNIVpAgovJDQJrTeXshpCRiC6oAjqDDLcpDK + qM3Bep1GyzsBYxGdgzHF0HKrMgdp9BAIdi4H1A8IJA5KLD+JkGQsB5VDwBYwVyQJqyeq + Rs9cEFSSRgzWi8/PuHEP3RnfuZaPxe/+/s2n83u7z9g3HRI/7EAvXXnxBK4Ms1fESyc3 + 7BHfEs+JovjLfbWPXn3i1M7f4Rdx+cU/SOvQIZCt7UApYrsOjibzboZR0G7YApcJbrlC + UFJKJYW4aVSRzK6mhQCyqdRtWHHEt2VtgiDxYkKRa5dhsAhFiIZaTIgSB6WZ2BedgA8x + mTc306k336eX3DhDedhTrWLpflF9CD4tyfjXQdBvAoRGFnIi4wSIfTBmU2H/lZiRVGZW + tgH079dff52Yh1AIxhYzEJ5nUXbUgCiacjOsQNt5TAVYZOPgrMDwI74mwpGDrxH2G6wF + M5AkwBSEmqBZu85TV28Oher+cQhmDdTHpkB9xI5vihp74gJYFbAFh3AlrqFYqJdqwzuj + FknHIgoWBYcP5LRcjjkBvg5lL7OMXUnmyc6oXIZsCuVuX9Oc275/nXyd8HSCUCXFkGRg + Fqxeei47i+iXwNZglPkx/Hb9hfr61O/jmlepXuypG2OYPT/2Y56/cbdELAr2+RDzPaTl + YFeNj+ZNU07TL1Iu1jP9jTXGqcbFRoYX3DqtVo7VGjKScoHi9EpGZjRmM3azRgaDaDL/ + i0GM68CsSIyhFkYQhlJaHw2STsv5wVaEJQAiHyjAh6gtZ7/94HMx5zzdvLB0vtiI1696 + gT312YUXO+KbmeO9PCI9byOR4XAEg10o0TWEHovqeVUV7s/W4hp2GjvJuJAVzCdh49iG + HNgZLfX7vMF6/Vz9AiOtd3uMThPtc5uNTFCfHHAjmczBuxVU0OkQvAGTJ2CmszXTHPaI + EAyE5LZw5APflsQcTYw28CQY3u+BcVBcLOmLZMXXJcwmIoHqYCRSiUjBRJ2X+kX7csii + RpR3DxHhFhPMvUxM9B3oO125/tl5vSeL9vPU3r2z3po1YdRolqcV+oxrciWj5CcVLhaL + ztPOOZueKHSLcurp7HHxFXtz/fOaz42IVBh9huJR323MdsTXAU3qO95nroNMyoS9VTE6 + LqIJ+YPBfHWerzI4IbhYfV+ybIZgVVsCVK16qnp/Ei1X90pKTpLTjNO6ypiZmersZaSZ + XqmyLEquFnTJSZ5wVpbOGrBUCYGwPccT0FWhQKYtO2e3b3rnLAWJ9ZPg0oOuT+A2AUZG + PiOeWzdXUherwxk6DxKoIBVMD3Bgx9Np4F9Kz5AiNkVIxS6DJxU5TNZUbLPidCYVyUKK + VBxQ4AxI8xEI3HonFJohkNZHrRbWyIQy2alSdlpV0lJJbKhQUCJ1Xo9komUntEzQJS1m + aSxMRsYPjoCeGLv5HhN/nDO2ZeCgZ86/NnQ9KNx/xP1OarLvvhTbMabo4ptbhq4Xn/iL + +LedO2mqGl9aOniTt8/uhbk5gfS0vLHHfi3+/rumkvmPTZiZ483KTCqacvbaO+sf/hsD + 7j9MvGsMyBfwT/WI2jHnRjzFCDJY49ANig6wzA3OJpBFrrtXRZIrwEuSOwUUyNdF3W9E + HXvq0I//YNUwWck82N/xMZsJdZvA21cc9VvYEFugpeWIYntpZWbabDbKAkq7FQeMNot1 + t29LQnJUJ4YtIVvbS4pBX8WS/4OYnSAsJP8IHbSBq6ixuPbd+N3Zv6laJa4X16+sovqx + p2427p6+++C4p+j1N8+Lf98kXsfyTVhDF0JfwZvFz4X2KPGo6BZBhhfyi2QLFavxKoat + xAOpMro/Uy2UytcKq+UXqPNgglxQKGsUU/ipirXUKnoVv1bxOLWV3sLvUOyj9tDP8/sV + GjihJRcUNsEsH81zCoGRU33C5WE2AFYpKEpKhYzBtIKiWU7JIlA+FDQvqIG4PMutigo0 + c01Oya41KxBepbSpNgCZbUBn0CUKC+0AnVFCr0hoFZZi4gZZXZ3RvjqjHU5Ctcrg/AEY + 
kjuiGj1Zalma4XiZIJODTN4RlesZhoZspFSsXqoVzoG+waYKYDSuFgBuIQOHLjqCQWbC + G0ehOgYqkSqUyYREfeAjpqAGQXtaAi27OG4VzlpXk8RS4SyMz7y6urmobp5BhnPhh/0y + 7MdxbMKDPsKDsOmSuOxt8aB44G2xGST7SOYAAZDuZ270gdGg0TCwn8nJEA2c+SlGn0UL + UrKwXKtwKJ2h3P7aabLpWr5Q0CtltCOHT5a5tEpXUSqVESk6VkQV5aQE9FqeFZyhJIuz + Da8DNnN5+JArQ0G58hTFfHGx08hHUvYm2/s4Is4BmlCBrXefV/A2YPzjeCvq1HASouJy + /OwtrgMtB1Z0IiKI0Mxoz2gnahzIUklYhPN7mpIQtgVwvsaHrG6HD5m9RnBdJqGelA/Z + XRYfTAwIiBzoFAGdDpVksLF75vfGaiy5nUx3+KT6gG8FxLEOrJAc+ATRpcFtSyJihvQ0 + YPW8wffUbvVNzZk1IXs4bu1jUj60+JEin3wv+89nTzUtsASUbl1KWrAuxSzr+eaSLadO + bFv31pi0qj2PmpycWuXMnIJnCmnW9LHDB6UM//XO/v23x7c5k2h6pZIr9Uf7T395zZbn + DPgymbtNHZ8zAfYMePDdaE40Yw//gvNDJ50kaNwUi5DFxfI6udulUBhDgt1rz9Bm4AjS + gdq42neq7pYCcPmyJICJkxR+OvDbSdSz6s2c3MwZg1gvh8DEW4LYIHMHgVigFRJfBCgr + hBR6HbG2gAImf3KXCQFOiqZDRc/VX/jh+qXFI3IK91CTH330kfuPByvPsGfif6keKraL + 10QxVuSvXrv06qv7Pj/69rZxhyV5BKeh6IvMYNhxcKAXopkv2PB2615hv5UeIOh2Gmna + yLnsvMplVDh4h8OiDekxuN50dpc8ZLE54Ygof8Q3b2knx0DPiqvBGv1XWnEPZBMCSpM8 + iNQGLfSS6MM2wEAf9kn6sMKsCoI+DIHMygWJPuz7F/qw5IBD5oQ2zEs+BuCK3ISFpUW5 + PPXBV5ZD2nnLXhyQtWbTnIdsh9zfnnznR6x/z8kMjn048aG9s3Y//ena+94/h3O/hqNc + vUAEoYKOS3Q7jKsCudB90Zye6kr1aPULzD4HGxCMlMalRYLLxRvklMuiYDMMGdqITm/3 + KEJ2m9uz2jev9Pbuxy+DJ+3OsbVbnTI5wtiqgL45IUA2KojkDiEIHZRGF3qlJ+zd6TgB + j7eFaIB5pFuIGI7XNz299Ok9i9fsw+uGZ/U++EzJi7OPiD9+8zm+5+qHr7/x2sXfUD17 + uAdSrh/7bJlYg9N//DMeDTKkf8clxg4yxEl2ibAyumib8Lj9BQ/NqikNazSp9RqTMaqM + GoWIHQ9UHKXP41/T5x0fCR/LPvB85L9quepXnNed11NjBdaXrNlhdiUXcjxv9rmcvNxl + VgT4bc4XnMdgDjABsybgZG1yJa8Dn7UrxNpDyRl8yGYLht7z7Ukwf3U8wfrvxSXLTzIA + M+u6lA/QyySfrTQdKpAfxCwc3cMsw3mCOq1ea9AatQynDCQ5koPgVXQFsdsls/BBpDCp + g+Du8dt9kMVCIFiBr8DjDYTuNKiITw+SKakpy/HcOjQX7Eei15lNvoTnljAQcf1IBhbK + lVS9JA50/dYPCvL12pvfsBu3PTIiy3iYvyt72KK+wy6If8bWP2CPIjzg4JK9LPYzlTNG + Dp054Jlnz9XlVxY9mjHEqQVpDwY2LhWDCyoePLIOf0rWfwxjgWDf6B3wFlVHU3kXJ3fR + WGMsNKs4vdwGq6BapYtY9Lxeo/aoKfVNo81qu+mbsizBYvG6wrNEH9fe2s6SDBnQbrOz + 9MQrCapqBrAMZyJuWrjzcvNe9pe06pItTptimLeltWXLFra0x1iKeo7CI1/acHMSvWvD + XmgXjXqLRfRV4BUP7B5moGPR6nxjlVAlqxFqZWuU+xx7XftCe1KPOxSwQpuTIuqz8iRY + Uhgu4rLJ9S65JoPPyGCddIY5Iz3C2rOU6pCqTzDktGVm3TZBrrUXEuEXv/wdrBuddjNI + QWnYE2IwzR+2uxW65IA26HcHgyhsh0CnUPuQRq1UBVxJQRxyREBOKPWgpCUWkp9ck9Je + kSUvVweed/CxhBJe+p750mqRrAPxgKSNDuLRIooTppaMy83bUzxHfP3gX9XHVKHeD70V + DdL525e+JN7A/Alc9twDr1YENi85c1ea+DZT2sffb/XNnN82Xdr5fP9Q8aZRnw0b8k/s + wiqcIT59uuWeHS+fOjRxBUV2Xyk4tYtwkbSS87BD6OSvMDD4HC0naiTwRYSnQSDK9vsm + JKRHcfXZePHZrmEtqYYhJYYucbivOAYXk3LjA/YU8Rlj1AwDRuw+BUjsSbUU7iVgGwUM + bOFGs1PYRdxCfjV7nH6dvkTLWZYDdUxGUyuox2DQaaoQ3DsMC4czuVl6nocyOKbJcjKB + JS4J0HtpTs5zcs6ugt2ECFKAQ6fFN+E4Nie0AsJvxaCPfQ2aF9hTxSVEG8AAoH0RJepX + YMhaU+vYpdrTWqFYkHw1MN3mQVeIFuTHvM7ffBC/+bU4GR/+WmzZdhCU0wP4vDg7PoFy + rhPvlfq3FjpJfFg0ikRhXxx6AUQDmiHaxrC3kQys+a5tXUKsta2tkleA0AjozwWYStgb + Xxkt4gVezWksgkVt0YSEEIio/rZRiikKpT8gt7v8NjnFWAI+l8WlAhWVczgDtEEehm/q + IuA+xC32CPmvRBRkeEYAmM8WCrdh1ZGfhi5+WXut/Vq8szHg5wJDoB2c7rc254hZb8g1 + SRsHIHZuaTR+Hdl340wwysTwlFIrWqI9auc2D05LLn6m4aPBKSdnVE9//Jg9MmfyC61M + 5va7knuXJFeMGr5rxIZ4T+rqjCEb9sQfpU7Oyhn45FvxC518R7fDPLbBij4umn2MO89R + DGfkQsYmrpFnjUrKaNWCpoI4q0Ju5+12pIzI7E6cYY3YkM0B6iJ3R88kbSUxW6Ff7WR7 + iDiaE34K021dIT0AGarG0B+84sCg/VMvD0k75spaFo0MKEh3tOIXoP3jhj01+pn4UOrZ + CcWTVObSvLnT4m9BY2Gki2CP0Ad6iFI6AbExmrtd2Kp93Pw8s1fYo91nbhMuCB8yV9R/ + Mip7CZzLyitdeoWNt9lMVEhjd8hCJpsd/OQy0EY6V5s7LaaEay4NjnoEFQYZrAw6Koh5 + C6RYFaTkRmUQYS0EghmUD1oNgbR2kCAVludkveQcgDEi3nDY6AAnLsqTFI4vVmYNOvH8 + 1q3Pwp8Fbor//Ey8ifV/5BqxZs/WcY/dbDlwmb4k/hXUr7j4Ek69CUpulOgcTeJIJgBd + V8NucWM0bZ/wgoUKC16nTs25TLyGU7uciiQ1FbLak+WgSfoiSRqbP/lfapKSukH2f6Wl + 
02l2INYeZILIAR1jzRBgmzqIaIvUJ6lbRJ8k2mNizIhxnYs7+RMOcJN1EBRMnZ/69QuB + ihMnywMQihmH8qN3339UPNa4Y9GwrKLWRe++0zz28MlJO5aM3kMf3lAVLoYtnLj4zNZ7 + 8txV8c/IHIR5TG2COahDd0WDITqo6klXMoxa0FJqmU6mDAmEDXVywW7ARKdCNr2hDZcD + +yWWOxA2oDGQUxvVJWfjZ2HN6NzQlWYTYb2u9Q7m/gHTczNYq0vr0K7ZBFPleP5Oin6V + pg7Ni28n8wL2aeijzEBY2zJxRvQXBbLt7Fb948btpu0pXDg5EMr3VfgqkytDo5JHhyYn + TwkuUi5SLVI3+RuTGwONwT3uvWkGGlQNNp3JMCC7yWFxWk3pxoywRjENPE/5ASqQpJIz + qQbrr50uA8+4MnakKjJ5mVpL8SjTl2n3WM3WkKVPOMiHwvZstSek7YNCGbas7JYu/QhE + SGJ9LNRCinS3MJMYWAkPDbG+iEhJuGYG4XQqaAKXjE/t8SEZ/E0Kg1fGB75LSLn0kOcw + Wn3Yq0nyIV+SWiWE5D4cDMjk4KXxwX/jIHDrnD7imUlYZIldT8nrLLHILcYn/kjJN3O7 + awZUDYuZ//99M8A4wRD+RgiU7Z20vXdo/i/W9m385Pg/ZvSj9rPBPo9PnlYeHnzfmdJp + H3/+zXkeH8NDxmSNHn13eTJolkkpVcu3v7JhzNTeOZWDoxUpNoMrM638sV9c/Hg39QPw + kqXjG0rGjgHpMOxlVYb8tBrOapREA4y50EJzarnODuIadrsjyKQ2aWgPTdE3zTabHXSn + Tuukm+6USYR0vLhdG78srbREYyLz4JaNGcwj6tPeowcOBE3ZKrfR0y+0bMyjj7JjxPc3 + x8sLDApMbZAJy6dQ5zZL631zx1f05zCfyQmucdFebcYLRkpmEIw2g80Y5u6jP4TFFrFq + OeJUchZkl5W3WsHkyZBHlAq7HUdIY9+5taRI7h7C/l16Ugk4LW/J3Ts8P/6ekt4Ke3K6 + AC6wZz30SlmgdT/l7zFl85Xh6cTdHi8c1qN+75gnKPWNt5/snTLi8WFrqY/ggBkG/QHR + f2YyEegj0YxSfA6cIlPQVGoqPYVbzaxhX0B7KQH+CUSVMwPYVcxa9jxzgRWqwvPDxLsN + olZSS8Hh0tYxpxUUdS/Thh86RtOz9BSm4P94D0XdHGgZ8CWWIy4XlqI5GvaeGblABusQ + dQITLWnFEXyIsyX2i774onPHiOgXsGOU2MfXF/KgXmgHX67mE1EquGeiASqip2kGRcAV + D3bCHZWDMnOIRT/VK/mNunxGiZpZXpsKP/DUgElQN5f4aUBB+RS7ceo5ceZpcQHsVGyn + p954GyiUoNWXkFKgPHLKrXM/goY9CY4cb8skp9g4WFb0hSfgVOCtlNCZIrsWDmwBDYgo + Qe4/Xf/hE3EbXvS1eF0UL+NFTKa4Gi9i4zfin+BN4r0UOWIG9UlXRwP8x+pfXZmQCSIJ + Vkw1nEIzSh7FxPlBYssTmw/0ZzgNmAnnbnIRbDiinqgA9UJlqBxVSP/wqoI/NQ6C/z/d + Jf3PbBj8d2wkGoVGoxo473g31I7hX2hY+jRHTi6OrO07vHp4av+GmU0NjdMmjpeekIoh + OAtwEeALgG/JqwDwDvYCZAFEAYYA1APMAWgG2AiwGyAGcBrgIsAXAN8mOk9pIfYCZAFE + AYYA1APMAWgG2AiwGyAGcBrgIsAXAN8SwgBoAbwAWQBRgCEA9QBzAJoBNgLs7ui8EFxd + aYy83XByvvL28kg3PLUbDt+843nYyL4Dz+mG53bD4WzPHc/ndcPzu+E9u+EF3XA4jXtH + fX274f264eXdcFDS7nh/UDd8cDccaH3H88O64cO74SO64SO74aO74eO74RO64RO74ZO6 + 4dKcum28J3crn9INn9YNn9ENn9kNl/53f1v9xLK5nX9md8OBJ+8on9cNn98Nb+yGL+iG + N3XDFxL8/wEtXOnXCmVuZHN0cmVhbQplbmRvYmoKNDYgMCBvYmoKMTEyMDgKZW5kb2Jq + CjQ3IDAgb2JqCjw8IC9UeXBlIC9Gb250RGVzY3JpcHRvciAvQXNjZW50IDc3MCAvQ2Fw + SGVpZ2h0IDcxNyAvRGVzY2VudCAtMjMwIC9GbGFncyAzMgovRm9udEJCb3ggWy05NTEg + LTQ4MSAxNDQ1IDExMjJdIC9Gb250TmFtZSAvVVlBU01TK0hlbHZldGljYSAvSXRhbGlj + QW5nbGUgMAovU3RlbVYgMCAvTWF4V2lkdGggMTUwMCAvWEhlaWdodCA1NDAgL0ZvbnRG + aWxlMiA0NSAwIFIgPj4KZW5kb2JqCjQ4IDAgb2JqClsgMjc4IDAgMCAwIDAgMCAwIDAg + MzMzIDMzMyAwIDU4NCAwIDAgMCAwIDU1NiA1NTYgNTU2IDU1NiA1NTYgNTU2IDU1NiA1 + NTYKNTU2IDU1NiAwIDAgMCAwIDAgMCAwIDY2NyAwIDcyMiAwIDY2NyAwIDAgMCAyNzgg + MCAwIDU1NiAwIDcyMiAwIDY2NyAwIDcyMgo2NjcgNjExIDcyMiAwIDk0NCAwIDAgMCAw + IDAgMCAwIDAgMCA1NTYgNTU2IDUwMCA1NTYgNTU2IDI3OCA1NTYgMCAyMjIgMCA1MDAK + MjIyIDgzMyA1NTYgNTU2IDU1NiAwIDMzMyA1MDAgMjc4IDU1NiA1MDAgMCA1MDAgXQpl + bmRvYmoKMjQgMCBvYmoKPDwgL1R5cGUgL0ZvbnQgL1N1YnR5cGUgL1RydWVUeXBlIC9C + YXNlRm9udCAvVVlBU01TK0hlbHZldGljYSAvRm9udERlc2NyaXB0b3IKNDcgMCBSIC9X + aWR0aHMgNDggMCBSIC9GaXJzdENoYXIgMzIgL0xhc3RDaGFyIDEyMCAvRW5jb2Rpbmcg + L01hY1JvbWFuRW5jb2RpbmcKPj4KZW5kb2JqCjQ5IDAgb2JqCihNYWMgT1MgWCAxMC42 + LjggUXVhcnR6IFBERkNvbnRleHQpCmVuZG9iago1MCAwIG9iagooRDoyMDEyMDEyNjE3 + NDExM1owMCcwMCcpCmVuZG9iagoxIDAgb2JqCjw8IC9Qcm9kdWNlciA0OSAwIFIgL0Ny + ZWF0aW9uRGF0ZSA1MCAwIFIgL01vZERhdGUgNTAgMCBSID4+CmVuZG9iagp4cmVmCjAg + NTEKMDAwMDAwMDAwMCA2NTUzNSBmIAowMDAwMDUwMDUxIDAwMDAwIG4gCjAwMDAwMTg2 + OTYgMDAwMDAgbiAKMDAwMDAwNzI0NyAwMDAwMCBuIAowMDAwMDE4NTI2IDAwMDAwIG4g + CjAwMDAwMDAwMjIgMDAwMDAgbiAKMDAwMDAwNzIyNyAwMDAwMCBuIAowMDAwMDA3MzUx + 
IDAwMDAwIG4gCjAwMDAwMDk0NDggMDAwMDAgbiAKMDAwMDAwODU1MiAwMDAwMCBuIAow + MDAwMDMzODI1IDAwMDAwIG4gCjAwMDAwMzc3NjMgMDAwMDAgbiAKMDAwMDAwNzUzOSAw + MDAwMCBuIAowMDAwMDA3NTg0IDAwMDAwIG4gCjAwMDAwMDc2MzIgMDAwMDAgbiAKMDAw + MDAwNzY3OSAwMDAwMCBuIAowMDAwMDA3NzI0IDAwMDAwIG4gCjAwMDAwMDg1MzIgMDAw + MDAgbiAKMDAwMDAwODU4OCAwMDAwMCBuIAowMDAwMDA5NDI4IDAwMDAwIG4gCjAwMDAw + MTgyOTUgMDAwMDAgbiAKMDAwMDAwOTQ4NCAwMDAwMCBuIAowMDAwMDE4Mjc0IDAwMDAw + IG4gCjAwMDAwMTg0MDIgMDAwMDAgbiAKMDAwMDA0OTc4MiAwMDAwMCBuIAowMDAwMDI0 + MTYwIDAwMDAwIG4gCjAwMDAwMTg2MTYgMDAwMDAgbiAKMDAwMDAxOTAzOCAwMDAwMCBu + IAowMDAwMDE4NzQ0IDAwMDAwIG4gCjAwMDAwMTkwMTYgMDAwMDAgbiAKMDAwMDAxODg1 + MCAwMDAwMCBuIAowMDAwMDE4OTk0IDAwMDAwIG4gCjAwMDAwMTg5NTcgMDAwMDAgbiAK + MDAwMDAxOTE0NSAwMDAwMCBuIAowMDAwMDIzNjgzIDAwMDAwIG4gCjAwMDAwMjM3MDQg + MDAwMDAgbiAKMDAwMDAyMzkzOCAwMDAwMCBuIAowMDAwMDI0MzQzIDAwMDAwIG4gCjAw + MDAwMzMzNjEgMDAwMDAgbiAKMDAwMDAzMzM4MiAwMDAwMCBuIAowMDAwMDMzNjA3IDAw + MDAwIG4gCjAwMDAwMzQwMDAgMDAwMDAgbiAKMDAwMDAzNzQxMiAwMDAwMCBuIAowMDAw + MDM3NDMzIDAwMDAwIG4gCjAwMDAwMzc2NjcgMDAwMDAgbiAKMDAwMDAzNzk0NiAwMDAw + MCBuIAowMDAwMDQ5MjQ1IDAwMDAwIG4gCjAwMDAwNDkyNjcgMDAwMDAgbiAKMDAwMDA0 + OTQ5MiAwMDAwMCBuIAowMDAwMDQ5OTU3IDAwMDAwIG4gCjAwMDAwNTAwMDkgMDAwMDAg + biAKdHJhaWxlcgo8PCAvU2l6ZSA1MSAvUm9vdCAyNiAwIFIgL0luZm8gMSAwIFIgL0lE + IFsgPGI1ZmJmYWY5ZDExOTc0NzQzZWU1MzI0YjRlNGI3ZjYxPgo8YjVmYmZhZjlkMTE5 + NzQ3NDNlZTUzMjRiNGU0YjdmNjE+IF0gPj4Kc3RhcnR4cmVmCjUwMTI2CiUlRU9GCjMg + MCBvYmoKPDwvVHlwZSAvUGFnZSAvQ29udGVudHMgNSAwIFIgL01lZGlhQm94IFswIDAg + NTc2IDU3Nl0gL1BhcmVudCA0IDAgUiAvUmVzb3VyY2VzIDcgMCBSID4+CmVuZG9iagoy + MCAwIG9iago8PC9UeXBlIC9QYWdlIC9Db250ZW50cyAyMSAwIFIgL01lZGlhQm94IFsw + IDAgNTc2IDczM10gL1BhcmVudCA0IDAgUiAvUmVzb3VyY2VzIDIzIDAgUiA+PgplbmRv + YmoKMSAwIG9iago8PC9BdXRob3IgKERlcmVrIFd5YXR0XG5QYXRyaWsgTm9yZHdhbGwp + L0NyZWF0aW9uRGF0ZSAoRDoyMDEyMDEyNTEwMDcwMFopL0NyZWF0b3IgKE9tbmlHcmFm + ZmxlIDUuMy42KS9Nb2REYXRlIChEOjIwMTIwMTI2MTczNzAwWikvUHJvZHVjZXIgNDkg + MCBSIC9UaXRsZSAoZmF1bHR0b2xlcmFuY2VzYW1wbGUuZ3JhZmZsZSk+PgplbmRvYmoK + eHJlZgoxIDEKMDAwMDA1MTUxMyAwMDAwMCBuIAozIDEKMDAwMDA1MTMwNCAwMDAwMCBu + IAoyMCAxCjAwMDAwNTE0MDcgMDAwMDAgbiAKdHJhaWxlcgo8PC9JRCBbPGI1ZmJmYWY5 + ZDExOTc0NzQzZWU1MzI0YjRlNGI3ZjYxPiA8YjVmYmZhZjlkMTE5NzQ3NDNlZTUzMjRi + NGU0YjdmNjE+XSAvSW5mbyAxIDAgUiAvUHJldiA1MDEyNiAvUm9vdCAyNiAwIFIgL1Np + emUgNTE+PgpzdGFydHhyZWYKNTE3MTQKJSVFT0YK + + QuickLookThumbnail + + TU0AKgAAK1iAP+BP8AQWDQeEQmFQuGQ2HQ+IRGJQuBgB3u91gADAYFAAGR+JyGRSOSSW + TSeUSmVSuWSuBwSWzGZTOEORyOEAOl0rEAPx+hMACwVlAAAujTSkUmlUumU2nSGX0+pU + +dOgAONyK8ACMRwV+Px/ABwOAMAAWiwlgAEWup223W+4XGZ1G5XWDv68RaLgB830APV9 + veLO13gB/PpmWYXAMAMVitsAAoEgYACYT2VzOUSAAYDAkXbQaHRaOnXTSUyKtVutcANJ + 3NwAPEHPsAPgDv0AAED4x8vV8AAHuBvgAqjMJABaLRoAAjEYWX96wR9PkbgAVCocaftd + vud2G6bvSu8WFZMVcWILvOih7jgMCYyHvd5vYABBstYAEsUg4AYT6G8cQOgAIQdikAAB + wQ8MFQXBi4PBBqSm+4QAFUeJhAACoVwEhZpFuZIAAcCwIgACgQAuyIIgaAB7nieqNGUZ + QAHsBoIAAc5wH0ABFC8PcIR9H8gJlB8goMasjOhFwAyUABum+bwAHIGKwgkEMToWa5fm + ehB+n2fi1AajoSByFaGHSYjYCIBYYoYfE2gAHs4SJOU5yBIc5GjPDKhMEwAATPwAH3QI + AFWZpbIsE7GATMCEHGaRugACwRg0AB4HMdoAAkDwLAAAoDsog53nCdQAA4b4DgAK4iCi + hk8GiAAX1hOlZVm707SJVs9T5PwEoQelfMabbEm8A1LnwB6CAWCUVAIAwCtqejBH8nye + nw2gFnUggVAMDwAByFQaAAAVxVZPNYBfWl0XS0NbSDXE911P6FvGvTCnOdp0gAXJ0xiA + YK14Eh8gqAAJgOB8SARGoMgpE6jAW3MlofXFzXVimKrbdkgXdPc+3ihx+4+ABbmIXgAG + 4exyAAI4Th4AAUhEE6aYlWOLZpmqkYxH+NXhXkFZlc+baBoKU5xH2dY5njw59oWl6YqC + K1po1dwXpWm6rqyFaJCGo4672qavr+razBut6Q7R47O5Dks4zoABXt2wbhoOxQZsjuza + 35m7yAAQb4AAN7/uPA5pua5Y+3BlmbLR9H2mCDJscdIAtE6N0+8KLncAAGga/i8S8FgU + hKAAMdHwXSzlwi4mQZTEnGeVeA+EYUaAr8vHGaxh1QJ4idN3kf9QuBlGcagAHoAdJgYB + 
[Binary OmniGraffle diagram (base64 TIFF data plus plist markup) not reproduced here. The file added by this diff contains two canvases illustrating the fault-tolerance sample:
 Canvas 1 (steps 1-7): Start and Do messages drive a Worker; its CounterService handles Increment and GetCurrentCount for a Counter backed by a Storage, and the CurrentCount reply of the ask ("?") is piped back to the Listener.
 Canvas 2 (steps 1-16): CounterService creates and watches Storage; the Counter uses it via UseStorage (count N, N+1). A StorageException triggers Restart; after repeated failures Storage is stopped, Terminated(Storage) is received, the count is served with None ("Service Unavailable"), and a Reconnect is scheduled ("In 10 secs"), "perhaps at some point in the future".]
1} + {0, -1} + {1, 0} + {-1, 0} + + Shape + Rectangle + Style + + fill + + Color + + b + 0.629424 + g + 0.814599 + r + 1 + + + stroke + + CornerRadius + 5 + Pattern + 1 + + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1038\cocoasubrtf360 +{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc\pardirnatural + +\f0\fs20 \cf0 CounterService} + + + + Bounds + {{81.25, 74.9365}, {99, 33.5}} + Class + ShapedGraphic + FontInfo + + Font + Helvetica + Size + 12 + + ID + 56 + Magnets + + {0, 1} + {0, -1} + {1, 0} + {-1, 0} + + Shape + Rectangle + Style + + fill + + Color + + b + 0.629424 + g + 0.814599 + r + 1 + + + stroke + + CornerRadius + 5 + Pattern + 1 + + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1038\cocoasubrtf360 +{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc\pardirnatural + +\f0\fs20 \cf0 ProgressListener} + + + + AllowConnections + NO + Bounds + {{72.5, 49.6865}, {116.5, 113.5}} + Class + ShapedGraphic + ID + 57 + Shape + Rectangle + Style + + fill + + Color + + b + 1 + g + 0.834169 + r + 0.745487 + + + stroke + + CornerRadius + 5 + + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1038\cocoasubrtf360 +{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc\pardirnatural + +\f0\fs24 \cf0 Worker} + + TextPlacement + 0 + + + GridInfo + + HPages + 1 + KeepToScale + + Layers + + + Lock + NO + Name + Layer 1 + Print + YES + View + YES + + + LayoutInfo + + Animate + NO + circoMinDist + 18 + circoSeparation + 0.0 + layoutEngine + dot + neatoSeparation + 0.0 + twopiSeparation + 0.0 + + Orientation + 2 + PrintOnePage + + RowAlign + 1 + RowSpacing + 36 + SheetTitle + Canvas 2 + UniqueID + 2 + VPages + 1 + + + SmartAlignmentGuidesActive + YES + SmartDistanceGuidesActive + YES + UseEntirePage + + WindowInfo + + CurrentSheet + 0 + ExpandedCanvases + + + name + Canvas 1 + + + Frame + {{29, 4}, {1423, 1024}} + ListView + + OutlineWidth + 142 + RightSidebar + + ShowRuler + + Sidebar + + SidebarWidth + 120 + VisibleRegion + {{-73, 81.25}, {723.864, 494.318}} + Zoom + 1.7599999904632568 + ZoomValues + + + Canvas 1 + 1.7599999904632568 + 1.8700000047683716 + + + Canvas 2 + 2 + 1 + + + + saveQuickLookFiles + YES + + diff --git a/akka-docs/java/code/akka/docs/actor/FaultHandlingTestBase.java b/akka-docs/java/code/akka/docs/actor/FaultHandlingTestBase.java index abf2207a1d..bb8f11467c 100644 --- a/akka-docs/java/code/akka/docs/actor/FaultHandlingTestBase.java +++ b/akka-docs/java/code/akka/docs/actor/FaultHandlingTestBase.java @@ -40,9 +40,9 @@ public class FaultHandlingTestBase { //#strategy private static SupervisorStrategy strategy = new OneForOneStrategy(10, Duration.parse("1 minute"), - new Function() { + new Function() { @Override - public Action apply(Throwable t) { + public Directive apply(Throwable t) { if (t instanceof ArithmeticException) { return resume(); } else if (t instanceof NullPointerException) { @@ -78,9 +78,9 @@ public class FaultHandlingTestBase { //#strategy2 private static SupervisorStrategy strategy = new OneForOneStrategy(10, Duration.parse("1 minute"), - new Function() { + new Function() { @Override - public Action apply(Throwable t) { + public Directive apply(Throwable t) { if (t instanceof 
ArithmeticException) { return resume(); } else if (t instanceof NullPointerException) { diff --git a/akka-docs/java/code/akka/docs/actor/UntypedActorDocTestBase.java b/akka-docs/java/code/akka/docs/actor/UntypedActorDocTestBase.java index ff14c4e09b..749dd1e1d9 100644 --- a/akka-docs/java/code/akka/docs/actor/UntypedActorDocTestBase.java +++ b/akka-docs/java/code/akka/docs/actor/UntypedActorDocTestBase.java @@ -12,6 +12,7 @@ import akka.actor.Props; //#import-future import akka.dispatch.Future; import akka.dispatch.Futures; +import akka.dispatch.Mapper; import akka.dispatch.Await; import akka.util.Duration; import akka.util.Timeout; @@ -37,16 +38,16 @@ import akka.util.Duration; import akka.actor.ActorTimeoutException; //#import-gracefulStop -//#import-askPipeTo +//#import-askPipe import static akka.pattern.Patterns.ask; -import static akka.pattern.Patterns.pipeTo; +import static akka.pattern.Patterns.pipe; import akka.dispatch.Future; import akka.dispatch.Futures; import akka.util.Duration; import akka.util.Timeout; import java.util.concurrent.TimeUnit; import java.util.ArrayList; -//#import-askPipeTo +//#import-askPipe import akka.actor.Props; import akka.actor.UntypedActor; @@ -223,12 +224,12 @@ public class UntypedActorDocTestBase { } @Test - public void usePatternsAskPipeTo() { + public void usePatternsAskPipe() { ActorSystem system = ActorSystem.create("MySystem"); ActorRef actorA = system.actorOf(new Props(MyUntypedActor.class)); ActorRef actorB = system.actorOf(new Props(MyUntypedActor.class)); ActorRef actorC = system.actorOf(new Props(MyUntypedActor.class)); - //#ask-pipeTo + //#ask-pipe final Timeout t = new Timeout(Duration.create(5, TimeUnit.SECONDS)); final ArrayList> futures = new ArrayList>(); @@ -236,8 +237,8 @@ public class UntypedActorDocTestBase { futures.add(ask(actorB, "request", t)); // using timeout from above final Future> aggregate = Futures.sequence(futures, system.dispatcher()); - - final Future transformed = aggregate.map(new akka.japi.Function, Result>() { + + final Future transformed = aggregate.map(new Mapper, Result>() { public Result apply(Iterable coll) { final Iterator it = coll.iterator(); final String s = (String) it.next(); @@ -246,8 +247,8 @@ public class UntypedActorDocTestBase { } }); - pipeTo(transformed, actorC); - //#ask-pipeTo + pipe(transformed).to(actorC); + //#ask-pipe system.shutdown(); } diff --git a/akka-docs/java/code/akka/docs/actor/japi/FaultHandlingDocSample.java b/akka-docs/java/code/akka/docs/actor/japi/FaultHandlingDocSample.java index 265f005059..db39a5d663 100644 --- a/akka-docs/java/code/akka/docs/actor/japi/FaultHandlingDocSample.java +++ b/akka-docs/java/code/akka/docs/actor/japi/FaultHandlingDocSample.java @@ -11,6 +11,7 @@ import java.util.List; import java.util.Map; import akka.actor.*; +import akka.dispatch.Mapper; import akka.japi.Function; import akka.util.Duration; import akka.util.Timeout; @@ -19,9 +20,11 @@ import akka.event.LoggingAdapter; import com.typesafe.config.Config; import com.typesafe.config.ConfigFactory; +import static akka.japi.Util.manifest; + import static akka.actor.SupervisorStrategy.*; import static akka.pattern.Patterns.ask; -import static akka.pattern.Patterns.pipeTo; +import static akka.pattern.Patterns.pipe; import static akka.docs.actor.japi.FaultHandlingDocSample.WorkerApi.*; import static akka.docs.actor.japi.FaultHandlingDocSample.CounterServiceApi.*; @@ -115,9 +118,9 @@ public class FaultHandlingDocSample { // Stop the CounterService child if it throws ServiceUnavailable private static 
SupervisorStrategy strategy = new OneForOneStrategy(-1, Duration.Inf(), - new Function() { + new Function() { @Override - public Action apply(Throwable t) { + public Directive apply(Throwable t) { if (t instanceof ServiceUnavailable) { return stop(); } else { @@ -142,11 +145,14 @@ public class FaultHandlingDocSample { counterService.tell(new Increment(1), getSelf()); // Send current progress to the initial sender - pipeTo(ask(counterService, GetCurrentCount, askTimeout).map(new Function() { - public Progress apply(CurrentCount c) { - return new Progress(100.0 * c.count / totalCount); - } - }), progressListener); + pipe(ask(counterService, GetCurrentCount, askTimeout) + .mapTo(manifest(CurrentCount.class)) + .map(new Mapper() { + public Progress apply(CurrentCount c) { + return new Progress(100.0 * c.count / totalCount); + } + })) + .to(progressListener); } else { unhandled(msg); } @@ -224,9 +230,9 @@ public class FaultHandlingDocSample { // Restart the storage child when StorageException is thrown. // After 3 restarts within 5 seconds it will be stopped. private static SupervisorStrategy strategy = new OneForOneStrategy(3, Duration.parse("5 seconds"), - new Function() { + new Function() { @Override - public Action apply(Throwable t) { + public Directive apply(Throwable t) { if (t instanceof StorageException) { return restart(); } else { diff --git a/akka-docs/java/code/akka/docs/agent/AgentDocTest.java b/akka-docs/java/code/akka/docs/agent/AgentDocTest.java index 94ddef2c9f..553d64eee5 100644 --- a/akka-docs/java/code/akka/docs/agent/AgentDocTest.java +++ b/akka-docs/java/code/akka/docs/agent/AgentDocTest.java @@ -44,7 +44,7 @@ public class AgentDocTest { @Test public void createAndClose() { - //#create + //#create ActorSystem system = ActorSystem.create("app"); Agent agent = new Agent(5, system); diff --git a/akka-docs/java/code/akka/docs/future/FutureDocTestBase.java b/akka-docs/java/code/akka/docs/future/FutureDocTestBase.java index e642047709..b064eb803b 100644 --- a/akka-docs/java/code/akka/docs/future/FutureDocTestBase.java +++ b/akka-docs/java/code/akka/docs/future/FutureDocTestBase.java @@ -4,12 +4,10 @@ package akka.docs.future; //#imports1 -import akka.dispatch.Promise; +import akka.dispatch.*; import akka.japi.Procedure; import akka.japi.Procedure2; import akka.util.Timeout; -import akka.dispatch.Await; -import akka.dispatch.Future; //#imports1 @@ -57,7 +55,6 @@ import akka.actor.ActorSystem; import akka.actor.UntypedActor; import akka.actor.ActorRef; import akka.actor.Props; -import akka.dispatch.Futures; import akka.pattern.Patterns; import static org.junit.Assert.*; @@ -110,7 +107,7 @@ public class FutureDocTestBase { } }, system.dispatcher()); - Future f2 = f1.map(new Function() { + Future f2 = f1.map(new Mapper() { public Integer apply(String s) { return s.length(); } @@ -131,7 +128,7 @@ public class FutureDocTestBase { } }, system.dispatcher()); - Future f2 = f1.map(new Function() { + Future f2 = f1.map(new Mapper() { public Integer apply(String s) { return s.length(); } @@ -153,7 +150,7 @@ public class FutureDocTestBase { Thread.sleep(100); - Future f2 = f1.map(new Function() { + Future f2 = f1.map(new Mapper() { public Integer apply(String s) { return s.length(); } @@ -173,7 +170,7 @@ public class FutureDocTestBase { } }, system.dispatcher()); - Future f2 = f1.flatMap(new Function>() { + Future f2 = f1.flatMap(new Mapper>() { public Future apply(final String s) { return future(new Callable() { public Integer call() { @@ -204,7 +201,7 @@ public class FutureDocTestBase { 
// Find the sum of the odd numbers Future futureSum = futureListOfInts.map( - new Function, Long>() { + new Mapper, Long>() { public Long apply(Iterable ints) { long sum = 0; for (Integer i : ints) @@ -306,24 +303,87 @@ public class FutureDocTestBase { //#filter Future future1 = Futures.successful(4, system.dispatcher()); Future successfulFilter = - future1.filter(new Function() { - public Boolean apply(Integer i) { return i % 2 == 0; } + future1.filter(new Filter() { + public boolean filter(Integer i) { return i % 2 == 0; } }); Future failedFilter = - future1.filter(new Function() { - public Boolean apply(Integer i) { return i % 2 != 0; } + future1.filter(new Filter() { + public boolean filter(Integer i) { return i % 2 != 0; } }); //When filter fails, the returned Future will be failed with a scala.MatchError //#filter } + public void sendToTheInternetz(String s) { + + } + + public void sendToIssueTracker(Throwable t) { + + } + + @Test public void useAndThen() { + //#and-then + Future future1 = Futures.successful("value", system.dispatcher()). + andThen(new OnComplete() { + public void onComplete(Throwable failure, String result) { + if (failure != null) sendToIssueTracker(failure); + } + }).andThen(new OnComplete() { + public void onComplete(Throwable failure, String result) { + if (result != null) sendToTheInternetz(result); + } + }); + //#and-then + } + + @Test public void useRecover() { + //#recover + Future future = future(new Callable() { + public Integer call() { + return 1 / 0; + } + }, system.dispatcher()).recover(new Recover() { + public Integer recover(Throwable problem) throws Throwable { + if (problem instanceof ArithmeticException) return 0; + else throw problem; + } + }); + int result = Await.result(future, Duration.create(1, SECONDS)); + assertEquals(result, 0); + //#recover + } + + @Test public void useTryRecover() { + //#try-recover + Future future = future(new Callable() { + public Integer call() { + return 1 / 0; + } + }, system.dispatcher()).recoverWith(new Recover>() { + public Future recover(Throwable problem) throws Throwable { + if (problem instanceof ArithmeticException) { + return future(new Callable() { + public Integer call() { + return 0; + } + }, system.dispatcher()); + } + else throw problem; + } + }); + int result = Await.result(future, Duration.create(1, SECONDS)); + assertEquals(result, 0); + //#try-recover + } + @Test public void useOnSuccessOnFailureAndOnComplete() { { Future future = Futures.successful("foo", system.dispatcher()); //#onSuccess - future.onSuccess(new Procedure() { - public void apply(String result) { + future.onSuccess(new OnSuccess() { + public void onSuccess(String result) { if ("bar" == result) { //Do something if it resulted in "bar" } else { @@ -337,8 +397,8 @@ public class FutureDocTestBase { Future future = Futures.failed(new IllegalStateException("OHNOES"), system.dispatcher()); //#onFailure - future.onFailure( new Procedure() { - public void apply(Throwable failure) { + future.onFailure( new OnFailure() { + public void onFailure(Throwable failure) { if (failure instanceof IllegalStateException) { //Do something if it was this particular failure } else { @@ -351,8 +411,8 @@ public class FutureDocTestBase { { Future future = Futures.successful("foo", system.dispatcher()); //#onComplete - future.onComplete(new Procedure2() { - public void apply(Throwable failure, String result) { + future.onComplete(new OnComplete() { + public void onComplete(Throwable failure, String result) { if (failure != null) { //We got a failure, handle it 
here } else { @@ -370,7 +430,7 @@ public class FutureDocTestBase { Future future1 = Futures.successful("foo", system.dispatcher()); Future future2 = Futures.successful("bar", system.dispatcher()); Future future3 = - future1.zip(future2).map(new Function, String>() { + future1.zip(future2).map(new Mapper, String>() { public String apply(scala.Tuple2 zipped) { return zipped._1() + " " + zipped._2(); } @@ -382,7 +442,7 @@ } { - //#or + //#fallback-to Future future1 = Futures.failed(new IllegalStateException("OHNOES1"), system.dispatcher()); Future future2 = @@ -390,10 +450,10 @@ Future future3 = Futures.successful("bar", system.dispatcher()); Future future4 = - future1.or(future2).or(future3); // Will have "bar" in this case + future1.fallbackTo(future2).fallbackTo(future3); // Will have "bar" in this case String result = Await.result(future4, Duration.create(1, SECONDS)); assertEquals("bar", result); - //#or + //#fallback-to } } diff --git a/akka-docs/java/extending-akka.rst b/akka-docs/java/extending-akka.rst index ac60147881..0d88248cc8 100644 --- a/akka-docs/java/extending-akka.rst +++ b/akka-docs/java/extending-akka.rst @@ -54,6 +54,12 @@ Loading from Configuration To be able to load extensions from your Akka configuration you must add FQCNs of implementations of either ``ExtensionId`` or ``ExtensionIdProvider`` in the "akka.extensions" section of the config you provide to your ``ActorSystem``. +:: + + akka { + extensions = ["akka.docs.extension.ExtensionDocTestBase.CountExtension"] + } + Applicability ============= diff --git a/akka-docs/java/fault-tolerance-sample.rst b/akka-docs/java/fault-tolerance-sample.rst index 4b359b792d..8e379c5fcc 100644 --- a/akka-docs/java/fault-tolerance-sample.rst +++ b/akka-docs/java/fault-tolerance-sample.rst @@ -1,5 +1,51 @@ .. _fault-tolerance-sample-java: +Diagrams of the Fault Tolerance Sample (Java) +---------------------------------------------- + +.. image:: ../images/faulttolerancesample-normal-flow.png + +*The above diagram illustrates the normal message flow.* + +**Normal flow:** + +======= ================================================================================== +Step Description +======= ================================================================================== +1 The progress ``Listener`` starts the work. +2 The ``Worker`` schedules work by sending ``Do`` messages periodically to itself. +3, 4, 5 When receiving ``Do`` the ``Worker`` tells the ``CounterService`` + to increment the counter, three times. The ``Increment`` message is forwarded + to the ``Counter``, which updates its counter variable and sends the current value + to the ``Storage``. +6, 7 The ``Worker`` asks the ``CounterService`` for the current value of the counter and pipes + the result back to the ``Listener``. +======= ================================================================================== + + +.. image:: ../images/faulttolerancesample-failure-flow.png + +*The above diagram illustrates what happens in case of storage failure.* + +**Failure flow:** + +=========== ================================================================================== +Step Description +=========== ================================================================================== +1 The ``Storage`` throws ``StorageException``. +2 The ``CounterService`` is the supervisor of the ``Storage`` and restarts the + ``Storage`` when ``StorageException`` is thrown. +3, 4, 5, 6 The ``Storage`` continues to fail and is restarted. 
+7 After 3 failures and restarts within 5 seconds the ``Storage`` is stopped by its + supervisor, i.e. the ``CounterService``. +8 The ``CounterService`` is also watching the ``Storage`` for termination and + receives the ``Terminated`` message when the ``Storage`` has been stopped ... +9, 10, 11 and tells the ``Counter`` that there is no ``Storage``. +12 The ``CounterService`` schedules a ``Reconnect`` message to itself. +13, 14 When it receives the ``Reconnect`` message it creates a new ``Storage`` ... +15, 16 and tells the ``Counter`` to use the new ``Storage``. +=========== ================================================================================== + Full Source Code of the Fault Tolerance Sample (Java) ------------------------------------------------------ diff --git a/akka-docs/java/fault-tolerance.rst b/akka-docs/java/fault-tolerance.rst index 8e2dfe3cd3..17107b8a82 100644 --- a/akka-docs/java/fault-tolerance.rst +++ b/akka-docs/java/fault-tolerance.rst @@ -43,7 +43,7 @@ For the sake of demonstration let us consider the following strategy: :include: strategy I have chosen a few well-known exception types in order to demonstrate the -application of the fault handling actions described in :ref:`supervision`. +application of the fault handling directives described in :ref:`supervision`. First off, it is a one-for-one strategy, meaning that each child is treated separately (an all-for-one strategy works very similarly, the only difference is that any decision is applied to all children of the supervisor, not only the @@ -71,7 +71,7 @@ in the same way as the default strategy defined above. Test Application ---------------- -The following section shows the effects of the different actions in practice, +The following section shows the effects of the different directives in practice, wherefore a test setup is needed. First off, we need a suitable supervisor: .. includecode:: code/akka/docs/actor/FaultHandlingTestBase.java @@ -93,13 +93,13 @@ Let us create actors: .. includecode:: code/akka/docs/actor/FaultHandlingTestBase.java :include: create -The first test shall demonstrate the ``Resume`` action, so we try it out by +The first test shall demonstrate the ``Resume`` directive, so we try it out by setting some non-initial state in the actor and have it fail: .. includecode:: code/akka/docs/actor/FaultHandlingTestBase.java :include: resume -As you can see the value 42 survives the fault handling action. Now, if we +As you can see the value 42 survives the fault handling directive. Now, if we change the failure to a more serious ``NullPointerException``, that will no longer be the case: @@ -113,7 +113,7 @@ terminated by the supervisor: :include: stop Up to now the supervisor was completely unaffected by the child’s failure, -because the actions set did handle it. In case of an ``Exception``, this is not +because the directives set did handle it. In case of an ``Exception``, this is not true anymore and the supervisor escalates the failure. .. includecode:: code/akka/docs/actor/FaultHandlingTestBase.java @@ -123,7 +123,7 @@ The supervisor itself is supervised by the top-level actor provided by the :class:`ActorSystem`, which has the default policy to restart in case of all ``Exception`` cases (with the notable exceptions of ``ActorInitializationException`` and ``ActorKilledException``). 
Since the -default action in case of a restart is to kill all children, we expected our poor +default directive in case of a restart is to kill all children, we expected our poor child not to survive this failure. In case this is not desired (which depends on the use case), we need to use a diff --git a/akka-docs/java/futures.rst b/akka-docs/java/futures.rst index e9b743535a..00f17e57df 100644 --- a/akka-docs/java/futures.rst +++ b/akka-docs/java/futures.rst @@ -67,7 +67,7 @@ These allow you to create 'pipelines' or 'streams' that the result will travel t Future is a Monad ^^^^^^^^^^^^^^^^^ -The first method for working with ``Future`` functionally is ``map``. This method takes a ``Function`` which performs +The first method for working with ``Future`` functionally is ``map``. This method takes a ``Mapper`` which performs some operation on the result of the ``Future``, and returns a new result. The return value of the ``map`` method is another ``Future`` that will contain the new result: @@ -176,14 +176,26 @@ For this Akka supports ``onComplete``, ``onSuccess`` and ``onFailure``, of which .. includecode:: code/akka/docs/future/FutureDocTestBase.java :include: onComplete +Ordering +-------- + +Since callbacks are executed in any order and potentially in parallel, +it can be tricky when you need sequential ordering of operations. +But there's a solution! And its name is ``andThen``, and it creates a new Future with +the specified callback, a Future that will have the same result as the Future it's called on, +which allows for ordering like in the following sample: + +.. includecode:: code/akka/docs/future/FutureDocTestBase.java + :include: and-then + Auxiliary methods ----------------- -``Future`` ``or`` combines 2 Futures into a new ``Future``, and will hold the successful value of the second ``Future` +``Future`` ``fallbackTo`` combines 2 Futures into a new ``Future``, and will hold the successful value of the second ``Future`` if the first ``Future`` fails. .. includecode:: code/akka/docs/future/FutureDocTestBase.java - :include: or + :include: fallback-to You can also combine two Futures into a new ``Future`` that will hold a tuple of the two Futures successful results, using the ``zip`` operation. @@ -197,4 +209,22 @@ Exceptions Since the result of a ``Future`` is created concurrently to the rest of the program, exceptions must be handled differently. It doesn't matter if an ``UntypedActor`` or the dispatcher is completing the ``Future``, if an ``Exception`` is caught the ``Future`` will contain it instead of a valid result. If a ``Future`` does contain an ``Exception``, -calling ``Await.result`` will cause it to be thrown again so it can be handled properly. \ No newline at end of file +calling ``Await.result`` will cause it to be thrown again so it can be handled properly. + +It is also possible to handle an ``Exception`` by returning a different result. +This is done with the ``recover`` method. For example: + +.. includecode:: code/akka/docs/future/FutureDocTestBase.java + :include: recover + +In this example, if the actor replied with an ``akka.actor.Status.Failure`` containing the ``ArithmeticException``, +our ``Future`` would have a result of 0. The ``recover`` method works very similarly to the standard try/catch blocks, +so multiple ``Exception``\s can be handled in this manner, and if an ``Exception`` is not handled this way +it will behave as if we hadn't used the ``recover`` method.
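As a concrete illustration of the ``recover``, ``andThen`` and ``fallbackTo`` combinators documented above, here is a minimal, self-contained Scala sketch against the Akka 2.0-era API used throughout this patch. It is an illustration only: the object name ``CombinatorSketch`` is invented, and the implicit dispatcher of a freshly created ``ActorSystem`` is assumed, as in the doc specs elsewhere in this diff.

.. code-block:: scala

   import akka.actor.ActorSystem
   import akka.dispatch.{ Await, Future }
   import akka.util.duration._

   object CombinatorSketch extends App {
     val system = ActorSystem("sketch")
     implicit val dispatcher = system.dispatcher // execution context for Future.apply

     // recover: replace a matching failure with a fallback value
     val recovered = Future { 1 / 0 } recover { case _: ArithmeticException ⇒ 0 }

     // fallbackTo: if the first Future fails, take the result of the second
     val primary = Future[String] { sys.error("OHNOES") }
     val chosen = primary fallbackTo Future { "bar" }

     // andThen: run side effects in a guaranteed order; the result is passed through
     val logged = chosen andThen {
       case Left(failure) ⇒ println("failed: " + failure)
       case Right(value)  ⇒ println("got: " + value)
     } andThen {
       case _ ⇒ println("done")
     }

     println(Await.result(recovered, 1 second)) // prints 0
     println(Await.result(logged, 1 second))    // prints bar
     system.shutdown()
   }

Note that ``andThen`` on this vintage of Akka matches on ``Either[Throwable, T]`` (``Left`` for failure, ``Right`` for success), as the ``and-then`` samples above show.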
+ +You can also use the ``recoverWith`` method, which has the same relationship to ``recover`` as ``flatMap`` has to ``map``, +and is used like this: + +.. includecode:: code/akka/docs/future/FutureDocTestBase.java + :include: try-recover + diff --git a/akka-docs/java/remoting.rst b/akka-docs/java/remoting.rst index bb7c12b199..196d7a40a5 100644 --- a/akka-docs/java/remoting.rst +++ b/akka-docs/java/remoting.rst @@ -25,7 +25,7 @@ to your ``application.conf`` file:: provider = "akka.remote.RemoteActorRefProvider" } remote { - transport = "akka.remote.netty.NettyRemoteSupport" + transport = "akka.remote.netty.NettyRemoteTransport" server { hostname = "127.0.0.1" port = 2552 diff --git a/akka-docs/java/untyped-actors.rst b/akka-docs/java/untyped-actors.rst index d755359f60..5120bb908d 100644 --- a/akka-docs/java/untyped-actors.rst +++ b/akka-docs/java/untyped-actors.rst @@ -323,15 +323,15 @@ Ask: Send-And-Receive-Future The ``ask`` pattern involves actors as well as futures, hence it is offered as a use pattern rather than a method on :class:`ActorRef`: -.. includecode:: code/akka/docs/actor/UntypedActorDocTestBase.java#import-askPipeTo +.. includecode:: code/akka/docs/actor/UntypedActorDocTestBase.java#import-askPipe -.. includecode:: code/akka/docs/actor/UntypedActorDocTestBase.java#ask-pipeTo +.. includecode:: code/akka/docs/actor/UntypedActorDocTestBase.java#ask-pipe -This example demonstrates ``ask`` together with the ``pipeTo`` pattern on +This example demonstrates ``ask`` together with the ``pipe`` pattern on futures, because this is likely to be a common combination. Please note that all of the above is completely non-blocking and asynchronous: ``ask`` produces a :class:`Future`, two of which are composed into a new future using the -:meth:`Futures.sequence` and :meth:`map` methods and then ``pipeTo`` installs +:meth:`Futures.sequence` and :meth:`map` methods and then ``pipe`` installs an ``onComplete``-handler on the future to effect the submission of the aggregated :class:`Result` to another actor. diff --git a/akka-docs/modules/microkernel.rst b/akka-docs/modules/microkernel.rst index ec6eabe3ef..7600e1ebd2 100644 --- a/akka-docs/modules/microkernel.rst +++ b/akka-docs/modules/microkernel.rst @@ -24,7 +24,7 @@ command (on a unix-based system): .. code-block:: none - bin/start sample.kernel.hello.HelloKernel + bin/akka sample.kernel.hello.HelloKernel Use ``Ctrl-C`` to interrupt and exit the microkernel. diff --git a/akka-docs/scala/actors.rst b/akka-docs/scala/actors.rst index c6faa5e7e1..01ef6ade83 100644 --- a/akka-docs/scala/actors.rst +++ b/akka-docs/scala/actors.rst @@ -657,3 +657,7 @@ extend that, either through inheritance or delegation, is to use ``PartialFunction.orElse`` chaining. .. includecode:: code/akka/docs/actor/ActorDocSpec.scala#receive-orElse + +Or: + +.. 
includecode:: code/akka/docs/actor/ActorDocSpec.scala#receive-orElse2 \ No newline at end of file diff --git a/akka-docs/scala/code/akka/docs/actor/ActorDocSpec.scala b/akka-docs/scala/code/akka/docs/actor/ActorDocSpec.scala index a4c903b564..55a205746f 100644 --- a/akka-docs/scala/code/akka/docs/actor/ActorDocSpec.scala +++ b/akka-docs/scala/code/akka/docs/actor/ActorDocSpec.scala @@ -133,6 +133,29 @@ class SpecificActor extends GenericActor { case class MyMsg(subject: String) //#receive-orElse +//#receive-orElse2 +trait ComposableActor extends Actor { + private var receives: List[Receive] = List() + protected def registerReceive(receive: Receive) { + receives = receive :: receives + } + + def receive = receives reduce { _ orElse _ } +} + +class MyComposableActor extends ComposableActor { + override def preStart() { + registerReceive({ + case "foo" ⇒ /* Do something */ + }) + + registerReceive({ + case "bar" ⇒ /* Do something */ + }) + } +} + +//#receive-orElse2 class ActorDocSpec extends AkkaSpec(Map("akka.loglevel" -> "INFO")) { "import context" in { @@ -314,7 +337,7 @@ class ActorDocSpec extends AkkaSpec(Map("akka.loglevel" -> "INFO")) { "using pattern ask / pipeTo" in { val actorA, actorB, actorC, actorD = system.actorOf(Props.empty) //#ask-pipeTo - import akka.pattern.{ ask, pipeTo } + import akka.pattern.{ ask, pipe } case class Result(x: Int, s: String, d: Double) case object Request @@ -329,7 +352,7 @@ class ActorDocSpec extends AkkaSpec(Map("akka.loglevel" -> "INFO")) { } yield Result(x, s, d) f pipeTo actorD // .. or .. - pipeTo(f, actorD) + pipe(f) to actorD //#ask-pipeTo } diff --git a/akka-docs/scala/code/akka/docs/actor/FaultHandlingDocSample.scala b/akka-docs/scala/code/akka/docs/actor/FaultHandlingDocSample.scala index fbdf3e25b9..d08bcb53b2 100644 --- a/akka-docs/scala/code/akka/docs/actor/FaultHandlingDocSample.scala +++ b/akka-docs/scala/code/akka/docs/actor/FaultHandlingDocSample.scala @@ -11,7 +11,7 @@ import akka.util.duration._ import akka.util.Duration import akka.util.Timeout import akka.event.LoggingReceive -import akka.pattern.ask +import akka.pattern.{ ask, pipe } import com.typesafe.config.ConfigFactory //#imports diff --git a/akka-docs/scala/code/akka/docs/dispatcher/DispatcherDocSpec.scala b/akka-docs/scala/code/akka/docs/dispatcher/DispatcherDocSpec.scala index d0e0945fe8..0df4e3ca5b 100644 --- a/akka-docs/scala/code/akka/docs/dispatcher/DispatcherDocSpec.scala +++ b/akka-docs/scala/code/akka/docs/dispatcher/DispatcherDocSpec.scala @@ -22,12 +22,17 @@ object DispatcherDocSpec { my-dispatcher { # Dispatcher is the name of the event-based dispatcher type = Dispatcher - # minimum number of threads to cap factor-based core number to - core-pool-size-min = 2 - # No of core threads ... ceil(available processors * factor) - core-pool-size-factor = 2.0 - # maximum number of threads to cap factor-based number to - core-pool-size-max = 10 + # What kind of ExecutionService to use + executor = "thread-pool-executor" + # Configuration for the thread pool + thread-pool-executor { + # minimum number of threads to cap factor-based core number to + core-pool-size-min = 2 + # No of core threads ... ceil(available processors * factor) + core-pool-size-factor = 2.0 + # maximum number of threads to cap factor-based number to + core-pool-size-max = 10 + } # Throughput defines the number of messages that are processed in a batch before the # thread is returned to the pool. Set to 1 for as fair as possible. 
throughput = 100 @@ -37,8 +42,11 @@ object DispatcherDocSpec { //#my-bounded-config my-dispatcher-bounded-queue { type = Dispatcher - core-pool-size-factor = 8.0 - max-pool-size-factor = 16.0 + executor = "thread-pool-executor" + thread-pool-executor { + core-pool-size-factor = 8.0 + max-pool-size-factor = 16.0 + } # Specifies the bounded capacity of the mailbox queue mailbox-capacity = 100 throughput = 3 @@ -48,6 +56,11 @@ object DispatcherDocSpec { //#my-balancing-config my-balancing-dispatcher { type = BalancingDispatcher + executor = "thread-pool-executor" + thread-pool-executor { + core-pool-size-factor = 8.0 + max-pool-size-factor = 16.0 + } } //#my-balancing-config diff --git a/akka-docs/scala/code/akka/docs/extension/ExtensionDocSpec.scala b/akka-docs/scala/code/akka/docs/extension/ExtensionDocSpec.scala index 0c778a4812..05baa28ecb 100644 --- a/akka-docs/scala/code/akka/docs/extension/ExtensionDocSpec.scala +++ b/akka-docs/scala/code/akka/docs/extension/ExtensionDocSpec.scala @@ -41,6 +41,15 @@ object CountExtension //#extensionid object ExtensionDocSpec { + + val config = """ + //#config + akka { + extensions = ["akka.docs.extension.CountExtension$"] + } + //#config + """ + //#extension-usage-actor class MyActor extends Actor { @@ -64,7 +73,7 @@ object ExtensionDocSpec { //#extension-usage-actor-trait } -class ExtensionDocSpec extends AkkaSpec { +class ExtensionDocSpec extends AkkaSpec(ExtensionDocSpec.config) { import ExtensionDocSpec._ "demonstrate how to create an extension in Scala" in { @@ -73,4 +82,10 @@ class ExtensionDocSpec extends AkkaSpec { //#extension-usage } + "demonstrate how to lookup a configured extension in Scala" in { + //#extension-lookup + system.extension(CountExtension) + //#extension-lookup + } + } diff --git a/akka-docs/scala/code/akka/docs/future/FutureDocSpec.scala b/akka-docs/scala/code/akka/docs/future/FutureDocSpec.scala index 175fc08ff5..098fe873ad 100644 --- a/akka-docs/scala/code/akka/docs/future/FutureDocSpec.scala +++ b/akka-docs/scala/code/akka/docs/future/FutureDocSpec.scala @@ -13,6 +13,7 @@ import akka.dispatch.Future import akka.dispatch.Await import akka.util.duration._ import akka.dispatch.Promise +import java.lang.IllegalStateException object FutureDocSpec { @@ -266,6 +267,19 @@ class FutureDocSpec extends AkkaSpec { Await.result(future, 1 second) must be(0) } + "demonstrate usage of recoverWith" in { + implicit val timeout = system.settings.ActorTimeout + val actor = system.actorOf(Props[MyActor]) + val msg1 = -1 + //#try-recover + val future = akka.pattern.ask(actor, msg1) recoverWith { + case e: ArithmeticException ⇒ Promise.successful(0) + case foo: IllegalArgumentException ⇒ Promise.failed[Int](new IllegalStateException("All br0ken!")) + } + //#try-recover + Await.result(future, 1 second) must be(0) + } + "demonstrate usage of zip" in { val future1 = Future { "foo" } val future2 = Future { "bar" } @@ -275,13 +289,28 @@ class FutureDocSpec extends AkkaSpec { Await.result(future3, 1 second) must be("foo bar") } - "demonstrate usage of or" in { + "demonstrate usage of andThen" in { + def loadPage(s: String) = s + val url = "foo bar" + def log(cause: Throwable) = () + def watchSomeTV = () + //#and-then + val result = Future { loadPage(url) } andThen { + case Left(exception) ⇒ log(exception) + } andThen { + case _ ⇒ watchSomeTV + } + //#and-then + Await.result(result, 1 second) must be("foo bar") + } + + "demonstrate usage of fallbackTo" in { val future1 = Future { "foo" } val future2 = Future { "bar" } val future3 = Future { "pigdog" } 
- //#or - val future4 = future1 or future2 or future3 - //#or + //#fallback-to + val future4 = future1 fallbackTo future2 fallbackTo future3 + //#fallback-to Await.result(future4, 1 second) must be("foo") } diff --git a/akka-docs/scala/code/akka/docs/testkit/TestkitDocSpec.scala b/akka-docs/scala/code/akka/docs/testkit/TestkitDocSpec.scala index 3a4608e840..2b2cb003a9 100644 --- a/akka-docs/scala/code/akka/docs/testkit/TestkitDocSpec.scala +++ b/akka-docs/scala/code/akka/docs/testkit/TestkitDocSpec.scala @@ -89,10 +89,10 @@ class TestkitDocSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { val fsm = TestFSMRef(new Actor with FSM[Int, String] { startWith(1, "") when(1) { - case Ev("go") ⇒ goto(2) using "go" + case Event("go", _) ⇒ goto(2) using "go" } when(2) { - case Ev("back") ⇒ goto(1) using "back" + case Event("back", _) ⇒ goto(1) using "back" } }) diff --git a/akka-docs/scala/extending-akka.rst b/akka-docs/scala/extending-akka.rst index 0fe149e0f2..7627326767 100644 --- a/akka-docs/scala/extending-akka.rst +++ b/akka-docs/scala/extending-akka.rst @@ -48,6 +48,11 @@ Loading from Configuration To be able to load extensions from your Akka configuration you must add FQCNs of implementations of either ``ExtensionId`` or ``ExtensionIdProvider`` in the ``akka.extensions`` section of the config you provide to your ``ActorSystem``. +.. includecode:: code/akka/docs/extension/ExtensionDocSpec.scala + :include: config + +Note that in this case ``CountExtension`` is an object and therefore the class name ends with ``$``. + Applicability ============= diff --git a/akka-docs/scala/fault-tolerance-sample.rst b/akka-docs/scala/fault-tolerance-sample.rst index 6859d54a8f..ccda303e45 100644 --- a/akka-docs/scala/fault-tolerance-sample.rst +++ b/akka-docs/scala/fault-tolerance-sample.rst @@ -1,5 +1,53 @@ .. _fault-tolerance-sample-scala: +Diagrams of the Fault Tolerance Sample (Scala) +---------------------------------------------- + + + +.. image:: ../images/faulttolerancesample-normal-flow.png + +*The above diagram illustrates the normal message flow.* + +**Normal flow:** + +======= ================================================================================== +Step Description +======= ================================================================================== +1 The progress ``Listener`` starts the work. +2 The ``Worker`` schedules work by sending ``Do`` messages periodically to itself. +3, 4, 5 When receiving ``Do`` the ``Worker`` tells the ``CounterService`` + to increment the counter, three times. The ``Increment`` message is forwarded + to the ``Counter``, which updates its counter variable and sends the current value + to the ``Storage``. +6, 7 The ``Worker`` asks the ``CounterService`` for the current value of the counter and pipes + the result back to the ``Listener``. +======= ================================================================================== + + +.. image:: ../images/faulttolerancesample-failure-flow.png + +*The above diagram illustrates what happens in case of storage failure.* + +**Failure flow:** + +=========== ================================================================================== +Step Description +=========== ================================================================================== +1 The ``Storage`` throws ``StorageException``. +2 The ``CounterService`` is the supervisor of the ``Storage`` and restarts the + ``Storage`` when ``StorageException`` is thrown. +3, 4, 5, 6 The ``Storage`` continues to fail and is restarted. 
+7 After 3 failures and restarts within 5 seconds the ``Storage`` is stopped by its + supervisor, i.e. the ``CounterService``. +8 The ``CounterService`` is also watching the ``Storage`` for termination and + receives the ``Terminated`` message when the ``Storage`` has been stopped ... +9, 10, 11 and tells the ``Counter`` that there is no ``Storage``. +12 The ``CounterService`` schedules a ``Reconnect`` message to itself. +13, 14 When it receives the ``Reconnect`` message it creates a new ``Storage`` ... +15, 16 and tells the ``Counter`` to use the new ``Storage``. +=========== ================================================================================== + Full Source Code of the Fault Tolerance Sample (Scala) ------------------------------------------------------ diff --git a/akka-docs/scala/fault-tolerance.rst b/akka-docs/scala/fault-tolerance.rst index 8eaf9398b4..f8b9fe0631 100644 --- a/akka-docs/scala/fault-tolerance.rst +++ b/akka-docs/scala/fault-tolerance.rst @@ -43,7 +43,7 @@ For the sake of demonstration let us consider the following strategy: :include: strategy I have chosen a few well-known exception types in order to demonstrate the -application of the fault handling actions described in :ref:`supervision`. +application of the fault handling directives described in :ref:`supervision`. First off, it is a one-for-one strategy, meaning that each child is treated separately (an all-for-one strategy works very similarly, the only difference is that any decision is applied to all children of the supervisor, not only the @@ -53,8 +53,8 @@ that the respective limit does not apply, leaving the possibility to specify an absolute upper limit on the restarts or to make the restarts work infinitely. The match statement which forms the bulk of the body is of type ``Decider``, -which is a ``PartialFunction[Throwable, Action]``. This -is the piece which maps child failure types to their corresponding actions. +which is a ``PartialFunction[Throwable, Directive]``. This +is the piece which maps child failure types to their corresponding directives. Default Supervisor Strategy --------------------------- @@ -76,7 +76,7 @@ in the same way as the default strategy defined above. Test Application ---------------- -The following section shows the effects of the different actions in practice, +The following section shows the effects of the different directives in practice, wherefore a test setup is needed. First off, we need a suitable supervisor: .. includecode:: code/akka/docs/actor/FaultHandlingDocSpec.scala @@ -99,13 +99,13 @@ Let us create actors: .. includecode:: code/akka/docs/actor/FaultHandlingDocSpec.scala :include: create -The first test shall demonstrate the ``Resume`` action, so we try it out by +The first test shall demonstrate the ``Resume`` directive, so we try it out by setting some non-initial state in the actor and have it fail: .. includecode:: code/akka/docs/actor/FaultHandlingDocSpec.scala :include: resume -As you can see the value 42 survives the fault handling action. Now, if we +As you can see the value 42 survives the fault handling directive. Now, if we change the failure to a more serious ``NullPointerException``, that will no longer be the case: @@ -119,7 +119,7 @@ terminated by the supervisor: :include: stop Up to now the supervisor was completely unaffected by the child’s failure, -because the actions set did handle it. In case of an ``Exception``, this is not +because the directives set did handle it. 
In case of an ``Exception``, this is not true anymore and the supervisor escalates the failure. .. includecode:: code/akka/docs/actor/FaultHandlingDocSpec.scala @@ -129,7 +129,7 @@ The supervisor itself is supervised by the top-level actor provided by the :class:`ActorSystem`, which has the default policy to restart in case of all ``Exception`` cases (with the notable exceptions of ``ActorInitializationException`` and ``ActorKilledException``). Since the -default action in case of a restart is to kill all children, we expected our poor +default directive in case of a restart is to kill all children, we expected our poor child not to survive this failure. In case this is not desired (which depends on the use case), we need to use a diff --git a/akka-docs/scala/fsm.rst b/akka-docs/scala/fsm.rst index 618381901c..2b35d21f41 100644 --- a/akka-docs/scala/fsm.rst +++ b/akka-docs/scala/fsm.rst @@ -178,7 +178,7 @@ demonstrated below: .. code-block:: scala when(Idle) { - case Ev(Start(msg)) => // convenience extractor when state data not needed + case Event(Start(msg), _) => goto(Timer) using (msg, sender) } @@ -188,9 +188,8 @@ demonstrated below: goto(Idle) } -The :class:`Event(msg, data)` case class may be used directly in the pattern as -shown in state Idle, or you may use the extractor :obj:`Ev(msg)` when the state -data are not needed. +The :class:`Event(msg: Any, data: D)` case class is parameterized with the data +type held by the FSM for convenient pattern matching. Defining the Initial State -------------------------- @@ -216,7 +215,7 @@ do something else in this case you can specify that with case Event(x : X, data) => log.info(this, "Received unhandled event: " + x) stay - case Ev(msg) => + case Event(msg, _) => log.warn(this, "Received unknown event: " + x) goto(Error) } @@ -259,7 +258,7 @@ All modifiers can be chained to achieve a nice and concise description: .. code-block:: scala when(State) { - case Ev(msg) => + case Event(msg, _) => goto(Processing) using (msg) forMax (5 seconds) replying (WillDo) } @@ -396,7 +395,7 @@ state data which is available during termination handling. .. code-block:: scala when(A) { - case Ev(Stop) => + case Event(Stop, _) => doCleanup() stop() } diff --git a/akka-docs/scala/futures.rst b/akka-docs/scala/futures.rst index c46db30927..181cc9f8fa 100644 --- a/akka-docs/scala/futures.rst +++ b/akka-docs/scala/futures.rst @@ -198,14 +198,26 @@ For this Akka supports ``onComplete``, ``onSuccess`` and ``onFailure``, of which .. includecode:: code/akka/docs/future/FutureDocSpec.scala :include: onComplete +Ordering +-------- + +Since callbacks are executed in any order and potentially in parallel, +it can be tricky when you need sequential ordering of operations. +But there's a solution! And its name is ``andThen``, and it creates a new Future with +the specified callback, a Future that will have the same result as the Future it's called on, +which allows for ordering like in the following sample: + +.. includecode:: code/akka/docs/future/FutureDocSpec.scala + :include: and-then + Auxiliary methods ----------------- -``Future`` ``or`` combines 2 Futures into a new ``Future``, and will hold the successful value of the second ``Future` +``Future`` ``fallbackTo`` combines 2 Futures into a new ``Future``, and will hold the successful value of the second ``Future`` if the first ``Future`` fails. .. 
includecode:: code/akka/docs/future/FutureDocSpec.scala - :include: or + :include: fallback-to You can also combine two Futures into a new ``Future`` that will hold a tuple of the two Futures successful results, using the ``zip`` operation. @@ -232,3 +244,9 @@ our ``Future`` would have a result of 0. The ``recover`` method works very simil so multiple ``Exception``\s can be handled in this manner, and if an ``Exception`` is not handled this way it will behave as if we hadn't used the ``recover`` method. +You can also use the ``recoverWith`` method, which has the same relationship to ``recover`` as ``flatMap`` has to ``map``, +and is used like this: + +.. includecode:: code/akka/docs/future/FutureDocSpec.scala + :include: try-recover + diff --git a/akka-docs/scala/remoting.rst b/akka-docs/scala/remoting.rst index 2e1c4b7b8c..ae2aa0f411 100644 --- a/akka-docs/scala/remoting.rst +++ b/akka-docs/scala/remoting.rst @@ -26,7 +26,7 @@ to your ``application.conf`` file:: provider = "akka.remote.RemoteActorRefProvider" } remote { - transport = "akka.remote.netty.NettyRemoteSupport" + transport = "akka.remote.netty.NettyRemoteTransport" server { hostname = "127.0.0.1" port = 2552 diff --git a/akka-kernel/src/main/dist/bin/akka b/akka-kernel/src/main/dist/bin/akka index 595bc6e34c..84ae2e5d78 100755 --- a/akka-kernel/src/main/dist/bin/akka +++ b/akka-kernel/src/main/dist/bin/akka @@ -19,6 +19,6 @@ declare AKKA_HOME="$(cd "$(cd "$(dirname "$0")"; pwd -P)"/..; pwd)" [ -n "$JAVA_OPTS" ] || JAVA_OPTS="-Xmx1024M -Xms1024M -Xss1M -XX:MaxPermSize=256M -XX:+UseParallelGC -XX:OnOutOfMemoryError=\"kill -9 %p\"" -[ -n "$AKKA_CLASSPATH" ] || AKKA_CLASSPATH="$AKKA_HOME/lib/scala-library.jar:$AKKA_HOME/lib/akka/*:$AKKA_HOME/config" +[ -n "$AKKA_CLASSPATH" ] || AKKA_CLASSPATH="$AKKA_HOME/lib/scala-library.jar:$AKKA_HOME/config:$AKKA_HOME/lib/akka/*" java "$JAVA_OPTS" -cp "$AKKA_CLASSPATH" -Dakka.home="$AKKA_HOME" -Dakka.kernel.quiet=$quiet akka.kernel.Main "$@" diff --git a/akka-remote/src/main/java/akka/remote/RemoteProtocol.java b/akka-remote/src/main/java/akka/remote/RemoteProtocol.java index 90c493e176..0fcb423d1e 100644 --- a/akka-remote/src/main/java/akka/remote/RemoteProtocol.java +++ b/akka-remote/src/main/java/akka/remote/RemoteProtocol.java @@ -12,10 +12,12 @@ public final class RemoteProtocol { implements com.google.protobuf.ProtocolMessageEnum { CONNECT(0, 1), SHUTDOWN(1, 2), + HEARTBEAT(2, 3), ; public static final int CONNECT_VALUE = 1; public static final int SHUTDOWN_VALUE = 2; + public static final int HEARTBEAT_VALUE = 3; public final int getNumber() { return value; } @@ -24,6 +26,7 @@ public final class RemoteProtocol { switch (value) { case 1: return CONNECT; case 2: return SHUTDOWN; + case 3: return HEARTBEAT; default: return null; } } @@ -54,7 +57,7 @@ public final class RemoteProtocol { } private static final CommandType[] VALUES = { - CONNECT, SHUTDOWN, + CONNECT, SHUTDOWN, HEARTBEAT, }; public static CommandType valueOf( @@ -460,7 +463,7 @@ public final class RemoteProtocol { private static Builder create() { return new Builder(); } - + public Builder clear() { super.clear(); if (messageBuilder_ == null) { @@ -477,20 +480,20 @@ public final class RemoteProtocol { bitField0_ = (bitField0_ & ~0x00000002); return this; } - + public Builder clone() { return create().mergeFrom(buildPartial()); } - + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return akka.remote.RemoteProtocol.AkkaRemoteProtocol.getDescriptor(); } - + public 
akka.remote.RemoteProtocol.AkkaRemoteProtocol getDefaultInstanceForType() { return akka.remote.RemoteProtocol.AkkaRemoteProtocol.getDefaultInstance(); } - + public akka.remote.RemoteProtocol.AkkaRemoteProtocol build() { akka.remote.RemoteProtocol.AkkaRemoteProtocol result = buildPartial(); if (!result.isInitialized()) { @@ -498,7 +501,7 @@ public final class RemoteProtocol { } return result; } - + private akka.remote.RemoteProtocol.AkkaRemoteProtocol buildParsed() throws com.google.protobuf.InvalidProtocolBufferException { akka.remote.RemoteProtocol.AkkaRemoteProtocol result = buildPartial(); @@ -508,7 +511,7 @@ public final class RemoteProtocol { } return result; } - + public akka.remote.RemoteProtocol.AkkaRemoteProtocol buildPartial() { akka.remote.RemoteProtocol.AkkaRemoteProtocol result = new akka.remote.RemoteProtocol.AkkaRemoteProtocol(this); int from_bitField0_ = bitField0_; @@ -533,7 +536,7 @@ public final class RemoteProtocol { onBuilt(); return result; } - + public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof akka.remote.RemoteProtocol.AkkaRemoteProtocol) { return mergeFrom((akka.remote.RemoteProtocol.AkkaRemoteProtocol)other); @@ -542,7 +545,7 @@ public final class RemoteProtocol { return this; } } - + public Builder mergeFrom(akka.remote.RemoteProtocol.AkkaRemoteProtocol other) { if (other == akka.remote.RemoteProtocol.AkkaRemoteProtocol.getDefaultInstance()) return this; if (other.hasMessage()) { @@ -554,23 +557,23 @@ public final class RemoteProtocol { this.mergeUnknownFields(other.getUnknownFields()); return this; } - + public final boolean isInitialized() { if (hasMessage()) { if (!getMessage().isInitialized()) { - + return false; } } if (hasInstruction()) { if (!getInstruction().isInitialized()) { - + return false; } } return true; } - + public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -615,9 +618,9 @@ public final class RemoteProtocol { } } } - + private int bitField0_; - + // optional .RemoteMessageProtocol message = 1; private akka.remote.RemoteProtocol.RemoteMessageProtocol message_ = akka.remote.RemoteProtocol.RemoteMessageProtocol.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< @@ -695,7 +698,7 @@ public final class RemoteProtocol { } } private com.google.protobuf.SingleFieldBuilder< - akka.remote.RemoteProtocol.RemoteMessageProtocol, akka.remote.RemoteProtocol.RemoteMessageProtocol.Builder, akka.remote.RemoteProtocol.RemoteMessageProtocolOrBuilder> + akka.remote.RemoteProtocol.RemoteMessageProtocol, akka.remote.RemoteProtocol.RemoteMessageProtocol.Builder, akka.remote.RemoteProtocol.RemoteMessageProtocolOrBuilder> getMessageFieldBuilder() { if (messageBuilder_ == null) { messageBuilder_ = new com.google.protobuf.SingleFieldBuilder< @@ -707,7 +710,7 @@ public final class RemoteProtocol { } return messageBuilder_; } - + // optional .RemoteControlProtocol instruction = 2; private akka.remote.RemoteProtocol.RemoteControlProtocol instruction_ = akka.remote.RemoteProtocol.RemoteControlProtocol.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< @@ -785,7 +788,7 @@ public final class RemoteProtocol { } } private com.google.protobuf.SingleFieldBuilder< - akka.remote.RemoteProtocol.RemoteControlProtocol, akka.remote.RemoteProtocol.RemoteControlProtocol.Builder, akka.remote.RemoteProtocol.RemoteControlProtocolOrBuilder> + akka.remote.RemoteProtocol.RemoteControlProtocol, 
akka.remote.RemoteProtocol.RemoteControlProtocol.Builder, akka.remote.RemoteProtocol.RemoteControlProtocolOrBuilder> getInstructionFieldBuilder() { if (instructionBuilder_ == null) { instructionBuilder_ = new com.google.protobuf.SingleFieldBuilder< @@ -797,42 +800,42 @@ public final class RemoteProtocol { } return instructionBuilder_; } - + // @@protoc_insertion_point(builder_scope:AkkaRemoteProtocol) } - + static { defaultInstance = new AkkaRemoteProtocol(true); defaultInstance.initFields(); } - + // @@protoc_insertion_point(class_scope:AkkaRemoteProtocol) } - + public interface RemoteMessageProtocolOrBuilder extends com.google.protobuf.MessageOrBuilder { - + // required .ActorRefProtocol recipient = 1; boolean hasRecipient(); akka.remote.RemoteProtocol.ActorRefProtocol getRecipient(); akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder getRecipientOrBuilder(); - + // required .MessageProtocol message = 2; boolean hasMessage(); akka.remote.RemoteProtocol.MessageProtocol getMessage(); akka.remote.RemoteProtocol.MessageProtocolOrBuilder getMessageOrBuilder(); - + // optional .ActorRefProtocol sender = 4; boolean hasSender(); akka.remote.RemoteProtocol.ActorRefProtocol getSender(); akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder getSenderOrBuilder(); - + // repeated .MetadataEntryProtocol metadata = 5; - java.util.List + java.util.List getMetadataList(); akka.remote.RemoteProtocol.MetadataEntryProtocol getMetadata(int index); int getMetadataCount(); - java.util.List + java.util.List getMetadataOrBuilderList(); akka.remote.RemoteProtocol.MetadataEntryProtocolOrBuilder getMetadataOrBuilder( int index); @@ -845,26 +848,26 @@ public final class RemoteProtocol { super(builder); } private RemoteMessageProtocol(boolean noInit) {} - + private static final RemoteMessageProtocol defaultInstance; public static RemoteMessageProtocol getDefaultInstance() { return defaultInstance; } - + public RemoteMessageProtocol getDefaultInstanceForType() { return defaultInstance; } - + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return akka.remote.RemoteProtocol.internal_static_RemoteMessageProtocol_descriptor; } - + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return akka.remote.RemoteProtocol.internal_static_RemoteMessageProtocol_fieldAccessorTable; } - + private int bitField0_; // required .ActorRefProtocol recipient = 1; public static final int RECIPIENT_FIELD_NUMBER = 1; @@ -878,7 +881,7 @@ public final class RemoteProtocol { public akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder getRecipientOrBuilder() { return recipient_; } - + // required .MessageProtocol message = 2; public static final int MESSAGE_FIELD_NUMBER = 2; private akka.remote.RemoteProtocol.MessageProtocol message_; @@ -891,7 +894,7 @@ public final class RemoteProtocol { public akka.remote.RemoteProtocol.MessageProtocolOrBuilder getMessageOrBuilder() { return message_; } - + // optional .ActorRefProtocol sender = 4; public static final int SENDER_FIELD_NUMBER = 4; private akka.remote.RemoteProtocol.ActorRefProtocol sender_; @@ -904,14 +907,14 @@ public final class RemoteProtocol { public akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder getSenderOrBuilder() { return sender_; } - + // repeated .MetadataEntryProtocol metadata = 5; public static final int METADATA_FIELD_NUMBER = 5; private java.util.List metadata_; public java.util.List getMetadataList() { return metadata_; } - public java.util.List + public java.util.List 
getMetadataOrBuilderList() { return metadata_; } @@ -925,7 +928,7 @@ public final class RemoteProtocol { int index) { return metadata_.get(index); } - + private void initFields() { recipient_ = akka.remote.RemoteProtocol.ActorRefProtocol.getDefaultInstance(); message_ = akka.remote.RemoteProtocol.MessageProtocol.getDefaultInstance(); @@ -936,7 +939,7 @@ public final class RemoteProtocol { public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - + if (!hasRecipient()) { memoizedIsInitialized = 0; return false; @@ -968,7 +971,7 @@ public final class RemoteProtocol { memoizedIsInitialized = 1; return true; } - + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); @@ -986,12 +989,12 @@ public final class RemoteProtocol { } getUnknownFields().writeTo(output); } - + private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; - + size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream @@ -1013,14 +1016,14 @@ public final class RemoteProtocol { memoizedSerializedSize = size; return size; } - + private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } - + public static akka.remote.RemoteProtocol.RemoteMessageProtocol parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { @@ -1087,14 +1090,14 @@ public final class RemoteProtocol { return newBuilder().mergeFrom(input, extensionRegistry) .buildParsed(); } - + public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(akka.remote.RemoteProtocol.RemoteMessageProtocol prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } - + @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { @@ -1108,17 +1111,17 @@ public final class RemoteProtocol { getDescriptor() { return akka.remote.RemoteProtocol.internal_static_RemoteMessageProtocol_descriptor; } - + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return akka.remote.RemoteProtocol.internal_static_RemoteMessageProtocol_fieldAccessorTable; } - + // Construct using akka.remote.RemoteProtocol.RemoteMessageProtocol.newBuilder() private Builder() { maybeForceBuilderInitialization(); } - + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); @@ -1134,7 +1137,7 @@ public final class RemoteProtocol { private static Builder create() { return new Builder(); } - + public Builder clear() { super.clear(); if (recipientBuilder_ == null) { @@ -1163,20 +1166,20 @@ public final class RemoteProtocol { } return this; } - + public Builder clone() { return create().mergeFrom(buildPartial()); } - + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return akka.remote.RemoteProtocol.RemoteMessageProtocol.getDescriptor(); } - + public akka.remote.RemoteProtocol.RemoteMessageProtocol getDefaultInstanceForType() { return akka.remote.RemoteProtocol.RemoteMessageProtocol.getDefaultInstance(); } - + public 
akka.remote.RemoteProtocol.RemoteMessageProtocol build() { akka.remote.RemoteProtocol.RemoteMessageProtocol result = buildPartial(); if (!result.isInitialized()) { @@ -1184,7 +1187,7 @@ public final class RemoteProtocol { } return result; } - + private akka.remote.RemoteProtocol.RemoteMessageProtocol buildParsed() throws com.google.protobuf.InvalidProtocolBufferException { akka.remote.RemoteProtocol.RemoteMessageProtocol result = buildPartial(); @@ -1194,7 +1197,7 @@ public final class RemoteProtocol { } return result; } - + public akka.remote.RemoteProtocol.RemoteMessageProtocol buildPartial() { akka.remote.RemoteProtocol.RemoteMessageProtocol result = new akka.remote.RemoteProtocol.RemoteMessageProtocol(this); int from_bitField0_ = bitField0_; @@ -1236,7 +1239,7 @@ public final class RemoteProtocol { onBuilt(); return result; } - + public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof akka.remote.RemoteProtocol.RemoteMessageProtocol) { return mergeFrom((akka.remote.RemoteProtocol.RemoteMessageProtocol)other); @@ -1245,7 +1248,7 @@ public final class RemoteProtocol { return this; } } - + public Builder mergeFrom(akka.remote.RemoteProtocol.RemoteMessageProtocol other) { if (other == akka.remote.RemoteProtocol.RemoteMessageProtocol.getDefaultInstance()) return this; if (other.hasRecipient()) { @@ -1275,7 +1278,7 @@ public final class RemoteProtocol { metadataBuilder_ = null; metadata_ = other.metadata_; bitField0_ = (bitField0_ & ~0x00000008); - metadataBuilder_ = + metadataBuilder_ = com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? getMetadataFieldBuilder() : null; } else { @@ -1286,39 +1289,39 @@ public final class RemoteProtocol { this.mergeUnknownFields(other.getUnknownFields()); return this; } - + public final boolean isInitialized() { if (!hasRecipient()) { - + return false; } if (!hasMessage()) { - + return false; } if (!getRecipient().isInitialized()) { - + return false; } if (!getMessage().isInitialized()) { - + return false; } if (hasSender()) { if (!getSender().isInitialized()) { - + return false; } } for (int i = 0; i < getMetadataCount(); i++) { if (!getMetadata(i).isInitialized()) { - + return false; } } return true; } - + public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -1378,9 +1381,9 @@ public final class RemoteProtocol { } } } - + private int bitField0_; - + // required .ActorRefProtocol recipient = 1; private akka.remote.RemoteProtocol.ActorRefProtocol recipient_ = akka.remote.RemoteProtocol.ActorRefProtocol.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< @@ -1458,7 +1461,7 @@ public final class RemoteProtocol { } } private com.google.protobuf.SingleFieldBuilder< - akka.remote.RemoteProtocol.ActorRefProtocol, akka.remote.RemoteProtocol.ActorRefProtocol.Builder, akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder> + akka.remote.RemoteProtocol.ActorRefProtocol, akka.remote.RemoteProtocol.ActorRefProtocol.Builder, akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder> getRecipientFieldBuilder() { if (recipientBuilder_ == null) { recipientBuilder_ = new com.google.protobuf.SingleFieldBuilder< @@ -1470,7 +1473,7 @@ public final class RemoteProtocol { } return recipientBuilder_; } - + // required .MessageProtocol message = 2; private akka.remote.RemoteProtocol.MessageProtocol message_ = akka.remote.RemoteProtocol.MessageProtocol.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< @@ -1548,7 +1551,7 @@ public final 
class RemoteProtocol { } } private com.google.protobuf.SingleFieldBuilder< - akka.remote.RemoteProtocol.MessageProtocol, akka.remote.RemoteProtocol.MessageProtocol.Builder, akka.remote.RemoteProtocol.MessageProtocolOrBuilder> + akka.remote.RemoteProtocol.MessageProtocol, akka.remote.RemoteProtocol.MessageProtocol.Builder, akka.remote.RemoteProtocol.MessageProtocolOrBuilder> getMessageFieldBuilder() { if (messageBuilder_ == null) { messageBuilder_ = new com.google.protobuf.SingleFieldBuilder< @@ -1560,7 +1563,7 @@ public final class RemoteProtocol { } return messageBuilder_; } - + // optional .ActorRefProtocol sender = 4; private akka.remote.RemoteProtocol.ActorRefProtocol sender_ = akka.remote.RemoteProtocol.ActorRefProtocol.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< @@ -1638,7 +1641,7 @@ public final class RemoteProtocol { } } private com.google.protobuf.SingleFieldBuilder< - akka.remote.RemoteProtocol.ActorRefProtocol, akka.remote.RemoteProtocol.ActorRefProtocol.Builder, akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder> + akka.remote.RemoteProtocol.ActorRefProtocol, akka.remote.RemoteProtocol.ActorRefProtocol.Builder, akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder> getSenderFieldBuilder() { if (senderBuilder_ == null) { senderBuilder_ = new com.google.protobuf.SingleFieldBuilder< @@ -1650,7 +1653,7 @@ public final class RemoteProtocol { } return senderBuilder_; } - + // repeated .MetadataEntryProtocol metadata = 5; private java.util.List metadata_ = java.util.Collections.emptyList(); @@ -1660,10 +1663,10 @@ public final class RemoteProtocol { bitField0_ |= 0x00000008; } } - + private com.google.protobuf.RepeatedFieldBuilder< akka.remote.RemoteProtocol.MetadataEntryProtocol, akka.remote.RemoteProtocol.MetadataEntryProtocol.Builder, akka.remote.RemoteProtocol.MetadataEntryProtocolOrBuilder> metadataBuilder_; - + public java.util.List getMetadataList() { if (metadataBuilder_ == null) { return java.util.Collections.unmodifiableList(metadata_); @@ -1801,7 +1804,7 @@ public final class RemoteProtocol { return metadataBuilder_.getMessageOrBuilder(index); } } - public java.util.List + public java.util.List getMetadataOrBuilderList() { if (metadataBuilder_ != null) { return metadataBuilder_.getMessageOrBuilderList(); @@ -1818,12 +1821,12 @@ public final class RemoteProtocol { return getMetadataFieldBuilder().addBuilder( index, akka.remote.RemoteProtocol.MetadataEntryProtocol.getDefaultInstance()); } - public java.util.List + public java.util.List getMetadataBuilderList() { return getMetadataFieldBuilder().getBuilderList(); } private com.google.protobuf.RepeatedFieldBuilder< - akka.remote.RemoteProtocol.MetadataEntryProtocol, akka.remote.RemoteProtocol.MetadataEntryProtocol.Builder, akka.remote.RemoteProtocol.MetadataEntryProtocolOrBuilder> + akka.remote.RemoteProtocol.MetadataEntryProtocol, akka.remote.RemoteProtocol.MetadataEntryProtocol.Builder, akka.remote.RemoteProtocol.MetadataEntryProtocolOrBuilder> getMetadataFieldBuilder() { if (metadataBuilder_ == null) { metadataBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< @@ -1836,29 +1839,29 @@ public final class RemoteProtocol { } return metadataBuilder_; } - + // @@protoc_insertion_point(builder_scope:RemoteMessageProtocol) } - + static { defaultInstance = new RemoteMessageProtocol(true); defaultInstance.initFields(); } - + // @@protoc_insertion_point(class_scope:RemoteMessageProtocol) } - + public interface RemoteControlProtocolOrBuilder extends com.google.protobuf.MessageOrBuilder { - + // required 
.CommandType commandType = 1; boolean hasCommandType(); akka.remote.RemoteProtocol.CommandType getCommandType(); - + // optional string cookie = 2; boolean hasCookie(); String getCookie(); - + // optional .AddressProtocol origin = 3; boolean hasOrigin(); akka.remote.RemoteProtocol.AddressProtocol getOrigin(); @@ -1872,26 +1875,26 @@ public final class RemoteProtocol { super(builder); } private RemoteControlProtocol(boolean noInit) {} - + private static final RemoteControlProtocol defaultInstance; public static RemoteControlProtocol getDefaultInstance() { return defaultInstance; } - + public RemoteControlProtocol getDefaultInstanceForType() { return defaultInstance; } - + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return akka.remote.RemoteProtocol.internal_static_RemoteControlProtocol_descriptor; } - + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return akka.remote.RemoteProtocol.internal_static_RemoteControlProtocol_fieldAccessorTable; } - + private int bitField0_; // required .CommandType commandType = 1; public static final int COMMANDTYPE_FIELD_NUMBER = 1; @@ -1902,7 +1905,7 @@ public final class RemoteProtocol { public akka.remote.RemoteProtocol.CommandType getCommandType() { return commandType_; } - + // optional string cookie = 2; public static final int COOKIE_FIELD_NUMBER = 2; private java.lang.Object cookie_; @@ -1914,7 +1917,7 @@ public final class RemoteProtocol { if (ref instanceof String) { return (String) ref; } else { - com.google.protobuf.ByteString bs = + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; String s = bs.toStringUtf8(); if (com.google.protobuf.Internal.isValidUtf8(bs)) { @@ -1926,7 +1929,7 @@ public final class RemoteProtocol { private com.google.protobuf.ByteString getCookieBytes() { java.lang.Object ref = cookie_; if (ref instanceof String) { - com.google.protobuf.ByteString b = + com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8((String) ref); cookie_ = b; return b; @@ -1934,7 +1937,7 @@ public final class RemoteProtocol { return (com.google.protobuf.ByteString) ref; } } - + // optional .AddressProtocol origin = 3; public static final int ORIGIN_FIELD_NUMBER = 3; private akka.remote.RemoteProtocol.AddressProtocol origin_; @@ -1947,7 +1950,7 @@ public final class RemoteProtocol { public akka.remote.RemoteProtocol.AddressProtocolOrBuilder getOriginOrBuilder() { return origin_; } - + private void initFields() { commandType_ = akka.remote.RemoteProtocol.CommandType.CONNECT; cookie_ = ""; @@ -1957,7 +1960,7 @@ public final class RemoteProtocol { public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - + if (!hasCommandType()) { memoizedIsInitialized = 0; return false; @@ -1971,7 +1974,7 @@ public final class RemoteProtocol { memoizedIsInitialized = 1; return true; } - + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); @@ -1986,12 +1989,12 @@ public final class RemoteProtocol { } getUnknownFields().writeTo(output); } - + private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; - + size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream @@ -2009,14 +2012,14 @@ public final class RemoteProtocol { memoizedSerializedSize = size; return size; } - + 
private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } - + public static akka.remote.RemoteProtocol.RemoteControlProtocol parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { @@ -2083,14 +2086,14 @@ public final class RemoteProtocol { return newBuilder().mergeFrom(input, extensionRegistry) .buildParsed(); } - + public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(akka.remote.RemoteProtocol.RemoteControlProtocol prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } - + @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { @@ -2104,17 +2107,17 @@ public final class RemoteProtocol { getDescriptor() { return akka.remote.RemoteProtocol.internal_static_RemoteControlProtocol_descriptor; } - + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return akka.remote.RemoteProtocol.internal_static_RemoteControlProtocol_fieldAccessorTable; } - + // Construct using akka.remote.RemoteProtocol.RemoteControlProtocol.newBuilder() private Builder() { maybeForceBuilderInitialization(); } - + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); @@ -2127,7 +2130,7 @@ public final class RemoteProtocol { private static Builder create() { return new Builder(); } - + public Builder clear() { super.clear(); commandType_ = akka.remote.RemoteProtocol.CommandType.CONNECT; @@ -2142,20 +2145,20 @@ public final class RemoteProtocol { bitField0_ = (bitField0_ & ~0x00000004); return this; } - + public Builder clone() { return create().mergeFrom(buildPartial()); } - + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return akka.remote.RemoteProtocol.RemoteControlProtocol.getDescriptor(); } - + public akka.remote.RemoteProtocol.RemoteControlProtocol getDefaultInstanceForType() { return akka.remote.RemoteProtocol.RemoteControlProtocol.getDefaultInstance(); } - + public akka.remote.RemoteProtocol.RemoteControlProtocol build() { akka.remote.RemoteProtocol.RemoteControlProtocol result = buildPartial(); if (!result.isInitialized()) { @@ -2163,7 +2166,7 @@ public final class RemoteProtocol { } return result; } - + private akka.remote.RemoteProtocol.RemoteControlProtocol buildParsed() throws com.google.protobuf.InvalidProtocolBufferException { akka.remote.RemoteProtocol.RemoteControlProtocol result = buildPartial(); @@ -2173,7 +2176,7 @@ public final class RemoteProtocol { } return result; } - + public akka.remote.RemoteProtocol.RemoteControlProtocol buildPartial() { akka.remote.RemoteProtocol.RemoteControlProtocol result = new akka.remote.RemoteProtocol.RemoteControlProtocol(this); int from_bitField0_ = bitField0_; @@ -2198,7 +2201,7 @@ public final class RemoteProtocol { onBuilt(); return result; } - + public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof akka.remote.RemoteProtocol.RemoteControlProtocol) { return mergeFrom((akka.remote.RemoteProtocol.RemoteControlProtocol)other); @@ -2207,7 +2210,7 @@ public final class RemoteProtocol { return this; } } - + public Builder mergeFrom(akka.remote.RemoteProtocol.RemoteControlProtocol other) { if (other == 
akka.remote.RemoteProtocol.RemoteControlProtocol.getDefaultInstance()) return this; if (other.hasCommandType()) { @@ -2222,21 +2225,21 @@ public final class RemoteProtocol { this.mergeUnknownFields(other.getUnknownFields()); return this; } - + public final boolean isInitialized() { if (!hasCommandType()) { - + return false; } if (hasOrigin()) { if (!getOrigin().isInitialized()) { - + return false; } } return true; } - + public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -2288,9 +2291,9 @@ public final class RemoteProtocol { } } } - + private int bitField0_; - + // required .CommandType commandType = 1; private akka.remote.RemoteProtocol.CommandType commandType_ = akka.remote.RemoteProtocol.CommandType.CONNECT; public boolean hasCommandType() { @@ -2314,7 +2317,7 @@ public final class RemoteProtocol { onChanged(); return this; } - + // optional string cookie = 2; private java.lang.Object cookie_ = ""; public boolean hasCookie() { @@ -2350,7 +2353,7 @@ public final class RemoteProtocol { cookie_ = value; onChanged(); } - + // optional .AddressProtocol origin = 3; private akka.remote.RemoteProtocol.AddressProtocol origin_ = akka.remote.RemoteProtocol.AddressProtocol.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< @@ -2428,7 +2431,7 @@ public final class RemoteProtocol { } } private com.google.protobuf.SingleFieldBuilder< - akka.remote.RemoteProtocol.AddressProtocol, akka.remote.RemoteProtocol.AddressProtocol.Builder, akka.remote.RemoteProtocol.AddressProtocolOrBuilder> + akka.remote.RemoteProtocol.AddressProtocol, akka.remote.RemoteProtocol.AddressProtocol.Builder, akka.remote.RemoteProtocol.AddressProtocolOrBuilder> getOriginFieldBuilder() { if (originBuilder_ == null) { originBuilder_ = new com.google.protobuf.SingleFieldBuilder< @@ -2440,21 +2443,21 @@ public final class RemoteProtocol { } return originBuilder_; } - + // @@protoc_insertion_point(builder_scope:RemoteControlProtocol) } - + static { defaultInstance = new RemoteControlProtocol(true); defaultInstance.initFields(); } - + // @@protoc_insertion_point(class_scope:RemoteControlProtocol) } - + public interface ActorRefProtocolOrBuilder extends com.google.protobuf.MessageOrBuilder { - + // required string path = 1; boolean hasPath(); String getPath(); @@ -2467,26 +2470,26 @@ public final class RemoteProtocol { super(builder); } private ActorRefProtocol(boolean noInit) {} - + private static final ActorRefProtocol defaultInstance; public static ActorRefProtocol getDefaultInstance() { return defaultInstance; } - + public ActorRefProtocol getDefaultInstanceForType() { return defaultInstance; } - + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return akka.remote.RemoteProtocol.internal_static_ActorRefProtocol_descriptor; } - + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return akka.remote.RemoteProtocol.internal_static_ActorRefProtocol_fieldAccessorTable; } - + private int bitField0_; // required string path = 1; public static final int PATH_FIELD_NUMBER = 1; @@ -2499,7 +2502,7 @@ public final class RemoteProtocol { if (ref instanceof String) { return (String) ref; } else { - com.google.protobuf.ByteString bs = + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; String s = bs.toStringUtf8(); if (com.google.protobuf.Internal.isValidUtf8(bs)) { @@ -2511,7 +2514,7 @@ public final class RemoteProtocol { private 
com.google.protobuf.ByteString getPathBytes() { java.lang.Object ref = path_; if (ref instanceof String) { - com.google.protobuf.ByteString b = + com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8((String) ref); path_ = b; return b; @@ -2519,7 +2522,7 @@ public final class RemoteProtocol { return (com.google.protobuf.ByteString) ref; } } - + private void initFields() { path_ = ""; } @@ -2527,7 +2530,7 @@ public final class RemoteProtocol { public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - + if (!hasPath()) { memoizedIsInitialized = 0; return false; @@ -2535,7 +2538,7 @@ public final class RemoteProtocol { memoizedIsInitialized = 1; return true; } - + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); @@ -2544,12 +2547,12 @@ public final class RemoteProtocol { } getUnknownFields().writeTo(output); } - + private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; - + size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream @@ -2559,14 +2562,14 @@ public final class RemoteProtocol { memoizedSerializedSize = size; return size; } - + private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } - + public static akka.remote.RemoteProtocol.ActorRefProtocol parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { @@ -2633,14 +2636,14 @@ public final class RemoteProtocol { return newBuilder().mergeFrom(input, extensionRegistry) .buildParsed(); } - + public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(akka.remote.RemoteProtocol.ActorRefProtocol prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } - + @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { @@ -2654,17 +2657,17 @@ public final class RemoteProtocol { getDescriptor() { return akka.remote.RemoteProtocol.internal_static_ActorRefProtocol_descriptor; } - + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return akka.remote.RemoteProtocol.internal_static_ActorRefProtocol_fieldAccessorTable; } - + // Construct using akka.remote.RemoteProtocol.ActorRefProtocol.newBuilder() private Builder() { maybeForceBuilderInitialization(); } - + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); @@ -2676,27 +2679,27 @@ public final class RemoteProtocol { private static Builder create() { return new Builder(); } - + public Builder clear() { super.clear(); path_ = ""; bitField0_ = (bitField0_ & ~0x00000001); return this; } - + public Builder clone() { return create().mergeFrom(buildPartial()); } - + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return akka.remote.RemoteProtocol.ActorRefProtocol.getDescriptor(); } - + public akka.remote.RemoteProtocol.ActorRefProtocol getDefaultInstanceForType() { return akka.remote.RemoteProtocol.ActorRefProtocol.getDefaultInstance(); } - + public 
akka.remote.RemoteProtocol.ActorRefProtocol build() { akka.remote.RemoteProtocol.ActorRefProtocol result = buildPartial(); if (!result.isInitialized()) { @@ -2704,7 +2707,7 @@ public final class RemoteProtocol { } return result; } - + private akka.remote.RemoteProtocol.ActorRefProtocol buildParsed() throws com.google.protobuf.InvalidProtocolBufferException { akka.remote.RemoteProtocol.ActorRefProtocol result = buildPartial(); @@ -2714,7 +2717,7 @@ public final class RemoteProtocol { } return result; } - + public akka.remote.RemoteProtocol.ActorRefProtocol buildPartial() { akka.remote.RemoteProtocol.ActorRefProtocol result = new akka.remote.RemoteProtocol.ActorRefProtocol(this); int from_bitField0_ = bitField0_; @@ -2727,7 +2730,7 @@ public final class RemoteProtocol { onBuilt(); return result; } - + public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof akka.remote.RemoteProtocol.ActorRefProtocol) { return mergeFrom((akka.remote.RemoteProtocol.ActorRefProtocol)other); @@ -2736,7 +2739,7 @@ public final class RemoteProtocol { return this; } } - + public Builder mergeFrom(akka.remote.RemoteProtocol.ActorRefProtocol other) { if (other == akka.remote.RemoteProtocol.ActorRefProtocol.getDefaultInstance()) return this; if (other.hasPath()) { @@ -2745,15 +2748,15 @@ public final class RemoteProtocol { this.mergeUnknownFields(other.getUnknownFields()); return this; } - + public final boolean isInitialized() { if (!hasPath()) { - + return false; } return true; } - + public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -2785,9 +2788,9 @@ public final class RemoteProtocol { } } } - + private int bitField0_; - + // required string path = 1; private java.lang.Object path_ = ""; public boolean hasPath() { @@ -2823,29 +2826,29 @@ public final class RemoteProtocol { path_ = value; onChanged(); } - + // @@protoc_insertion_point(builder_scope:ActorRefProtocol) } - + static { defaultInstance = new ActorRefProtocol(true); defaultInstance.initFields(); } - + // @@protoc_insertion_point(class_scope:ActorRefProtocol) } - + public interface MessageProtocolOrBuilder extends com.google.protobuf.MessageOrBuilder { - + // required bytes message = 1; boolean hasMessage(); com.google.protobuf.ByteString getMessage(); - + // required int32 serializerId = 2; boolean hasSerializerId(); int getSerializerId(); - + // optional bytes messageManifest = 3; boolean hasMessageManifest(); com.google.protobuf.ByteString getMessageManifest(); @@ -2858,26 +2861,26 @@ public final class RemoteProtocol { super(builder); } private MessageProtocol(boolean noInit) {} - + private static final MessageProtocol defaultInstance; public static MessageProtocol getDefaultInstance() { return defaultInstance; } - + public MessageProtocol getDefaultInstanceForType() { return defaultInstance; } - + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return akka.remote.RemoteProtocol.internal_static_MessageProtocol_descriptor; } - + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return akka.remote.RemoteProtocol.internal_static_MessageProtocol_fieldAccessorTable; } - + private int bitField0_; // required bytes message = 1; public static final int MESSAGE_FIELD_NUMBER = 1; @@ -2888,7 +2891,7 @@ public final class RemoteProtocol { public com.google.protobuf.ByteString getMessage() { return message_; } - + // required int32 serializerId = 2; public static final int 
SERIALIZERID_FIELD_NUMBER = 2; private int serializerId_; @@ -2898,7 +2901,7 @@ public final class RemoteProtocol { public int getSerializerId() { return serializerId_; } - + // optional bytes messageManifest = 3; public static final int MESSAGEMANIFEST_FIELD_NUMBER = 3; private com.google.protobuf.ByteString messageManifest_; @@ -2908,7 +2911,7 @@ public final class RemoteProtocol { public com.google.protobuf.ByteString getMessageManifest() { return messageManifest_; } - + private void initFields() { message_ = com.google.protobuf.ByteString.EMPTY; serializerId_ = 0; @@ -2918,7 +2921,7 @@ public final class RemoteProtocol { public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - + if (!hasMessage()) { memoizedIsInitialized = 0; return false; @@ -2930,7 +2933,7 @@ public final class RemoteProtocol { memoizedIsInitialized = 1; return true; } - + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); @@ -2945,12 +2948,12 @@ public final class RemoteProtocol { } getUnknownFields().writeTo(output); } - + private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; - + size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream @@ -2968,14 +2971,14 @@ public final class RemoteProtocol { memoizedSerializedSize = size; return size; } - + private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } - + public static akka.remote.RemoteProtocol.MessageProtocol parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { @@ -3042,14 +3045,14 @@ public final class RemoteProtocol { return newBuilder().mergeFrom(input, extensionRegistry) .buildParsed(); } - + public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(akka.remote.RemoteProtocol.MessageProtocol prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } - + @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { @@ -3063,17 +3066,17 @@ public final class RemoteProtocol { getDescriptor() { return akka.remote.RemoteProtocol.internal_static_MessageProtocol_descriptor; } - + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return akka.remote.RemoteProtocol.internal_static_MessageProtocol_fieldAccessorTable; } - + // Construct using akka.remote.RemoteProtocol.MessageProtocol.newBuilder() private Builder() { maybeForceBuilderInitialization(); } - + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); @@ -3085,7 +3088,7 @@ public final class RemoteProtocol { private static Builder create() { return new Builder(); } - + public Builder clear() { super.clear(); message_ = com.google.protobuf.ByteString.EMPTY; @@ -3096,20 +3099,20 @@ public final class RemoteProtocol { bitField0_ = (bitField0_ & ~0x00000004); return this; } - + public Builder clone() { return create().mergeFrom(buildPartial()); } - + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return 
akka.remote.RemoteProtocol.MessageProtocol.getDescriptor(); } - + public akka.remote.RemoteProtocol.MessageProtocol getDefaultInstanceForType() { return akka.remote.RemoteProtocol.MessageProtocol.getDefaultInstance(); } - + public akka.remote.RemoteProtocol.MessageProtocol build() { akka.remote.RemoteProtocol.MessageProtocol result = buildPartial(); if (!result.isInitialized()) { @@ -3117,7 +3120,7 @@ public final class RemoteProtocol { } return result; } - + private akka.remote.RemoteProtocol.MessageProtocol buildParsed() throws com.google.protobuf.InvalidProtocolBufferException { akka.remote.RemoteProtocol.MessageProtocol result = buildPartial(); @@ -3127,7 +3130,7 @@ public final class RemoteProtocol { } return result; } - + public akka.remote.RemoteProtocol.MessageProtocol buildPartial() { akka.remote.RemoteProtocol.MessageProtocol result = new akka.remote.RemoteProtocol.MessageProtocol(this); int from_bitField0_ = bitField0_; @@ -3148,7 +3151,7 @@ public final class RemoteProtocol { onBuilt(); return result; } - + public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof akka.remote.RemoteProtocol.MessageProtocol) { return mergeFrom((akka.remote.RemoteProtocol.MessageProtocol)other); @@ -3157,7 +3160,7 @@ public final class RemoteProtocol { return this; } } - + public Builder mergeFrom(akka.remote.RemoteProtocol.MessageProtocol other) { if (other == akka.remote.RemoteProtocol.MessageProtocol.getDefaultInstance()) return this; if (other.hasMessage()) { @@ -3172,19 +3175,19 @@ public final class RemoteProtocol { this.mergeUnknownFields(other.getUnknownFields()); return this; } - + public final boolean isInitialized() { if (!hasMessage()) { - + return false; } if (!hasSerializerId()) { - + return false; } return true; } - + public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -3226,9 +3229,9 @@ public final class RemoteProtocol { } } } - + private int bitField0_; - + // required bytes message = 1; private com.google.protobuf.ByteString message_ = com.google.protobuf.ByteString.EMPTY; public boolean hasMessage() { @@ -3252,7 +3255,7 @@ public final class RemoteProtocol { onChanged(); return this; } - + // required int32 serializerId = 2; private int serializerId_ ; public boolean hasSerializerId() { @@ -3273,7 +3276,7 @@ public final class RemoteProtocol { onChanged(); return this; } - + // optional bytes messageManifest = 3; private com.google.protobuf.ByteString messageManifest_ = com.google.protobuf.ByteString.EMPTY; public boolean hasMessageManifest() { @@ -3297,25 +3300,25 @@ public final class RemoteProtocol { onChanged(); return this; } - + // @@protoc_insertion_point(builder_scope:MessageProtocol) } - + static { defaultInstance = new MessageProtocol(true); defaultInstance.initFields(); } - + // @@protoc_insertion_point(class_scope:MessageProtocol) } - + public interface MetadataEntryProtocolOrBuilder extends com.google.protobuf.MessageOrBuilder { - + // required string key = 1; boolean hasKey(); String getKey(); - + // required bytes value = 2; boolean hasValue(); com.google.protobuf.ByteString getValue(); @@ -3328,26 +3331,26 @@ public final class RemoteProtocol { super(builder); } private MetadataEntryProtocol(boolean noInit) {} - + private static final MetadataEntryProtocol defaultInstance; public static MetadataEntryProtocol getDefaultInstance() { return defaultInstance; } - + public MetadataEntryProtocol getDefaultInstanceForType() { return defaultInstance; } - + public 
static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return akka.remote.RemoteProtocol.internal_static_MetadataEntryProtocol_descriptor; } - + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return akka.remote.RemoteProtocol.internal_static_MetadataEntryProtocol_fieldAccessorTable; } - + private int bitField0_; // required string key = 1; public static final int KEY_FIELD_NUMBER = 1; @@ -3360,7 +3363,7 @@ public final class RemoteProtocol { if (ref instanceof String) { return (String) ref; } else { - com.google.protobuf.ByteString bs = + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; String s = bs.toStringUtf8(); if (com.google.protobuf.Internal.isValidUtf8(bs)) { @@ -3372,7 +3375,7 @@ public final class RemoteProtocol { private com.google.protobuf.ByteString getKeyBytes() { java.lang.Object ref = key_; if (ref instanceof String) { - com.google.protobuf.ByteString b = + com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8((String) ref); key_ = b; return b; @@ -3380,7 +3383,7 @@ public final class RemoteProtocol { return (com.google.protobuf.ByteString) ref; } } - + // required bytes value = 2; public static final int VALUE_FIELD_NUMBER = 2; private com.google.protobuf.ByteString value_; @@ -3390,7 +3393,7 @@ public final class RemoteProtocol { public com.google.protobuf.ByteString getValue() { return value_; } - + private void initFields() { key_ = ""; value_ = com.google.protobuf.ByteString.EMPTY; @@ -3399,7 +3402,7 @@ public final class RemoteProtocol { public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - + if (!hasKey()) { memoizedIsInitialized = 0; return false; @@ -3411,7 +3414,7 @@ public final class RemoteProtocol { memoizedIsInitialized = 1; return true; } - + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); @@ -3423,12 +3426,12 @@ public final class RemoteProtocol { } getUnknownFields().writeTo(output); } - + private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; - + size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream @@ -3442,14 +3445,14 @@ public final class RemoteProtocol { memoizedSerializedSize = size; return size; } - + private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } - + public static akka.remote.RemoteProtocol.MetadataEntryProtocol parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { @@ -3516,14 +3519,14 @@ public final class RemoteProtocol { return newBuilder().mergeFrom(input, extensionRegistry) .buildParsed(); } - + public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(akka.remote.RemoteProtocol.MetadataEntryProtocol prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } - + @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { @@ -3537,17 +3540,17 @@ public final class RemoteProtocol { getDescriptor() { return 
akka.remote.RemoteProtocol.internal_static_MetadataEntryProtocol_descriptor; } - + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return akka.remote.RemoteProtocol.internal_static_MetadataEntryProtocol_fieldAccessorTable; } - + // Construct using akka.remote.RemoteProtocol.MetadataEntryProtocol.newBuilder() private Builder() { maybeForceBuilderInitialization(); } - + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); @@ -3559,7 +3562,7 @@ public final class RemoteProtocol { private static Builder create() { return new Builder(); } - + public Builder clear() { super.clear(); key_ = ""; @@ -3568,20 +3571,20 @@ public final class RemoteProtocol { bitField0_ = (bitField0_ & ~0x00000002); return this; } - + public Builder clone() { return create().mergeFrom(buildPartial()); } - + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return akka.remote.RemoteProtocol.MetadataEntryProtocol.getDescriptor(); } - + public akka.remote.RemoteProtocol.MetadataEntryProtocol getDefaultInstanceForType() { return akka.remote.RemoteProtocol.MetadataEntryProtocol.getDefaultInstance(); } - + public akka.remote.RemoteProtocol.MetadataEntryProtocol build() { akka.remote.RemoteProtocol.MetadataEntryProtocol result = buildPartial(); if (!result.isInitialized()) { @@ -3589,7 +3592,7 @@ public final class RemoteProtocol { } return result; } - + private akka.remote.RemoteProtocol.MetadataEntryProtocol buildParsed() throws com.google.protobuf.InvalidProtocolBufferException { akka.remote.RemoteProtocol.MetadataEntryProtocol result = buildPartial(); @@ -3599,7 +3602,7 @@ public final class RemoteProtocol { } return result; } - + public akka.remote.RemoteProtocol.MetadataEntryProtocol buildPartial() { akka.remote.RemoteProtocol.MetadataEntryProtocol result = new akka.remote.RemoteProtocol.MetadataEntryProtocol(this); int from_bitField0_ = bitField0_; @@ -3616,7 +3619,7 @@ public final class RemoteProtocol { onBuilt(); return result; } - + public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof akka.remote.RemoteProtocol.MetadataEntryProtocol) { return mergeFrom((akka.remote.RemoteProtocol.MetadataEntryProtocol)other); @@ -3625,7 +3628,7 @@ public final class RemoteProtocol { return this; } } - + public Builder mergeFrom(akka.remote.RemoteProtocol.MetadataEntryProtocol other) { if (other == akka.remote.RemoteProtocol.MetadataEntryProtocol.getDefaultInstance()) return this; if (other.hasKey()) { @@ -3637,19 +3640,19 @@ public final class RemoteProtocol { this.mergeUnknownFields(other.getUnknownFields()); return this; } - + public final boolean isInitialized() { if (!hasKey()) { - + return false; } if (!hasValue()) { - + return false; } return true; } - + public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -3686,9 +3689,9 @@ public final class RemoteProtocol { } } } - + private int bitField0_; - + // required string key = 1; private java.lang.Object key_ = ""; public boolean hasKey() { @@ -3724,7 +3727,7 @@ public final class RemoteProtocol { key_ = value; onChanged(); } - + // required bytes value = 2; private com.google.protobuf.ByteString value_ = com.google.protobuf.ByteString.EMPTY; public boolean hasValue() { @@ -3748,29 +3751,29 @@ public final class RemoteProtocol { onChanged(); return this; } - + // @@protoc_insertion_point(builder_scope:MetadataEntryProtocol) 
} - + static { defaultInstance = new MetadataEntryProtocol(true); defaultInstance.initFields(); } - + // @@protoc_insertion_point(class_scope:MetadataEntryProtocol) } - + public interface AddressProtocolOrBuilder extends com.google.protobuf.MessageOrBuilder { - + // required string system = 1; boolean hasSystem(); String getSystem(); - + // required string hostname = 2; boolean hasHostname(); String getHostname(); - + // required uint32 port = 3; boolean hasPort(); int getPort(); @@ -3783,26 +3786,26 @@ public final class RemoteProtocol { super(builder); } private AddressProtocol(boolean noInit) {} - + private static final AddressProtocol defaultInstance; public static AddressProtocol getDefaultInstance() { return defaultInstance; } - + public AddressProtocol getDefaultInstanceForType() { return defaultInstance; } - + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return akka.remote.RemoteProtocol.internal_static_AddressProtocol_descriptor; } - + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return akka.remote.RemoteProtocol.internal_static_AddressProtocol_fieldAccessorTable; } - + private int bitField0_; // required string system = 1; public static final int SYSTEM_FIELD_NUMBER = 1; @@ -3815,7 +3818,7 @@ public final class RemoteProtocol { if (ref instanceof String) { return (String) ref; } else { - com.google.protobuf.ByteString bs = + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; String s = bs.toStringUtf8(); if (com.google.protobuf.Internal.isValidUtf8(bs)) { @@ -3827,7 +3830,7 @@ public final class RemoteProtocol { private com.google.protobuf.ByteString getSystemBytes() { java.lang.Object ref = system_; if (ref instanceof String) { - com.google.protobuf.ByteString b = + com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8((String) ref); system_ = b; return b; @@ -3835,7 +3838,7 @@ public final class RemoteProtocol { return (com.google.protobuf.ByteString) ref; } } - + // required string hostname = 2; public static final int HOSTNAME_FIELD_NUMBER = 2; private java.lang.Object hostname_; @@ -3847,7 +3850,7 @@ public final class RemoteProtocol { if (ref instanceof String) { return (String) ref; } else { - com.google.protobuf.ByteString bs = + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; String s = bs.toStringUtf8(); if (com.google.protobuf.Internal.isValidUtf8(bs)) { @@ -3859,7 +3862,7 @@ public final class RemoteProtocol { private com.google.protobuf.ByteString getHostnameBytes() { java.lang.Object ref = hostname_; if (ref instanceof String) { - com.google.protobuf.ByteString b = + com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8((String) ref); hostname_ = b; return b; @@ -3867,7 +3870,7 @@ public final class RemoteProtocol { return (com.google.protobuf.ByteString) ref; } } - + // required uint32 port = 3; public static final int PORT_FIELD_NUMBER = 3; private int port_; @@ -3877,7 +3880,7 @@ public final class RemoteProtocol { public int getPort() { return port_; } - + private void initFields() { system_ = ""; hostname_ = ""; @@ -3887,7 +3890,7 @@ public final class RemoteProtocol { public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - + if (!hasSystem()) { memoizedIsInitialized = 0; return false; @@ -3903,7 +3906,7 @@ public final class RemoteProtocol { memoizedIsInitialized = 1; return true; } - + public 
void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); @@ -3918,12 +3921,12 @@ public final class RemoteProtocol { } getUnknownFields().writeTo(output); } - + private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; - + size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream @@ -3941,14 +3944,14 @@ public final class RemoteProtocol { memoizedSerializedSize = size; return size; } - + private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } - + public static akka.remote.RemoteProtocol.AddressProtocol parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { @@ -4015,14 +4018,14 @@ public final class RemoteProtocol { return newBuilder().mergeFrom(input, extensionRegistry) .buildParsed(); } - + public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(akka.remote.RemoteProtocol.AddressProtocol prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } - + @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { @@ -4036,17 +4039,17 @@ public final class RemoteProtocol { getDescriptor() { return akka.remote.RemoteProtocol.internal_static_AddressProtocol_descriptor; } - + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return akka.remote.RemoteProtocol.internal_static_AddressProtocol_fieldAccessorTable; } - + // Construct using akka.remote.RemoteProtocol.AddressProtocol.newBuilder() private Builder() { maybeForceBuilderInitialization(); } - + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); @@ -4058,7 +4061,7 @@ public final class RemoteProtocol { private static Builder create() { return new Builder(); } - + public Builder clear() { super.clear(); system_ = ""; @@ -4069,20 +4072,20 @@ public final class RemoteProtocol { bitField0_ = (bitField0_ & ~0x00000004); return this; } - + public Builder clone() { return create().mergeFrom(buildPartial()); } - + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return akka.remote.RemoteProtocol.AddressProtocol.getDescriptor(); } - + public akka.remote.RemoteProtocol.AddressProtocol getDefaultInstanceForType() { return akka.remote.RemoteProtocol.AddressProtocol.getDefaultInstance(); } - + public akka.remote.RemoteProtocol.AddressProtocol build() { akka.remote.RemoteProtocol.AddressProtocol result = buildPartial(); if (!result.isInitialized()) { @@ -4090,7 +4093,7 @@ public final class RemoteProtocol { } return result; } - + private akka.remote.RemoteProtocol.AddressProtocol buildParsed() throws com.google.protobuf.InvalidProtocolBufferException { akka.remote.RemoteProtocol.AddressProtocol result = buildPartial(); @@ -4100,7 +4103,7 @@ public final class RemoteProtocol { } return result; } - + public akka.remote.RemoteProtocol.AddressProtocol buildPartial() { akka.remote.RemoteProtocol.AddressProtocol result = new akka.remote.RemoteProtocol.AddressProtocol(this); int from_bitField0_ = bitField0_; @@ -4121,7 +4124,7 @@ public final class 
RemoteProtocol { onBuilt(); return result; } - + public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof akka.remote.RemoteProtocol.AddressProtocol) { return mergeFrom((akka.remote.RemoteProtocol.AddressProtocol)other); @@ -4130,7 +4133,7 @@ public final class RemoteProtocol { return this; } } - + public Builder mergeFrom(akka.remote.RemoteProtocol.AddressProtocol other) { if (other == akka.remote.RemoteProtocol.AddressProtocol.getDefaultInstance()) return this; if (other.hasSystem()) { @@ -4145,23 +4148,23 @@ public final class RemoteProtocol { this.mergeUnknownFields(other.getUnknownFields()); return this; } - + public final boolean isInitialized() { if (!hasSystem()) { - + return false; } if (!hasHostname()) { - + return false; } if (!hasPort()) { - + return false; } return true; } - + public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -4203,9 +4206,9 @@ public final class RemoteProtocol { } } } - + private int bitField0_; - + // required string system = 1; private java.lang.Object system_ = ""; public boolean hasSystem() { @@ -4241,7 +4244,7 @@ public final class RemoteProtocol { system_ = value; onChanged(); } - + // required string hostname = 2; private java.lang.Object hostname_ = ""; public boolean hasHostname() { @@ -4277,7 +4280,7 @@ public final class RemoteProtocol { hostname_ = value; onChanged(); } - + // required uint32 port = 3; private int port_ ; public boolean hasPort() { @@ -4298,25 +4301,25 @@ public final class RemoteProtocol { onChanged(); return this; } - + // @@protoc_insertion_point(builder_scope:AddressProtocol) } - + static { defaultInstance = new AddressProtocol(true); defaultInstance.initFields(); } - + // @@protoc_insertion_point(class_scope:AddressProtocol) } - + public interface ExceptionProtocolOrBuilder extends com.google.protobuf.MessageOrBuilder { - + // required string classname = 1; boolean hasClassname(); String getClassname(); - + // required string message = 2; boolean hasMessage(); String getMessage(); @@ -4329,26 +4332,26 @@ public final class RemoteProtocol { super(builder); } private ExceptionProtocol(boolean noInit) {} - + private static final ExceptionProtocol defaultInstance; public static ExceptionProtocol getDefaultInstance() { return defaultInstance; } - + public ExceptionProtocol getDefaultInstanceForType() { return defaultInstance; } - + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return akka.remote.RemoteProtocol.internal_static_ExceptionProtocol_descriptor; } - + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return akka.remote.RemoteProtocol.internal_static_ExceptionProtocol_fieldAccessorTable; } - + private int bitField0_; // required string classname = 1; public static final int CLASSNAME_FIELD_NUMBER = 1; @@ -4361,7 +4364,7 @@ public final class RemoteProtocol { if (ref instanceof String) { return (String) ref; } else { - com.google.protobuf.ByteString bs = + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; String s = bs.toStringUtf8(); if (com.google.protobuf.Internal.isValidUtf8(bs)) { @@ -4373,7 +4376,7 @@ public final class RemoteProtocol { private com.google.protobuf.ByteString getClassnameBytes() { java.lang.Object ref = classname_; if (ref instanceof String) { - com.google.protobuf.ByteString b = + com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8((String) ref); classname_ = 
b; return b; @@ -4381,7 +4384,7 @@ public final class RemoteProtocol { return (com.google.protobuf.ByteString) ref; } } - + // required string message = 2; public static final int MESSAGE_FIELD_NUMBER = 2; private java.lang.Object message_; @@ -4393,7 +4396,7 @@ public final class RemoteProtocol { if (ref instanceof String) { return (String) ref; } else { - com.google.protobuf.ByteString bs = + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; String s = bs.toStringUtf8(); if (com.google.protobuf.Internal.isValidUtf8(bs)) { @@ -4405,7 +4408,7 @@ public final class RemoteProtocol { private com.google.protobuf.ByteString getMessageBytes() { java.lang.Object ref = message_; if (ref instanceof String) { - com.google.protobuf.ByteString b = + com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8((String) ref); message_ = b; return b; @@ -4413,7 +4416,7 @@ public final class RemoteProtocol { return (com.google.protobuf.ByteString) ref; } } - + private void initFields() { classname_ = ""; message_ = ""; @@ -4422,7 +4425,7 @@ public final class RemoteProtocol { public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - + if (!hasClassname()) { memoizedIsInitialized = 0; return false; @@ -4434,7 +4437,7 @@ public final class RemoteProtocol { memoizedIsInitialized = 1; return true; } - + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); @@ -4446,12 +4449,12 @@ public final class RemoteProtocol { } getUnknownFields().writeTo(output); } - + private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; - + size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream @@ -4465,14 +4468,14 @@ public final class RemoteProtocol { memoizedSerializedSize = size; return size; } - + private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } - + public static akka.remote.RemoteProtocol.ExceptionProtocol parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { @@ -4539,14 +4542,14 @@ public final class RemoteProtocol { return newBuilder().mergeFrom(input, extensionRegistry) .buildParsed(); } - + public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(akka.remote.RemoteProtocol.ExceptionProtocol prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } - + @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { @@ -4560,17 +4563,17 @@ public final class RemoteProtocol { getDescriptor() { return akka.remote.RemoteProtocol.internal_static_ExceptionProtocol_descriptor; } - + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return akka.remote.RemoteProtocol.internal_static_ExceptionProtocol_fieldAccessorTable; } - + // Construct using akka.remote.RemoteProtocol.ExceptionProtocol.newBuilder() private Builder() { maybeForceBuilderInitialization(); } - + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); 
@@ -4582,7 +4585,7 @@ public final class RemoteProtocol { private static Builder create() { return new Builder(); } - + public Builder clear() { super.clear(); classname_ = ""; @@ -4591,20 +4594,20 @@ public final class RemoteProtocol { bitField0_ = (bitField0_ & ~0x00000002); return this; } - + public Builder clone() { return create().mergeFrom(buildPartial()); } - + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return akka.remote.RemoteProtocol.ExceptionProtocol.getDescriptor(); } - + public akka.remote.RemoteProtocol.ExceptionProtocol getDefaultInstanceForType() { return akka.remote.RemoteProtocol.ExceptionProtocol.getDefaultInstance(); } - + public akka.remote.RemoteProtocol.ExceptionProtocol build() { akka.remote.RemoteProtocol.ExceptionProtocol result = buildPartial(); if (!result.isInitialized()) { @@ -4612,7 +4615,7 @@ public final class RemoteProtocol { } return result; } - + private akka.remote.RemoteProtocol.ExceptionProtocol buildParsed() throws com.google.protobuf.InvalidProtocolBufferException { akka.remote.RemoteProtocol.ExceptionProtocol result = buildPartial(); @@ -4622,7 +4625,7 @@ public final class RemoteProtocol { } return result; } - + public akka.remote.RemoteProtocol.ExceptionProtocol buildPartial() { akka.remote.RemoteProtocol.ExceptionProtocol result = new akka.remote.RemoteProtocol.ExceptionProtocol(this); int from_bitField0_ = bitField0_; @@ -4639,7 +4642,7 @@ public final class RemoteProtocol { onBuilt(); return result; } - + public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof akka.remote.RemoteProtocol.ExceptionProtocol) { return mergeFrom((akka.remote.RemoteProtocol.ExceptionProtocol)other); @@ -4648,7 +4651,7 @@ public final class RemoteProtocol { return this; } } - + public Builder mergeFrom(akka.remote.RemoteProtocol.ExceptionProtocol other) { if (other == akka.remote.RemoteProtocol.ExceptionProtocol.getDefaultInstance()) return this; if (other.hasClassname()) { @@ -4660,19 +4663,19 @@ public final class RemoteProtocol { this.mergeUnknownFields(other.getUnknownFields()); return this; } - + public final boolean isInitialized() { if (!hasClassname()) { - + return false; } if (!hasMessage()) { - + return false; } return true; } - + public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -4709,9 +4712,9 @@ public final class RemoteProtocol { } } } - + private int bitField0_; - + // required string classname = 1; private java.lang.Object classname_ = ""; public boolean hasClassname() { @@ -4747,7 +4750,7 @@ public final class RemoteProtocol { classname_ = value; onChanged(); } - + // required string message = 2; private java.lang.Object message_ = ""; public boolean hasMessage() { @@ -4783,31 +4786,31 @@ public final class RemoteProtocol { message_ = value; onChanged(); } - + // @@protoc_insertion_point(builder_scope:ExceptionProtocol) } - + static { defaultInstance = new ExceptionProtocol(true); defaultInstance.initFields(); } - + // @@protoc_insertion_point(class_scope:ExceptionProtocol) } - + public interface DurableMailboxMessageProtocolOrBuilder extends com.google.protobuf.MessageOrBuilder { - + // required .ActorRefProtocol recipient = 1; boolean hasRecipient(); akka.remote.RemoteProtocol.ActorRefProtocol getRecipient(); akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder getRecipientOrBuilder(); - + // optional .ActorRefProtocol sender = 2; boolean hasSender(); akka.remote.RemoteProtocol.ActorRefProtocol getSender(); 
akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder getSenderOrBuilder(); - + // required bytes message = 3; boolean hasMessage(); com.google.protobuf.ByteString getMessage(); @@ -4820,26 +4823,26 @@ public final class RemoteProtocol { super(builder); } private DurableMailboxMessageProtocol(boolean noInit) {} - + private static final DurableMailboxMessageProtocol defaultInstance; public static DurableMailboxMessageProtocol getDefaultInstance() { return defaultInstance; } - + public DurableMailboxMessageProtocol getDefaultInstanceForType() { return defaultInstance; } - + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return akka.remote.RemoteProtocol.internal_static_DurableMailboxMessageProtocol_descriptor; } - + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return akka.remote.RemoteProtocol.internal_static_DurableMailboxMessageProtocol_fieldAccessorTable; } - + private int bitField0_; // required .ActorRefProtocol recipient = 1; public static final int RECIPIENT_FIELD_NUMBER = 1; @@ -4853,7 +4856,7 @@ public final class RemoteProtocol { public akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder getRecipientOrBuilder() { return recipient_; } - + // optional .ActorRefProtocol sender = 2; public static final int SENDER_FIELD_NUMBER = 2; private akka.remote.RemoteProtocol.ActorRefProtocol sender_; @@ -4866,7 +4869,7 @@ public final class RemoteProtocol { public akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder getSenderOrBuilder() { return sender_; } - + // required bytes message = 3; public static final int MESSAGE_FIELD_NUMBER = 3; private com.google.protobuf.ByteString message_; @@ -4876,7 +4879,7 @@ public final class RemoteProtocol { public com.google.protobuf.ByteString getMessage() { return message_; } - + private void initFields() { recipient_ = akka.remote.RemoteProtocol.ActorRefProtocol.getDefaultInstance(); sender_ = akka.remote.RemoteProtocol.ActorRefProtocol.getDefaultInstance(); @@ -4886,7 +4889,7 @@ public final class RemoteProtocol { public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - + if (!hasRecipient()) { memoizedIsInitialized = 0; return false; @@ -4908,7 +4911,7 @@ public final class RemoteProtocol { memoizedIsInitialized = 1; return true; } - + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); @@ -4923,12 +4926,12 @@ public final class RemoteProtocol { } getUnknownFields().writeTo(output); } - + private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; - + size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream @@ -4946,14 +4949,14 @@ public final class RemoteProtocol { memoizedSerializedSize = size; return size; } - + private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } - + public static akka.remote.RemoteProtocol.DurableMailboxMessageProtocol parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { @@ -5020,14 +5023,14 @@ public final class RemoteProtocol { return newBuilder().mergeFrom(input, extensionRegistry) .buildParsed(); } - + public static Builder newBuilder() { return Builder.create(); } public Builder 
newBuilderForType() { return newBuilder(); } public static Builder newBuilder(akka.remote.RemoteProtocol.DurableMailboxMessageProtocol prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } - + @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { @@ -5041,17 +5044,17 @@ public final class RemoteProtocol { getDescriptor() { return akka.remote.RemoteProtocol.internal_static_DurableMailboxMessageProtocol_descriptor; } - + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return akka.remote.RemoteProtocol.internal_static_DurableMailboxMessageProtocol_fieldAccessorTable; } - + // Construct using akka.remote.RemoteProtocol.DurableMailboxMessageProtocol.newBuilder() private Builder() { maybeForceBuilderInitialization(); } - + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); @@ -5532,12 +5535,13 @@ public final class RemoteProtocol { "assname\030\001 \002(\t\022\017\n\007message\030\002 \002(\t\"y\n\035Durabl" + "eMailboxMessageProtocol\022$\n\trecipient\030\001 \002" + "(\0132\021.ActorRefProtocol\022!\n\006sender\030\002 \001(\0132\021.", - "ActorRefProtocol\022\017\n\007message\030\003 \002(\014*(\n\013Com" + - "mandType\022\013\n\007CONNECT\020\001\022\014\n\010SHUTDOWN\020\002*K\n\026R" + - "eplicationStorageType\022\r\n\tTRANSIENT\020\001\022\023\n\017" + - "TRANSACTION_LOG\020\002\022\r\n\tDATA_GRID\020\003*>\n\027Repl" + - "icationStrategyType\022\021\n\rWRITE_THROUGH\020\001\022\020" + - "\n\014WRITE_BEHIND\020\002B\017\n\013akka.remoteH\001" + "ActorRefProtocol\022\017\n\007message\030\003 \002(\014*7\n\013Com" + + "mandType\022\013\n\007CONNECT\020\001\022\014\n\010SHUTDOWN\020\002\022\r\n\tH" + + "EARTBEAT\020\003*K\n\026ReplicationStorageType\022\r\n\t" + + "TRANSIENT\020\001\022\023\n\017TRANSACTION_LOG\020\002\022\r\n\tDATA" + + "_GRID\020\003*>\n\027ReplicationStrategyType\022\021\n\rWR" + + "ITE_THROUGH\020\001\022\020\n\014WRITE_BEHIND\020\002B\017\n\013akka." + + "remoteH\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { diff --git a/akka-remote/src/main/protocol/RemoteProtocol.proto b/akka-remote/src/main/protocol/RemoteProtocol.proto index 7fe287522d..da9414a110 100644 --- a/akka-remote/src/main/protocol/RemoteProtocol.proto +++ b/akka-remote/src/main/protocol/RemoteProtocol.proto @@ -41,6 +41,7 @@ message RemoteControlProtocol { enum CommandType { CONNECT = 1; SHUTDOWN = 2; + HEARTBEAT = 3; } /** diff --git a/akka-remote/src/main/resources/reference.conf b/akka-remote/src/main/resources/reference.conf index 86de93527c..1158d12295 100644 --- a/akka-remote/src/main/resources/reference.conf +++ b/akka-remote/src/main/resources/reference.conf @@ -61,23 +61,23 @@ akka { # it reuses inbound connections for replies, which is called a passive client connection (i.e. from server # to client). netty { - + # (O) In case of increased latency / overflow how long # should we wait (blocking the sender) until we deem the send to be cancelled? # 0 means "never backoff", any positive number will indicate time to block at most. 
backoff-timeout = 0ms - + # (I&O) Generate your own with '$AKKA_HOME/scripts/generate_config_with_secure_cookie.sh' # or using 'akka.util.Crypt.generateSecureCookie' secure-cookie = "" - + # (I) Should the remote server require that it peers share the same secure-cookie # (defined in the 'remote' section)? require-cookie = off # (I) Reuse inbound connections for outbound messages use-passive-connections = on - + # (I) The hostname or ip to bind the remoting to, # InetAddress.getLocalHost.getHostAddress is used if empty hostname = "" @@ -110,46 +110,30 @@ akka { # (O) Time between reconnect attempts for active clients reconnect-delay = 5s - # (O) Inactivity period after which active client connection is shutdown; will be - # re-established in case of new communication requests - read-timeout = 3600s + # (O) Read inactivity period (lowest resolution is seconds) + # after which active client connection is shutdown; + # will be re-established in case of new communication requests. + # A value of 0 will turn this feature off + read-timeout = 0s + + # (O) Write inactivity period (lowest resolution is seconds) + # after which a heartbeat is sent across the wire. + # A value of 0 will turn this feature off + write-timeout = 10s + + # (O) Inactivity period of both reads and writes (lowest resolution is seconds) + # after which active client connection is shutdown; + # will be re-established in case of new communication requests + # A value of 0 will turn this feature off + all-timeout = 0s # (O) Maximum time window that a client should try to reconnect for reconnection-time-window = 600s } - # accrual failure detection config - failure-detector { - - # defines the failure detector threshold - # A low threshold is prone to generate many wrong suspicions but ensures - # a quick detection in the event of a real crash. Conversely, a high - # threshold generates fewer mistakes but needs more time to detect - # actual crashes - threshold = 8 - - max-sample-size = 1000 - } - - gossip { - initialDelay = 5s - frequency = 1s - } - - # The dispatcher used for remote system messages - compute-grid-dispatcher { - # defaults to same settings as default-dispatcher - name = ComputeGridDispatcher - } - # The dispatcher used for the system actor "network-event-sender" network-event-sender-dispatcher { type = PinnedDispatcher } - - } - - cluster { - seed-nodes = [] } } diff --git a/akka-remote/src/main/scala/akka/remote/Gossiper.scala b/akka-remote/src/main/scala/akka/remote/Gossiper.scala deleted file mode 100644 index d99414f9c9..0000000000 --- a/akka-remote/src/main/scala/akka/remote/Gossiper.scala +++ /dev/null @@ -1,311 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -package akka.remote - -import akka.actor._ -import akka.actor.Status._ -import akka.event.Logging -import akka.util.Duration -import akka.config.ConfigurationException - -import java.util.concurrent.atomic.AtomicReference -import java.util.concurrent.TimeUnit.SECONDS -import java.security.SecureRandom -import System.{ currentTimeMillis ⇒ newTimestamp } - -import scala.collection.immutable.Map -import scala.annotation.tailrec - -import java.util.concurrent.TimeoutException -import akka.dispatch.Await -import akka.pattern.ask - -/** - * Interface for node membership change listener. - */ -trait NodeMembershipChangeListener { - def nodeConnected(node: Address) - def nodeDisconnected(node: Address) -} - -/** - * Represents the node state of to gossip, versioned by a vector clock. 
- */ -case class Gossip( - version: VectorClock, - node: Address, - availableNodes: Set[Address] = Set.empty[Address], - unavailableNodes: Set[Address] = Set.empty[Address]) - -// ====== START - NEW GOSSIP IMPLEMENTATION ====== -/* - case class Gossip( - version: VectorClock, - node: ParsedTransportAddress, - leader: ParsedTransportAddress, // FIXME leader is always head of 'members', so we probably don't need this field - members: SortedSet[Member] = SortetSet.empty[Member](Ordering.fromLessThan[String](_ > _)), // sorted set of members with their status, sorted by name - seen: Map[Member, VectorClock] = Map.empty[Member, VectorClock], // for ring convergence - pendingChanges: Option[Vector[PendingPartitioningChange]] = None, // for handoff - meta: Option[Map[String, Array[Byte]]] = None) // misc meta-data - - case class Member(address: ParsedTransportAddress, status: MemberStatus) - - sealed trait MemberStatus - object MemberStatus { - case class Joining(version: VectorClock) extends MemberStatus - case class Up(version: VectorClock) extends MemberStatus - case class Leaving(version: VectorClock) extends MemberStatus - case class Exiting(version: VectorClock) extends MemberStatus - case class Down(version: VectorClock) extends MemberStatus - } - - sealed trait PendingPartitioningStatus - object PendingPartitioningStatus { - case object Complete extends PendingPartitioningStatus - case object Awaiting extends PendingPartitioningStatus - } - - // FIXME what is this? - type VNodeMod = AnyRef - - case class PendingPartitioningChange( - owner: ParsedTransportAddress, - nextOwner: ParsedTransportAddress, - changes: Vector[VNodeMod], - status: PendingPartitioningStatus) -*/ -// ====== END - NEW GOSSIP IMPLEMENTATION ====== - -/** - * This module is responsible for Gossiping cluster information. The abstraction maintains the list of live - * and dead nodes. Periodically i.e. every 1 second this module chooses a random node and initiates a round - * of Gossip with it. Whenever it gets gossip updates it updates the Failure Detector with the liveness - * information. - *

- * During each of these runs the node initiates gossip exchange according to the following rules (as defined in the - * Cassandra documentation [http://wiki.apache.org/cassandra/ArchitectureGossip]): - *

- *   1) Gossip to a random live node (if any)
- *   2) Gossip to a random unreachable node with a certain probability depending on the number of unreachable and live nodes
- *   3) If the node gossiped to at (1) was not a seed, or the number of live nodes is less than the number of seeds,
- *       gossip to a random seed with a certain probability depending on the number of unreachable, seed, and live nodes.
- *
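A note on rule 3: the removed initateGossip() implementation further down computes the seed-gossip probability as `1.0 / oldAvailableNodesSize + oldUnavailableNodesSize`, which by Scala operator precedence is `(1.0 / available) + unavailable` and is therefore >= 1 whenever any node is unreachable. Below is a minimal sketch of rule 3 as documented, under the assumption that the intended formula was `1.0 / (available + unavailable)`; all names are illustrative, not the code removed by this patch.

```scala
import scala.util.Random

// Hypothetical distillation of rule 3 (assumed formula, illustrative names).
def maybeGossipToSeeds(availableNodes: Int, unavailableNodes: Int, rnd: Random)(gossipToSeeds: () => Unit) {
  val probability =
    if (availableNodes == 0) 1.0 // no live nodes known: always try a seed
    else 1.0 / (availableNodes + unavailableNodes)
  if (rnd.nextDouble() <= probability) gossipToSeeds()
}
```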
- */ -class Gossiper(remote: RemoteActorRefProvider, system: ActorSystemImpl) { - - /** - * Represents the state for this Gossiper. Implemented using optimistic lockless concurrency, - * all state is represented by this immutable case class and managed by an AtomicReference. - */ - private case class State( - currentGossip: Gossip, - nodeMembershipChangeListeners: Set[NodeMembershipChangeListener] = Set.empty[NodeMembershipChangeListener]) - - private val remoteSettings = remote.remoteSettings - private val serialization = remote.serialization - private val log = Logging(system, "Gossiper") - private val failureDetector = remote.failureDetector - private val connectionManager = new RemoteConnectionManager(system, remote, Map.empty[Address, ActorRef]) - - private val seeds = { - if (remoteSettings.SeedNodes.isEmpty) throw new ConfigurationException( - "At least one seed node must be defined in the configuration [akka.cluster.seed-nodes]") - else remoteSettings.SeedNodes - } - - private val address = remote.transport.address - private val nodeFingerprint = address.## - - private val random = SecureRandom.getInstance("SHA1PRNG") - private val initalDelayForGossip = remoteSettings.InitialDelayForGossip - private val gossipFrequency = remoteSettings.GossipFrequency - - private val state = new AtomicReference[State](State(currentGossip = newGossip())) - - { - // start periodic gossip and cluster scrutinization - default is run them every second with 1/2 second in between - system.scheduler.schedule(Duration(initalDelayForGossip.toSeconds, SECONDS), Duration(gossipFrequency.toSeconds, SECONDS))(initateGossip()) - system.scheduler.schedule(Duration(initalDelayForGossip.toSeconds, SECONDS), Duration(gossipFrequency.toSeconds, SECONDS))(scrutinize()) - } - - /** - * Tell the gossiper some gossip news. 
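The State scaladoc above is the key to reading the whole deleted file: every mutating operation in the Gossiper (tell, registerListener, unregisterListener, scrutinize) is the same compare-and-set retry loop over one immutable State value held in an AtomicReference. A generic sketch of that idiom, assuming only the standard library:

```scala
import java.util.concurrent.atomic.AtomicReference
import scala.annotation.tailrec

// Read the current immutable state, derive the next value, and publish it with
// compareAndSet; if another thread won the race, recur with the fresh state.
@tailrec
def update[S](state: AtomicReference[S])(f: S => S) {
  val oldState = state.get
  val newState = f(oldState)
  if (!state.compareAndSet(oldState, newState)) update(state)(f) // lost the race
}
```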
- */ - @tailrec - final def tell(newGossip: Gossip) { - val gossipingNode = newGossip.node - - failureDetector heartbeat gossipingNode // update heartbeat in failure detector - - val oldState = state.get - val latestGossip = latestVersionOf(newGossip, oldState.currentGossip) - val oldAvailableNodes = latestGossip.availableNodes - val oldUnavailableNodes = latestGossip.unavailableNodes - - if (!(oldAvailableNodes contains gossipingNode) && !(oldUnavailableNodes contains gossipingNode)) { - // we have a new node - val newGossip = latestGossip copy (availableNodes = oldAvailableNodes + gossipingNode) - val newState = oldState copy (currentGossip = incrementVersionForGossip(newGossip)) - - // if we won the race then update else try again - if (!state.compareAndSet(oldState, newState)) tell(newGossip) // recur - else { - // create connections for all new nodes in the latest gossip - for { - node ← oldAvailableNodes - if connectionManager.connectionFor(node).isEmpty - } { - val connectionFactory = () ⇒ system.actorFor(RootActorPath(gossipingNode) / "remote") - connectionManager.putIfAbsent(node, connectionFactory) // create a new remote connection to the new node - oldState.nodeMembershipChangeListeners foreach (_ nodeConnected node) // notify listeners about the new nodes - } - } - - } else if (oldUnavailableNodes contains gossipingNode) { - // gossip from an old former dead node - - val newUnavailableNodes = oldUnavailableNodes - gossipingNode - val newAvailableNodes = oldAvailableNodes + gossipingNode - - val newGossip = latestGossip copy (availableNodes = newAvailableNodes, unavailableNodes = newUnavailableNodes) - val newState = oldState copy (currentGossip = incrementVersionForGossip(newGossip)) - - // if we won the race then update else try again - if (!state.compareAndSet(oldState, newState)) tell(newGossip) // recur - else oldState.nodeMembershipChangeListeners foreach (_ nodeConnected gossipingNode) // notify listeners on successful update of state - } - } - - @tailrec - final def registerListener(listener: NodeMembershipChangeListener) { - val oldState = state.get - val newListeners = oldState.nodeMembershipChangeListeners + listener - val newState = oldState copy (nodeMembershipChangeListeners = newListeners) - if (!state.compareAndSet(oldState, newState)) registerListener(listener) // recur - } - - @tailrec - final def unregisterListener(listener: NodeMembershipChangeListener) { - val oldState = state.get - val newListeners = oldState.nodeMembershipChangeListeners - listener - val newState = oldState copy (nodeMembershipChangeListeners = newListeners) - if (!state.compareAndSet(oldState, newState)) unregisterListener(listener) // recur - } - - /** - * Initates a new round of gossip. - */ - private def initateGossip() { - val oldState = state.get - val oldGossip = oldState.currentGossip - - val oldAvailableNodes = oldGossip.availableNodes - val oldUnavailableNodes = oldGossip.unavailableNodes - - val oldAvailableNodesSize = oldAvailableNodes.size - val oldUnavailableNodesSize = oldUnavailableNodes.size - - // 1. gossip to alive nodes - val gossipedToSeed = - if (oldAvailableNodesSize > 0) gossipTo(oldAvailableNodes) - else false - - // 2. gossip to dead nodes - if (oldUnavailableNodesSize > 0) { - val probability: Double = oldUnavailableNodesSize / (oldAvailableNodesSize + 1) - if (random.nextDouble() < probability) gossipTo(oldUnavailableNodes) - } - - // 3. 
gossip to a seed for facilitating partition healing - if ((!gossipedToSeed || oldAvailableNodesSize < 1) && (seeds.head != address)) { - if (oldAvailableNodesSize == 0) gossipTo(seeds) - else { - val probability = 1.0 / oldAvailableNodesSize + oldUnavailableNodesSize - if (random.nextDouble() <= probability) gossipTo(seeds) - } - } - } - - /** - * Gossips set of nodes passed in as argument. Returns 'true' if it gossiped to a "seed" node. - */ - private def gossipTo(nodes: Set[Address]): Boolean = { - val peers = nodes filter (_ != address) // filter out myself - val peer = selectRandomNode(peers) - val oldState = state.get - val oldGossip = oldState.currentGossip - - val connection = connectionManager.connectionFor(peer).getOrElse( - throw new IllegalStateException("Connection for [" + peer + "] is not set up")) - - try { - val t = remoteSettings.RemoteSystemDaemonAckTimeout - Await.result(connection.?(newGossip)(t), t) match { - case Success(receiver) ⇒ log.debug("Gossip sent to [{}] was successfully received", receiver) - case Failure(cause) ⇒ log.error(cause, cause.toString) - } - } catch { - case e: TimeoutException ⇒ log.error(e, "Gossip to [%s] timed out".format(connection.path)) - case e: Exception ⇒ - log.error(e, "Could not gossip to [{}] due to: {}", connection.path, e.toString) - } - - seeds exists (peer == _) - } - - /** - * Scrutinizes the cluster; marks nodes detected by the failure detector as unavailable, and notifies all listeners - * of the change in the cluster membership. - */ - @tailrec - final private def scrutinize() { - val oldState = state.get - val oldGossip = oldState.currentGossip - - val oldAvailableNodes = oldGossip.availableNodes - val oldUnavailableNodes = oldGossip.unavailableNodes - val newlyDetectedUnavailableNodes = oldAvailableNodes filterNot failureDetector.isAvailable - - if (!newlyDetectedUnavailableNodes.isEmpty) { // we have newly detected nodes marked as unavailable - val newAvailableNodes = oldAvailableNodes diff newlyDetectedUnavailableNodes - val newUnavailableNodes = oldUnavailableNodes ++ newlyDetectedUnavailableNodes - - val newGossip = oldGossip copy (availableNodes = newAvailableNodes, unavailableNodes = newUnavailableNodes) - val newState = oldState copy (currentGossip = incrementVersionForGossip(newGossip)) - - // if we won the race then update else try again - if (!state.compareAndSet(oldState, newState)) scrutinize() // recur - else { - // notify listeners on successful update of state - for { - deadNode ← newUnavailableNodes - listener ← oldState.nodeMembershipChangeListeners - } listener nodeDisconnected deadNode - } - } - } - - private def newGossip(): Gossip = Gossip( - version = VectorClock(), - node = address, - availableNodes = Set(address)) - - private def incrementVersionForGossip(from: Gossip): Gossip = { - val newVersion = from.version.increment(nodeFingerprint, newTimestamp) - from copy (version = newVersion) - } - - private def latestVersionOf(newGossip: Gossip, oldGossip: Gossip): Gossip = { - (newGossip.version compare oldGossip.version) match { - case VectorClock.After ⇒ newGossip // gossiped version is newer, use new version - case VectorClock.Before ⇒ oldGossip // gossiped version is older, use old version - case VectorClock.Concurrent ⇒ oldGossip // can't establish a causal relationship between two versions => conflict - } - } - - private def selectRandomNode(nodes: Set[Address]): Address = { - nodes.toList(random.nextInt(nodes.size)) - } -} diff --git a/akka-remote/src/main/scala/akka/remote/NetworkEventStream.scala 
b/akka-remote/src/main/scala/akka/remote/NetworkEventStream.scala deleted file mode 100644 index 7254a914f7..0000000000 --- a/akka-remote/src/main/scala/akka/remote/NetworkEventStream.scala +++ /dev/null @@ -1,77 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -package akka.remote - -import scala.collection.mutable - -import akka.actor.{ Props, Address, ActorSystemImpl, Actor } - -/** - * Stream of all kinds of network events, remote failure and connection events, cluster failure and connection events etc. - * Also provides API for sender listener management. - */ -object NetworkEventStream { - - private sealed trait NetworkEventStreamEvent - - private case class Register(listener: Listener, connectionAddress: Address) - extends NetworkEventStreamEvent - - private case class Unregister(listener: Listener, connectionAddress: Address) - extends NetworkEventStreamEvent - - /** - * Base trait for network event listener. - */ - trait Listener { - def notify(event: RemoteLifeCycleEvent) - } - - /** - * Channel actor with a registry of listeners. - */ - private class Channel extends Actor { - - val listeners = new mutable.HashMap[Address, mutable.Set[Listener]]() { - override def default(k: Address) = mutable.Set.empty[Listener] - } - - def receive = { - case event: RemoteClientLifeCycleEvent ⇒ - listeners(event.remoteAddress) foreach (_ notify event) - - case event: RemoteServerLifeCycleEvent ⇒ // FIXME handle RemoteServerLifeCycleEvent, ticket #1408 and #1190 - - case Register(listener, connectionAddress) ⇒ - listeners(connectionAddress) += listener - - case Unregister(listener, connectionAddress) ⇒ - listeners(connectionAddress) -= listener - - case _ ⇒ //ignore other - } - } -} - -class NetworkEventStream(system: ActorSystemImpl) { - - import NetworkEventStream._ - - // FIXME: check that this supervision is correct, ticket #1408 - private[akka] val sender = - system.systemActorOf(Props[Channel].withDispatcher("akka.remote.network-event-sender-dispatcher"), "network-event-sender") - - /** - * Registers a network event stream listener (asyncronously). - */ - def register(listener: Listener, connectionAddress: Address) = - sender ! Register(listener, connectionAddress) - - /** - * Unregisters a network event stream listener (asyncronously) . - */ - def unregister(listener: Listener, connectionAddress: Address) = - sender ! Unregister(listener, connectionAddress) -} diff --git a/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala b/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala index 69ccdec572..872be5aa41 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala @@ -4,6 +4,7 @@ package akka.remote +import akka.AkkaException import akka.actor._ import akka.dispatch._ import akka.event.{ DeathWatch, Logging, LoggingAdapter } @@ -15,6 +16,10 @@ import akka.util.ReflectiveAccess import akka.serialization.Serialization import akka.serialization.SerializationExtension +class RemoteException(msg: String) extends AkkaException(msg) +class RemoteCommunicationException(msg: String) extends RemoteException(msg) +class RemoteConnectionException(msg: String) extends RemoteException(msg) + /** * Remote ActorRefProvider. Starts up actor on remote node and creates a RemoteActorRef representing it. 
*/ @@ -41,8 +46,6 @@ class RemoteActorRefProvider( val deathWatch = new RemoteDeathWatch(local.deathWatch, this) - val failureDetector = new AccrualFailureDetector(remoteSettings.FailureDetectorThreshold, remoteSettings.FailureDetectorMaxSampleSize) - // these are only available after init() def rootGuardian = local.rootGuardian def guardian = local.guardian @@ -66,10 +69,6 @@ class RemoteActorRefProvider( private var _remoteDaemon: InternalActorRef = _ def remoteDaemon = _remoteDaemon - @volatile - private var _networkEventStream: NetworkEventStream = _ - def networkEventStream = _networkEventStream - def init(system: ActorSystemImpl) { local.init(system) @@ -78,9 +77,6 @@ class RemoteActorRefProvider( _serialization = SerializationExtension(system) - _networkEventStream = new NetworkEventStream(system) - system.eventStream.subscribe(networkEventStream.sender, classOf[RemoteLifeCycleEvent]) - _transport = { val fqn = remoteSettings.RemoteTransport val args = Seq( diff --git a/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala b/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala index 6509d19383..5c29d22161 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala @@ -13,25 +13,10 @@ import akka.actor.Address import akka.actor.AddressExtractor class RemoteSettings(val config: Config, val systemName: String) { - import config._ - val RemoteTransport = getString("akka.remote.transport") val LogReceive = getBoolean("akka.remote.log-received-messages") val LogSend = getBoolean("akka.remote.log-sent-messages") - - // AccrualFailureDetector - val FailureDetectorThreshold = getInt("akka.remote.failure-detector.threshold") - val FailureDetectorMaxSampleSize = getInt("akka.remote.failure-detector.max-sample-size") - - // Gossiper val RemoteSystemDaemonAckTimeout = Duration(getMilliseconds("akka.remote.remote-daemon-ack-timeout"), MILLISECONDS) - val InitialDelayForGossip = Duration(getMilliseconds("akka.remote.gossip.initialDelay"), MILLISECONDS) - val GossipFrequency = Duration(getMilliseconds("akka.remote.gossip.frequency"), MILLISECONDS) - // TODO cluster config will go into akka-cluster/reference.conf when we enable that module - val SeedNodes = Set.empty[Address] ++ getStringList("akka.cluster.seed-nodes").asScala.collect { - case AddressExtractor(addr) ⇒ addr - } - val UntrustedMode = getBoolean("akka.remote.untrusted-mode") -} \ No newline at end of file +} diff --git a/akka-remote/src/main/scala/akka/remote/RemoteTransport.scala b/akka-remote/src/main/scala/akka/remote/RemoteTransport.scala index 3fbe5913b2..a9f1199546 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteTransport.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteTransport.scala @@ -8,9 +8,9 @@ import scala.reflect.BeanProperty import akka.actor.{ Terminated, LocalRef, InternalActorRef, AutoReceivedMessage, AddressExtractor, Address, ActorSystemImpl, ActorSystem, ActorRef } import akka.dispatch.SystemMessage import akka.event.{ LoggingAdapter, Logging } -import akka.remote.RemoteProtocol.{ RemoteMessageProtocol, RemoteControlProtocol, AkkaRemoteProtocol, ActorRefProtocol } import akka.AkkaException import akka.serialization.Serialization +import akka.remote.RemoteProtocol._ /** * Remote life-cycle events. 
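The netty Client.scala changes below replace the old ReadTimeoutHandler with an IdleStateHandler driven by the three timeouts added to reference.conf above: reader- or all-idle shuts the client connection down, while writer-idle (10s by default) sends the new HEARTBEAT control command so the server sees a live peer. A condensed sketch of the heartbeat envelope, using the generated RemoteControlProtocol/AddressProtocol builders from this patch; the actual handler wiring is in the diff that follows:

```scala
import akka.actor.Address
import akka.remote.RemoteProtocol.{ AddressProtocol, CommandType, RemoteControlProtocol }

// Build the HEARTBEAT control message sent on write-idle; the secure cookie is
// attached only when configured. Assumes host/port are set, as in the diff.
def heartbeatFor(localAddress: Address, cookie: Option[String]): RemoteControlProtocol = {
  val beat = RemoteControlProtocol.newBuilder.setCommandType(CommandType.HEARTBEAT)
  cookie foreach beat.setCookie
  beat.setOrigin(AddressProtocol.newBuilder
    .setSystem(localAddress.system)
    .setHostname(localAddress.host.get)
    .setPort(localAddress.port.get)
    .build)
    .build
}
```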
diff --git a/akka-remote/src/main/scala/akka/remote/netty/Client.scala b/akka-remote/src/main/scala/akka/remote/netty/Client.scala index 2947d9db26..eafd01d91a 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Client.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Client.scala @@ -10,7 +10,6 @@ import org.jboss.netty.channel.group.DefaultChannelGroup import org.jboss.netty.channel.{ ChannelHandler, StaticChannelPipeline, SimpleChannelUpstreamHandler, MessageEvent, ExceptionEvent, ChannelStateEvent, ChannelPipelineFactory, ChannelPipeline, ChannelHandlerContext, ChannelFuture, Channel } import org.jboss.netty.handler.codec.frame.{ LengthFieldPrepender, LengthFieldBasedFrameDecoder } import org.jboss.netty.handler.execution.ExecutionHandler -import org.jboss.netty.handler.timeout.{ ReadTimeoutHandler, ReadTimeoutException } import akka.remote.RemoteProtocol.{ RemoteControlProtocol, CommandType, AkkaRemoteProtocol } import akka.remote.{ RemoteProtocol, RemoteMessage, RemoteLifeCycleEvent, RemoteClientStarted, RemoteClientShutdown, RemoteClientException, RemoteClientError, RemoteClientDisconnected, RemoteClientConnected } import akka.actor.{ simpleName, Address } @@ -24,6 +23,7 @@ import java.net.InetAddress import org.jboss.netty.util.TimerTask import org.jboss.netty.util.Timeout import java.util.concurrent.TimeUnit +import org.jboss.netty.handler.timeout.{ IdleState, IdleStateEvent, IdleStateAwareChannelHandler, IdleStateHandler } class RemoteClientMessageBufferException(message: String, cause: Throwable) extends AkkaException(message, cause) { def this(msg: String) = this(msg, null) @@ -159,7 +159,7 @@ class ActiveRemoteClient private[akka] ( executionHandler = new ExecutionHandler(netty.executor) val b = new ClientBootstrap(netty.clientChannelFactory) - b.setPipelineFactory(new ActiveRemoteClientPipelineFactory(name, b, executionHandler, remoteAddress, this)) + b.setPipelineFactory(new ActiveRemoteClientPipelineFactory(name, b, executionHandler, remoteAddress, localAddress, this)) b.setOption("tcpNoDelay", true) b.setOption("keepAlive", true) b.setOption("connectTimeoutMillis", settings.ConnectionTimeout.toMillis) @@ -234,14 +234,36 @@ class ActiveRemoteClientHandler( val name: String, val bootstrap: ClientBootstrap, val remoteAddress: Address, + val localAddress: Address, val timer: HashedWheelTimer, val client: ActiveRemoteClient) - extends SimpleChannelUpstreamHandler { + extends IdleStateAwareChannelHandler { def runOnceNow(thunk: ⇒ Unit): Unit = timer.newTimeout(new TimerTask() { def run(timeout: Timeout) = try { thunk } finally { timeout.cancel() } }, 0, TimeUnit.MILLISECONDS) + override def channelIdle(ctx: ChannelHandlerContext, e: IdleStateEvent) { + import IdleState._ + + def createHeartBeat(localAddress: Address, cookie: Option[String]): AkkaRemoteProtocol = { + val beat = RemoteControlProtocol.newBuilder.setCommandType(CommandType.HEARTBEAT) + if (cookie.nonEmpty) beat.setCookie(cookie.get) + + client.netty.createControlEnvelope( + beat.setOrigin(RemoteProtocol.AddressProtocol.newBuilder + .setSystem(localAddress.system) + .setHostname(localAddress.host.get) + .setPort(localAddress.port.get) + .build).build) + } + + e.getState match { + case READER_IDLE | ALL_IDLE ⇒ runOnceNow { client.netty.shutdownClientConnection(remoteAddress) } + case WRITER_IDLE ⇒ e.getChannel.write(createHeartBeat(localAddress, client.netty.settings.SecureCookie)) + } + } + override def messageReceived(ctx: ChannelHandlerContext, event: MessageEvent) { try { event.getMessage match { @@ 
-291,18 +313,9 @@ class ActiveRemoteClientHandler( } override def exceptionCaught(ctx: ChannelHandlerContext, event: ExceptionEvent) = { - val cause = event.getCause - if (cause ne null) { - client.notifyListeners(RemoteClientError(cause, client.netty, client.remoteAddress)) - cause match { - case e: ReadTimeoutException ⇒ - runOnceNow { - client.netty.shutdownClientConnection(remoteAddress) // spawn in another thread - } - case e: Exception ⇒ event.getChannel.close() - } - - } else client.notifyListeners(RemoteClientError(new Exception("Unknown cause"), client.netty, client.remoteAddress)) + val cause = if (event.getCause ne null) event.getCause else new Exception("Unknown cause") + client.notifyListeners(RemoteClientError(cause, client.netty, client.remoteAddress)) + event.getChannel.close() } } @@ -311,17 +324,21 @@ class ActiveRemoteClientPipelineFactory( bootstrap: ClientBootstrap, executionHandler: ExecutionHandler, remoteAddress: Address, + localAddress: Address, client: ActiveRemoteClient) extends ChannelPipelineFactory { import client.netty.settings def getPipeline: ChannelPipeline = { - val timeout = new ReadTimeoutHandler(client.netty.timer, settings.ReadTimeout.length, settings.ReadTimeout.unit) + val timeout = new IdleStateHandler(client.netty.timer, + settings.ReadTimeout.toSeconds.toInt, + settings.WriteTimeout.toSeconds.toInt, + settings.AllTimeout.toSeconds.toInt) val lenDec = new LengthFieldBasedFrameDecoder(settings.MessageFrameSize, 0, 4, 0, 4) val lenPrep = new LengthFieldPrepender(4) val messageDec = new RemoteMessageDecoder val messageEnc = new RemoteMessageEncoder(client.netty) - val remoteClient = new ActiveRemoteClientHandler(name, bootstrap, remoteAddress, client.netty.timer, client) + val remoteClient = new ActiveRemoteClientHandler(name, bootstrap, remoteAddress, localAddress, client.netty.timer, client) new StaticChannelPipeline(timeout, lenDec, messageDec, lenPrep, messageEnc, executionHandler, remoteClient) } diff --git a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala index e9fe83dd7e..9ad92ca236 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala @@ -23,7 +23,7 @@ import akka.actor.{ Address, ActorSystemImpl, ActorRef } import akka.dispatch.MonitorableThreadFactory import akka.event.Logging import akka.remote.RemoteProtocol.AkkaRemoteProtocol -import akka.remote.{ RemoteTransportException, RemoteTransport, RemoteSettings, RemoteMarshallingOps, RemoteActorRefProvider, RemoteActorRef } +import akka.remote.{ RemoteTransportException, RemoteTransport, RemoteSettings, RemoteMarshallingOps, RemoteActorRefProvider, RemoteActorRef, RemoteServerStarted } /** * Provides the implementation of the Netty remote support @@ -73,6 +73,7 @@ class NettyRemoteTransport(val remoteSettings: RemoteSettings, val system: Actor def start(): Unit = { server.start() setAddressFromChannel(server.channel) + notifyListeners(RemoteServerStarted(this)) } def shutdown(): Unit = { @@ -157,7 +158,10 @@ class NettyRemoteTransport(val remoteSettings: RemoteSettings, val system: Actor def unbindClient(remoteAddress: Address): Unit = { clientsLock.writeLock().lock() try { - remoteClients.foreach { case (k, v) ⇒ if (v.isBoundTo(remoteAddress)) { v.shutdown(); remoteClients.remove(k) } } + remoteClients foreach { + case (k, v) ⇒ + if (v.isBoundTo(remoteAddress)) { v.shutdown(); 
remoteClients.remove(k) } + } } finally { clientsLock.writeLock().unlock() } @@ -227,7 +231,8 @@ class DefaultDisposableChannelGroup(name: String) extends DefaultChannelGroup(na override def close(): ChannelGroupFuture = { guard.writeLock().lock() try { - if (open.getAndSet(false)) super.close() else throw new IllegalStateException("ChannelGroup already closed, cannot add new channel") + if (open.getAndSet(false)) super.close() + else throw new IllegalStateException("ChannelGroup already closed, cannot add new channel") } finally { guard.writeLock().unlock() } diff --git a/akka-remote/src/main/scala/akka/remote/netty/Server.scala b/akka-remote/src/main/scala/akka/remote/netty/Server.scala index a8bc6ef67b..2c51875e9d 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Server.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Server.scala @@ -15,7 +15,7 @@ import org.jboss.netty.handler.codec.frame.{ LengthFieldPrepender, LengthFieldBa import org.jboss.netty.handler.execution.ExecutionHandler import akka.event.Logging import akka.remote.RemoteProtocol.{ RemoteControlProtocol, CommandType, AkkaRemoteProtocol } -import akka.remote.{ RemoteServerStarted, RemoteServerShutdown, RemoteServerError, RemoteServerClientDisconnected, RemoteServerClientConnected, RemoteServerClientClosed, RemoteProtocol, RemoteMessage } +import akka.remote.{ RemoteServerShutdown, RemoteServerError, RemoteServerClientDisconnected, RemoteServerClientConnected, RemoteServerClientClosed, RemoteProtocol, RemoteMessage } import akka.actor.Address import java.net.InetAddress import akka.actor.ActorSystemImpl @@ -51,9 +51,8 @@ class NettyRemoteServer(val netty: NettyRemoteTransport) { private[akka] var channel: Channel = _ def start(): Unit = { - channel = bootstrap.bind(new InetSocketAddress(ip, settings.DesiredPortFromConfig)) + channel = bootstrap.bind(new InetSocketAddress(ip, settings.PortSelector)) openChannels.add(channel) - netty.notifyListeners(RemoteServerStarted(netty)) } def shutdown() { @@ -61,9 +60,9 @@ class NettyRemoteServer(val netty: NettyRemoteTransport) { val shutdownSignal = { val b = RemoteControlProtocol.newBuilder.setCommandType(CommandType.SHUTDOWN) b.setOrigin(RemoteProtocol.AddressProtocol.newBuilder - .setSystem(settings.systemName) - .setHostname(settings.Hostname) - .setPort(settings.DesiredPortFromConfig) + .setSystem(netty.address.system) + .setHostname(netty.address.host.get) + .setPort(netty.address.port.get) .build) if (settings.SecureCookie.nonEmpty) b.setCookie(settings.SecureCookie.get) @@ -187,8 +186,9 @@ class RemoteServerHandler( val inbound = Address("akka", origin.getSystem, Some(origin.getHostname), Some(origin.getPort)) val client = new PassiveRemoteClient(event.getChannel, netty, inbound) netty.bindClient(inbound, client) - case CommandType.SHUTDOWN ⇒ //Will be unbound in channelClosed - case _ ⇒ //Unknown command + case CommandType.SHUTDOWN ⇒ //Will be unbound in channelClosed + case CommandType.HEARTBEAT ⇒ //Other guy is still alive + case _ ⇒ //Unknown command } case _ ⇒ //ignore } diff --git a/akka-remote/src/main/scala/akka/remote/netty/Settings.scala b/akka-remote/src/main/scala/akka/remote/netty/Settings.scala index 3f7c8f83de..0db6cabf18 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Settings.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Settings.scala @@ -30,6 +30,8 @@ class NettySettings(config: Config, val systemName: String) { val ReconnectionTimeWindow = Duration(getMilliseconds("reconnection-time-window"), MILLISECONDS) val 
ReadTimeout = Duration(getMilliseconds("read-timeout"), MILLISECONDS) + val WriteTimeout = Duration(getMilliseconds("write-timeout"), MILLISECONDS) + val AllTimeout = Duration(getMilliseconds("all-timeout"), MILLISECONDS) val ReconnectDelay = Duration(getMilliseconds("reconnect-delay"), MILLISECONDS) val MessageFrameSize = getBytes("message-frame-size").toInt @@ -37,7 +39,9 @@ class NettySettings(config: Config, val systemName: String) { case "" ⇒ InetAddress.getLocalHost.getHostAddress case value ⇒ value } - val DesiredPortFromConfig = getInt("port") + + @deprecated("WARNING: This should only be used by professionals.") + val PortSelector = getInt("port") val ConnectionTimeout = Duration(getMilliseconds("connection-timeout"), MILLISECONDS) diff --git a/akka-remote/src/multi-jvm/scala/akka/remote/AbstractRemoteActorMultiJvmSpec.scala b/akka-remote/src/multi-jvm/scala/akka/remote/AbstractRemoteActorMultiJvmSpec.scala old mode 100755 new mode 100644 index a560bc29d6..dd15817374 --- a/akka-remote/src/multi-jvm/scala/akka/remote/AbstractRemoteActorMultiJvmSpec.scala +++ b/akka-remote/src/multi-jvm/scala/akka/remote/AbstractRemoteActorMultiJvmSpec.scala @@ -1,28 +1,28 @@ -package akka.remote - -import com.typesafe.config.{Config, ConfigFactory} - -trait AbstractRemoteActorMultiJvmSpec { - def NrOfNodes: Int - def commonConfig: Config - - private[this] val remotes: IndexedSeq[String] = { - val nodesOpt = Option(AkkaRemoteSpec.testNodes).map(_.split(",").toIndexedSeq) - nodesOpt getOrElse IndexedSeq.fill(NrOfNodes)("localhost") - } - - def akkaSpec(idx: Int) = "AkkaRemoteSpec@%s:%d".format(remotes(idx), 9991+idx) - - def akkaURIs(count: Int): String = { - 0 until count map {idx => "\"akka://" + akkaSpec(idx) + "\""} mkString "," - } - - val nodeConfigs = ((1 to NrOfNodes).toList zip remotes) map { - case (idx, host) => - ConfigFactory.parseString(""" - akka { - remote.netty.hostname="%s" - remote.netty.port = "%d" - }""".format(host, 9990+idx, idx)) withFallback commonConfig - } -} +package akka.remote + +import com.typesafe.config.{Config, ConfigFactory} + +trait AbstractRemoteActorMultiJvmSpec { + def NrOfNodes: Int + def commonConfig: Config + + private[this] val remotes: IndexedSeq[String] = { + val nodesOpt = Option(AkkaRemoteSpec.testNodes).map(_.split(",").toIndexedSeq) + nodesOpt getOrElse IndexedSeq.fill(NrOfNodes)("localhost") + } + + def akkaSpec(idx: Int) = "AkkaRemoteSpec@%s:%d".format(remotes(idx), 9991+idx) + + def akkaURIs(count: Int): String = { + 0 until count map {idx => "\"akka://" + akkaSpec(idx) + "\""} mkString "," + } + + val nodeConfigs = ((1 to NrOfNodes).toList zip remotes) map { + case (idx, host) => + ConfigFactory.parseString(""" + akka { + remote.netty.hostname="%s" + remote.netty.port = "%d" + }""".format(host, 9990+idx, idx)) withFallback commonConfig + } +} diff --git a/akka-remote/src/multi-jvm/scala/akka/remote/Barrier.scala b/akka-remote/src/multi-jvm/scala/akka/remote/Barrier.scala old mode 100755 new mode 100644 index b11ec837b7..e99fca2a45 --- a/akka-remote/src/multi-jvm/scala/akka/remote/Barrier.scala +++ b/akka-remote/src/multi-jvm/scala/akka/remote/Barrier.scala @@ -1,19 +1,19 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -package akka.remote - -trait Barrier { - def await() = { enter(); leave() } - - def apply(body: ⇒ Unit) { - enter() - body - leave() - } - - def enter(): Unit - - def leave(): Unit -} +/** + * Copyright (C) 2009-2012 Typesafe Inc. 
+ */ + +package akka.remote + +trait Barrier { + def await() = { enter(); leave() } + + def apply(body: ⇒ Unit) { + enter() + body + leave() + } + + def enter(): Unit + + def leave(): Unit +} diff --git a/akka-remote/src/multi-jvm/scala/akka/remote/ZKClient.scala b/akka-remote/src/multi-jvm/scala/akka/remote/ZKClient.scala old mode 100755 new mode 100644 index 156c955566..733883228e --- a/akka-remote/src/multi-jvm/scala/akka/remote/ZKClient.scala +++ b/akka-remote/src/multi-jvm/scala/akka/remote/ZKClient.scala @@ -1,78 +1,78 @@ -/** - * Copyright (C) 2011-2012 Typesafe - */ -package akka.remote - -import org.apache.zookeeper._ -import ZooDefs.Ids - -object ZkClient extends Watcher { - // Don't forget to close! - lazy val zk: ZooKeeper = { - val remoteNodes = AkkaRemoteSpec.testNodes split ',' - - // ZkServers are configured to listen on a specific port. - val connectString = remoteNodes map (_+":2181") mkString "," - new ZooKeeper(connectString, 3000, this) - } - - def process(ev: WatchedEvent) { - synchronized { notify() } - } - - class ZkBarrier(name: String, count: Int, root: String) extends Barrier { - @annotation.tailrec - private def waitForServer() { - // SI-1672 - val r = try { - zk.exists("/", false); true - } catch { - case _: KeeperException.ConnectionLossException => - Thread.sleep(10000) - false - } - if (!r) waitForServer() - } - waitForServer() - - try { - zk.create(root, Array[Byte](), Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT) - } catch { - case _: KeeperException.NodeExistsException => - } - - val timeoutMs = 300*1000 - - private def block(num: Int) { - val start = System.currentTimeMillis - while (true) { - if (System.currentTimeMillis - start > timeoutMs) - throw new InterruptedException("Timed out blocking in zk") - - ZkClient.this.synchronized { - val children = zk.getChildren(root, true) - if (children.size < num) { - ZkClient.this.wait(timeoutMs) - } else - return - } - } - } - - def enter() { - zk.create(root + "/" + name, Array[Byte](), Ids.OPEN_ACL_UNSAFE, - CreateMode.EPHEMERAL) - - block(count) - } - - final def leave() { - zk.create(root + "/" + name + ".leave", Array[Byte](), Ids.OPEN_ACL_UNSAFE, - CreateMode.EPHEMERAL) - - block(2*count) - } - } - - def barrier(name: String, count: Int, root: String) = new ZkBarrier(name, count, root) -} +/** + * Copyright (C) 2011-2012 Typesafe + */ +package akka.remote + +import org.apache.zookeeper._ +import ZooDefs.Ids + +object ZkClient extends Watcher { + // Don't forget to close! + lazy val zk: ZooKeeper = { + val remoteNodes = AkkaRemoteSpec.testNodes split ',' + + // ZkServers are configured to listen on a specific port. 
+ val connectString = remoteNodes map (_+":2181") mkString "," + new ZooKeeper(connectString, 3000, this) + } + + def process(ev: WatchedEvent) { + synchronized { notify() } + } + + class ZkBarrier(name: String, count: Int, root: String) extends Barrier { + @annotation.tailrec + private def waitForServer() { + // SI-1672 + val r = try { + zk.exists("/", false); true + } catch { + case _: KeeperException.ConnectionLossException => + Thread.sleep(10000) + false + } + if (!r) waitForServer() + } + waitForServer() + + try { + zk.create(root, Array[Byte](), Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT) + } catch { + case _: KeeperException.NodeExistsException => + } + + val timeoutMs = 300*1000 + + private def block(num: Int) { + val start = System.currentTimeMillis + while (true) { + if (System.currentTimeMillis - start > timeoutMs) + throw new InterruptedException("Timed out blocking in zk") + + ZkClient.this.synchronized { + val children = zk.getChildren(root, true) + if (children.size < num) { + ZkClient.this.wait(timeoutMs) + } else + return + } + } + } + + def enter() { + zk.create(root + "/" + name, Array[Byte](), Ids.OPEN_ACL_UNSAFE, + CreateMode.EPHEMERAL) + + block(count) + } + + final def leave() { + zk.create(root + "/" + name + ".leave", Array[Byte](), Ids.OPEN_ACL_UNSAFE, + CreateMode.EPHEMERAL) + + block(2*count) + } + } + + def barrier(name: String, count: Int, root: String) = new ZkBarrier(name, count, root) +} diff --git a/akka-remote/src/test/resources/log4j.properties b/akka-remote/src/test/resources/log4j.properties deleted file mode 100644 index 2d07c8e051..0000000000 --- a/akka-remote/src/test/resources/log4j.properties +++ /dev/null @@ -1,58 +0,0 @@ -# Define some default values that can be overridden by system properties -zookeeper.root.logger=INFO, CONSOLE -zookeeper.console.threshold=OFF -zookeeper.log.dir=. -zookeeper.log.file=zookeeper.log -zookeeper.log.threshold=DEBUG -zookeeper.tracelog.dir=. 
-zookeeper.tracelog.file=zookeeper_trace.log - -# -# ZooKeeper Logging Configuration -# - -# Format is " (, )+ - -# DEFAULT: console appender only -log4j.rootLogger=${zookeeper.root.logger} - -# Example with rolling log file -#log4j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE - -# Example with rolling log file and tracing -#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE - -# -# Log INFO level and above messages to the console -# -log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender -log4j.appender.CONSOLE.Threshold=${zookeeper.console.threshold} -log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout -log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} [myid:%X{myid}] - %-5p [%t:%C{1}@%L] - %m%n - -# -# Add ROLLINGFILE to rootLogger to get log file output -# Log DEBUG level and above messages to a log file -log4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender -log4j.appender.ROLLINGFILE.Threshold=${zookeeper.log.threshold} -log4j.appender.ROLLINGFILE.File=${zookeeper.log.dir}/${zookeeper.log.file} - -# Max log file size of 10MB -log4j.appender.ROLLINGFILE.MaxFileSize=10MB -# uncomment the next line to limit number of backup files -#log4j.appender.ROLLINGFILE.MaxBackupIndex=10 - -log4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout -log4j.appender.ROLLINGFILE.layout.ConversionPattern=%d{ISO8601} [myid:%X{myid}] - %-5p [%t:%C{1}@%L] - %m%n - - -# -# Add TRACEFILE to rootLogger to get log file output -# Log DEBUG level and above messages to a log file -log4j.appender.TRACEFILE=org.apache.log4j.FileAppender -log4j.appender.TRACEFILE.Threshold=TRACE -log4j.appender.TRACEFILE.File=${zookeeper.tracelog.dir}/${zookeeper.tracelog.file} - -log4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout -### Notice we are including log4j's NDC here (%x) -log4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} [myid:%X{myid}] - %-5p [%t:%C{1}@%L][%x] - %m%n diff --git a/akka-remote/src/test/resources/logback-test.xml b/akka-remote/src/test/resources/logback-test.xml deleted file mode 100644 index 240a412687..0000000000 --- a/akka-remote/src/test/resources/logback-test.xml +++ /dev/null @@ -1,26 +0,0 @@ - - - - - - - - - - - - - - - [%4p] [%d{ISO8601}] [%t] %c{1}: %m%n - - - - - - - - - - - diff --git a/akka-remote/src/test/resources/zoo.cfg b/akka-remote/src/test/resources/zoo.cfg deleted file mode 100644 index b71eadcc33..0000000000 --- a/akka-remote/src/test/resources/zoo.cfg +++ /dev/null @@ -1,12 +0,0 @@ -# The number of milliseconds of each tick -tickTime=2000 -# The number of ticks that the initial -# synchronization phase can take -initLimit=10 -# The number of ticks that can pass between -# sending a request and getting an acknowledgement -syncLimit=5 -# the directory where the snapshot is stored. -dataDir=/export/crawlspace/mahadev/zookeeper/server1/data -# the port at which the clients will connect -clientPort=2181 diff --git a/akka-remote/src/test/scala/akka/remote/GossiperSpec.scala b/akka-remote/src/test/scala/akka/remote/GossiperSpec.scala deleted file mode 100644 index 12e2925b26..0000000000 --- a/akka-remote/src/test/scala/akka/remote/GossiperSpec.scala +++ /dev/null @@ -1,13 +0,0 @@ -package akka.remote - -import java.net.InetSocketAddress -import akka.testkit.AkkaSpec - -class GossiperSpec extends AkkaSpec { - - "An Gossiper" must { - - "..." 
in { - } - } -} diff --git a/akka-remote/src/test/scala/akka/remote/RemoteCommunicationSpec.scala b/akka-remote/src/test/scala/akka/remote/RemoteCommunicationSpec.scala index 0f6898a239..88d80d6d81 100644 --- a/akka-remote/src/test/scala/akka/remote/RemoteCommunicationSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/RemoteCommunicationSpec.scala @@ -82,8 +82,8 @@ akka { "support ask" in { Await.result(here ? "ping", timeout.duration) match { - case ("pong", s: akka.pattern.AskSupport.PromiseActorRef) ⇒ // good - case m ⇒ fail(m + " was not (pong, AskActorRef)") + case ("pong", s: akka.pattern.PromiseActorRef) ⇒ // good + case m ⇒ fail(m + " was not (pong, AskActorRef)") } } diff --git a/akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala b/akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala index b60b90b900..3074e033d7 100644 --- a/akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala @@ -13,9 +13,8 @@ import akka.remote.netty.NettyRemoteTransport class RemoteConfigSpec extends AkkaSpec( """ akka { - actor { - provider = "akka.remote.RemoteActorRefProvider" - } + actor.provider = "akka.remote.RemoteActorRefProvider" + remote.netty.port = 0 } """) { @@ -28,13 +27,6 @@ class RemoteConfigSpec extends AkkaSpec( RemoteTransport must be("akka.remote.netty.NettyRemoteTransport") UntrustedMode must be(false) RemoteSystemDaemonAckTimeout must be(30 seconds) - - FailureDetectorThreshold must be(8) - FailureDetectorMaxSampleSize must be(1000) - - InitialDelayForGossip must be(5 seconds) - GossipFrequency must be(1 second) - SeedNodes must be(Set()) } "be able to parse Netty config elements" in { @@ -50,7 +42,7 @@ class RemoteConfigSpec extends AkkaSpec( RequireCookie must be(false) UsePassiveConnections must be(true) Hostname must not be "" // will be set to the local IP - DesiredPortFromConfig must be(2552) + PortSelector must be(0) MessageFrameSize must be(1048576) ConnectionTimeout must be(2 minutes) Backlog must be(4096) @@ -59,7 +51,9 @@ class RemoteConfigSpec extends AkkaSpec( MaxChannelMemorySize must be(0) MaxTotalMemorySize must be(0) ReconnectDelay must be(5 seconds) - ReadTimeout must be(1 hour) + ReadTimeout must be(0 millis) + WriteTimeout must be(10 seconds) + AllTimeout must be(0 millis) ReconnectionTimeWindow must be(10 minutes) } diff --git a/akka-remote/src/test/scala/akka/remote/RemoteDeployerSpec.scala b/akka-remote/src/test/scala/akka/remote/RemoteDeployerSpec.scala index 1b32ce7112..1b250f1ea9 100644 --- a/akka-remote/src/test/scala/akka/remote/RemoteDeployerSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/RemoteDeployerSpec.scala @@ -18,6 +18,7 @@ object RemoteDeployerSpec { remote = "akka://sys@wallace:2552" } } + akka.remote.netty.port = 0 """, ConfigParseOptions.defaults) class RecipeActor extends Actor { diff --git a/akka-samples/akka-sample-remote/src/main/resources/application.conf b/akka-samples/akka-sample-remote/src/main/resources/application.conf index ce550b33eb..0fe79b9290 100644 --- a/akka-samples/akka-sample-remote/src/main/resources/application.conf +++ b/akka-samples/akka-sample-remote/src/main/resources/application.conf @@ -3,7 +3,7 @@ calculator { include "common" akka { - remote.server.port = 2552 + remote.netty.port = 2552 } } //#calculator @@ -13,7 +13,7 @@ remotelookup { include "common" akka { - remote.server.port = 2553 + remote.netty.port = 2553 } } //#remotelookup @@ -31,7 +31,7 @@ remotecreation { } } - remote.server.port = 2554 + 
remote.netty.port = 2554 } } //#remotecreation diff --git a/akka-samples/akka-sample-remote/src/main/resources/common.conf b/akka-samples/akka-sample-remote/src/main/resources/common.conf index 79009c0aea..a3e16cf1a9 100644 --- a/akka-samples/akka-sample-remote/src/main/resources/common.conf +++ b/akka-samples/akka-sample-remote/src/main/resources/common.conf @@ -1,13 +1,11 @@ akka { - version = "2.0-SNAPSHOT" actor { provider = "akka.remote.RemoteActorRefProvider" } remote { - transport = "akka.remote.netty.NettyRemoteSupport" - server { + netty { hostname = "127.0.0.1" } } diff --git a/akka-sbt-plugin/sample/src/main/config/logback.xml b/akka-sbt-plugin/sample/src/main/config/logback.xml index bddac0313d..019d298192 100644 --- a/akka-sbt-plugin/sample/src/main/config/logback.xml +++ b/akka-sbt-plugin/sample/src/main/config/logback.xml @@ -9,7 +9,7 @@ - + diff --git a/akka-spring/src/test/resources/akka-test.conf b/akka-spring/src/test/resources/akka-test.conf index 806783d217..a4838f6cc7 100644 --- a/akka-spring/src/test/resources/akka-test.conf +++ b/akka-spring/src/test/resources/akka-test.conf @@ -128,7 +128,7 @@ akka { # secure-cookie = "050E0A0D0D06010A00000900040D060F0C09060B" # generate your own with '$AKKA_HOME/scripts/generate_secure_cookie.sh' or using 'Crypt.generateSecureCookie' secure-cookie = "" - layer = "akka.remote.netty.NettyRemoteSupport" + layer = "akka.remote.netty.NettyRemoteTransport" server { hostname = "localhost" # The hostname or IP that clients should connect to diff --git a/akka-spring/src/test/scala/TypedActorSpringFeatureTest.scala b/akka-spring/src/test/scala/TypedActorSpringFeatureTest.scala index 1fa4874408..8ca1a981d6 100644 --- a/akka-spring/src/test/scala/TypedActorSpringFeatureTest.scala +++ b/akka-spring/src/test/scala/TypedActorSpringFeatureTest.scala @@ -13,7 +13,7 @@ import org.springframework.context.ApplicationContext import org.springframework.context.support.ClassPathXmlApplicationContext import org.springframework.core.io.{ ClassPathResource, Resource } import org.scalatest.{ BeforeAndAfterAll, FeatureSpec } -import akka.remote.netty.NettyRemoteSupport +import akka.remote.netty.NettyRemoteTransport import akka.actor._ import akka.actor.Actor._ import java.util.concurrent.{TimeoutException, CountDownLatch} @@ -36,17 +36,17 @@ object RemoteTypedActorLog { @RunWith(classOf[JUnitRunner]) class TypedActorSpringFeatureTest extends FeatureSpec with ShouldMatchers with BeforeAndAfterAll { - var optimizeLocal_? = remote.asInstanceOf[NettyRemoteSupport].optimizeLocalScoped_? + var optimizeLocal_? = remote.asInstanceOf[NettyRemoteTransport].optimizeLocalScoped_? override def beforeAll { - remote.asInstanceOf[NettyRemoteSupport].optimizeLocal.set(false) //Can't run the test if we're eliminating all remote calls + remote.asInstanceOf[NettyRemoteTransport].optimizeLocal.set(false) //Can't run the test if we're eliminating all remote calls remote.start("localhost", 9990) val typedActor = TypedActor.newInstance(classOf[RemoteTypedActorOne], classOf[RemoteTypedActorOneImpl], 1000) remote.registerTypedActor("typed-actor-service", typedActor) } override def afterAll { - remote.asInstanceOf[NettyRemoteSupport].optimizeLocal.set(optimizeLocal_?) //Reset optimizelocal after all tests + remote.asInstanceOf[NettyRemoteTransport].optimizeLocal.set(optimizeLocal_?) 
diff --git a/akka-spring/src/test/scala/UntypedActorSpringFeatureTest.scala b/akka-spring/src/test/scala/UntypedActorSpringFeatureTest.scala
index 66ca68dba7..92ca4c500e 100644
--- a/akka-spring/src/test/scala/UntypedActorSpringFeatureTest.scala
+++ b/akka-spring/src/test/scala/UntypedActorSpringFeatureTest.scala
@@ -9,7 +9,7 @@ import org.scalatest.matchers.ShouldMatchers
 import org.scalatest.junit.JUnitRunner
 import org.junit.runner.RunWith
 import org.springframework.context.support.ClassPathXmlApplicationContext
-import akka.remote.netty.NettyRemoteSupport
+import akka.remote.netty.NettyRemoteTransport
 import org.scalatest.{ BeforeAndAfterAll, FeatureSpec }
 import java.util.concurrent.CountDownLatch
@@ -23,15 +23,15 @@ import akka.actor.Actor._
 @RunWith(classOf[JUnitRunner])
 class UntypedActorSpringFeatureTest extends FeatureSpec with ShouldMatchers with BeforeAndAfterAll {
 
-  var optimizeLocal_? = remote.asInstanceOf[NettyRemoteSupport].optimizeLocalScoped_?
+  var optimizeLocal_? = remote.asInstanceOf[NettyRemoteTransport].optimizeLocalScoped_?
 
   override def beforeAll {
-    remote.asInstanceOf[NettyRemoteSupport].optimizeLocal.set(false) //Can't run the test if we're eliminating all remote calls
+    remote.asInstanceOf[NettyRemoteTransport].optimizeLocal.set(false) //Can't run the test if we're eliminating all remote calls
     remote.start("localhost", 9990)
   }
 
   override def afterAll {
-    remote.asInstanceOf[NettyRemoteSupport].optimizeLocal.set(optimizeLocal_?) //Reset optimizelocal after all tests
+    remote.asInstanceOf[NettyRemoteTransport].optimizeLocal.set(optimizeLocal_?) //Reset optimizelocal after all tests
     remote.shutdown
     Thread.sleep(1000)
diff --git a/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala b/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala
index 18618a8f0c..d42cfcf165 100644
--- a/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala
+++ b/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala
@@ -34,16 +34,15 @@ class TestActorRef[T <: Actor](
   _supervisor.path / name,
   false) {
 
-  private case object InternalGetActor extends AutoReceivedMessage
+  import TestActorRef.InternalGetActor
 
   override def newActorCell(
     system: ActorSystemImpl,
     ref: InternalActorRef,
     props: Props,
     supervisor: InternalActorRef,
-    receiveTimeout: Option[Duration],
-    hotswap: Stack[PartialFunction[Any, Unit]]): ActorCell =
-    new ActorCell(system, ref, props, supervisor, receiveTimeout, hotswap) {
+    receiveTimeout: Option[Duration]): ActorCell =
+    new ActorCell(system, ref, props, supervisor, receiveTimeout) {
       override def autoReceiveMessage(msg: Envelope) {
         msg.message match {
           case InternalGetActor ⇒ sender ! actor
@@ -99,6 +98,8 @@ class TestActorRef[T <: Actor](
 
 object TestActorRef {
 
+  private case object InternalGetActor extends AutoReceivedMessage
+
   private val number = new AtomicLong
   private[testkit] def randomName: String = {
     val l = number.getAndIncrement()
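Moving `InternalGetActor` from the ref itself into the `TestActorRef` companion makes the probe message a single shared object instead of one allocated per ref. For orientation, a rough sketch of how `TestActorRef` is used in a spec; the actor, its counter field, and the system name are invented for illustration:

    import akka.actor.{ Actor, ActorSystem, Props }
    import akka.testkit.TestActorRef

    class Counter extends Actor {
      var n = 0
      def receive = { case "inc" ⇒ n += 1 }
    }

    implicit val system = ActorSystem("TestActorRefDemo") // name is illustrative
    val ref = TestActorRef[Counter](Props(new Counter))
    ref ! "inc"                        // processed synchronously on the calling thread
    assert(ref.underlyingActor.n == 1) // direct access to the actor instance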
diff --git a/akka-testkit/src/test/java/akka/testkit/TestActorRefJavaCompile.java b/akka-testkit/src/test/java/akka/testkit/TestActorRefJavaCompile.java
new file mode 100644
index 0000000000..5c13557854
--- /dev/null
+++ b/akka-testkit/src/test/java/akka/testkit/TestActorRefJavaCompile.java
@@ -0,0 +1,16 @@
+/**
+ * Copyright (C) 2009-2012 Typesafe Inc. <http://www.typesafe.com>
+ */
+
+package akka.testkit;
+
+import org.junit.Test;
+import akka.actor.Props;
+
+public class TestActorRefJavaCompile {
+
+  public void shouldBeAbleToCompileWhenUsingApply() {
+    //Just a dummy call to make sure it compiles
+    TestActorRef ref = TestActorRef.apply(new Props(), null);
+  }
+}
\ No newline at end of file
diff --git a/akka-testkit/src/test/scala/akka/testkit/AkkaSpec.scala b/akka-testkit/src/test/scala/akka/testkit/AkkaSpec.scala
index c8db05b171..172bdc230f 100644
--- a/akka-testkit/src/test/scala/akka/testkit/AkkaSpec.scala
+++ b/akka-testkit/src/test/scala/akka/testkit/AkkaSpec.scala
@@ -29,12 +29,12 @@ object AkkaSpec {
         stdout-loglevel = "WARNING"
         actor {
           default-dispatcher {
-            core-pool-size-factor = 2
-            core-pool-size-min = 8
-            core-pool-size-max = 8
-            max-pool-size-factor = 2
-            max-pool-size-min = 8
-            max-pool-size-max = 8
+            executor = "fork-join-executor"
+            fork-join-executor {
+              parallelism-min = 8
+              parallelism-factor = 2.0
+              parallelism-max = 8
+            }
           }
         }
       }
diff --git a/akka-testkit/src/test/scala/akka/testkit/TestFSMRefSpec.scala b/akka-testkit/src/test/scala/akka/testkit/TestFSMRefSpec.scala
index d2ec767504..86c6a8c7c5 100644
--- a/akka-testkit/src/test/scala/akka/testkit/TestFSMRefSpec.scala
+++ b/akka-testkit/src/test/scala/akka/testkit/TestFSMRefSpec.scala
@@ -12,19 +12,17 @@ import akka.util.duration._
 @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner])
 class TestFSMRefSpec extends AkkaSpec {
 
-  import FSM._
-
   "A TestFSMRef" must {
 
     "allow access to state data" in {
       val fsm = TestFSMRef(new Actor with FSM[Int, String] {
         startWith(1, "")
         when(1) {
-          case Ev("go")         ⇒ goto(2) using "go"
-          case Ev(StateTimeout) ⇒ goto(2) using "timeout"
+          case Event("go", _)          ⇒ goto(2) using "go"
+          case Event(StateTimeout, _)  ⇒ goto(2) using "timeout"
         }
         when(2) {
-          case Ev("back") ⇒ goto(1) using "back"
+          case Event("back", _) ⇒ goto(1) using "back"
         }
       }, "test-fsm-ref-1")
       fsm.stateName must be(1)
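The `TestFSMRefSpec` hunk tracks the FSM API change from the deprecated `Ev(msg)` extractor to matching on the full `Event(msg, stateData)` pair, which also removes the need for `import FSM._`. A hedged sketch of the new pattern outside the test; the states and data are invented:

    import akka.actor.{ Actor, FSM }

    // illustrative two-state FSM, not taken from the patch
    class Door extends Actor with FSM[String, Int] {
      startWith("closed", 0)
      when("closed") {
        // Event carries the message and the current state data together
        case Event("open", timesOpened) ⇒ goto("open") using (timesOpened + 1)
      }
      when("open") {
        case Event("close", _) ⇒ goto("closed")
      }
      initialize
    }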
diff --git a/akka-transactor/src/test/java/scala/concurrent/stm/JavaAPITests.java b/akka-transactor/src/test/java/scala/concurrent/stm/JavaAPITests.java
index 63fb6abb74..09ed90af7a 100644
--- a/akka-transactor/src/test/java/scala/concurrent/stm/JavaAPITests.java
+++ b/akka-transactor/src/test/java/scala/concurrent/stm/JavaAPITests.java
@@ -27,9 +27,9 @@ public class JavaAPITests {
   public void atomicWithRunnable() {
     final Ref.View<Integer> ref = newRef(0);
     atomic(new Runnable() {
-        public void run() {
-            ref.set(10);
-        }
+      public void run() {
+        ref.set(10);
+      }
     });
     int value = ref.get();
     assertEquals(10, value);
@@ -39,9 +39,9 @@ public class JavaAPITests {
   public void atomicWithCallable() {
     final Ref.View<Integer> ref = newRef(0);
     int oldValue = atomic(new Callable<Integer>() {
-        public Integer call() {
-            return ref.swap(10);
-        }
+      public Integer call() {
+        return ref.swap(10);
+      }
     });
     assertEquals(0, oldValue);
     int newValue = ref.get();
@@ -53,10 +53,10 @@ public class JavaAPITests {
     final Ref.View<Integer> ref = newRef(0);
     try {
       atomic(new Runnable() {
-          public void run() {
-              ref.set(10);
-              throw new TestException();
-          }
+        public void run() {
+          ref.set(10);
+          throw new TestException();
+        }
       });
     } catch (TestException e) {
       int value = ref.get();
@@ -69,9 +69,9 @@ public class JavaAPITests {
   public void transformInteger() {
     Ref.View<Integer> ref = newRef(0);
     transform(ref, new AbstractFunction1<Integer, Integer>() {
-        public Integer apply(Integer i) {
-            return i + 10;
-        }
+      public Integer apply(Integer i) {
+        return i + 10;
+      }
     });
     int value = ref.get();
     assertEquals(10, value);
@@ -110,13 +110,13 @@ public class JavaAPITests {
     final Map<Integer, String> map = newMap();
     try {
       atomic(new Runnable() {
-          public void run() {
-              map.put(1, "one");
+        public void run() {
+          map.put(1, "one");
           map.put(2, "two");
           assertTrue(map.containsKey(1));
           assertTrue(map.containsKey(2));
-              throw new TestException();
-          }
+          throw new TestException();
+        }
       });
     } catch (TestException e) {
       assertFalse(map.containsKey(1));
diff --git a/akka-transactor/src/test/scala/akka/transactor/CoordinatedIncrementSpec.scala b/akka-transactor/src/test/scala/akka/transactor/CoordinatedIncrementSpec.scala
index 265d4a9eaf..9c019a56a5 100644
--- a/akka-transactor/src/test/scala/akka/transactor/CoordinatedIncrementSpec.scala
+++ b/akka-transactor/src/test/scala/akka/transactor/CoordinatedIncrementSpec.scala
@@ -20,8 +20,11 @@ object CoordinatedIncrement {
     akka {
       actor {
         default-dispatcher {
-          core-pool-size-min = 5
-          core-pool-size-max = 16
+          executor = "thread-pool-executor"
+          thread-pool-executor {
+            core-pool-size-min = 5
+            core-pool-size-max = 16
+          }
         }
       }
     }
diff --git a/akka-zeromq/src/test/scala/akka/zeromq/ConcurrentSocketActorSpec.scala b/akka-zeromq/src/test/scala/akka/zeromq/ConcurrentSocketActorSpec.scala
index 983a2951c1..7c498bd653 100644
--- a/akka-zeromq/src/test/scala/akka/zeromq/ConcurrentSocketActorSpec.scala
+++ b/akka-zeromq/src/test/scala/akka/zeromq/ConcurrentSocketActorSpec.scala
@@ -9,11 +9,7 @@ import akka.util.duration._
 import akka.actor.{ Cancellable, Actor, Props, ActorRef }
 
 object ConcurrentSocketActorSpec {
-  val config = """
-akka {
-  extensions = []
-}
-"""
+  val config = ""
 }
 
 class ConcurrentSocketActorSpec
@@ -23,7 +19,7 @@ class ConcurrentSocketActorSpec
 
   val endpoint = "tcp://127.0.0.1:%s" format { val s = new java.net.ServerSocket(0); try s.getLocalPort finally s.close() }
 
-  def zmq = system.extension(ZeroMQExtension)
+  def zmq = ZeroMQExtension(system)
 
   "ConcurrentSocketActor" should {
     "support pub-sub connections" in {
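`ZeroMQExtension(system)` and `system.extension(ZeroMQExtension)` resolve to the same cached extension instance; the hunk simply prefers the apply-style lookup on the companion object. A small sketch of the equivalence, assuming akka-zeromq is on the classpath (the system name is invented):

    import akka.actor.ActorSystem
    import akka.zeromq.ZeroMQExtension

    val system = ActorSystem("ZeromqDemo")
    // both lookups return the same extension, created once per ActorSystem
    val viaApply  = ZeroMQExtension(system)
    val viaMethod = system.extension(ZeroMQExtension)
    assert(viaApply eq viaMethod)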
diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala
index 9dada98416..a5c257ca84 100644
--- a/project/AkkaBuild.scala
+++ b/project/AkkaBuild.scala
@@ -31,7 +31,7 @@ object AkkaBuild extends Build {
       Unidoc.unidocExclude := Seq(samples.id, tutorials.id),
       Dist.distExclude := Seq(actorTests.id, akkaSbtPlugin.id, docs.id)
     ),
-    aggregate = Seq(actor, testkit, actorTests, remote, slf4j, agent, transactor, mailboxes, zeroMQ, kernel, akkaSbtPlugin, actorMigration, samples, tutorials, docs)
+    aggregate = Seq(actor, testkit, actorTests, remote, cluster, slf4j, agent, transactor, mailboxes, zeroMQ, kernel, akkaSbtPlugin, actorMigration, samples, tutorials, docs)
   )
 
   lazy val actor = Project(
@@ -86,6 +86,25 @@ object AkkaBuild extends Build {
     )
   ) configs (MultiJvm)
 
+  lazy val cluster = Project(
+    id = "akka-cluster",
+    base = file("akka-cluster"),
+    dependencies = Seq(remote, remote % "test->test", testkit % "test->test"),
+    settings = defaultSettings ++ multiJvmSettings ++ schoirSettings ++ Seq(
+      libraryDependencies ++= Dependencies.cluster,
+      // disable parallel tests
+      parallelExecution in Test := false,
+      extraOptions in MultiJvm <<= (sourceDirectory in MultiJvm) { src =>
+        (name: String) => (src ** (name + ".conf")).get.headOption.map("-Dakka.config=" + _.absolutePath).toSeq
+      },
+      scalatestOptions in MultiJvm := Seq("-r", "org.scalatest.akka.QuietReporter"),
+      jvmOptions in MultiJvm := {
+        if (getBoolean("sbt.log.noformat")) Seq("-Dakka.test.nocolor=true") else Nil
+      },
+      test in Test <<= (test in Test) dependsOn (test in MultiJvm)
+    )
+  ) configs (MultiJvm)
+
   lazy val slf4j = Project(
     id = "akka-slf4j",
     base = file("akka-slf4j"),
@@ -301,7 +320,7 @@ object AkkaBuild extends Build {
   lazy val docs = Project(
     id = "akka-docs",
     base = file("akka-docs"),
-    dependencies = Seq(actor, testkit % "test->test", remote, slf4j, agent, transactor, fileMailbox, mongoMailbox, redisMailbox, beanstalkMailbox, zookeeperMailbox),
+    dependencies = Seq(actor, testkit % "test->test", remote, cluster, slf4j, agent, transactor, fileMailbox, mongoMailbox, redisMailbox, beanstalkMailbox, zookeeperMailbox),
     settings = defaultSettings ++ Seq(
       unmanagedSourceDirectories in Test <<= baseDirectory { _ ** "code" get },
       libraryDependencies ++= Dependencies.docs,
@@ -410,10 +429,7 @@ object Dependencies {
     Test.zookeeper, Test.log4j // needed for ZkBarrier in multi-jvm tests
   )
 
-//  val cluster = Seq(
-//    bookkeeper, commonsCodec, commonsIo, guice, h2Lzf, jacksonCore, jacksonMapper, log4j, netty,
-//    protobuf, sjson, zkClient, zookeeper, zookeeperLock, Test.junit, Test.scalatest
-//  )
+  val cluster = Seq(Test.junit, Test.scalatest)
 
   val slf4j = Seq(slf4jApi)
diff --git a/scripts/samples/start b/scripts/samples/start
index 491c617db2..21563159f0 100755
--- a/scripts/samples/start
+++ b/scripts/samples/start
@@ -8,6 +8,6 @@ AKKA_HOME="$(cd "$SAMPLE"/../../../..; pwd)"
 
 [ -n "$AKKA_CLASSPATH" ] || AKKA_CLASSPATH="$AKKA_HOME/lib/scala-library.jar:$AKKA_HOME/lib/akka/*"
 
-SAMPLE_CLASSPATH="$AKKA_CLASSPATH:$SAMPLE/lib/*:$SAMPLE/config"
+SAMPLE_CLASSPATH="$SAMPLE/config:$AKKA_CLASSPATH:$SAMPLE/lib/*"
 
 java $JAVA_OPTS -cp "$SAMPLE_CLASSPATH" -Dakka.home="$SAMPLE" akka.kernel.Main
diff --git a/scripts/samples/start.bat b/scripts/samples/start.bat
index 1bffae4e5b..a6a3ec5e33 100644
--- a/scripts/samples/start.bat
+++ b/scripts/samples/start.bat
@@ -3,6 +3,6 @@ set SAMPLE=%~dp0..
 set AKKA_HOME=%SAMPLE%\..\..\..\..
 set JAVA_OPTS=-Xms1024M -Xmx1024M -Xss1M -XX:MaxPermSize=256M -XX:+UseParallelGC
 set AKKA_CLASSPATH=%AKKA_HOME%\lib\scala-library.jar;%AKKA_HOME%\lib\akka\*
-set SAMPLE_CLASSPATH=%AKKA_CLASSPATH%;%SAMPLE%\lib\*;%SAMPLE%\config
+set SAMPLE_CLASSPATH=%SAMPLE%\config;%AKKA_CLASSPATH%;%SAMPLE%\lib\*
 
 java %JAVA_OPTS% -cp "%SAMPLE_CLASSPATH%" -Dakka.home="%SAMPLE%" akka.kernel.Main
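With `cluster` added to the aggregate and to `Dependencies`, the new module builds alongside the rest; the classpath reorder in the start scripts additionally puts `$SAMPLE/config` first so a sample's own configuration files win over anything bundled in the jars. A hypothetical sbt fragment for a project consuming the new artifact once it is published; the group id and version are assumed from the branch, not stated in the patch:

    // hypothetical build.sbt lines, not part of this patch
    resolvers += "Typesafe Snapshots" at "http://repo.typesafe.com/typesafe/snapshots/"
    libraryDependencies += "com.typesafe.akka" % "akka-cluster" % "2.0-SNAPSHOT"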