From 5f83340d5e08b71009735a99256831d03fccea0f Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 25 Jan 2012 22:36:03 +0100 Subject: [PATCH 01/94] wip --- .../src/main/scala/akka/dispatch/Future.scala | 11 +++- .../scala/akka/dispatch/japi/Future.scala | 50 +++++++++++++++---- .../src/main/scala/akka/util/BoxedType.scala | 8 +-- .../akka/docs/future/FutureDocTestBase.java | 21 ++++---- 4 files changed, 64 insertions(+), 26 deletions(-) diff --git a/akka-actor/src/main/scala/akka/dispatch/Future.scala b/akka-actor/src/main/scala/akka/dispatch/Future.scala index 09ce22d6b8..19466c2a93 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Future.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Future.scala @@ -507,13 +507,20 @@ sealed trait Future[+T] extends japi.Future[T] with Await.Awaitable[T] { * Creates a new Future[A] which is completed with this Future's result if * that conforms to A's erased type or a ClassCastException otherwise. */ - final def mapTo[A](implicit m: Manifest[A]): Future[A] = { + final def mapTo[A](implicit m: Manifest[A]): Future[A] = + mapTo[A](m.erasure.asInstanceOf[Class[A]]) + + /** + * Creates a new Future[A] which is completed with this Future's result if + * that conforms to A's erased type or a ClassCastException otherwise. + */ + final def mapTo[A](clazz: Class[A]): Future[A] = { val fa = Promise[A]() onComplete { case l: Left[_, _] ⇒ fa complete l.asInstanceOf[Either[Throwable, A]] case Right(t) ⇒ fa complete (try { - Right(BoxedType(m.erasure).cast(t).asInstanceOf[A]) + Right(BoxedType(clazz).cast(t).asInstanceOf[A]) } catch { case e: ClassCastException ⇒ Left(e) }) diff --git a/akka-actor/src/main/scala/akka/dispatch/japi/Future.scala b/akka-actor/src/main/scala/akka/dispatch/japi/Future.scala index ac4ef7694e..ca28099d8d 100644 --- a/akka-actor/src/main/scala/akka/dispatch/japi/Future.scala +++ b/akka-actor/src/main/scala/akka/dispatch/japi/Future.scala @@ -6,6 +6,47 @@ package akka.dispatch.japi import akka.util.Timeout import akka.japi.{ Procedure2, Procedure, Function ⇒ JFunc, Option ⇒ JOption } +class Callback[-T] extends PartialFunction[T, Unit] { + override final def isDefinedAt(t: T): Boolean = true + override final def apply(t: T): Unit = on(t) + protected def on(result: T): Unit = () +} + +abstract class OnSuccess[-T] extends Callback[T] { + protected final override def on(result: T) = onSuccess(result) + def onSuccess(result: T): Unit +} + +abstract class OnFailure extends Callback[Throwable] { + protected final override def on(failure: Throwable) = onFailure(failure) + def onFailure(failure: Throwable): Unit +} + +abstract class OnComplete[-T] extends Callback[Either[Throwable, T]] { + protected final override def on(value: Either[Throwable, T]): Unit = value match { + case Left(t) ⇒ onComplete(t, null.asInstanceOf[T]) + case Right(r) ⇒ onComplete(null, r) + } + def onComplete(failure: Throwable, success: T): Unit +} + +abstract class Filter[-T] extends (T ⇒ Boolean) { + override final def apply(t: T): Boolean = filter(t) + def filter(result: T): Boolean +} + +abstract class Foreach[-T] extends (T ⇒ Unit) { + override final def apply(t: T): Unit = each(t) + def each(result: T): Unit +} + +abstract class Mapper[-T, +R] extends (T ⇒ R) + +/* +map => A => B +flatMap => A => F[B] +foreach +*/ /* Java API */ trait Future[+T] { self: akka.dispatch.Future[T] ⇒ /** @@ -50,14 +91,5 @@ trait Future[+T] { self: akka.dispatch.Future[T] ⇒ */ private[japi] final def filter[A >: T](p: JFunc[A, java.lang.Boolean]): akka.dispatch.Future[A] = 
self.filter((a: Any) ⇒ p(a.asInstanceOf[A])).asInstanceOf[akka.dispatch.Future[A]] - - /** - * Returns a new Future whose value will be of the specified type if it really is - * Or a failure with a ClassCastException if it wasn't. - */ - private[japi] final def mapTo[A](clazz: Class[A]): akka.dispatch.Future[A] = { - implicit val manifest: Manifest[A] = Manifest.classType(clazz) - self.mapTo[A] - } } diff --git a/akka-actor/src/main/scala/akka/util/BoxedType.scala b/akka-actor/src/main/scala/akka/util/BoxedType.scala index d2c5092be4..f5f95096d9 100644 --- a/akka-actor/src/main/scala/akka/util/BoxedType.scala +++ b/akka-actor/src/main/scala/akka/util/BoxedType.scala @@ -3,9 +3,8 @@ */ package akka.util -import java.{ lang ⇒ jl } - object BoxedType { + import java.{ lang ⇒ jl } private val toBoxed = Map[Class[_], Class[_]]( classOf[Boolean] -> classOf[jl.Boolean], @@ -18,8 +17,5 @@ object BoxedType { classOf[Double] -> classOf[jl.Double], classOf[Unit] -> classOf[scala.runtime.BoxedUnit]) - def apply(c: Class[_]): Class[_] = { - if (c.isPrimitive) toBoxed(c) else c - } - + final def apply(c: Class[_]): Class[_] = if (c.isPrimitive) toBoxed(c) else c } diff --git a/akka-docs/java/code/akka/docs/future/FutureDocTestBase.java b/akka-docs/java/code/akka/docs/future/FutureDocTestBase.java index e642047709..8c600440f3 100644 --- a/akka-docs/java/code/akka/docs/future/FutureDocTestBase.java +++ b/akka-docs/java/code/akka/docs/future/FutureDocTestBase.java @@ -10,6 +10,9 @@ import akka.japi.Procedure2; import akka.util.Timeout; import akka.dispatch.Await; import akka.dispatch.Future; +import akka.dispatch.japi.Mapper; +import akka.dispatch.japi.OnSuccess; +import akka.dispatch.japi.OnFailure; //#imports1 @@ -110,7 +113,7 @@ public class FutureDocTestBase { } }, system.dispatcher()); - Future f2 = f1.map(new Function() { + Future f2 = f1.map(new Mapper() { public Integer apply(String s) { return s.length(); } @@ -131,7 +134,7 @@ public class FutureDocTestBase { } }, system.dispatcher()); - Future f2 = f1.map(new Function() { + Future f2 = f1.map(new Mapper() { public Integer apply(String s) { return s.length(); } @@ -153,7 +156,7 @@ public class FutureDocTestBase { Thread.sleep(100); - Future f2 = f1.map(new Function() { + Future f2 = f1.map(new Mapper() { public Integer apply(String s) { return s.length(); } @@ -173,7 +176,7 @@ public class FutureDocTestBase { } }, system.dispatcher()); - Future f2 = f1.flatMap(new Function>() { + Future f2 = f1.flatMap(new Mapper>() { public Future apply(final String s) { return future(new Callable() { public Integer call() { @@ -322,8 +325,8 @@ public class FutureDocTestBase { { Future future = Futures.successful("foo", system.dispatcher()); //#onSuccess - future.onSuccess(new Procedure() { - public void apply(String result) { + future.onSuccess(new OnSuccess() { + public void onSuccess(String result) { if ("bar" == result) { //Do something if it resulted in "bar" } else { @@ -337,8 +340,8 @@ public class FutureDocTestBase { Future future = Futures.failed(new IllegalStateException("OHNOES"), system.dispatcher()); //#onFailure - future.onFailure( new Procedure() { - public void apply(Throwable failure) { + future.onFailure( new OnFailure() { + public void onFailure(Throwable failure) { if (failure instanceof IllegalStateException) { //Do something if it was this particular failure } else { @@ -370,7 +373,7 @@ public class FutureDocTestBase { Future future1 = Futures.successful("foo", system.dispatcher()); Future future2 = Futures.successful("bar", 
system.dispatcher()); Future future3 = - future1.zip(future2).map(new Function, String>() { + future1.zip(future2).map(new Mapper, String>() { public String apply(scala.Tuple2 zipped) { return zipped._1() + " " + zipped._2(); } From f00c4f61be66591a0ceb8bc071ea87607a5b71a7 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 25 Jan 2012 22:49:31 +0100 Subject: [PATCH 02/94] more wip --- .../java/code/akka/docs/future/FutureDocTestBase.java | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/akka-docs/java/code/akka/docs/future/FutureDocTestBase.java b/akka-docs/java/code/akka/docs/future/FutureDocTestBase.java index 8c600440f3..37349c186b 100644 --- a/akka-docs/java/code/akka/docs/future/FutureDocTestBase.java +++ b/akka-docs/java/code/akka/docs/future/FutureDocTestBase.java @@ -13,6 +13,7 @@ import akka.dispatch.Future; import akka.dispatch.japi.Mapper; import akka.dispatch.japi.OnSuccess; import akka.dispatch.japi.OnFailure; +import akka.dispatch.japi.Filter; //#imports1 @@ -207,7 +208,7 @@ public class FutureDocTestBase { // Find the sum of the odd numbers Future futureSum = futureListOfInts.map( - new Function, Long>() { + new Mapper, Long>() { public Long apply(Iterable ints) { long sum = 0; for (Integer i : ints) @@ -309,13 +310,13 @@ public class FutureDocTestBase { //#filter Future future1 = Futures.successful(4, system.dispatcher()); Future successfulFilter = - future1.filter(new Function() { - public Boolean apply(Integer i) { return i % 2 == 0; } + future1.filter(new Filter() { + public boolean filter(Integer i) { return i % 2 == 0; } }); Future failedFilter = - future1.filter(new Function() { - public Boolean apply(Integer i) { return i % 2 != 0; } + future1.filter(new Filter() { + public boolean filter(Integer i) { return i % 2 != 0; } }); //When filter fails, the returned Future will be failed with a scala.MatchError //#filter From 87cb83f0d73616f73a64620ed83daa28fcb072b4 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Thu, 26 Jan 2012 12:41:50 +0100 Subject: [PATCH 03/94] wip --- .../src/main/scala/akka/dispatch/japi/Future.scala | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/akka-actor/src/main/scala/akka/dispatch/japi/Future.scala b/akka-actor/src/main/scala/akka/dispatch/japi/Future.scala index ca28099d8d..5f32ac1f35 100644 --- a/akka-actor/src/main/scala/akka/dispatch/japi/Future.scala +++ b/akka-actor/src/main/scala/akka/dispatch/japi/Future.scala @@ -6,6 +6,7 @@ package akka.dispatch.japi import akka.util.Timeout import akka.japi.{ Procedure2, Procedure, Function ⇒ JFunc, Option ⇒ JOption } +@deprecated("Do not use this directly, use subclasses of this", "2.0") class Callback[-T] extends PartialFunction[T, Unit] { override final def isDefinedAt(t: T): Boolean = true override final def apply(t: T): Unit = on(t) @@ -30,6 +31,18 @@ abstract class OnComplete[-T] extends Callback[Either[Throwable, T]] { def onComplete(failure: Throwable, success: T): Unit } +@deprecated("Do not use this directly, use 'Recover'", "2.0") +class RecoverBridge[+T] extends PartialFunction[Throwable, T] { + override final def isDefinedAt(t: Throwable): Boolean = true + override final def apply(t: Throwable): T = on(t) + protected def on(result: Throwable): T = null.asInstanceOf[T] +} + +abstract class Recover[+T] extends RecoverBridge[T] { + protected final override def on(result: Throwable): T = recover(result) + def recover(failure: Throwable): T +} + abstract class Filter[-T] extends (T ⇒ Boolean) { override final def apply(t: T): Boolean = 
filter(t) def filter(result: T): Boolean From b31040733411889bed30f57c98e15edd1d73ea36 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Thu, 26 Jan 2012 14:15:25 +0100 Subject: [PATCH 04/94] wip --- .../java/akka/dispatch/JavaFutureTests.java | 24 ++-- .../src/main/scala/akka/dispatch/Future.scala | 64 ++++++++++- .../scala/akka/dispatch/japi/Future.scala | 108 ------------------ .../docs/actor/UntypedActorDocTestBase.java | 3 +- .../akka/docs/future/FutureDocTestBase.java | 13 +-- 5 files changed, 80 insertions(+), 132 deletions(-) delete mode 100644 akka-actor/src/main/scala/akka/dispatch/japi/Future.scala diff --git a/akka-actor-tests/src/test/java/akka/dispatch/JavaFutureTests.java b/akka-actor-tests/src/test/java/akka/dispatch/JavaFutureTests.java index a87b7933d8..9b89a2b476 100644 --- a/akka-actor-tests/src/test/java/akka/dispatch/JavaFutureTests.java +++ b/akka-actor-tests/src/test/java/akka/dispatch/JavaFutureTests.java @@ -45,7 +45,7 @@ public class JavaFutureTests { } }, system.dispatcher()); - Future f2 = f1.map(new Function() { + Future f2 = f1.map(new Mapper() { public String apply(String s) { return s + " World"; } @@ -59,8 +59,8 @@ public class JavaFutureTests { final CountDownLatch latch = new CountDownLatch(1); Promise cf = Futures.promise(system.dispatcher()); Future f = cf; - f.onSuccess(new Procedure() { - public void apply(String result) { + f.onSuccess(new OnSuccess() { + public void onSuccess(String result) { if (result.equals("foo")) latch.countDown(); } @@ -76,8 +76,8 @@ public class JavaFutureTests { final CountDownLatch latch = new CountDownLatch(1); Promise cf = Futures.promise(system.dispatcher()); Future f = cf; - f.onFailure(new Procedure() { - public void apply(Throwable t) { + f.onFailure(new OnFailure() { + public void onFailure(Throwable t) { if (t instanceof NullPointerException) latch.countDown(); } @@ -94,8 +94,8 @@ public class JavaFutureTests { final CountDownLatch latch = new CountDownLatch(1); Promise cf = Futures.promise(system.dispatcher()); Future f = cf; - f.onComplete(new Procedure2() { - public void apply(Throwable t, String r) { + f.onComplete(new OnComplete() { + public void onComplete(Throwable t, String r) { latch.countDown(); } }); @@ -110,8 +110,8 @@ public class JavaFutureTests { final CountDownLatch latch = new CountDownLatch(1); Promise cf = Futures.promise(system.dispatcher()); Future f = cf; - f.foreach(new Procedure() { - public void apply(String future) { + f.foreach(new Foreach() { + public void each(String future) { latch.countDown(); } }); @@ -127,7 +127,7 @@ public class JavaFutureTests { Promise cf = Futures.promise(system.dispatcher()); cf.success("1000"); Future f = cf; - Future r = f.flatMap(new Function>() { + Future r = f.flatMap(new Mapper>() { public Future apply(String r) { latch.countDown(); Promise cf = Futures.promise(system.dispatcher()); @@ -146,8 +146,8 @@ public class JavaFutureTests { final CountDownLatch latch = new CountDownLatch(1); Promise cf = Futures.promise(system.dispatcher()); Future f = cf; - Future r = f.filter(new Function() { - public Boolean apply(String r) { + Future r = f.filter(new Filter() { + public boolean filter(String r) { latch.countDown(); return r.equals("foo"); } diff --git a/akka-actor/src/main/scala/akka/dispatch/Future.scala b/akka-actor/src/main/scala/akka/dispatch/Future.scala index 19466c2a93..454a045c9c 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Future.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Future.scala @@ -343,7 +343,7 @@ object Future { } } -sealed 
trait Future[+T] extends japi.Future[T] with Await.Awaitable[T] { +sealed trait Future[+T] extends Await.Awaitable[T] { implicit def executor: ExecutionContext @@ -828,3 +828,65 @@ final class KeptPromise[T](suppliedValue: Either[Throwable, T])(implicit val exe case Right(r) ⇒ r } } +object japi { + @deprecated("Do not use this directly, use subclasses of this", "2.0") + class CallbackBridge[-T] extends PartialFunction[T, Unit] { + override final def isDefinedAt(t: T): Boolean = true + override final def apply(t: T): Unit = internal(t) + protected def internal(result: T): Unit = () + } + + @deprecated("Do not use this directly, use 'Recover'", "2.0") + class RecoverBridge[+T] extends PartialFunction[Throwable, T] { + override final def isDefinedAt(t: Throwable): Boolean = true + override final def apply(t: Throwable): T = internal(t) + protected def internal(result: Throwable): T = null.asInstanceOf[T] + } + + @deprecated("Do not use this directly, use subclasses of this", "2.0") + class BooleanFunctionBridge[-T] extends scala.Function1[T, Boolean] { + override final def apply(t: T): Boolean = internal(t) + protected def internal(result: T): Boolean = false + } + + @deprecated("Do not use this directly, use subclasses of this", "2.0") + class UnitFunctionBridge[-T] extends (T ⇒ Unit) { + override final def apply(t: T): Unit = internal(t) + protected def internal(result: T): Unit = () + } +} + +abstract class OnSuccess[-T] extends japi.CallbackBridge[T] { + protected final override def internal(result: T) = onSuccess(result) + def onSuccess(result: T): Unit +} + +abstract class OnFailure extends japi.CallbackBridge[Throwable] { + protected final override def internal(failure: Throwable) = onFailure(failure) + def onFailure(failure: Throwable): Unit +} + +abstract class OnComplete[-T] extends japi.CallbackBridge[Either[Throwable, T]] { + protected final override def internal(value: Either[Throwable, T]): Unit = value match { + case Left(t) ⇒ onComplete(t, null.asInstanceOf[T]) + case Right(r) ⇒ onComplete(null, r) + } + def onComplete(failure: Throwable, success: T): Unit +} + +abstract class Recover[+T] extends japi.RecoverBridge[T] { + protected final override def internal(result: Throwable): T = recover(result) + def recover(failure: Throwable): T +} + +abstract class Filter[-T] extends japi.BooleanFunctionBridge[T] { + override final def internal(t: T): Boolean = filter(t) + def filter(result: T): Boolean +} + +abstract class Foreach[-T] extends japi.UnitFunctionBridge[T] { + override final def internal(t: T): Unit = each(t) + def each(result: T): Unit +} + +abstract class Mapper[-T, +R] extends scala.runtime.AbstractFunction1[T, R] \ No newline at end of file diff --git a/akka-actor/src/main/scala/akka/dispatch/japi/Future.scala b/akka-actor/src/main/scala/akka/dispatch/japi/Future.scala deleted file mode 100644 index 5f32ac1f35..0000000000 --- a/akka-actor/src/main/scala/akka/dispatch/japi/Future.scala +++ /dev/null @@ -1,108 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. 
- */ -package akka.dispatch.japi - -import akka.util.Timeout -import akka.japi.{ Procedure2, Procedure, Function ⇒ JFunc, Option ⇒ JOption } - -@deprecated("Do not use this directly, use subclasses of this", "2.0") -class Callback[-T] extends PartialFunction[T, Unit] { - override final def isDefinedAt(t: T): Boolean = true - override final def apply(t: T): Unit = on(t) - protected def on(result: T): Unit = () -} - -abstract class OnSuccess[-T] extends Callback[T] { - protected final override def on(result: T) = onSuccess(result) - def onSuccess(result: T): Unit -} - -abstract class OnFailure extends Callback[Throwable] { - protected final override def on(failure: Throwable) = onFailure(failure) - def onFailure(failure: Throwable): Unit -} - -abstract class OnComplete[-T] extends Callback[Either[Throwable, T]] { - protected final override def on(value: Either[Throwable, T]): Unit = value match { - case Left(t) ⇒ onComplete(t, null.asInstanceOf[T]) - case Right(r) ⇒ onComplete(null, r) - } - def onComplete(failure: Throwable, success: T): Unit -} - -@deprecated("Do not use this directly, use 'Recover'", "2.0") -class RecoverBridge[+T] extends PartialFunction[Throwable, T] { - override final def isDefinedAt(t: Throwable): Boolean = true - override final def apply(t: Throwable): T = on(t) - protected def on(result: Throwable): T = null.asInstanceOf[T] -} - -abstract class Recover[+T] extends RecoverBridge[T] { - protected final override def on(result: Throwable): T = recover(result) - def recover(failure: Throwable): T -} - -abstract class Filter[-T] extends (T ⇒ Boolean) { - override final def apply(t: T): Boolean = filter(t) - def filter(result: T): Boolean -} - -abstract class Foreach[-T] extends (T ⇒ Unit) { - override final def apply(t: T): Unit = each(t) - def each(result: T): Unit -} - -abstract class Mapper[-T, +R] extends (T ⇒ R) - -/* -map => A => B -flatMap => A => F[B] -foreach -*/ -/* Java API */ -trait Future[+T] { self: akka.dispatch.Future[T] ⇒ - /** - * Asynchronously called when this Future gets a successful result - */ - private[japi] final def onSuccess[A >: T](proc: Procedure[A]): this.type = self.onSuccess({ case r ⇒ proc(r.asInstanceOf[A]) }: PartialFunction[T, Unit]) - - /** - * Asynchronously called when this Future gets a failed result - */ - private[japi] final def onFailure(proc: Procedure[Throwable]): this.type = self.onFailure({ case t: Throwable ⇒ proc(t) }: PartialFunction[Throwable, Unit]) - - /** - * Asynchronously called when this future is completed with either a failed or a successful result - * In case of a success, the first parameter (Throwable) will be null - * In case of a failure, the second parameter (T) will be null - * For no reason will both be null or neither be null - */ - private[japi] final def onComplete[A >: T](proc: Procedure2[Throwable, A]): this.type = self.onComplete(_.fold(t ⇒ proc(t, null.asInstanceOf[T]), r ⇒ proc(null, r))) - - /** - * Asynchronously applies the provided function to the (if any) successful result of this Future - * Any failure of this Future will be propagated to the Future returned by this method. - */ - private[japi] final def map[A >: T, B](f: JFunc[A, B]): akka.dispatch.Future[B] = self.map(f(_)) - - /** - * Asynchronously applies the provided function to the (if any) successful result of this Future and flattens it. - * Any failure of this Future will be propagated to the Future returned by this method. 
- */ - private[japi] final def flatMap[A >: T, B](f: JFunc[A, akka.dispatch.Future[B]]): akka.dispatch.Future[B] = self.flatMap(f(_)) - - /** - * Asynchronously applies the provided Procedure to the (if any) successful result of this Future - * Provided Procedure will not be called in case of no-result or in case of failed result - */ - private[japi] final def foreach[A >: T](proc: Procedure[A]): Unit = self.foreach(proc(_)) - - /** - * Returns a new Future whose successful result will be the successful result of this Future if that result conforms to the provided predicate - * Any failure of this Future will be propagated to the Future returned by this method. - */ - private[japi] final def filter[A >: T](p: JFunc[A, java.lang.Boolean]): akka.dispatch.Future[A] = - self.filter((a: Any) ⇒ p(a.asInstanceOf[A])).asInstanceOf[akka.dispatch.Future[A]] -} - diff --git a/akka-docs/java/code/akka/docs/actor/UntypedActorDocTestBase.java b/akka-docs/java/code/akka/docs/actor/UntypedActorDocTestBase.java index a72c828862..9abd96b0a7 100644 --- a/akka-docs/java/code/akka/docs/actor/UntypedActorDocTestBase.java +++ b/akka-docs/java/code/akka/docs/actor/UntypedActorDocTestBase.java @@ -12,6 +12,7 @@ import akka.actor.Props; //#import-future import akka.dispatch.Future; import akka.dispatch.Futures; +import akka.dispatch.Mapper; import akka.dispatch.Await; import akka.util.Duration; import akka.util.Timeout; @@ -236,7 +237,7 @@ public class UntypedActorDocTestBase { final Future> aggregate = Futures.sequence(futures, system.dispatcher()); - final Future transformed = aggregate.map(new akka.japi.Function, Result>() { + final Future transformed = aggregate.map(new Mapper, Result>() { public Result apply(Iterable coll) { final Iterator it = coll.iterator(); final String s = (String) it.next(); diff --git a/akka-docs/java/code/akka/docs/future/FutureDocTestBase.java b/akka-docs/java/code/akka/docs/future/FutureDocTestBase.java index 37349c186b..8ecfccbeac 100644 --- a/akka-docs/java/code/akka/docs/future/FutureDocTestBase.java +++ b/akka-docs/java/code/akka/docs/future/FutureDocTestBase.java @@ -4,16 +4,10 @@ package akka.docs.future; //#imports1 -import akka.dispatch.Promise; +import akka.dispatch.*; import akka.japi.Procedure; import akka.japi.Procedure2; import akka.util.Timeout; -import akka.dispatch.Await; -import akka.dispatch.Future; -import akka.dispatch.japi.Mapper; -import akka.dispatch.japi.OnSuccess; -import akka.dispatch.japi.OnFailure; -import akka.dispatch.japi.Filter; //#imports1 @@ -61,7 +55,6 @@ import akka.actor.ActorSystem; import akka.actor.UntypedActor; import akka.actor.ActorRef; import akka.actor.Props; -import akka.dispatch.Futures; import akka.pattern.Patterns; import static org.junit.Assert.*; @@ -355,8 +348,8 @@ public class FutureDocTestBase { { Future future = Futures.successful("foo", system.dispatcher()); //#onComplete - future.onComplete(new Procedure2() { - public void apply(Throwable failure, String result) { + future.onComplete(new OnComplete() { + public void onComplete(Throwable failure, String result) { if (failure != null) { //We got a failure, handle it here } else { From 5ddf1afb2045c3319f039fb36b28f6c70dee1613 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Thu, 26 Jan 2012 15:11:49 +0100 Subject: [PATCH 05/94] Adding tests for recover and mapTo, adding API for creating manifests from Java and doccing things --- .../java/akka/dispatch/JavaFutureTests.java | 27 +++++ .../src/main/scala/akka/dispatch/Future.scala | 108 ++++++++++++++++-- 
.../src/main/scala/akka/japi/JavaAPI.scala | 7 ++ 3 files changed, 133 insertions(+), 9 deletions(-) diff --git a/akka-actor-tests/src/test/java/akka/dispatch/JavaFutureTests.java b/akka-actor-tests/src/test/java/akka/dispatch/JavaFutureTests.java index 9b89a2b476..ca21b9a6fc 100644 --- a/akka-actor-tests/src/test/java/akka/dispatch/JavaFutureTests.java +++ b/akka-actor-tests/src/test/java/akka/dispatch/JavaFutureTests.java @@ -14,6 +14,7 @@ import java.util.LinkedList; import java.lang.Iterable; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; +import static akka.japi.util.manifest; import akka.testkit.AkkaSpec; @@ -278,4 +279,30 @@ public class JavaFutureTests { Await.ready(p, d); assertEquals(Await.result(p, d), "foo"); } + + @Test + public void MapToMustBeCallable() { + Promise p = Futures.promise(system.dispatcher()); + Future f = p.future().mapTo(manifest(String.class)); + Duration d = Duration.create(1, TimeUnit.SECONDS); + p.success("foo"); + Await.ready(p, d); + assertEquals(Await.result(p, d), "foo"); + } + + @Test + public void RecoverToMustBeCallable() { + final IllegalStateException fail = new IllegalStateException("OHNOES"); + Promise p = Futures.promise(system.dispatcher()); + Future f = p.future().recover(new Recover() { + public Object recover(Throwable t) throws Throwable { + if (t == fail) return "foo"; + else throw t; + } + }); + Duration d = Duration.create(1, TimeUnit.SECONDS); + p.failure(fail); + Await.ready(p, d); + assertEquals(Await.result(p, d), "foo"); + } } diff --git a/akka-actor/src/main/scala/akka/dispatch/Future.scala b/akka-actor/src/main/scala/akka/dispatch/Future.scala index 555ad86055..70768133a0 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Future.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Future.scala @@ -503,21 +503,18 @@ sealed trait Future[+T] extends Await.Awaitable[T] { /** * Creates a new Future[A] which is completed with this Future's result if * that conforms to A's erased type or a ClassCastException otherwise. + * + * When used from Java, to create the Manifest, use: + * import static akka.japi.util.manifest; + * future.mapTo(manifest(MyClass.class)); */ - final def mapTo[A](implicit m: Manifest[A]): Future[A] = - mapTo[A](m.erasure.asInstanceOf[Class[A]]) - - /** - * Creates a new Future[A] which is completed with this Future's result if - * that conforms to A's erased type or a ClassCastException otherwise. - */ - final def mapTo[A](clazz: Class[A]): Future[A] = { + final def mapTo[A](implicit m: Manifest[A]): Future[A] = { val fa = Promise[A]() onComplete { case l: Left[_, _] ⇒ fa complete l.asInstanceOf[Either[Throwable, A]] case Right(t) ⇒ fa complete (try { - Right(BoxedType(clazz).cast(t).asInstanceOf[A]) + Right(BoxedType(m.erasure).cast(t).asInstanceOf[A]) } catch { case e: ClassCastException ⇒ Left(e) }) @@ -825,6 +822,11 @@ final class KeptPromise[T](suppliedValue: Either[Throwable, T])(implicit val exe case Right(r) ⇒ r } } + +/** + * This class contains bridge classes between Scala and Java. + * Internal use only. 
+ */ object japi { @deprecated("Do not use this directly, use subclasses of this", "2.0") class CallbackBridge[-T] extends PartialFunction[T, Unit] { @@ -853,37 +855,125 @@ object japi { } } +/** + * Callback for when a Future is completed successfully + * SAM (Single Abstract Method) class + * + * Java API + */ abstract class OnSuccess[-T] extends japi.CallbackBridge[T] { protected final override def internal(result: T) = onSuccess(result) + + /** + * This method will be invoked once when/if a Future that this callback is registered on + * becomes successfully completed + */ def onSuccess(result: T): Unit } +/** + * Callback for when a Future is completed with a failure + * SAM (Single Abstract Method) class + * + * Java API + */ abstract class OnFailure extends japi.CallbackBridge[Throwable] { protected final override def internal(failure: Throwable) = onFailure(failure) + + /** + * This method will be invoked once when/if a Future that this callback is registered on + * becomes completed with a failure + */ def onFailure(failure: Throwable): Unit } +/** + * Callback for when a Future is completed with either failure or a success + * SAM (Single Abstract Method) class + * + * Java API + */ abstract class OnComplete[-T] extends japi.CallbackBridge[Either[Throwable, T]] { protected final override def internal(value: Either[Throwable, T]): Unit = value match { case Left(t) ⇒ onComplete(t, null.asInstanceOf[T]) case Right(r) ⇒ onComplete(null, r) } + + /** + * This method will be invoked once when/if a Future that this callback is registered on + * becomes completed with a failure or a success. + * In the case of success then "failure" will be null, and in the case of failure the "success" will be null. + */ def onComplete(failure: Throwable, success: T): Unit } +/** + * Callback for the Future.recover operation that conditionally turns failures into successes. + * + * SAM (Single Abstract Method) class + * + * Java API + */ abstract class Recover[+T] extends japi.RecoverBridge[T] { protected final override def internal(result: Throwable): T = recover(result) + + /** + * This method will be invoked once when/if the Future this recover callback is registered on + * becomes completed with a failure. + * + * @returns a successful value for the passed in failure + * @throws the passed in failure to propagate it. + * + * Java API + */ + @throws(classOf[Throwable]) def recover(failure: Throwable): T } +/** + * Callback for the Future.filter operation that creates a new Future which will + * conditionally contain the success of another Future. + * + * SAM (Single Abstract Method) class + * Java API + */ abstract class Filter[-T] extends japi.BooleanFunctionBridge[T] { override final def internal(t: T): Boolean = filter(t) + + /** + * This method will be invoked once when/if a Future that this callback is registered on + * becomes completed with a success. + * + * @returns true if the successful value should be propagated to the new Future or not + */ def filter(result: T): Boolean } +/** + * Callback for the Future.foreach operation that will be invoked if the Future that this callback + * is registered on becomes completed with a success. This method is essentially the same operation + * as onSuccess. 
+ * + * SAM (Single Abstract Method) class + * Java API + */ abstract class Foreach[-T] extends japi.UnitFunctionBridge[T] { override final def internal(t: T): Unit = each(t) + + /** + * This method will be invoked once when/if a Future that this callback is registered on + * becomes successfully completed + */ def each(result: T): Unit } +/** + * Callback for the Future.map and Future.flatMap operations that will be invoked + * if the Future that this callback is registered on becomes completed with a success. + * This callback is the equivalent of an akka.japi.Function + * + * SAM (Single Abstract Method) class + * + * Java API + */ abstract class Mapper[-T, +R] extends scala.runtime.AbstractFunction1[T, R] diff --git a/akka-actor/src/main/scala/akka/japi/JavaAPI.scala b/akka-actor/src/main/scala/akka/japi/JavaAPI.scala index e414d0fee6..94a347f653 100644 --- a/akka-actor/src/main/scala/akka/japi/JavaAPI.scala +++ b/akka-actor/src/main/scala/akka/japi/JavaAPI.scala @@ -119,3 +119,10 @@ object Option { implicit def java2ScalaOption[A](o: Option[A]): scala.Option[A] = o.asScala implicit def scala2JavaOption[A](o: scala.Option[A]): Option[A] = if (o.isDefined) some(o.get) else none } + +object util { + /** + * Given a Class returns a Scala Manifest of that Class + */ + def manifest[T](clazz: Class[T]): Manifest[T] = Manifest.classType(clazz) +} From 1ebdcaca1adc728165bb1f4e5dd7622fa72e6d33 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Thu, 26 Jan 2012 17:47:31 +0100 Subject: [PATCH 06/94] Fixes after review --- .../src/test/java/akka/dispatch/JavaFutureTests.java | 8 ++++---- akka-actor/src/main/scala/akka/dispatch/Future.scala | 2 +- akka-actor/src/main/scala/akka/japi/JavaAPI.scala | 5 ++++- 3 files changed, 9 insertions(+), 6 deletions(-) diff --git a/akka-actor-tests/src/test/java/akka/dispatch/JavaFutureTests.java b/akka-actor-tests/src/test/java/akka/dispatch/JavaFutureTests.java index ca21b9a6fc..4ccdd46dc1 100644 --- a/akka-actor-tests/src/test/java/akka/dispatch/JavaFutureTests.java +++ b/akka-actor-tests/src/test/java/akka/dispatch/JavaFutureTests.java @@ -14,7 +14,7 @@ import java.util.LinkedList; import java.lang.Iterable; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; -import static akka.japi.util.manifest; +import static akka.japi.Util.manifest; import akka.testkit.AkkaSpec; @@ -272,7 +272,7 @@ public class JavaFutureTests { } @Test - public void BlockMustBeCallable() { + public void blockMustBeCallable() { Promise p = Futures.promise(system.dispatcher()); Duration d = Duration.create(1, TimeUnit.SECONDS); p.success("foo"); @@ -281,7 +281,7 @@ public class JavaFutureTests { } @Test - public void MapToMustBeCallable() { + public void mapToMustBeCallable() { Promise p = Futures.promise(system.dispatcher()); Future f = p.future().mapTo(manifest(String.class)); Duration d = Duration.create(1, TimeUnit.SECONDS); @@ -291,7 +291,7 @@ public class JavaFutureTests { } @Test - public void RecoverToMustBeCallable() { + public void recoverToMustBeCallable() { final IllegalStateException fail = new IllegalStateException("OHNOES"); Promise p = Futures.promise(system.dispatcher()); Future f = p.future().recover(new Recover() { diff --git a/akka-actor/src/main/scala/akka/dispatch/Future.scala b/akka-actor/src/main/scala/akka/dispatch/Future.scala index 70768133a0..c6fff48f34 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Future.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Future.scala @@ -505,7 +505,7 @@ sealed trait Future[+T] extends 
Await.Awaitable[T] {
  * that conforms to A's erased type or a ClassCastException otherwise.
  *
  * When used from Java, to create the Manifest, use:
- * import static akka.japi.util.manifest;
+ * import static akka.japi.Util.manifest;
  * future.mapTo(manifest(MyClass.class));
  */
 final def mapTo[A](implicit m: Manifest[A]): Future[A] = {
diff --git a/akka-actor/src/main/scala/akka/japi/JavaAPI.scala b/akka-actor/src/main/scala/akka/japi/JavaAPI.scala
index 94a347f653..47ce667759 100644
--- a/akka-actor/src/main/scala/akka/japi/JavaAPI.scala
+++ b/akka-actor/src/main/scala/akka/japi/JavaAPI.scala
@@ -120,7 +120,10 @@ object Option {
   implicit def scala2JavaOption[A](o: scala.Option[A]): Option[A] = if (o.isDefined) some(o.get) else none
 }

-object util {
+/**
+ * This class holds common utilities for Java
+ */
+object Util {
   /**
    * Given a Class returns a Scala Manifest of that Class
    */

From f8741c326e881d05b6b9d89b1345c62dbfbaa226 Mon Sep 17 00:00:00 2001
From: Patrik Nordwall
Date: Thu, 26 Jan 2012 18:42:11 +0100
Subject: [PATCH 07/94] DOC: Derek Wyatt's diagrams of fault tolerance sample,
 and descriptions. See #1730

---
 .../faulttolerancesample-failure-flow.png     |  Bin 0 -> 113164 bytes
 .../faulttolerancesample-normal-flow.png      |  Bin 0 -> 84198 bytes
 akka-docs/images/faulttolerancesample.graffle | 7302 +++++++++++++++++
 akka-docs/java/fault-tolerance-sample.rst     |   46 +
 akka-docs/scala/fault-tolerance-sample.rst    |   48 +
 5 files changed, 7396 insertions(+)
 create mode 100755 akka-docs/images/faulttolerancesample-failure-flow.png
 create mode 100644 akka-docs/images/faulttolerancesample-normal-flow.png
 create mode 100755 akka-docs/images/faulttolerancesample.graffle

diff --git a/akka-docs/images/faulttolerancesample-failure-flow.png b/akka-docs/images/faulttolerancesample-failure-flow.png
new file mode 100755
index 0000000000000000000000000000000000000000..6e6fcdf6c80ec5774d38ff09e1e169372add9b99
GIT binary patch
literal 113164
[113164 bytes of base85-encoded PNG data omitted; the remaining hunks of this
patch (the second image, the .graffle source, and the two .rst pages) are
truncated in the source text]
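After this series the Java-facing pieces live directly in akka.dispatch: the
SAM-style bridges (OnSuccess, OnFailure, OnComplete, Recover, Filter, Foreach,
Mapper) extend the Scala function/PartialFunction types via the japi bridge
classes, so the plain Scala combinators on Future accept them unchanged, which
is why patch 04 can delete the separate akka/dispatch/japi/Future.scala trait.
A minimal usage sketch of the success/failure callbacks, modeled on the
FutureDocTestBase snippets above (the class name and actor-system name are
illustrative only, not part of the patches):

import akka.actor.ActorSystem;
import akka.dispatch.Future;
import akka.dispatch.Futures;
import akka.dispatch.OnFailure;
import akka.dispatch.OnSuccess;

public class CallbackSketch {
  public static void main(String[] args) {
    ActorSystem system = ActorSystem.create("sketch");
    Future<String> future = Futures.successful("foo", system.dispatcher());

    // Subclass the SAM-style bridge and override its single abstract method
    future.onSuccess(new OnSuccess<String>() {
      public void onSuccess(String result) {
        System.out.println("got: " + result);
      }
    });
    future.onFailure(new OnFailure() {
      public void onFailure(Throwable failure) {
        failure.printStackTrace(); // only invoked if the future fails
      }
    });

    system.shutdown();
  }
}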
zwr^$6Go^PfZmtrRum59cmUrjH=DCJzemy^@bLaixRrfmduHW>3Uh1;_^tF6Yi?vK- zLxWPw7S)7+3k$BFtJaNs{Kd|w^~#;^W#UuJ%(AcXTv>c#y8ir|Qw79oi;V*JrmdJ) zYPJ6GBHnF2povfK#t%yx?wU#YWLY0<)|zLilDs%k{@wBGCI{CX(%$s($>G=6*H^nn zGi9}yZ=blJF+FNUE#tk_Li5_hB3QnpfuX5RqA@&o;quGmGni8D; z&b2NNSXCl8iz5 zbJJ#+WKO!c_Cb9|klxe^*7URHT9+?>y9?^bRBI$Kx(eK5RjA|g))SP= z4=PMan(S7}6|(B8jQyVv%&Cxmt_UudIIalUE`>eTtRht$LR-I| zpJ&@WCDVvyp~!5LRly4WjgKrUKdJ19ar>xMs$O!kxnXMImus$ZTq~A%I)8Jyzp`!O z_VR5{r!oBtKH#PCXZdO2jdLVK0zPC1ZEqKfaap3wY*qP2;M^Q5v1e|xoNoDFdUa>! zWJTR)Gf%D;;_}+{qyEu`zWaJQ&&+$E6}FwAk=Y++rHh`s zI2`lvcN3V{ko0Z~=ZZf;Twh*C$>+Wnb=|w^=7-Hs>B@TGGJ8 z%EU2OeqHSDwj-Uwiiw9<9&9~k+{?tpt;M9WvQ~goGhw<-(}znXmjVJD9S)vURO=%gF(&oODxa75mdA z<(5nRKN(#S*S9rp%ex6TwRYB@_xtiHK-lt=h)Zi0Z&S#$`p)8~x;>o6hxj~xhNov7 z5ezueDSX`BDsiSyMu6#`(*@_awU#b0+;OXP)#I(#y=SvsT@lG@|8IlO0avH_Zx8M| zbCjh_QsvM6*=r5IXeOwNI4)S}Cw6YVK%9Ha@4Q3oFTNbBX7kxl=jUPiYP-{V$N7)W zy`8*RTH zX^+jnn>()_k+VSZQoh?c} zZ4ckRySumf@}VW4MYaC9T;+`}bDj2PXYaa0(RXJrI&ZyrW5wM>6~9?b&cC;tzdS$J zve8@o+^;83|32-0e&+AOzq>!)ovv)~1;D$d+>q86{ zB%KmhzVQ6+MG3<^zGvru2u+!ALDuer`Mw`ta<61v5X)pS30ZMXz}RKhr?T^65xX+1 z^yXOiE>hK2y58OD`-}hnyQbIS`pJK`OwG8kh`Vab>bBR{*UhLhU-R!GtKTj@hqg2N z51z~KH?RD$+2_=$X^&l*7kvBwcX{W0^IE35yC01A&D|~;Tg89WoylTFvXg*FKy@t_ z&vWkBR}cPsNKP^mKb>&sXDZ8LtN#aB7?ZBAn|tKTzfzw+yJq^YjQ7v#lQ3MjD0;hI zS~>sal`WIs$KBuU&sLqN3CcE%t?l~0>^9EdJC7Vza?-uIHrm`L@}n?Ij?nBgX7vxl z1J;zJG76u(xb*t(8=L;6-`Vx@lypRL+naOs?^Q zqoe)}I}h!UKK(iTRETNj?CtM6uX;cKC31UHvV8LF=1l4Bb$1(DXBZ|Y{&8mxT%F>k zd{iN0N8w+sFuj?%4V;YuS8ts?6U6b=^u@ou_58}}XPDobhpf9AQsNTkd8d}y?B9=E zU9GDbZ-UkJ>!vJSV7C67`!OB;1ui$UDqc6QPw{+sfBClS{)!Jv7I)3~RiBj6=wcyg z-*WZE?jQw^f`a$=YC`!#ZRj}C(rt@G%3~5fBWCb)8qofRh*Z-QJApcic0-MA*O<$2Twja&k{}dlQ$NU>RoYjLGR9*sA|*Nindcd zQ!HQp*H-!${P5=c6LHh`>DShCsU7Q+RbGDMUk~r>s;)erGwQoe?vSsq5>55$J$PfI z=f%axrR_2+W+=#h)q1hUuT`=6Z)XaqvC*O5GkLF{#wmuT2$zn@#hXq}Q`X;>7x$Ju z;KS*;RcyiaH`LBptdFd^aB9aH2Ttc*J&|WqzMj|^a#)rB*y_cxEz2#v+;@GGY&AIigyt$0&+his$ zU5&E~_*!=_o|1G~-~Qn?kEm7GGT;4+DDf)#8F}t!?PIMJp#`6}e6tG-uyE1PxcXms z^_B!B84md-t&%gln>%M3rzgDHlE0zSJOAcQLGuGrKi)i_x_)s;=S7cMCM#b=Z86HU zjH^)6ojWVR+#I~TIq<>DIm$w%YD%-D0(lzF)Ew>KeCE2{Z(q)XD-!okQ2&40L;t~o zEpLuJ_;ldr<>S1OE4l-=q^L5h1uZ|@ao4&|xbU@Y^8Tdj^P_E)VoiQl1!cNU6cXK# z^-?C7?X={r^?PrYK8h3YzqRBYtEl5iiaq7l*<{HoSnd`8NBrK#oh&TyVOEI zJ-RXR?>pC-zr9RnCmf!os#d>K>+5`v;AKgPHL2Vga_Fi4 zchwEc#ZrVLxGE%B*R&iu9oMn^?80|Wzw??eY_8=!B5K=ey**!2_4va1zUHneC+5f= zWpOZBF@gC`nCH^d{`=!{<^_5dT`Nyc6?acl@t=51BeE(ee`CfNie{N3BxL&$o%T9rBwqaXJ#Jn~fQJo)>Y1DB=apC8QJJ;RaVS6tlUbggf zn&~9|c?{mReG9gH?{qq>p|t2ghD7P}nR;Q2^O?94wE8nA zD7$O@*|CH>>&lCEmrkLDB`+`4eR30DDLYsFl9RpU+uo%PEJ2{olH$jeo#(d(-Qs(2 zZT)QX{70{@u9mQ`vk_W$O4FL_-uDelF8ChPFMK61;bhRyOLsqN|9#e*y=cjd!|Aot zGA#^+QkE3F@ZRzK>-%{JT7OBiFwIHvm$lECBzS=3*^Ft{!8?{twKgtU8}WIA{Bc*U zgWrB!=l6Y{>EX$Ky6Bqlly6m0{sou!&M}o*<~O(N%F1Bri{>-Enpijm%=2Q7^vOn- zu^j0Ldi|ot{MDT6QpXJzRHREi->SFU^6{6b`I##|{%c(`J#M-shnj`>n>#y&&(1PE zcIBmR;Lf5v`^fV5R=qQgwMd7PloU*5_V+j(uju=Blr8Tk*}0_e!F%3YQAxYemmD4dU^S}!Jp<`QG5I& zRtDwu{N2N7Rg%$M`B_XcdRxvi-7Cv4mQMsAJe2-cS(9!xbMN{fsf5&%~a3PU`u~ zz1tcaoR&_p5tuNkvyS=HV$aJ;I+yxy?|eLE<@U_YN@_Ve3z9xPY4|!N;#h0 z_VtFBmU=(GoG0ev$$EQX?Z@DmdpCksIYc--bk#p>b~H%AD9B-^-tBeQdFDDeu9tje ztS2dFUB+{~U;cQfuzJCx7lCs`>VJL7yd!p?Abg7Gdz)|nC)a#T)zs+o*!1XVSLcpT ztn&V4Oh<*%swZc!Bd6l^jjFyeC!PHdOf>Nny13V~`O3@7%3E79 zFE4X#RcN@ zOL!eX9q0$DH`F4yEHpP%f6qHJ!_YZLwCCtT=k^|nJ3nm}t>@%;G3SA!Bh%xP3}5H{ zVypiC?r5K^^@s2A^$(9sRDSm2n{fKM25zgF!Al&TsdP?sJ-6e8a+Qjy(Gurre}1wU z9C3)bsNCruppbaxxL23Xec|K3-gBMU;XA=`gW|ughAF|<^B;VfdD)`&hrqRtBhBA` zyFU8hprUZoDBVqrQQR`VM)2q7=kEJzeooK|)w(RMR~n#oX=SkWo-aW`JhEZh2hs|b 
zuBvIBGsEs}`I`9hfS$-$*R~!{CZF~EZM^GSi_`ZX&t6}8to*+3_lpY`%zUkM6t)(? zuJ_U{FU=`YW?$Su<4C>vN4II73Vd*Cs`d=qYO`nO=jTtU1&tu>sr+2UyC9(LnE2uZ z$Hy9_0rju0Xy)A6AsB6zbKso)_T>K$l}{)wSh&H1xoe46=PM=&qv!R34lY6}Vk;dZ zEREv43}4t5hQGgaw9$KdLBOAR_v?=@k*_=8_mVqz$&<5lrGLg(oowK?4 zQrSsj6I}u>EKqQou6N?Wi^^5cR)wzK@ay^fjjr1QKJCl-ma}4pq4KUN`t3}*vy}S` zT=L63A02bt*dy6fzU}K08S8)7Zx%;vf32T0Y3aFof}_Vf1hg^p|ky_r;~x>UEa29;Dv8G z3qE`^Pr3d5Oi)Aorzu_mQXgL~pD&eaqb0g3;`c0r%~@AhRp$N|?y@^mb?dR0( zl+?b&Q~AkM)u!~5sUCAAzHBYpedbg#XX=MlS62ij>TJt-`+mLRY8lI*q$eGsJ1Rb^ z1ugMVxYTu5BT_4RLBgXqH(#%a$&{I8mfNc2+9hG&_`%J<@xijLndgOabK`B6GcelD zZQFCL(#iRl>z2(8N^|ZroL{+sGiTSUqnCdg3ZH#D_jJAW+sjJ7@9chQ{PwkfLHL`Z z4|@bmwcmaIWV2&|;o>u!^87hne|~xzxYSE@OXlTc9$GVOHyUi3|Mc|yN!=d`{&^~w zXg)sHD;=>Zg)_~6ew%&y8<*x9{Y7El-mIAPn04`U0lBxI+pJbx{+M}I%44R2TvnJ= z|JLYVHjE;-$~7AA*G<0M@R`r~)Rd%Xl{lXsiAene&w`5rcRsQZnZ7yCYl_B8W+jb- zi+qLDg&%G;H=Mv=ohsilgUL!Ra)KAz-2be1z1I9^Z>y4hbgDVB!8GTo;=>`Fyys;i{?SqTiFtcPBiXQ7-WRV(5{olG}%GFZAG zO;YDQ_foz!bH^D$bw8<%kr(g0dH3LdV_fgt!gj8`QoFk{mMzTv{%t}@kx_#GZRtBT z^QOJ`w32PNQaf3#6{gc}|L;a)BlGXad%w5+xS^)Fq2Qfg*NOk`CjM6Gms{pAv8c$t z5_kOSq&eZuLrrcqC%;P^`ij1jZv1#=oA}4L?cwbH-z_U_QxxV}l}@^>W&VIYkVBg!DR+BYZtEAxMGvpc6r5q5KPw_bF8z^Z+Eruz|LR`^pMoYj zPW8*)`>EV@b(Y_Ud3gz2)eiY{TAE~cU4OvwaxJ@@`d6O`+u6kSR<<9oW z6Z|~p?D+QfcKfxnvlhA@W?P~3P0sJj`|gRMs};7+VBLOBpY_CpofWuBjBG8x0D<+OU-{&*l+}*9dF2=Sc^K!K0Tp2m1?>E!sCI4#6=^WQR>$Bd$<#<`ew&>TJ z8k{r*k3D?recUiN<-EbVqhj)bA|7*Fr+1$!%CNWOPTM*$WQFIC z4mQy+O=F9EJBiAD(x2@n+1Uv9%rWy$S?F-5{d&Qd3vnqY-+f=8k)OH0Zl}hKgcF^w zr^nYhuFtz0pyyom=KWRSlaG$_7ESK%{m*25BtrCVebBqlTP%L=m|}nbMW6Unfh`w` z_D=uP^5alVd+17;HA?(3ds=G6^%wcgTd;Ted9%Djk(MGKHb>Ph?b%YgaZhRVlpoKu zm-x(O$hSGF_3Xv@YaO{=XBV|ZAD+P#Zg6md*9@MWtDPNNgPjD|@ZI+DU+Q4~^M7&Y z#NF$zY|XfM$b;*vosM&#>r|ftpG)<>-)^5_oGxXS_wMOt*2vovz8~GfWMs9-@$b1A zMyYK#C6_#&{a@OOb-_c;tD72}?gvP;8_M71e=L>rQP8(hVn@GP=%=LHg8Sn)3f|Sb zwtjMN-9dvV;b++ow>|u=8T=viZIoTKa`WYT_jm7qH(B@W2Y$n?p2?e;kEk1o=-*o# z;dr3y(9{Ludt!a6{eOQxufW`OAj?bZdB>W(>w-t!YdWW_6j~7ZdHP>pyVt*SCBA>u zUoqEKecIZH#;RT^$<6n<)Jje|A7oi7lENbv^27UFK=%`&;|+|=J+h)&hcC@Lb!n-$ z+7%2EO_YDBW23;+U$!+_FB%rxlWyvZW?43ahJTgA!c&j@3!hIC9aSN zhlTkEKs%&0SnnKcsbk!lb4w)s`}@Q9ZFjYDI2}{oVynK8c2iEDoQ4ePwUU zCsn(;)9c$F?LTEHxAW*Mwf$dqF4^F#6Ef*#xn`J%q>$<{0m0YD`sA4Jn`Jd9C4Ek6 z%ZXlU`OeQZ-tFtj`!h=p*GG1rPLlfVd$a5GQmI`}e}8|xxKz5pWzB?XCnrfJvzWwA zVm^8=ZsVmTZ$BRHmk8g^$CM@i(#+$M-i^QJMN5}-eo*PqJ7cRbJx=1{sxsMtZO6?$ zX6elHQ26x2b63$*xi7WJcGt?^^xV-5estWnsAT7xd6~C1sh*mm`B-`Gf+Cx740}XUdWi z^13`;zmWK3rz87+v6f-T1pn@xvAfF-t+9L?upptTe^vhNbrx~edo6Bl&+p|H*Zc7z zWW}OoO=A6A+=)uu8crS#{+rj|`Y?OBXReiZW5B|O`h+HHORMeq{a60qJAUo@u349@ zoR@B5;F`HRdwb%eTCeF&&z5+8NZhIwu`PeU<(BTqjMU;a9y)Oz?>1e%5Ky+GAUn&} z^0-RqpC1)#D-1=Z3Oh!NYNedJ^*<6Jy{4 zx4+jIZMnWz%yqHId?|k6!$Rjk2Z?I}KVPXI=E^Sj==*)I_MJ=+-|qv2yb@=6ggiY&O!<^G{S)pz2j!R}Q?|Zy3baj}7VNwgH=#9!*_b-=N z=}o;EoK@j8?e8qJ+>Y<SC@Cxz2%b2`@?$i(=+y>=fzKW_Ih(`IQ384rsB=G$3AOSctGRi zX?`((oEAP8zi{RKzQcZj3!dopd?@I2v2yr)r@J+Am9?_BY-Y;({q=gYa{75zebzsJ zZ1M3A7k}~I_!A|bBK-1*N#wp6s>he_-hJTK^iUNobyrLM`dKDw(*8G%2d{|psu zG%9U9S0_(OXqp$q5_K&1-kymJElMwLoYeR5m}RmqCvoccM+N!3F+H!Yezr=Anf@og z#(b;jt1GiIwoR$~bTY%H{+~`#?(Loz+0&G7t+V$%O;gmlc-&fG%Xl8V z6}iyq?Ye5t4z{oF`vo^$HCn8(pmnF{mdQD-`rB=xp_5PE9_qFhxt1>*VJw;dU~kGzI;*5wKZ<1Ha@$MXd-s` z@jRaYme*g9D5>FOY zge=mLySJn7#p>ybzc#scZ2Qdb620rf5BHP7j~usJRRrvnlS`{TIC+(l3NNoc>(Qmt zRW06qSiqeX=l@EvUw-vfb7pY8^HZ&z z%y(Qxl#hL$p?}*`d%A45e%6%Z$1lFi4pW#mhdX$Wh+OH*){I0JKivf%RLf_?$^I=} zIpzKS6J|cz+gjVTS@#^|W1ZP##1{L?LU*^?w$ouJ=iieOSoh_Dkt6@b+=_>j15c=N zP24X3r(1H<9ieOM3is7Ws4m~R@5|4M#p^Gw^GNKuw?oqY&*tMVa^HwP-OBg*S@I^0 
zVzw*$w&{zSaV@>M^zD-Lr{bI1GdCxnT-WDT`8na}rqmOv32h1iQ%yM4LRikfzEJQ+ zc&?G=t>i+8O z+h523{Qu{5mVMCep>Tk zwG2m@t&amYg(k7+6k%S|tMM+@tQ%l5ws*dfoijNS3$kD^;D z0lNPmluV9yND(^s|IPEF1J{>+ef?o;&g34B<8NmzUw?msET3jiQcA_vhZ|lLqzIkl zSju|mqF}I-T+TO#7tV8e=E-@92mIE!YoVa@QirMSk;lFbWzqigT&i!*HLCqrv)EfO zUc{r)V20FgC1Va=w+Af3f7V|pZg=>5YwN`DX^Uq)tzis}t6(&2iTk&!d(oQ83jKmV z3I+c+iZnQ=`>H8a>nSvAY&g{?_nK*$!*qGE$BExwNPG?zzF1sgwf5c{WBHGdj|O|Ee)BWGOcC6mO>SD$fv$u|0_ue0o z%pf(#hc-7q+?*m`@Z^c5&+ir64R%fVpW0SyDDAs?x?7*lOC{+{CEx1MHC~*ouB(r` z9^7!_@mvwXi=9uHro1?OJFwnKHaC8S^5PPs(+#)W6(+g+weae@KfQQ5iEZvACcl3* z)CM%ij)46+)rBVI$=}D^GZKo#PuogXKzjwE05c3!P>5P8zq6LaxbJF{M{*qw2 zVwB3hiF1d?)0^LpFAx8fvsB@qRnePC?sIolewJJ2|6BHUInO7Jy=sqkKa)A|{q%nU zTh-Wwx^FN2aylowMMav;i}BXNh>xWg{_!-5Nwo2w{BT~YZBbA7Gmr+pJRL#_9>wb}b4$KL)= zxch#$VOl`?o-d1he6CgcFs4dyX`PtjCvV^PW@l>A_H(N>Cxk6wo}L;zm#5)<@0mDN zj+*tq@|N>P`>E6N5B*US;>=`pfqS~=XEO6=)HvkMvrf1EB<|79BWbhv{?pUj z19m>LxU%kUz_ta`OfQu(h19-Z_|?8BKmW&ys@>P6Sl=s8mhFzh1}u-l$&eVtuyQ2mnajMP>B zc)m*B`CJ!Pgs-&F+84xd`RR`L7K<3q|B0Oyc6i!N`N!Y)^CcYY(%n(`ns48{%}n|G zud)0rPU~y>KkY&JOyO1|Exu`eQp_a?uca;?&w^<4MGjVhZax13(hBC?8k@q5!H zJq_NAt2@+)a*Ka5s8$+WZ?wD~i(F>u2WO-`AfNs-}>B zj%h`}!bObB|JZ-r|Aa+E@vr{R@a?Q3{{mIJjt8Ep{vZE;=K1r||6O+f5}jyL$Ru)R zuBf~J+~aSp`>zN5T^K!=>uAbL70p%Cw0eS-9u`;`?ekUI`@rh!toZ*}G(;4PLJR~ZeS7)8>5S{qhr7SMb2T(n5X;hA z_(nx5LMG#a+zPEp=iVIRS-IZn9|C;GjLyxs(nHeL(u)bVj{N|9Ae^H zx`ww_V}fpS)|#nL*_KQH`RQ{ea+{#~xj2`uCHrPw=66|r`>(&}$(CI^3S)kM_#3RU zyYjc*yE}XNU**K?tP|e3X&E~sN9-k+PpQJ3u`ah42DBZ%aI)i5_tKNDn`905+^wB? zWLB;HZTE=9TSuUux=8e!Y9Ft@8HLfA*Q&uN5*3Pd)p@dRjQ<@R6lLu`7i{8;{LxFuK}U zGBft-92=!Fy_U{%vsKFUt}%(9Z}a>b7p>v=t<%|KL#}L?T1Q)E@)MC;ozj1E-<`ay z9I&fLN1OH0k38!w^V($V=S^9BDubc8+SnmUeM^;+ou`c{Wx#@|n-;d7TU{T9yQ(s-P}xTN&qq1KPTKMF6{qF^K2 z|0i#f{rtd&n92tV$8!ILp8HZET7B|Mpm-1?a zv&D*aZS|PB;PCCU|9{J;2!1%e>*u`}5?1%q3vMs@n^^fJ#ehZR%5@I$`$1}c6R&~_htR}OLoW0O{{`?Z@JEUrQ9~@>zc>ecxBG| z#!?}%6{jaf4 z?$+4}>-~RMO{&ufQuj}Ca7!@Jo%Nr!q^{g%+d1jhI@#B=YdRktKRmbk`^V2q6x}q9 zH~hKx=Zy55fFJJidG5OL`)q#w-@`On>dJS0&lSfDJs&Ga&bBvxp&M-G|JO$${@@lr z&*?$iT?y`(6nV%C`V$V8-Vaa)a@7}Gayl!rO++9^V!D|}x9e>SQ_da9!Jz1vD z{%rN$*5c+aywTC{#YaEZsHbmsWo)#(zj!6XY;UF&Ra133-<+JxT5(F_ylA3zxV)9h zHuXd2GcKDaeOKIdL2dJ!Z2`*_9Ns%iX;~z$uo9UP(6YN**8PlqcW36W?CGs5-x>E@ zW#DAu`ZXo9am}I=Gd=G)MtJ%kTKqU9%vRMp(M77!SUuy%;qBL+tc+3cE<2LaJ6+)P z`8lpj{U$qd&vdjr|9G*W_J()I|K7OqGE_y<=n6ZJuth+4^TMiotCM3gthhCtCeLb| zGF4~g8cjDPYmvKA*I51*G77&8QZ?Rfd}Hqs&c~12_3s^woah#CXo0uX+Sk+E7q3zk z2v#f6V)*skFLy@LBKDHJmUp_@irePw4p?0iD6m9eOUmgP$87RL)~dWvUYs4Iy57A^ z`*(WU;w#gI zFB;tMUNJ#en`^0WHXrZeK30FmSQWMYN-hn5x!hlWb)L3k$&YOAZds-yEiysawff(W$s3(N)-Cmt*qb@o z^4Ff^iAr|Md-kc8nhFG;ws|^b`GQrQALeLIYHm>5!@zo8x-#4$@Ne4Lv%7=T9Cm3c z7wii@#(rFIL4eW1gr?JSUj$zXsWWq(uFwx*vZ(he>*Uy>@Of{+v8UVDmEYW|u)||I zr_%zC+;#3#?~8gWOfR|7UwlcOvEbOm#$wKn=&YBIk9-c6`M>$|Q~L`m9&V4=x2xS< zz0YsvAMJgz+c~4H%0Du`{`0>;?pp35p1Ir9zt}%r{wZKbryN)64W)UBTq%=s7yYx; zirQuU?$(DZN4sDD@=C4xcTv=*V%Gg-pXW1K1sYuOY`wmuaFUAWg;lZEQ{?UCjm$3vkDOX& z^yRyDxXUblHwlFUpT0~9ijx<7@Tqoln%k=bs&h+{PABe;U^_coOSNvQ^YlL67wJ6F zy=!Bojm!TX)v5E7d}{0O=&Swo{xnqvsokfh>1bY)6KfOT>q}U5tUNftAigd|=w*u$ zlM!Q8?ec|JMVh$0G(x-%rt+^ZNZGd1)o{wbh)Qu|wH+4amr@USm-#sFcB4vS*y(8H93&M#q4IBwq4nS=LZGe)W`0Y%L<>}P#S&D z<=UEclPmtdelb=nOZ*C&LVe|<7P5*gbUnFz zitx-g0X9puzZI%1Xz-KcnLF8=(Nbzp;o&w7(~r_`Ufn$Wyjx9Ymkamgly`p9Jsr;e zo~kWb|K?OEPt-=HvuS6K?JQ@rocwTwM7h>1#l8MA)2%9={BYg4!}Gb#QjM~s=Pv%KY&K>Oy!ET%yMyh^F4^-bwXWF3? 
zlKV;Ij_|}f%Z=g=Y!9|{t$fhr;~>d+KhtB*x|PBKD>lV>oZ_CmhiS=~qIM6}f4fVW zw|{%<_Gt6*hSi&F4&^TYvL(~;w%C-IBi#zBd9SMGh%glTs0dC!_j6LyFQv>U|7Qmt z@OPWYcI?ZaITs%~FtQ5Iv9Q>nTOG{`V!+d z`u+UA^Rr#x@}~#XjDD@z^!MuB!kv#^-Q8`W!QOXKD($Uiu{MP4w?Y7CQLRPo+=ul%x2S)`h9(r(M;_ydcLW>+h(m z9wa?CMN{$QZeU@HaEX-jXNI`pm3txWBq%xmQZPf zg$+*A?VO_0BA(R0em?b?eW6FgBf-dRv+l2AwY%}tT4xlecgjyHnn;z0=DY|9{L(I|=da*wpr{}oS=GdTV=wce);`PvzUz3uB>S8y16+!$bKOc=hP4bp11!e z&gYW9e`59q(57I8EgAwB%QSBA8_D^}{ZV@8awuS_a7sj0>&BW^E)UH~4OU6qi%J`t zek^e_{cz;;g(8zFF3C|>1Q#&QzuqNoyo>kdla6(aqAE~lZTnV!-FS5KB zzxP;1s>|NQXnm>wwvK5}*WzxuJF!j|1n$gzFzL~|ckkl7y0=z*6kEPS zq&#ZP(XA1hB0meu%iq?Yw|ke|>Cmv}iR#Ut`znpCt*wjF@88*6{_f7i9r??9HVM7+ z(A0QpaY)y$MnX3wI?F1MP4~e=#&-uE?wH|hD{w|#^XT+#Cv{dTEO@Zhc-Q9g_Y%jp zm7LUCy@!2sR+#y;jmO^HX%$?qpV-xKqA=BavCokU7TfE^C+!zV^~gKDMErx#sP1L%rR9-yrZ_Jr?U4(*2BETCzPJPnCLkz$b~CVZ8)1!TQBJY`@1B?N*;|u8LPHU!G6i zv~tbPkNx%&(vE2Oh_Idv%6!6oT3&`%HFWY_8SNtxqU!c~jXu*KA8Mbf*<1Xn)i2=S z+nKL+uoS)AEq6?FfzRxywOVW^5<|bUR4fs@%Wmgyz9Ym_`O5q*$xDk}UWK&PxnA1d zwd{`Op-qds7k&74P4}ahgu1Fi$QvKkAjVycs@@BN8~@z%bF5Xlt-&9^J8jC!(jV?8 zCi`1=o$X}~DqHyI=){mIewJz%UF==_XQ$4|-1TK){4|;1*li#Cx3RPuad23l-j=ve zd+F0Tg73J3j`=6-`1PaE`BI2RX;zQ$%gbkGpWo>8rTI|2MQ8i*e~v|wY=&YiO9wCEAQnpfjB^m9!>i!Gwc7-MhFBP75GH?3)X}ZUnbY4#U^3T`B zB02a$>x(Em@rI2qeHs39mHCxa%A9-SpD&(%+xE)T9gAg;*OpCvrD5__T*dQ?Zk70V zm)SOzb`iRatyOgaFJd0N_r$F$)HZ$EAGCJToKB|yj%j-81y-2_ zN{>>UJ}$3Zy`XF7SMRAmPlv8*S-k1bg@7EhCDTq;IUW6UVur#&@$YF14BobFlD+)L z^y%%B^VXbst+`;`qAQ;N4UaEAD(YjPpBr=GqMfVT#6tz!Pbf{~5t)`M?&W>u=ap%l z)&HFMFMj^8{_&~zoXOJrN~Mpzxz+ncX{tu##zloDmW|FOFQpw>rrSuY0^M=IB9azx za3h!II^TOX$Nc6>GKp2KiPR2S<*qT^te4R%nfvXEU}5{N3%>W>YgT_>wD^X&=L`cu zg}Q&UIV=kQa2!kVY2EGq-*MH3g%e&~b4|D^Q_?RfE_gY`iDhD5%9PZNNBrd5vI`NnW4oJ@ILiE^An> zeV=90&v&!y9y9Ijx2k+`iG6=q$m02mj;wQ^Jzg?7a39CrJ9j>8eaQ)r5qwSwW&945}HCJrX9uPh>5T4#i~`gr%&H*kLb)Moi_X>IA1 z&v%?wngyGkHoGV0JzY|v(B~-E`%q8Kw)V_$fvM%oZL8Rx=r!5Q{+{I9xu`&VC3CLx zkv-ZE-}P2({q3LPkdfn|WhJ>hr^>ASaGU!Y6QgfQKCj=b{(qtVnv99aZb5a|U0<#r z^Oo3^x;pT^_fP)9)0;P2u`XidRP*5G4q&+;{ZXOBW=BMWQo{<_4_6C{-kDV}S$&-= z@nP!kfW=I#Ok7s10(NU>7GE-C=I4*R?myqo_Md3#u~nbN6+E}R$WZ^qzr|#w+p3xs zm#(YmxwB03R`A&sYj-<*Y5eVkY0G|ml`P(8E3TLH=bh~WqZPXuwoH0@O7gT93+E~k zZ_gGcjy5hW4wDe8hoK>F=QlVl4bIta%*6fbuFL!1%>Sk?<+!E4aPitnX*-J_oey5# zoBi3p?T6lulK+hwk8b4z&k=Bqy=AztTJ>L|OyGuIVegRAM)5`Qf{I&mmYb<`<$rZQ z)OPruf#60fACW&37Bk2nzB*aGcY;03;lHUreN}~G-H(Mit`>gp zFF#qmR&2qCPBlKy=*_9}`=YJn?911u&e`{Jg4Y@+=2oVZOIvsMx+Q(@IWcp4#)2&e zmwQ=e3TPAvEo^D>o5}wE;_QPhbyp2i{>AJpQk*|e6+Y*BoA75#w)i{Sdc|ezcLKk?y~LK8c`wn~V4=4RkEx%*!D<${ znzqLWJ_fEYZEcyL^zY81E59EL{~yl$AR>~kpP$M;vFE<)XN z-UY?23)i3MEut2;ZbrAk%1g=L?$t;1K3uVXqr)_dRb90lR#Ca7;*Zl~E)}~S|8Kk3 zf5D-NZ?~$5^etJp^35MtukO2(y*)x!2>#l#w)K5f*mTdwr@v{=?%KYn(W5cd+HY~- z_m3U3Ew+AS=R2}&Px#il)`Fyk!2t(%Fmc_xyfy4z_%nAlkMqrocZf{#DiL^hQtb3! zqrmlXn?f$lU$iWuY|R|Sxjv6h{w*!%xGgRslbftF>FE^9q}s{p6IBGO8S*wot4(a- z;_z^u>a(K6$+3lrn^lNsX8iHSGY!d$A4`a2Jer^TXK7aI;XhS1mQ$yB%`nM2HrKg! 
z-zSq*n)9?&tJcJFdo6sGe!fj8c$wnyj)@xLrZ#M@{S5uiw_QxWOD$DC^;AT*|%G|`zwCCx%m_v`hkniDSk-??AxU*jPihmVYpetciswRM(q z^0yj^Tf0SLF5PuHI;oZSNR7YRG9F9Yd#l$?$e4eAw%t{a&F`$$Ytzon=vH5tbl~mk zqW8UXjbmT5JQ58tn49_PQbVi3^z)DI>zovP_3G&MZ|W=# zf7Ck^_Gx5Zx*>Z0Y)4M;716YXXuG1*8=6Zj+W1w*Uo7K@+_y&rIT+>KDMZ zaNhT;uV=j8u{!0Dy(H6p#y;!geaXix-!%4I&6V7Isp!?~*^13FzG45h|DRmE{dro) zyu)9$^Br46^cl|A{Rj(U6tuZ|rN8fVSmiN)hy0D_h0MKDdT#wtcU$h?n{)o?o^NYk zzxV3YvUaSP#^GD%F1~uM-!rH0KlnDMpLeUN^e@d@7;+}CxO?aCX)T9*c(wBa6=noA zC5`F`SSbDrLR z+TY(RF3a&;;-c2I#Wb{U&I#p0R(>!2y=LC!66(HZ8k!WGE}P6X2~#*&^ukh($I|Z~ zry5iDzJF2gl@0Y=i?7=p{JiYtot_IyyxuCiUsU9|O?pz)aO3%2|2y;cEQyOfoOdcP z^3bcuO(vUGIvJj=`MhA0anh&RZ+|@3pZ%OMRg)!V$I^dqs{T%O%jf1^Q+}LDq))F@ zzm3y~&vVD?Gm(2Lid6g|~!#%6ascA~zI)vekGH|0z^ zTm!cWPJexy#j1aP$E7F6bA$v6{ifwAdF@Z;Qts4=FL>x`CE|Q*f+vrvXouioJ=Gw$ z^$JZ3FL^mPv9NORoPBUmr}cu3SMs#8Miy*3(--!gettpWhOM((U0hkIfXkhIAJ6U{lk5{ojM4Q(>7^`F-pYQzzeb`eO3A_Sp>HI{`(G`^#51NHH2~9^o-R zoUr4`^IkPOm57Ez?295i#aNrPSGWg%Ziky%x62iG-s@9 zaAIUrRm}R`^e9YE@Ws}ja!23C*v0H9)oz=j@$qCsQ$y;FZMiT0KUe#?i(gs6u`IM> zio%>98xE~G+`;d9O!xX5e~V1-u9UnJdJ7tBRee3UQ(slBb!bw2d3ZvcBa5@KL%=T0 z9Tz54R`|bv{W(hQb?VNN9`-58_H!@Y6<+G$rSw8FT>WvD%s$t-9CC-A`YSkfe=k1K zeBy;|mqeLvftQMS`1#v@vQJMgHo8|JoN!QeVg=`)n4E$W8H`;I47$SP|Fklle|mar z>|do5GfefZ>da5YpDMZLbYe#dr^`~&lq?qJQzj=FDt#|FJoC8Vpl~4j*wxQbpAFYl zN>qJa(_JZ<6?d)t&k4a2?U)yn{q3GUjJx{$+~L%TZX)8g3`Y6W+|$-R=)7L?X0uUX zukn>VJ+DnKPkH#cvWk_dN82MHU}3_+IqMXhoQo}cvTJ#^u08i_pT(Zo?N7D;HyI>v z4CAy^y!LKK&B4jmnw=ghLY#W}7Tu2*ro_BjP=CtUCB@QZie<0!VZRg8f`7d*Jn-42 zCw1Zq`RzYm&5ylYe%Jo=pNL4Y1wSNO*Zfv&VoE8vzin+x$h(Cb@_z@Nnf0WJO}Com z^*(LkWRHSGHZc~al&VuaoJ>2WES$sMQq53@f z%c}V6=f5nwq;fWr|IQv(&8KtP4<&VYOYUo8Y&*5(rhDq0X94eD8}CWmU&|z%RyE`C zl8M*OTD%vG*udj^-Qs;_^o(W651#kcC%S#>KNq9_nDO0{T4CKd>5d0Q$CND%oMlBd zCwGZ7DJV7S*zd{Re6Q@3!6co_OV6i$p0aArG*v;l0Jnl8!Co(8g>EeoOKKH<=-MU7 z;CKAy^1^bLuVM=dSVGqtHOsKFeamR+NL-(I?98%F*Zy<-{M_rb#zIdcer@4|*Qb}8 zMX1Z#xvRM+9OUdcGeMz2@iGU?35~N48oqP#e6d@ zljl#(w8`81^wZP|rCc4g;~PE~7;g)lY56$i&Kqvq--qms*43uWx|v;Pce>=+)uM|2 z!etxzc+X~@S^V83*7@1<+HH=f%`V@`2=Vx#*s5N?uj3$NI z+s$T5uKObuuK(s>oBgd5N95Votux$^SDP)Od572ViNZpsqo?Z2ZaThnG_tlbvYr}Y z(Cah5p^;}&f}2FjhICz#Lj|aKf262OJ&QFTW7=F$nSZWGlB1FmiM~bCg^B z(Yd?T5~179uxtMQK2`h0zB1Wwar>G~ilWkQ{$ag$+x4K)O+HpJo|}F~rK}upx{XTL zFEpxUW!!V#Wd5FK4{dw1GtR9yNtty_(yH5dd+W!Lyu2IA4Zi*C{}+{A(t0|T+iY2- zr$bB6?z=}jawj_lNGY-{a_Ui@uym4|ZEi-u{ifRiJ8g>oOgKB)%uO!vMUT+i%^sWo z+$db)JNw%4@^d!N=7;W(m)pe9s_!z(rqU>CTTZ5gj8^u3an6pogDi}#P25I- zQ23#%kS)a-DGc=!NChM;TID8jE|o^;5h&4rn9w0 zZ5FrPkAE{fXE61}0-syk|9@s|^_XAqbk2;GVi)&CtQ8T-yps6mNLMzq;2$CN%RDdp zPFp9fz5Jp4$&+9&-E*D$j{dbiaB!~K`&kkm^Zb^GYyRCc`{~XLe|DF?4%}6g`uq3V z_e!4U3d^=^W;n{Oac56uF^`l9$Nqo6wDx_dv=y-pktrnGFGG0tg&2g?raiU z%)agLva@d|S`_NUTYtJ5p84_f`KcFH78d^5lwlqttoqqBzhK|OGZTeH8e5vUje>f$ z4vytc}ESKA8jSrs?v0KAyJgACU&zlDxn(@)1t3nJN zn)UJpTr?&;C=2P;J`v!sP{H9~)6B+MkN5a5pS-^9%;D;1+p=SvpP62Ed;HAKGI-A0 z#h2}}{;vvK-V)9y9aSfv{lBGCQnKAsZ%4@^ZViPQD-5dnl)8LH=ZT#uyjmW7 z+)ClzSp{|trX~%mPo=M~nSMK0{^9~-?yW5c_tpO9DLy%smGRgF+c(N?Jr~x<^SkoM zeDD8RG0lQiF7?FckAK4W+5;b)xLe?&%0C8^NqJF>C~eA z0>;~F3R@GF|9A8YVd4E!GR3bz>9OfjW)@Mal^_58|M%Vg%g=AoC${nBE&&S|8XFuBvN+EY4tvS6zVu}C&z~~44+Y#l z9dP^bgmc-mo;|HCjw`mBfBAP5+k5NXm(8+no2tz9*su5L;+3u0;W7f*3)dEc?dlS5?gOxUl>w zQxDVgC)^9NKO4<3-7QyCz2oQ3d)sokA0GSa{^H6Ki(}{Sww*|tcC%<3r`Lb>IXgrW z1bj4%m;1Tyf4~3u9n0bit1|>sPEC>&{{H6RzwGOUFJ76(2!6hv*;1+MIhU<>pH>zKHVXDD<8_`99h5pLk^9m%{sN2f;OO1B{A*Q3kZ-gNy8f1&;F!{_iBrSCPx>}z`s z^wxRIwry9Osc_<1ePi;f$EV7#|M_RNX|p3=fW;&UyQ#I_d>pf^%k`G|&(D)tbXc{2 zsrPg#Z@D}6@58?`rk&|3Wa@c%xIOvP6wMC(+t%qa_Kh=`lqT)keRykv 
zV#-A3o*i$4%j;gO6P$L;s3a-G;h-uXtMII26XsR^jmwyt?_@4sw$0ikE9}&gb1YMhK0iB~?Nbw9bt_}b z;oSo1kDZR(`Y7!b{5bqhz0r^F#bU+>Se@gd&;8dZ_c@+_*`i%l%MO;?dheayFaD=!U0A7AT&q{(zs2G=W(Mzw4j0V6 z{=Q@DUvrloMQs12_e4zF9#mBwC(QI~OW;~_1*ayCOP=M`E1cB)jh8;>7wgF*X_MQE#A#G{~pSvo#OuWc`ZA)aFq6gB*8`UcbaDGz9#Xnbjyq# z!5ftF>O*CxBt*PBt?|L<%%ifrD5g-YVdxw~2d5+T(?>}POERB{M zcit@w;oD^;rY|^ek8JS9#LkZV;la5_E0%NNL0(JTJd4s^F|%om)?mS|JU~V-rG%U>UcL! zJHvWpzvpbh{8QnsFN#8sR7`uLQMh0;Lo0LVZ%x^Og3@!_1rw)!at4>O3Db5C*i_|Nr?JNKl>uhRAX5`pg%Ec=VxYI^z>e!!^}-a z6|91DSf&~ZGE4|GTyJDGSA$b)#&icK#x_Y01Nr&Si+i$dtv>F{KG~kUCi?cO!oANX z$DO|Vnt%GMJLS1wgj8=;=v7R!wwJ3AIWjlo^Y=jU;}RU(>#JY{-;#lDAK)=#gm% zV6|<$7OSAHVAWzEwxEC`(mpO@$MXIUzt_zFc(;G=z0+@mtoiuP#I%{qxfW`d3zlR> zii$ezcY69lP~hu*+ctl*ht@XU^O^lOiMu^h{9E|+ob^)8+BojB_cj=S0`qfAgP>7CCa$c)Nh=srIu1>K;O?1YBWj0R&{T z%}ZSxnUVz#F1FwjU%eoO|KmcV(z!n*iarIq9r?||o)R`|S>?$$Go~jrR!YozoThLv z{YB}bzafWKUvY8XmU^rwGvn&2UX3Jw=eM^?-?(Pk^1R&A|Bc5{WzpOEtHlcQ_WwF~ zAy)89&xu9Hg`#XS&plP0f1riQu`KksZbymr3G>T;`igm8ey#r~w?TlR@6FDSHu;Ww zUS8yXSf1l{lZvaH}`lF->;=N>^m~2?8>~%`NyI{_S(Al9}Y&Xi>+?hv+2!5?ReHNQf7~vEBD^x zIp;O=)EvoA_0FmeZ7Bg#&rSEv)O@LO^5n$AHZDDhU+NPcRBWh?``g6IePQ+W>p$Lp z4_l;e%(vLB*Li2r)1F=ashD>;ND}SoZiyH+sJI*A^nLkUnvvW?DGgdzM zi!*@h#s8M~a*V9^UH^q&5f;dr^ZjcrZ*6>;D%S#sTRaX{?}R_rr+aGrnZIn`v>zUQ zn{4-I*lcnYj21J}{3+?P!KKo0f|CQYZF$>;3!J{&F2$_ADQ3ez(^K!5ztfH+1w1eoXEYI%3pI{9zgHjnuSMOTMycUd`UtxIrSpT|P38|ry$C+9Bu zJ*g}sl2LY2TbjGt5tDD7CgBSfIy5QHWmP*9!o6q5KlU^K`H$_b+BaJ!__1uS>hc9) zWozOVuxc=Um@DxuH@KorY-5t6;pSheecK)wa~)p1zxZ(4y3pK%@(Xl-#5y#&Y$}XA z!0!Kv`{3<8hfdAU^jvH8!|TY6`=#-Hj?UVZi)-%HyesnSw%+$iM)%lixu)yBvz~pP z<}W&C8}l#PVKP4_sW>0MSR1g(>8--am@V=DWQ#6+KGk5cQ**M>_2ZoV zKd&0k_iK6PIk7Og$i)6nXQA(olCJ>|67eu;V)}&G{QFWtb9Ib!@NIvgmUg zMLzt>`xte+udr`+-+arIIrA#_OnbOOEV|)P;FPZm?=UZ_wcZ!0EdQ>?QON(z;_bJa z#8)0Yx_QIxru&<3`fq!%e*vrC|BRH2^M9<(NEDf|Rpw*tqD8^ilkVNJ$#*m^c=`D0 z>FW#pTJzt1eQmxXx>jn^p02iwQ%_0V^jhZEdhX%klYwlDj{KZ<@yE9s4-a$yxH%WD z82T9;bHWwp1t z91pnsrGD{urcGPL7H}wJefF<>d`?$oqOpU_Ba1@yqR&a(^L~j&tm+LDSq1Ue!~bGEUe$vskbDTt>v8 zf9}6{livS}4-|krv8s5%?y!}a|Cz0~+$#9=bV`s&{)?kAs|%a%N-~P`zhiXgzhT4J zx|@Mp&P+HaN3FwXW?{*vpO=N^O`dsS+q>)wR(;$;>fViC10O9YSRVg0XYLA(P_GxC zIoIW%54G6ymdVUK^WEDEO9Gxv7PDV?=E0m$b@RDz?5>0t-{XJ!`%d+*skzg9V)obF zWf0E2^=zxO>F2e7rs;A&*roB%Eb|gW@v}4THHMOY$IE6rfANqM zdbvzUB0XpKRW)f=i?(NlJUY7~8k86`@9&+Z@pi7YOoORcXrN2Ye9P@;@9)YBezIg= ztHh;4|K1i&JI{B(bfs_B#3QyZUPjwgCs{lFoYTNz=&C(G*zDISd7h7{x;862Hmr32 z6y_PKAQIM9vM^?EWW=^zqBfsYzF)5Bk>AbQo&NUtxo5h2Wp}<^&g7J#Hus4M50B9V z%}?6VrREwlA6VMN9McH3vfR~~8nSGuz}3cnsrKjW_Rh_sB)_(uq;_!5 zs;hxZ|G50wxYhX5Qtt=JcD^xN3hdjyMYqT5Mk}t9@q0SC`^G}qZ_~0LU)I&u70_HT ziIY|Mj?l@^+Oy2_dqP$!C%rIu@3l)l>)nozrU9FOl~_L8tz7pjbIy7m^A*iq{1*@G zN)J3>4sy5oDVFCYM)y=cJ@q@E%CzxJrcU>rJ#2G?mikrIT`ab2Ijv-v#BOIQXDgC= zdAq`+U%Q*TUF5W_qgY%5xDNhv>3Fex+cclKQj5K(y3R~W-jM5W9x49)=aTF8|84sE z)`}+-Uw-DH)&4d@??r&aL6+&38VeO>EGStNy!;!7=r6Nne%X2KEV)tVg_FZrCt7y< z+Bf}d=l{(d{Y2!}l9Ne`V}AOy^|3Y{x>Ly;xavs7m$$d2nb~-ZPCb~uCtTp+SF0BR z=dxD1HlAPHz^Y-_pEv(&$g*p^g->Va&HuXQu-u~1tygB|#=S{-u;xY796^VV0VVo; zryIk+EUHh?DB87j(&WaPf3M0tey{kva1w{!^%dO8>vNqVml-;==v)p+e_|XncTHrW z^S&_e>b`^1?#4d#`1O4$|L^Z}-T%(Iym618ZAyWlRD|A%0EdGukw%UNAOC*eTlnsd zV$P4vb94UlG+$lK|JeNF7-Eyk(pGraW49ma!ayNCWNKS}ZBIfDxy`kssep%kqxs$CpRaQJ- z*mv6S&e21vl5PJ|q8A7M`?KTQ4?(RdQvF|NTPqc%NL&28|9{=$NjGLKNL=$LFKGVL zCK1*%Rl+PncjTQDa| zvyGk`Gaqv@2Yj%K+R_oRc^xm;6X{Ni!be@3(t?>?KFq6p&VJjmAkng$NwJZM?*WHm zV~*m2{*a6%-!*j3{O$40Ds-J9m-)D9UiF8`OSgII&3NxE(_37a&enLud7{^f#a~?d z{LNGZS&SB@Rvu=|^XQzEv8BW?EA%I;w`<_%W4-Mf+F=ImHK9UJFL73UYCb(PN~D1th=|Ta)wc=%(k~L{>)QZb$q*Oui1Km%$A#HXWqWC!$vuMSI+G!q58k2 
ze8TG2P17gNmAiPvraIa=$T1*rXHhD%jetXdkt1iTm-fBt3%fRG2>56wFm@$aZTNR_ z?dk9DAK&PmePX(fq*C$DW^1hzUo>|vIP&O;;Ou@aYtIa!Ig_}(EdM-OvEFg1m!;_W z8@E=Scj-~N#2&OU2&vpgcSQvU9@ zym;k?@9%$XOjXpEW@Mdi^@2m=+Pc_o`MMXa_U5G@Lc@=lYw=3$VEg?1t@E@solg(w zOq^6%`0icd#$Vf~X$0=s&M@`Hhc91DbTW%ptCrUZM6jr8vff^GPUe5Wtb3cCjt4(* zzNDAVuwHCI!5`~{^#@g#o$spu#^WdSaC+$Dquvvo)K0u8di9{^@9)4PcV*8OJwMcS z{ox0r%&HLWfS4s>SGEhZo^xN6UzpK<_PBQfv-nAth8dzNmVtM74ATWX>@ z?~85+$CA6Mi7AibV>X+0b#}g6x07puXW0H`&bMu%Y<8?CSYH0*E1uc>Z~1(;g@KQ2 z4#g6|JR%29{6)dx8$q4*U!(ru;|9O7p(JksU3ZnzTQB)TyW0rV)5Sk znN6RA>gTSBys+QpBSUO`pGHr@?i({V#9Ua`VC3PYsj+Z|RjJjh>+5^h$Lk&5@Nnwi zD?7WM9#6j0a;wdyxyp3v>Xf6eLhb&S-`*P8@%Q)p#4|HEkLd|9ae1jn?DzXox5}i! zSaa@=|7kJK$8MIj#nm#HUtD!goJ*tO!h{Eow(^F}oW*yhKl-D4lS$;C+o=b^6RPLj zIPqo{gD2bnj;@!N7uWpSqA}a7*U+*|KZfIc$|OC7uCAraeUBfQq#B^+n|0++sa=(r zR`>LDe}zXU>nCc~WE|O3$$BSa=D}spJG7Nr{bs1L|KpTr{rlgpOVW2$;*)J}Z`72m zcieMN+HAp6_dI8*p!$POI+jH&SB-w=@ZG$*BKPLP5B~)fjg76>Uw>9Dr5>!(u^`2X zkyZWf!RGdbrLUhA`JI-pGfENFty1xk*e`05_Vw4-`iNam7I-KYWtx8b(Eh&u__5yS z1_ciSniY$)vzB;F?AaB$KWg3<`v)q5OI2Sko%hzZ-~oeG`OCJ`)AhSkFK8dLRnOdX zl}BDfAnwZpk%8yH&on^RO7xnEy9_|JR# z##c8COCol1eV!zDXik*r+a;A+!r!j8^XGbX8#R`D3CSmx<-07K-Y@8OpylpGzZRd6 z63&&uny0337vR%1US_Q0x9#L)_TcaDjN^X2eSU6X%BqkbpRZ&t>5~?}eQqANQNy7Z z3%-7K6?wq2qQ~#?vuxd%%U^EEXf1efu5Y>je5rpoH>V#v*PXxrZExJ(st%*nEB&)> zuxxX@^}KWQ4-jR+SZp&alapUiN2Gbf3-tb2E-`CT*$9-EVgwJAGo9 z#$OHv5uXijC#(62{rckc|6=h(tJ<&~T}mYmhDu8(i`Gf8#&I<4Rk3~)rZvJr$k153#(sbgaAmi{8iH9bbj_ zXf8N2T~Xa`uiD|uJKE$Iv&>g7R#~t>ea3<-4T(=qOgvfFI*)K0OzqtF_{HES5r)_g@y_v32Z1%nJ z=FPJ7VlFj_phd+W9yr$gEj8D*VPbEve+xzwS7y6ysB1|ete(P$;r*9y7YgjD(fd( zSRORK+kCsAzCzklPu6XIhf?K&y&Q$ho!XDr7v&v0sbRSB<1JsF^HYR(`_DI<=67p~ zW_Rp!{RJ0}*;ZUzCi(B~UeTBHY+WoYIZbkkw(rg|3s9O*T=_J@z$9{!7)`myLp22FWWDni^Ke?`QmxctnTW zhvnFUAHj25yQUt$=)|xNKn#G?cD3Yq=aK<*>C)cm$2axm3sYI?BK%3 zn?65YdwQ~orq#Nr-dDwSq5+?{U9=bdO|#wx_~bLpQ42br(rMe<*NfB(o5Ywx3F zvyyZAWqv+p&8&sMAZYUA+HB|gc&DqU_AiLoIH_Rj|BkL{p@PBX-FX-}Q z&AjWsTvs?atAFhmlXH-q@{J|`mFB`7H7^$_TJbMPi@0jh!4z5a?D)#yJATN%D{O{Mi0HtwqVRr`UMX4IbZ%4?=!$7_3)opKNNy zDr^@xL0IcxOW>3}4+2w}mg(Cki)~UZG2OQK+}cp)q{|U$m6qNQw?&+_sr6A2`_G)w zHv9N$`8%4+=c{dte7fYIHlx-w?pY!MZEgaN4vq?r$F^0!m%EZ1z<<;_%;jd@Rpuqd zm04m356}J{u;)p{n>+VhO`g=Uuf4wB-r=Is;t5)zY#&!1IT7lTY|dU@a^dr=c@9o* z16~MuG?X0UpE}`N36JQRm35rQ4;LM5W?KLBze{Th>sBq(%Z1OkE}p}xA>$e;f2!2V zT11I=gZ#eI=ht?NO-@wzQBzwVeDmDd59w_4EZlhB1EDR?r@opO4hbHiIU8 zg}N`R=Ug-6<(|7}wW7D)F~Qd+zh#a4rZX{pyR^yqV`A0h&X?js>gK6w%6K#uvH*6~JbiM?4{=gWMm z_!J_UJMXRff(MmKHXd*8Gn>g<&uieG7?nV&qpk zJ?Hkb{LY#AOx&-c13o$JuixFE9J%h{B#!Twts^cyda;n_)Kl%pk6e}|G{qm;awuYV zVQforZ%DIsPoVc`lf%o;L5~iE9qPE2~oc4EG__*mE+l&ip z(_V$Jat5g%deL_H;<8fd5Uu!&41!ZUHk@Y>)QTz#b6CjJ_Be@=OZMgi;ZOB$>;La= zU=Ut14>aH^E4AxKIHO3jOtA9B=U=^t$c( zhiBbkVX*zGt2cjhW4E@@Dt_%<3VD~(zkgYjKk>!qR~zR2(&4IL6;6qo^PsQ8^6RV8 zY^CtIPAjgRSL6HBE`RU==-yPF!8V@(R|LAylF|o3qNAr?oxognI?>BNw{wGTu zYWr;Go2L7?G2fi;=n1ac0`D*VyUJc9u?xSkRxj|<5=p38(a%`wf86Ev@&$)(GP2$- ze|;=e#bbub0uRwAuML?w*FVnFOlAA|urz@E-X5`Mm4OT`I_WVs%#kM5;ejTWuFby$ z?uZIre;Qj=&=Z(FGf4c`^Yiy!MRz=D66y)wl-y#MWg>BWbuo8VPk+APpAe(SGL3tI zKI#U3St(J!wrax{xob^kuq9`SE44e;_=%T&sl#dE9q6-^2eRucNcfO z6Z6I}&*1BAB99(LHe7#oLgQ{qI-6wn8h#C%tbY$AKAretGBbCcl~|0BrE}|%4LPwo1rY-&3dhR*joFm_E*6(50h$yhx z&ho>ntp3N*qXnI<@6GrRKXE%8u-1?F%seY$&q+7-Em@g*zH@GT%c=!0ryKn>K7Vem zHSf_qZ*FeB-TjqIquXz6|paYi>qSV zW`VfKk{6_e2EIC@P-fzCaI)8``dl_K zUskiVn>5!1-TKV>u5b-Y{D+bc6<2uFeq8l7IB0CLtK@wb=VkRp=g(b~XBA#@WS8OT ziQ5v?tY;)d-AU2QFn@3D(ye{E{E=JeKe^x2-aFj4d0Y7LL!?dpAD-Z-9w+zhRZMQX z9)F47r2Jy7^_H^ri~&}v>)Sbb>y8|b{$BX>hpWyjTW0lg*~4r49{0=<|7Y3UIPY$c 
zk(#+=-*SGrY(aURogtU<>im~z&KF&Jdiufp_5X95=`P&eYyNp^k+hsuzq9gsmX49I*Thmv~=Fxknl|6&F!t6 z!FBK2b)~~3;vYYD|KgM+i>p3H<;<*Qy#tS8Ip2e*;Ys&c*>)t8TIb5 zNP(xyv5iNb>+xJz$J(2I@f>TW&-vWVc`IFu6up(^uSnnY{FA7Q=G{+^b2*M#mM{gM zWGOGY&{KBT?Q6mFv(8_{3=TeJ*}ucv{oG;O?HX6B3SV-iZ(JVp>${8?x5AHAp_NBU ziw|zOW-qy5(eGJ~J2Va?MBJPwQI}-3=tx)B&-*@(y^g*Ul)|OXyfr(?pfk1Vnj_~# zr#JB%nMJFv7#%n~LC7}X-|Sf@A}#I1cxU!|_O>m)d}-e|D|@k(cb3b=Tvl=08g=#K z=M(b-7P)Xf7WC43QF7DSS^45YdlUYB`S^JKU2(r)l>8^5#=1c1(aAp-7RT})nXvZNQthjM z7um6GZ`b8?W$1fRuk_32aiH9KCAWjDmTNE1uKvaH<9=eo;i8{aVxC9N@gDkfu5*o- zh~7?RQNjy+cop_`80Z>2>}`?veXx#S>dmJ$3R`ltOmtO(=KVOpebHX=?R7o2 zX=fSu-cHP2nPmHg`Q5&n+U=41gf8dX9QvHG(QbnAkNZa6>Kf0UPp|BnqA76BR%_SV zm+#XJ9K8aK1XQ0LKgeDlEs`J2m>;+I*gMwffN~da6|d&ex|htCb5ZVg>VEf_d9xXb9&&)AXR~wC#R-z*Z%$` zYf#H2`$swd>#tmcvTId>T5bn!$X73j`L|@jmWTiLS|2)-eEQu7jSf|}4~shLM5f!X z;!IfZXxZ!!LIC?|0qMNbuBP`r%t4b(z)O%hb8~(#*nlr&N#BeC7&WH z49`0xpZB5|D{r6t%uXzRonHF&2Cp8JMI14FY(sGy)#TRR}_Aq+TdcI z=UZO1{IRct_wwsj-~QdL;STs=?)o<%PNMQ+eg17$!)@yvwgmsbF>gnu=>JZ}5bWXU(YVOL`*`;N4xPZCFrTIO$P~>q!>duWA{;zW@6Add2+}jLXa< zTaWE8`Cz=kW6nX|{rwR+K6TGJ^Ox#4tN6`7!>92@ZhBCK*2@#Y#&6I3|9H2ZLt0VB zFUPv+>S3`(->*$9@SJ!d;Lsh@`pM=^u6=T>GrOKwf%dlTd*r<=HvZ)} z7V@@#-u;^PQ&Y92lf9Ptb+^g{%ZBIvZa?*@zZ!I9a@WmGsojcGBevztTo~-Q;qk+V zi9J`}-cD+|fAWrZq>=UuvG;Mefh0ZBo6m zP1htdt(@6s!x~%e?I+S_z0GWzF#UX-g!s#@l!F(~wimoU#QS(wYT@!uBc29mKb*iyxbf27g-pRXP z*Wi@;j2GWalh4dGI#dTO*kpdHomlc}R|QW}eq>*VcuM%O=R9y+g5c$NbB} z|6Bi?Ok6cBxwUU8!@+xV7lrc3zdKhdd37u!+p*Fe0I?+xV?GSk5v_d z{$FC8RyB%chN{>5!oW)u7 zJTG0RStKVwCR_FVzp2&}ZPK%1?8+GTn@Bf43*OZ=DUG#e?zYYb{&3~|)idwzz4GMu z(`6_3Cx0xzd~sW8^b-I1_ij9ORB*HkuyUMrVKuAoeeX~At*y)6Y{-{R{Tnjv>AT4b z9#nM|Jt=>iwaDe%k2a&Hu@m1EMJ|qPw{!c{ZF$?NclOlcq=Qc?l&x}CvRtXLE|_%vk@KHYjlY{dvo38c$2fcC^zvnxx+GnQw;QFF1<@)}djDk-; zC}b-}-s5<#yML;HiH5+-H-V8tku4Uprz^d=TmE_rzwuMigh1Ky&(oP5AGgQoeZRn= z=Pj2K!onZ-U-{0{ga0{fkG|S}?zgD=6CcT2xgB#Y?zNvtzVrBwOLLfx{l49=-IucJ z&);vbL};UnztSvmTh3P=1+O)i3WfaQQTCd7=iSd&OEo6!t4NL4dN}RU?&Y=H$T4{p~Q1hMav} z&Y46RGgDV0Tvz;hfrMj@-fw$v>f_--bZ2Twi z@Ebp!T&!y6Yk9WOcXQXbMyA~tp03Dw;Kla?P}x~#TvWnsRH z{f?sy;v37>x^6kVn`eHCSGibtFH8+QVaWZ^O~Y7^8>^^zo=fhxMthUEoO}!;RcH;_qL^6 zFpm1uqMZG0vEN#&V=U|brq`S1@kO$&*L`lzH6g6CGN(abuV_m7fw?B%S1(G5xoNhl z@BGmo(c=t~xvouD*Tm)rOfsze@;&3yy5(uc$Ii}dY}vc8QDP%Qt8%Bl{;Rb?Uu7bj zL$}!nKDfXCztjA4TQ`(OhrhX7?s#Omt3m3=xa372w{j&;<1Z31{qu8qzhL(7tRpwK za0u$(+?P~St0!^Kz}UBm!_x*tlb!q#PZsNAssdaIsQY)NO+YK5B*1ukSX^(=fmU7~H%xj9x{eE0cW=QZ6c zEpuYfwAuUC7b7Qsfp|;d~0;e{KJtAiBD5xEt~g-|7D-z`fW|%g~h(Ek?HlB{@a!AX)d_L z`Ik$5MnGqaQDk2G(&tfn+m8noZj6~(G~-T%g_+-vv(*dzW_@~a%=XLKdwhv<2QOH6 z$l30?sQy;oxrw`Ky_K-m!hjD?m&9(Fl^Xb9GidhzyxQEa2?5IwFyC~#w$CxJpLDeA=I=#+7wQUz)&*;}dTErrRNnA= zsgRJY_O|0p7LB%AeXHjuzb;)PYk5y>LBXAfC{=4i)_I-1|IYb(FGyl7FM0RqWwQ6- z7S7(v&(aUx^*R^ze6rT=u}kB7*lCqHF>&R;2EEc1DNPDWX=i2}6JmV)#dETnYwxS8 z8*^V-`DB`0%51#-()Rd8^YZExkGXO-1>TI4zIFZ$+Hj(P<3#+0-wnEOc#6Zma0$`eR+K!ObcAnHOD5 znUgXjZmQAAgq6Vy7C0Sbd22dB@UvCkogGRmS13L^o1f$`?_9;V6J@889k%>-F3p*_ zh3)ZczQ*)G|HGG5mh$K5A8ffb`@z<4XV=&}F7;W;`qJr_MYl1t^s&dgRRk9MxzAl% z8D?!anW_ETTL%9bPiL&UzUWNyy3BvFQM+ya%w7~$>F3*M=)CLUv8S~+KDcKKp8frI z^_i=tY}eLh_a189bz1Igg+`2zpZ3-{=3Empg|=J_yt01sEXynU8mtP>COnT{6TO|U zcGH%l2Nq8j@H_XILuY&DnJk?bro2y2O?6)6()nhW0*fPy0E_4=QI8exTADb&h5iJB|F-k-fZNk9m@@XsTPE0ZAAW6ot@Ofz1JAyCPqe8l`n}^*{qo1Tj9m*1+rqS) zIar_lGm@#^rY90DEbFQxdoSbb)6?HCEWNCx6S2g7!`|0ClF5tPi~|>^@lDa4?Ky?x zapKok&on|;F(&!M^sd>-wdZus(KjL&Jbly+y8gYaSesduo0gTb?@P*q*=D&;-4auI zwA+pyZv1d~DKFC;7EU3F(=vOmrSeVnFi@HCz_y8#iSyg^zeR2a3%`V`Eq=Quax)v- zp9}X4T>mOM2Q?b-giWnma^Tt5(}wA%GvuTCVD{gAAiyIZ{Y`M=i7%kRxRZ}vU+&JEp&T~G2azNjzv 
zDcttP(=~9j;;U;bI|?_OE2rM_dFH<6M>N-qBd*K5+|^c3mUi|l;Hiwi96EDy=Fx6( zbG{pk?sCm*cibx;wQPy{g<#DSw_okkxbSOh>@B`+GZh*W8ogz|Xw6Jmkr{I-(7@5* z;J%qljpMIa)Yr`TyZG|IO>@62*}i+X^{TLyIdfVKOAZM=UendmdAH~HzZ)#8HR_ig z;Qm?vVb8>`tDklET^A^MbK_z}*W=&E3zuI1Kg&M+ZE~U5f)5jQ!jcLW9q9AjpM2&) z-;FIh_jatyZTA)om}67vTqb$sn_z&9ZI$Tiuz$?{=daxJc=&QATk`QRx8ui-emuSC z?t>%QmhVLZ*p7Mj_MHivIrDnJ%ng1`Aup~ZUl0xWu!hZ@iOaY-HM;btQ)4p|R}&|z zWSYT2vuyh<_4igjlF`gj&s=+I^BaACkEL-ZS`zO+ILK_d+Jo=0(q7%?dqgI^R{Zhwl&T-!W(^49g_LqhKOPa#LR6bVWQ&WDf`F8nE`2U>e)2AIR+t`~{wRwMv z!yEqG<}DfbLKu!RH~-7JJ4Hz8|1_T3%y_3;pP!X`9q)haw*I}k;|8zOA^|J59AXkV z_@VdazCzQ!cM)w9ZD%Qz9_-pt^|){E^mQMTAAhKVLomc)9+B-Et&Q;2BEL7u@?TkBzr}3u9$868JCNbLhZszI`*s= zR#Z1Bm3nS_?B!I&;-(OXh2C;3qAbE%OD`1t(N6h$DkpOL%#%f%)HW||Ii&v7``Oa# zMqBp(yK44dr#<&_-K2XdR!huh3jB7fntZIJYW|miUC~0^|7Cjp^}Ubxzn{evDQi7- zNukWy1y29Nf5m0}c)7Ly52v@&$3m%}(S3=vx04>fir-lI&B5j8`*+n34legP^We~~ z_j8%#7oVSYvpHagBmaB;I)zrD?pE$^Cj)Q$7JfLudZ+$_$n*2}54Lib_dFF`-`^(e zHc6vS=g*z>yqlh%=$0tFKR4-xX}-t4Yn>N-n2ogN-m!3;q**-aNs5O*C#$5){=Tc9 zwQmag_#SNV+n46H`t8NeD9H^>oUFn+hi}ThJX=<^)cm)bra&X}XN?&FoUE5+U8~cV z&)mGl|G&0XTh03)@{fL>_^A9rbwvQfnUa0SWxQ+m%l_?)YEWvpC)RaO{M@{T*VgIE zac?9TL$Xh-Q;Y3ViLhK}e(LJ$_Nl9*e|mZqe!ixBq)V1H_x3sSj`DA3bYlb@4%!!< zkK4BR?9^4l>N$s-cCv3QzGvXlC&4&9e(n+Hhz6&ZkAqhTOTAn<@27=j(IaF3NpmDl zKB<}H_hWmI+U*0|Rz7z*`>h~o+4at7^~}BJzrA8PZ#3uT#3Sfu~qLlD2@(Y{JW8%b+P83ZGXbg%RfA|ZKl`^ z4uz_0)5=ZDj=quKy{PEu#2?AQ$NTk?KQtJ8R!lTq^|$GhsL{f@W}B!bf_jlP49)Co z4zH^`{5E-(6#obFC))he>dsUj`BZd}b7Byik*SqY=3JGj7Y%*Cs-3a639d;Jd0nIQTkW6t*}nwte`@5L>c?0&=l(#$O>@e-FNhxw?#T`Ayw z-Z0=~>$Ad7?^u67EA095_wkR5t3`Sp4y|r!dcXeX-7=L%aFH@>11iJsM>gnrX}C)va+r3zx=rK%e+e{y}hja!}BeT9@8}bRUdV!sO0uP6dBa@ zz2nhgR&|TiZ*yc`PQ6&zq#~@$9Bi;K{(Rg_snZvVOx9grb4}*^LdkC`xy{>}b1!zz zcze;}efG>8;m;d=pFNLU5qDd=U{S}Vc|Dy9oqoU1)@dzJczZUX>2K(AlMC6}JQ7?5 zZ(c|g{dpl8QgDYu$6}e(C%MlC_Q!8rJjW!`x9r31Plx}0w`s`QwCZCg(^oB#qrJDk z2ktr&k#qCj(G9K#Yb=XZ4?kV)TvK(#W3kinn}O>W<*j`gVYf}(XlbGGeWOQ`msAej zGrksPo!|0N(Q)PsqxiOzj5!RFlLCLUsECA2Sb6dF*0xh`Dtwn;w=+37Z^>j`^|-k; zXWu5frLNHNKiIPK?XNkOPl{)?e>_l@!+rbwgpy^Kvwf9?m5P-XB-p>4I!%hL%-$*M zuZ*%)?eR1J#aYEzM6`S#*FFnOtDhohqm4> z4_x?$WzGN1do$kM-gmC%%o}H=w_zbV_MDg5N?aOcUe=ySRISxwJtM55Zfm5&D;dsq zY(mfZH(%`}?;UJrH_X3h^XB$;|2N%pOlCY>zU^u8r{Fzv43phJW7(!zS0=1Inc4I0 zb@;+crR)3Z{A~aISA{p9UWkT&psknSSdFn>dT* z9xE}_={N{7xNKWhpvM)UwXx#M_76E$q2lA zy{1YzdfOYRt%vlNH2BTqQj3sz{HN*O?sEOCYil}Xt@oRKN!YhRtG?2vX--kR*jO7H3lw$3X^X1{;0jgeS_L1_=wsW!jK3)5N zef|9ferGd1B|{~pPe(!n-tFAqG6JGNkBVnja!kW-z=}b604W1mmK{Q<8aK=n^lA-Gk;Os-lz_t zuZ&VA5iL(ohbz1H$+%5EG3U$aaOYg@`CI2CYd#Qls95_p`TO2y2gN_EVymrE8|pU|Y}dg)Dc zf&nwDFb`)A$E8`O**zZw-<|Z_<+~y8ZeYmDAi?ZwYusO5S=nooqrZ12|C^A6E%)~y zKc#j4zeYmv*RT6Ot(jD_@PPl~O!)Zr#zyAl{_?_mI~V;6?qv3Ud3X5|owIKaf#!;5 z2&hXu`t9hp&gX99)!z|YuAMHMFwbP|-NnbR?Dh(DFFRvYRPRGl7w7y~xIOZ$&D8yx_c+Z?Ex9`rXaTP01>~)k$k+U10Z> zK6~t4Y`Q_&nKYf~FZ^}$f7mW^>HFas>o@n>-^rh%XVxxnko1|baH8V+sI6RASEX}r zF-^K3{ra!pj)KI1tyyJ@&sOPm8l|V6Vmvuxs~@Xy≷i4{xYE{XSyfqMOyXSG+d= z;4phzWx(F5)R^66XZborcrqjJRp%~kN#XkO_p-K1@Jab!a#!92@XXvJVgCM(`==v% z5qm4w*DTy~?8p1)$8VNq&)E0G|I6!$>+&le*|j&zFN zt(1~*igs%AR&#si?y#}=d7015D7zB@i`~xpWL*7Z{&<0L=$R`|n;Y1KSs1z0JWfY$ zPD^}t=H!iin~jr>^}Oj_c6XniTEe4+FG8*yyt9*A#ox!-XyGq~nggr-TDLo`X%TQ` z1RZQzz;0?SvR@~ro%izZ!?vAoIo{is%O0A%{QSiqx91mpcp-VDPg0aut7=i*_lXvD z&xEwPa@*q8W=mzSl4bXswzep7qUBcUS=Ti>tK$0)Y`iQ!(W+AEjO;bhz{M_y);yIq z%($eqWIFSnm`BH4@++rV@#fae=;+d&S9vPmx8LJrC-Q*>tB-6SfMaOGct ztJYivVU+~|oJU)^#V6jeeb+8;cK7VeWCN|RGd=fePf4w;ty(YQ>SzAAqAJ*|-8Fbg zPkXnxzGTCpoP!ghJ#TJrNI0>^{pE@SJ=OtiyuTP{Z!c?Ez50E-D4%p3XRp*b$JJqH z4N_0>JT~zbI?W`?SDo5aa`4(D_X%=#$4&PYFYY=KwNiOixr~xfr~QO*zMEY#noP&O 
zWMxKlzPYkNNsar(37u!l{nyOR@Gwf_@~F_Ae0}}u?MmLWo@7L=wGdl>yYbJ>%MWkN z;yf|!ob2YXoJsn=f?NHf+twDdEsov0X@*I??mW+1>sN1AnAOm$y!x2Z%50l2GN!i+ zWw&^hRdKB-Ii$0&;>!!eq|=!j=08gFx!>wN{axKD<$u%60SXc0N+us)f`}ZW~ZmL?z6tOLF zdg1JZ1OJehq1JlC1PA-aB0=Ad-{HTOY}w5uwqV0O#(a?-b+7eyJeg>+kXIsS z{`bX-u?zHCZ^rH{JRY$uVU}U$-nk(=WESOpIU|~3pCELo^XP#jCZ!jCbN2Rl2;QH& zSWrF9MEUQdtlyQIqB*_)`{Uguk5Zjoy9pv8WRbE4({g$-S=&hmUL_FC5T@aO04 z8}r{+{(gLV+P_{A%}0rc=bKIZe`~9|WA}N#9NfE^sr=fA(#U!RIN`RC?_&kr)EXipNJ z{Y}pKWz$Xj#m_GF{x(=wZ@txYlF-xWukQEOWKNuyt{IklU7>~DzJJcXHIGBJrn&#V zx;{jN-9PTgqpyy$_t_?B=({?$?8J{<&&|obWC?>c%QoCl}!w zzNO;hmWVw+I@10>|M@<=dXh??SJ4ygH@8YTudl0maHR8g#nYdBJB!r0g$lE-t&v<( zv8_i$-LHg4Dn_>SbtUJTq+6e#%N{x0u77dq>*d)y#P#kRpD$eg=+e=4jX0&bLchYi z=6`(_4Pfy@JGR!@c$p4 z96Wf;SZJ%&f36~axfQPVp+e;$niIv}->W|`^R&4^}P|D`I$ZfAux5p4@!vSi$AnlJ~c)TFM_Qw&p)?($SuU{Hbo?IaimNdMzrwZ*Nfh zQY>ayq4;O_y5zYrJN`e@H>*k7_apM2q;cATvuAzhauzf-H#0vv+ReT%?j&>mC#EIy zIbD+cFHPM4qVn5;%a?`E&$mB*dAWb!5|4=rSFNi1RJsmNV9}l??|!dFbAo2D)7S35 z3!eqeu_|T%IYpY0<*9*0=(RPSq4QQ=T#y(iV_)Vyx1HnGwBvIui>G|p!ZK~!w#}P^ zubU(*hpmZ-(lO^xzvaZ4?sU$4g7K#xACs@Fi4>ixrSAS*PqoA>{oI|aTO_R0q+ebA z&ted<|CZ^0C(WY=B`j8GvHjb_)-SjBz=VZy4IK{IxBKR4geK{<@yRxFiE6#5I?)(> zFCj5DCsaQA!ja`B7RP6v`X*w)*!nh8N;4=YXs^nON?()C+1>lL9_;$~?buV_`s@;U zWw(+y(FQWt^X(mk%F}f{`xE(eC0(6Y_w{NmYTJt`7foD}COcb6v`hOE0(njNSiFLQZa;$GUB0Jz0;B z85eZOOD7+@cKO`h>M3q}chzg3Tyj-cLD8A5Ty^^*-Bn>PS2h+rJk+WyWTL98cjMV2 z-76hd%Q!WhR<2c^CBvoY?R9kKhOjvQy86^Yzjb^8zn0GVXRUnDU=Q0V&#%u5KZ|e_ z2D0n>FHU{CY1+xI#2}5E;>)^jiA-n>^og9n>^;@FegAfs!h(+8=<>Fxeb?8$U$axF zuP@F>!m30k?_>7GO%smp%W_HWVlxQ?~^ z*KRiYHQ8^H_)g(H?_NIn!}nOdV!Cwj#-y}>??3ZqK4RLtU8ORzFn3eBf6`CCskdA& zzqqLEuRAZ!=F?OE#+jREFQ~k1Hp9MNa8t_3MaMT3sNcvtR^(}Ox#y0E0i&zN)_j-A z&VrLt9xmBj`77aUXGgxg_{X>QvX6L9)|}mk zLSK17g54&G?##L7cYEu$aKBIrNR-RHwZ*YhXkEtrjfc`-MlV;t|0L$d?fOZ4vn>j1 z-aakeef0H*EiJn7cEV|AvesBA^lq&Guei5VIa)5rE?H7_UZz_d*Ng_GPup^Lte<(Z zljFOu$TLvV*l=VcRReO4a_!OGj$)$c)3rqb~Q_^I&M0)ZVrCm_2kR+pp%m8%{}SKn4yR`cn$Pl|zb|N$<*|h|JF_%`m#twlU~ynEW)WfWc^Da4+;XTmQaR|R zkAvXEsM9*xJC-v%4X9}O9W;xTCztC_SzVX!rauevBKKDO@Ok~5fBvJLo9yd9h%A?j zJ9zr?@ehv<8t?g1(Z_!M-J_km=01HikD-q%XG^B3^tp3|SH(Q6>`w>o^b-5?;cwB0 zR~p+Z+x~A8WaVD=N$mO8*X1i>3YCJEp6WU^b#;cp!CbK>&LWSL*R3&KA67-U1>c;< zq_$v#t#7T5q0-5PA2u>ATkOd=nJLk)G2_*=smV&|YyWDW`Mv%9hYzo6ZT6J$FE^8S zd>b`+gDdB!QxlC>t`7`eRuUNQlD)uTU*fB~eKQJ*w;z9|dsp{irS|Hwv-_`ZZ?58B z+GZD_bz3K9N$TrX!85u1+&35X9Ayz%c;(7scYg*u8P275&g2~GVtQ2(8<8se^0Vpp z$IRD{+zIoXqPX(fP3p~J+PHap#)Spzp8nEqWVwAz%3;Ra)FoM0Hq35NnsV#sRq=cN9_>Nf(##fm zFrGRuVUe$4_20rwq`3D^WMQCD*0pcVLg)3kXXGz44ONLLxGQPDAv5y1WZ3jY_o(8{&8x!ix_&>H-6gI+&8qq1%Wdq3Yk!|LdULau*);3Q$1;PE7YBMpT+*~(b2d6 zZZ0}!Aj0|o>GZf|;(q<$eUY_DO`+ zXDs0>vUA)zYx0J|SohMe@b~TOC2jOIrc(w_J!;hA{Ni{Th!0TwPjYGIg{(&IM-b->FdSg zGq%2xxFgK|_M^t_scN5=s4Q?2(|s5aaINt~MnZ1CjCGgj>Tsp7EcTlHU(Im+s2)N}mr%SFG9paYN!^$&&Y5MfRHWIA4ALD)~U;#;E_ZP1x$B zEEpy)_qmq8X2V(*j|)pCInSySIsBz7MZ~~TWZ{hkos*Xx+4ni$F#S`3&%K9@ZCgJ* zTK&KFimJz)Yrl&R-JGIvyY#eJv{(ACcKcsz;?^AzTYdT6N3IZ;cBY5F@9kZx>?+XR zz?rSbdXUAm)kfB~N<`W$-{D*qt6|B*`vqQagdDTPza3SHjXwW_Z}I9s9ei`e5-!c> zwb#D#uYTRC!!tKu$UUl1yCIlq$5OwRX_I#^zL&pt!>{Q-Gp;SNoMrM)>h7-6%^cT~ zq)#ULW;%D~&zd-$DV}52m!)0yn%^1s{czPxzqfH&PR+i$AD^CjpUtZM^n~+RzdV2S z_AeRNA3xV{<=?#N;9`e+A@PT5a<7z26&VT%UNw4V@$Sc?747ZpzxZGDi~rx}H2dY= zi8k(8_l3FG?eyjMADI41^s)tC;bF(jJ>n17K9}avzRJB-dr7E%ca`n)a{}q-?*;4k z^|_wTyss4f)8?LXWUcLgV^InAY(zr!`b);Xls_ zkI&x{f0f$BBRMNtZsJ1GvsN=iI$A^~w02D1KWE{C*E3^xl{Dg5|(VWAnL@NeF}Z$aO=qdAXvh%Q*L zYD0+O<6}lxjChb{W*X4$eGFVX@gGtCb?u>7A&-h9@+B)*uERxuXw zCj#I8Y!eR>J>9YG@_x?iSAK+h_Q*u%8{G1J-8(0gE5q>C?)2Pa@~3hi9d`Ypbt`E)MNm7SDdXF8liG>W@Q; 
zRWlkoi>>{WDxS(;ytr5HnCTDJdH#F(LW@r)6x|MdG3`kctxtFG;98jdO7e*Aa#cZt~Q1$^c^T=e(Uer5B2UzOmp^R6S<;M^yTM zM4xQ%^=$k5r&iitwV$5C`Qh)ki5VAb)HYq!>-%&!t22DTnN@$KlmfRsnPFABimkzI z;+>rE%-asCS!{gq6SdBMb)!p&+;i1En@I_t-`OZP7dx$PxY^5G22`qe>3Pfxu1Yxw@Q zr^UvP0{8aTbx&HW{c5v_uUqMbkGK8au9f5H?kwzWQ^>p~B2ik&wrX|u+~n7Mhb~w5 zfAnkYRbKt?;KQfY0Y7I~RXNW4e>WxMO(Z+>_x{V7IROV6Up<%3LR&kv<=*nDOq7x*;p_3#^JHx+X)uQMj|WX znNDtAeyQ8a`&#x@g)6dGWX%0yj^40pzF?ZEyyi#hI=wB$M*0`8#~gVdzpJ^s+V6Gt z%tG16it%q3c}!Ae?OSiV;PAfs&wO*mzpx%n7oK7we_hg2`?>p^AB7L6sR^!=+q2-X zYa_wR5&*%NL5Bfh;3U;pszZ1a0NL~^_*J106jwI5z` z##p2=M`Y%Yqdt?>c(=WqA$TI>%!9z0Oxe@Vx~z-YsiWY;CqGF|ZN-l-D@4TQD=WH6 zI+vZ3-Bb09v-te)?I!b@Gv9`?UJ9!*lM{Cm#GBJRw~7hv{## zW6j#%uUvloKgr=>-n1QeqE5tV?*G>(sV>%KroG&ByXm(x_nKbL{pav&QRLO6`6q?i z`^!~B^7D>_J?D4Xxy)yUBhUWWpdFDZSrcX#e*NI;lWEVkUR3wd>h?o#u1a#O3ZBik zxa(0;hgYydv(=ZM7w;E&RYcFcv&?gb$wW4hJ@>rN8jIw3e=C*SU>f>lNyEv794@=L zIs_&+E0?~yQt|dD)9Y#HIbFQcF0AlsRAw(rI=X14M$nRuc@eBW9`l8aS6y-eVbm$TH0tocrg`ZmY9)f3)|J z(1$fl>l;56JU!uUZ1C?L)7s-P^LsD7)owO-uhY8VI*I9^>&k6YCmNMU-Pv7n`jMaa zBcGF3CA*IA+V`mN{UO~Ynll3Jbf5iTX9Cm zTN{&(I<1~|_Tv6@;Xj+RlNo}In_XSGWNdeEzm=NeusTfl;j34xSWP}opY72kQXnXr znY_mMU<(7+tWE6k>&s%#yfI_!&$l;XAjz~z1$#YF2BfsTUk}MO6fkH9i_i^J)Nb& zd?$?I>Aj=36QsICm+lOFAGh}^)6N)C@kbXf-JMnF>ppXad7*R5Z|aB1Pw>aFeZPq+Bl@3=31<=WYYOREFdg+%hI+$dw> zS?b$7yIs=$PtbuopEy!(OB`(JVDg);CtLOXU9a5QRSSZ8kFAK=Em`zbU+~0~vkxXt zWpe+PfBco}}pPem#~P#!F?(VLeHG=>TkHx-Q8cv_-N4-MQ65> zS9dt?r{%OJE>}Nnx$?SYOT&`q@e=xcSR3sQKFvPvxB_bHmK*naES z1IdmnKQBFQJziY1HZQwo<*hYat^5tw{Y%J^*c?*GF1P5^_OBC_(xv9cc6Y{JC|@d|bE6OFdejc6ejaiyE7qj~k`3yj#tk z*M#n~a^0ABMd-`v>ymfkG;eLuxwd)_|N1-KY|hb{=RDtk(G_)9QM)95a?z!Lrlo&0 zmzT;p{(bg#@g6u{HQ2-YxiiU)r!p$g5$!OaWaT*NJFon`L%ADWWB2Q6qh+S3JrGN#z9|@mG__^qK&%>>~2l=&K-?iO)w4rFaKcn!UWmmlRAM%o!b?1|SYP+TW zJKNs7o6Q#|_1~&kvqKfs5nVffmWP4;=}b-$kJD=}U;j{kTCn2p1>v{1wmK`BvPw%*6pdeE>+-_v^#e~$2C6J z*;B9H>6|>fPhsC|<$cliEFbUAby^~Iy+8I?*TLe#vfqVIPR^9J?0R-#4#&B6w*_a) zjGMSN*SC8Vx6F3&br3wTY=UbOvy+-Rp9~Y59$!&cGg zFYAYiW|wAlCG_TmMQaz$y0MLMq2+I`!%@s!OdNT+0#h!i9C*NTW}AqE;@>mZp7v|J z-1slY`I7&Z9`j3`CQE9V`z)LGY+8C+DSBFt#fGm8zkITq--v!*oN~MCM1<0Awlcw` z_qJ)MpZ9vQI@l*WdP(|Rwx@+MQQO#W|BGs#?X}JA*cZvbIAAw!RQAp@6Q%cZnA6`X2M&a`~IF7tR}ziX-Lg03$H zO`HoA56T{FX((yakKJVQ>DjsN;N^ZR*G`yN@%`c4I(6mp-b1ehQmiJgb9@uB$J0sg zNaNAnTU7kq)!mnPOp*Dy>iVUI8J{N<+&fV_dEeQ(bKfTYJG;>#x2aRX%!Q>hjj7AQ zWQys62Q6uHu2}pmWGHO-7%l>;Z~nd7B)Z^%!m|ger_EPsU;6su;^G$<7q_b&T@kqW z!V*v6+4>4KRDs5A6IZ72)xJKoWc_=Q2M4cQ ze)Z?*$=>#f|kp zg`_5hF+F(c7j@bHnBq$>-AD6wK2Y_V;?eKQ&(D9yqLNv={M@l6w+~Nr@LcG#`bg07 z@ci5W8&mmXU8lM}8S1^aXXiS*Yr|`md76fgIfd0*vTMXcw88_=o4z@`WPOA6rSNU$ zJu&7@T>owe+HaG7ylL)=RKDI*U;56Jnlrj8M28<}cA3nmG+CNm*0H5PZO7fjvvWE0 z7ECxOBk*na6yr^^tjpdw^h&)lxL1GM{_5&*_Pbuw^>Qz3ZI-n(^s=*4J-1Jx>#yUl zE}bg3goB-tbvrM{9+JL1VauXfnkE9jHvW?_Hp+XECzCHzEF*m7&Mct-#TX~1g7Wfm zTQy5Pd7FxcYWH`EfBQDi`1@d@)cW@mO^UsioSHAEIYq^rYiE)F>>r!87YOc^y|<@^ z*XzNP##7nudJks_J*@qGhnF{JZtR?g8RGIgji0p?3#RAS2LIA}>C>RFMeKrugW?=6 z!B*@q5(YCY!mK<&#i%>zz%I4n5SB;qb5{JM1hF!$awMK?f0njB2Nsf&#L@?vBx ze$+4t%@IjW5;5ku8MVkjo-b~b*y2sMHl=d!km!*%_tRHd!YO+9+xz?LEycGj;=inR z_wVAJ4NS@RC$L`NP{>$r?W^@oGq`pUL+0U)N?#P;uIwp!n&hg>EqB|ap=Ab(aM5kA z1FZ!Dtbd9fRi1lp3&|*w32u;O&9bfjz%xC3&ViHPe}D8Ubcoabem$%3-+~>oWq#jY zJ=9~j*m2bTPmAA);N2WWt3ONWq$h}ee){PD`{VUnCurMgxFv>Pn|0v9*UjM{cGSf; zwjZx3``(H=S4EJUkVm0zx8zX zm!;3PUkJ_6{$g6B&LQQI&&Yph>x2zH-5)c#CU9{1GI={2*W3s?z#`(fWIqp|q|PzN zQ=I`8LMFM4N-hOg9KXN2YgqLq!LMg={SAss@y8DvSAx@e$bnFTO&`G{`m21@4Cft zcXpomzx{>#?@QTmJ0mIVREh@VIlDet` ze4m|UJabUyYUuB8Z%sKlG>*s#nLK>rAuBLzbM1;FBG&tlp3S)4c(&dysG;Q1j)ylE 
zEz*n<(~JCZ!|d2w-iIC?nz!XX>CWNW#l5|0Sp%10uY>a&773Z=KU2>!UrIke@95v% z-@kM(o-#*L!1dgrY4+jCr;l9Su)VuoXQfforLv`UCQIjE@Ke3NrF(XZxOrP*{8G)j zO*_2pmt`)Qe%UE_XYjM@v2R#u7pSo0<-sq^liN4iOT3-$m2->9iE-gvodtiEO>6B=ZtS+d-NwD3 zfr*Lz7Nx z&~b9ZG1mAulh{9hJFWNcTI!Ua?mn86w{ZIgyV^FE-`&FiJr@8pzs0o8Qe9(aj8ei! z<&=p#4-|Yd3B3_~U`Gb$f(J7e#EQ8yiYEz41ZE7mY zGhf-Rb0vwZ;KB9_j(;+Y=6#G7`_nLkCz92;NJIt>NxnSvn&F$EtVO9)ObyXfYS`1V--;{6T0$}=n~ zKdD^Hi|Agg-naN>%efPwYZu6M*ZBRpHL-g~)d!|??`>M8m#a>4pB2ziyAa(EG5btnIP6@5bnQ>AyXd(ua-tK2P0p zQ#!*O)So)QBBFSM-{NAV_WV0*4*&RmwzIEGc!92nI>+Ko|IB{5hFXhD`E6RpRWCB( z->FvrW8%l#YPbs)$iJ1bcia8ra_pO&eB}i*tjqP(eCI9MW@!_+BvUfQe~$QWwfrNO z_8C7c{JrpATwyrFp_BJGBPXReyu5SttJCRCC!NmzOJ)&Z5xi1V?Bu|DSTLes;k4Gd zFHa=-?ao*%{q{=8ncznqySJV+U@SC#ruwp5@?TYRgmmC@D<&ssqlApu4;1NG3kR$QybV8 z@4o1JO#JqovR89=mnpfgn^RC&z;FtYJ^@l^CQtOLdIuHp3YizB>h)RMWon6 zAC3xxSxMr2tcSTKOmK}qwq&a&_wF5^qwgH?v%hsz#QL(Bx#3a&eVIYQ7te>NvHn@q zz^Uv)o6^ z@vQ!$bk9uPiNrDt2{H?`n*!>WFvKrdZ(0t z3o0`nF!=@?sPJk38OS|j!TbjHS%(%1tZQvawii}V?6!NIa7AbPmE$k`cJGk(T^xJp z_Y<``pZgilx3Bc!`r*!4-1ep4iSfhpCyuHOjcV#se}8|Ue5^+@Vt1bEy4bzTj{LUU z>XXFiaqYaq=jZ3$kN3&`<#{|`U0uX__l;MlW(j*u)#_apn!PAuW0F|eYM-oQo?YKQ zENXcdB=>mx+S4b$so4668fqrI;ZK^*8v2XZ_pyXpM~VdJc{_*tUEBo=q(u*3?5zv& z>Hgaq#lY-)^O`_{qkv;MqcD&D9nNQ`OP0<_@~S){QvOW4Ec>#({bKvK%ZxMyL_j%R zBgFW~#FHLs(zE7-wDHL{bqXI}>~}Xt!eWKNrRD3Fwp_D_P*ZmnN(4=nh`zg9-509u z|6sS>T+tmKJr@@rQ}mr>GOe?B@*JNDCaFr?-A&wkXKue?c`|t7H=~JJ?f?JXT(l=A zc(QZilB^@!z4r@DHck4USZ}mzg{-%dTY&Y&*)c7lZg-Zcp<1bu54G%liLEVtp3R&otK* zxC9!R7tlQ8+3KYx#l-2S^Y!)h!;_a!-&pHzZo1NCjkC|3GgV)7dZ(IrUE@|M@=()nNWUer$ zsk+!J`~QE0!`IC_^ijC>$`Zki2U<41GR=B&;IjJqtCGED5&LR->y`$ndHZ!4C0^p; zJ-Vm$+?L```|AGgvick-xW9iz9~5RuJ+?{c&@lL|<}SnWyue45_VOe0R+}wN2!~ z%&9J&LJu!2biOC#H&tt@oyP047yob=v7VVH&2p*mkwFt@k-D6HT~FZRy5nzd`d&FA z`0?beHIV071+i^lzK)@m3bAf98)1c2k zi?gh)t>w4=FxC+_%kper*1*ds9@u_pVff`amc_58C+o-TSYRpq#4lz~Mc{nV(^Iv> zUx=zqdM@%Ye4jyBm7v z(Vo+t_d_PmXF8<0flXvns-wVxwR}xnP0Y$0448O6F4o*`&EjF8n z-`Qy=3x0ihnQ?E=&ZoDxu*?UwU6^=xUXT>G)+;$ZUEh84vz^ToqJH!SZ4457bG!P0 z(gyc)ohO)NC#j16;&N{>`S#^Yh_FL)$~hta{d_B*C4Vb9zvxf&_Pk~C`HRjl9Ghyy zE*fI6@8rS{6Q4}H7RbQr`%sBvx)Vcd16LE<;$^!zl@5hp&$zQ=&1yF`3v znQ+Fp_B>c>$mpsdeL>2nVB*uJ(x0DPQ%=t_n6R?-{R2I{vWjnSmbN-4-uWhA)#Ae2 zlhDM_@a5?@*XQTn3R<7@{$skbasExeS$6~qA5|@K?_W1<({C27KR!oJU49Y}A*eE` zo@=_31xJ$9PLV?^1qvL4P5w$uV>t4}rKSJeBn3w%-SqU2kB&xcO5yAh)8*PV^Hdo) zYiFE{KO2*%Lor z7i9S5Vh$W=w2wTcpDMnN{WI4JBTEs+r6Er^)1L$g1aLSAOgNB#@z6noCiY{32||@C zgf?a$(&fu54Y-`Abvx*QO{$nj)hQ(vZaHzO^@r=_n{6v&c7EbeuDjpz^|fEL*VpUw zcMIsp`SeHyi}SwnJvK4f{zYxLcZ0yT4g2nxJveBjcVf-@22H<3U$$RelIa`G#CfUg z%8H3M_EsyGy?M3l+v;SN6~}nC7nMefXj`$>|1o5X-Cef*$m8`MfsAv%`0}*|=)9VF z>|gE{cM-1>J0^XKs_(82G0+ZOwd4tl>DABIZf(u}>iB*|K>o_brT@47n^xdx>gyoL za_XdhjNZCM%#+R>?dCVgco4Ox@^9S6NB0v?T#!03OLwE{tKZ+{CkD&Dy0hV5&(YkI zlNQe~-OV~lt<+`N8P2QMPAPM%w>*7urO-~!wx+Y><=rVR=T=3%db(kubitPwzEkw) zbJqS2J+!m<`-T1QwJL1tRt8i$n*6i6pZ@XBudDe_{5D_~zI<2$`i_SwSP-;1)kH9K4Vj)&B`_uuRK?e!{s>Z-`V^>ukWb{#w1 z)+-yhW^ zTC1EA9)8gc61G{~`~F$kyY;MT{#E+_*vI7USMfMCn;rG=yy1iT( z{B4nAv(2N|eW*{_vH>Ayqk$%Pvx+}_es%ypvTk%g#$Vsh9s{p1-1ak8_;^AELfTKS$c40J5) z+k9=#dzZ;ZQ4ngHvOq_jQ6<4(D7a!f+ zpBnkaf^&z(?Z`(@PR;fCpLKqAI)~RZpZ-%*Hz$35Ry@o5pu@a*UTKH_KDQ`*WcKN) z|H8+;()S(wjs9%S?^$kJ`}6p(lPxaBDr(9L)f>4P&%C?4`{L&G@HKIl`CeYy`Et@+ zt4j95$Hzj-|JoOdzt~v+KlJEA&jl8CrlmXPvxPW*T4KNA_>H(#+dbcyFhp#*R#Gzj+p759A#l z?pc-gk;#*>DB|+c^Breq8ok=StU@95+&=?NCeCw>Z|ogeMb#E?q%1y>Ftt&8|0?EL zJ@L`pc1sVMo9zfa#616r$Cp>J*M#0IVd6SFm0{Jy#jhUR&Yza__t)iFzYR(vd{z8p z;)``RrFbrJJn{MY`m2#W5kDUOewwjD#XIWw>L}CMsHUsx4td}h{VmYttfuK)ZHKmYyd?B85Rdkt4Hu?8hQTx2G;+;+GA%k7>V 
zYA%LzEkDm%Sa|qb7I&&%(UTJk_p-PYB>Y?&QU3qX57GNyj6toY2^%%lj*H9>(~sZB zBy(kPeQx#twl_PuXGfPS-W7OgQhP4(xu4uI``zX%%$EfvKR-9ut1PID*RJ=#p%ZI4 zazcF^1UYizJQPVeJ4-_R z-2LPH_F)Z^lVi4I8?%4o+xTIB{r}}hg43P9&s%z7#leujr}vf2_dsVE$a${y{p65uX@|FOTu`Y*r_QeHBSD@ymk9Ho9Wd* zmFySSzIN*QyZB#vq1W_t4fe;bx}BbOc0Aze~O_#Q#~e#F$+xvcu({e*C^8pP!%K8geXXsn^4|zY7(e zCU4KbzxulO{j6_${Uz`0{p~YfRKT8T&h72khsM-_UE#b``mROv{cVx<$B_N z{T=ItmBE3@hI?*aUiPkCGkEr*h>u&u$}g>WdHr(Xt1Gfkud}dnE!*@XGWuBm?Um|A{_Z50t$`=dI?FVrfANvOlMTnz4QB~ z1aJOR^V7n=L}u6(a^%I^v~UU^dtDijviw84@N=j6&m8}IWfp1tSv-ONpPPW#q&u7^ zt={At2=2las+yUcC8LP};dRFRR9?Li1s*5$CiM zjfKJN-j!>w=||~CRY*A3-Cg4H;oK#5d&>$7&dqvDq)$J->|kUmVi$O?gj-)EVSlQ; zS<~mQudgo-JG@P828VvH$hlJ!wVlUh& zW8!2Kp2J)_oBi+VX`G;Vdf6YBoqPP*^C!=LK2e=_x4!8)|2&P)n)9@OKmGmv!>3=B zHg~G{zw6yU_E~&=!1iUUZIaHUGME_bWczRo5{DEiJm3 z*Ulds9@FzOe=Tq3B(&0@RVaK{Se5%zlK6x@i-IJIX4|j&cj#^P&1V9W>(ttn zTB@51RGchkd(5>F&$3=#5v6`^#+mE8>MH&-S})IcnVKcMF}a#wcfK=I{2m*RHMe9w z6o&@R{Wey`>ozv%CoL2y3S4) z>l0F0ps;+JOvJhNJGZ9UiQY*1b>=>Aj#i+`p0De}R&L9G_b7i><8p^BFD?eG@>RVe zdreGKtMB++Yk%hX9bE@2QoZ_J|NUv5YSUdJB~h>;;Nq4%>yMWidH!2mXlYI?Fk7p} zdT0)l(NEzoLEO`o&TxyJVOkSmAzG}~e_)DgwPF2V&m5Zy`M1@4PVdW13;w@bo3QL( zWeL-!`}OaZ?U)|u{9tN8!}IGLPAhMgd$7M+pdlQ<(wfy~5i?W&i=pc)bvugJ?ZNt$zQv(KX={OS7r|fEl5yKV z@5_9zDzV;o@1j?gE#Vb^PYE~w+T!-q$@Aj6M9(>PWe&±IFaRhHkwYW&Y(H=C+X zrAr9+&d4|VOdd=1FMfH;J}tl0^nAb%H_7@tsfI$64_1GTS(JQI|9<^c84fK|J{{h} zj*7icX2-e*{V%alTllD6^LoXUyXs$`PLJ0Jc{z3d*8TByJg?{PFRfbmH}>W1s$Y9t zr@pFBy!^|Lu}b01*VpA5(P=#M?5ev(G~bx$?o3szZn>0~!>mPk`xJ!sL++q!BY5&B!l_%yx{{1~&Yb?2Irn$U5_IB&-z1HR*YoGs;u*y@slzy(0 zQ~6iYgYCKoX}6;8>@Gemy}a?h(Kh|L_MMrpPCfD8?|XP6v$bgWBJEXIvpeJd9#6PF z?W)9w-#kZpM5X^$n_XOxbmvU(j)HF%yG+(Mq~6xg+_16uvzJX(fw=GO`tOlHm1Z@^ z7>m7QX><*k%BjKRY591O^KP9W=XSoM6P4Y+oSxJ49`JtmTyvTSn?t>01jRw-!t=|#<|e$%-0=h+`U z7W&z=!DEUFYtXWj9jTwy63=hj>M_+!_}<>y=7o>N756RPTzbju|2(^o;xRVW6860F z+b3A0Xg4_h;(hLTt9g-+gCNVQLd8c)M_fJ6uhXoO?N1)NNC>J zxMU)Wi$V6TkF!7a&9UeT&yQbx?7;fKO{J=(t8Ut`Xq5vAN z$QL%U-m=zZIcpyu>s`In^VZFqniDU4iwfSgZ_vYrCwNme| z@SEhh9_f(u)_pUx@c+H#hiCG?yX*GGqR^3#`LVR+t~;L$wC2~DNc=r1`Ox(0RO|Ig zmlv`}t-o8&UApgG0*~0DZ@RCiCA`{EcVojwW-br&K&|BY&r@ zf#j?PC4(zLi8lprMy@=$@PVjw6Q@`Ek~vI_!Yl$Jm!3R$u%K1rqTc-y{kL|K~AsnV4Xa|L-)E-rHUFrzSO;qO)P8#10|_Q>tM8a?-?Z}~6dIsT_?bCYWJ z*IJzVy0-b*+Ngx%eX>>2Uf<@jt}Ql@WS??!;fKyy%c3h5Hhc+v-Kw}}ioV9v2fcl~ z+Rq<;u)kgUg}>{vby7fi%BgAMo+23Syl1XWoD1DA2N~Mr`TYVBMOU?{e{f z%8w~bi_KYbW-i~^W2J50n^@+z_H*zpxxMV8cHpYm)KYt!S<@0PX8#S~(YwT($N6Ys z;^I9s%)cwN_228;xBGZT{r^{?9&>M%`MztLb~HNa;h|R05>~d39Q8mZxu3a38ZBDO zB=2b|a64D%FRy>99kIqHs^k~r-KY>JncWqy(vEwloS4MT?0)W2;N)c<&40KYu4ql= zlQeoBWN!2B;djr|TjN)9Z8gllB~jX~%o-CTtmJ<%=wIsc=XuX=t|;Quxm_msw|;h) z-P~76@1_L4n)S-pB>%qq)5*fm{bWCyuYJH`QU7elsqJcF`gxseZVE+DDk|vcEq#4! zVa7zmf|4TVUbbAFx`#)T?D)h^OcQW$U~u%kaKL%S`uP3V*tIfG?d1G*T0j2S<3%oK zwXKwo_c<+#stkDg`+WRSo|K7uSk)K&D0{2#5q#kF(sSkO??2k^Wv|rd9P=gS-{$Qb z1fMqlo_{tu^w<*h(+&+Pug>k9EfKXz{aDYM);!zki_UMG=rKicWd-|*Jv+Yh9J}&I z(py|FFx-1{2BSVp=anM2w*4`$uC8vkDt(n+C$!vowxjZ|P{lf>eUqm@iB`_6yxr8UF?p%a^5biM6rNK(u^?d0 z0gYv%=k50O9on;AdmYQ=51Q`}ERmkDVf(sxx7k&!hKWhOA?xSeJ+4ucwjko7;S1vm zsjlg3-zCpzuzKeERWD)do$xi{Bnu1^S4qs_rHC=^@c&& z%zt`Uw{5zY_3-%P+x-SRzk0-2mM}_or#qjmi&u{P7E)1lojLec@4=sUmp_+4Q?BRA1fgm$2CV*Fx`|b#qDU<^^xJ z-23G_Vf(y%gVo1f=G>`}$XhAA@ZW?mkGbbxulu=i>uTDA zJ6!a?+_t>5X|Cgk3%N%VO4i*Hw+-jzGntXFD3d$Y>EW}~57)Qs|2VtqakJO0pj)%j zTpk7#G#YBX&R;x#?VBb2fsysne|>hp)fAA*&G{X~ZsU3{=Rqac{VCo}Eep1V&FN?6 zR-4h*zxFXnt2$h3moIGCm3Xo6+xHzsA8hz^?yda0tW2VL zy4?AXZ+FE9uF6&AS^q~l&aSS}{rdan2MZr=xOS^Q_5P3JTqcrFO9fWF4Po^VaWK+i zn)va%cqBvTMg0Y3_g388TfY67ksuFOdZ$7YbJwf~275eDP1(eGz2~VYgZ3Q5SxMp! 
[binary image data omitted]

literal 0
HcmV?d00001

diff --git a/akka-docs/images/faulttolerancesample-normal-flow.png b/akka-docs/images/faulttolerancesample-normal-flow.png
new file mode 100644
index 0000000000000000000000000000000000000000..c3fa85c25c1259d2349e80c93dc97f6e18e84690
GIT binary patch
literal 84198
[binary PNG data omitted]
zwpwg;U3BZNTIKjgOEZb0shSPv^Vc2NBE3K1z#;R8(=~bTt-2+CP@Fk=^4ys#ujW20 zkm3F%Ub{xu_(j<}o6c!#)gw1wiF3Wj)q418^n$3u6#ZG^(&l+Qn|}p-xWMGXxa8XW zf*oI6K)znUlEV_ha5JIel;Y9;h{hvKThh)-iL49bIvMn1$DUbplHxYFd6vvK6Zv;7 zOme?(WX`<8r)OMiuH;z!TAJT={q*-E_d@IKKiu@3q~`)?`IwI!A04WhH#+(#wU$u^Te4q{YX4F$FeZ#_QHzwZAwzq#(OVc&(iJre`18LqI z*bM4;uldFkE`3vAO5!q~nF0kbQf{y zzEg(xzk6SD7t5QuovW2w+>EK?d{XSokUuVN-s_>+Q{ckX1+6WIoy+2H9(Bp-di{8| z%<5&jj(6jHlPCY#m)UeK_Sl;``B|n_UZ!hu#3FsBD=Tl)z1?xCch!SWFS#u?f48tG zJ{{T@o7=5^);uvqG(_nbd9y}X!wZ3D|gVW}G4=X`US*tOODryfc@e`i~Xvf7p!QDwIthj})YYrYC^ zR;c~;WdVcNRISh%43D-+T$KDH8@o?$bwk<`NOJDPVbSDG8=`A97&PiM1Ts`7-o2#R z#P&_TL0P*zDdw-ZK*_(Z$*Tl1BiF_6ZIe9Tb9Bix7fx8Z1YCIe$ESGf_t2?9_!j)BOHceGOcZptxq{FXb@5sFtwB)r(?I z&w4N~rt_u6gX-6hN`KscZCvp532Vr@0M1q}Q6uI#mZ!b<+~rbX>X@d`C7(E9#@7q! z&`~X8&KEic>Dx}*E`4;|#%l9{8515?z1e)w$=SK^>#I-}R@R57rfNU5S$^WP)%*9i zxAWgFdw<~R>FEKh(}l0Rzc2K1&C9o(FZJeH<%o!gi(GKJJ`RS>%g9AgE{{6mi`=9cco}cM4I=gD&l&v!q z#SK$p!mXw~eseJRNA^RlKbHBb=c0n26+b!AXw>ShRQ~SHv8tn+E?-h+I-%Eegz1RI z7KYBR?h-pTt(i3)ltvslEBF|K6b^)kz3$s>l@Z%>(*yqUnl9^AJLrgQ}8^i zaOK>>48x?FHg*4bM>>Pm1q=S^G%+_%SQ_B>L@SzM=P6N_##KkpihN%oaBo+o_8RTC z&%R1?Se3ILyw>nq_*&k__Ww6}4@-Z4@MN9x2hWGiYyL$}RE{o7IX&%cM8T5VeTUi} z*J#x3*vG$@L%~v9(B(A$afXu?3ubgInDME1)w!xzNGWrt>3|1l{J_s(n)t?BF56Ft zIBxmT!SU#Ze3z8o1dol*7nS4AX*Rr3>Tv3KIdfW}e$0-9nhRSKWNwuU zp7O2-+~FS_A189y)CwM)&Q$pNnC=pfsY04T%NpjZCIv13;@rj~*|U7d!l!O)LPhMA z8Y=V`F)aG9@P(lNL>GPLeVR2dl9Sd3F^X(t{Tc&u*Mo~JTe#0mo;_W@l&@);9IcxW6KazFORq`Z(rUeHg({!7sI4Nfl`A<0mq zQG;UzLl;wzdhD_b6W8fq>vnQH(!?RWao>eqP8ScqQxkL&krQ$0Ic(^fZT_dPZ@z7R z`~TN*k9F4hNA2fZ?ay;o%9M5aQsZ?UlDiK~eBN2{&{ppBXJuh0r3wl6cg7+Q5C8J4 zwOM(0iyowe6{u$kVK^!u?fz7UNioUeql4nXRzXcIiJ*cFv%3!+*R#v;lrl(Qn6>8) zVd8EMSrGbx3ct+*SEbtWUmjx9IbCb7#n(6sc_cvdM138L`D2i>4de zZ}}IqtAum!@2>~frfd(~U2dH7zs{EL-%jbLr>437-1PLUg)Emamx!R>7Nt&xMH7Ss zodcaeCW#wOb<5c`KXS7ObUmMBwtrIZB;{M?WRY{}!J4LvZ8a36(B7?Q^Pyve$M)L(z^=)S>e} zTc1`VhftSjRK13m3rpwL_QEGptOAb>_C+*kR8{1N+xMKWP%4tR)OlcH@o!UM$9mA5 z2*?>7>`Yn>j~G`iPw@;qWN0m7DPoZH_MvH6XQ05v&eZnnnl%#PPI8~^J}+ASAaLVj z&HRpQ;AZQCeW2Pq|Mt=7ldelTSUZ^7oZ9-nYm`*2A~dmqYk^D4`z!9jD?F}?Id@6!zOnFtAfw7PnPVHHo8E-UA4{AE9|g5&ZQxQ^ zzNLlvw4#_amjsv0yw4FwCWy?uex&ZrX*Jy0{ddb3 zSO`sV&;gY*tR}vT;5v7JM#$dXt&6IHm9$<8VxZ!IX_gvzEFi1T$r9gDz=k)otW_L<$q594%f0(d!)BD`>tEWA`+R-hT z6})p@?DO;U`^($bkd;K6PU#&$u8l}slR zb%jC;Ll|6L)yl3nH#bkTtuB+XsSuDfPBWP~$G%>Ul}qHn$H&JXK7TI0yT^1Y#Bn>A z8z)+^osga7dHO{W)5@TwPTgXJD|h0GGZ8iaA#>e7dvk z>MYv^hwJNN4U>=cY>2RVW(utj9{gk}5!mv4`^IYqTuh~}t{hzIJ^jP?`}NJSJ#Uy+ zhp&$l+4yx6#NG-aMp1`(S7t>`mk?a=;^n2K-5Z%s8ag8k3Sp?Mlg{RS@7%z&BJ1j^ zw6?o#ywdN4gY%)C6?MS?#wiEZO5P0lc4D1t*qZwPf6X_YuF~aW`Sk2;H;<%|1mls% zzYY5`WVg>y<8Av5=cnCwAABEuJp> zlnNwn_5Xa)F3)FQ@9=K-hKO0vz&fJLDC+Q$V|wK6R{pizHrv~a&g}TU+3v%?udlC5 zt|&Ha{PObhY#HUp+K-OQ1^wwrciJrxr1<2~v5RlOjku344y*w-jj7v|>i+$FKHuPB z@#|~4HJ{I#&oHg#`*&{_tGM2r{xZ%>%Y3EZ?vT2;m1UBOXOmNF_o9f6izGh3%Sbsl zN3!4MQ^&^|p;rD@f!i^z6S8k+=Ydje;}3-eOd578qAG74<=3?qaTKxlFMrdLd%Ha8 zz=n+yU(zCfq@NY^SG|xE>)G&jd-`v+oS5IjpPrxZzq&enammX|$FkfDRl*cFV}ItH z^@e1t7=H)W39o0iJw4H{c9g%B|F8D?eO+&@zdbm;v|f2;zkh^{ZGjzQRCU+%l_kfg zuj_fH8WiRb6--BI>-mgJmsW@6p*YZG=K zUcK;WFl45oad%nude;ZPx;`^&#K!+ID60DU?Ck8oGq0=ypG&fIMI?PY&{)gC$!Ykw zK1IM}_x=)@3F%ulUUTBqU_7rTP-iH7eqDCc6+brnV1|oI3QQfxU)i)X?B4lry!pe8oQ@KAz>^$?Z>-zHA z+LHW8-8);=LW>hmZr^-3cK7|0&w5v1Snd1vw`<|=>V*pzE?CZxB)$2qW?cxw&f3da ztW)fSS9-(N0yGGHC}7fRh`Dp%)5pnr26ZAgS)ZjahO8^fWr^F@k=*`WT592Srw!%T z#boR^$scSCKeM$V_;{h-1CdkFg;(#Y9x!ysiO)%g_m4{ zryC~kK5%K-`Hq^_uH@g-tY+p3mcT9~}JXu;CMlJ<4Nv`HZfMpP7?#$$0)t;rr8`^zmxeEcxxNokY-=4%HJc<=x7fbMMz2XZrGdSA5{gs#@hHjkXut@^5w& 
zg}#coo5b$G9e91;VI9ZGjQysW$!pxR>tmB1#ctQJO;)S#mHH|*yV$6rIHP>C-nrBH zzGl}oCTvPM-|M$G&ZY0p&x_?JtRsHE=67>2WJ-_Vtu0n6daj-G$oeOHYk!CQwa{C4 zPo`em1P-<-3=1wlc>GaUz|iI6;be7dt_9)jWd~LYyB9or5-6e^_@>>9Z`NrZV}-Iv-0r-OK-j(>8CsQ{Oo?TNVV*Zd{tEI z4qukoBcc;D`=0&Xa`;HRL2^^)wCxY0!hm+E=T&dY;NJxT_@%ZDx?;${8L^Ko51-+YscaCiFXs`96WmP{G*DR%d=jF ztSge;*^>9SJ-K54wiEG7mR#LiDs%T2Z~cR3>qO7aSoG`1kGYBxUoXmRd38ftH~Xpk zx;?E2^0rNUoZj;Ey&wC#3+vt;2|m3F)F$!JVA>_Qs@t>Jha;V>uyzJ$B#o8z;iaYC zGb{>~{`}!yc>Tddy}G1FDdI(@H+p!J#KXPrhiuehIsRwP*0kD<(Y@0Y%8RFdRy}xT zrtw5oZ?Pr*^W`!xEoq!TUp{D=Pv?`9lV|e=DVK5Xik98F2UN{4@~avI={o4;MMkl_ z31j@dv0HxXikpF7M0BFG{8At6Q2yQU(js+QN9WsIy|=p8Ll{((4!>9oZW?}9T)_0g zuHaFTw_U>-XS=S#lm3^j<~MBH{Q0(r_X{Z|!TVo3p0BIydL|^!^sRp1o8yHiFJHd2 zis921v8%tp?W!1$1A2KDuP6FBRq{N(@%UsZukbVnmxYImcYgbDxcs@z&3@r+E$6Cc zaVey4ZJ!42Q$65dQp#|7V<4j6bX?AA(S@iE116;+i7tUhhPJnRudDb)DCt`VFg{7; zU3V3nRBSpLV*hl!c*xSPzyFWW{rdde^3SLB(>5J%{&ewor@un}y*(A5PO5)AV|-pA zd|iy&mw+E9qVjbXZs$@Ib?E$Gm&FOL>H8ENK004iR@`32$;<2d{@&h={~rWy$@tFG z)*Ich#LliybF-LA7E|^1|DB@tXIxhXE$yjT8@6_qpnc9`70*c$9}g$zi#TdEEUL@@ z0-i)Vu$m*_OUHBfiI3W6TTWjez-akd^zf3()o0x=uU@dDGL?J!8jfG9zD6$!T)bmCz2&Sv=rA@u3G$I;qng)8D?3R z>-EXl^;n)RU-_f(v73JMO0{KY{_{5f+U9g|k?X;AhLy)%Sw7y8zE_y(ChFjHsPW;V z%K3?3KG;31Bd4Ig;{F44-k1l&Yo~WBH7qa%Qz~A^f zaXY({hbvw+mJ2+Pulp(S;qiG^pZ&kGwq4B)5SywU&L?f4CC}bg^hUF#h0(e1`$X5+ z=ej?nl9$(Yt9VYX@(SI%O)gdIn!{qTw;zv+$0z*w@UX3U&uN~U-{yUbyT5*8sp^a0 z7dwyI{yQ#m)9b=yHQ%mpUKjcws1@D6)tkM%Tp{|{mWOPsk1q0r_Qtv?(*951 zA5l5?o9@*fRR_M0ahIq)rFXOU{q5|6|hqiofGOQs!odLU!p{Wztj64i|KJ(J449X|#?dS>mIH%IWe~+ENaXA~~%Xbgm=QB0@Kd|TD zFOD~Nb~5f?Q^9z8TCVfvwBC=O&+k|ApJ(&zv4T8jne>IqQ^%c-$VX0S?pV+*uD{|( z;j!1RCIyM;#d%K?L#O(WBE^B_@ zlbs#7I%cDbPNUr{mJBYrABVcR4m)$6kgwYKC*X`u^fn%4H=e_jmNu_B`sm%;D!HF- z+#xF_NZ#F5dT6S4_?h-*d456eKN&lIw>LjcuRS8=V3+bZFYnQGd&Ts7O@U&wOJ&T9 z7KECockR~Te)mWkw31`V*{8D8E*$MIi7CrsEDD%hAP}W>UxZI<;&-v5(f=4KqPgAG z>dbO(H2nKpo&507K8-k=>M0&-{_i@ryv$2F(6BG<56jWU<(&$jHg+r2$=p3#`{|yx zMftNOn&0;5+qvy#TY6zpTHTqSIeT_~3wnP|?akF?W=`u{CH{Zg5SJ09yM3e9+h4!^ zi|hZt%at(wB6(((k(h3_sKfNMsRyzjiJhI@R@Oh=r0(FE<-W%U<%LsyILcpq$Go%Zfd+>V;38gaFy5*BSu@`6=$HZ#`muD6M(`zf%=ZVS(j z*|)d8^;gd7le@*;HKl)FMAP+SzQ^yplz(*N;opjPQ#UU2THpIu#_nYBgS+9s9z6QF z`NxH>>wDfBSFFocW{KOG^0ZL&^uFIhjLZ8H|9m-mVvZ`io$UWvp1Yq-KRUziV=j+` z8C!mxg*11X@1g$t=Z(Mfy}8@xe{5fu`mAz?X>WGQcK5&E|3~k{`?lTt_ZzmZWztx7 z#AIK%K*s$=6T`1uj@HtYFEjl5x&Qw}W%mo`q^=vM`Dkai-nn$q>EYwaS8n+wFl@_< zZU25>K7Cuqg#SxiEeuM`)#V+gMMoGuVZRg`dtl|`9Yrsh*2VEU?L4*B@ayZV9`hA9 zZ+#XX7razvC%1ydi!RleXODggKAyP0fYavUB4#cT&DK)w=`$_%tqe;3(7``pyF*~f zo3Bh7t6uDzXDhkZ`m5{LUA+M-A|$uGwi4L?XTyOzkNdx!=W*Jd$2{?re(4R<<+2-c zf3EYYK5ck@@6ne|9rrF>iaIYL_@m?u$D7VTuHH8xOn;jF^dGf1Jv}qiIOFm%Q8u16 z&3gr(3A^$(cNV!zU~*QXzTuwW-s$n97NEi|cQ^x~^IKx8Uw8 zqs7*BDfVUg|G7V})ibl|Q$ID)dcWnfeo(to*7|Eh?wR|Xnc0GNLRMSvtkap!r4W9p zCpqr2{p^;FvD!xs#eLVYOlc69z`v{HdBC5TeKnH)_IJg6XNeu;*0)g%Jlwk6FZR%a z=g$%jUjKVIcDiu=f0MLr5pScC&nV5%joxPRn(^c;hj&^{E#j{=#TZv^-rWl7v%Hx4 z{bK%$jGTzS%64_>8lNJk%~@3{)fU<+CbzD@_Wkm=9`kIL?v#^~YD&45_2G9}-oeAy zS~^b5S+Ckw#aAWUZcT09ZUi!qgLuYO9I?vx-4<=sFeCpj%wB!DM4NgbZ ziSrEK-8C0_dGc3;>H|5JC(OG4w|Z$@ldaZxEUi&haVB&B!Gi~<9bLxVCE}Y~T6EAk7!r-diaQ*{OOq&Iz0rfW{-{w)5*%=oZb{=J-eN7KD4_xxoRR<#hn zbK}$TtScImjON(?zW+u)?SIAOZ8vs&6W_8foMrFrtA}r1R=>C)vU>Bc!l_ZbXV$En z9d`X{hv8*|x+@7KIU#&&Ux~VPf3po-Q*!k8v<=4(2&OHIJbjDx!XCG{IouYl>;AUg zHSfJwBFFfasW@)7N>t~$*gZ+}=4#gdu>beFe~wGzotsL36I7xSAOAckZFYC=JE`;i zm)_*ExEL5L5$5LacNMt#_=DY1UUPk39v&0L^z@%Ue@1Fs{*JmUb@FVSyU+h~*7*;Q zoV-0rS6KpEqm`5BlQPcy(PY%Y46=EpoQIk`6ZI2kt0H z{5mpYTxqGSkYPS|AyD{ zXCs*UzIsl)Brdx8I{RfA>n;2<|7?ydG@ezVdOGyzou~EfOEn|5h={GX&N_E_$?1kn 
z?(0p-9WRQW?X+(Gusihoc9~f$zYYt6Mqn37&I#<${5)-z{gw4!GR4<9D65}4_@$if z@bOz}N{ggF-@KmjwW3scg3`Uq#i|0bOl6M3fln@cI-DI-Ga>j|`P*BqvDIbC&+onbQW6gSs(OmM~ha&Bd7C6Gw=WZVJ!aq z#jH0@>+VJxmR{608Wer9zHhoXq(B$lssebt86Xs@MHawziYF zCj8XfW$&!j*;&hWs;Dvkn<#qTe$N-1^uVp>*_%!GE#LKbns;Aw)4GSf6Ynqf zoV;w=>)!5Jo8)A8&Q|U07CoVAU?QygKn)amD;7$bw>$t zVr$Pmni}!@YDCW6T8_nbGt*fDQVM^5{C#DU)5TjMt3J%#YBlGG~E53XJ__iUsJdm)T_1l(1NxXzix1-_|DV0#(H9kx;ESYKaBI%|DI!7 zdY7M9`q-QOe}CKlIMy4TaqCI)lhfgsgu>_cGI*s$%Dr&Ca#w%vy?A|kU;VRs7B4?1^)Y!p%l`|PPffn_Yq5W~|4FCH4QZG8gqK$S)jIKRzF==w z_z}<5rytc7N-p=j>*X=aq;Tucn{QXYxq3NSTtHyq;cv^AKUg@mrFqKsyt`d>%`=BptT8kImU$p(=&etY6cM8(qwB6~>JyEqK z`}fLFe*2PlcdmBG^X(JU7IQw9c=xI%qv`|DXrRm$xfu2nC*RHPZ(kgR@zOQ z5d7SVNrXYEL8B&Rnpk=G@(-DJ7HR6G{}#`GH>Xc;{{D_ROW)~fJ>M0?@Ka0F_502B zi+)ZPUh~nV{9AXI_rJZ>=Qr(owJzc{S1I4@pRO03^}kO26UxHL$vd^dm}3P)k(>35 zAd5;_g+Au0+C$S{+~24fcD|C|eT|__xU-Y*&Y$hJxvxSrCh4nJZOLnxUS*fcGNDJd ztHVOAP>>_#8uvOq?Xvjyfpt%Df?3p_i=vr-E+4OE^|pxIpUUl*K>bKT^K7F>c`I#bZ{JNVdevR#)xeK0~T8Y{_ z`8umUkYVb)eg1(AU(}@k95n{Tf@ykDg2$4tzA=1ek;)~1dQUw!@4QOUIconiwiYpufJcj+n;`Qe0#}?bZnyJL$!mzQr)w3Gzar#|n?5ftjag<-zev{JY2jcUO z%U7E{?v=fkcW0mGqkjt`o_%<6AdG`&M@O5l{h{^tdo$Kbi|l>*kz4#z@-_R~(vgq*sf7?>ir+ORO$}Dv#Qy%`cSS)4zWIPpRbobN(S8m9NaJ*{z=T zZAV8~u3GZPMgRC^YW5dB%`18T&5}z@r)f*(WvPRQZ^nt(#gt4`JpVJVyI-T_$<29^ z_uP)v`zv4FKm9@cijo~#X(#6hR$tfQ+a0zeEqIP)wH;)n;RWYqgR)ONDW_&IhGyCI zIV!8qY?VFS@aiBl=L&`@g+(ph53ZCp2=z7CofPKi_>t(rsB$-MbK2QMM@~-uaPQ&H zw1ZB|uWY?MMf_vtYQY!Z&WO*j-RwT)Zs=@tdDq+D<}W%euK)OQS^lDcg-+m!4Li-? zC~kvaKO{an3%clsDdund^WzSmV`Fsv;qPS&GH>0ko_6Q!N9M`q%sv4rI8<5l4QIV| zG5>U}%r*O`{J(e8@3`}-r$4d^yr<{%7L-cYRcB{7l_!?z#P@5B9~~7t1hT$DbFG9qgO+ch}B; zd2^o^+>M(4>eEZ^8J9ll?wPQf_g;OHM%mx0bj{|=)qM{)pL;KneR0OG=J%fIMjPvv zoSntkJgxt*_j;Wp-|y!;P8JiMusV;`q~>R4r;wOf^&XqP`4)M{eqFewJWXEmk4|jD zyqv#^Jlo38t@)Q$*<)Z)H9>9 zNfouP`m*Bmr=lviB*sMyLV_i~zGN=h7{=D$E8sCzk(cY5MrVKYp04%nzDL)I&hFW| zyzIPMnoi*CjkP?J-d%1}nz=Phbob#uMoW)&^iB?3>Rel6nz87o!~G3ME>3%2e|-M^ zKW*x98QmQ(-^Bdoes+Lc+s@^6nDB&kF@{@Y@87Speq)jQt%qYzQTl=P5j8G)m2AjqwN3PTiGqgc>e#}9y#{oM`nH9zq?z&YFR*B*o4{FDkjyf z-y^#`iaqk~%=z{IEVHhy>9nc*l<@JTZvg^wA zrYT!Jz97H*W8miSp3?N2H!9Wk`&4e8VNl%o#w|g#ehX97zMFR=iav#D^jzNEuVnJ$ zoV}>|y1NedS@Rc^EY|9np_lYD+$vqkyF$4^V8S^IZU&!oyiW%+*<$P zD9`x`cGZ1vEH+if_r$N*zc048{52n!_jIML`f{=#REnl+cU-PMRIXce;LGy2_gy5j zzyJ8CzVF!S{k0O)`q-6x4#oByKKU#^Y;nLoCN=h`>K`@vrMB%oU#_jPUUzTtale9? 
znRB8PwlY|*N$pe9S^(Kw~4jz$@;wyLVheN zJtdX*oNw--ozBZA?yZPhB~|=3ZN=8(GfXwCKDD&=N>F|dm(S>;hUZqC%GQN>)rMJyHFBWe~Ki@Z1J3Q&`uF@Gssa-F> zWnEtG=4I~7Os+!Rv2(=1zcXj}5`!n10b+YcOYFm7I-c=*E` znJ8A>X+j-_jG_*PZessAHJZ4t)JPq^SH<|dtj{gug=vjtz2T|Q6*+QsFCNsHRQ@^F zloK>7O?Td*-S7P`{T6Fv|ERdcFL!p*<#=gc{W_lXn$1G3r(4pq^4vbRuV24@t8qnk z$?I#p>F4JiZDQrVu&1*4bKse&+TkX}h3_-I85(>`{`6Gjqn>~HvG&;G3TH2sPh4Mg zF(Z6i)bitj3rllZ-q3_31jqW|p z*>tfw&3&fconH1iI(6A*PHT<|o{rX*KmDb=Eht^BytMo_bJvpXQynB4`>x&Jc9Oe} z-}-v<3Qn7pueV=&@Cdi~FhI`**t_EAkso~QwV(E{$iBW#(Zb@!ANDhPlRPJ@9X!8Z zpo&*c^0AF+qcLa5x#=v%>E~MZ@82(Im3e82p%_D#;<=hf?i26ECm-)KH9oW>e~P;G zKTSbyfqM2I|1NhGJUu0v>}gT{Zq7r;wD)f|?TEa5_U_r*{LcBM!P6HNxBH!#sNBBs zPsi8x*^4*54*tfP{JS`6kM$1r$ObdHUH`pq|B3A=dB2`z&7XtG%O|QPTb+1zhP`i| z(eorn`;tEzVN#48UsZr^x zE6?)7yF@g5zD|0t_rLICf%&D!*)JaQuDDukz!=5&>e|}wrQXwL`5Mc{giIIn)lvMz zoE0xSbJ2v@rZ;jmyR+=y#O*w~^nCC6GyiseTAhqb z*d#XL%Kh1E9O`vgE&HQY0E`HbM zyx<#0J@_>0=GE3Fsg!@2v|KQN@kyrcs;_}eoh}Y`f0S5$#H-l43LHNYp!320j8W>T z1+J5y&A3ygQunVyQA2Oen{#ih`=)c39l3vh?PeW&+1{(|VRlhfZ~hy4*J}RRvg-}}t$FJ~(^Y%2TkV&#nA z{{qfbe6Qq@wU%XkuHTs5R-4{EH+R|MHefdY$#))L;}65Oi=AF{zDB=)v*+IKSm!DFffo#Y_m*BS zJU->UBI%)@qk`KhUmjLN*~{3P4HaKO`CLADd!(mN@Ko~A@aB}@W&tH>i)++ zwl&|_Qoy@S|9(e_6XMM?K5_dwKp|h;MZw!E3jmY zs$|<|%F1kYFEVg{TI-`1UuSJynfoeaLBPU-nWkTAKgaM*xwoa*{$Bk`_ZtxkngK^A zwHhqSum1Drx0vz0=bf{je*Cul^T(j`%YCMZ-}}pRvUZYZ3Gdx04O|NB=65c=-*-e> zzl1mZP|wWGNhjNOtDQR*TJ-48&E3D-U;dr(@#mYx<{ytPDG%7-p=on(i>%cbi8rNp z#UI8Qh|lTRu<*k|jzz+PDoh=PN1Rl`l=Ng3W78rxr5Mg$8N?%dzk;WD`rp>i>E~u{ zt=%Mgw)c)G*TSbO-g!tJb?*?Hn;pJx*3^Z}=VP5FY;`KRu=BJ<^#c{YfBS^h%OYc? zt}Tf0V>~y-jCq!MW*Q6cY42>l(~htC_Lki3eCoBvevh7*!ZP92)3-;~$9y=oLx(%6 z@?6}GoEAP~--&nFS&LF#XD<4A#(2~3beEY*KhGpQH@+q@Ppwp|)pvIP8UcBp*PYkS z?`=L-s7t_2g5d>XhD|9-m8=rd!lgzhe0$4;-Wi>DrMzQ#DIKWU56=M5er zHX_zre%;wqdAVX8uZWaXR#@jVDaV+?8F76(t4v%4Y#$r$v(@cS^;KNA>uyzvzDdo- zx3*8##c>__!RCDMX0&F_+R&N75ylrLMJ`hF3|jKVvyn&AXvWN#E$>dukli+w!!T|+ zN7D4UXTEIemXizqV<5*mVae1zcLEtU?tQmisHEiDRL?i14eu%i-xWVBN-)Tae4pmC zdwtWIgO_9?=AN}bTwCOSCwdoOz?Pr=A>095B2`0<+}0?`=Dzf=ny>Qn-M5qdRxGc6 zT&$rlzdGQ+r{+RM4T1=B9%dwJj*Kse8ik z%wMK&{k=C_5ypa7RBal0>P}Alb*(JNKZ<$#8ePvTN9Qr@-uXg9??|Udf`_3KqoaQ| zm#)2ttWw<(p%ih~Gq2B|ogJ|FwD9*H_FLQ2q}8_93b%e1VOW0CINr{grH@0wGW|rh zi1D|g4LV781qD@}^SG+q{5Kc7GA&yVKEwfiK$NnShqLvX5gh||42mA>=N%uZ@O z|70VFnA~Yoxyze*YWVt(?ov)_Uw9||M2$rIqW0rTuMTl7VfS+21Rq?&m$htewE2UL ziMlc&pgzz3|M%Qpf6LuimcOE$sUUH!X4#9IvThp>1RQCO@O3E($O!n;akcf4dkg>N z4cxg0&ABgRmGJ*q_Stx;&6Yy*W##hCQ}}-_C_NpPreN&a#UZwA@}IYhj_b8agu4jz zzs*$9k4u(1CVcVmpO8OyDkHD`-74xo<$IrRee93dg8Gq#vJ0$*e?L~O?mrd6I-z;i z=R-d?=j~>k-)wMfr-tWbwT*1|@6~@kZef4qw9$fq1r36ZM_vUxTqu6j(80~cE2*RV zsCiLy=5oK;T}ixJ3;fnEyYct7*qR?7qGe*X3t!a==Y9PtDQ4rH0FTbEZZ3Ts61y3a zBrD?A-8{?gDtQ@o&7JWBc!O$hQoTYPX*59(% zmdAyjME(*`op6E0MJK?fBh;gXN8p4DQb%6*+}>JkKF3zLVNED^Lc~G8iX*q| z*5sTt%DAz7zF^>)_Cn2y7n*f041pIqW(ZAGc4}J~lYHXy2^RkM8)f$Br}|~o{jyPA zz!Kx_z&atj@|fm!pP7FfXKOV)V!7zrsXbl$>7qF%($$Y%k3OXq{sEABF_*=yK^Zr@s*SH_ea+DEwc`b0Aby2}0gG8$?*{JH2S62N; zgKu=~n3(QiHScrfp7R0cD(~x6E%A}NqqA|`g z8C~2DI(>B7sC+k0#967MqW`1okB$S08ZVd1lpRz)t~Tjk$0UaE`Spc2G_tMac7M34 zs@ABQy?vp}%~hR|EZeKLaVfByhR>Iiy-iqT%B* zC*Z&ZA(#B^{{o(zuyo~p+{uyT@lko2e4B{5Qk@c)?7Y?8+Kuf63Z9vNe*9d$BGO2_ zOY}smN!`A}vmuw=Cm%Rl|L^EopQVQNN8_^h+?lJYv}op))r(g$e0ufl`YCtUgm;UU zPGjhLwJxq-pGibnDMurwLPx3Ndqr<-l88%*(lnPh9Rf)wviIm8a(4->nR@s3=e_>| zyq0KQWttoJvbDi@dEr&5)mv-MrG#k4-&%09`~!!US(ZFs@a<>*39qWxzpm|e$oa7) z^B%|h{@)DKC;d_`eRw1Hz2G{>J#Sphol6UwKldA-{Ty?om)Xp+`RS#l-ZM-xg`g`w z1MMcx&OTP^e9$3#^FI6S@13)<_s{B5<#zqKre)X0+V_Gnd&RoEmU7MP56qcg{rZ}{ zMcFT%PfyQC-PTE(qPI($>)aem;eY@BrGQ2DO`fj@{Vz!PDILf^JIAsZ)T{rzT^rnc 
zo8Gf)-KX05==HL*K>Z^`f3m+!Ob$knhA-C&XVdrdFcOnxc1I!azUV z*Iv~J7xUI9T$>^8BRTWnv$^aSo<9A!Bk?Sg)Qy#vzijUwS#^AG<*WAd56Vu<=U8(| zL{)3lRQP%KmDFwwD)N1F%X#as9jTkM7fy9(5(?lBXnNGQzx}OEfymBHsa(SLH9^0& z)MZ@WFs<^%hVSVKPa2kQEWPNm=4ZD0wy30MMGrz*Vs>oqko>;?R{fVHox5K@Fub#6 z*PJq)e=G+T7e=H;PE+f9(LC#EcXGk?X)}Hw)>^XR-|c&Och66=w2k|ju*|dk!41tn zTa)eYRqo@yWwxqy{w=Tb}<_qV9i|Ox^uG-1q-& z;d`^|cgejVp0^jJd|!ucS`(4&bH{$$+vfTCTsAqi4Bus}1gF)lQe9(qM!x?gcfHex ztJxho{=$~i?9KFkYE9ELjf~+8IB~)Ay4d;5tAgACF4_F+S6@|McjfT%>1G`Dq35SG zq=UNj3%)(Ea@bh=(*IQ#UYi#fJz$EmiMm=75@j2AchkR|9q$+aD$D2znf75t z@Jf}wxmMzO?m3N4xSqTx@NHK{Qs?}tP_O2uzNc|}ij;5tQ-37!a@W?+ z-#d+;2X4&?T;lz>{k{7BwAj75rvxJBsXdPPqYF!jE zjVp?^zq2u)@gMI(;crDbdawBVl@;Ee?K-)!<=&_2B}^wA*PC8v>{@#|pOQIhd- zr-(#OX{YI==o2&awpu>3Sh87MU;3iy+qcg@++I^|`0(`6W0!;>liX~VFE?FT;8r== z?b4^@W7+%vZamwe6V-BLu2nVT$=XRLH8=fEKcbt>l@U_e=6Z#ByOM%@;y=e{hR*xM zm1>^MxUSk49U8#Bnq`W^BH;kWDKC5<%wkH}+QPDX#WS5a$!QN=ybG?Z3Sg>Sv(dsa z%sjBeRJyw_HmW~NJKN>r&hGFtTkW^KH+}fGuHYGO?BOWUGi%mOud-`(+o~nS?SG^s zylGw1qv`e$#^vwcT%Dq{vwvg2Qs22;uAiQ|^k3sW@I}Ny@TkP+s#l#-KaNN>Pf_-E z`w?k;>&Vly6OVSt3Ra0!Y+qBTbn#^Hrw+dpr43gsn;SY7b}TgZdj4~1#gmCDX*$!M zU%z6VC6T4;f1qCF#SiPJXZ_e6s{>vaANaW8qw$BU{QD#Cx|=_oeTeHgGW!OY}>v?$|UR?*FqtNZ6<$gV%I zPg&jY@!f;pRkAN{SY}%At5ziYj`Y&*C#Fi7cP^y+>=EB>->J6OU1{&tR@a4Feq6r3 zF{$>)RqvZ$ujPG^@TuB+-nMf8rQN!6Z_Vy-?68{Z6&beNq(kR?wa~E#P3xFNm?NaF zF6Ovl-(_=dsx?1o!Jg`=kZ&uhs@Y=7yVP?fNFEc=P0YvYB9ouYu?ww`QS%=f}waZ@r+Ms8?)^vK%zw4pL_dK4pd~eMzz6o>N-b{HC^ zo)X#CHZw=*Okt_f=SznhUsyT#Wj)xjj#;HN(LC7YeZ?@zuH)ei)x);XvydiS}) z!D;W?DyB49h3o07Ey3q$yz_RjtNO&a9CXWP^2t|roPK~z!YebeSFLHnf_4IMrn4lOP%X4=I! zVYc}KG5auZ=T>Z-8r4c>3$F6S4&6F{mfZVd_^a6 z^P-Z@MJqy93f;JRbnBKI+cvd{-JX5O-DSe(?5DGYRxuRux}QCIn4>^;fvsGa`r1CB z9rBDn&&MC(*LC>g@G1G^TeltgA-~IV&&svSC?EODmmhUkOQzc0U)|XC{dXDld)C$8 zbjse|N_`czH7itI%+M`0NqW;?jU>&4)YlSS@^b3i)ec|iW=M(=bo?ZDGJ+u3RO4fT$5!L zIK8LvvD=gL^W~5C%bPRluTC{AbXR}Wn&j4^!>V(8!lU-z&rix#?KxKcZJoigg_CRU zOmy@)wT4MUZObFecQ+f1KmK7kYqyK#-;GVOE44#=@=AJ_v)q~25X<2CUEF?>h~1Yh zp8|9`QZ-CWu5`$s(|M$0R4!tlvt_Y+ztqM98~sE|+4*D~E-rGtyUq4y-=%3u2Nz!u z{5V-Fbd`bNk=G(Sm8x7VcIG}!jwldyFk}(Ce{uhxf8m=QGivPJKljgK_&>q?#Rcit z-^xGF`roo>*~0QYjSYW}>&rXTS!h;hx6zjyhznm8y`O%|Zc$f9exw+Q83$#O42m~$n%ROc~RWDZR_O@K-sam0TmP&n} zGC5Z@Om^GMj<7Dd-Ooz}9eVU1y`0Bm`=trgk;r*3e&6llZ0WTVTguBaAlm`7&GVca z4;|gJ>ReQ$ssq!Xf=d^b63YJ=e3H^-yQJ#&dcxk{r|+@|xZZB#l|FMuqrSd=$<}`_ z7lw9=O*-7p|M*ZV_c5cC=MTOlZ+u{o65W-tHM;3W+ZU1TBKBW|!Y^6xowrUa{>Sz^ zp*yqeHmVh}Ig4_+*8_TW8dZ0E88+^*|MP)4>)M*kZA}XpCRJ^o_vH=SV$~HsGmXk* z@(OiI7jAgCu|sC%?3SYbh1)yj&6aQ}uK@n2YBMbA=ImVd{r&m&3G=FU8DES3&v@yhc>THX`wKFb9xr^n zko|pUn$Syr)d!6%KXyAfD$555+J9QOF%gc#s zzEVGa{+wyZ@<)HBJoiuKDG{kxo@+JU5L&@96x}0x*-7n2H9?2#yQLP=`Uk^P z%cAe@DmASBmJ_l*&NfNp_`KUk=g4?xh+BV4J|3nvk9!%D#yrOCZ(g8NyUW!SObjmnwEPBo-4EIYtiZ;i^m~R|nJ!zTT11Fm)rpUGpre?K_gk>3X&v9_5M$gP zwjlHJvY9gz1CL&P{mxV2PeJ|nHO-H`bnAH}PHlY5wUV_+lCj#_yW0E3ruGasF9*&e zoeXR3gO~f6Hn|x+s_?BYpBIxJ6SZ|mQqjpts)fJ4Waix3;+dEILXqVP)00PSQ~2k% zoVi!`=F#!YGCn1NjdPXfS+7_TC8GMEl%+3XbGl~AGXME`#~2EEc+88Qo;rWt{(p@G zzh7MD&5m(|zoZNAj+I5*#3 ze%pG<>1!7(e!$7YbL8pi>4($y7e0ORyI0(Z^K1KU!2m`Pe_u6u=e93DKRrF%bwBOs z<;y0wThum`KTmsdX6o%NYqu0SO?RBXoMno`7tVsYdR;PkPZjh2|NEP``(8K0qc7!A zRYqxNGQwKYV#=l$3P=3ht@qNjB%jHO!%FU!|F!Uh6Y&ylb4m+7S8?o{Ypg0aZ$+e< zO5=rWMTtwVJwsMS-1`%rk)JMid37+8*MUfuAJr!v&%WUjKHuuC{BPf5T;KnFdUfKNj=GB4g7oZ!*cZ%2 zmu~y16lC1@+man1)Y>g#y87^^{cd`*j+v!Zd;j)+al|TP=9C7Z9nKBSbCmzByYy@E zIcts#@d%r(#`P;TnqH(n-WjvADDlh;!x{g-I@!eO8UNe(Yf=8aJ=uM|%NJ#A(^Gmd z`_%bawvUqE@`f;9T^-7MJ2ui~T}ND7iQxYYyH-fd>JP4MAie&wV5Ygx81T)h0{*1osF*BEvmKVz5prvI6VH{Y4Zw}c-(dc1$a&f@2b 
zYJY$G#Ic?;IXGWXpXX+u49m&nbuzQfa-%lSkYW$neSHQH3J5sv5`38_5F!=m;3Og{vQ&k~LBu5}AW(2hz!H|sK&vIYcZ+%?JSDR(iOZd3}e@{$kS?FuN9_W|>LzwJl_K{qRvzk?QsA%?RQicSKIX{Q=rnO=J+>I0BP={Sm;xHIIc4YTvMdZ<@{M7xT8nhe@(;`h zIc~{kHE77NT13x#VZbUb?i+E+H?gg>wW@`IWg+_^{;~-V?ppG;eOOm$_#o-P(T48E z7rl((3|$Ss7BaA$Wn__B^Y-CgBM&}V6)Rt#ddJ_)-S0W1FERz}DtW22;K)V$QyZqu zmQvtyTw%`TDA1Cic*M!VNrKCF0ps-@5(S0=dkr7>-ahA;_2t~o*}EMQ4v01|HM;5- zE>KQ+-g!=tZLX5e7N-{`ntuv@aUbHBoOHqa%EM1yv(0jM{Oez7-@fh68tvV3iWe5? zDY66#ERrrgRqF7^eA%N@XSa7Z?r37nKg9RudDn^&X5G6Lm0xGLHYTqQ8r#bQ+MV1v+!nbVcqj9)?c0I4@68*w zEcDY6v40>ldq>8hs5~x*74A_Sj(*+w>plpJYo69E_BqCIE9S%6+&w>U-j6MRbFe|1 z!8IXvQLu{f&AIA-a@O|SzHQAnXlXnrC-(N3tMII%+q_D5w>3CBveyZ;B)kw8FMK`i z=1~WqqYc~3#Oj{>o)M*Ie*a(M0nvs(hl6K0H%?4A86whkq3}?@80P`mi)_|29xdX` z-g#4ClU$=UmvafTk<1-%vQT)Ia6tNf)#q+Y)uQ58`fM?bC*pltjb*F@mQP6Lc6jJ> zX#M@2%4gRf*r*Xuar!LVM1d6>?{4G>%?pg-Ibz9UQF-&G-*L;9BNLf+GX3oO827?D8B?=;zf7d)%XDkNtOHyF!E>~FMW&CfLkniv?ejS60&yl(pTn-!e_uUoR zB~buL-26NV!Ogibj3p@;6-G7H` zk*}o>o+&gL3dFYdywI59S`fP9;%#ITOS& zJ0vp|WE#iE1G(FXm68q>tg{lrg;Hasf%y1Nw; zq7HYCGPKCP`LmUG`OzHv>$28{1^H)>pEKIEva17pXnUL`hAVVC5f zx-+r_YItME;fB>8J1y?GJ)aTFs9|-zZ?-wyO9nhYE-~z63T#YsLc&+6=4=op~bg z`2K9zplO(YImvK;N*?DPP!#S-Jo>d5=Cp`@Mh@$HTWs(BNwPJ1l)#vLCw0Y?bDV+c zDq)8lt}0(v=vlEv!amVusj$YL9Umd?QZP$6kXIZj_k(qpe&QXyo;ZmPK_&r?NiAL( z_dob1J)QWhVtIVTg|jRon;>6!iUv+b6b26=zn%UzZT->NzK|H{pr)z`&|#0`jB2Z zgYVL)qZzoUh4cG^W_~Um9uud3UY}It{v1E|{%*PZsV>p(CueRJUfOk4{nFO# z^qB^Ui#o*%m1UoFzx+8_>~iAGfMti<`MvM%+M06f%ehHLsY<%R&XGF`Iz==B4sdw; zXq&uTAhYr04IbY+2hYFUSMqYwn|phs`(!MY3gY+GMC!%w(g@d!o3#7?ovAIH!k&kY zbgDWYYMG?q$aHd&YWI^k zNt>?!?V9&sX9W)0wiT^7AQYe%q4%fJ1&d7DtroA~@^}E(rO| zHVd`;owMlArmBV|9uqyD)!p@76)LT}e($$cdp;gZn9O&nsr93~#m38x(oyZj&(C@O z{k3&z{{5^=t3oq{ukT(wYetb?+@6q4X=h(`O1GT*;&J0yd&D1xc?liX4;u0~EKX0? zpMF(4{M5X;)+yib)fPRy6{@+w$@krsm8y|VKPDN~)tsDQ&?w?F$0OQu;_UEqr+bUl zpSC^vcg+6(sYR~hssC?&E_(1KGuK3J!kn9%Jy#l~E{!?b6|^qK(sQ=iTK5N~T&^N* zkGx_R$TieUA3FO4lumDc~0xkB4gdff0L~_I`)0s`TW+&iHmou+;i3} z`S544$~8y1QwRR(>un1yoBONC-J7vPw5O{zfT`l`Ez``qyLQ@qbg7awS+Q@qP3dc~ z$qNs!lU{rB`+a>Qrsl~jJXz+=*I~B~Ue@e?zxMmwJ%7I?MDAO(Kjq8}rnpHOfi*uL zwu>)y`uT9xQcu;pyY2I>lC0N0Xi>11UBdeC`J2WwNVSFI|L+PJc?k$yWF}*2It-kJzHMM>E>FKmt zmBrqNg99oF&Cq4sT38{$zIkz98Qm+r_rZd=YgHyWUpB zZ0~M-(ZQI%<<(hzt0!~cDk$DMCLZ@fFMOTMwD>wnh1gv_*BeWWXBwxkiwtTkBJ*d{gnbH zK3knMLqA`+qs-!H(fH9VrOZl0_)??8^?kL`no&Aac`A2L{CU0Z%d1#tzj?cITK4Jh z`w?~7JMlx4(a|);j=5`nnFU0ipP8wASp%Tp)sqSx1SU*6xp-mhME+RV*XRZ`*UWxv0#S2fMDQ95t; zdrQoYf`c5A5nFtc67T%!fB(bpVfyK5y1UF&=AD^$IkHRq{J90+53Gp{K6%J>Z_0}c z=BXba9c6mh!<{`-%)0PT#`5O(9NqQv>i4Zy@h$UK^UEps|MTs1hu~S2`or<){+yO%GbRkamF-XpBs_a zR)rSNG+MeiWNnnejS?=gGdlO{Zae?__FDJ#wY8r)9N#eHC^<*@$X7bfWaE=rkX`@n z^qZS=Lz6{nL~DM%G!}aQQ2Wwye|_U$X)`<2kH|;!yly?y7kK)0{$AD6E|E#=t+q^6 z4}H<;^*Li&&!)dmG9``EdM+|7+?T7Uq4inp#g;(!C;Sc^Z|7RsuI@guKKt>O=70~`7vo-uuqx)G$FN`8yv2ur|VDJn0#E~p6mYR|2M>U?vZTS^q~5F^#!Ry z{S`c6n{@y9%rtTpm#<$ZHGSSBoyZ9-(lL4tTY{I{E_c*hacxDv2 zn0Gun(kW~;_iBW=#r2@CbtfN8`cR`C`b%)8-QMOv)4cop?k;KFF+;3V^yPux{1c~V z7%mRYh|;@%_Wi=$6PmO`o#C#z zrTM;E{jIpIo5Y_S6`ya;WOb2z&a&uZG+shyFg&XgF3aeW$cr56= zhIm5&gZFK0R!gzcZPSLo&6g_a$4&flUiy4k#~Mc&z@<#-87)bI;uU zp!aO7Sfct}^y;cfuZf%{M)v)XI z;{G&Fv)MC^mYPT{D!(tgYIm%7WrgtDRd@4Pr9E~2OY9I`lIS3!Ez!G9S7%McMkn_< zwP%|i$LzdxSNQX$RO{4FOQw49ls{RyTyCma?&%q_QDq8mZbU74U2@seX%PXcW z+x@er_0)r>P>vlYls_@&}G>BQ~B?cU4d_NuhM z`=m8dIeLYcN%mhg$={c`ESK-zv*Ex`W>FPmmHpwlGBGXMCE+(Z=1)6);$kw--37(R zA2~%l5&qJ>hq-8CV!=n(>-jqEXS**i^EF+3;%auB;c>@`)7GKyZYchI*uJu~Hk#kx zV9wT-A0G3Ug!?mGf0(4eWyrAatJ~yzf0T@8=lI_+UnF~Hb9(=l+}mDuGq3oa3P15W zyzJA{(;Az$Ujm{XH~awHe+e<-e0!_sBMJ7N8~ciZpR8C&n$_hZv) 
z5$&>k_wD)jGb?w!J-$oUa`(F{s}zg(Md-`>Mm*b^abx4x$Nse`udeuq82misu|MK? z!#QrT-I2RWUoXkF{5SDqT-h|kOMic#_xN8Ie}b`fpU?Yy{+`WjtxJ-Rr=MZU;Ry3# zF1WfX^pgK|?z8b1KJS~G+EM)MO#D)>t4u-b|0N%rrZe+&cF;`Y1C;T!X{KRc=n^Byr6@4gGy` zwyUHCq+a(uWUI=%lYB0|sa8MGX!)66dnQLp?=F2Eb~(R6|7QLMS#GmQ5x)#inK-OT zDJxo$;<3Zu{%(}rtRGuKR!#YDHzog0g~O~DQgiG6X3x&ojh+$|`oQhw{NJ$^j3{%n717=stdr1)L%ytv49sngZ0PfytApE$;7 ztGZE+Q(%(if_9m2>*6B0QqCkaJ@cLKC-(k^=%xH;zwej7^V-W9{Djk1!m#|^9l2co zPbsr!^kqMq$*WZHae92+#n$h(U*BlIciJ6Ww@Yz>Gsmi+$`x5U!Cq^lw(`g_*fx>-*YE%=D=3Q{M@DH`%j#bztrYh&0fc%KhL)M+M2+(cb3l4+rDG_{1-Qim&NR= zcs%cos@Ind-xi*~^Z5Id$*oWKevf|f|LxP$`uok^O8&TXv+Stpy&fUn39%>FWXP;* zW1hptzixGOw*3C(pMG50RVrD`dhBTT_0+Sk_I|oo?H%$tXwLk-PH%3^GGE`WC-pkJ zEXUE>IPHwa+@h$});pW-N17%5;Ac8?a=!iK>pSMoo9|Z{CTx%2n-RxfpK zPyg~}(KWmBX>)BVPwJH9-F>C3`SFK>?Uk&yyr280FPQssN{!NVOCg5hgFBpgF4=tO z;A0c`(VN-UR%`}OxtjOIT+{Zo~eNnzI>h4?bNkIa8{ZxUm{$He>e`7ZB| zAB8NPLxs3r+-Ujmdj0;YEmJzCo4oSh+o~04zM;F3A^AXFF_X;J^DjOpf70GszoKmAvh_K4p68e4oPDdd-0$sVMdy1a z&%f{aR`oWae46Zy`moMF%dS+c+OX%J8}rRAFBfdd{rs`E(Arkx;`Z$8Zs%((p3LM@ zR@{8J?4S7E`?&|3m;@9|q8lQTgV!fI->m+*YL7|zzWYZR?QYds1<%v{-EuQb{!;(t z>v>c1x4)aP{MD6@9OfsQ=kh;kZSoH>nV~pUamo9ff{PcL@)VXlu;{q9Hu^;S&${N+B~%<@HH>Tmh_KZz_QOBi2sIVxW5`fa<(FyXV_ zZQtAan?5yfIoK{zw;_f3wAQBQ9qXIFiN$TcxWvo4JUH-$Q%r{Wkt}z%v{|m&{QQ5hYHQ=3hZ}~!y{oUTA3yQ>mw>|)&Rk1YUvhj- z{t~yhR%v(nFO^0`3ncC1bnddZs!^A3%6s7ET=C~e;o_|NuUBX7y7!>-_om7xCn{xo z>vUMT(vrkh^S^!{I=I-a_mELqPVv=-ZPWk${x0n+kSW4zbBSf`u46|X zLXr+dTfaQ3zvy#^N#a3ik%p?TRgRTU=G^oAxh7NG`IPf@z2K$4KVNmellbSHwQosn zuGHN%7mgMm(7Vj)@8zJl3iKLf6R`J*yB)cEqdmk&&h%S+CBOjdjRHRPO+*2L}MjJXra zMC@N!+DY$O#(bWc{oD(t?cDR;@BI3UcMiYErG@=3YO21@S{yy)_$yJxl{P{(K57YE z_6;I+8b2IfG#7f8w`PZ>fh`J2p%&DXf`WIQa>u z?R$3XUvpkIg=tPu%s-*dvTa7%jT=l(KMs~E&uGe9E6!W~I__$A;j#YsRkLngy7YVL ztTgdcZ*JawcY8&`|Bgnl`*r@anuYx5*}S~S()j$Ztip|tJM+Yu8;>!(kbm0?o)S?$ z)N69F>~PY83;XNs-`@Uq^7{Rf>j&dcc$Y4md3XE!DRR$)j;(+5Q0U_u%SSI&@9gbm zJ27vOTU^59rp_)-rB(z4My?)z0GQg>&_OJFGwPB16xsHELts-?mrh^uNBYUC{gK{`bFK)}du@ zWS(t&-m-MTD-u27J-ZVMRtor^={qF9wB02ZJr}4>J zE$`mazFMX3vcTQtlfz%`TI^IcJHICXVDJ57yzEPSU-vJ~pFdqSc$rUI!_AcpLdWft zJD$l~TxEU8bcn5mk3(t4LkT8<4_xKf)YU*2qa6JO)lwtfF(^9RS@n|+zO@9q0DyEoT-vp#(5m|s(mD3DZFri&NFm4)SWf%7``y5vWwT1+x*)7w| za-%9=F8z0D=Vg<~okibkS3bHUpOGea@~S_VXpO6Q=DN$5XTQEL-Y0iq&6SFxEmzh= zhJSi?He0IN?0|06mP_AVE^RvMGMWFMaM+FY@5MLO{WYBHl#x55?d1X=fqk>gXDdX6 zKQY-+#%z7>)D$@-)=&HajPKJcpH8hhmoTr~=kV3FzvuN`x$W5}V`&p*@?e66Yl6|# z=f@=;JP;9T;fjd<&3usA$>Y)zPg}7j&wqb@E?tt9y{d1ng82I1*I4Q{7IuI6`Du5; z#t+ZHg~D46ko3v;W2@V&Kq!PiMJ5W9G$Ux5WN^t1igPuuc5P|EXa^k9g(6`Nn+tnIG&O z@+-Ihjqsaxn^RCMQ7d%SlH1p+U-=x(IOsO>0Z*Fav$MZ_XT4sRQGaG@_Vy*uf|Hl{ z%+%V|xnki#=F6^i0rrJQTh>H(Gx!J`Okq)BJhJ$a;i~-W?;DQc~gRlO*x_WQwsj2frPVQ|u$@j?JYRiW$clUjgiT&GXe}!M6fMfgR zMQ1jwIj%mh!bvZFkHXf{S6QL+<%InG)yrnx^A8h?h<~?U?isX)m zjt}^!RXWx+pWb}kBldhoCu90;=YJob$xoWUzCN&^=CGdHn{7{j=80r6>^#ir#Ce-b z`ceIdgTKDMK5JGi6Vh$QUeaNjZSeB)^6wc73>gkDV9#avC6Lu-7|>jJX}#~|KW_H> zKHEQQJtX>xFM6>XuWjPr36bkIzM9Tx$FCH-HLE{zdV8{llhF;;Ya0%>@5)iyt#>`+ znIO|eJ_h>@Z+1I%X`9vb3o&-EFmNntW@IrusNa9@U+n5I+o?hBr)R~ppHl8$^7(<( z{o-dnuPeVl+p&&&+Wb|9UYC}hR#WwwVp7<2J| zzqV)D#l+h6%5g$}|9mh-{E^eIC8`b&?nK{J zUSnQ=O|+#*>etVz_WiPLc}~ykzo;K6m+qI@_^4*-^*Cvr(${-bGct;w`<-9$t}!Fw z^U81SYTffTfBpNt{=-#mo5DwnE*{#JecjP(n$Anf)Wyb(d~bJslWKe|`PRUpYMbJn ze|wB&4m(90{I%ioa{t+P5A2x5@$J!4@1l2yEG^eFOXsleFZ$XQv%AVS?}ee-^{-yD zUcagfV7Ka=tU9q>F09e2F>i{2LEQxD_f@SInE5kyuej{=|MI`5jnxa*8DIII|F_!v z-Lf6p_w#>7{=4dH@n1v#e&6!!M}2wIeTBE5yuUrvxJ3KJy`SEuKL2yzs6Tc zCFpF)q$87l*mLI^UQ<`|pB8gziI48e;AKYHCJ(1um1gbTVX{_m%>%Z#&E_|!HOedg 
z2xw^Dop-lt*X^G(K0T{mCGcW-?!~{~-IKO0DVcdk@e*gjy_qA&@L%IsYQCLk?c}4t#?~Rk$Wg(RND@X{LUTS{I!T9fJm+0lz>(hG_HkaMaR$O;2-7W9lpVKRxf|s39 zS?V>_Oj+>3sd;UTe4W96j(?GK_i5JYwv|$B-gMBRcG1NI_c(9q?)7Q)ddgz|b@Z>V zc@{pg&^}^q;e3zAL#?XS-`=?0Xgt9DkX@iK`7PHAwwDY06h9dFR$s|9SK&Ck;f#I; z!xsh)?&jG~1KqBuzL1-?*k|A3z^5iM3j1y!TbI2f@x}4-Uxxliq@`mx)Mtd%Zu+J1 zv-ItO-z+76;#Y?W{>*yEX1i+9rf1(Be^$LylXaMKKz%*iY~fX*wc%PJE0%3*O0Qzt z!MZM?k^jj_(~H8)E2MYVJz%pvd?W9Z+F|BJy|yJUCRiSoP?waqta9m_XQQU>KX;O` z)`vOPwSMbfe>GWYv}9t)%u61RLaffNF$;W^GAY3Slm8^$nJIpAT|d37{&VD1zh@|S zc8-(h44sXF35%q%x37EomMJ{{Px{r`w{3U!6gjV(zh6oGirnrBkGwaxw9HGw-}iR! zN_>8$GC6;Rz;vy-TrO1x*}Jy9R7w8DGfA`7O<5*Upsl(3qS}JmmGS#0`ph))oMkGj z|9M|^VCA!WQ?=jky?#vJ#OD7Wo}2sLu6g&Q=2%Pm-VH*6^`!CA$?T<5|4lXC75eW{Xy<%`eJb7yGne=%o}8pMIi+*zLSdHDWf?y%uFSo& z!_Sn<{K@mwzgvxW$1mD``OrBdkN?a4?p{24(e&&KrsZ!AC4CoHcvtaIO0=X;pfBO( z$y*UU-;t1d#8p=fzS4rC;ktt+N1kXj%SwE-{{sm)>garEbjhftL(Sy z>+N3cTMb#x)xnvPg(`llxHp$x$$a7S-qgEw;<8Ilx29zB_$IV;wuxl@;gK|2VP-OS zA?F9VkDL$L#rClOX?*|e%+DBezMLsnv*Y&qJM1rgou7HNsLAi~yU<3RMh9k-`a1pf zvjr5kgs-%5dTG@CPA@w-L$s1<@1nxx3hY&_3>;BgTek{mpW_u!lwdf2@DM}Ljg71_ zyBHmEM1Sa>uCFLa#N}pa~-B}X!d*|g_b(%KyFSf+kl(;?Av@HL!CC08i zFlbqzZqThOlXL=uEUSJj&|)ci-Q&7E;Ca=F2|tZ8FG-0mikfA{Sj(R|adXkYw33(~Lh0OMZUx z)r?uZL|sOz=e78jWH!gutb2;0LU*X^pXdx0fBI#eY|*m^oIC4Yu3F_I{A$ZhcB|G4 zzwJ8b2`cSNzvX`|@Ji_B39~O79pKfq5_rJF;{DdiM>T4S_%yb>l55-SPqh_I@>2*n z#Lr~xHZx#(`iV)dQT0~N*S^#TcBP&@^x|E!g>%Nq#=k-7H;N|-DPNef>w%?jk!-t6x9em%haB?eEZc0@wD>y?$BdcEeeY zo12$^JLUi5+KP+qQRSUqf6bJdXO((9^Sa@O*ERip7X520H&t@yU7IM}#IN`u=l;If zZMpZ=_P@WOwfl?L%x*1{^b<2(jh>uU%KVf%>C4v7UJGqPRcq&+DGb~3u{})glB4l2 zjfnVX>-g&QWo(s~f0LVfxw_x8Y3<6?hu^cmgf;7z+;ruR-+%YHedQ-F$?p%1R)()% z7G!>92y(Oi{A94vT_RI5l z{CE;J7;pOX`O@xe)#4*9pLc)$yZe2e@$t^*%wd>*!N2R0IZW7sF_!j!^&o|4u64U>DvimBLY3jeOPJAj~r+c^h%%o1? zdHa^jE%S@jeQX!0slQ%MUhg6M#Alk}5w~i8ZMw43-YaaaM8Ww!-8VYbo1SH?{1tTC zfZN_fd7t0(Cl`vo#{S#aQ~OD^^6S?1sV{E(emI%z_VU~&fA#efRIIjc^eqf>5D!dd zl(pjNxwi0Uqx38RuMgYUG_1VdugMn?%Hy)~t9W%d9d!89F=o^JD+i-1TykH2xi)ryw43Vcq|e z>6g-i=36NKS@7V2;R8*M@SV0hHppJkNnBD{bwAK@J06C4+^H($8A zI{ftc)6-9W3e|pcV&UOQpEuS_TI^G&`7en#Gi=p#KjGyaZ*q3Ov^1FDl5SX~E2(=} z`^$xY3M>8ef8U$?r~4gWb(DXe|FZ6l8%-F3W8OF$c(BP&vVY9<-!dRGPT7_DNs66; ztQ2#E!M#gMy;J}3bARFLP@UxVyyLTdWDJ*Gs#WU7hBrQ8q2K@>A@|psiC(s&4N-*7lQ!cTd^B zO+9OVG<|s&bW!=@`8hvV@A>n~=H=!;wwC1|ro8!4I5{VNi=&>L_3GWXygZ(AO+8~N zc;TNzQ8|}OMuH-bEBoPH+ZCl_FCBcp|NlDfyXGgNVx_lcURJX#|2F4|W@Lbo$T_{4 z-E6%k#-gVbKk6tRyT7US+m`^(z5n;r+}^nNWx>R>ubh)r61Tc9J$XX!pPR7y;k*0l zf{%sNdrF0#*M9$FqjS-h7p_KCSNzjzcY^zTE`1siN zWKznVU8UKTuh(ur!y)PJ>odht90u1^ZQT6U(fgam~?ch)6Y-4JXiEYGPl^VoYt1#kbt^G`b^u4{*0;pOnXR~_hOvhKyl_Q!7;g*QFye){Ql`06K3d@HhTLxc72 z=dGUJWnXoF4O7J=rvM$%79~H1O_BmW9tYEJZ_72Rx#AJIF{yP*R%obJ$*RSlPg%ZO zxnx4zhO~!I?>MIK{Sx_UoyeLfni#IE;x!mINzv?2N z+I;5gJ(FYnS!7FIXg_xUe?xz@TCKkH{usrptHP((?fo_5m{c(Dix3N0v6(Dqtk_-V zUS9Mr@O^y!=dCry_4DF%s(n9&>ZQ5OW-Z+^QzQC$C?*4uf%eX9POMCH8-yD#f)ewy!U#JoGjAzZdm z9$Hg0^I5oKZXdeaB~a(EdP(@DOpQ&V>lRqBF*HeR>HqoZsc~VW-$OamtSOR`=Vuxp z&*-T7{w}v7``(_)rrU4lSHH8g75tt1nSWV!eAP?U$bB_CE%(29+;49tAfJAAmg@Vx z-{(oXZqL6T_x^2lxI&8SlEP04w`1RYs{Y^1pYp0_LnACjRj`8bgfw%~D(>P5>-T(Hr4zr?Y1Q{Ut*h}FDcgkI z!#tC-t!6BXFiKmuM#A2o|LiL5=6LCpXO)Ior9uW)Gdm2{6hF@po_qMq%gawMX9hoc z7Qd=0H|k=>&!VYZaXY#@Z06l7_j!Ice!}zpwb4b>Isf@>=rps}+-B;g|eDGQR{eMdPYj=8m%fF#2svSDzTkdVitJYWE25c?P z6_5(&a-3**tK~uP&Z5-f7N$$gYn>t@HzYI)cD3=zX5CuDDXccbyRi6G<~w0^KND$V z(B?rF#f%*VbwO`$ZS57yytAV)?d`2qp{vha2|7Q|_G`ga#bu9Q+}XJ~FRjAs*}1u| z55_K#HIonea8Rh(f4R>@!S{U=luEt5r^;3FCtY-Bnp?8s@$Ra3n_{jmcE0rNl!)%{ z#uxMKr62A;_s3IgpPgr2q@}~F*Y8f(^se%_d;Qi;$EMQcxA)(FzJB7*^<`eqN+h55 
zhR2I%xLb-od*iOuY~Osg;DH@m7{jMy4FYur{r|36J1OS0(-A zpGMsc($PzjADrK6o#J#jJawYn-GD_Yes`yqJ88Xs`Q*G$_1Afi751dQX`44|ch#w+ z$e$N$JZC9%ez|fnZFAb$GgsD{C(8EbxR`Cpm~2)0>dDM|O$QoYuA6C`{;E{r=+{{d zYHjTY+;_}lH{n&}U~Avvw4w39x0~tnUp_s1U!q>;PP;*g7yqxryB1E5uZ!g0$h~W| z!2g8Wiw%s-J~tffPjrjaFc`kbxwUDg%?z`JCNkF?H;Hvt9b|X!^~o~7^5&9@eM51< zt37w3UhRKZ-!AKMCpUUe-1LMO8wFT5&CXD^u0QwXaMU(Y;r){(^=lNK-VU3dZeOkH zbMb-58`I$FC;m7FFRfmlXZ7@NQ~CraMGk{SmBv0Z`EUC-3OD4ixukO(WdD3Nc2~LS z;#G~=$E!PZaw<#ze`055zxIC8$KLxB@?v*+z89~mD(l=h*>N$ibll-z+gRo2$Q$Q8 zDVS-SzRl+9^45?S5!z1YnGT+K`lfMm)rF{C1>9FP&d>a-erDmcm79L;>(|{=!TtLG zMb-L{o&R_2=)ZDR`1)(Uw@Rr|;oo;hXdGYnH2TSH#>uJ&?^b+z_PY7Yn}w^~1QdIN z68cZHZOgvCE-k^J;DN*B@{A=LGG?o}_sO_zuq=BsBkqw^$hEc6$NirjDaa_=;Ft0K z-rls28g@xhyq9Y(h-}TedWysMiRqp2_KH=nOG+2;f@b7z#XWib@woi-RU`vXF1Bd3^8NYc>CN50?(HgFZIX2@qU%M}N=0kC?hB@70yWQgfB8)FouPiR z^zVhGkDs5}sdE11N7i$#kI#Klcz@ddm!DSIDs77%o@diFxu33PoIk@^ps+%-?mqv?%X_E z?XOZ-r>CobpZw{qa{lp1kJR5Uo7X>`c!|~a5dWd{GxL3SovSg_X)k4N>tRQ#zWI@Njh_fv}ki{% z@D<)yoV;o|5j_95O2_DA=&$DPdCixpuY14V&Z$-Y$YOIAhrgejlDK=>>FVYC z3}5@ZU4N6VXUwxM=6}}D^sj#|#8@^KWT*dC;l4LhQUCQsX6JIh$>06!wYP?<{!d(d z{mG-w;)AZDiuV^;)t~#aSM<}zV^g18-e1pGIC1Ir`j_p;K6gF~ee(6b-86miI&FXZ z+xKM3Iva(p3NMMj?ETNmq{ORp>UhX>bLPaQ0Slen6mIXS+$?j2**vk}FPE5o`TKi! zP5xQ5G3}1r*vEM}ZpV^}^Yd(s!8$YsXr#~@62uE=zAiuF@M?2UDEe+Cd~U)@0j;mh(#vPyLszO_3tvA zp}+o^TRCv_F^BEZ54M~C?BQvLuZkBJ)|_$P!Kz>&$72;{65}8w@qhBk$xEL85I-9m z?W3dg;Kr`?62~NhcFo>1XL;{ME(6&Wui3@2^lOW^OgLZNZ`{K=Rr}e3g|-q4yR8xp ziW;OJE-R7HOi+_BxPLsiME7*ZO(TW)=$RKDon5dYwAyoj%-Wume5O~)OZ(K~v_H++ z=zYA_J6||$W|oXy*3W5QPF{;q+a{NkC-JZH&-S!08y~!VsrG-KJkyf{=T^P4l|J1f zIoXEgOkb;0K*XdZ=k8--9}{z3Zh!Ol-oJG5(l?h)4+*8N_}3WlX|vvswl=lMT?_bF z*}cuA??q~ruE}1f;Mii3Tb(ui;xa$?uxd}fygRv(ZBh?dZE(?? z)MDAS|4ZG@cU6A_F5X(|EA#EQ+Eulr2jQ#pxjmP+FEy)OzBuP^;@XFWEP5BWOq!At z7O9XUnjdj5Mt`En^?NT$&3CAAPG8+NW&h=Rx#_k0b0%9cN6lVY%ke>=ZQH*O|1U4z z$MxL**S>$fTK{EKKb>l{t)I2|SAo#}V1}+$HLON=8#o*i9E=ntJ}jSKH_0ZIyWK)o zu%pSLqETzh-y?RPo^<;@`5gC{=i+?(qA0td#-*L-bieyIX5@lT=A^%r1~p1UR72FuJz~s*X&ic(RH6K@B7C?tMsy9$kL>J zb=)idf0S(x`QVu-WoexxGb1uTA-Z_E_@DF=`RsQ|&UeFK1VkUZCaG}!>!w@J-j^Bg zw>96nzcJ!F^VaQ`L*8CHuqKi_FNHnvP4wI@?-@25GZW^%KX|cKe;04=KZkgMbsNtm z-+1@ZZR-EXb zxTJOZ4X=|#qgi`!$T@?*zrUMHn{Vil2>+rJwWNK~E5)@txVhQYgTD5=E}Efr*eRnX z``(_Nom{gN-xx)2PV2p?YnFVBXKzYbpYhCwz0K+iJFA|}Og~e|e7sL)>SODsEn$q_ zDhz^4uNvPoIQzup!NFW_GmE`vf7@5Bm+_q?^ODtxyZ0~O!zV}19Zs9j+4$_iaqHJl z50^ik_0Fu>b7}PdOocBCh=4!SJ-ww3=buc-ZHDj~j77nE|``Q0`JZzUzxN~#LMvuONW3T?d?`-6Hrg>Q`^DXxdU*Qjr6H$R zJ)3*UKF;Eo#_ew>tKH<)5*p++zcd{5u6i$gO|3vsT71^dkE_&SYXKIWyD(w1;dk{+ zfxWTU1y+Quk5&G9@AZrG?-@CpFK>7o_$2Lanr|NpiKaKZpHJN;|Lx14y#43H)+QH}JnYRg7C)Ex z=#zPUb}9E8<*Eu5{inGWKO!qNvyFGIoW8Uz#mURwWQ$->lw$4qK&AO_?r-&PU821VN4&Qi-tErc@SuA}9@8?$MQ#jpmHe#sR{Y3W z@{rBE^0LIsf6sD7L;PNL9=iE*tvZkRg>(8I3Y=$8D7r4YGVd_I{H571ZaOMHVwbOS z=$E|ydvXXzl*udZ^DmO`Jy#R4C!QZG1Rl&il{(eZLGA9f_LuaB6ksE0*Ad>Hch) z_A@L(?tc`r(fNJ-{gUbI?_$2q_B{Uh-M4jo^L@gLL(+F|P5e38zQ?Pu z%gpx^&&nr>S6BQn*5cKyJtisnq2p3OQwwLxtu2{pXQlMq+NQDHlHyPbOuW$NC@m7p zTOs&1Z)K1pvwn$}LCgCowfBrCbn~UIxMcdmgTd!)P{e^RZ z`}o;zmDl_3>vBy!zskO6n)s~;x!z`{CKx$pZ*A&++{pg#c_hC?(7y@Wr$kjXIzZ=7XZ z%g!QE@+9Gw@$dfGON7!E2jIfZa%#(HQHvDlS@Nv zns7Al#Kx*%o1U{T&P|-l`QWjjQf$S-Xlt$dPwX{wZdG6TEG_l4UD!^>#C65f&7b++ zN|s-Lz@A_8xKl$&cE+j$=Vob(H!ahi<7xl<&E`*6!{a6MFUVhelAz1STb6WizSwf# zWJe>OQ~fWsFX<|XDTiN~sJlP8s_Q?~BROLkt*+L;6~_}hzJ2GBGHpl0_PM3c+i;F;_mX8LGMU#FC%%#h4m|Jo>h;bGJ63Ud zUf$Jr@o?;Z6Z7-VFFqVkNEc_BF}-;E#4lBjpM0;%lzw<@Ba_i<&^y=0+3V`La1O)z zuW`BA)520JO{QCV?tG`6dq%c3v*dczJucTXX8*VBG+Gt5ts(3Fd)b0Y?RMkZ{6wY8 z{LdB6zWF#$L^CwT%Kq=1h4z5@VfDJPrbRpj>SARKk~!sgjY#V z=Fh))hap 
zl7IUlh2=%zIa%cd<%ji1oCYli8f{2sr#x_cc&dAC|_shZJ$d`0%gtOt5ByqByN z89L4Vzpz{3^p?s?cjUjPoz(mECp_Ql+y8exzkgqgPpjH|a6$T+Ji$4s%|8OR?Ob1@ zbo0xW7>b_*J@~J&neg4|zy%3mn+`j&9 zB8%0WUER$*7d_asWnQvNn=!~2hDT37VyfysZ_YD4C7o?0Qg7BAI@n?(&vA%%+pNP2 z_P)Hjdiu<72e0t!h{=7P6nsI~ucFQVKl|Ar2B+Hd^X>o7`>&Ybz_94x+=QOl6Yeu8 zHe8SwxLm&N;k1fn`TollZT#+T{omf;%3LqAAfvv$!Hv0IXUD#hm;XvNEN-uG?A>Lw z^Q-lw<&PEz`Lk!nygC10S-R<8+uM1GCSN2@I7B!|W)z4C+`4G3wa0S#vXAn-%VxbT z|6zX7dsX(AthBzP2Wp%4cYuzSZBFsd#2Gc_aVq>t_+ zKixS=y^JP2xBkX)w@KRhtciYAd4z3Wt6FVKKUeeC#u-O{l)g&RT>2$qtJ6~n^IUPU zJ$Fy9Ozd?0pZJ`^Lvh7!xg*~{_j|K)8x_jArbzenaIY-+@Aa?Digl`+`1(}i+X@>u zoHk38wY*VyrT%SZ<^F>Xk_&cU`1kjD{8gi2_dKPq1x?aV zmkU-GiHZn66MzbSaIntE)$mbNK#KSn~AhMg1~|Bw5~qkAJ2&``(kk-Yu~48B+=8 zLyi2O0xj}?PtUCTbHtH%Pp83w{@Uzqr_VgUKUw|M^l4$fQ8Vs_o>Q41&$BFSUChhU zvy3fVzb825|JRS(bK~XP%H#hxr=S0}$6D4yUP>XJ?f(7B=d$thdF=|B_vknGZB9Pk zx6f|2AIG8j`psM2EMjj}9y0l(c*HT{$ito;^O7rC#Z~i)3C?A)p2I7 z?-FDF1v`Iq&fWKKZ*uJNy#cr8@7FkDkmK>wxgvgfR`YqM>^U#nzy1HvV!!e8gm+?d zm7|}noo2)O$?NsAwKn0Jg|c$2Q!>*hAM-*uLRX;L}YGIoPjhHf{ZBX9d1e=6K@ zdfK<^*rBX9mkz3S-`Lvx>)xi+?lb>had*yF+h|hr+fwsS3TOQyzG8_hZOZ>b!Pz!mX|HZE-JUd#w}us>HC=ZztNGqc zoqg)?Q|l|291~Sf{)?Emqd{wX&;LXQYxj-bm2bbumqwmoEf&oBcjxCB&ljD->Snbp zdR)g&zIfmrl_B+UF888~^)6EdWEY&`xgRRAjiDv?nn1&W43UNcxeJ}b>Q7!*-am2C z`1sPdKjwYPIJmbU-X&A=j`yDm?DJL$>}S02 z{GI6j(@M78FDA@e8lq6oSo!GVq-7IZ6VADbnV6KliP(GfSI^A2qpXW}EY>#JBK37? z?;aV3^)fAweDrdTv!73HXfBfwv*F^dW4(VU>G!{T_TNuleC++B+nyqSwhx(ydKfEPG^T#0!20<82Hq`&9B5y+2^y z;#l#9ZE>0gTe8+4CINSa3tSt5w`^s~xR9_=HezGd(iFW(-miGGp5Hqm>#*t|=gMMs zpGoZR{%B7<9Wo{7<|fHQtt|b=1jJ-{SQP{eIV8+Xglt|*N!j-pX?_U$kl4(vD8c*g z-QC@(*XLJ1`EfBhXiZ+E=FLT$ZocYjJ-x0|TH?ULiLdSAwO-r*ziBdO=jP{=_Edh7 za+KlKVkz3auyq07uGVEjO;(wvVm)TcOAaYA+^jiSHp3>vnZw~h>PgX`7S3f8dBJN=ZLjNZFRDM~b46Zf_?IJ(E3U3T|Z zE`c{2&P>p2PVtz0j>9(N_rc&d><7{xm`8k&)A_~iRQ;f*-ojSw@bbfl)EO3j%X!9} z+<0;YYm*tnTxEB~27#7;H#UY|dfgawH7+OTTHXz<-y91&A|y1Ils-WS};sl?aIkkW5v$=mj;?SS{g6Na=Jj8!j%AN{(D<=TjV2nLNpl?4Vb&@ z7kVBt&v@%U-&R>%FMdk#^7Ah&GbKx#9qLZ5oXxUH{B-iWyWMM?jg(n3H);g=ZIr*c z$)E403?) 
zdrvzP+4F%n)KWUeq0ek3qtafd4~?w6$~-!U9{-P6kk`M#OEo^pz^k#1S%kx6i%8uC zx7$$)96s9wToOB)qhr=_Rli)tSF$cHum8_w!hH1A^sBNlsxfJob{*}gI_^Kai?` znqyfs>!Ohrhk*JT-K~q2A6{XylBi*x*!;lp&xVClA28eAa&hkdb^PeW=Z3dz%ESI$ z3(VN&E{DC-9HET#S%4H0tGg;Dr|Wx(3UWfZC~Ln>9@bGMqfYc@a%hqkXU+(+(OIgmV18!nDn~&=USs>~1aIAq_^UDzz1%xK>h&!F_V za0>$)(`3gJ3lA_Sa^yLzN<7(-5@I0gCh6WUH#J!N+=-i$(^Z=LG(LrCY28@$uVdb) z?D;F+%r_zG>5C>*PX{aD<4}OI?{Q2NARl~^6Y00oqVmuu+(q9+a&eC zhf%uMr%jx9!D7=v)-BSp7c?`b@#?&oD%Qk**U!q;X2InR7qYG-8+|Hb*jKXc=r#ow zMs~41SbJj4cYfWT*SB-!+eW#`g^ypEdQJ-3 zTC?l2Q2M%uAy21NH5Gm0Sy{R~Elb%()oUfQ>0SfViE;kT(kE*rW#&l=w`e{QwqCn# z-d>&XbFcDjwqCz-sP^3f&z%J>rrA|tcb^(pWh}b4WTMJWy{&%`2jPB+ym1cxgHN&{&4(jmaa+vE?KcW;qbr1oHzG>h(Fe8lGOQu`4ZQDhL(sE zEC~WevevF#HhvzA91~f(z5>mTiEmZ|z;uebE{3bVO+ zzk65CRnW^h+EvrD;`{o$D+|`gT-9vdB6-Pk)sj`Gt}OmxxTx&`6I07pYf-NcZ)`41 zypR-9aX6Z>?;#O}NKovQzet0;lu0WG;wgT!=n=n(flf7n9yG zBx*9;6e#iOHds<<_G06TgS`PlO#3I;Yc9B9b86enJ?4u8%c6ujLgo$>$us$*WPOZR^XIJ5s>kfZN)^LzU$z4@L0+>O_o96de#$uHfvETMn99)Avg@i18OQ<`b_ zXQLNVlNJcNd3jDW+^TXpKTuI2QT3nd-fAvc-g6m@M-)rN`Q~4Y?&d!!`8MZ~dXN6e z;8%UqT|8A%H!YBj4#;kLFvrEWiG^k3p{B++eMiJ94g|>fPTnD@FR8g^ZTsehsdG0n zy4G&;%k`*tXkmS_zhzCfQ@Zl$wrRXtZgXTx?_HSq%s1B~|K24wPxsl^*T-BAT(YV= z)Pu>_fwf;S{nQyN_jSFQP9`h<_nI|JU011Mxc!&?|KHl*>oqJ7pSu6+;8od-n*6Nf zp4V~f=CxiGTz-$W{q}?PtM30)PCs*WNB9?)7wuEpgw%r{s%^Z~(Pw0NsimR$qS}it zlP0!)UOj2YOKJ+4TqT_5FR$D@pyW3@?``=d^?!*rKFZTfcl-;>oN;)g?lJ+Ve~k7A zC!OEp$YsajzFoHOzp6h3;Gup?LFY|tKa_L1oLb4 zDi*v}d)RV~_166>z13e7c>8(9M|rbDel~mkdA@kq8GNW;Raoh8eepr5@(U~PFNs{P zz_{y}`K1lKg7Q2Hf976zW;yv={Vlmqt-J@zx18Zt_~blKCI4dodye$zHue;$Gt!^s z?QFnxZ{z6`8SEhryM9hL z>M3j5UcyrNc$XeS$kptoBdffwcsoAUeq-@CwD`ZU*46mtlDg}r77~JYt_d=^aXJQA z-|^OZn|-`_;(>T8A&2G7(l-oaCwPSJ=-e87Q0xfH$B=|T?t?6^d^oHUCOqhv?c$rn z+H$o1g+oTJ;)!p`6%Ini2?`uDc_m&md~Mj}wD1N-^;XF z4cl0Gm3X8>Y8Ef>Wt90+_PejI;C)uy#CVP*t5<(s?+;XWt4cevthD{GabbMeU&ps+ z^IYp+sxB!wAuhH#StZkLdes~I1?g8-R{rqbe>bRVS8eD^U-!w|4Ht#%-(Sk|N;0+S z-IQbR@BMUlpTYU$A&X;)VEn_w{3{I>@|ON-Z{DJ}fV?UWTx5OhO&Ee@_yIv60|>Od23hk?vnY|5nN%qb@dDXemQvM-L&`DPPmEI z_^o7IbOn;VJmvJcPAUE=5D4u4hsEk94+$N484j{ z4kfpi83(PLH({=_qP2^Wf+U}h5$T=IHUbF`o%THTuh34YGml^iUF%6bou>&c&P{?btXspzi$5fb6%Qd{lWP*&tJo7kURy(&LLHhtkiu4O9<#LjJNwV0LIeCLwzW#iN{Dm#r*rY_kjB(cZg z24h`~lu=&c>T8J`71unVBY<33bLONcSpQWtUV5G%+M;3-Bncz_ zdxGrmxU1^c#kIXWzVZD9rZqFSMbt@HdH*YSSNX48$to_fNM@Vr$HVMiTH+33d+C||Our8I zKYslr-Ou%=CG$P`l~>2CTXd1>k=UoMuTqsST7IO+-n{T~+x>|uANZK-cSSzGr|>5y z`?AWPkAA-rW2>4MnE$X=JbY<>qJ;i_W8G)0KhtmeG~RKaVVLk(DDRTptA;-e9&#J* ze&Oc*@^$~)_ov@J$^T$)=8|)JpKo34{;;fXxr(O0w|>dCWWM(7!IQ_J$!OVb^HUG) zzs)S2npvXkdD5wWX6~Z{j5^Z-8@?F?DBW#LTc~?fFRDgajx$iP;IZ`GKhIkJ$WO>y za=P4M-|Y)8kL}h!ZE+xY-=Pch6FDEA-*n*ox&4|kI`u3e44XO+mI>d?VR1|lXmWTW zlJjDl;*3LLO&@xDoXjPr)U}k!M96O{`l?lInEH|JY4cjYHD@C&f3h2U`A^i@Y4gj& zQTfM)joUgoTAIow)E`b*!0~bJw89$`J__5nY;$0q^YHFV_xgX$6%W}|?#* zVeSp1TSxW=OXQ|$&WmQ#|9Rx~-h_h-1-{43Oy&-GaC3j%@+CftUtbPdYP?;0v192q zZt1uO7xHQzEIVi}@q737a%;X_j4$18E%CILFZjJ_iK@f$+3$Z`|9SW2bKSaUyaH_( z_U!0yNII}g_Jh_d=0hBnftxesP3)qEVi=et6+ zQ@_r-I?2DBkw3=zU{1xyHi0!;ZB|AfJ^PWTis8Y#)cvPd+V^|DD#=s{{WN7#r0E(} zDN(~)g|pQ>cfV+V(4WfLV33jgah{v`!U^#m_Lo*%zW-D9;TwaB$} z@aFv&{r~@dEnk-Uod0Q>R3@9A;w62Uu>o6A;|osCNC z*8jxG*gV0-y`$)+%agaVbvpL{m0k91sMvma!{ro-y6uX0|Hx0sG}NEydZo;&U~{7D zygjB32Tj{nyq>UT{r}wh_`KP99@Y^t`|}=G?Um15zh&-)(=kGo{0o0nG#>c;T*{WE z;HKE-bmA?kT08%qk{q zXIXQ9Phj2cJ&|4}YgN|g&&lZ9?GRVF(NIsvZu9#cbM92$i1}VnbU$!&sm$?a75^zA zTq);tMZ2dOzBN0=TBoe?zEGgETu1tV^|{Ljtxt8^ab}xrWEQtz+AEY_aQK3J-f!!} zA9rsrzhwXavGpOP^?#=Sd)Ro+V(!nQ)`yOS3MVoN2y#DZS`o2wMM9a+ix($(5}0=E zb^P_w`aWZ>BIhCg>%zKGdpzo*zHK(CdTzP$*4BfzU%QLNx9Q*1*#BCiwq5S_d`s`N 
zlXES1r|8DTrJb4a@asAeF4YvJDTk#KH_bVh&mlj_^$v%#$M(5u`vr~stZkn(ew)|i zeu?`so6BA0{&Q}krJnYe8txvrRmbDKYvcWw>CVRvTg7U$On+V&<@GVtlgOUf|C&p5qpC_E?m>2#DNU);q_d zDrS?&nU(Hyo=jGf^qzfI_42&xaJ~809_yEcE3+(^lG*&u4M_o-DMAM95{EhGo=Mv8{9~a) z3m>mo_gwRw8w%WF_4@z4zir_8^;UTESINa+{@#jw8^1VAe9^q?Z~JC$P7~|7G}|oe zQRD`mXAO)Wn>7uDHYmv3jtw)99Ssjhx*{wIC+&vQSTnt$})mu%;qSpG=r-3qg^ zxCd^v%Kyz$4re&;U{1}Q)hKPUx%_=x4 z((-R|SB+;ixy@RBwKitCdtpFiR&LAWS02WxpG1N-NAYfEnE%yjSHfa$j#-I^PahK( zxSwEfip5WWy|THYS)S+b!!(OF#gbp2WhYGg_(A&8*1tLR|AeJp^qfAYf9H($opqgc z5(U@zU)_5ax+5|1#QHDHTmC-ScKD<8Pr>&Oj-K8BSFM#xbW)e7)~i0(yK7fT`#e(Z zRrfS{WysswxliZJqVh=a)iGD0rXsUTA@8j#d z+GP&Ip&y_7YAU$7>wnkpaQ;+WSH92K?uW7OuAhAd>pu21dI&bmFwy3Ize2@`zk}}x z2M2e(fcbsK++z!Gi#1I9v%}@r_t*L>jg}eiX8HH`w|wRHqWql&pXMy_n(ejKYNl%L zsVhPIA~HRLZF}1bC-58bNHDXtoR-u~X;FC9L@=J|ckICodXH}QJvQNuss&uOiP~WC1 z%Xw*BireAs@u z{r0)(KVR4-iLmXtD*r-W>r~m!4S#polii2(v5nNUc zHrjjwY$a^}l>86(fAO?Svd;Z1b+@6UIez19?)NdwEjABj|IL`E#>bF3r|yqi{WHxa z<)3&Cv$6}SSDp|5^M$>nw_c(ngz>~VA#Lfp6%n&1o^UZ^Irw?*5q~wKV5wbu_XaQb zYn$MCG+c!7wS4A@?EsPqjcLy&Z{j>i#@y z;bG5+es)???!8~OP){TOwB|_npMm>V@H<4+q(Anwceu@;XDzdTqTh$UHr>s6wQH9s z`^wBda7<_dyNA;W*9fy+jWW5D+8nYAlw&1g)DP}b_hU$Osybf3@6e~&vg~*K>VM1T zNi3hg-9q5px9#^ImifLp!tnY|q}2x%9>u%&DyG$N$|;pxj! zN`0)5ZkTZYLvNnLw#Oa(xi%(t9n7_y`y3gZtPg%-y4CjY!((&lOK<=9)jLGBZQOse z?#uG|ybsHKZx{-sGEJD9I$Pj`fkN3XPR}o9lNTg2AKaF6^OA8)BcY_@EH;w6r|OSFGn<~Qjm zpT>E4ZO>#2h9!l+K1%I9sYf4z4 zpryva#+J&UrJ$s$s(APA+<(t`{^#$sxp(`~-n+Z=_ust#=FHrA#>MaFeD^(R?PTA! zRQkT;zQa7)FFmb@ zQ$6e6BM*t?^Owh73cb9aV@DH%gU?Ek(v=I5*CzndrR z&7YFV>BlCEd$VaNI?Ul;Hg}eOOGf3zj7>d0&0ap5+pjpzTiV0(y5~91^DW*M%RQ@$ zf}1+i_b%1%@T|Wz_txh5C)oWDU7A0u_Q*c2B_^_EyZ|FZ9$qh~WOt@9G>&}O>y z{l~tO+b3myh`h&HDgV@VvGsk~ea=oba}p)u?@70nW<9rlmRa7Gyz|9*8;_Cw7F|Le|^iP|DBBuuQ?{Db6kp?vy3N5XqtXoNy)5Bm8vq< zd-u5X9NzN8b;9%>eMVE!>-87>>^=PteSRwNR!8{lr!yu;-j#fRrj~fL|NN>&?hYG- zE=$hJ)tmLVDN#k{u4l#)-Au3Fm--*RI94|OpHil$mC*Ig`#r7h-BFeQk}f&tRAk-q-M1#|F|AW~n0LWTeG15(X~)GfC+slhO_w=VTvnmi@pyBvvHkLCML&$@KK;h?_)GBadO?M2SN>#p z$tUvFq|M&!`?Oi>Z{xc+%x|>8p*la)F!rT+l9&9%EkAY2I3H(v*(dG#q#b<9*F3PK zSnK)b%kQ*#?$)*z9}C>AG(mu4gZ`(vlfO?Y+b3<}dUE%{?;cCmJJrm2IAhx9xl<~9 z`pf)-@2JSlpD8#0>0D9!X{{fBuF={d=uyPtG0WLX-M1`^FX2wRD5 zUus`cqY@h=m;Lj<{m1fMD`N#Y*jSe2pWgl8XWoaKKbc#~mM^jw^Wmgg|`iDcslj+F0mnz0vHZvC+ES}q?&oqDTllFH#+WMTn7kRv`Ma2(z zN>n;#bZu+=GJmyTLbT`p1jo$u*b11IUpYZ;-})<`_FX8++Gk($E!FC#MVr;tKp ztd+6#{^Eki#_UFg#rs^o6lb*w`j%_Wy+{Yo@(7^$nc&e!`ye zFW*zsWwZ?6HvbD|k>OB@WKx+sOD&#ZpU%1ICx0q=)=EyhCD~W#>OJd`{&}JJvrAeT zZukFfdh7l4jq;xxUhx8#oa?hP%V+tWE&1{0bV*xk-6x;jTCNU)JDM2ob64tTc0Hs)!~w-BM-xPe~smtqgt7i?^LO z>gb%CtN!rCa;=xj2fj`Dwmi0D?dJ0r;NXbZH0ixCYBCg1$`!Vjh9V9bbg~FH45M?P^dGux7dQLGI*^hdLY0&6n@L)y&?w zK*6DA;*HFewVLL8a`v5C_O8*n;jP|Z=QkTq`u_IL3Akj?<#5T};a+LF@1C8Of3M2P zaHwQ5J;`a>!IO8cBFkTn;{?0!nJ0nSYnLi5;P~Nm;ClbF7j+)Ly}a*ER%6}gSg0W2bnt_ltuFZh=-FuvZDGL>QH8pkabT@6YLI5e0zn;Dkc&PzEyJNnQRw_?u> zmMILXCXZTOJ}XbW6;sTXeIyhzlEGhmV9dP!|2utpU%rYZj_yc#0p-U)?@{^7{b zX0Mmd<`rdhbr1|-*`u&PNI`#rin6+@!^ssJx2^)~(O?P}{IKob@!7YpN@Q`Xu}ooL z4bIMr6@lnU>TNLJv^Z#C#Np{rn6((aKKZT?1v}jVR6<`Al+AzL^k~+;wsLzWuZBDK z-J+bpR*5t+C_Lz8sVwGpxHfAEw?F{XlP0$dN*YjS+AyhD&$}d-pT`+*bkX6=CnmbIY3p~pINq4CS8>TvM#v?yVyUJIoMnr}av>WUpt zF|81CaNrCO%23f=1l3wEsBrP4YnhGb#QNfESMD@52q|pe(lX5%8hZx?Ke(0c*{ND_ z(ByD~@&Xo(2Ljn_PzUF^JoxnV^mS41>kP5%o)@e%7`bjOyV|7*2?0<*=(9{=`03*|tnZ&IJaVAe^N-H*g~eWHgGcq0H%EjvDDX@(;&@%+9Qx^Nx56?PN&N}M z$9?YK?rWWKYUw|yS@#4#d^@{r_9UhE6|A#Y3n}!Am1>{7Abx4_!Y_d!*ECAI9FV^G z@mX2(&5V-iN(+J$^l~M1r$NHX;ep_XZ;vm{j(*_1$Wp}wL1d`(h}{+t5yr!ugXPL2<@RCTz1U?D%$ zV<#CI7455D6&GdXzAp`7dLk=c8VbsO4uTv-EG;aSt6!cy`*Qua2aXP$0Ux$3z2*Y) 
zEu*UfA4kRPcb}e#wX2wB2swe$MwX85Du{m@{tJ9?Eh`b+^53AW*vqTo&K;{LGiah> zad+ShXmXyXA^FPEp_6HM+A0nfex^-nPCnOs*aQs!hDM)sKI8CI^<}r@ikd&I-OC=w zv;4W6|2Nvo_iQ|e2ID93NwfNtCiDm>7zz|AEc+~HE9sEZ&O868?mC0YXHRR=q9$!y zqJKW`topjAmp(K)JN#PrZwu|a6TgADhztx6LV zI3{QmJW%Icpr&|pZ?*ZmozLaIytyg7*uCHF^|iGd3m!UoOPrf;pYN+EQK;xBbbRLZ z)$8|h39ZSzyzJqXmBBCW>@=Rie%ett@v)QNoSzrYm6+w;YWel`_2Zq-=N;aVc=*B+ zPhszlmKM&P-QISbmmFq!t`VHD?X!w?$@h}((nV4Ro{9@tG(1hGi-ie#*syr;Sy-HS z;Qy%b$qB(b<@ak>1TJQiGSBM?TI#jXf4*JCgGTlb=k5QyOcd@^&O8#rt*)6E^USpF zPeqTkIp5)Se)o(Fjkt=3tZO1RGBLC99JskTeetI*ZqA}-hbyLNyuH1hpI6$<<@2+% zf!p(9?^HgYTk+@P@m$;OIX8=Z?Q~}IEHS+5`S)&ngZ`$)EGKGnMLUFEN-c0$uqsoF z$+4|LKwQzt#DAVmrGPYG$Chd87dAV}-wKd!P=6p+* z3|_Y9ljhfH{vk?PoD%vVs%l+qXyX?O* zXz8Zl_<0_8s^9O;xUxb}uI7W|o12@DdxdGlPBc;RJzw$YT!D(%ls{+YT8rP^Rm!}4 zzsIsUS~@yB>-Yb&I@TlE+$pU7=*&!GGyP+e_w4b^nDf&>=J0%r!b5)d4g33Kt*th1 zEc0s73R{!md4HjE`=VDzwnT*G|BRX5VYP#caobGw%%!t+?v;loZZ)WLci^0GU-2r7 zXyXG%hmIzmPb&6v@*bIpck*68@k2@{YFkdHQR=A&7v1HLK0Ms6d$OaC#WT+D)93H^ z>lf$V-X>v^AaCmCMl&Jt@wp>Tq)V+pW*K9JX+rNYHDxKFQBs zt;hB6_4@rE9(C*AiVeD`dTDERxOJ-QqXU}8{z-?ntW?og|M~Eok>5t!uUCRK!q>%= z+*GLl_ow3J(&-mghwJZVnBCG=x$%t8q8-|Np-5xJuT{ zOG`Qp_1Aruv?|f4`|*%n#-_qROMa5B=j>&xmNYh)Z+d)IH}i^{x1>1B6o#X$y1c77 zRG3*js$8s3p8tNoKEFI+qCvsM!W>af=B2LL*Vk=TTYh1MYVIV~N$i!+N+hq}p75g} zJu*h5Jg3mmWck@0$=yFE6nOrc)IMRpzx`j4D>3dOLPA2X*LY0XAv#~>?c445_o>M% zmU%TL@un|gnPT_u%$&ScllYl-aqB>;+YUiSNfC<*>ywA__y0}1{&#}LlQT0v&nlUw z7n@a{IWeVRqCvS&Vf(b;k27yy_w?J*-K{2C9yrk;z|el)+sms$wMFz|B;MZMzWv?3 ze`n`d3QL>iY-m@i`ej`I?%AR*FJ7&MA3rMBXy}OUySVpJn$>G1lFx&d2oT{iV1d zCRN8Kmk+bO8k+X^Zf)ywSR+s{NnVk2$`qaGZD-#7cx3h^I&bIGSzAE!!ONR9Hflz0 zQnCB_Zw`Q!Jw3hXZEs4aMsClWYs)e*(QCS1uT|-*go}$@ueS=b-`seV_4J*M zM_rFh`4#Z}?QQd8b5nJsugib@aCC-9d1+vj>*pIAlgk9nJ}*AfrS9?jR-~JZ#JrTY z%LP|#9$cJ$YPR|MSoO+pzLeO32!%tJx8S zPx{tQkFUGwxANmDo=Nh4YjxxH+<5n75zC3`vkVfMPEFVEulf1uCs$Fgb$QN4yO@r% zu^IpW{a!zn>+6e)ho7G_n)lIp<&_5qo8Ow;SrNE6=T5fJw+fB8Jr-Ql8G8)9uv_L-Z1pBm%+gdfz3WOns+7W8<;!NE9btGx>29JfbRvu=1q zd!%@nNvsc8=w!I)pltM=s&ikBW12soHNSsk$L5|(8eS1{RWBAwT%OLbDLMP~wY6od z)&(#3GkmA>SVa0{P%D?HPUNB~-MjZC1*xRjY?zqcI75D^gUX~MAs>1*dB29Uh%~l1 zJ6vZ_mAN)+veE-DmXv~L4!SHI%-eSyUv^;%%hgq(r@b09I3M;Io{zt_@6yCQH9s%U zefnsN$4wbR3ezH09OMpuy~xT@p?t(CDK@}H*Qr4;fJr2w z!L-u#z*~+F&HlQ~rk>2E=Z`*Id0xK$&qiB~3(>YaR{Xr3zkjQE>!tbZT%WF_|&~x-|yG&zx&It+)uVrYZCvu|AJ>%8pmz^c#vJbf~~hIZEnR5 zMR7a(PbZYal8#tS%qTRM5_hf+-t(w=_b!S4%!1uE%eSYv&)9BZ^SER8lN}RgbFYtN zpCqq1rD@~#9j~TYaBT2hvB$G|&rVf8P1EZRN()#tCcMbH)#V`M)WFQ-DX^92&XhSH zSkm9^d9*iXUf1gsiFwoK6{+6nUh5IX60R2(0Dh$_az)1=rc7m3hbE7L*}K?IJbnB7UiEv|sTVSzS85(yvtx1_ zcbu!|-Td8e%|cd%c+R={MVQG*;DX2}$H_rF^Cn-uHfPP5l}pU;RXm=xxj{oz@#2l$ zlYP0JyoW_{cM-I+ubM4{=sb3vtq70o69wLtmIwZ zzr*{~$`|J^Pg|^PKj+z_NGBEH@;im?z3)_1RGgjmRO~(S>FMc@|9-!>-rTu$?sM*s zADpL6XF2QXcI4ciie1H1wL%Z*e!R2y*~&}H{Xxme#^&$Nuk%~_Qu@+WJS`6P><9?9 zd^_*ui{eL$4|*9Yi@o;`0UQS6FL(k z_T+FXnYG+5GLn7O(ZR9X&hwR5=OT|~J6>3C``)i5z@!aq-)=kN<>z+ZfqnoqB3Y#PWrV$?Xk1XH+z{oMzB#wkmJweD+I1 zh|SCQSdXOehKKG7`FF&^_2B$DSG5t1W<=JT; zLo_EE^(w}$41W0K;^ZGcc780Aw%UH<_>vu)b6#}oFG#!oTX3SWv7emDa(j8JzS?7V z_T-BD{jEJ@H{Yg6M*J+_+6x^9E$!{fCMs9F=4;%2G}C2Gc1c%DAG@}YzSjg6=~;U| z_o!`Ltm3gjq-U{r_+QXqQ=(u1)07Pr$1Tm*?R~H)kk2ASg>g@a(GAgZ@zB*_XYt#VDw02E-*|epEA*qs zKaU?PGCoCyJ`i}IpetX0%eCoXSAN}lwF%=0%k|iu|{O;3j+h~1y-n&+-y|a#X{W0KM>LX~W z6u803a7uO*@7F5Zj^oRD4sH}lH^^yuFE260qo`Ksq-7^RC;!R*8BStzgA-Mz90~Fi zm{>kNss4M8cz)hHt*z6JLHqcOUhVqostPI$SGH8|+rW6r>f+?w8ygnJvTv#UoVHtZ z=H;YSf0`NW!wzaZgslbmczFk`F_-$D_No;)whdT&gs78 z;+~M_sxmtYM3mk3*2OMdc78@*e%*|wcK34kTNAX+SH+8KOyjHc7I!=B*uThs|Go!r zKl`O#fB)rGBiGvO#ZPQS=fo9mTkbKF>1^EV9@FUSiEnnS-mR~?P+zUKF}`kY>y(F? 
zhA&T_{E_%90rzl{o`(n~?zN%E?8P4j;HynKFd-|~BR?u0YzUhD-85t(--Fdb2U z`EUz!e8jz-Q({Eeh36eUq-p=}(X%ZxR!O@z{O#!Co_JK{<3oqKCnp38xPLyMtd`WR zIL~$U)MIPcu3mKT?c0e~wNcXde!h29p8NC7#=sv5f7)N%*w_4ZmVLe5v3>vLqWWK_ zUjJor`1tj>Ew6GdE`L?2`u48%`>`I^l$0g5H$J?qs|{FjEqBF-fHT=GNt4T8m1JIc z)T{n6^SI!NDTczcW`FKoQD+idH@WA@>bvTH1*VxQpPOgVtLuF&`K-UU!Q-DZH6nHh zNZq}BVBPt}N7}nAgp=1XEA1;;KAUfynB4V0#oOBVNMCAOr@2H-MFBMW$gyUh@Ko_L zl80u`zP#U|k?C9W`GPH#r1{KmbQ1Mm;B$OdM$9^H4be`&i6XXN!oq7VC9){-CO&Xov3}3DRi)i$ zX6x=+QTR>!@(LqAd8Al(fLgk+ey`*Q!9nGG$b=K=nGrd3T_v^na-;?~zCna$*b-VhmTZ@@H zjKzH?vk0btla!2^9QD7GZ*#)O$>k~$uNHspkYiMJxcK<7d3@!@46Cgv56=4j$v8K| z@bIz6W)Gh%sn{X2UPmKl$A-H~b0?HeXY5*b`JB-S1+}Mw^_69lU%shUv2m)hP$+(W zWrBEn(m(n72j{{{Km54f{6=r>jw$xe#{vpAev-0}J1E{*GJq=J@y-8=iV?pIH(rxw+pc*OjFiF)t*C7Uas z+Z<Kp^Qcq(jh?&T=2cu- zENrV;G#cJ&H}ltLa({gI;PA^i=k4Zqh@Sp$rhn%k!!(D<-^7_@wseW;_6ezWCEr-R z{>E32Kc@b^M;UvXC2w2qYo2-Jd-%-7-s+*Vu4u&k3@e%SMMmRb;L~`P9UpdQ9sjjs zZ>FeE?MLGtT{AK9b9Z-qnO5svukmyH+DE_l{n{=vD=JB00gHyi3hR^n_7N(MD)ze3 zTaT@}ntkEx%Z4NGoampf=l?tr zudVr%Hh*HB@cbY13TKF&*8SVxJ*RS)8UOp;tIyi5NMp+gZFFsDx3e?(mUTW3?D2brwz|!0=C7|To__ZEeE$PK-ivAd^t^Iy=f^YJVQahFt-rYZXFPxN z_nI5~R3ugQ>G@8nsQwr6v8F8Z;?{^dhX=3DhuNzv^Nevl zV98L}@MQh_M@vsBHt#I=2)*R~pwDZoLG>#aZ=EG(pS15~cEvl*KKJkV+-7buodbQf zy93_uGh@_0b!7QHv+Vd)N%ww8S`@z#(b~GE`xF1Q4L|dKB#YL`eKkEd-!^^wgzGPF ziMW54ITRyo=D)Hj!Z=&=)}HIqzs=qq>OZXh@WVRhXIq{<+OfQxd&$WOV!zGa9Qo5< z@3Jkr=<)ma&y}k$?n%>H&zQ#dpUJ&k-sxz2*6cs8ZtR&>Yt7HCes<$G>ob|>MWe24 z`DU*^<1o+VB}cX`{h#!^%lAa|G6O08yzbPtKOb`D+rHkW99zM*d-Z9z50CddrQHpE z_(}D{MR)m|t6em{3a({dFW=ag8Z?i^W8PNXB+djO{$)Z77)%!YnYB2i;YmC5g-xla z0}lDjUBSi2(j{rMyYTUTi`rjX{;HlgE9(|H;aL}ZhX@E zWO?BKYtts2kuj7MKR56A#=Y$yuNprN*qtrBrDn5q+1h(euJQk}{$BgPkLmxJs3xzq z?~m;aPXBmQcyWRE)SwLGZQFG}e|NUq=4w~}%;wsexGrJ#zC|A=e6!e)%5V3|L`GC@ zO=tCQo0j-{e{LO%x6fay8d|#B-1YLpw;Nxqj^FrexvN?4mO1tXO;d;*lSL zkDMQcetP%mM5xmKol=XnxTa`J8Fb`j$8c^=JA3Hn=Jed#9{VT0XS?km`bc5H4RyYE zol{qPHT>z>v&Z4jjKWur90G4YDBn7h{(PIS$4srMksfo8UwzHqCu`dyV!qtLBVxz= zi++2a>`;3#Nl{$Iq3ETm?>6JVLdCKIzuS10y{W%)=bPO8(;knz3)UW&mVW6YxBCB& zFX}S4^7N$tCU2ek?8L9dRc@<_OTJHyogjXeZ{@SYyBgXBf;I`yQx?{eHgXC5ee#$@ z=>GYtUNz4$&wi~kJ$_a*&#m#3YvCD>I8_{if_joV zB6R;Giz!WCQ~q>UkI>fB53lVikJz7Ndg{@GZM8er{+s)3%WIQwGD}R%D{9+)ZyevB zbM#ZvFp7(DMM&KG>EJj;rmf$?XY_oaE~AFWF(P(|( zo4a}0evS*@Cm&D#vih&UJWXRh`8(-(r_QzfTi@sW^VHTy!FMtqe`x;jH9h&llZSrO za$Kcv94I$cbR*=x!-QV1ozWQF#(sTdu3h^q%*k1xAFKtcg zIDaUL)gS%LMd^CnMWO{XdV*yg#e; zO8)+i?`r?snq2d?Hs9H@-rjBgIo}Ug-)ucGNq2dC_K~U{i952qvuvf*nT}MPx4X%7 ze$NlRtn2j*>F4Jhkmq0`1f4$oO7U=)pJ7MdwZ$XnKtXhS6$qhIAzN7ORmK-4Hp;fnd<+C zDfi!>b0Syv|CaM^xKaMYxieSoNhte6hwTnuUTQOT$<0sr3S6bx_ey%9| zFfsm7pTfs&bsw$;l)SLJFiBp`snhv!yVauX{o#e5ju_5<7yr5L;NkC6+}7SUT6F(0 zzm(?1y4U|+-njnZ?h^hrbN|~(8uMJ<|MQC7_l)ZT`~QEky>%yE(07&SvGDo(vhV&b ztvoPm?(FdGDs3*$mZn=T4t!cu@#o>Kh|NhGzuw=KDLe1|&0EJ_CoPlqld*1koc2n) ze4o=b``le$Dn49PzwmhH^}=^=BwueUysKrPb6M)va$CQ-Hi>ul|9K)io9}Y<@2Pd$ zeAN5ox7Pjn{;Jsj!S?z4j=qtMPyY8N^V;77`|iH@_hRpgU^U}kvyA@lE^96Q9vb)h zWqZAfT~6}EZ=8%pd>M5o)XsT&Sl$rNOt*h|dpG~SH^$fW4ElN*0B#ANKcs$*kU z_+f_Zn|i6QqJ1-2FU-@r|JHRw>COEX#eYnKmUwmc*8gkfzqLTE>l;7+ywJG!TK?l*Qzh(wrSeofwY9nO zPjipm6HV`mc~S;RZF+Rx8^itDKs$zUiLqd-Kbs=MygeiFETSI_I~vP*>pOxyfr^Jz5v_)$m!jW!c}u z6PDk;xrcAYU9b1OHFAmzAMRN1T0O_UT#Wnqzns4k`NyW5=V6<=q58Pko10tD?y|rB zSMZ5ufW&;i=%$!T?gvZLt)k20pKhI3_v83WU&Wm{$$V*fLFeismUsx3ytyGbxq44= zuZr5jQ?o7W%S6^h$Sj`CcVss|pV{@LhxJ^(oteLT)4Pl?*?d#EhVN1PI^@qPy*k!> ztBS4r+s>$VHj6wlFXLYGcdz?}-HJlC-ralg*h2Nw-*4|&8S(q|eZ#h1?}qm_1W`j6Z>b@+<9_+ zu07ka;Q!M(=X9t>6aG@=NBG%{Be1_!$hIf zHOsbS{JzFpc9-vO@pJ7QRUiAmA6gZ|Uh|yP=x}oNe)RjKeA2=7#uN7!7;4{?G<&A) 
zGGnV}$l_!EhtCJAKRo_kE$Pzn{Fy5k@nqaxsxN069IoPZv-qj=>vR6MH~4?4l;qWV zy3wVJ?@sE~@)sGe!?!0EpP6kdY9fB_@5xE?Pw0eSpOkZd6QlODKaJ-0Gds7mEwQ~; z%wbpeJ=FV5&D8Jnj`iQzlK(}1Y4w#)uVZihb-Vo4z2@hH%ObHWgH&bg3z_7%*7KHs zuM$`)9kwfvp~L@P?7j*+t*G-a*Sp=!eEI9^g%!`;{zfl6H`%vfa^&j6ua#C5&2T+< zciA2R(Ka!?nrCI-9{=udXcr0+a+&&kTT?~NrK@{h*-7HRbrq3oGX5vV%iOFy zV3&SsvgxV0+$-j;y7o9cf3kl1w$q;tj!${EN}yhIUHY%Y2gf~XKc;=D{FPa~dr$hU z9Qkjuna2M0wR}8GNAAqNRQ93G>%&e>My`a}pPnvK`}y(fqNA$6eP(W+S*n(~@zwj* z6@}%Gvlr?8OE%k-a=taz+U)VQOHUX7@er~2Tk_<>)XWVU{K-4tD6Ty*>+P$gOC^#{ ziD(U377Z(WKnMa=JK*=>c+zp zR_jV%TX1l0rHbb6)vsPW)ZX3Xb*IdIO7&c}vlkcMj1_bDyL|WVi-j8YQQzL&$@;AJ zb74~LPJ^noJ9ixlugrC@|0CRVx@O7V$FtY$x7+38erb|%nnvLR6N%DP=cY|5=X%y3 zuWvp&x!$#zaV=9>^=@;Wier8`BG><%IP~I1%Xc1Uo;gR3U%r36IBPT8N%^ksZO`}H zGuQk7Wq$fzqkmQCYNvGv3f}#jB-?y$r9}Oc^_fR{8lIQWJpbw45t(Rbl~T|$odv8v zg(|&RY~r{4NI5OO`GBtKhLnSIvN0_&u69Qdbp|2i2y*OE=T02qbW#E*>8=vSFaBq5krhC8m z*{#8u{`_^PU*;~`mv?aE;&z>9GA|;ZNn~hYQ8{Z=No`yxd>Db^n`xlgxyKM7GZ{`O>R&pxp1C z@+JZx|Ti<%0``mPGZ5*4B+NAkYy~U42RR6Rnko7xf!|Kp^>Cer}Z}s%_ zcGVrx*>*2Q%(!~hBGvrkx2Ad@So>j7S?+zFS52<@t~a}`Zg{i){#z5#@|M4r-`%%l z*E~0sSMGG5d^vt`yKVQR)ORh~neb@ufOKVQj&(<~?cO-Di|?B4K%=late%cN?ztZ*iJ8A?L@toBW2mPCDoq1u5Pv z(#u>WY0GpcZGYW6u9{nwD#uLZn-*)AFFyIVH*mS1Y|6<=M<4kzxx8uRnAp?h(((4l zw?$!7H*lOV_bd8ZBUWzO%+ENLK{a6Zuk3_1L8}>~N?-M=pW$26aYDx;bJ+r}J4M=T z`~PiR%jqc~E?=x3UTECACI36$o~&z61Fr5_uDMU*zuK%2{T`pT6H`_OT;1jVyT^A#yI5F|!v*|Ln=jU6dmv3D==hyPP zE5#-nGO>OZb(qSo6*1{>vS7=X2{~k1J#xvJ9XE@gmR3btU;p#QeD^+!((=XW%&`X1m-^)_D`M|H3cB~2 zOQP5LXp`p0lj`#os=vKCF-3E-6*+O#X|DCX_ z{PUymw)su>m6eN3f3Jx-@9=w?ZuExQ>wL~71bE}vgzBobz2sOFLPMj zsQzM4Mc;*a_2mh(ck1W-U9Y_%{V<+4!tTO&7l9`a0b=Ij?xP_*Z9 zwl?*gFqvcGhRGWyar_MW_ey!+C4awnHSDuYGMo0*{$5n@@X)qb)@5%F9B5?zc$nW_ zA=&NzY~M-3evUEM4{9-fDt=wWP|TO)x7&X6qoj;?hdwR!DA+SmrY>Rc&fi|{cUu2^ zm|1y0=#SaQu4_*YJ$Z3xlEYD#MT^cJc<9OWbZuUt=6dCw5z%vV_1@~{6)HMxJZI3> zVAQaQM<%3|r|_5espAG;CU12#O5o;v^uTwe>%~Q`x}CRnm+P15-t6PuJniv=easxE zW%5o0D1O*8*JdgAY-hgB$6hrYlIVJ;^L)4fUDHlNjv;?1%@z0=#S z6?d`l(StylJG-vGsJ*d2QJhcm_r9~cbu7<#oSYZ=C*Gh~E@anNKJBZ5Z&>UVC;FZ2 z_o(-%bKz1c6Ovc7@9{osa#PQ%Nod`2i~F4E>-?Oal!RA*0Ije4aVsQy3CE4bDU;;+ zE``X7f zMb0n&x5aYFr8^%>U!CBr7I^fr*8R=N%g4{^trcePdw67F@{R<(#Ipifr;dHKO!Lw@ z^TtnS)?$TsI)Uee#D#>CRMkF;=ssz^5hUzBaS7+ik0z3ChYam53oV)@!WO5)=;i71uM9-^sXS%(E&zl_j+1GP~d73)^yU z)IPL17iZCa_jcK$gPU~&*Lj}d{xp~O=Xrf^gYSP|KD))WdBg1qGD5PGX8zav-yZTz z;P8}{nvpxLj!wVkb9x{HgzZC%=7m?cel(8Cr~98cR&&T@@w@Oz4;0JiRf@uI0~%$x{j% zmYsYlIZ5_b>z51X`ZatD#f4N@I@JDV6eTS2T%?@h{>W>GsCM{+o{1b=&8sIw@F>rzRSg!T!Kw5i~H8h znZ?YtRY2Ns?@nXhU)HBj+bmggNp0H%y&vi=ZErPZy|qX@Xb}+fq>JagCZ}QiP(Bked&u_vMvyA3hELdjItF$M@IHK2W{e zBJ`5AA>azWwvYm+Mha&P)|9pSr#!=jJBYiJXceP7x92iejG(%dgG+H}C86 z{lx|eHJ;%f-1Ap@=DqNreEy60+t)K+8uL89=r`Hx)YG-^I$x?iY0vfj1QIsHVbl6mA%jwO<2~QYa{?uXVT;{W| zF!Af9rQRP8{@@SX;ul#WbCa)V!lRXkk8dpgD)#61Z?2q}5Bz%ZdwTvNh9rB|Me-{%GFss0Ypbb_W_5nHMQ-$lug|%3Sf3@(Eq9 zhNeA@TdTSn^jYS(c(CQxF?eQKOqd*V;oQLq%P#9NFZt2A#lgp`en-N$)B5|5ELpGi zc-a-rfNuv^ah#vX$*q3wSYq+BbA3~17kqmnS==98_QZs}{MzbBaX~@B>l65<9o#u1 zV%vntJe-_LmV&xp1%mC~1cm#TS1x<0qMx+<Lj#p|^S3tEQ>X(siQa&av0LvU?e%+-{HH%Nk+9!!&baZ(8O4bv zhWtMTCg0#-6#Arm#9hVdOS;9hiK&zOcTNzPRWiBc=$9|$Z3|-`S^6%&JF|a@=j6mm ztCFgNHtMd_{42;g;r8nzzghU@Z}rDia!ajDh}tfG-Sp|>-^a^6?#b^vJg@e)X)AAl z;}ix~jvGJJf6I5p{E-%%&VNHiZ~a`cx9wLw+x9$N`7iwBBJ&!y(p=80Rd;{$C4PVC z%kgtPPv8A1H={qKJXK@fH9;}#o@a+eMwgh7i;@%5k%N;@z4R~7YFlb;zfHwJDN<;X z;*qvG6>Gmo!ooiVnGSDR@+awg+NPd)g+Jb$jlCpX-zI1-t=Rd0a#>=vhjb9vld!Fs zf3s}vZ{E7|PUZfW$}bBF{>yEtd?Z=&#?C;&HSWW0pC>2g%CmBb^LzZ_oU5^X#nalP z>d$*8e>YutdQ!wsO-3)BCwrglF1)MtC8yZ@n!h}6OY04RgI^lI2Bn5uK0PCrd2Nx) 
zqeDkSBc`t6I-{<5?9*05Ypp*EQv|-bM>TpJzRB;|wBpCcC65kxnzv1?y5R32XunRy zSjE-ogptnO7wRt!c(eSOROB<;maCmybaI2z&xwDQ&T$Gll45zZe_3$%{2eEaee*xQ zlBwRKo99&aesXR(uu=cS??>edu~QZZDTtdM+Y#9Oanes$;g9v79xqy?x_Z%) zU+-;<;%~L?)%rQ@oo({ZcXs&YY5fZ;XRo_k+%zf#sz&z$%woQ{#wIrB63yS?U zl>WQRroq#Gy_x!i`A4SpW-mT8*X;alv$fmL|Ni#oz?JoOPW8oUe4b}7*u6E(J{G=X zcOW-tRPyP&HHjs@XJ(svkIlI6J67_y{!mf>bog5U)00JUkvj?mMb1ncl4oa$5xy3}5OWY0u<5m~?AP@Wt5BKLQiB2?gJ3zr<+1 zYVz@ZB@>-P=@-j#?q;akD<7-vp0J_dO70Dr`1)^p&(6>6v-!#Wv&X&v)>nClxorHm z3{2nU_2kaJzPN63n8$QGmYGKB&K?;O`+nu{$YuOxdABO^ep~g5-10NCzAaUCD5==t z%x_Y;WAfMH*Y*<+OnTu}?frZA)-(U~rVA<5D_1eUto?EG`hnJ?<`bISLRp058Py#p z@~@iA@Ui)IiMYMLkV=ZjiM}7+zt(8JUG=BEVGFmfq{!dtyZ#5A$`n&xANNl@YQb&O z&UbgVaawQ7JK)xqSOp z10JVyL@E?-W(|EI%7o8C_D4q}=T6BYG(C5H+F zlTvSD*q;fXW?qhPO8Rkes^hdokAxNm&W(y6Cw?$l+4{obZO{5s`#C&rUzoD(Q$F86 z&))A7Yd`+~YJN_{zV5rIvkTYbd&hL&UaPp~{EKg?(|0LPKGD0bZ*On@JS% zahdM!8x`~WYIVEz{4sqmerD!RUMbrfd^d}pIhrY-{Q3C0q?ye26UADqOLp!)v#_sn zL-}}$X&;EUCe*0R*=P-Ne;p6Z32+dB35p7$dZYd<6 z$ugCJQ%KLCy8L~M77vT9X~y5p|+B^E%vuozmc6QIy4&Bebdt3gtx|e6GGqSu%1UQLb;Hm9{-k2q|GiF;>(8?Ol4G{t?VC)VU2vWBn}6R-{&kj%PEFol z5V9k`{=PtJc=v9>0H!DJzFoYeD#TPQuIGAf)l*^3?@Q+EvP-^t!4|4&-Ff@m_Pb@$ z6?LK8^X^_);Mlz4(1x=bTNih&`@2X^qB|I-Q?qyRqiyCzW{pwGB!F(V#s+Vk=Wb^3EQ6JHzU^ zzKm_1k>u5=)vGvE^jSQ7G=I)CPUoAZ6WMfTrg3xl?klT8xgQ_zKYnqs`($y2sM{&+ z4MGayra#Z`SA8exAg&kV^1LyB;q*w4AICSDzWDR!cEy(WuhXiZZ+)I}M10fB{kn&? z@7L+bFX>&*p;FCK<9Yef>etNo{k(n)mGhlz+-w&7l?H|6|{l4#0c(;;3 z5625B1@Wy}?`)pk_`>~Lc&&@`y_p?*EH@?G;l8{0)Q8W1Qg3Yh&hY8^skY_qwGHd* z=JcOR?SA}IUEZNFMD14eS7(~{$*#JQd~6Vz5KFZ=i7aTtL_B~o!|e}@|OAP zZhN)94VRW_+UpP`#PVX;J)>H)BZGXSsDEJ{L|NKu4LT1boJW)y04!yA~rW!+UOUyPIjJGXS8ZhRv=50 zVB?#cn;-vvzkmDg17+{-bWT)uU*t2>=wL+X+Q~`}L>rIDrtT5b{As23&C;Ru)3XEb zyTcoU%ABip==4Fbxdr<(ituQ=Nq>$)+O{OZZi4ASX-e?7s5-D9fK z0*)i{jN9iciu|9oQ(JAhQRq?7q{31D?F)KB*Vbs-TzeE}Qj=q#aq#Ab((RFN{umtM zxwta^FQ3h=_oY|ff3cbNZ?oXy`So{_E^N+CHZgcuoZ9WuR+72+XzS}LIq5HES@$EQ z{I}R#MhVD zTK<&tyzIWVW0}^Ug`2bAP2Jl3vrSxa}S=JoNQ6}h^0@)GKpWB`)nA? z+-kmWGo6k$X@0!F>!saa;Y-Q4*VVRNN!hl0_ne>a4CU8+xbUS-Ay(BohI`7bH8&rA z{9vfr<~#Y%e=gtTIC~zb9?1QD?QBellx`Pz>;6XP2W!RW_r^N#F6G$4!Kmt>m&F5>>H?j< zp~A!D)gZ+7JmUGMliN2Z$%Zkru`%Y|*R6QIH?VvTtD3lPDENGU|H>D({}=eLF118c zi?Jy1FrWPArS}@%HGR9W@i{}#?Bbdd>Gj-F&fODK1XlcX_^lFn?n%IjZL;fRf@aIS zs#;unrMMwa#L@oP`mptypJIP-|9HPbKKSaAM_mUVg-TDb4ZpX;?ed2%hfS^tH&<;v zx$Rxl$0gkjUJWtV%A$ksb)DX5aq@ENQNCG^J~&+qx1ZH!r@v3$r1Vmk2IPc^7|;a6 zYeUbF>Bi=*39+X>W&FNl9kH{H``o71~rqWqMQh*p%*!cA9ez|r+V zn(3*x_?PIYgYzZ`DTr_TaB{Y>(jpIDlXpMalum7Tco#KygSP8JjlWl-FCW;wYEIRm zrn|5H*Y_kHO8@xp%k_lm>wJuJd?KZ^gF+8~n{xDn{Fki-53?7#2C*!1Z7AnH7rTe+ zf8I^e+#LzuZBpMY2ober`C@eC#zT?U0p>-LS!;|hHmq}S(049%HO^4{({P|St}W`# zp`@0-GJkLb#VAAiK*A%kpYd?J1!2jWD|L4G+IfXX=pD1etTIfsJe`I=fFI0?s`ufA^hvy$F&adgx zwAXjIcA{X<#r?*oMD$zsU(azm{`{UxRq>^wtJZl(W4BLtXqFQF@au&z`r<{gR_Y>R-?0Yqu-`CpitKIDv{ysmwNpPhzj+nlt3re3IRiey)RQ=jO*t z=jBe&HDVHW`*QSA=>J)zQ=>P&xxM|ua%W?nn$O0wFCBZkdt=I1wmo-N^P0WC(QGQc z{k8$~{0fQnd%jQQJ-k81`EmTaM*U^~`)b+ylTM%8`{UR0pFfH}y?$|PlcLVtH=Y|CP{v6@K`Q{q)5NbNM3nS3WmA z`oHjA(3adF_L;_Qw?AJq%e~)hoPPeML359ZhJT-q#te0KyNZvRHRiAVYb=Z9%HCRM zm@ksCD?8*-=k;jXpPd&LR8>l+|H|(U+!G*oCi)Y9&}{C+@A7tSi;oM}|2=!=CinW4 z+wFdUmK5gaTyY2II9CNR4wY|~4s*IrNyv5e@n1U}viVzj#`iFZ?@!NUd+n<{9{1$L zEX`AIFFifIqvGSF+H-RZ&&=}fRatRLxpqV0?Yt1uNn_Dz$F?93Q3}xG1S= zx!-(dRgWvSx9;?dn7^Kob`~tEWq$9BZ?6|=oG-}?e*CAk z``gVsImw}SPo?&5N!z;h{k?Jy!G)6Rb|3uLZdt$ZwDb2zmd1a#zP~V)IkPzZbWgom z-sVe6ag&XIpBFXj{dM*IhV1%ZrBxRu%s&>fuguuVXvywLGynMualG)n;JIk#mI=^Q zu}|Q|&25Ie_FG(g$8vg_q8wOh+R^Yy&AEm(Ty*P<;egC8C`+U+q_i*?m>{nqbl z=Utl5HJ_vAmT1YutBTR*#<-!I{+IsAb~@KMd? 
zt_{rm+nz1p_#xT&X=&GtqbHO%rvFcDnX&f$LdBPhi)Q>h7+@S)dM&T>`4@@9E$z$P ztDFl~g!xMgi%xs_=j&AsX}e24-E-}V#a0F_+fw+bn*-_TXv0aFSHrEY{NLEj7~3|+p%X4`$Jdl*va{R#_fBv-%Y*C z4XOTTi$_iQb)0v;;$roL4=*~u9TpRmYkQciwm9Ud71Q!rptck@OW)f6m+vH2e6<$o z)?nm(6!ZA;nTF?#ou5SA{#;(>$(yC6==pj2+Jisx>kckle4MxPRKupU>^&=T(vHtu zwWPB_Na6a{+w~enAM;&e>=L}oD%V z-_2DWHXhe{pm@AjDu?e~*4tS-pEQKuSSkq_!}GfLopa}w%qb?=R0hIVpZ4 zNZZv!2jr>`KG?uI@>jaAxco%YLFo`w3V~!715ouIuZ&=4z z6gXK_FXqTRmsg9FRBtcJxw+5y=CxSucl9hgW?!CuDKjtr#ERWY0ui8IMMTZxDZkq1 z*UB#cAQd4Pz;dUX!Lxt)vqJ7gwtp8Xd%jz%x*`3#nYp-rU5SqmD_V)Jk z`afISQjaWueD7;hla;ColfC{A>FEVuZ}qjDvtRjiIg7^1tuv;XI|@YDc`q&GWGWFz za}siPa`tU1et3vgCvK0#+1cjq%l+mC?60d`6Tko7E^bDl$ptDRoU7vYT3uT1-#{~DRxBb9#<9{wUxvu`1o-kqg5|$ILymxk2tBX%ns}x`A zFfZn$G{f53JpuxsoL}~3Oq%=q-ERKcUtcypS?)XAEaSog#$ZrI)489^ZERZZMo82;`)8;@-+hXf4@xD^^Va#U%Z*`-W=t)!pF0ww|(zBFvHV$ zW6fc?E$^)kR5O(B!#PQs^>?v8=kG<&|v!LcEGbvDSlA zW?Wezcx!t;Xl>f9EuL+B@^(GTd}lvum#=d;)+4D~=rL7GG-y_~_==zzw~x=9_nhhF z{kq@0pP!%K{!qo{aYwAr^4XW)eNq2;YP!Dsz3TV2JragY!fHMTve)lzn`>RZC~|XJ z#*GaROMl)z^z+Ad)l|eTV6RsH%TOzHO~T^5 zd%HxhzPeZJYt$=kdgR4K;IIzxNuPDQQqBMKYM(dj0+w(C~I0aq|d87c1-zUf|N?orjA?bOy(~N zKRj^U^Xt{>6O+~XtG>KwJk-jae6Wf2$FJAx-&QFs4|&D5`O=H725rF$ng-&gr_b-# zHCuXfeunC!<9nW7K74k)uR?T+8&B%&hDp_%GK-w|suq)2UXftseE$B-!5+ZI2(&}KA)JYE?=Pd zk>kR&xz!tU&bs|E&g~K1`+2e5$GcD0i+xifvebQ%M}W)EYOB&)TQVnYS=+wkz309~ zOepnUARF89ZIzONJ_WXGf{pZ(pujtdy@Bg|ZuyWUm(2XWhUu7FXE$huy z<*H0f;hW!FTzT~>`&LaRK9-h0AK00euUfHl$Bfq%`+mQ(-hHos$+4Bo{N{G;EPj4V zUeI@z$-&t2*xUP+m6UWct3aD4*BR)!O*`1}`eX>`?7jbj7fc=g9XGkBx$B|K0?ozq z4~OU5bo0Etv}o_dQvueZo)`2syR9gk>-CNK?wfbJKm2{uYUWX^{8BN|YpRy2UDeX% ze{b}grU&Pk3dtS&`LjPYwc8Dfo_I_+Vhx6R!tx7bWotc?@oh5LS%h~TOv%F3{khBjtuNkyt3%eDYR;{mjqM$|f zuaYSxMms&KY82N9Da4;pX{t_|&-~?jSLu#|k3y@a>=vDxuivPcrnM?z?l+5yClMl2 z)%-FWS=?mu_We}b9Pw*T?UOGvRgh!#7n`o_E4$36?fA&~*am?mUNaAQ z_4&=SYxcGNcEiAObJ`{$mEQ~gi!h2SDO`@5@@vAJKc7zPzqr5O{$9;z-`&a=HlMho zdE)Z4{GCt5UQg8y*PGw6C}5#enN;uVj${4u{nqbxD0>O1TuwYQ|I$JS-nermndfKg z+h1E3)ifzpt18TQ{$vFvdmp)WAyqGzukxAshLaPfK8j75%5YSA*MsgEhB_k7ZvH00 zq3VmZrZ>q)eRe+@%Kx)C{cvY+`l7OqzxyggUr9`Q9P<2p{D$mCwSD)*LA{qnp|97L zKdiBQYo%y+CU(;;y;Xe%E-NAq+PwJTVEe`Ra*m3F(~3EnYFp+$XIL4;GKVAJlhdbz zn)+V#9t~o;Q7y;i>t*!!ei7ml(Rjd<_m<}{>qVO%6S(JiPO`8)c9_3*<)x+Gw>RuD zYhLbAoGx)WabHN|6o!*qqXLBOs9zEgwuYGQ*$rzKF!nR@v62670pz!j=BE%#)9pEzt3HIadjQ% zs_pOEzdYOPD;xq_3vgyqcbbC4PapNo-aXbg)~&d*GI(Ly(c2f+-1eUKBt$n{XnvhJ zyYjD6hv|-s1DEI4r||F_axYz9vVZw)bJxRq%@Z|kM2`E-Ido-Xv&u>rW9h9gv(7QT znw0S~``9~|29^$%7G}^Yui=wOQiswz3!+FH$!}VPPBJr6=uEwe`7)e}8nYT82`=QIVzZSW? 
zo%UobJGkxrO@6(uvQ@_E=M{Su&1(|%X6Syrb9AbCdRY2|>^IfD_18{tNY6Xc z_E6JTYmPp<+4~#aa{B#;zZ5#>$nXh8m%nvixaaz_O5Y~V9|E_V1^$MrBxx~A`-Z-A zI-vRYp|#&i9#zwRyI(78H7Uukte@nmb zb?27*?B5Af8C<*l4^@E%nuBgnNz3h1|G6-J*Ou_|#RUtCOP;hp+?;;WKvw+w|u|pYRV^n3VYOyXS>}QWuf}zew{v%GEk{S$A2w*db$| z8184h#{S!9%$vD9T_XQ!LZoMdh3eOr%je%>X11)Tc=u}34w;k{pPrr7_WSlFc2d_2 zk@CL4`ad6!e{-K2Gtsfh=g^r=N0yy2eERrH`mRMY<g6@RWz*_D zckZ;UGuN9fue<6_zeC%EfE07?H{R2A`nW{3ZfHhWu8e!vmOOj2{zV@@FU|?wQPvyJ zGjf@T?A)Aw?2=}F(&KHm62?1LPgP^J^7pmRbm|wzm6M-u-m&_nJj;@^Ho?i#oBrPDQ`wMX?^UtmpU=A83tR#@ z1P&YVUH&|ES-RZ84{KO7B%%yCuQuvSUYFV#%lg^UL1s!y!l%^q-xa?s@6P$?KRb8p z9Ic0Y%dZr%%lTfMvu&YW@jIS#5x*si*IlvHzWHFv>!se|>+5!Wsc%wjes$J-X8+EK zUQxzJMLAjRZ(Na?b9sqh_R`5OFU9Tj_V%fJ!V?(^9u~j9xA)_l&F63BewZ?)V%0p`>RYi%dm^S>kK2-Q@zA~fPbB^3c~0K zU!{Gi7b?u#8osb@Q#Sq0d0%jw;(hi>3R#MdUy>!VIu8qZSeb`OeLo(?^kk7))Q$wX zuIc`-7YV7}U-Us{L8<4CUq%9!>}ne~N8Nk#=on+&_jh;GW+m(I=mW%E)%jMB8t0Cr z{Z4Fpb4<(gXMW94KR5sI(a-9IFV|Sk`0&H^Px-|+*Q{#!#Lvw+-g^0&&fLq-`gA|n z^eu}#UH0mRXLj4HBKMRYhaHx?)h|uxJu`9UL|yN&Cnx3|Ubp?^Jlox-JHNFxv$0nT zSthBZtjM^yDC6E9OOFyz=gOcjxbXMyC7zQvBp>J7{jAC5%=BAZGAFABt9b4#e!lIi z>chmno=q!m_%#L`iQy1ncM+1|o3lpU^z^|!o_#OeWjfj?Y<8HcEl|q)%;fFv&;Pfj zSl^WAY@o}Fjgc%?U7)wytYU#<1FyxU#6x3|w^tgBP*TV7L~q`=##TaV(-kc+|YATj7>_=W2 z_{CPrwVhy8+kf+<=-gYI)BC6Xy0R+t^tzpivpW~v{T#mYa{QsA*QabrWq5MH^N@3q z^U209>c6@}I#edGcyP>W>CBfjnO83$z;wjUUFq~UrHhJ<<&zZbF3-)?6;gP~{!g>> zcW@xnlYeHHgtVBhD;#J%zcTfKV3V``Ez7%yUQXGXeLd;zuhNM9ALDawZjp57lXbkd z=jKGiaIusVNoDmZrk5SdpSc|Uo$ng)&F92~i`!cs&1zEl*gpI0mZ&zLW&hlcy9HL2 zrv&~=s7*nqV>evk9Q)= zUa#Hm#_RRz^c=O5>{EGyj4njF+y6_Bdt)ynp~)z;=;)u9Zcb08{NrGpWcTv;qKu%A zE)BV=we7iTOQtS4zVf9WOUFaztIHon_BOZ&Gl>**Jf3z?{r)*8>EhUyRY#l~pQ|n%_XXBfDQ-RIng1vaGRnsfLGOpbN{x z&51(Q&T%V$r_~f%1iCBr-gE zZZ0hNsm-(?AldZr>;F<-@sC&wOoUzJA2Dv1ik=kuZ&qgQ*XvP5Q^M>2Zan6|TrXvK zt7@y}|B9Rms+%$|9GLWU^_qRZn^v6EXO^5ClEA*$xcHgRY>UE0zVpnE-m5NOwsWrG zwPn@p+4oe>-g(F4>nBJ((Sn84#*tbiu)`O!)Bjz0bX*fA3d*yDU0yn{>WJ z)H91D&Q9Me5=-vwtyZ6=&GGuWr5hKsDuaqtNUZ9bW4n9QzXq81SsdEp`MZ&&j^)(E z?dC9jWcbC{y9{ywcU31m?XB-Wy{I5T~R&u73dF|iu%XhY) zxK)(8*x{Fn!Q;jLy5{-E_v*e}y)xybk(`7`u7&9NI9Wfz-q{BC)cj}V%+%eq=I+C} zYg6xUE4#PUJu2}p@QnmrR&elxGS{pN$!>pgF>{Q9qb%Qs!+dXFn#Y|EXl3(aU3xhn(Zbn1_DG@)qiw@xfj-nbFIz5K70)cX+K_2M*UOc+mWW-oKEd}Zt84c`%iKzxy(YX@ikIJIyZ24F`qwJ^=*`FFH+Sppc@e#+{9f(ywKsMg z)06*f{n+I8qVLacE_zvykZ5jGUZ=%7{nbQ9 zRUUoklRXo@{V{N3t;z5{A>6kv&1AvH1$I2Y{r{f$;pit4_cLqRRny>X76;SC--EL6 zZgForyYl+&y#4=_c7JOMUh}!G@>;_DUrSFNZ&`JjVM4L9o4EA9KfC$rru`9(Dc$-; z!}EapoVSXlXJ#0h{W&O@_xY2|>ynUi>mP>}zrEtDU*>*tyYyaJRhM;U*$MLHfBv_Z z$K?6PTApc~kydu><#P2|zfYWS&wX}W{>IMlaa;dZF_gPHTuzQ=&Md2-=uFJ?7okDE*pM-p{7+Yh)eX#epdYd+fx z=a^3xff1Q;8Rmb483T0Rd|70>c}?c?e`X0EY`5vuZ>-;(dTaK2^{A@fcMre0_fT0V zafZ5F?TKHz*+mKs_vZZ7DL&r&Tc&ukgOvwPhJ_I10iZ@LrfY*S?Q zP5%D|@%Y-CbtaFp>eGW(g>A9i(fn}#|IdwP`Ip?k9e6RZ>~-$`?+Kl!)pmMwd^lLd zc1%Zh{}a|#63eeFEcC8st$EPNzmormz3DPW#t12exG8m0xGRso%`>rUi#lq1PT#mk za{G~2ZZUm3?v>xKy|yDYeRs}>3tH<8#lnN7EEYN2u6`4pV5Gz#|FiJ%v6~MRuiv_N zR_f+Pd)vKNg55&DTJF~=+HSaBchj;;%TE0hb9Jj3Jth~Kp$Az|k@87@K z^ZDH81MR7r3;{+0t6VNl6%u{GX}q+H`?7Mpv6R3E!L>8PuH1@xoV;S|w(O+C`--Im zeq3Y=deLisH-q2i%YwOoW;m|hu%7P{+e#|+n3qzH-EFWzG%GqYYP$P(`|5|+V&^@~zP)&H?1uvh)yco2%WgFo|H(0yel8iT!y}s`6>UEnUf_k$a zJWMpd8JRx!sr-#6Wuf1M3SDNXa@MtG$Fucb*>S)jafMsITrR)ew+*~fg}wEDN_XX- z{1sP=e)x#V`tf#gH3!StCAZ}+T|Ce7RCde!{6nYCPrG?zq4L|e?s3~f`||Iv-n=$? 
z+p)NxkAv%`r5}D(`j6$-Z2r9|&+mT?&c3i?;`!tEmX@`bZ8>{%_MKXn-?z6c|ChPi zK0a}0?5$s4R{HNt%bvb=+cfqq8=u$xopAs3r%UhWZ@G5&+vf2170X;^lpi^E-}Kgb zgR}E(P8WSU{dKdCwtaM#uzbzQ`abdepR>=({a?4$ZpGg0|I56#pD*qB-Lw7%xB36G zX+3p^8+Wd~_G|)QocVc+1viepbe;e9<-zYa>s}-toyB$a{>FUs|1&TDY%jmMYyOSf z^Zp+8IDH}~$CmHq!|4ZDjycadS9WC6VzyTs*wl_mF0i^&z42wKGxIM~z8``Xa$oOA zye|D$Sf{zl%Fea%b~Dor&oW)BZQJgpfB$~b=Ie(q5B5hKuVcTtAkqGN|HOT79K4p@ zUVZ7cecwhg_9v11<^LWC(~WpG|6;}!lf^EDZS#^rleKc3Fby8rwCj2!M|wMpVU`;VO~c6hhA@N(|89g_R@vVNZa>!z-$+=H)M zs|&U+`l=njOH(1^!iM=bgBQ>9v$?>v&37=EIiVb zI)C}bZ4o7_nBr+nmNOF;kym{4{uB8V6dO{H*xZx{dPYNn7kG`wXFO8hN`u* zFWx9T)T_G2x=()62kEf2TP^eVyl3>h|9Ss=2EV?q`~Q^8wojc_U0XXlb;tKj-^$kS z-F!cN^KF;2a*JXMYtPx8W7bbwSilv0{kU-2E&e*Q3$w~Ia@l3;cSIXy<<8&xdE@lQ zmRy%+h*}-mI=R;KP5i2y-*y$5tG>zT1fuJm>K#J#5`N6Fu~ zXZJs6V*P_@g-6#E?_fE0qgyXaAxmK^+o^j$XIT07tZiU(O6It~G1YByN~ zzT>?9=R)7^M?UtKKOR^xJ5q3y{-^MpuUxg?=rh;Po^boySF_tY)@cZ~W!|#79d;)A zQM&7l)(MYGzupwSx0Siy+9#0n*V%28+AOwbpPao-H174Z+Y@RZtxD|NGbbuJtSdNp zkKg*=6C$*3@2d26^7C=Mb$s!%wY#^5i}C-MQR?+{U!m26)gFH*t>xQQ^DZfScf9p} zeh=^UyVgulz42lZ+aflv>j(Ps*I!Ca@=`TbNItPKMH5&ZL<2|L(o}616WrPJ5!E>jI^>r}OS#(mlC$`!hd%3%|mR@|rgbZ}BpywZF^i zzP+h%^WK`%lfON76#t=W@NNFfnOxr%Fm6-kSf#sH^md@MX+ZVA+1ZQB$`<*~;yu7| z=re1}^Vs>``ad_`EqL4d?cDA4Z(U9rdgoqXP)IwGnjTeF*7x=0W%Y~uzij+9uk6rc zmO7_-MxQ=?dNVCLf8)jLeA}PSuX&ex`RDTEEC2q{T)WcW$0KC(nXA?h?>!fKxGhik zb5i+{S&u)Sd9%U&*~ZiF@BG_&ry@;ck16ZdMf`ve_H7H8_wk?`D`D1G`&;2 zw&v|-m%Ol!r(2h9E@RNp4>%?7AapwN^YU}wav2(Ii$3Po-4E1^tT8#T#OST{g81^^ zKjy!`d3{~&7s2_WAKDkcWoj!{t9$VH)NkQy@$+&*m*hvq=akR=odyR}M^A z>6-YeG(vy(pG&9T9DFUmDew6AP0IR~H)k+2uCk|vqbUvt?%_# z2ja@q54X+zDZM{Gqxtq1o*#+d*pL5pGM+a#-m#bg= zmvwo5<=we^w6@qimrGxJCvN&Chmh3U*Z(fL_w9^+oAl}WuXpz;nT3>FB>lGaX`fp< z#eQbnycvp20#lz)dadtYmws8&UVfj^*?VQnANpB*Wcj}5m}F<~*IR$i+&GlE=50=_ zVf>W?<+02k)m~`5OWarUqxJm7^1SAIFL<8sDt(>v_SV*IRbR8BH>dS}-1PlOr*Q7K zH#hCpcFh*(_HJfl*!cF=*3EjcyCUxItJO6&zHA>fbMoZJY0aB@ec##mFL?an%n!yM z11AZ!Y5PpnDo!;R9^`%4)A{F5O?BECjoH@a>tyS`ZM=3{^3N~c%gikq{0k15o_YK9 zr02?&z3--F^zDAWBXvGF4;J(|k# zTW$9neSdRy}>Vh;KXE}1_@z?q+`tUJ__`8wsMQ# zik^ILQ!Dq~)9>Bob8Ok=a_4;N%=>=T;o_{)cl#F4G&_9O%{1r21wOkCyV>m;a*xSB zp74=fpj~MG#EB0Zcf4D|w8vfCRKWCuF7J`$2K$yyitU+j@riePs$h@8X2u#xLu-Z` ziESJX&Q@)q3>+Vpyv#OLW|GfT`WtdUeU0WDXGVpFKAT!8Cb=i3Y#QuwiGjaj9y=vB zZ@MX7a!Ygh&&Y+5o6~RJh_}zGzHJf2a zH$G>{bDt*oNf|rr+U+bYQNHTYjFqQM6%H`X5DiFdWB+h?ea;F7ftL+ZDU<69+moA2 zp0IRiHWxB#i2ZPFi~rRA)#l8!-P2!-@8Ug@eB3@OdVB5L{IqvB*3YiDT5M)#7xi@R z25FHKmm>eQbKi+|-<*4JN`J3#@!F`TQSE!Sm6i1GtLtff+;4c|M}yHn2B#i%#==I$ z-raW{T0bhg@xGbDro?bdbL-T(p7AykR*VX{0V!_nC6|6YN{Z)T==gGhV_K}U&ssiS zhM<|H?+I^ito3-Z!bA%=G7}vY3AGI z=I)N}UK~|%VC_ex50h=aF*rt5UA0J=GpXNDed%C(r>No=Hr7O zOjp)hFiKte(e#L$ck!_`{0tlql$$kf#%8&k+2buW;o*Y)AAY6k7AP%ti8#h(`OErd zpjh0~3)jDFJfN4^!#TBAF*#F2Uw}p6Qk-G0WRJpT=UJ96byrt~hV58!A?^IEtl(>F zx0kkN>ng9_Sokr%`&{myk7a)KsZUM6yYK&MIr+BCVbSXh<)XD4OSTs7{_@K8bnewZ zseNKMZarPSwywEW=W=dH-mHs9qEbyazjH7Dw4k-HDc$G7_YVQbCTS%17#^&cu>3sZ z{WA_-?T;p^>dz}OeW&%$b5Y8b1MikLR&eR~{*{%o9*eyz ziOw@F|8r+A3hV^Ai=@lJA~! zs*kZ#F8=-Ta}QhZpG_j_TR!v%A7h>OQDvSXQ-Gq*0p(R2RVVPWGBDOGV6ywLL~y;$ zD*t3NraO02mmE94Q&_6w_=9q%Own^fO$t2087opYELbSh%FPkFW06M0Qz;Hd9-B5V z6O$OD3@`rAs^(g6(kmEv|MGiWgxKhqVD6dq}y9kkN*wTS^3c-bFt6%hquovrSAA$vqQZ7>~&tFO0UC}m?|WW! 
z^sRr6$tV6PMbrH%(gQZxt+L)z+P!=J>AOXHZ`SI`l|1d_e(`LP{@q&FcTrs5P24Wd z;@x)fZtM|HolEAq8r#}_?>he`J?@T=PwgMO&ZX+p7BDDENe3`KFYxJTH>xvfGpY8l zdEn~cwxHd>!F@`_oy zGxp4^+j)7_g$b(CwkM}ut@>&za$bUA4?~Jmf^x&+n{t6EN0gc8^E6FZnDa<8KJM$v zyo&`sSv*dj%lY1IVyfYs5xDPr{BB8K{s%EPPS(2ash(4^nc0qExxBr}ks|Buy56!= zy&f}Ovs}Ob{e8v9PRG{oFEWnr%PanpeVtEEm$jsJ>q$nJ@`*~-x!dvzuWaSKnZNJm zv*}A1xT<$=Kl+W$X2bnu^FCMatez*cfg?Ti!N)H<9eRI7Br@^^KAOmXyz@|l$P<%p zjStEnloXU3oFnF0oa~-%^Kn)Gr>on%G|jo(dEeAYb$n-7y8HZ3r$aMdcyrFP=;4m9 z36S}y__guJ@?)kB`yUoAEDWsIiZyy_Y9Yga%3CIxkw4?$22K&jr;M_XSZ6p%JyVR6 zV^En_$mTC&6XKw;WQkx~(am{>x`N{ZA8B@*?fd=Dr|H&j4h9Ae1y2{pkj*_+b;4zp zzclps9>|q?<~Qf&3A^9eYx_NA=E{ZDmku8_FFifw z?DqGI9p^JJuW)E8@!R{z^{~ZP)67ScS30z(u#||b|8UlU;S+d9~)R$ zwALqri$oC#URT(tae`)@CE`&d_PNs-)u4)2sWY zYKPz2GV`+T_UdZut9uod3`Xga&1)feCMj4xBD)N z?s5;mBrOsiWf3~-(V;pK{!rURCkq+)FJ-XL`!Yd%hh{g|1qEffRU1BO^?q`;3%&g# z23+`t_%I7Hg!=f0DjgLU>(e)qX!o3(DF0MS3UK`o#xYWhvxJG2}d8lsXx5!0iBwmV6u^W%R+ihLCS^SoH^!A^>dy8go z|8m~8;1nl=faSIxDv9c{#qNx|@_S4m~{rGC1tBW_>YSpQkA@^yL zvfh^|vd_4m9$8)YRC&#|&bIj{>ks@;&r--*ut}<8_5$tI8#UxjC*<*Ra0K!vxFmEG za71_nOUx9w)1u&(*&sGm^jWCnk=Bn)5nAB~+9xMJj;}tew#V*ZRB3Md-t+(GZM9z` zd{+CRh10&Qi(Kp#pK_1RIlAr9E4oXkZHC_<|NSk zkJy>`gxLO<4;#)GZYw`ryJkm0^XGT#=3ISOCsWRt@bLLL{n)4P&u88-xvZPKb9d{v z+(*+lZ`!WB&E8i#gMHqmCHGAz-ZE$Lv);|KepnaIIGVX9Klyad z9@{g%b~IBGenoHi&6EJ=RorM&)tKZi~bBikbeiB-`FN=ud~F19#y z@~IZ@EG=IJx2GX3tKtGr9b2g<-%$@5*|@Z{v4Y1gc2@6GH{K=A%qNctI&<$hpuv5D zBZotz_2Usiv8BC_T(>@Pm0&3Oez!b2FE5YtuzzjEJGNUp3X>n*Z{nhf~zF>9*43OD#}0Oe$DlCf+DZF z-4ZS5N)_>A?KUqVIliCmk=lz_7mL@ZW-Yj7qHtg_m#COP;Ea-w0Sx{x7@Zg--u`{K zMf`xpD($p;^U$&aaFnH z1|bgx?zFsIS@D$fv-dgi7yne+MddR=5wbN~;T%Kg?o-LFfy|lL95UU^SRAsPI2xSd z7~B$=D-Q@RU~3R!Qmv5rs^o7~)TPd=;3yT>z-H;tn^rz$`i7JRiFRj?2u$ai6EHny z1$)IDjXj^*Kc1{SRIq8zp{FMmAZ5VY%MN?kSA6^GnG<-$A<9JIvA$H8P?LOvcg7V? 
z=8y%mM1=|z=O5rrXsxx6`W7UW%DI8#LJNbtv2{SB!iqy);y)ULmaVYsUtu@>(8-XO zi&kEm{8&WIKXcEiZWBf23a@gj>k;e}-o#$EnxS-m8c*_ZQUKd^$-WI7228}CjPZqP474tGEm>gCP&2VGy z5*A=Qka4YjrqZ002dp6rW?gL5=Y7HPMq#m)lbw#R-7b&|Ry4+5bYOAVy=dVo~afZ2Hra_&>DAD4HYJbkA@SPLgz_x}=Wm95UrR969 zXc5~!WhsUlhAamCqFHYR4LBY7ZRVNGNpbRuta#*dxa4DEU3|nJVN(H90n-U;yeS`ACpf+8lvfH@6(doBeX)s~e46+#YI97;187)-Y8 z((3*S^6V8^UW*NnC-Nv9fBW^LP2tJYvApT}Z0Q0B{W dU>5!_|73!-!(@JqA_fKq22WQ%mvv4FO#rw8;2r<~ literal 0 HcmV?d00001 diff --git a/akka-docs/images/faulttolerancesample.graffle b/akka-docs/images/faulttolerancesample.graffle new file mode 100755 index 0000000000..1dd802b7d5 --- /dev/null +++ b/akka-docs/images/faulttolerancesample.graffle @@ -0,0 +1,7302 @@ + + + + + ApplicationVersion + + com.omnigroup.OmniGraffle + 138.33.0.157554 + + CreationDate + 2012-01-25 11:07:14 +0100 + Creator + Derek Wyatt + GraphDocumentVersion + 8 + GuidesLocked + NO + GuidesVisible + YES + ImageCounter + 1 + LinksVisible + NO + MagnetsVisible + NO + MasterSheets + + ModificationDate + 2012-01-26 18:37:58 +0100 + Modifier + Patrik Nordwall + NotesVisible + NO + OriginVisible + NO + PageBreaks + YES + PrintInfo + + NSBottomMargin + + float + 41 + + NSLeftMargin + + float + 18 + + NSPaperName + + string + na-letter + + NSPaperSize + + coded + BAtzdHJlYW10eXBlZIHoA4QBQISEhAdOU1ZhbHVlAISECE5TT2JqZWN0AIWEASqEhAx7X05TU2l6ZT1mZn2WgWQCgRgDhg== + + NSRightMargin + + float + 18 + + NSTopMargin + + float + 18 + + + QuickLookPreview + + JVBERi0xLjMKJcTl8uXrp/Og0MTGCjUgMCBvYmoKPDwgL0xlbmd0aCA2IDAgUiAvRmls + dGVyIC9GbGF0ZURlY29kZSA+PgpzdHJlYW0KeAHFnEuPHUdyhff1K2pJLnRV78fKgGV7 + 4IEXFkhgFoIWRo84Y7mpsagZ/35/Jx6ZeR/dJO3FgCC6MiufJzMjTkRG3V/77/tf+4F/ + 677Z/08/9X/of+m//e63sX/6rR/t329P/TfDZe31vyn4gYx9WZdj5+GYl3E7u7Gn8LlS + cJnX/mM/DZM9PfO0XPZtXCw5DTt/J8/wxNA/UWbv52mj9nPnz/Nln4+tVp6nkbdq1Z+i + M0889ydNzsd6xNtTI54nb6SLlJqnmr9S4vGjFVnO8zwY6dDT9nDJpA+8i940FVpZy2u1 + /+cWhQ/A+Hv+/+zAfvfOMB/6d98B/2iJb/QHcLunj15zXMbAT08NfkoKnHGZL5ODo8QW + +E3LyZxUfu+mdbpMwryAP60Lb4WfPwV+nmjw8wxDJhtx/Lz5wM8TXsq6bR4b/DS4Br8c + eMVPUyr4KVHwU+Jj/459ypactCUH7bAGtXE5LjurvB+XaRV6//i+H6fAdOq/Gc/LNO3r + 0n+zsF3X/v3H/tt/GS8DuL//0L/5w9v+/c/9P7+3k1CW4nONHpdxnOZhfdzoD/2bv7zV + kZn6N5/+66fy+Lb7sX//e++snjDOzris58kh2qZzmRZNcOS0TByi4dSYP/bjyWKX9DPp + 7TKM49JFifHU9Cm/X1ayeR9pDp22Y6TnY1Qx1bf3Qm05OGClvXnfVCL6i1QZT6SpP4xe + MXJOUPZFsOYs2UVv7AR/652/nMiCi0SFRulFc1Jd6TQnfQWS9swtbB/6H3okTv8j++GP + 7a4pZy1qjJtNuwAd6Qp0HzkB3LgV4GzfjbsBW4Aez5hClj/LwvjCgbAtbQAdqQS6i3QD + dOQ4JGM2l8lY5qukJWjJ3l29SaBj1Al0nVSubk7agO4i1QAdOV92Qs+5j+MpjTNwNPV/ + v+zHMZz90l2fTA7Rv+XJ+c98+O2vbzkmHKtyqH7JVyXn09cfs3ljdeop82Rd+84z8sxs + qwvVkp6oXFZ+ns/mfM1sK8noer5mHU47zDOPueC9Es1qK+lLNmcTmTyp5Eurrmpufcrl + nTeNLFd3LuPO1Y1p+eJ6ollbz/jYf7g7N1rGODfTcLTIWbKryKFp9D6QmkYksKNh0mca + 5xa5ad4rctPM8UB1sX/jwEzLlMjpsSCnRIOckg5FNlGSe5fIqKss1D6V9zayRK6OO5Hz + eV5h0CCXoHyJ2lqllFJtNediXi7LNiyI9BuNxbn4Lvd80TJ/y5xf3naud+4Oyqd39YT8 + TzlQT2TeK6XkdV3yOpRl8roRfJ3X6emKl0AqxEti5SKxs1LiIrPW2mhJP9s2WE/ykU3G + T7RLjJXogVUQg5z0zBokp7O0GEZXGjC+YS1Txxidenn0ZO+dv422zyhkya6Mt9A5Tayh + I9prQed88miVlxWKDX3h+Cb51bkuE9ULgbTkqfaEzrSe1sVPrABbl5AzpfK6SEwJJn8K + nDzRAOUZhkI24uTNmw+oPBGl1G3z2KC1mDxLtMrAK3nTlApaShS0HIUvOAUHMy2HoOFu + y3iZV6joizTrxZPQvSmK4f91EmCEQc84C3f0DKYFbpU0WLKRfv4+KcAYZNzTsBkR9aI3 + UBKqaYLxMGbPASnC78ASc63BUxF9PFfJ15FymVaqR1KmQXm8f8iXPqAQeh0EN9RcCr2Y + jZMuTzRCL7H4Ysq1bDLlCniebMDzjEBk2cISLGksN8R5Jk9ZaCURhmVBbzkLo9VjwU+J + BkAlHZ5FskCCKZOyTuvzo6fyHia5ogKjTBl3ohjTchQ90aDoGVd8qnvB4oECitqbwTNc + hgGx8F4ze8Stig7BQJfV891f/vbLX3/6VG2fLzNHdomJXLHOUs2CWTrWYIelgZ92re1o + 
I1h1uYzrhIkCccZkx3WQih6BkwxJcjG0rNOqRLEwH0SEVU+8ET9dQF9Wr3nIVbLx5CLV + wWbzPhUnRvbcLJGlP0OL1kF6KJHqPflcp+gZgc06rFe0aDXOVMBaZ+M3jtYKL7imRSva + LODSY8FLibq3eyUdl2yiJKdCi9RVFmqfEjUfWcJWx52w2bQQbGa7+hwb4Dzjam+zVx9a + 8yuIaG93Muavd/Q6TtP4iBW9S0FfWNGn/0ha9KekRa/SneLGcroz4/oZJ7GKj6ygJL2l + ONsrZrb5Vfz1apJ8HNnpkhmZFId5itQqQeMv5JPRhi+tdCvWqLXPw1Ovbnl45gG1KLpM + asZ4KhUtobbjof7p6Mezx1Hdq5G5LyMrLfq46wS1SjWlzf3Dj8ide5NZ8kYFF1ao4uKp + BhfP8PkuK4KKaTBNKTxYbYsLa1mBQcY7gAHvOmnyQl5PAc20sQJlJnph85eXT5U9ISWZ + T7d//Y2Nw1qamY0PsTbrE6gzTYBy3vekBsnqRi2tmS/ASM29fG7oDb7Mma7dR1U9U/9H + Gb0Mmw4N5r4WBl0mw8JSz6Tg0mhof7sOMjpErF1GR/pwPhKpTXwkn4OPdKWdjSNoPfQ8 + PeF3sr55pi9k3ymfksogGegJJehbPtO4Ocu77eFTeW+jUqvWUhlz6UUzxAtW566latO2 + m1/xAmmJovyCh2rtErzekppRoucZAcoCzTJrtqTdsxzJdRxMUhjUCC3fnaWpdeQoOIJ6 + LBAq0WBoxWzqXW3DoaADq2a4KBEQ3TwWIJdD40skuzL8gqTProWC9guW/vaR9L7b+XvD + TL5SeneN57SR3uFM/Urp3S1mxqf09lQrpex1iKWRQ1ul9xI62F/Oy2FrqQPCiU2pHFJq + XpDNJqX05FJKT42UUlJiKCt3njistLK5sbDX9W9IKRuHi/Elh1iEn82ItpDH4UpxMZ7z + ftFQTTE+mocoAbKUTTTVm712DMYN5uHaSDCMwej85TQdTDeeZZ23Yryb5gRITw6QnhqA + lBQCONWrGFerDoOeeN3F376+8XE4QGWIBSCfn2k2m0qKcZ/po81cr2NCjPvFQsjx9mLh + Wnr/PbyXcVTnwyh2EVuRrnILR7yVCMnEItrdQUr3WfaiUxUTVvNqnuki/ed1c4Gewqub + uUVCrIf8ilQRYZFupFiWd8FV23PBFv0VQRXpEGcPU7Wsj71ItTK3ItVi7tdYNXKtoHdj + uFap5rejVUuMh8m3gnekK9595ATeIypMChECZPiOMi4bvKfR7h4K3hNXVrrbqXoHxqsV + Crw9ZRrUtF68bfDO8o53bS/T1l/B0PtHh5omidFcp0rZGHvizUx9biif0Po+99TIgUSD + d+R84dmTPnYT90qRsIGnCWv91jn6wnVeOcqPmwPFbZCb6ba5Fy/yzGfa2a3hC65ACYyJ + yw15+2O7SL5F4tmuNAcuhQcuhX99sNcmyHJT15uafAPhT4yW7Rg+o6Tb4hCYoKdQMy5M + L+s+Ligg+bK2dV65Hsfomuf1ckz72euWbmN3QexrFhJgHP3qfB1PVIxtmRlt5HU8jdMS + p9Q5iHVbgYm2olFvocmwXti1NWuXRmFgE97F7di50+dW7eTCAw8xnob9ZASomYUNLD3A + /twG6Qk00YEXtJuwVc9zVSm27jbutETxDVN3xsowmaaWCE1Ab3vOjOnGrRXThpQe6opL + iYX52yXPMK+moKYFSOjVuj+xUSo0O9VWzKyaE/iZbgkE7erTQa13/82ldREppRVd081I + hWYVRuT0dAC+TBtWoR93E8xlFSJdVyEyCsLRQoHcLgPVS7MKE7BOG1ZIWQWcz5dh21jU + XAW70NjN85ir0E2wEfo7qBerMOH+hOZrPWMVJoxQ662sQuSUVZggxqjtLleBjcwW2BY5 + MuZcBet+0FbNDcrqX9jTzSokfk8dFCiLZWY5EA8jCAqeL6p6Brnt3FGC1IGjbb1X9sX5 + UEjrJ9wR9Wbl0dqPGC1r5xEO9oxFBicaF2713OVjlxn1cYQcySrT6S6yoxKWeqfNoYSh + zQel5YMS78+6o3n5M8WN+VXZR3JDwqmRGyMBAus8YDSjBn3H1iwMGjyYkENur1ZO9opT + kvl4BRIc/xEP1jZs8W7inFtjWbNkROtUqFmc1EOCbMZwPk7MPiTIpkNsISWEUfQLx5Nb + bfMbcpdCiAbmJf69dbeRMJGTDbmIvI7IC2GENCBDFqlaGZYZNk3WyGXLsSGyl43x6woX + s56JcVONB3A/TFCM3KmyTYlxol/+MvWAh7GdzBNBkTkFMAmKu0xZqRkkdLtZpAFLBaic + C5aaw/TmkxUpsO9ojkSd5wp6pzeJ51HqlRxvu8GcZcEEBbLEHHcc4OvIFsxHDuOBzFE/ + ifmIT3OZkA4jUTeG+cjBJjZEq+eYj2a/JubcNxCSk5jrOCwsZ8F8hF4gX8y/mZir3x0t + UbFhp8/nss5dxSZn1EBeELx34tTDVPBjxwUDaZzsjRNnv6BNTlQOwmG3cLHrOIY3/4Q/ + MkOM5GbvTCezDQ/BytnCMMc1ajp5wvBie6L7eYcqWOT5MxmKheMvpa55SVq3TWjmeI3Y + u6n9+OI+SpXIDfUxshXQgKUPyU3GEX3Ea5Klj1L7NQhzwDbCZHENcvMFVX/q0JbrCfDQ + 9cR4jZizmESMi7iKmMw/RrqOJukOOVJNXno2+41Rjxbz5i/YBk2N7jFEUWQbJD1pk0ND + K16vadGys8HOSr8GR3bMAIPRvoRF1waoTRWLW7mgdkYPiLQpKhXyNAL6uprB8LWiNYOJ + PfeWi1LLxxnO9dwfUDF7cLVkz7j1pUcsGEvteApq8jmNZKP00sht1iluPpeoq6NNu5EC + z7bs55hsp1g589EjfS86UQIBwUZ84wCR4Er0uED90DcTTt613wmetAdMGsYOJ4f5KANC + 09TyRDQJW7J30IpT+meG0C+cEfNWjNI/WMyoOxgsjgmoAowZx79oP1qAPMwV6R8W6RiM + ajFZLDw5t1Ep3NFTRj1Q6bwgKHEzeAbLc9LGhuNjvQwoIdTMeW47s6GjyZWQ+BeM1Dvf + TQk5AowXOYyQaQFiZGDc5gDZy7rH95gAnYmzGDlrCfCsIc1nVwCeuQAxgP2hAdgzbIpR + yxHNJh3gDlEELFDRArD8ZQfKpgI8oyJmVzYF4Bldd0rZJMA4KCaZ3LqLCYDlgrgC2DMS + 4H7WdkEWFYDVEXZTxAw5wMpjs6BxDD+N90DfZDIAqgDn/Cr/NOF/c+elLUnMDKO7C15l + 52z7znIn82wkZYekxED914iIK/EOT59KoM/H8lTeXrHSuPVFoqZwvVJHYDHsDK2fCBPV + wnOlcUyWAcGTNKklgI8MpCCr05b4c1MmWnksdGtTWA901pXOZiyNq860h5RROssSbWfe + 
iuRHxzQfEv/apU3nHv4HGgo2LA21VKmcV+cvgLiwhINC0hNEjvi8ktFNAWIt4SBy0jAx + rUqUwAl/28o9iNKwtVhMPzuDLTiIpUSCWDrLErUzDGxbihTCD0EsDfp0XgPxxmx6M38x + iIi4FSO/7ESPTjk2ZF5AVEsEiBjP+2FVCoi1TN2JD1wvtVhM30Jh1FlCVEokiNkZdqzD + TLxYGfIXgFgL22ANRHR33CgSxf5gJwZXWiuID/nBsWDMD2iQpAicq+HEmA2KAMPJjKQI + JQOqqXPGgbY2zI8gmlgyYNviAmZDxjPWkvqDQDhrimThDfneFD5tiTK9ZMya8ikVnD3A + 9ifvISmDyKhNMTPEybLbqBTE7DWH2HQiZDFO+mASuNFqlrQDZGJaYRMLqtw5hNxwu1UJ + SjFy9TUcEEN/j4vCmgR8r58Z2YeZTF4GHRD0QmHY8wyEugwxemF+P0yEhl0odmTAjiUv + 2IVWQY4tWxyRC3l/UB60E+RCzWAQJ7lQ+C3+3LGQiwn2OG58y9KQiwkhuw9oUuUZuaig + JL+oOU6YjGLcZn6WZZQKVWtmFi4w4xoV/2QUgX8yjoK/ZxS4s35mEI3qZEYma+Bf2EfB + v7CPxL8hH4k/pCXIR+JfuEfBP7lHwd8zEv/kHqxD4J/cg+1R8C/cI0GRdej0o+bkrHCC + 3WVWEvLoM5q6lV+gIjCxDUaFRf+iE+x3t8H5GZFTwjRLnPKnSlLuQpZL6RLIU6q9SmPg + Lk5jzGNVbcQJ/89ALDwa2PzlnOtDHhzTHnYjg0QrZdBnZl7rBE+UQU1HGTAtpbKlezUs + mVXKyVtqlDl7REEo1lVCNduaJ/s6hpzoETUTZZoeS0tpYj5ikqXNGPNryviGSb7Zqh5J + RiPnBPE1eMpFC3HHIxpkb+GaMxGPVJM1IA59SGooMm7nlhaNgPOLJFJDsQD4c11H4Cqz + nPPgymDCReTNiIeJVRtxvmr8JQM9C/GxYDsCPIl4nYijbkeAoJxYfnzf2Jk2AolOvKJ1 + BHJP8+WVPFjojuvGE+96aCplKEWFxCtg86XLBTf5uTegG438h7dduoYeKW9NZQE8dpQo + r9SqklLA/iKe7Ps68jpFXOO14WoDi7p9xm1Z8tEIPHNxSnyOPb+mgsUqsf+jLM9Rz27v + 41ltK/CglrpWtwS74+EaMDP2jSCGQ7sJlxn3d7qHkqMSpyCuu/B5WRqpLE+yfEgy3+UJ + 5D4CVxV/nzptNWx3cxBybmR7SI6rRqSiPQSov+WQme94gizIeMdmQ11iu3cT/kAZ7+Qg + ft13zMGX8Y6w4y7WfMcTc8V4ZyDc1uI7ZvewxzHe6dnid9SMTHeoKUcBTySGu+rTOHa7 + 7TAMdzrBfed2O2asGe7WsTmPAwv5ELDbTbVUaLQ3b8F63W730gTwy2MMnck0rA6jrkC7 + II8Erf6CV0KrpIO3QKJUI1PeXkK7IDHlI0ZVBbQL1rdcxMjZgBafbLqIC7QLYQXmIk5o + O76ClItYMiKg1YLST0DL5md9E1oF7clHXKDlxiF8xPCFgFYdm4845i7vgSz2hMKmYsf+ + Oqcqy0dy1jaiQln+fgb7PiDodJJuH3SkEGcY7pwOvkqU3S5XMYa7josMOk5/KYETzBg9 + 9/gY7rUEe62UiVYe67pSTF9t2i7LzhbzBzWd6YtvU33ZWZZoOrNWGPK77nuWGkFt8+DQ + c3SWns/TP7z4JXAZic/yfnGqEXVriT4052+RLVgDMV8Uy6xHHAgcmQrsUcx6CSqHuJYQ + xGA+EYGEUV9LSLu3rTDrD91DNZDFEpzSWQJYGioQR2eEwMl1YyECt61ISE8M3+fxRRCX + fmyWX+XTf2js30AsDeH7WruYM4zRzwVi7mLF+ou1JcS1REAs4mS8zofnFxY3rbywi7Oz + hJiYAe/MALSrkWgoIS6dVYjLgLIVQaxrbJ9HQNy9votzJD7L13YxbOLqquAzrgCxCb6m + atmEksYh2MPNM47UyiOunuMEO8NYdQMlaSIeMHA8lXEgRr7ArDe2MGDTqg6K8ZCYUjvr + pDzCSMTtngmkEDmEAKzY2gdmne3lR7VvOAcqmutCNpTInxNYQghOXRhw62Se7LCW6Dgy + tOZ8BYVO0ed5jAyCJGuAUCO7dPWUFB9jPonoitciDiBZqpshR0aaY/A1L4GtPs2mWFGa + ExsHDnpZiFcgtBGfFFGT7G5UF7EnurbVcTvN3ww7IlBD/i/EzYEDAtudcFX4iBxgGGai + GhqqKsHHuSKAiPjYoWUKm+Fs0JEmJzfADjgSDVirFmSpYAFxYvVO94qycZAgnoQM4QfP + jDIppPYtkJ/hI16cLxvzqiYbsG9SDsX7JPAMS3ZSIh/JCj07WQXcZqZgaSGhzk6oEuCv + MM3drl8SfG5NYNUb3QT4fD+BbeORLgX8FVqyWaRLgo8PRbGwOGQS/JUHRtGgHzkyww1+ + 2DvuGb6IKvCrL+i8hbok/Na/hboEtCv0dSPSpcBf51Xxz7zKWqq1Uq+0E0uocP3kuPEx + 3ln4N9YKlw0vWPhd/SS5mOpXFn7Eb9/5Ax5Z+PUD5ho+kxap9MOpeJH7B51rjuNJTEjP + iupQW+TBQuC8ZFS+g+iTkuvqpKS9A8h8azVfUBFRhvb0myTZvInUpn0R5bYDdoXpoezB + a5tiQGvZcLFcuDeD33xGM9QmHjDPSm7KssVdxQPL3j+pArJvf8cP/fzpN75ZwQImFgNb + BC/KiaThMoFfVjGKg8kyyLIh6lM/tnJw1ShNN8ymJQgtJMJDckV5uomnnH8C41HwC05T + hUh4gQHX8zNUn0oYfZ432oesXDw8GAW/MmT/+K0cBjtpsLccyW4+sqq+0NFomwnIMLA5 + NROYBOZGdJDyYgKWp3iPMgGPUi8TsAK0ZDOMCVjeAWBa5TKBHEXEYmKgPorBZEKzJrQC + OgYghg9bwazgmsO8dMNs0VbIeVzJMHXKyOzCH0udyFCM7sbv0qASImfGseLNRiM1w/rB + T1tzuHPlozQWiesK1ASyDV0/iK9ipCHt1C7+6JVwKMTcgl0+7MRA4XVgE69TtyBlCfSh + FHyHaGfqoXEwmVWGzWm6SU3hTEbtRdaCKphETOTGloddO8ed0nJ+E++pzrAXdt3TK/4N + NtjghbrAmaTvw64RlLGceV1B9XovLY/2kk5WVkRMhPsucywWU5ZtLgemFTdznPGyHJlR + lyNzCtYK6KSRrmZ4P81yzAqS4s6kLgdnCaBFSXI55J7mHMkfUpaDL0lYNAAuyzELUILu + 6nLIQa0haxVjOTIrl8NcD1zul+VAQrKY7I1mOZQ1HxMfMib4/CCJPGvNcjiCdiHQlApU + g7E9vlYs2xtWZZvnztZm7YeRMKpXPNLFlfy1aimUUKn2ZY7nm3AunFx4OPl2aCIo0IP2 + 
jNny61JmDSoLUROlOPzISc/C/YTN3pZCtty39lhRZUHIk4sKdrQItRqc4Y1Cs3bLlubA + e1Z2W0vVbpvWXl23Mhs4ErP5Khtxrz7oz/1uk9xAx6EYdVT5oehoqWTIK3Hglgc7lqa/ + sgpMkYQBQLw5NwLiXdMyyzlGZaLRyELY2GWvMnb8e/B0GLbCUuBkx6zbPRHYbTPijBsf + z66L7gEnOaealwSfmDBH/GEGcFsg28PqnUS4cIhAXATeGt+JnveMXYSeARx8fmHV+AyE + n3HTKPlgWLVQ0yvOX37ABo88PvMZTjqs0HvL2TCebJTt5Kgnwce3Bt3PL/1yicReqTjT + OOPbA1KhYrxcN50GE6hwg0bAuhhDQcXu1GY+Idc4AxWdfwasyQQqlsNvEZHjqNhd3HAe + LSpqnDsMOIVgEioawKhfSVM1oUIOo7TfiktUaAmLhttOCgUqljPvsXZCpUzuyfaGSDF9 + ayH0OSwF8OWgAjGtuPph5UcZqZZzyFxjcbjG44H9h6hTDhIXLsms+fU2tqIivhjsoW/l + Jj71OQnlYAnJYXFkDlhAj5bL56y4+/XUZRRt81N5VFfOIaVI/1zyRrUFg9hGyT0WEtXa + 1i8EsX8vfK+o3uhfNyvKoC4Zt5PTVnhEn0pJLSK6VuZ4wgBktKvbiwKDVoMNzZgTBsGK + Jp5tYAGD1MGoLwETBloih9UoMGh9/EAUGNQ2DnKupRMG9X/qBwysmmCwUe48NDCw88DR + enMYLGPXzwwFDLbTbHK+9i/FBaki193aE2hhaco73SNjWp9ux6+WwZQ8HAMb6d/Ljyv9 + d/78QAnD+mP7TcD3/wv4lLZFCmVuZHN0cmVhbQplbmRvYmoKNiAwIG9iago3MTMxCmVu + ZG9iagozIDAgb2JqCjw8IC9UeXBlIC9QYWdlIC9QYXJlbnQgNCAwIFIgL1Jlc291cmNl + cyA3IDAgUiAvQ29udGVudHMgNSAwIFIgL01lZGlhQm94IFswIDAgNTc2IDU3Nl0KPj4K + ZW5kb2JqCjcgMCBvYmoKPDwgL1Byb2NTZXQgWyAvUERGIC9UZXh0IF0gL0NvbG9yU3Bh + Y2UgPDwgL0NzMiA5IDAgUiAvQ3MxIDggMCBSID4+IC9FeHRHU3RhdGUKPDwgL0dzMyAx + MiAwIFIgL0dzMiAxMyAwIFIgL0dzMSAxNCAwIFIgL0dzNCAxNSAwIFIgPj4gL0ZvbnQg + PDwgL0YxLjAgMTAgMCBSCi9GMi4wIDExIDAgUiA+PiA+PgplbmRvYmoKMTIgMCBvYmoK + PDwgL1R5cGUgL0V4dEdTdGF0ZSAvY2EgMSA+PgplbmRvYmoKMTMgMCBvYmoKPDwgL1R5 + cGUgL0V4dEdTdGF0ZSAvQ0EgMC43NSA+PgplbmRvYmoKMTQgMCBvYmoKPDwgL1R5cGUg + L0V4dEdTdGF0ZSAvY2EgMC4xID4+CmVuZG9iagoxNSAwIG9iago8PCAvVHlwZSAvRXh0 + R1N0YXRlIC9DQSAxID4+CmVuZG9iagoxNiAwIG9iago8PCAvTGVuZ3RoIDE3IDAgUiAv + TiAxIC9BbHRlcm5hdGUgL0RldmljZUdyYXkgL0ZpbHRlciAvRmxhdGVEZWNvZGUgPj4K + c3RyZWFtCngBhVJPSBRRHP7NNhKEiEGFeIh3CgmVKaysoNp2dVmVbVuV0qIYZ9+6o7Mz + 05vZNcWTBF2iPHUPomN07NChm5eiwKxL1yCpIAg8dej7zezqKIRveTvf+/39ft97RG2d + pu87KUFUc0OVK6Wnbk5Ni4MfKUUd1E5YphX46WJxjLHruZK/u9fWZ9LYst7HtXb79j21 + lWVgIeottrcQ+iGRZgAfmZ8oZYCzwB2Wr9g+ATxYDqwa8COiAw+auTDT0Zx0pbItkVPm + oigqr2I7Sa77+bnGvou1iYP+XI9m1o69s+qq0UzUtPdEobwPrkQZz19U9mw1FKcN45xI + Qxop8q7V3ytMxxGRKxBKBlI1ZLmfak6ddeB1GLtdupPj+PYQpT7JYKiJtemymR2FfQB2 + KsvsEPAF6PGyYg/ngXth/1tRw5PAJ2E/ZId51q0f9heuU+B7hD014M4UrsXx2oofXi0B + Q/dUI2iMc03E09c5c6SI7zHUGZj3RjmmCzF3lqoTN4A7YR9ZqmYKsV37ruol7nsCd9Pj + O9GbOQtcoBxJcrEV2RTQPAlYFH2LsEkOPD7OHlXgd6iYwBy5idzNKPce1REbZ6NSgVZ6 + jVfGT+O58cX4ZWwYz4B+rHbXe3z/6eMVdde2Pjz5jXrcOa69nRtVYVZxZQvd/8cyhI/Z + JzmmwdOhWVhr2HbkD5rMTLAMKMR/BT6X+pITVdzV7u24RRLMUD4sbCW6S1RuKdTqPYNK + rBwr2AB2cJLELFocuFNrujl4d9giem35TVey64b++vZ6+9ryHm3KqCkoE82zRGaUsVuj + 5N142/1mkRGfODq+572KWsn+SUUQP4U5WiryFFX0VlDWxG9nDn4btn5cP6Xn9UH9PAk9 + rZ/Rr+ijEb4MdEnPwnNRH6NJ8LBpIeISoIqDM9ROVGONA+Ip8fK0W2SR/Q9AGf1mCmVu + ZHN0cmVhbQplbmRvYmoKMTcgMCBvYmoKNzA0CmVuZG9iago5IDAgb2JqClsgL0lDQ0Jh + c2VkIDE2IDAgUiBdCmVuZG9iagoxOCAwIG9iago8PCAvTGVuZ3RoIDE5IDAgUiAvTiAz + IC9BbHRlcm5hdGUgL0RldmljZVJHQiAvRmlsdGVyIC9GbGF0ZURlY29kZSA+PgpzdHJl + YW0KeAGFVM9rE0EU/jZuqdAiCFprDrJ4kCJJWatoRdQ2/RFiawzbH7ZFkGQzSdZuNuvu + JrWliOTi0SreRe2hB/+AHnrwZC9KhVpFKN6rKGKhFy3xzW5MtqXqwM5+8943731vdt8A + DXLSNPWABOQNx1KiEWlsfEJq/IgAjqIJQTQlVdvsTiQGQYNz+Xvn2HoPgVtWw3v7d7J3 + rZrStpoHhP1A4Eea2Sqw7xdxClkSAog836Epx3QI3+PY8uyPOU55eMG1Dys9xFkifEA1 + Lc5/TbhTzSXTQINIOJT1cVI+nNeLlNcdB2luZsbIEL1PkKa7zO6rYqGcTvYOkL2d9H5O + s94+wiHCCxmtP0a4jZ71jNU/4mHhpObEhj0cGDX0+GAVtxqp+DXCFF8QTSeiVHHZLg3x + mK79VvJKgnCQOMpkYYBzWkhP10xu+LqHBX0m1xOv4ndWUeF5jxNn3tTd70XaAq8wDh0M + GgyaDUhQEEUEYZiwUECGPBoxNLJyPyOrBhuTezJ1JGq7dGJEsUF7Ntw9t1Gk3Tz+KCJx + lEO1CJL8Qf4qr8lP5Xn5y1yw2Fb3lK2bmrry4DvF5Zm5Gh7X08jjc01efJXUdpNXR5as + 
eXq8muwaP+xXlzHmgjWPxHOw+/EtX5XMlymMFMXjVfPqS4R1WjE3359sfzs94i7PLrXW + c62JizdWm5dn/WpI++6qvJPmVflPXvXx/GfNxGPiKTEmdornIYmXxS7xkthLqwviYG3H + CJ2VhinSbZH6JNVgYJq89S9dP1t4vUZ/DPVRlBnM0lSJ93/CKmQ0nbkOb/qP28f8F+T3 + iuefKAIvbODImbptU3HvEKFlpW5zrgIXv9F98LZua6N+OPwEWDyrFq1SNZ8gvAEcdod6 + HugpmNOWls05Uocsn5O66cpiUsxQ20NSUtcl12VLFrOZVWLpdtiZ0x1uHKE5QvfEp0pl + k/qv8RGw/bBS+fmsUtl+ThrWgZf6b8C8/UUKZW5kc3RyZWFtCmVuZG9iagoxOSAwIG9i + ago3MzcKZW5kb2JqCjggMCBvYmoKWyAvSUNDQmFzZWQgMTggMCBSIF0KZW5kb2JqCjIx + IDAgb2JqCjw8IC9MZW5ndGggMjIgMCBSIC9GaWx0ZXIgL0ZsYXRlRGVjb2RlID4+CnN0 + cmVhbQp4AbWdW7MdR3XH3+dTzKNUKR/mumfvx8RFqkKlKIiV4gF4SIQdh0gmSEDy8fP7 + r1v3vpxzZIogLO9e09OXf/davW49/uP4y/GP48Sf/TiNx7qOn74dfzX+MP7k68/z+P7z + ONufz+/Hr6anfdQ/XcXvIBzbvp0PfpzXbT5dhnmk8nFQ8XRen9Z53ceP43zeWvEDxdPT + cZo72ny+UGF+mo7twmMKx9leHt5bcT+2p7Pa0ssXxrA8TfvC4LKl/XTxCuqqCjGSLA8f + Rto51vN+bnVoSpOKBqMUvfG+P43yVWm4Ko1Wd7tcLucaPKOlTtBydjGCowECWlZnKMTe + j9/fYfgdK/Ez/vm9r83X39iyTeM3X7OCsxW+0r+0iO8/DrYE2+XpdD7tSyxBFbslKJrh + Oj9dln0WroC8+cuxBPN+fgLuXIL5ND1dtkOrFYs5n1avoCWoQixBlvslSJqDXA0arNVb + LEGVr576EtSzbgly8P0S7Dm7WoKafC1BUWoJivJx/AZugTEWMcakfd4BP6/wApt+OT3t + JzYqS/AP78Z5iYVZxq/my9OyHPs2frVZ1Xcfx5/84/o0sXjvvhvf/Ort+O7340/fGUfW + et40O7z/ON40y6rMyzrtj5v99fjmD2/FvMv45tN/fVs/3w6/Hd/9zLtrvA4Xz9t+ucDO + p+WyLZsmyfouTIzlnzQxuPnYewpLOsOt0yyGzlrzmS711v50Xoyno8yEDR72jVOWiwOm + PSnKIAjPk5i7Wl02r5M9Z7kGl4QPI0JnO4vBT0EbYDyNJFvNYnRLE/E8CC8W4YV4vsdE + 6NHfz5m2ASQWNwBqZxlpKLQ+jt+Nvx43/vyW/fK7fl8lQ8cysHUva3C0L0NS+mVIWoLM + 9gY642sD/bhEK5KuRpkncKNhE7C2DPN0epovYvBchnk6e51YhirnMhShW4akxTJUq45a + dZu4FuHm+XWxlqEmkstQM61lKCx8GarYlqFIX8bh2+zHWrCiDs8J7uaf9Xg6Lvs2j9tw + zdxw4S8+FR/+R3Fh8ePnz/+cxP/MH5//9BY+XIY3VeuHfFSUT2/ZMT+Gj4fTDEfGqRx8 + nJSej5OWXDqvT5zBkva2XU6zRMW629lglP18OCzJx+N+hmvtkM4NtJ8XrxMbqMqxgYYi + dBuoaL4FqtUsRre5gXIcsd+yGFxaxayeE2Hfe4OnnGltoERsCD5OaGoDjVkD2fjdC7w7 + z7ChTtCPQ3JQUjroq1ZyJufGxqGc0M/L4q0k7w6IbDupi3fHeYNT7XBO6Ocdka6+s+cs + B/RjVkCM1MyTFtBUq8GM2W1iWeO4fp6cf1c9J5LQzznTNoDEx6FPaNh1IULHJAH9F5zO + HG1x/Pgx2vPu9rSdpm1/xLtfJ98VC/85KcWSzqxjY9ZP33z7doiD9y/F1O9h3XuOvVOj + 0QlSjV4vMwrqx2FB+u6jlz6My8ypIxXaCYt27bieUTFEzCK6CHqplxadh2wRKw3LMksR + PrWGFtDfR+/Ff4fq5oVOdXaCKWLZjCti3kVobF6IWuq6+9kpaozBhsVTV4JrEohlqepH + TDFVtChqC0BKROwEfXx4Smfymkhu6S+BpJV6JI0Q0G1rwmPAbobk4A93W4lAcuRYTdDj + x26L4b3470DSCw3JwQkGTDXjJesikPT+Onr3syE5rDbIzuRYcxINSZtiQ9KKDUkrwkjD + C2ruto4om+B4r4ny+rovUnCPacwDEIMQ7ZYD8K9motReYaLgp8dMhM78jPqKXr6etILo + rhfjIpWQdMuUXGSPF5QgVd1iQaOoXQMXeWk9i4fiN3qw8VA0M6zYXtGDrDDpbOqb3yZV + L6aYqoiEtAfRQBbPekePaOlclfpf8ZxBalQmOW12MWQkZ3RiE7L+B597yMwOiy/SObv6 + NrNo7VnszsE3jtBxhd3F5I8/uaArIaoQa7EE24QlZKujX4mdfnfYqRhgRQNVlC6Sj/aH + v+q5japhF0O+xk6nfc39HrsXD5ttwXxi3TEdacItwelpmqZlfMep3Z06nG/H+Txd0PyT + YcIc/PoPf/7hT9+i36VRGFYa/DRxtut/7YestOPpfOzTPK7If+1z9CKNn5LWanqa2ddW + XCa0MHvETvMlCALj0D730nIWl+vdLC+5tNHYgi6IQhE9WUlrkAPxx8C8Y6GuWGROQAf0 + gS78siOoo5hgGZMw+BiySAN6flOkx2xxZQL0V0Uks7mZcgSD0KjxqWALmwN25J7R4LLS + Nl8avINKHbwqJl4bJ2xIBwdQ5nSDF/Wsh3fYUMj87M612nb5nhJeL3XwOqGD1wk5+9Zg + UkbvssPLCTfPb4qt+qAJdPDWBBPeUdMveFW4gleEpqUNj3wowTgLWwr+uTewYJcd/RcD + q2OXwc+Xb1IDKyXt07+lllYW15dpX+7EXDlmN9wGKN4HA0c7sV8fRrjM95Ue7BPOLiwd + 22hewD9kZFySOmRERBpLl8Gplu9yyKBr0qb9eD9Wb1b+gIBHeEz4j6y8nvARMpahGkrK + qF7o8Lr8UqnVHebzwopW5ZpHda4Z1shU0IpeEVzxGp7zWlTdfTsFiPrVgaiiIMKNbPwy + WAF7J0DEqygx5CjiipJ+21A86Tg3GO1Xj6MIwF9zOVmNmuwJKF2DDmxZJHXV4AlCvgH2 + rh74UtxX3xl0h2ZOqI1AU01ABhWu0BSh8ccjH+O27myjGR1j4UcyiJ8sg06WztuI9Fk3 + lOevMLvgpS88XeRIDiVq6H2AK6Ie4DFCJ4ktcQOHQCN8gGBiF4FlNbaLpL69QP86ZTqK + 5Np73ogq+Jj0fpZYFmOVavHAMZbd8fv9UKOhBODos+d9OfDt8xtTWN0e2UqjSBz1pfw9 + 
0H/+Hvnd1Zo5+7T56vGM6uubJjv16eaIBi/aut5C9rq6la3grOSgGzqUjWAoBcpOKNBO + C4qEVNFGwR/do4yzUWC153u47QvmE+KpcFYBIHIOKjakaauD7JQtFUrqqkNRxatnfQGN + oRVtzB3cNa1aY593G5eAoi+EUgwV97FIrzMS6tA8sq2Kj270sjho7j15f/VB85zqtp6x + qfAnMxKWnAP0WM7ocU7RkqHwnS+IDqdowVRjWAloYDaKO5yCxoku66ueFNCA/Z0/vV3c + LnjItVWy3UUevKH1bGVbfgbmBRYFgE2fCcp6nHkYbXnButJ6nofV++3o3U+rkoPVctNS + zqV147MFHCZq4Pgyd1jBUY9NfqkOhir+YWbfUB1Wp/SoOqVhiBER+CSGHO9SRxrOGzIO + I61DdcMhGmsRqG4zr3M25XpGmbmz3FFoqKJJWnVhUW1ZwboyduKJFzp69zNQtcHq3BOq + OZeGqs+2A6dQTaxuTZuhi3LpBDLGWVd0gbtYFIfONG94GXv9LOx/2IYjhTDUF+tnXb8V + BfH5TsjGdddJ5PYhpkT3y+1d1HKoHtJYlvWJ0FuUNMGJWOpELPWPYkrz3/OXh05xibGR + ozbeonrXLP8qSXP6vq/LmAK6iJw/tNNkFLKZVvlf50OR2aScOGuOA30L1lmX+cReXVR1 + 4WTUaLyEBYHiQkxGMtWfozWrxXx/iXL2wBtGwUHEiBa0jGUnWD5hQK7r/HSWJblg/K4n + MTQR4+mszY6EIdqDQ1TjOU5YlgvO5GVTJVk2p2E5QHWdIZiqL3vufGFKKi6IpA1vO5W3 + p3XaFihn+SY4OOhjYo20pzm28LnC4dAOwpgFxnp5mq2vG7hst97QpIJmbPzReibAjG89 + L+cO8kOAqNeAHBDtQHDIcXX48dBBHs8D4nw/Ic8eEnJ8lVrUHTdHQr5hAMz7ziIE5Bv+ + EJy4V5CLz5cV4yAh38yUZZ0CcsSFGZMJuYoF+Ybz8HSRwZ2Qq4/LabVTOSCHX1i983Gw + LXxHXlYZ5ixflNtkOF1vaLXVH8bCc8fK48/B8kBOKC9hYgt8dTydVuJKl1sF9V1zfHRi + 4PWG11caxhHZgmcfyy3/Q7oWy2RMa7Jq/6533D87pvD2XGniO0KRxJbxBIstctFcK+MM + 6TdvHioV4e7srVcPKPzmbT+YjN+TkHHAcqjg8gwR7JlWRRHWVc7O1Q+xJm7Yu0gNzgk9 + Rh6hMFsRAWfSJh6z9l4h3/+uPw9KZkYl1z9aJ8gkGqtO0Mq9GJ3YY86q6iTfvz2D+qha + dOXjv99bcBMmn7SiG9XtzbntqWX8H0Np2o4JUc8JoPyF0w4bPpNPI/XSFm8DKakUC6YA + miuntcxZDbjbFIVL1YIdPyqyZo3Avm7V9gfGM8s3n8mE0oqY5JK7FOsv/HrxbFjNS6rQ + Hs3Hs+9xS/RvSkb+j511z+UM5QvDRjaJJuhdyW+joK43tpkzO7vyZ60rf7NbPRwDbMEO + mWzHvG93CkRbvBsmeXN5O6QntGutcJ4ZLGlcBiveT5iMQLGGzTaB3v9m723sOQ2ytABt + Bv50CVT5JrqA+VNxEtCO3rVWtXxWst0bvUTd60Xdj3kjaCKePO3rzj4AECVNsVIzrYph + GbERLC58OeHGVG4U/gb5lmSC4j8V53hBLmTe0xHqBBxwaozlt3etOGTbOPD8sRK2zhoJ + UJ1ZXEK+mNxnUno4aQmLcg7OKNqnw05zhZkXNAxoSsublkHh2YWsIIsuwy0HwVeCphcU + CnQ+BikHBO/PyhkxwrziCN9WtQF/yf01r2c0j2Md1BPuXAVJYLun5cKWE+3Aqxhg8AAf + w7Rr2ldoSQe4JqF6dSrAo8X0Fna6Evck3uOOEjlx+hbew85Q6dHx9kKHtxMc0Ho3itF2 + 4m0phfN+4BIIvNHu8K0oBp947+jc8LV8XYX3jt514eQG3cBbKQ9oToCWeO9M5wpvJxTe + yplABDKtwJsERylYmxx4ifcg2sJfCaZGfL6g8QW4iVaHd5Juzv/hhsdjt7pr847JF9ST + s3xTqJFnoiK9dyqcuf+Snts6ft+X2VAh9/rRKnFqV3R9eOVcJFtVASfbDIq5MW3jbZEJ + YCI7/EyE7Exe9R+HCNQeDN7a28mXoRWRT4oAens4/kRmC1f9Tl4ihG6wVK8MzA+6LorU + +fpOMK1ptndH3ry0M+8VODib0DbpTcEieGKBqWwLHmjxSdMs2Lxk9K2NNrOgvMp6Xk5E + ubR1GwXhoba0wbMWWgCuUkMEq9nfTFqMgsVwihJ2fVhBWGmq0ciNUPMMq6PFILBW1umM + JlaDv5+itvYdlRV8vMJVc0ewIWpt74hLmfxQtA6iotXkd5Spy4VAYKMQNre2GkQIoRMx + PhClrYSoaAWRUy5t6lWlQcRui+Y7iGoQBVEN9H6KPURtkjcC4HbTIvJ8XdEaUNqxEkwK + kFiqU/YyfoVXSe4S7EN00zt9+J8s8218Uxw+BmFOuTDlj3zyOYTA8Ob9504IdIns6djW + QdySW0nJMrVuMec+Wx6bSUeyleF7aUEnhU69giL//pgZKhRRBLI9zYbLCkgCvR3V2TBy + nlm425tbPdxtjfGbY6OGQpE4G3YzmbJYcRSR5zHMaKgow0JHFqTLChYhfabQV7QBWzwv + EfApDa1nw6SNy4q2H2qogdrLvm1ZgNXKZgGrBrSXO6CdkMhtBOvNsx2EYYPTe6A35E0D + ekd4WSiu1k1WUC6qfjegiTRZYkYCrWIBXQ0VRR11+KmYzwhutAJuwuuKNuAO6JpSAe1T + TogGL14B7aRXuA7lDveOjrIwi8RtkZvaQqf37PbQ+Iy0m974rKM1uQqH3kOTM5X8RUIc + cZqmxCIflYRaGBN4eLzslsuQz5tBkS286OrN5pppZpYLW0W6XOtuA3Arh6GUz1t32cLt + idzbvNmbD/bLzuXOmpGG82beHp3LN3dp0GPRBeSqlSPH1NbyJjsF+VRe/KhTfuxFh/FV + dGDBw0MrnR97UZBZiORbrADn01V0YEGLo07r2csxMC80PzZi0qrjp21tyWkbXfFaKzz+ + GVV8sObHHmou5ceO2XbgiF264jNnOH4QvO+rjbGFBZLSwRl1EhhqFDAeFhg2fJRitQJv + 3JCGANzghFKLIJcMQZyNKDR1Ck4vKwqmgfnDDs6gCKlqywuxll2h/cR1rPp9FR+swdnm + UnDGbDtwAk5mGVjdskRv1FdYQE7jkj6dhtoCA38L6fPHGwtP54tSM80dY7edViwQXNMg + qrgSK4Jz+4m0kShpLs/Z/mpsRW5lbUr+bsiqbMlFCXvuqm6gZBEAhI/Z/hb3a7b/glSy + KL6DxdCx5pyAp98GOysljdHL32kELQIEEqJiOlXD/BKcf9VGEAhN3PbzvIlsAGZ1B6kb + 
1sa9hqthFSGGVbgSDPBxEgOyV2pYSeiGlf28tLFWAvANKw3z5vYTHU7TGRv2QcSpMk4/ + lYHYuXc9GMWTu1MNBfHhqbZyhl649SAHm4vjVZe+tC444Jzzq45mbrJA2bK2FasOe+au + pXuDw/Zh1tsErE7S7BEeTtdq1fGjn72ePWadoeuxWnoR9mwzxtwfcreJJs347A45u9E2 + r+2Qu2VZaSibXZlTSJaJeYGxu0awEJHBkxmlmW3P9G+LQKCMm+LmgXjejWhQP/k24a+Z + nizLicbibV1vUttRlC6gzixapuo9R8uJN60H/28/OKIxBLAR8HOZHOSdBYlxNgKBIktC + 2vCUFUHyCEnUvdITEGQy/NZ6hYtYYr9sFPdsErKNeAXmuh3J8zwPMpzQ0YIy7pTkln0g + eVx1qlEUIQfur7SBk1yJM6sf+Kq7kN3AcWEnIbrNV7qBW7MtlGrxJdODupVdAcDBwxfx + 4nHz1fo3Pm+uu34gkJgiwbLTjOF+ww/EeX76v0pd9xjOf+cPjzQNbyoY1Xu6mnDq5l9e + 703hWXQ48zqpsGBRf+ByB14c/cLHBDth3N8WuattS1Oso1D4PesM1gFuy+iAUK/3hopD + SLcK31NkJFmxZ5nnDkF8u7ghjlHm3EI4FNGG2MIzwpkdJPYWPqOd3K4iiW3Yo2j2WKD4 + faKMOmXhZH+KQsZ9bbwz7XVtb8W7q4sgvIcFciBBsqh+EWt0LzISfOOtEOdFmtBTN50k + ddNJUg5/0eVRDTgmwOJJI8wSTlybbKKByW+Y1XSSIFEU00kS02lzzH5Lqj0M31azhur9 + kct+W884p269jWzwe7/t5wyk1tH7qXfSvmZJLvioLxMhtg27gkwzE1IE1QnVQVOKg2gI + x6qHni6aXDeoQkQvrupJ/7hv8f701bmRNcEPV6ICX9Cyb2U7GK3rW2ZK0rLvrl7ru28x + mOXhSuQI2kz9JO7Dyy1kdiPq3uAsy5DZK75fk2qGVKCMAJZsJlnhCr1WL1AW8hhqvkJt + NdA3JKJvWnyMctXsUK6+O/RavUBZK5x9d/W6vrsWX0K5Ws799ECsN5RvpPqbTsm5Rxkl + sw/EEx3rMZGUQGm7Q3nNeg1lJd0JZUvrjz3PTKtm445nUM6aDRO7MGZ9d+hVi91ezr4x + MmrPd31nizeH9m0go1pOlMViN9p8Q/l2L+9fvJcJ55MOTMBlXyYxLK6mE58cwNmnOH09 + xbBXERuZK+lUtqfi0Ku3H4JJ4lNUwujsuiBVzEVRPiUA13XhT7suePsVzKojDe8luNiU + Q/8xiS8P+8jCEj4B10oMm1yQhKueBlxcMHVsA6567mA/hqsqOVzZRcDVnjpc2UXC1Z6/ + CldV/bFwPfTGPbYBLUGuwYV+38OVTx2udeZLFrYVE658/iJcWSngii4SrnpqcFUXBVc9 + fx2urPpj4To1ZnygNJorRh6uMFndWxBFFEd5E3CYx3EeBCKncrl0xtxjjdRf57LIZrbc + hk23E5Xyd+WrIi0jSiil5rjIuo+0Uqm8yORMy1jRAS+6ZqOkz4PcCEnooBGHIY6vTEYS + 1rACldKIYmbnvVlJuipUFLQ3nATnyzwToIlaMqHUemupUbI/NLiqpd2DBHCzdz5oiSzA + CXFrxupEAh2OdW7/zIqvQcO8JbQADU8lCQxmbhFZVPSWKJTCzsrcvKCqE4YKSa73MMYU + tC2ajAO6U4QXjRyNh1MK/XkiOUr9EbmU11NZ+6Sb0RRDoPUOKGTIRdHLBl2bHgdHAZrU + yN7AT/bQCmlv7PSFC0xKYLWiu0Czsh1rWXJvccDpUj+AJ0XDjmVptAK8Wuoo3h/KeKOx + LCTutmVBcoLqgZElH4Iti2UGnUmV7ZZlxzKcSMoj15X10bLs8B/fTCDVP5aFrRsKbbcs + Ratl4a6LMnJIss9lUX/nw3JIY1lonFQl0nA6oHDqTIq7d9AFnO/lIcpl6UB+UWtKcBdU + E+MCO6CGK92UrfVcpkekhf9rmsEZwB0zF3L48kzxe8ULtu4UL11dmMXWZBiRAuVsrRC+ + MSNnmme1mTuceohajvCgocjbHiIfN2mA9aDFh4df1cSEZbXdgFFWsPVNGoXTsA2zxZ3c + LqsHLfvu6rW++xZfXKhoGbHjMwj19rVckltFdybmfmNPPBf/WxX/NyAjWVKJ3uR3g6wR + mBqswufOkKFGUE6nnrNC5I0fSLakyHMhH09HQJXlYqTdtYNxcP4iqZK28IUPovtJmZFP + gO1jyTpi5qTlLdwiZP/Sf0nrRpz6AGNGDFA+LIuO3E7ymQ2Q1bYIhiYWODHcHVZYOKFm + vrFwyAil6Ts6nKnyinZYkDmNS/q4YAtJiBgWSWOWgYVTuA1R8646HRbeON6jxKL6Lyxi + gNczusIiJ5kb8tE1X4lgZylEyIHvxzXbth9bmgjJL8d+6DbUXZrIm5//4QecaTc70g50 + ux1/fT1eN2FIc8AhhqzmUgiKtyKnSpdNEsqH+IwsDVIcoxoUz6mdyd8/6WtYjYAU1+2S + 93az0Rpig8U3tKqWBDpf3upaxoljlar7JLCsPkhEaNRhVZWiQ8KI5QvZexZv0rCz6UaI + 7mlIMSmr4yR87NeEuxraVFmHHW4zU+/xWs4+h0N2qeOYY26IeeDwGm3O69czRHKJCJrl + l+YSoyJ1S1S0xHrnbgQWwFzrwZcq82NzVQeWQCGXVzpJJ6lC+kRdLT6fAfJK0f1QhFqi + sUjdEhUtYaumk6DvD6l7OfcC2iR9AaF7KWf2oVY2Z087sWMKoDtg+yWqWh+H5Nnnrh4r + VUnfeCI9lG3vxvv/W4aJMvR//Vu7V25Rly9N1Gf+nnJFep6vojgdHwAXUXb8ePb1wefz + 9aMqn/RCHGmjoGkCtqau3NfYO/GFnIjyPjQUN3jZhAJqsnGThkGmkTBM3mETei0+4YXh + 4XfVFPE3pFstLJn71uSQfiGfX4d8vmWZyj6ZHEMte42BJHkuqgRvxBiyFsdLG0PX2os7 + Joec87hXDzlhOQuIHNwGb3vXZWdh5nwSwAd+OtS+DYOLuxJf2GbGUgZsiHZ/gJWiZFnW + BFGsBHcjNezWodRGLwqAFxMJbM/gFlJ1tpLuLHKpmaKpL60oxxGq3XX1gHe2QzSSCW4t + Vs4cWtRx0CzWomHIucWK3cRlgtlS96Wrk0ig/a33SGtw7R1NB/QuunyQtEUBO68VLTVK + 9CfTqGrxwyzWRea55VejlJjFKl88V77oD7dyWKwLsz0IU1HTDFa84kRLT1YpDFbS05+w + 38ng43psDpOmMDr1XtCUJHagPkJxg5Xp6SwgPVPdpcGK9cwCeGK9W6wFFPq+W6xFadNj + WW6pdp7VlUMtiaXZ8VdcIa03JN7dYtXHoRzMnaxlt1hzWZThb07dWpaiAG8uS9LwTsSy + 
VEsdJU26tiy499xirWU5AYUsVhCPZdFnD0mPk8Way3JiWWSwQohlOQlMGay1LCdsPxu4 + AmmxLEWrZeFKiBmsOGNjWdRdGqy5LDYELNYGFEZyWKwNuppetywN5IzWezTl5lpCgSvV + qVmsqFaxfnykV86QxxYr3gn/0MzzFuuX3m3276u9dHrIR0rqus6KZrEq/OfMWJZo1ht0 + YSUtVrw9wdpVD7AetPjQYCGOH33rs9lpsWbfOyuY1mnVw3OQtOw769nJ8ajFl46Oqj9z + 7dRm5WdHsxD6JXshAeWFqIElZTUXAfeZE3AZZcqE0L9n+1thAy5Dyyx1gvzyeui2qsUD + KEpWiX/8IR/fCytVd3+R/4inpCk4y5VqXaVSLa5Q4761/tEE4j3JVadFy4yiCNGzexJ1 + CS/GdTULRDPRhiuSCa7uRC2BVdV2m3HO30vYhDl/J/gUd6XXyj7NohunUcKPL8uUc63m + vwdNOes+f6fIMs2pZR0BWTRDtJt/69kusM8XVDst180sruafE3tp4/GZVYkzXVgzyzQU + jM4eZYknVUCU3/hHiH7//fsMdlcKx6eIiHdf8f3uR6SclcbDOapdGepbfGam6W7YrWbT + pO6I4FS5KW34S6yBe5bvFUWdC103/h0a2slhnPAg9f0gja774fPC3sJLKGdr8a0cMyRe + Y+3beODcxSB+3GVezTeNL5LhAFWp/tK4lb+i2Wnsj3gk68FvhpK+JYKMxgTLt17xQ24I + T+soeIw8IbMR+Gqmw+rP9fFHL8N1OgTyuZbzuoUvMgKuX6lO5V2ytY3npuWy1tlpPK9O + 48uPL+eO1ABtSia2r8/XZ29nH82X8wj8ui/lqUuk8XEkxd0xZDPnjJbtOV1c5mp7gTPV + 7vSye1sLQDEjTewuF9EPu+dLMmD1Ei/FxrbD2wzT+3RAtgUOZIIyct3zMRdFeTEl+Igz + qtNOTArj0m6/IT/pUrcGOxrJVvqmRr6rYSmvymiYtGe1J1q+izMu2mt9NFobS7xLe3w4 + 98EIX05wqjcUJUJGKwRT/aHM4jaD1sYlc6rRYvz1bjcnfSaY/9pE/66datFe9sGRgfbu + 9dpY2rvdnKqX6516m0RYK7vDzkLwgTWJdjtN6IPohffSnu87d5+mjZ/2adohv8jebeS7 + 7tKDcq188qkjNk6XO3ivdtaluMqo+kv9qhzCDzm2evTv+YmLenSV+9wNtZQC7gOj1uAJ + MTcIyo/9xAjGT+/RZ0JrGGESI2gkrfAaK7J5s/JHPtilgqKIrZk8VvqKqdtf5dZfm8N4 + TqWJIjL5r1ggOsnTRdjTepYk3ghpcoWn0XiGjokMxyeNucFWTIpdIDqf+CYP3yKsWmw/ + XLo0UC2RFsO5RwSSnvK3bnPmWIJmunCj5pgi+slMHkc/6w19/OV8kHvwEVGM0hWlbk5F + q9HqEsCFT7208cua46K4LsJlLYuL4QNVnmWgI6OTJD8UHcVs4nc3p6JJnNyN8JW0FuHE + uBzZuyQgWiO4Dujy4PRZLWhY9zfVSKNtObIvn7/utJbzhpRY4grsjZlQvsWl9rwfY74l + q8PpYAe7jBALIFWd7x+0dK9Y2Zkj55da46MIug7Q9Tijs6E8IAqzzoLWRh0o2aPXgQLG + WataipOoEgev7qhl7Rjz/UGMK5KMAXJO7/xkP29f2ngZTz4qoPHunAhExZibqelK9FCR + T3bqMTzkCb6IDXzs9fD7u7cfI+h9cAdWabXsK+tDjgxl2UYf7FDPurU+2sPsI95mhB1m + d18m8Z5yhPeIodnO3EQlP/n2LBje/DzF7d+RdNn2YydU+aCXttPun+P01ptdQeLKxH/v + gc91Lq7pUkYguMPhF+2g+T77KbFeaeSf8yZ0PcoPruWDz2WVfHxO9L86yuNYCa5e7gdZ + 46hO6iyqAyutoxxQ1Bju73lnzZpvQZDvYkUR1B/f/DkR8VeGRtCtn7YUv/w/GDHqMwpl + bmRzdHJlYW0KZW5kb2JqCjIyIDAgb2JqCjg3MTQKZW5kb2JqCjIwIDAgb2JqCjw8IC9U + eXBlIC9QYWdlIC9QYXJlbnQgNCAwIFIgL1Jlc291cmNlcyAyMyAwIFIgL0NvbnRlbnRz + IDIxIDAgUiAvTWVkaWFCb3gKWzAgMCA1NzYgNzMzXSA+PgplbmRvYmoKMjMgMCBvYmoK + PDwgL1Byb2NTZXQgWyAvUERGIC9UZXh0IF0gL0NvbG9yU3BhY2UgPDwgL0NzMiA5IDAg + UiAvQ3MxIDggMCBSID4+IC9Gb250IDw8Ci9GNC4wIDI1IDAgUiAvRjMuMCAyNCAwIFIg + Pj4gPj4KZW5kb2JqCjQgMCBvYmoKPDwgL1R5cGUgL1BhZ2VzIC9NZWRpYUJveCBbMCAw + IDYxMiA3OTJdIC9Db3VudCAyIC9LaWRzIFsgMyAwIFIgMjAgMCBSIF0gPj4KZW5kb2Jq + CjI2IDAgb2JqCjw8IC9UeXBlIC9DYXRhbG9nIC9PdXRsaW5lcyAyIDAgUiAvUGFnZXMg + NCAwIFIgL1ZlcnNpb24gLzEuNCA+PgplbmRvYmoKMiAwIG9iago8PCAvTGFzdCAyNyAw + IFIgL0ZpcnN0IDI4IDAgUiA+PgplbmRvYmoKMjggMCBvYmoKPDwgL1BhcmVudCAyOSAw + IFIgL1RpdGxlIChDYW52YXMgMSkgL0NvdW50IDAgL0Rlc3QgWyAzIDAgUiAvWFlaIDAg + NTc2IDAgXQovTmV4dCAzMCAwIFIgPj4KZW5kb2JqCjMwIDAgb2JqCjw8IC9QYXJlbnQg + MzEgMCBSIC9QcmV2IDMyIDAgUiAvQ291bnQgMCAvRGVzdCBbIDIwIDAgUiAvWFlaIDAg + NzMzIDAgXSAvVGl0bGUKKENhbnZhcyAyKSA+PgplbmRvYmoKMzIgMCBvYmoKPDwgL1Bh + cmVudCAyOSAwIFIgPj4KZW5kb2JqCjMxIDAgb2JqCjw8ID4+CmVuZG9iagoyOSAwIG9i + ago8PCA+PgplbmRvYmoKMjcgMCBvYmoKPDwgL1BhcmVudCAzMSAwIFIgL1ByZXYgMzIg + MCBSIC9Db3VudCAwIC9EZXN0IFsgMjAgMCBSIC9YWVogMCA3MzMgMCBdIC9UaXRsZQoo + Q2FudmFzIDIpID4+CmVuZG9iagozMyAwIG9iago8PCAvTGVuZ3RoIDM0IDAgUiAvTGVu + Z3RoMSA3NTYwIC9GaWx0ZXIgL0ZsYXRlRGVjb2RlID4+CnN0cmVhbQp4Ab1Ze3gUVZY/ + t6r6EUgg73Se3Z1K5x1CEkgCQWlCd0h4hgRCOvJI50USCcYQMuAIExFUAkaRp+CiiLJI + BmmSLHSCIPLBqJ+O4guVRUcFdWY//dgdddcdSPf+bnUnQ/KNDn+4Vn2n7zn3dc79nVOn + 
6t5ubVldS37UTiIVV9ib60i5oixEzFLdZG/2yIHfozxf3dZq8MiqRCJxRV3z8iaPrN1B + NCp6+Yq13vFBV4g0P9bX2ms87XQTZXY9Kjwym4Ayrr6pdY1HDuxFaV5xT7W3PcgJObbJ + vsarnzAfGVbam2o9/aN0KFOb71nV6pEj30dpaW6p9fZn5bDvbWKo9ad7yIcaSUMCeH9a + Asv+PCqaJLTydlz73vC5vGzslB8oQKvIy+Y8ppR/ML504ceOmwm+T/hEYrTPYH9eqpNc + SUS+DO0TfZ/QEutRhgz++DupNMVJRaCpoImglJRpOmpnh+hx0DMgkRrYFloL2gx6EiQN + cS9A6mNbuiWtuZ+tpQg20zxa0i8IDtfrRo3Wv+tk6t79+o91V0+xcHjvcxbe7Uc+00ax + Z9jTVEN69jyZ2H1USIlsb0/SCn0lml6gZlA7SFR+GXuhOyZTf4alkkliGBNPMRI7of86 + I03/ZYZTYN36cwlOCcUrMZDMY/Vno/frX45erj8D6vI0HUlCjxP6F6JX6LfHONnebv0T + 0U6GMds8xepoDD2hb0rapa/JUNpn73IKXd36SWgvM4/WZ+ca9ROjr+nTE5xaBjkterY+ + OeOP+jgMRDcDJjWZA/RR0dv1k9EUE21NmAw6xY6wfZTM9nWbZur7wWK5PUVJubuc7Lc9 + hYkZJie7z5xdmLgrqTDBlDRbb0oqSEgAX/aaZqPmLs00TaYmRZOoidcYNZGaYG2g1l87 + RuurHaXVajVO9vvuqXr1KdZFUwFLV49WrVU52YuolE6xo0rl0ZNaSStoSRvsdH+G4GUU + 7GRdvf6cA3NCrXBqJzuKuOBVR816iXOS0uAvcB4/+CWBaQWaSQ72qFNNm0LbpuqmBt4Z + MKnA8lM/lUrL4G/KT186Fu3YNau03HEk2ubI5Iw72jbYXTfI/GTZuhpNtfkpKbNK1va0 + NTfWWWtla6VsrQVVOra01esc7VUGw/HGZt5gcIjxlVXV9by01zqa5VqLo1G2GI63KeNG + NNfx5jbZcpzqrAvKj9eZay3dbeY2q2y32Hqq8luWDNO1eUhXS/4/0JXPJ2vhuqqUcSN0 + LeHNVVzXEq5rCddVZa5SdPHFWxtK81e1IjoN1oZZBkdiqaNofkW5w2C3WZzsECotq0l1 + lvxVpylR1U4RUjrpidwfgy7z0rXQ/ZXqVfJ3Nbn/S8yDU/s4Ca6pU+gsPUr76Bip6TD4 + RFpKe+h11ohnezH10iUWQ+OQeyVy0mx6k7nd71AdPYf+rXSOdtJx8sWYJgpBayczue+D + bAZfRRvdz1Ic5dJDdJomYdZO+tb9grsHrSW0kI5QF8a/wWThuBTkftF9jbQ0H3NuRMs7 + 7tnuYxRIqZRPxajdSGeYSbzsricd5cG6p+hpOkCv0DdsA+t117vb3BfdnyNUdRRFpbjX + sV72uXhMesj9lPs/3C4gkUjJ0FpJ2+kg5j+G+yxSq5XdzVrZdrZTMAsbhF5pkyrMNQAc + kmgG7kJk5UeAQB+dp7/S/7Lrgk70F1vFC+6J7u9oNM3CKvlKaqkN98O4O7GmU0zNxrPp + rJitYzvYTvaekCwsFMqF3whrhK/EueJica34nrRK6lZtVe1Rj3b94D7lftX9AYVRNN1F + LbQeqztHF+l7+hsTMVcUM7E8ls+W4m5n+4Q+doD1CcXsLLsoHGF/YlfZdXZDUAm+QoiQ + IrQK24Uu4Zzwltgg7hSfFP8k/iDdqRJUB1Rfqk2af3dVuTa73nLnuT93/4gUqyUjPJNP + c2kZ2bHaZppAv8MqjuI+Bq+dpwv0unJfZVH0Lf0IFIgFsgiWyebgnsvmsTrWwPazftxn + FFv+W4AjBB8hQAgTooRSoUpoEtqFD4R2MVJMFmeKFeIx3K+Jl8Qb4g1JJQVJIdIMqYi2 + Sk3SXtyHpMNSt/S2apLqTtVcVZmqXbVZtVWsVr2juqRer+5Ud6uvq/8TaXG25h7NVnjn + dcTsK4OvNKWUWBysz6SVVM0srIp2wRsHmJ06EF017BHg1UyJ7iXienGGMB7RcIZ+i2jd + S+tos7iYDrg/Eo/Qh4iUFZitnf5Vyqdo1W54ZwONRxR5b3NSclJiQrwpTo41GpDyoyIj + wnVhoSHBQYEB/n6+o0f5aDVqlSQKjFKtckGlwRFf6ZDi5cLCNC7LdlTYb6moxKNscBQM + 7+Mw8HF2NA3raUbPuhE9zZ6e5qGezN8whaakpRqsssHxR4tscLKK+eXgH7XINoPjW4Wf + o/CPK7wfeKMRAwxWXb3F4GCVBqujoK2+w1ppSUtlfWbAMSotlScOM43mEztoun0dEixN + 5z2sjgjZYnWEy+DRJpqs9hpH8fxyqyXSaLShDlUl5dCRltrggJ20xbdGrtniNFNVJefs + i8sdot3mECr5XAEpjjDZ4gi770vd38VBzrr1lkaHYCqw13YUOMyVWwAuFyu5ZN8KaVap + AdMKm2zlDrbJawS3sRGWcnM97wRTZaPB4SPny/UdjZUAl0rKuyPMEUrydVBxeXe4OVwR + 0lL7dOvzjFh9X9q0tGm8zDPq1nvKrx/01L97lpe69ec/QzmrZAgAxhGQi2Cnw1CtKJFh + bC7/qc2ljupc4ITLxrDMBtgz3SEgZkSTQ2UqsjvaSwfNqLd4jKtstHT7hEcoL6F8G/pX + dvhPhqfQ3182dPyAt3Wl/O03w2vs3hq1yf8H4o3c0UOx4mD2Qb6NvyxNWHW9Tq7n/m1T + fApZ1llvqYDMoeE2O4LxAi8uNzoMNlTgazJ1lpN8isuPM9ZpczL3JidZovvwjSouW4rm + VB5qDRboh5CWiopkI7hxqYYCaC7gsWLoMHQU1XQYCgz1CCbJpJRoqO2wpQPB0nLgRAug + 0WyLHGJrbbbJmCedz4Mh6N5hwwyN3hlQKlXpA+g0PhUvUzG+uHx+uaPdEukwW2zwAsL3 + bHG54ywi12ZDr4whS2Hxugad1+ZM2JyRjPYszyz4dmnHFLaODj5nablsdJzt6Ijs4M+b + R3YyGllh9lY4iXfhkDtZezHGopCNkYoPjLIRZtk4phMQ0oMRhW/2n0c4e8hujMyBtdkK + wrm/EMKTbgfhybeFcN6QpcMQngKb8zjCd/x6CN85DOGpP4+wechuGDkN1poVhPN/IYSn + 3w7ClttC2Dpk6TCEC2CzlSM849dDuHAYwkU/j/DMIbth5CxYO1NBePYvhPCc20F47m0h + PG/I0mEIF8PmeRzh+b8ewiXDEC79eYQXDNkNIxfC2gUKwmW/EMKLbgfh8ttC2DZk6TCE + K2CzjSN81xDC5kgH3ZqH20ekXfrFE/PiWyDHl5IqkPKFSSjL6Ji0ippB6yAfR7mRvUob + 1UdoI+dBLaBI4QhtxuZ7Jfq8DLkLUwye/fhiR3ICsoHK+Fb8//US/unsoreHRCpwapxZ + 
eS4t3uujsCvCgRNOe8bQWKV6AnYUjfQVy8Y+y8HeFOKEpcJe4RsxSrxfPCvyEzcB+w+S + LmLfKmKuqZ4zKW06PhxAWpxR0UUQl8GLV5wkgQi85gr1K9rLUvoxi4rKUsZnZAUYAxJA + +VKn8+YXqtN/m+6U5tzg514C9gSkZtAzioIpkuvBjtYnvR+Y+mFsP87dNMoczJgZqlFL + slGUWTx+jZnZOdnsa2Hn+VdS4lzfferKfPBVKWTKKlfrCrb1oZf1sarTn7z2e/fAduk7 + vUtsefxZ6Gp2X8Fepggo5FGeoqufEmiiokePvV4ZDtsSblkc55MvgrDQieDHgR+XPj7D + lJmTPZWNYWOZWoM7lMGUnOx4ORaSnB2XlRkWqhHVoVkwUTOGybEJ8Tm8iM+5k+Wwr5ZV + Px8XY1qZ1VybsyQ0YBnrMesDfIJb7nt0VnLk4XSmO3i6rs7woHqsyVcfGJ2aFr8kaqxq + xrX7d+6ONnyyb3Vq0aFtIVHqMX5R6cvnVgjB2lRd2uLS2cmlf9hXWLhnYHdUrChu8lXn + y+bCxn97ZOdzQVj3Ovdl7OBmYB8eR3rvurXAm+Mbgd0mL/m5JvcVC9WEaozceKwpawzj + C5RjhaBAysoMFbMTsEq1RnrApGLyzesxy3c/unyKfDy4Ka/6d9aS1z7KzWGLv2g5u2ZM + +Lij978liw/PXzHz2YMXlmTPyNs2rjjKn8nYPAss/27XttUFG3o6eDgynFaQWKJ6Fx6A + A5QI8AfiAkiFSOBxyz0zBh4IhAf8UAZfhKXZsChMMw7IqjVZGlmUg+ScrJxsgZX0BsSF + ypHho0syTd0xvee7z++M2RkXpWpbLAjPCWzhi503a8SnOg83Q/dGxPkzqnMUSiav7tHQ + GwIi6FZTgIKOwJ+hgMBJ0JolwtceF8tBXjwQk6d7V5dMrt7wl/ERxif6HgsP1D/cfGnc + ml7VuYEr842TDtr2D8wXDrbllO+9NPAaj3voFVZ5n6+xXs0StAqIL+iQGVbE6k5+cZLl + nkw9KSXfuKQ6/abHXvVl+DKeP30KVoFgIkEmxV7uTSepgZEfsDKiDEMZpsyq8Voedicb + WoDiT0D395Us7zbMz6u7t31aXMi8ntqP0nQxu0/tD62Y03hS3nhyR9jY8Oa611PX9Erp + e+bF3TE1rqCs9KkFnQM5wp/vLu48NLBNONWUOWv/23yV3Lcc3wOwN5zCvPaOgq062KrC + s61EXJbmFnt4gCHmEHjAYHmXofJU/bVxEbFPnHwsxD9yvTl1XkFuVuhvuPalJU8vepaj + WjWlxi80f+K9DQNvc50Ctbg/li5Kc5HzdF6t/cgvXFs//OyNc++CeaQH5ghkjBVyeIwH + ipcNUWn9z78RH1d7sOflz3JcL7n+55PzEyezsq/fviok7Vq642Z31zU2tss14HqRpdxE + HjC7vlH0RroWSu8iv4yhWMKLT/GOESsNUXT2w54oxTsSvOJ/pR/vjijy9USVYg1/1sJg + Tzacr7gqUBT405cQnyDK4meRgYb+U02TjRFBsf3r3x94/liMtaj+tyfO5cz88JG9a2ck + p7T2CjHti4+fqtl7/6JD7wmfdhYlTnH9BXY+u2vZxJiigU/gj8141tbCHwHeTABLFF/4 + KpapYJkW8aL1xAv3y9AjBn+s6EowXLhbpYtK9I/wf+Rk+Ab4oS97nyCeEYVjLQN7MP9K + 94fSbGkWTicTFAT6sfYUBfkg7+pToIPwughCqbuCaPesOQcQpLN45EpvBpUHPRQWqmeh + SKnI/sihLHt0ZMbhmq6gsdmxTQ9vyK2NMjCx769zxuuOaKPSn6xbO2bsFHnmvc5JFYYY + 141Prz8ZyYorxi9adJc1LjQ6Lja56IE9L3VW1N+ROWOuuSA5PCg6PdW647GLHz/DY4fR + y+7rwjeqCkROhteD/EELAEai14t+wI7nArz0sIIQoCWiDOMZKUvkeci7HiX1x+fwpMTe + OGHu0nft9I0NyvCLCYkxWhPWTw3dvU2/TVXh+mD7gDU3aDQTOn20DywXLmxX7OhyX5Ve + kdKREz0Z2/O6HaX4KnDIV34cP49CpOsg5I0cJV8DxSAhcnfA2MgHX6rPNTb3Xj0ij79j + 7fYvM6Oi90npA5NKJlQervgXYcyNd/bfkbzgyZLNwkd8/fjScH8PqsX56T+68B8EcMgi + C85hi5SzVn4SXILT3YX4DlpE5coghlNizzeRGjjS3NIZJWXzUwprV7TVtjZU29PmVa1o + uBf/HnK8B69CMAtANaBWEPIG7QA9B+oFnQe9D7oG+h4DJVAwKA40we290EZDPMPzNVwu + HiHbR8jKmm8Zr/wneYtcP6J/wwhZ+U/zlv4rR7TfM0JuHiG3jJBXjZBbR8irufx/pT/6 + bQplbmRzdHJlYW0KZW5kb2JqCjM0IDAgb2JqCjQ0NDgKZW5kb2JqCjM1IDAgb2JqCjw8 + IC9UeXBlIC9Gb250RGVzY3JpcHRvciAvQXNjZW50IDc3MCAvQ2FwSGVpZ2h0IDcxNyAv + RGVzY2VudCAtMjMwIC9GbGFncyA5NgovRm9udEJCb3ggWy05MzMgLTQ4MSAxNTcxIDEx + MzhdIC9Gb250TmFtZSAvTlNHUlZRK0hlbHZldGljYS1PYmxpcXVlIC9JdGFsaWNBbmds + ZQotNiAvU3RlbVYgMCAvTWF4V2lkdGggMTUwMCAvWEhlaWdodCA2MzcgL0ZvbnRGaWxl + MiAzMyAwIFIgPj4KZW5kb2JqCjM2IDAgb2JqClsgMjc4IDAgMCAwIDAgMCAwIDAgMCAw + IDAgMCAwIDAgMCAwIDAgMCAwIDAgMCAwIDAgMCAwIDAgMCAwIDAgMCAwIDAgMCAwIDAK + MCAwIDAgMCAwIDAgMCAwIDAgMCAwIDAgMCA2NjcgMCAwIDAgMCAwIDAgMCAwIDAgMCAw + IDAgMCAwIDAgMCA1NTYgMCAwIDAgNTU2CjI3OCAwIDU1NiAyMjIgMCAwIDAgODMzIDU1 + NiA1NTYgNTU2IDAgMzMzIDUwMCAyNzggNTU2IF0KZW5kb2JqCjI1IDAgb2JqCjw8IC9U + eXBlIC9Gb250IC9TdWJ0eXBlIC9UcnVlVHlwZSAvQmFzZUZvbnQgL05TR1JWUStIZWx2 + ZXRpY2EtT2JsaXF1ZSAvRm9udERlc2NyaXB0b3IKMzUgMCBSIC9XaWR0aHMgMzYgMCBS + IC9GaXJzdENoYXIgMzIgL0xhc3RDaGFyIDExNyAvRW5jb2RpbmcgL01hY1JvbWFuRW5j + b2RpbmcKPj4KZW5kb2JqCjM3IDAgb2JqCjw8IC9MZW5ndGggMzggMCBSIC9MZW5ndGgx + IDEzMjIwIC9GaWx0ZXIgL0ZsYXRlRGVjb2RlID4+CnN0cmVhbQp4Ab17d3xUx7XwzO3b + 
e9W2q9UW9YaEGmgRahTJgGyQMMKSQCCaaUIGHmBhU2VMbGMLDO6FjrUIbAQYhzgiQJ4L + xHEjzosTA3HyrDjJAycxaPc7c1fIoC/Jz3/4l706M3Nm7p07c+bMaXPVumRZM1KhdkSj + CVMbF81C0i+zEiFq2owFjYtiuCEF8ldmtLV6YjgbRIieP2vR7AUxXHgSIblz9vwVA88b + tQg5jrY0N86MtaObkOe2QEUMx8MgT2hZ0Lo8huuPQr58/sIZA+2GC4CPXNC4fOD96DPA + Pfc3LmiO3Z+ZAHnCooVLW2N4Ri/kDYuWNA/cj2thfO8jDLUutBDJ0DzEIwpp4apHiP9S + 7kQMtJJ2+M1MUWy7T1N0HekECb+v6kdS/jPxzY/+3nwzoHhc+AdUyG7dT3IuMZKIkBJD + e5/i8cEW6TlIXD2oJrkHjQEoBsgBSE4eZUXteDd6DOAFABrNwY+gFQCbAZ4GYAZL+wA7 + jh/pZoTQCbwC2fHYkIJx3220ua1yhfsXPZg7+pz7U+sXJ7ENVu+32NatQrJRcvwCfh7N + RG78KvLhlagSBfHOI4nz3Q3QtA8tAmgHoKUU433driz3WzgF+RgMz/iRi8FvuH+fmeq+ + ktlD4W7324EeBrKfuAALadynnc+5f+yc7X4L4ECsaX8i3PGGe59zvnubqwfv7HY/4ezB + 8MzjsWyZEx59w70gsdM9M1NqH9/ZQx3odudD++SQwp2bJ7pznJfd6YEeAQOe6hzvTsp8 + 150AD8JtHujUF9K5Hc5t7gJocjnLAgUAJ/F+vAsl4V3dvrHuE1CE6R4Zk5jX2YP/60hl + MNPXg1eGciuDnYmVAV/ieLcvsTwQgPLkc/w6/l5+FJ/FJ/NB3s+LfBxvFPSCVlALSkEu + CALfgw92F7u5k/gAKgayHDgicALbg1+DSuYkPiRVHjomMAIlIMHYE/0cmBcjYw8+cBTY + HiMovMFJJa4HHzoSqzoUcjOkxEgNWoqUIYEUUVig0FgUxo/2cGi9ua3YWqwfqcsvL/1X + SYPUcitN/tc/K3aGO8fV1Ib3O+vCWaQQddbdut16q/Av89Zl0NRckpw8btKKI22L5s4q + a/aWNXjLmgEawo+0tVjD7U0ez+G5i0iDJ0z7G5pmtJC8sTm8yNtcGp7rLfUcbpOeG9I8 + izS3eUsPo1lld9cenhVqLu1uC7WVeRtL6440lSypv+NdmwfftaTkn7yrhHS2hLyrSXpu + yLvqSXMTeVc9eVc9eVdTqEl6F5l82ZyakqWtwJ2esjnjPOFgTXjMxKm1YU9jXWkP3g2V + pcsQexpp2VMoyLYjO5OO3AhFPwW4RPLIPdGr7FmkjSyI/oUuhEU9ToCKFBeh0+hRtAt1 + IQ7thXIQTUc70Hk8F/b2NHQUfYRdKA21w77vQePROzgavYhmoVfg/lb0NnoKHUZKeGYB + MkHrVuyLrgQ8BOUmtC76EkpAeWgDOoXyodetqC+6L3oEWiehe9B+dACe/2/spQ4zhuhr + 0ctIQBOhz3XQcjE6PtqF9CgFlaAJULsOvYV99KVoC7KiQhjdM+h59CL6CfoKP4SPRlui + bdEL0d8Cq1qRA9XAtRofxb+lu5gN0Weif4xGgBJBlARvbUDb0MvQfxdcp0G0luF5uBVv + w09RIeoh6iiznrVE+oEOiagCrkqQypuAAsdRL/or+gf+mrLSWrqVPhPNif4fUqBxMEsy + k2bUBtdGuLbCnE5iDmfg0XgCXo2fxE/hD6gk6h6qlnqAWk5dpavpafQK+gNmKdPNbmF3 + cIrI9ejJ6Nnoh8iCnOhetAStgdm9jS6ga+hbTENfDuzDhbgET4erHe+ijuMX8XFqAj6N + L1D78W/wF/hrfINiKSVlopKpVmobdYB6m3qPnkM/RT9N/4a+zoxkKfZF9grn438VaYps + jrwXLYz+Nvp3ELECEmFlSlA1ug81wmwXoWHoQZjFIbi6YNV60Rl0Xrq+wA7Uh/4OVEBY + j+04C1fBVY3vwrPwHPwcPgHXW9JYvqFgISgZpaMslIOqoZqoBVQ79SHVTsfRSfRYeird + Bdc5+iP6Bn2DYRkDY2IqmDFoC7OA2QnXbmYv0828z+azI9lqdjLbzm5mt9Az2IvsR9wa + bivXzX3N/RnE4nh+Ib8FVuc88OxPgJe/+zE4AUafhe5HM3ApbkKdsBov4kbUAdw1E28C + ei1CwWg9vYauoDKAG95C/wXcuhOtRpvpaejF6Cf0fvQxcMp86LId7WFKkJPdDqvzEMoA + Lhq4QolJicGA35fgjRc9IPIdcXab1WI2GQ16nValVMhlAs+xDE1hlFLmLW/whP0NYcbv + raxMJbi3ESoab6togK3sCZffeU/YQ55rhKY77gzBnbOG3BmK3RkavBNrPUWoKDXFU+b1 + hN8t9Xp68NSJtVB+tNRb5wn3SeUqqfyYVFZBWRThAU+ZtaXUE8YNnrJweVtLR1lDaWoK + Ph4CcshTU4jgCCEF6TiMRjeuBgGLRpM7ysJ2b2lZ2OaFMrTRvrLGmeEJE2vLSuNEsQ7q + oGpSLbwjNWVOGMaJHlHO9M58pCeEmhpIqXFabZhurAtTDaQvXXLY4i0NW1ZesX6H3iqV + bbmtMUz5yhubO8rDoYZHgLgEbSBY4xbAxtV4oFtqfV1tGK8fGAQZ41wYKRluTCf4GuZ6 + wjJvibelY24DEBdNqu22h+yS8A2jCbXdtpBNQlJTjlvXFIow++Opo1JHkbxQtK6J5b9/ + OFb/i9Mkt67p/RzycZMGCYAJBbxjYJxhzwzpJV4YbB5JmvNQx4w8oBP86jBMcw6MZ3SY + Ap6hfWHWN6Yx3F5zaxgtpbHBNcwt7ZbZ7JISKqmD+xs6tAWwUnC/1uvpuA7ausHb99Wd + NY0DNZxPex2RRrLQg7wSxo23ym1EWfpg1i1WbwtZ3zZpTQH3WstuqwCckIaMOWwEBT6h + Vgx76qACrMmUcT1INqH2MMZb63pwdH0PKnUeBxuVvm86NKcQVptTCu8HJDUFKpJEKKWl + eMrhzeWEVzwdno4xMzs85Z4WYCbGJ+XQ0NxRlw4UrKkFOqG74Y2hurjBYnNdXQH0k076 + gUfg9o466GHuQA+QS1Xp/XBTRgooU9o/oXZibbi9NC4cKq2DVQD2PT2hNnwaOLeuDu7K + HBwpjHj1HOvAmLNgzJlJ0J4d6wVsl3booq6jg/RZU+sVw6c7OuI6yH6L4T0YDa0IDVT0 + IHILIXkPbp8Az0LmFeOkNRC9IgyrjtB0GLD0LY4Cm/3fUzh3cNzw5HAYba5E4bwfiML5 + 34fCBd+LwoWDI72DwkUw5kJC4RH/OQqPvIPCxf+ewqHBccMgR8FoQxKFS34gCo/+PhQu + /V4ULhsc6R0ULocxlxEKV/znKFx5B4XH/HsKjx0cNwxyHIx2rETh8T8Qhau+D4WrvxeF + 
7xoc6R0UngBjvotQeOJ/jsKT7qBwzb+n8N2D44ZB3gOjvVui8OQfiMJTvg+Fa78XhesG + R3oHhafCmOsIhe8dpHAoLoxul8PtQ8Qu+sEF87TbSA6WEqtHJcxS1AsQAsgEGAZA6qoA + xgE8A/7YVIBXuXw0g8pH56W6yaiBfxSiHUvRJHDGCyGvBBiBz6J1gLdz+yFfKkEhtR9t + hroSeNYCde1QVsCrb8WMlODJdAKeicolR55GDGKhjoe623+xgBKJHcmlatKHUiqpbr8N + qQcwzR21MQQiBkgHoAcwABgBTABm8GuskCNkQ3YUB/6ZU8KGoanoKn4Cf0610nL6bvoj + ZhZzjl3EbmLD3Cq+EoIdT8qmyzn5MPkrCo9ileKaskBlV81V9aqnwvMU+CyIuQC+Lg2z + KY7FsYR0MDYABG0PQhcACA5l+rMexAAgKPOfoRPS/Ccnn4BeWDQ5OSMzWyfqAgAlzNae + m79jT307uoepugExEaBaL7zsQ/YJ8JS8hwXcg7NDSobhlQzfySJ5hUzb1mvt/bA/HxUX + X3s3M8OQMxIPz9Z5db0/3enfepr+psNQt/vb++lvpL5CMGYX+yyKR7tD1blMOTOFnee8 + 37XStQ5vpIQkYaptnm2VbZXjdRuL4rGGcahtIu+wQdyLdWs08QZ5joH1uJeJ8UrxQT7P + vDBeHdCsdefFJ1R4yTh+2XetT3u97zIqLuovKu7T6fPT9ZZ8DLk+P18HCarPzBi9IuRg + bEqfzq/Qq4NIZuSD2MaotPIgFkyQQARQq8Uk4LAW1efqi3Hu8NycYX5vPM/xXiiLWXqT + kec0mIMK0SSOXf+T02uHTepcfbzCzxyjS5bh4DdfrCh/fXNT3kw7rb6ZeBzrFy0cl1Mz + b/W2LePWn2y7EPnm5YMrK5rH52ZOmbtfoktm9FPGzu4EPu0Nuccoa1KbE2ekLktclsp1 + +vE4IVluTTaq6H9kGnNU4MR4Q0ZdjvZBlSozLieB5XMyVdbOQKmuB6KPGnle2kLKnehZ + Sweo7Iqs26gCtEHFfcV9QJRr/Ve1fVpCH0IbiSS56Rk2P5Kxfqcv3s8hOogYWsgAcji8 + 7iCy+6xBzGAeyJUOiUuMA5r5IQFiJScTYmmLSGntWqAZrmeonGyzxZydFSMcx+e4cHbW + bWQcRsgIniJQ0IVNRuTF5itvKoPlx7YefP1Fvc/g8JubRy3Z0Xy0zM92h+7Hpl/9uSKl + fPGDkb/+PYAt5x4pXrxj+ZNtGD9PU568x+a1Li9Z+cKicz89vm5SttN9uP3dSATICtw9 + DPgtnX0GSio0LRQvo+SCClPUW3qO4ykOs7wA8QVeTi1TsF/TSp6he7DlddypEg7Ke3Dt + EVZToZYoeP1aUT9wVTFkRbp8iWpAuPyNacnMau0ZTWYG1smwTszB2bpsk1dHvRrJwe/1 + b6Ee2/HBBxCa2Nz/QITF08P01pv3PRt5iYwNo5LoZ4wD4lQedDKUXKnf5KbyleWGKYbZ + BqZAUKp4pJRr1OpleoNBr9Z49AYeGSxySw4MLD5kVz2oVjv1BRqGyfGcdap0fJ59Icrz + xFeIsRW/3tcLi91X3A+rffnarZUm2wDGDENGsaWHtbfCfgha3VhG+WkXBIkgxOxhHbAn + ZFZIsJsJIi4OEsEW2xtktbVFZLnJWtcb7ljngAE2BA2bJDuLMRkpMT4h0K9fHbr7hZ3H + 2uvXpz+zgPqy//kRWakT5pzB+huRvq7I/2nxgp2FrndWdb5SGZLR9GuRJX6DGPnpf0d+ + fuYdaQ2ror9ivOxzIDkDaF8o/wE7tgg+IWCrtW1AG/EmGV8hyMWAmKNWG+mzfE4cG8iB + vZJIrXXl6RZa5FSRPCHTklgRlAjTn79q3KTlK9OtICYG9gPZEIRA0jYY5vM7PBoz4li/ + R+MKYr8pIYgcBiiRPYEZ2q0Vg9hnDgSRUw8J2ROSrMCxDUB2wFpcD8cXZpPXHwCpQX1H + Dm880mmH60X9wLYwGc3ZdMWpbq131Lrt3fKR0yfPPYqVkf89H/ls1Go8fu2ja3a3dj3/ + KPvcP9bdkzE18ofIzXtTg1cv/zTyAc6EkJPiBJ757a9//ND9Z3fu2kRimRjicYTf25EG + 1YRyWYWNylMUKPNVY1X3UJOZJuoYL1+lOqo6o6IpGVapC5CGkSkpFWi/hWohT3ZQravQ + SmQCMXqFMDiwPHA8sA0GwVmPTRwFOxbEoN6QO1zMYdLLrtROSXWmnS39cvP2m1+y7c+O + jhw9fXLnjM/wTtz5p0Ovkz34DIzJwl4APeVBz4QqgvpKQ62hWbVMxc5RrlBSfkGjVZk0 + CpnVpFcpGI92Ck6i5Z6fxyVwWK/J1LrxTJqWeax5Mnu8O9NjE+M/EGdUQlS8+lpVX7X2 + mypJzPddIxqIMPxV3XcSXy+tqd3mYgSnz8G6RyE7bx2FXUzcKGwTIAHxRVYvJrV8sHWR + PpvIJY5XY5N3WO6dwh/3nT0b6br24Zm+Kesa8rtLl05IMAeXbdwTSmC7L1xgzmP+t11z + 17XXr13zo67Fd8X7RpU3Pbaq7CGgwdToJXYxewU0vwsdDhXGsdtxJ0u7YXc9hDeymw1s + jUBvcOp0Jq7ASSsLTDIX5XLZ6EyqUJups3tkmTab2/OiOHfW7fO+RjgXVigm07V90mQL + kMPiM/jVvji/wizLQiqjNgvrdRot7wCMRXQWxhRDy63KLKTRQyLYuSwQ7ZAQAQ5sTBg5 + lpKKtSDOBWzxpmGJe4E8w3OHZwMTiJ6AH7g5V/QyLjxM97Z4pvvTyPW/fP3Z0hGut+1P + dEU+jqLXrhw8gSuC7JXIpZNbd0fej5yJRCI/3lf3+JfPntr1Lj6Iyy78Ttrjr4K1OANs + GRVYSrND7o26Tj2VJShcGgq5LIKQabDbVT61zWb/SGzbHKNBv7T2qLi/uF+auB+bdT6T + n+NZnuFpnuJZTq4VYLZmSGR6RRbmjRARlRY8iczLR2ZCdLuW8oo6WvRYzDojTyVi6kLz + qNaxhXbNp3+JPH+OqsHpe56q3RXZ0N+13xRYWPdITQXW4bQbO1jDx29HLv7xVKRbmsOM + 6Kckxgy2jRddDI2M4zbg9RTtxG52A97seMPDhgQNYzLT2gXmNWZKY9apmA3xWp3LoNeb + +IJ42iSoCuwyL+X10i59Dx4X0tJMJl2o9RnsPnmmy5YAh2Wzj4hzF93BA/2SGo+xgaTb + Yb9KVfn1RLfD1pWokxInIqXD5/FjnzJODqsuQsIhJgtTNMsonKosJHPzWZilICFKfYAR + JBYgPIAsZsMgE4hZCQYxR9R5A8AI3luMEKCvbv9V5pmE3x18J/KHq5g5i1k6Moxa357R + XP3wzyM33nz33Fs4TWS/mLA08vmL2yLvRS5Gvo0c+z2mXr35p1MLk8fu+yVeghdfukBJ + 
suw8CLQnJPvWQk5lT4B4QygtGc5giFlLpWdkGsDOPH/+PDFXoZHImXFwP4syQwZE0ZSL + YQXazmPKxyIbB+eFNUfEtukS9Yqq+ouqtVfBToRCMVitYACbxGfOUl/enAjd/bUL9mxD + 9EPmG1jPdIh5R0LTEzUBr9+fq84RK/xN/pXqBxJk8wSr2uKj6tQt6v3xtFxdEJ8QL6cZ + h3WDMT092VFgpJmCZFkGJVcLuoR4dzAjQ2f1WcYIvqA9y+3TjUG+dFtm1gvi3AGWBo0k + MbW0sfVgZxC4bYPrwJhN68+uXywtaVUwTedGAuWn/Kk+zmf30ykoGaWmSRmbJCRjp8Gd + jOJM1mRss+JUJhnJAopk7FPgNCjziZC49A5oNENC1hzsXmnhpaJkzxHRSCw6SckT+y3g + T8eg1nKGJRANH9NwnMloMbvJPSYjQ7hhOMYuftiMbxdN6x43/qWzP524BZT97/Hok5rM + ey+Fd04tvPDeUxO3RJ7938ifdu2iqSp8aXX1E56RLyzPzvKlpuRMO/azyG+utxUvfbJp + fpYnIz2+cHbvtV9seeRPDLhlGPngy4DFsM5KOJh+SpDh5fwK2XLFRryBYSvwOKqUrmSq + hBL5ZmGj/Bx1FkyCcwplrWI236LYTG2gN/CbFU9TnfRT/E7FPmo3/Sq/X6GB02S5oLAJ + ZvkUnlMIjJwaGSwLsj6wEpFPqVTIGEwrYKNwShZRglxB84LaYrGDiNkQEmjmmpySXWtX + ILxBaVNtBQ6zVWuvWav68/PtAAOZrbqsufSqFRVbiuCPuCUbq9L6Nqb1wantURmclYBh + tzOk0RPxzNIMx8sEmVwgdXI9w9BQjZSKjau1wpmNaVY2WQAjbqMAcAsZN3HFEQyH1fDE + G9AdA51IHcpkQqw/8Ikp6EHQnpZAy67stwq91o2ksFroBRt2SX39YlS/xCDD2fCHvTLs + xf3YhMd/gsdj06XImouRQ5EDFyPt7Kkb9zAHCHw7mnn7xkhpl04Ce5acYmngfLII/TqU + l5SB5VpFnNIRyK7UzpHN1fL5gl4po+Oy+ASZU6t0FiZTaYmFxwqpwqwkn17Ls4IjEG9x + 9OCOkNfidPMBZ5qCcuYoiviiIoeRT0zam2AfGZfoGKsJ5NlGjHwTb4fDu+O4Ew1IxNj2 + udzfO6gXwSLQ55NtUw+yMK0vrY/IRDAQpA0UzB1uikfY5sO5GhFZXSAizR6jiMV4NJwS + kd1pEbFJhGRAHsZkoeTgJIBvMzx3BFZjyQ003eEjjgRfB/aCDqy7LHiFGjRnwB8gGeyb + 3OEGrF5SfV9dp9iStaApswYfHWlSPrzy0UJRvpf928un2pZZfEqXLinFX59klg1/b9VT + p05s73h/asqY3Y+bHJxa5UifjecLKdbUaTXjk2p+tquyckf/dkc8Ta9XciXeUOXc1zc9 + 9YoBXya2F5xy0heYainusCeUvseGd1j3Cvut9FhBt8tI00bOaedVTqMijo+Ls2gDegxu + o87ulAcsNgd8+sEfEZes/k7fFFX15ef/M6tjGLIJPqVJ7kdqg9YfszdsgIG9IUr2hsKs + 8oO9AYnMyvmJvSH+E3tDoi0yx6wNHuSKRMFsQjoqR4uyeeqjLyxd2iVrDo7N2PTEoodt + Xa4/n/zFt1j/SwdTHf54xsN7F7zw4mebH/jwDM6+Cke0BfB+VBm9xNiBLx1wEu/DytCK + 7cLT9j1umlVTGtZoUus1JmNIGTIKiXY8TvEGfRb/jD4b94nwqewj9yfeLy1fehVndWf1 + 1DSBFRM0O83OhHyO582i08HLnWaFj9/u2OM45vjYwfjMGrAybXIlr4O4hDPA2gMJaXzA + ZvMHfinuro/Rsf+yJON/2U+csZiGTq8fFPK3tLfEoOXIC1sXjq4xy3BusLb0WoPWqGU4 + pS8+LsEP9rTTj11OmYX3I4VJ7QeT3msXoYqFRLAC/SGq4UeSaJf8E8lHSUpOWosX16PF + 9eCkWIjUFmPe+XAgNDHvJUMPZRM5743nQH8e/SgvV6+9+TX72PZH784wHubvypy0YtSk + c5E/YuvvsFsRHHto1V4We5mKefdMnD/2pZfP1OdWFD6eNsGhBQnCYQqXRPzLyh860oHJ + B2KwJiMihfSXsCZulApfVhwLVeUaxwhjZLVCnWyTcl/cXue+wO7k43EKkK7m+ER1rzwe + xAHDJTptcr1Trknj09JYB51mTktNZO0ZSnVANdIfcNjSMzaKS0oGpUE+oXT/5etA5QE7 + GYzlmHEUM4q8QbtLoUvwaf1el9+PgnZIdAq1iDRqpcrnjPfjQFwi8K1SL0pU/M4oktQi + 2ESWnGwwGjkx3h+IRTyG50o7PUEH7IqAfgNcDLoTU6umZ+fsLloUOX/oK/UxVWDEw++H + /HTujtWvRW5g/gQufeXBt8p921a9fVdK5CJTMtI7euPNrHfaLu16tTJQ9MTkX0+a8Dfs + xCqcFnnxdPd9O18/1TVjHZUK9KTg6xCECyUpzKPckIO/woCi5Gi5DLQU0D+Rp2GDyvaL + TTHiFFX19hf1SgqJeHjFVeA7EGOKBC/WHYMfk3TjI/YU8b8xaocF+zv0rUB7QjPrKFwg + YBsFjGLhprCz2RXccn4je5w+T1+i5SzLgSqV0dQ66knqFTDB8vUyGcPCRwDcAj3PQxt8 + DsByMoElLgjYazQn5zk5Z1dBZCYRKWxKVbfYdBybYxKdGGdFoEuJ1iwqgnESSY4BQHMS + BfgTBrRfcj27WntaKxQJRaDCCFsvgakQDebFvM7bfgi/dzUyCx++Gunefog9dfMAPhtZ + 2N9EOToi90vzA9pxPqYC+dH6UCEv8GpOYxEsaosmIARgG1faJitmK5Ren9zu9NrkFGPx + iU6LUwWmARfn8NEGeRCIrEuED8twtz2RfE8XkiOc5gPGsQWCPVh15Duy91/WXgNXdYDy + YAmAAdoHDuwtlzUzA9Ubsk1SAAWY65Ym8epI/JEzwQqBBRYrresODatb3F6dklD0UvMn + 1Ukn51XNffqYPXHRrD1HmfQddyWMKE4on1zzzN1b+4dTX86bsHV3/+PUyQVZ4557v//c + AM/QfbAHSVR6eijzGHeWoxjOyAWMbVwrzxqVlNGqdbIwTatCbuftdqRMlNkdOM2aaEO2 + OFDT3B0zk8RabKfBvPpImIw4HyQeCFO6bSpkBiBnwMPWefG6A+P3t1yekHLMmbEmlDg2 + LzXuKN4D458+6fkpL/VPpF5uKpqpMpfkLJ7T/z4MFmRHIcRKRdBpSvAVbeixUPYOoVP7 + tPlVZq+wW7vP3COcEz5mrqj/YFQWCJzTyiudeoWNt9lMVEBjj5MFTDZ7XA+WgWYbkMgx + o3tQRsREAxwn+BUGGUhPHeXHvAVKrApKcqPSj7AWEsEMioxWQ0IMAykhEeMEPbGSyRqB + 
9tJDwAeiYCimvD5fnzH+xKudnS/DB2U3I3/7deQm1v+ea8Wa3Z3Tn7zZfeAyfSnyVeRa + pD/yGk6+CcZFiIX9txkW6wngTx26K+QP0H7VcLqCYdSCllLLdDJlQCBLpJMLdgNO0ybq + kE1v6MFlsDRrJEEImwg0Drg6xVXFvf29ROPEgr4Sp5FlMVtMxLsn7LX5gOmVeazVqY3T + bnoC2Oh47i6Kfoumupb07yA8UxL9mH6DGQcyOx2nhX6UJ9vBduqfNu4w7Ujiggm+QK5Y + LlYkVAQmJ0wJzEqY7V+hXKFaoW7ztia0+lr9u117Uww0qCo2lUkzILspzuKwmlKNaUGN + Yo7g9+X6KF+8Ss4kG6w/czgNPONM25msSOdlai3Fo3Qx3e62mq0By8ignw8E7Zlqd0A7 + EgXSbBmZ3YP6lUSCJLmfr4USmW5+OqQDnhTESqTtFnOhxuNUym8C10lUu0Ukg89MMXhP + ImKToOTUQ12c0SpijyZeRGK8WiUE5CL2+2Ry8KZE+LYYEpfOIRIPKmYlxgInUvQkxhCS + zQhhUtgDkg91uwsFQVKLmf//fSgODMUA/lrwle6duWNEYOmPNo9q/dXxv84bTe1n/SOf + njWnLFj9wNslcz79n6/P8vgYnjA1Y8qUe8sSwDKJTxqzdsebW6e2jMiqqA6VJ9kMzvSU + sid/dOHTF6h/AC9Zol9TMnYq7JxJr6vS5KfVcJ5THPIx5nwLzanlOjuIMoiIJyKT2qSh + 3TRF3zRD1OWmOHvACuyvz+9NJ0ZKTISlEwHWX9Sn7b8saRAS/AZncNDu9efovDnZe984 + cMBvylS5jO7RgTVTH3+cnRr5cFt/WZ5BgamtMmHtbOrMNkmPtUe/oP8Htjk5MZseKugx + njNSMoNgtBlsxiD3AP0xKBHEquWIU8lZ2NdW3moF0zJNnqhU2O04kQz2F7fErRQeJOw/ + qP+LiwhDxGQSjg0UFLMONN9wye4Bx1bnw3n2jIffLPUd3U95h83edqUmFXcx6f35k4Y1 + 7J36LKW+cfG5EUl3Pz1pM/WJnehHcErpPzLpCPRsKK0EnwFHbTZqoVro2dxGZhO7B+2l + BPiSkipjxrIbmM3sWTjlE8YElwZ58O5ADM0m+xScwJ7ooqNg6HmYHvzwMZpeoKcwBd8z + PxxycaA94U0sR9xAlqI5GuLTjFwgi9VFncBE+687grs4m62a+J2ff94/6G1KzubAERgP + alNbfbmKj2XJ4DKGfFSinqYZlAhHImBn3tE5KOkukECD/Uq+7JCeWV6bDH+gesGkrF9M + fEdQvJ9hF04+E5l/OrKMSb+5g265cREoROI68Is2w/ei/+zngkojnJnGTkyJXooHrRyA + 7w1TIAqTiwpQKZzjVsJH2OPhe03yVSz59nQymoJqUZ3UIYbTVyyVOPgfAjSmrK5izPjk + yub5bc2tc2Y0QkusldzSDrAFAOJG6CDAmwDE4iEm6lcAN+FmJYADIAWgCGA8wDSA+QCr + ALYAPANwEOBNgHcAPgP4CuAmCEwlgAMgBaAIYHx04Af9o8EyhnO/O/GsIXj2EBzOte54 + PmcInjsEHz4ELx6Cjx6Clw7BK4bgoIjveP/4IXjNEHzKEJysxO3znzEEl/jjNvrMHtI+ + Zwg+bwgu/b/Lbc8TS+/29y0cgi8Zgi8dgrcOwZcNwdsI/v8AjCJchAplbmRzdHJlYW0K + ZW5kb2JqCjM4IDAgb2JqCjg5MjcKZW5kb2JqCjM5IDAgb2JqCjw8IC9UeXBlIC9Gb250 + RGVzY3JpcHRvciAvQXNjZW50IDc3MCAvQ2FwSGVpZ2h0IDcxNyAvRGVzY2VudCAtMjMw + IC9GbGFncyAzMgovRm9udEJCb3ggWy05NTEgLTQ4MSAxNDQ1IDExMjJdIC9Gb250TmFt + ZSAvSUVZR0lMK0hlbHZldGljYSAvSXRhbGljQW5nbGUgMAovU3RlbVYgMCAvTWF4V2lk + dGggMTUwMCAvWEhlaWdodCA2MzcgL0ZvbnRGaWxlMiAzNyAwIFIgPj4KZW5kb2JqCjQw + IDAgb2JqClsgNTU2IDU1NiA1NTYgNTU2IDU1NiA1NTYgNTU2IDAgMCAwIDAgMCAwIDAg + NTU2IDAgMCAwIDcyMiA3MjIgMCAwIDc3OCAwIDI3OAowIDAgNTU2IDAgMCAwIDAgMCAw + IDY2NyAwIDAgMCA5NDQgMCAwIDAgMCAwIDAgMCAwIDAgNTU2IDAgNTAwIDAgNTU2IDAg + NTU2CjAgMjIyIDAgNTAwIDAgODMzIDU1NiA1NTYgMCAwIDMzMyA1MDAgMjc4IDU1NiA1 + MDAgXQplbmRvYmoKMTAgMCBvYmoKPDwgL1R5cGUgL0ZvbnQgL1N1YnR5cGUgL1RydWVU + eXBlIC9CYXNlRm9udCAvSUVZR0lMK0hlbHZldGljYSAvRm9udERlc2NyaXB0b3IKMzkg + MCBSIC9XaWR0aHMgNDAgMCBSIC9GaXJzdENoYXIgNDkgL0xhc3RDaGFyIDExOCAvRW5j + b2RpbmcgL01hY1JvbWFuRW5jb2RpbmcKPj4KZW5kb2JqCjQxIDAgb2JqCjw8IC9MZW5n + dGggNDIgMCBSIC9MZW5ndGgxIDU4NjQgL0ZpbHRlciAvRmxhdGVEZWNvZGUgPj4Kc3Ry + ZWFtCngBvVh7dBTVGf/uPHY3EDAJIWwem5ll2LwjJJEQSApL2A15gYEA7iLIbpINSZqU + FEIqKJhasLIgVSmo0GMLbW0BkUnCgU0oGKkWrdai1keptT5qbXvqsa2lx4rs9DezyZZw + qid/cJx77nzfd7977/e7v3vnztzpWr8xQBOoh3iqW+nvbCbjSmqHuLOxw98ZsRNegFzZ + 2N0lR2wxi4hvb+5c2xGxLd8lGmdb275puP2kEiLujZaAvynip88gi1tQELHZTZDTWjq6 + bo/YCScgre3rGof9k9A/Tezw3z4cn96ELX/N3xGI1E9yQuZ1rtvQNWzHQc7oXB8Yrs88 + wPdrYig10TqKoTYyE0dxSKuJzH8eZyMBXt2P68DzMRfX3FB2ieIthr1m0XcM+Qv7z575 + JPhZZuyDMaloHTNSX5em7HA2USyDf2bsgxZi/UaTkZspRPW5IapCnoc8Ezk3d76Vethj + dD/yD5B5amU7aRPyDuRHkIWodhjWANvZJ1icg2wTpbBq53hBWpaYLFnHjZdeDjHTiUel + 31rfO82SMXvvsOS+CRQzfxz7Afs+NZHEfkwOtpkqKYvt789ul3xwHaZO5B5k3rgzdrgv + vVA6y/LIITC0yaB0gZ2UPijIl94vCHGsTzqXGRIgnkqH5bxBGrI9Kj1pWyudRT4acR3J + Ro2T0mFbu7QnPcT290kP2kIMbR6IiI02ND0pdWTvk5oKDH/tvhB3tE+aDf8K53ipuMQu + 
zbT9UZqeGbIw2Pm2Wimn4FfSNDRENRmdOpzxUpptjzQHrnSbO3MO8ml2hB2gHHagz1Et + DULFcPurskv2hdgd/ZVZBY4Q2+wsrszal12Z6ciulRzZFZmZ0Fc8a95mvtU831xozjVn + mTPMdnOqOdGSYImzTLTEWsZZLBZziD3eN08ynWZHaR5oOdpvMVnEEHsChcJpdswoPHbK + Ilg4C1kSQ9rbWLyMEkPs6AksQ0ZQTpoMzRRix7Au9KJjTknQNcFwxHG6jhvuxDELR9Wk + svtCJtqe1D3POi9hbvzsCtfn3XyGZ+Se+/mXldnUfTX1HvWIzasW6opm845Ut44onyu7 + NsIVKM/NrVm6qb+7s63ZHVDcPsUdQPapO7tbrGpPgyz3tnXqDlnlM3wNjS269AfUTiXg + UtsUl9zbbbS7xt2su7sVVy81u5d5epudAVdft7Pbrfhd3v6G8vWrR8XaEY21vvz/xCrX + O1uvx2ow2l0Ta7XubtBjrdZjrdZjNTgbjFj64N2t9eUburA6ZXdrjaxm1atVS1Z6VNnv + dYXYYyh0bSRxiOLEM5Ql9lCKMJ0kIu23yBd1GV6u/Uk8T3HhDu0ffCkmdUDPXHheGQ3R + fXSAjmMX+in0LLqNHqbnWBue7VV0gl5j6XQj9l6BQlRLLzBNe4ma6Ueo30XnaC/1Uiza + dNBkeHczh7YZthN6A23TDtE0KqF76AzNRq+76UPtsNYP71JaTkfoKNo/zxSuV5ikPaH9 + kSy0BH1ug+clrVY7TgmUR+VUh9JtdJY5+ItaC1mpFOi+R9+ng/QU/Y3dzU5oLVq3dkF7 + B0vVSmlUj7SFnWDv8MeFe7TvaX/VwmAii3IQ1Ud76Ifo/zjSELZWN/sq62J72F7Oyd3N + nRC2i1PCV8BDNi1EqsSufC8YGKCn6Z/0H/YRZ+Xj+C7+GW2m9jGNpxqMUh9JgLqRvo20 + G2M6zUxsBlvA6tgW9l22l73C5XDLOQ/3De527k/8Yn4Vv4l/Rdgg9Im7xIdN48OXtNPa + ee1VmkI2upXW01aM7hxdoH/Rp4xHX2nMwUpZObsNqYcd4AbYQTbA1bEhdoE7wv7A3mMf + scucyMVyk7lcrovbwx3lznEv8q38Xv4R/g/8JWGuyIkHxfdNDvPvwg3hHeEXtVLtHe0T + bLEWsmNmymkxrSE/RttJN9FdGMUxpOOYtafpGXrOSO+xNPqQPgELxBJYCitki5AWs5tZ + M2tlj7JBpLMGln9zmAguhovnpnBpXD3XwHVwPdyrXA+fyufw1fxK/jjSs/xr/GX+siAK + k4TJwkKhinYJHcJ+pMeEnwp9wq/F2eJccbG4QuwRd4i7+EbxJfE101bTblOf6SPT37Et + 1prXmXdhdp7Dmn1q5JVmSIFNA/pC+ho1MhdroH2YjYPMT0GsriZ2L/jqpCxtNb+VX8jN + wGo4S3dgte6nLbSDX0UHtTf4I/Q6Vor+fdFDPxHKySY+hNm5m2ZgFQ0nZ3ZOdlZmhmOa + MtUuY8tPS01Jtk5Jmpw4KSE+bkLs+HExFrNJFHiOUZ5bqfDJaoZPFTKUysp83Vb8KPBf + VeDDoyyrFaPrqLLezg/XqJpO1Gy+pqYzUtMZrcni5DIqy8+T3Yqs/sqlyCG2cokH+n0u + xSurHxr6IkO/39AnQLfb0UB2W1tcssp8slut6G4Jun2u/Dw24AQd4/Lz9I3DSeP1jlVa + 4N+CDZYW6DXcaoricqvJCnT4eIfb36TWLfG4Xal2uxdlKFrqQYz8vFYVOGlnbJPStDPk + pAafrvlXeVTe71U5n95XfK46RXGpUza/b/2fOaK5d13lVDlHhT8QrFCdvp0gVzd9uuXf + BaumXka33HavR2Xbh0HoGNuAVIcbeSc4fG2yGqOUKy3BNh/IpaWevhRnirH5qlTn6Ut2 + JhtGft6AdWupHaMfyJ+fP1+XpXbr1oj84FuR8peHdGnd+vTbkDVLowQwnQGlCjhVudEI + ogBsiX4LlFCwsQQ84fIyDLMVeBaoHNYM71BFR5Vf7akfgdHiioDztbn6YpJTjJdQuRf1 + fcG4OZgp1I9T5OAlvK19yod/G13iHy4xOeIuke7UJzq6VlTmH9G79ZelA6NusSot+vx2 + G3MKW7G6ryqArVOjY1YT8QKv89hV2YsCfE3m1YQops7Ty9hub4hp20Pksg3gG5Vfcxvc + efpSa3UhPoz8PBTk2KHdmCdXIHKFvlbkoBysagrKFXILFpPgMCQcgaB3Ohis94AnWoaI + Tm9qVA14vXPQz3S9HzRB9aAXPbQN9wBpFE2/gkoz8vAy5TPqPEs8ao8rVXW6vJgFLN+h + Oo86hJXr9aJWQRQpEG9ptQ5jLgTmghz4iyK94NulB114g0G9z3qPYleHgsHUoP68RewQ + o2sLnMMFIdKr6JSHWE8d2kIo9lRjDuyKHbC8Oqc3YUmPrCh8s38xw8VR3Gg5C2iLDYZL + rhPDs8fC8JwxMVwaRTqK4TJgLtUZ/sqXx/DcUQzP+2KGnVHcADkfaJ0Gw+XXieEFY2HY + NSaG3VGkoxiuAGa3zvDCL4/hylEMV30xw9VR3ABZA7TVBsO114nhRWNhePGYGL45inQU + w3XAfLPO8JIvj+Gloxiu/2KGl0VxA+RyoF1mMLziOjF8y1gY9oyJYW8U6SiGVwKzV2f4 + 1ijDzlSVrt6He67Zdum6b8yrrqIcX0piApWLK+i4sIHWIW9h52kbZCpcI/90YnHSqIVd + h69wHLHHdHH4ao9cOKlfdYnD+k34i/QEhfGFrn+9cvi2J+ECzoQ8/i3Ni/zvsUzHSxnZ + EhciuoCs29D5N0MkIBN085s0iBZEK3IH0YsIOaOgKN4en4lcLuwOffaueObTBSFh0WX9 + nxKH720yMcQZR4mUqsfBaTFm+iDGNcHoIw696X0we2GS2SQodl5hGbjbC4tnFbMPuL1P + P5U7LfzxW+HCb50XJpdtCHe1s133PClNFc/8/tnHtSt7hI+lML/+/kOItU67KGaI53AC + s0Vj3YBYRElGrDTwqsdy6H1nKFNNZlMSS5oyqYgpsyYyZSo3q5iKCrnyLS/m5t65+d5X + cx1+bmJO/oZj4fOHpGVPruv/RXEBK3nrjm+8/tzzP7/wy8f/zD+UXsPZPp27t9HD8j/9 + K7sFoYBji3YRp5WFOHNOI8kY8yBOVIkGhhTMqc6d/g/PGHeSOclsNyUVFc4qnlU0kZkB + CkgmJQBJEl+cacAUvukQmfLZR+lrH7pvbZnSm9hR2niXe+mzb5TMYqveXT90+8TkG4/d + +aLCf3tJe/WhHz6zunhh6QM31qXFMQUHRY6VfzX8wMaKu/uDEXzbAHLD8NzfMDwnAiab + m455KMIEFJkV1nzq3VOs5FTeKSHn8mvimRcwrtTwcuFlnMgm0lTCQ2XMpR38TjbGMojz + 
fRrGhMWChRL35iDJsGPjE2ajV2Mg+timYKTFCIAZKCpM4Dl9tJkZmbzCv52aIA+e7phj + T5k0dXDrb678+Hi6u6rljpPnZlW/fu/+TQtzcrtOcOk9q3pPN+2/85bHXuHe2l2VVRb+ + S/hK+NC+NTPTq678HhhJ+xdyAGfv/3eZUFhknOBr8TdAvxj+JkSeMRP+ElD1Le76Wk9u + ZaC9O9DV2ujPv7mhvfXr+Mus1xy5ZCh5yGXI+nO6CrldG76gU1RneIZH203X2AbOq+q3 + XuPv1O3/Agh6PzUKZW5kc3RyZWFtCmVuZG9iago0MiAwIG9iagozMzIyCmVuZG9iago0 + MyAwIG9iago8PCAvVHlwZSAvRm9udERlc2NyaXB0b3IgL0FzY2VudCA3NzAgL0NhcEhl + aWdodCA2ODQgL0Rlc2NlbnQgLTIzMCAvRmxhZ3MgOTYKL0ZvbnRCQm94IFstOTMzIC00 + ODEgMTU3MSAxMTM4XSAvRm9udE5hbWUgL0pXRVNMWCtIZWx2ZXRpY2EtT2JsaXF1ZSAv + SXRhbGljQW5nbGUKLTYgL1N0ZW1WIDAgL01heFdpZHRoIDE1MDAgL1hIZWlnaHQgNTEz + IC9Gb250RmlsZTIgNDEgMCBSID4+CmVuZG9iago0NCAwIG9iagpbIDY2NyAwIDAgMCAw + IDAgMCAwIDAgMCAwIDAgMCAwIDAgMCAwIDAgMCAwIDU1NiA1NTYgMCAwIDAgMjIyIDAg + MCAwIDAgMCAwCjU1NiBdCmVuZG9iagoxMSAwIG9iago8PCAvVHlwZSAvRm9udCAvU3Vi + dHlwZSAvVHJ1ZVR5cGUgL0Jhc2VGb250IC9KV0VTTFgrSGVsdmV0aWNhLU9ibGlxdWUg + L0ZvbnREZXNjcmlwdG9yCjQzIDAgUiAvV2lkdGhzIDQ0IDAgUiAvRmlyc3RDaGFyIDgw + IC9MYXN0Q2hhciAxMTIgL0VuY29kaW5nIC9NYWNSb21hbkVuY29kaW5nCj4+CmVuZG9i + ago0NSAwIG9iago8PCAvTGVuZ3RoIDQ2IDAgUiAvTGVuZ3RoMSAxNjQ2MCAvRmlsdGVy + IC9GbGF0ZURlY29kZSA+PgpzdHJlYW0KeAG9e3d8VUX2+Mztr/fe8/JaeiEhIYE8QgoB + gnQSJBhK6EgLERAwKF1kEaQIqNiQIvIIEQKIyyIsuO4qdsWyuoLLlqzufpFV4d38ztwX + IuS3ux//2M/e+87MnJl7586cOXPmnDPzGuctaEAq1IxoNGTM+DmTkXQV70KIjk2cNX5O + AjeQ+DcTmxq9CZwNQ/nMyXOmzErgwmMIyV1TZi7qfN8I74fGTm0YPylRjm5CnD8VMhI4 + 7gFx8tRZjQsTuL4V4ldmzp7YWW4sBrx+1viFnd9HnwLuvXf8rIbE88XkveQ5s+c3JvCi + 7yFunjOvofN5XAPtewthyM1Es5EMzUA8opAW7jqE+KtyF2KglJTDNSlNsfkeTfF3SCdI + +D3Vv5DiX/te+eD7hpshxaPCD5Ahu/U8ibmIGEFIiaG8XfFoV4n0HgSZbWh4ahuqAigB + yANITe1rRc14D9oIsBuARtPww2gRwFqAxwGYrtQ+wI7jh1sYIXoCL0J2PCCqYDwjjDaP + Va7wvNOGudYnPR9bvzqJbTB6X2JbiwrJ+srxbvwUmoQ8+HkUwItRfxTGO45EZnrqoWgf + mgPQDEBLIcb7Wtw5nldxGgowGN4JIjeDj3r+mJ3uuZLdRuEWz5lQGwPRr9yARTWe064n + Pb90TfG8CnAgUbQ/Ak8c9exzzfRsdrfhHS2eTa42DO88mogWuODVo55Zka2eSdlS+aCt + bdSBFk8hlI+KKjz5BT5PnuuyJzPUJmDA012DPCnZv/Mkw4vwmBcqDUR1Hqdrs6cXFLld + 5aFeACfxfrwTpeCdLYEBnhOQhO4eqYoUbG3D9x/pH84OtOHF0fz+4a2R/qFAZJAnEKkI + hSA96gK/gr+b78vn8Kl8mA/yPt7BGwW9oBXUglKQC4LAt+EXW0o83El8AJUAWQ4cETiB + bcMvQSZzEh+UMg8eExiBEpBgbOv4ApgXI2MbPtCqJSlIHOWkFNeGDx5JZB2MehiSYqQC + LUXSEECIKCxQaACK4UfaOLTS3FRiLdH30RVWlP27oF4quRWm/vvLil2xrQOH18T2u2pj + OSTR4aq99bj1VuLfxo0LoKihNDV14LBFR5rmTJ9c3uAvr/eXNwDUxx5ummqNNU/weg9P + n0MKvDE6WD9h4lQSj2+IzfE3lMWm+8u8h5uk97oVTybFTf6yw2hy+Yiaw5OjDWUtTdGm + cv/4stojE0rn1d3xrbVd35pX+i++VUoqm0e+NUF6r9u36kjxBPKtOvKtOvKtCdEJ0rdI + 58unDS+d3wjc6S2fNtAbCw+PVQ0dUxPzjq8ta8N7ILNsAWJPIy17CoXZZmRnMpEHoY6P + AS6RWBzZ8TV7HmnFWR1/p4tgUI8ToMSSYnQaPYJ2okOIQ3shHUbj0Hb0Op4Oc3ssakUf + YDfKANnLoDY0CP0Wd3S8jSaj5+D5RnQGbUGHkRLemYVMULoBBzoWAx6F9AS0ouMZlIwK + 0Cp0ChVCrRtQe8e+jiNQOgyNRPvRAXj/DeynDjOGjpc6LiMBDYU6V0DJ2x2DOg4hPUpD + pWgI5K5Ar+IAfaljKrKiImjdLvQUehr9Cv0VP4hbO6Z2NHVc7PgSWNWKnGg43EtxK/6S + PsSs6tjV8ecOESgRRinw1Xq0GT0L9R+C+zSI1nI8AzfizXgLFaUepFqZlaxFjAMdIqgS + 7v4gldcABY6js+gf6Af8DWWltXQjfa4jr+P/kAINhF6SnjSgJrhXw70B+nQSczgL98ND + 8FL8GN6C36VSqJFUDXUftZD6mh5Mj6UX0e8y85kWdj27nVOI33Wc7Djf8T6yIBe6G81D + y6B3Z9BFdA39iGmoy4kDuAiX4nFwN+Od1HH8ND5ODcGn8UVqP/49/gp/g29QLKWkTFQq + 1Uhtpg5QZ6g36Wn0Fvpx+vf0d0wflmKfZq9wAf4TcYK4Vnyzo6jjy47vQcQKyAcjU4oG + o3vQeOjtHNQDPQC9OAj3IRi1s+gcel26v8JO1I6+ByogrMd2nIOr4R6M78KT8TT8JD4B + 96tSW65TMBCUjNJRFspJDacmULOoZup9qpl20Cn0AHoMfQjuC/QH9A36BsMyBsbEVDJV + aD0zi9kB9x5mL9PCvMUWsn3Ywewotpldy66nJ7Jvsx9wy7gNXAv3DfctiMVB/Gx+PYzO + 68CzvwJe/ulicDK0PgfdiybiMjwBbYXReBqPR+uAuybhNUCvOSjcUUcvoyupLOCGV9H9 + wK070FK0lh6Lnu74iN6PPgROmQlVNqMXmFLkYrfB6DyIsoCLOu9oJCUSDgUDyf4knxdE + 
vtNht1ktZpPRoNdpVUqFXCbwHMvQFEZp5f6Kem8sWB9jgv7+/dMJ7h8PGeNvy6iHqeyN + Vdz5TMxL3hsPRXc8GYUnJ3d7Mpp4Mtr1JNZ6i1Fxepq33O+N/a7M723DY4bWQPqRMn+t + N9Yupaul9EYprYK0zwcveMutU8u8MVzvLY9VNE1dV15flp6Gj0eBHPL0NCI4okhBKo6h + fuOXgoBF/cgT5TG7v6w8ZvNDGsroQPn4SbEhQ2vKyxw+Xy3kQdawGvhGetq0GLQTPayc + 5J/0cFsUTagnqfFja2L0+NoYVU/q0qXGLP6ymGXxFetP6K1U+frbCmNUoGJ8w7qKWLT+ + YSAuQesJNn49YAOHe6FaamVtTQyv7GwEaeN0aClpbmJNCNRP98Zk/lL/1HXT64G4aFhN + iz1ql4RvDA2pabFFbRKSnnbcuqzIB70/nt43vS+Ji3zWZYn4jw8l8t85TWLrsrNfQDxw + WBcBMKGAvwraGfNOlD7ih8YWkKChAK2bWAB0gqsWQzenQXv6xSjgGToQYwNV42PNw281 + Y2pZonH108taZDa7tAiV1sLz9eu0vWCk4Hmt37vuO1it6/3tf70zZ3xnDhfQfodIIRno + Ll6J4fG30k1ksQxAr6da/VPJ+DZJYwq431p+WwbghDSkzTEjLOBDanwxby1kgDaZNrAN + yYbUHMZ4Q20b7ljZhspcx0FHpe8ZB8VphNWmlcH3AUlPg4wUH6Qy0rwV8OUKwivedd51 + VZPWeSu8U4GZmIAUQ0HDutpMoODwGqATGgFfjNY6upINtbW9oJ5MUg+8Ao+vq4UapnfW + ALGUlRmHh7LSYDGlg0NqhtbEmsscsWhZLYwCsO/pITWx08C5tbXwVHZXS6HFS6dZO9uc + A23OToHy3EQtoLs0QxW169aROofX+H2x0+vWOdaR+ZbA2zDqnhHtzGhD5BFC8jbcPATe + hcjvc0hj4PP7oFm1hKY9gKVvcRTo7P+Zwvld7YY3e0Jr8yUKF/yXKFz4cyjc62dRuKir + pXdQuBjaXEQo3Pt/R+E+d1C45D9TONrVbmhkX2htVKJw6X+Jwv1+DoXLfhaFy7taegeF + K6DN5YTClf87Cve/g8JV/5nCA7raDY0cCK0dIFF40H+JwtU/h8KDfxaF7+pq6R0UHgJt + votQeOj/jsLD7qDw8P9M4RFd7YZGjoTWjpAoPOq/ROHRP4fCNT+LwrVdLb2DwmOgzbWE + wnd3UTjqiKHb5XBzN7GL/uuCeextJAdNidWjUqoQDOf9aANAGfMiGsvMR1GAs51xNsQ9 + AEoBqgEGdqar2FHIDbbaGIgPQR2vQ/4uwHcRHOJWiOuZr5AP0vv5R8AzMh8NA2gC470I + 4gKA/vCeE+Le+DxaAfnNEK/l9kN6vgTkuSZo11ooI+20AN4MaQUBaP4tv5MSrKFXAPei + WuIGuOOiwIJACF7ovFh4lgebQobkUIcSfD7qW0WdsQZi8C0gXSeul2IDhEYAE4AZ7CAr + skHKDtD9coB950JusOe8YLkglATgBxszgIIoBDYeAsstBaWC3ZgO6QwAcvWAewlo+f8A + RWcVpadepZX0biafaWZz2AvcCO4yWBNRfir/qmAUnpRNl22XfSF/UdGk+EI5Qfmmarvq + snq9+oQmRTNIc0GbrN2lU+tG6HvoV+g/NzQaXgJjpxSIcBHsbhp6X5LwqQngW5MBCNo2 + hC4CEBzS9KdtiAFAkOY/RSfgDYRGpZ6AWliIs7JzdT5dCKCU2dB28w/sqR/7tTHVN8A/ + A9TfII6jxrPvA636RGVGncxgtljsspN4F9DciHdF1VEYwkFam8n8T9/MYdY2Pmdlaurg + a9Xt9s/s7e+1Dy5vKPsalZRkZ2GK53Rai9ngz8ChYCiYp+2Zb6DGPZFZOTRn86JNFZEC + s6Ku6CT7vvjWxk/EL8XPv31M/PPlZTMf2zv6Lhz+42YckNpTBu2xQHsMKD+qFHTIYIL2 + MIM0BtIkcEVCk2SCzWj6p6/kfnDkkJa81/7Zbe0w6Hvm67ShIJ3rxhY3Nml5jq58KqOC + tGJH32BWZFzRCXEczt/wIfZh37ePYfP1+Q1Lr80VP7q6RfxcasNYoP1sxgT81jPqpBez + lFdQLJbLVdASbjEj89LyxcimLBlmTR2svVZ9rThefK2TDgkkO8uQ59MB1U0+nV83Frce + wK3ioAP46F58TKzaKw7AR+E7FIp2fMw42e1IAxw4N2pZzeIKwZSnYZ15vEpfQM+2Fijc + lS5t01nre+3xdlTSXtKendVvUbQHcqiCOGAPygJs0Ky2hmH09GHsECCl5SBlUZrC2EBB + YJM7w0jHQED8PZgE0rUc3MEWs07LUz5vKKjr0VPv0+frelD+JEpntJhz6eiS+tHLxD+I + 4rJpJU04b92ehQef2pzZ/yV2+5XD4m/FT38p/u2Lk7jo2iFc8eOV7/Gwa7hIfF/87JOV + b0DXgK/OQgffZzcBF/kPC7gN50aVDMMrGX4ri+SVMtKps+/HC4F1rv2O0KsP7pkLxDr7 + 2o7ghtP09XWG2j0/3ktfl+qKwni42SdgZu6JDs5nKpjR7AzXve7F7hV4NSWkCGNsM2xL + bEucL9tYlIQ1jFNt8/FOG/h2WY9Gk2SQ5xlYr2eBL0npe4AvMM9OUoc0yz0FScmV/gRx + r7Vrv2u/jEpgJEvadfrCTL2lEEOsLyzUQYDqJLI7GZsyoAsq9Oowkhl5IC6j0srDWDBB + APTVaiX6Amnz9SU4v2d+Xo+gP4nneD+kfTl6k5HnNJiDDGCMASt/dXp5j2Fblx6vDDLH + 6NIFOHz9q0UVL6+dUDDJTqtvRo5j/ZzZA/OGz1i6ef3AlSebLorXn31xcWXDoPzs0dP3 + S3TJBv6xsztQNjob9VQph6c3RCamL4gsSOe2BvFAIVVuTTWq6B+yjXkqMNT9UaMuT/uA + SpXtyEtm+bxslXVrqEzXBh52jbwgYzbliXiX0yEqtzLnNqq0X0swHhDlWvxrbbuW0IfQ + RiJJfmaWLYhkbNAVSApyiA4jhhaygBxOvyeM7AFrGDOYB3JlQuD2OYBmQQi6mFFbTLhx + +XKgGa5jqLxcM/BeToJwHJ/nxrk5t5GxByEjeEOAgjCxjciPzVdeUYYrjm148eWn9QGD + M2hu6Dtve0NreZBtid6LTZ98W5lWMfcB8R/fh7DlwsMlc7cvfKwJ46doyluwcUbjwtLF + u+dceO34imG5Ls/h5t+JIpAV5mUP4LdMdhekVGhsNElGyQUVpqhX9RzHUxxmeQF8aLyc + WqBgv6GVPEO3YcvLeKtKeFHehmuOsJpKtUTB70A0AFeVEAmhK5SoBoQrXJ2RyizVntOA + zNTJsM6Xh3N1uSa/jnpezMNvxtdTG7e/+y6439bG7xNZPC5Gb7h5zxPiM6RtGJV2fAoy + 
oxnWqpPR1P76NR6qUFlhGG2YYmB6CUoVj5RyjVq9QG8w6NUar97AI4NFbsmDhiVF7aoH + 1GqXvpeGYfK8510qHV9gn40KvEmVvsSIf9d+FqRMe0kcRvvytVsjTaYBtBmajBJDD2Nv + BTEUtnqwjArSblgkYRvFyzphTsisEGAPE0acAwLBlpgbRPRoi8lwk7Gug1XmtnEOGWBC + 0DBJcnMYk5HyJSWH4vql0RG7dxxrrluZuWsWdTX+VO+c9CHTzmH9DbH9kPh/WjxrR5H7 + t0u2Ptc/KqPpl8R5QYNPfO0N8TfnfiuNYXXHJ4yffRI5YA3fFy28z44tQkAI2Wpsq9Bq + vEbGVwpyX8iXp1Yb6fN8noMN5cFciVDL3QW62RY5VSxPzrZEKsMSYeKFSwYOW7g40wpi + onM+tAOJCIESAjkQdHo1ZsSxQa/GHcZBU3IYOQ2QInMCM7RH6wvjgDkURi49BGROSLIC + JyYAmQHLcR3IZLPJD2snSOGfyOFPQjqtJJ8lecKZjCCeK0+1aP19V2xrkfcZN2p6K1aK + f3ld/LTvUjxo+SPL9jQeeuoR9skfVozMGiP+Sbx5d3r468uvie/ibHCrKk7gST9+9ssH + 7z2/Y+ca4q/H4HMm/N4M69DwaD6rsFEFil7KQtUA1UhqFDOBOsbLl6haVedUNCXDKnUv + pGFkSkoFW4az1UKB7EW1rlIrkQnE6BXC4MDywPHANhgEZx02caAaEDGoN+T39OUxmeVX + akanuzLOl11du+3mVbb5iX5i6+mTOyZ+infgrX87+DJsYQKffwiybReswRbwur4RrRyF + R8vGaGoNk3CDbIZmmuG+gKxKe7+tyT8vMD+0JHtJzhrbau/q0JqMNdnbbapKIUcIqKlA + jiJPp0tj89ysJS9NRRWAA2TVMXVBZHamUOCA9MvGgswelbkJ9peWgZ/kXXtiynaOcV5K + htOrN9Mqc7oxjJSp6jCW6wVgcxcEjIcKY1OGJYxUKRDwTjaMaS8EXZJOknKJMe4Ub2Qc + 9belEehMPWAKJAQcTBCYCv6kZMjrST23qvmhBxu3Tl7z/P6Vy5/dskt8OeWuq++/+eey + 4JDa3HvEq2+Lv1+ymI6uHDtk1aoxDfPiRatXPbxx84NznqV2pw5p3v31x4+uGp6ZHsmb + tPuU+MNXHz1wPJvIuaqOjxgdrB9kjhyIZtjYVDZs7s/VsFPZtbY19u12WYXA+0KhPLnc + 6svTskye47xVxVPFvDsbdv1GRhUqFHEsTy5Q3ZooMDu038ULlyZmiyRL7pwonqDNrjBg + Wh+ggkkamCVeHcwS2gYiJKgA1K+GieIxQIDtID4CSpgtZIno1FwS8wQnpIdBjWGi5PXQ + 53oNZtDzQHAE89BtFMVaYUbP8uVHg8WHJ7/1979dxYX3ld71kHj+nUtUzuGn7l+xc80W + PGZLoftDXHVPNabeeA2Hxa93/kn84Q3xpU/34OAjsSd3Hn5s/fOEH91gffUGfZyS7JBL + 0SH9cQ2eiuk19DZmu3yfvE3WJufCcox4jsOUIJNBIEc8i9djmvEa5fKAHvKMLBuAVQQr + FCwtkzMcixUUphHl5oU2XBuVgWufk8lpFrC9Ub2KaJzsk/hJuU2petq3fhwovLbB16zV + 8bhNUjgryqyoxFJcUlwdl5aYErIwJ7SWTEliDwQPJHPaEWPO1q7OsIIIJxk0ZNBna1M7 + n12tLS7mAWA5qgMRhBXYkIv9tI/2Y3rD79tXfkmZLm2Jn3zqt9RGagxZlOiJP/bDbWJ/ + iRpjOi6xc9krkgV1OFrkYLfhrSztgRXgQbyaXWtghwv0KpdOZ+J6uWhlL5PMTbndNjqb + KtJm6+xeWbbN5vE+7Zs+uVOZH6y9Xg1cBNIVpAgovJDQJrTeXshpCRiC6oAjqDDLcpDK + qM3Bep1GyzsBYxGdgzHF0HKrMgdp9BAIdi4H1A8IJA5KLD+JkGQsB5VDwBYwVyQJqyeq + Rs9cEFSSRgzWi8/PuHEP3RnfuZaPxe/+/s2n83u7z9g3HRI/7EAvXXnxBK4Ms1fESyc3 + 7BHfEs+JovjLfbWPXn3i1M7f4Rdx+cU/SOvQIZCt7UApYrsOjibzboZR0G7YApcJbrlC + UFJKJYW4aVSRzK6mhQCyqdRtWHHEt2VtgiDxYkKRa5dhsAhFiIZaTIgSB6WZ2BedgA8x + mTc306k336eX3DhDedhTrWLpflF9CD4tyfjXQdBvAoRGFnIi4wSIfTBmU2H/lZiRVGZW + tgH079dff52Yh1AIxhYzEJ5nUXbUgCiacjOsQNt5TAVYZOPgrMDwI74mwpGDrxH2G6wF + M5AkwBSEmqBZu85TV28Oher+cQhmDdTHpkB9xI5vihp74gJYFbAFh3AlrqFYqJdqwzuj + FknHIgoWBYcP5LRcjjkBvg5lL7OMXUnmyc6oXIZsCuVuX9Oc275/nXyd8HSCUCXFkGRg + Fqxeei47i+iXwNZglPkx/Hb9hfr61O/jmlepXuypG2OYPT/2Y56/cbdELAr2+RDzPaTl + YFeNj+ZNU07TL1Iu1jP9jTXGqcbFRoYX3DqtVo7VGjKScoHi9EpGZjRmM3azRgaDaDL/ + i0GM68CsSIyhFkYQhlJaHw2STsv5wVaEJQAiHyjAh6gtZ7/94HMx5zzdvLB0vtiI1696 + gT312YUXO+KbmeO9PCI9byOR4XAEg10o0TWEHovqeVUV7s/W4hp2GjvJuJAVzCdh49iG + HNgZLfX7vMF6/Vz9AiOtd3uMThPtc5uNTFCfHHAjmczBuxVU0OkQvAGTJ2CmszXTHPaI + EAyE5LZw5APflsQcTYw28CQY3u+BcVBcLOmLZMXXJcwmIoHqYCRSiUjBRJ2X+kX7csii + RpR3DxHhFhPMvUxM9B3oO125/tl5vSeL9vPU3r2z3po1YdRolqcV+oxrciWj5CcVLhaL + ztPOOZueKHSLcurp7HHxFXtz/fOaz42IVBh9huJR323MdsTXAU3qO95nroNMyoS9VTE6 + LqIJ+YPBfHWerzI4IbhYfV+ybIZgVVsCVK16qnp/Ei1X90pKTpLTjNO6ypiZmersZaSZ + XqmyLEquFnTJSZ5wVpbOGrBUCYGwPccT0FWhQKYtO2e3b3rnLAWJ9ZPg0oOuT+A2AUZG + PiOeWzdXUherwxk6DxKoIBVMD3Bgx9Np4F9Kz5AiNkVIxS6DJxU5TNZUbLPidCYVyUKK + VBxQ4AxI8xEI3HonFJohkNZHrRbWyIQy2alSdlpV0lJJbKhQUCJ1Xo9komUntEzQJS1m + aSxMRsYPjoCeGLv5HhN/nDO2ZeCgZ86/NnQ9KNx/xP1OarLvvhTbMabo4ptbhq4Xn/iL + +LedO2mqGl9aOniTt8/uhbk5gfS0vLHHfi3+/rumkvmPTZiZ483KTCqacvbaO+sf/hsD + 
7j9MvGsMyBfwT/WI2jHnRjzFCDJY49ANig6wzA3OJpBFrrtXRZIrwEuSOwUUyNdF3W9E + HXvq0I//YNUwWck82N/xMZsJdZvA21cc9VvYEFugpeWIYntpZWbabDbKAkq7FQeMNot1 + t29LQnJUJ4YtIVvbS4pBX8WS/4OYnSAsJP8IHbSBq6ixuPbd+N3Zv6laJa4X16+sovqx + p2427p6+++C4p+j1N8+Lf98kXsfyTVhDF0JfwZvFz4X2KPGo6BZBhhfyi2QLFavxKoat + xAOpMro/Uy2UytcKq+UXqPNgglxQKGsUU/ipirXUKnoVv1bxOLWV3sLvUOyj9tDP8/sV + GjihJRcUNsEsH81zCoGRU33C5WE2AFYpKEpKhYzBtIKiWU7JIlA+FDQvqIG4PMutigo0 + c01Oya41KxBepbSpNgCZbUBn0CUKC+0AnVFCr0hoFZZi4gZZXZ3RvjqjHU5Ctcrg/AEY + kjuiGj1Zalma4XiZIJODTN4RlesZhoZspFSsXqoVzoG+waYKYDSuFgBuIQOHLjqCQWbC + G0ehOgYqkSqUyYREfeAjpqAGQXtaAi27OG4VzlpXk8RS4SyMz7y6urmobp5BhnPhh/0y + 7MdxbMKDPsKDsOmSuOxt8aB44G2xGST7SOYAAZDuZ270gdGg0TCwn8nJEA2c+SlGn0UL + UrKwXKtwKJ2h3P7aabLpWr5Q0CtltCOHT5a5tEpXUSqVESk6VkQV5aQE9FqeFZyhJIuz + Da8DNnN5+JArQ0G58hTFfHGx08hHUvYm2/s4Is4BmlCBrXefV/A2YPzjeCvq1HASouJy + /OwtrgMtB1Z0IiKI0Mxoz2gnahzIUklYhPN7mpIQtgVwvsaHrG6HD5m9RnBdJqGelA/Z + XRYfTAwIiBzoFAGdDpVksLF75vfGaiy5nUx3+KT6gG8FxLEOrJAc+ATRpcFtSyJihvQ0 + YPW8wffUbvVNzZk1IXs4bu1jUj60+JEin3wv+89nTzUtsASUbl1KWrAuxSzr+eaSLadO + bFv31pi0qj2PmpycWuXMnIJnCmnW9LHDB6UM//XO/v23x7c5k2h6pZIr9Uf7T395zZbn + DPgymbtNHZ8zAfYMePDdaE40Yw//gvNDJ50kaNwUi5DFxfI6udulUBhDgt1rz9Bm4AjS + gdq42neq7pYCcPmyJICJkxR+OvDbSdSz6s2c3MwZg1gvh8DEW4LYIHMHgVigFRJfBCgr + hBR6HbG2gAImf3KXCQFOiqZDRc/VX/jh+qXFI3IK91CTH330kfuPByvPsGfif6keKraL + 10QxVuSvXrv06qv7Pj/69rZxhyV5BKeh6IvMYNhxcKAXopkv2PB2615hv5UeIOh2Gmna + yLnsvMplVDh4h8OiDekxuN50dpc8ZLE54Ygof8Q3b2knx0DPiqvBGv1XWnEPZBMCSpM8 + iNQGLfSS6MM2wEAf9kn6sMKsCoI+DIHMygWJPuz7F/qw5IBD5oQ2zEs+BuCK3ISFpUW5 + PPXBV5ZD2nnLXhyQtWbTnIdsh9zfnnznR6x/z8kMjn048aG9s3Y//ena+94/h3O/hqNc + vUAEoYKOS3Q7jKsCudB90Zye6kr1aPULzD4HGxCMlMalRYLLxRvklMuiYDMMGdqITm/3 + KEJ2m9uz2jev9Pbuxy+DJ+3OsbVbnTI5wtiqgL45IUA2KojkDiEIHZRGF3qlJ+zd6TgB + j7eFaIB5pFuIGI7XNz299Ok9i9fsw+uGZ/U++EzJi7OPiD9+8zm+5+qHr7/x2sXfUD17 + uAdSrh/7bJlYg9N//DMeDTKkf8clxg4yxEl2ibAyumib8Lj9BQ/NqikNazSp9RqTMaqM + GoWIHQ9UHKXP41/T5x0fCR/LPvB85L9quepXnNed11NjBdaXrNlhdiUXcjxv9rmcvNxl + VgT4bc4XnMdgDjABsybgZG1yJa8Dn7UrxNpDyRl8yGYLht7z7Ukwf3U8wfrvxSXLTzIA + M+u6lA/QyySfrTQdKpAfxCwc3cMsw3mCOq1ea9AatQynDCQ5koPgVXQFsdsls/BBpDCp + g+Du8dt9kMVCIFiBr8DjDYTuNKiITw+SKakpy/HcOjQX7Eei15lNvoTnljAQcf1IBhbK + lVS9JA50/dYPCvL12pvfsBu3PTIiy3iYvyt72KK+wy6If8bWP2CPIjzg4JK9LPYzlTNG + Dp054Jlnz9XlVxY9mjHEqQVpDwY2LhWDCyoePLIOf0rWfwxjgWDf6B3wFlVHU3kXJ3fR + WGMsNKs4vdwGq6BapYtY9Lxeo/aoKfVNo81qu+mbsizBYvG6wrNEH9fe2s6SDBnQbrOz + 9MQrCapqBrAMZyJuWrjzcvNe9pe06pItTptimLeltWXLFra0x1iKeo7CI1/acHMSvWvD + XmgXjXqLRfRV4BUP7B5moGPR6nxjlVAlqxFqZWuU+xx7XftCe1KPOxSwQpuTIuqz8iRY + Uhgu4rLJ9S65JoPPyGCddIY5Iz3C2rOU6pCqTzDktGVm3TZBrrUXEuEXv/wdrBuddjNI + QWnYE2IwzR+2uxW65IA26HcHgyhsh0CnUPuQRq1UBVxJQRxyREBOKPWgpCUWkp9ck9Je + kSUvVweed/CxhBJe+p750mqRrAPxgKSNDuLRIooTppaMy83bUzxHfP3gX9XHVKHeD70V + DdL525e+JN7A/Alc9twDr1YENi85c1ea+DZT2sffb/XNnN82Xdr5fP9Q8aZRnw0b8k/s + wiqcIT59uuWeHS+fOjRxBUV2Xyk4tYtwkbSS87BD6OSvMDD4HC0naiTwRYSnQSDK9vsm + JKRHcfXZePHZrmEtqYYhJYYucbivOAYXk3LjA/YU8Rlj1AwDRuw+BUjsSbUU7iVgGwUM + bOFGs1PYRdxCfjV7nH6dvkTLWZYDdUxGUyuox2DQaaoQ3DsMC4czuVl6nocyOKbJcjKB + JS4J0HtpTs5zcs6ugt2ECFKAQ6fFN+E4Nie0AsJvxaCPfQ2aF9hTxSVEG8AAoH0RJepX + YMhaU+vYpdrTWqFYkHw1MN3mQVeIFuTHvM7ffBC/+bU4GR/+WmzZdhCU0wP4vDg7PoFy + rhPvlfq3FjpJfFg0ikRhXxx6AUQDmiHaxrC3kQys+a5tXUKsta2tkleA0AjozwWYStgb + Xxkt4gVezWksgkVt0YSEEIio/rZRiikKpT8gt7v8NjnFWAI+l8WlAhWVczgDtEEehm/q + IuA+xC32CPmvRBRkeEYAmM8WCrdh1ZGfhi5+WXut/Vq8szHg5wJDoB2c7rc254hZb8g1 + SRsHIHZuaTR+Hdl340wwysTwlFIrWqI9auc2D05LLn6m4aPBKSdnVE9//Jg9MmfyC61M + 5va7knuXJFeMGr5rxIZ4T+rqjCEb9sQfpU7Oyhn45FvxC518R7fDPLbBij4umn2MO89R + 
DGfkQsYmrpFnjUrKaNWCpoI4q0Ju5+12pIzI7E6cYY3YkM0B6iJ3R88kbSUxW6Ff7WR7 + iDiaE34K021dIT0AGarG0B+84sCg/VMvD0k75spaFo0MKEh3tOIXoP3jhj01+pn4UOrZ + CcWTVObSvLnT4m9BY2Gki2CP0Ad6iFI6AbExmrtd2Kp93Pw8s1fYo91nbhMuCB8yV9R/ + Mip7CZzLyitdeoWNt9lMVEhjd8hCJpsd/OQy0EY6V5s7LaaEay4NjnoEFQYZrAw6Koh5 + C6RYFaTkRmUQYS0EghmUD1oNgbR2kCAVludkveQcgDEi3nDY6AAnLsqTFI4vVmYNOvH8 + 1q3Pwp8Fbor//Ey8ifV/5BqxZs/WcY/dbDlwmb4k/hXUr7j4Ek69CUpulOgcTeJIJgBd + V8NucWM0bZ/wgoUKC16nTs25TLyGU7uciiQ1FbLak+WgSfoiSRqbP/lfapKSukH2f6Wl + 02l2INYeZILIAR1jzRBgmzqIaIvUJ6lbRJ8k2mNizIhxnYs7+RMOcJN1EBRMnZ/69QuB + ihMnywMQihmH8qN3339UPNa4Y9GwrKLWRe++0zz28MlJO5aM3kMf3lAVLoYtnLj4zNZ7 + 8txV8c/IHIR5TG2COahDd0WDITqo6klXMoxa0FJqmU6mDAmEDXVywW7ARKdCNr2hDZcD + +yWWOxA2oDGQUxvVJWfjZ2HN6NzQlWYTYb2u9Q7m/gHTczNYq0vr0K7ZBFPleP5Oin6V + pg7Ni28n8wL2aeijzEBY2zJxRvQXBbLt7Fb948btpu0pXDg5EMr3VfgqkytDo5JHhyYn + TwkuUi5SLVI3+RuTGwONwT3uvWkGGlQNNp3JMCC7yWFxWk3pxoywRjENPE/5ASqQpJIz + qQbrr50uA8+4MnakKjJ5mVpL8SjTl2n3WM3WkKVPOMiHwvZstSek7YNCGbas7JYu/QhE + SGJ9LNRCinS3MJMYWAkPDbG+iEhJuGYG4XQqaAKXjE/t8SEZ/E0Kg1fGB75LSLn0kOcw + Wn3Yq0nyIV+SWiWE5D4cDMjk4KXxwX/jIHDrnD7imUlYZIldT8nrLLHILcYn/kjJN3O7 + awZUDYuZ//99M8A4wRD+RgiU7Z20vXdo/i/W9m385Pg/ZvSj9rPBPo9PnlYeHnzfmdJp + H3/+zXkeH8NDxmSNHn13eTJolkkpVcu3v7JhzNTeOZWDoxUpNoMrM638sV9c/Hg39QPw + kqXjG0rGjgHpMOxlVYb8tBrOapREA4y50EJzarnODuIadrsjyKQ2aWgPTdE3zTabHXSn + Tuukm+6USYR0vLhdG78srbREYyLz4JaNGcwj6tPeowcOBE3ZKrfR0y+0bMyjj7JjxPc3 + x8sLDApMbZAJy6dQ5zZL631zx1f05zCfyQmucdFebcYLRkpmEIw2g80Y5u6jP4TFFrFq + OeJUchZkl5W3WsHkyZBHlAq7HUdIY9+5taRI7h7C/l16Ugk4LW/J3Ts8P/6ekt4Ke3K6 + AC6wZz30SlmgdT/l7zFl85Xh6cTdHi8c1qN+75gnKPWNt5/snTLi8WFrqY/ggBkG/QHR + f2YyEegj0YxSfA6cIlPQVGoqPYVbzaxhX0B7KQH+CUSVMwPYVcxa9jxzgRWqwvPDxLsN + olZSS8Hh0tYxpxUUdS/Thh86RtOz9BSm4P94D0XdHGgZ8CWWIy4XlqI5GvaeGblABusQ + dQITLWnFEXyIsyX2i774onPHiOgXsGOU2MfXF/KgXmgHX67mE1EquGeiASqip2kGRcAV + D3bCHZWDMnOIRT/VK/mNunxGiZpZXpsKP/DUgElQN5f4aUBB+RS7ceo5ceZpcQHsVGyn + p954GyiUoNWXkFKgPHLKrXM/goY9CY4cb8skp9g4WFb0hSfgVOCtlNCZIrsWDmwBDYgo + Qe4/Xf/hE3EbXvS1eF0UL+NFTKa4Gi9i4zfin+BN4r0UOWIG9UlXRwP8x+pfXZmQCSIJ + Vkw1nEIzSh7FxPlBYssTmw/0ZzgNmAnnbnIRbDiinqgA9UJlqBxVSP/wqoI/NQ6C/z/d + Jf3PbBj8d2wkGoVGoxo473g31I7hX2hY+jRHTi6OrO07vHp4av+GmU0NjdMmjpeekIoh + OAtwEeALgG/JqwDwDvYCZAFEAYYA1APMAWgG2AiwGyAGcBrgIsAXAN8mOk9pIfYCZAFE + AYYA1APMAWgG2AiwGyAGcBrgIsAXAN8SwgBoAbwAWQBRgCEA9QBzAJoBNgLs7ui8EFxd + aYy83XByvvL28kg3PLUbDt+843nYyL4Dz+mG53bD4WzPHc/ndcPzu+E9u+EF3XA4jXtH + fX274f264eXdcFDS7nh/UDd8cDccaH3H88O64cO74SO64SO74aO74eO74RO64RO74ZO6 + 4dKcum28J3crn9INn9YNn9ENn9kNl/53f1v9xLK5nX9md8OBJ+8on9cNn98Nb+yGL+iG + N3XDFxL8/wEtXOnXCmVuZHN0cmVhbQplbmRvYmoKNDYgMCBvYmoKMTEyMDgKZW5kb2Jq + CjQ3IDAgb2JqCjw8IC9UeXBlIC9Gb250RGVzY3JpcHRvciAvQXNjZW50IDc3MCAvQ2Fw + SGVpZ2h0IDcxNyAvRGVzY2VudCAtMjMwIC9GbGFncyAzMgovRm9udEJCb3ggWy05NTEg + LTQ4MSAxNDQ1IDExMjJdIC9Gb250TmFtZSAvVVlBU01TK0hlbHZldGljYSAvSXRhbGlj + QW5nbGUgMAovU3RlbVYgMCAvTWF4V2lkdGggMTUwMCAvWEhlaWdodCA1NDAgL0ZvbnRG + aWxlMiA0NSAwIFIgPj4KZW5kb2JqCjQ4IDAgb2JqClsgMjc4IDAgMCAwIDAgMCAwIDAg + MzMzIDMzMyAwIDU4NCAwIDAgMCAwIDU1NiA1NTYgNTU2IDU1NiA1NTYgNTU2IDU1NiA1 + NTYKNTU2IDU1NiAwIDAgMCAwIDAgMCAwIDY2NyAwIDcyMiAwIDY2NyAwIDAgMCAyNzgg + MCAwIDU1NiAwIDcyMiAwIDY2NyAwIDcyMgo2NjcgNjExIDcyMiAwIDk0NCAwIDAgMCAw + IDAgMCAwIDAgMCA1NTYgNTU2IDUwMCA1NTYgNTU2IDI3OCA1NTYgMCAyMjIgMCA1MDAK + MjIyIDgzMyA1NTYgNTU2IDU1NiAwIDMzMyA1MDAgMjc4IDU1NiA1MDAgMCA1MDAgXQpl + bmRvYmoKMjQgMCBvYmoKPDwgL1R5cGUgL0ZvbnQgL1N1YnR5cGUgL1RydWVUeXBlIC9C + YXNlRm9udCAvVVlBU01TK0hlbHZldGljYSAvRm9udERlc2NyaXB0b3IKNDcgMCBSIC9X + aWR0aHMgNDggMCBSIC9GaXJzdENoYXIgMzIgL0xhc3RDaGFyIDEyMCAvRW5jb2Rpbmcg + 
L01hY1JvbWFuRW5jb2RpbmcKPj4KZW5kb2JqCjQ5IDAgb2JqCihNYWMgT1MgWCAxMC42 + LjggUXVhcnR6IFBERkNvbnRleHQpCmVuZG9iago1MCAwIG9iagooRDoyMDEyMDEyNjE3 + NDExM1owMCcwMCcpCmVuZG9iagoxIDAgb2JqCjw8IC9Qcm9kdWNlciA0OSAwIFIgL0Ny + ZWF0aW9uRGF0ZSA1MCAwIFIgL01vZERhdGUgNTAgMCBSID4+CmVuZG9iagp4cmVmCjAg + NTEKMDAwMDAwMDAwMCA2NTUzNSBmIAowMDAwMDUwMDUxIDAwMDAwIG4gCjAwMDAwMTg2 + OTYgMDAwMDAgbiAKMDAwMDAwNzI0NyAwMDAwMCBuIAowMDAwMDE4NTI2IDAwMDAwIG4g + CjAwMDAwMDAwMjIgMDAwMDAgbiAKMDAwMDAwNzIyNyAwMDAwMCBuIAowMDAwMDA3MzUx + IDAwMDAwIG4gCjAwMDAwMDk0NDggMDAwMDAgbiAKMDAwMDAwODU1MiAwMDAwMCBuIAow + MDAwMDMzODI1IDAwMDAwIG4gCjAwMDAwMzc3NjMgMDAwMDAgbiAKMDAwMDAwNzUzOSAw + MDAwMCBuIAowMDAwMDA3NTg0IDAwMDAwIG4gCjAwMDAwMDc2MzIgMDAwMDAgbiAKMDAw + MDAwNzY3OSAwMDAwMCBuIAowMDAwMDA3NzI0IDAwMDAwIG4gCjAwMDAwMDg1MzIgMDAw + MDAgbiAKMDAwMDAwODU4OCAwMDAwMCBuIAowMDAwMDA5NDI4IDAwMDAwIG4gCjAwMDAw + MTgyOTUgMDAwMDAgbiAKMDAwMDAwOTQ4NCAwMDAwMCBuIAowMDAwMDE4Mjc0IDAwMDAw + IG4gCjAwMDAwMTg0MDIgMDAwMDAgbiAKMDAwMDA0OTc4MiAwMDAwMCBuIAowMDAwMDI0 + MTYwIDAwMDAwIG4gCjAwMDAwMTg2MTYgMDAwMDAgbiAKMDAwMDAxOTAzOCAwMDAwMCBu + IAowMDAwMDE4NzQ0IDAwMDAwIG4gCjAwMDAwMTkwMTYgMDAwMDAgbiAKMDAwMDAxODg1 + MCAwMDAwMCBuIAowMDAwMDE4OTk0IDAwMDAwIG4gCjAwMDAwMTg5NTcgMDAwMDAgbiAK + MDAwMDAxOTE0NSAwMDAwMCBuIAowMDAwMDIzNjgzIDAwMDAwIG4gCjAwMDAwMjM3MDQg + MDAwMDAgbiAKMDAwMDAyMzkzOCAwMDAwMCBuIAowMDAwMDI0MzQzIDAwMDAwIG4gCjAw + MDAwMzMzNjEgMDAwMDAgbiAKMDAwMDAzMzM4MiAwMDAwMCBuIAowMDAwMDMzNjA3IDAw + MDAwIG4gCjAwMDAwMzQwMDAgMDAwMDAgbiAKMDAwMDAzNzQxMiAwMDAwMCBuIAowMDAw + MDM3NDMzIDAwMDAwIG4gCjAwMDAwMzc2NjcgMDAwMDAgbiAKMDAwMDAzNzk0NiAwMDAw + MCBuIAowMDAwMDQ5MjQ1IDAwMDAwIG4gCjAwMDAwNDkyNjcgMDAwMDAgbiAKMDAwMDA0 + OTQ5MiAwMDAwMCBuIAowMDAwMDQ5OTU3IDAwMDAwIG4gCjAwMDAwNTAwMDkgMDAwMDAg + biAKdHJhaWxlcgo8PCAvU2l6ZSA1MSAvUm9vdCAyNiAwIFIgL0luZm8gMSAwIFIgL0lE + IFsgPGI1ZmJmYWY5ZDExOTc0NzQzZWU1MzI0YjRlNGI3ZjYxPgo8YjVmYmZhZjlkMTE5 + NzQ3NDNlZTUzMjRiNGU0YjdmNjE+IF0gPj4Kc3RhcnR4cmVmCjUwMTI2CiUlRU9GCjMg + MCBvYmoKPDwvVHlwZSAvUGFnZSAvQ29udGVudHMgNSAwIFIgL01lZGlhQm94IFswIDAg + NTc2IDU3Nl0gL1BhcmVudCA0IDAgUiAvUmVzb3VyY2VzIDcgMCBSID4+CmVuZG9iagoy + MCAwIG9iago8PC9UeXBlIC9QYWdlIC9Db250ZW50cyAyMSAwIFIgL01lZGlhQm94IFsw + IDAgNTc2IDczM10gL1BhcmVudCA0IDAgUiAvUmVzb3VyY2VzIDIzIDAgUiA+PgplbmRv + YmoKMSAwIG9iago8PC9BdXRob3IgKERlcmVrIFd5YXR0XG5QYXRyaWsgTm9yZHdhbGwp + L0NyZWF0aW9uRGF0ZSAoRDoyMDEyMDEyNTEwMDcwMFopL0NyZWF0b3IgKE9tbmlHcmFm + ZmxlIDUuMy42KS9Nb2REYXRlIChEOjIwMTIwMTI2MTczNzAwWikvUHJvZHVjZXIgNDkg + MCBSIC9UaXRsZSAoZmF1bHR0b2xlcmFuY2VzYW1wbGUuZ3JhZmZsZSk+PgplbmRvYmoK + eHJlZgoxIDEKMDAwMDA1MTUxMyAwMDAwMCBuIAozIDEKMDAwMDA1MTMwNCAwMDAwMCBu + IAoyMCAxCjAwMDAwNTE0MDcgMDAwMDAgbiAKdHJhaWxlcgo8PC9JRCBbPGI1ZmJmYWY5 + ZDExOTc0NzQzZWU1MzI0YjRlNGI3ZjYxPiA8YjVmYmZhZjlkMTE5NzQ3NDNlZTUzMjRi + NGU0YjdmNjE+XSAvSW5mbyAxIDAgUiAvUHJldiA1MDEyNiAvUm9vdCAyNiAwIFIgL1Np + emUgNTE+PgpzdGFydHhyZWYKNTE3MTQKJSVFT0YK + + QuickLookThumbnail + + TU0AKgAAK1iAP+BP8AQWDQeEQmFQuGQ2HQ+IRGJQuBgB3u91gADAYFAAGR+JyGRSOSSW + TSeUSmVSuWSuBwSWzGZTOEORyOEAOl0rEAPx+hMACwVlAAAujTSkUmlUumU2nSGX0+pU + +dOgAONyK8ACMRwV+Px/ABwOAMAAWiwlgAEWup223W+4XGZ1G5XWDv68RaLgB830APV9 + veLO13gB/PpmWYXAMAMVitsAAoEgYACYT2VzOUSAAYDAkXbQaHRaOnXTSUyKtVutcANJ + 3NwAPEHPsAPgDv0AAED4x8vV8AAHuBvgAqjMJABaLRoAAjEYWX96wR9PkbgAVCocaftd + vud2G6bvSu8WFZMVcWILvOih7jgMCYyHvd5vYABBstYAEsUg4AYT6G8cQOgAIQdikAAB + wQ8MFQXBi4PBBqSm+4QAFUeJhAACoVwEhZpFuZIAAcCwIgACgQAuyIIgaAB7nieqNGUZ + QAHsBoIAAc5wH0ABFC8PcIR9H8gJlB8goMasjOhFwAyUABum+bwAHIGKwgkEMToWa5fm + ehB+n2fi1AajoSByFaGHSYjYCIBYYoYfE2gAHs4SJOU5yBIc5GjPDKhMEwAATPwAH3QI + AFWZpbIsE7GATMCEHGaRugACwRg0AB4HMdoAAkDwLAAAoDsog53nCdQAA4b4DgAK4iCi + 
hk8GiAAX1hOlZVm707SJVs9T5PwEoQelfMabbEm8A1LnwB6CAWCUVAIAwCtqejBH8nye + nw2gFnUggVAMDwAByFQaAAAVxVZPNYBfWl0XS0NbSDXE911P6FvGvTCnOdp0gAXJ0xiA + YK14Eh8gqAAJgOB8SARGoMgpE6jAW3MlofXFzXVimKrbdkgXdPc+3ihx+4+ABbmIXgAG + 4exyAAI4Th4AAUhEE6aYlWOLZpmqkYxH+NXhXkFZlc+baBoKU5xH2dY5njw59oWl6YqC + K1po1dwXpWm6rqyFaJCGo4672qavr+razBut6Q7R47O5Dks4zoABXt2wbhoOxQZsjuza + 35m7yAAQb4AAN7/uPA5pua5Y+3BlmbLR9H2mCDJscdIAtE6N0+8KLncAAGga/i8S8FgU + hKAAMdHwXSzlwi4mQZTEnGeVeA+EYUaAr8vHGaxh1QJ4idN3kf9QuBlGcagAHoAdJgYB + 2DJQbZrmk/p2KsCYKLKCgLAzPoFYccZwtgDgPK6fJ8MFJQAuACKgJYdJwecJogzX3v4Q + X363mcaRsUof9N+R5STmoaCHxxjfNgm0+gBwEK8AKARZw8h4mFAgBIChsR4OYckpMHgQ + wmEtfU+x9z8YPK1aed5+r9x4P5I88klI/FBD3Hui6FRtB3jtVG9ZDZBjpm/gmRmGhHnN + wafWAB9r74PxDNI/Mt0I38P6hQ0uDcQIOxEKm4YAAwxjIxHwPw+DNWPpeAgAtZwOwcg1 + IhEYtsSISqbAQ9lTg6RlkFH0PEhBRiOjqHWOwAAHgNqTG6OAnAIQPoCjopcBYCiOkHHq + PY+gFgKlAG+OFyAIwQAfAAOIco5lIAVgiPWQ8cZBgAHUOwwoDgShAJ6AVg0TYgtXHYOw + jI1xrjBAADMGYSjIyELcMaXAAB5ADA4pgCwG2mDaGkMYAAXAng9jHCE7sZoTD3HWNoAA + RALr4U8s5OgxxvGCAGCgokqIntCIuYUbY3BXAAA+B8+g4hxTALPLQtYCCnt5GaX8BTMC + 1tlJKNAZYxFMATU2AFcY6x0yWeyAxzID0RjmHEk8DIHQQsPfKPUeg8jKgpZ+Scb8wlUB + HiEd+ZR3JmKbAKPdlAOAGDgU5AlWQ6B9IqHEAwHROYfypZsPOmwAJXCwnMCAeAABtDZK + sCUExZRyjkBEAAFwLgjUpmsUlXA8QDFdnuSkXgtRVgAIqRtU5X4YDuIyBihqKx7n0gnH + YDQHJJAOfOWYGJ2SUUZecFejkySBIKpCAAA4+ZLA3AWcMAh7yQjgHIvgEAHET2CKsPlL + p9QGsOgSARA4Any2KNpY8AAEQHoqJIMcblFABgqCrTKDlHTurzHdadko7CcDkHmvgeo/ + Dfj/AK+UeQ3jYBbTSiAByvBhDBfuD4H7sidH0HcO0HNSAXAyKXU+qJaoDk0XmuIAVWK6 + kHfJdRxpBbrksrhRu0hFKPnbruPwds0AfgVmos0kIyRov3AiA5FQxhoGsAReq6pPkvWS + umvMGAKSugtBOCAko4R5qnHSBOUk3rvmjXmLwZ7uBtgKIyT+yACQHMOPdFkgo9x5IuAe + NgaoAAmArRqV83CgTcVFqOrAI9TLlp5qhVK57S7u1ywWQmMhU67gPACYUGIBTWXSaHXW + 65FbtkGyMxAk40RyD5L4CIJ1oonY3NCLE81OAJx2AsCRSY/VpqVjsBEDjAshEHHmO2OA + GxwDZTeCFhwsBn0UBWA2UgPAdA7KfcwroB4DF8G2LlEgBD1EHkGrwdI63MAfA4WUbY35 + HgfmBodzBRp8SaMEBcCpxxujhHKAAEoIUBDhHIVbTBx5DmC0IZIAEoI4ANA8DNSgC3ZY + 1rmQ/HJUq7jrG4h8I4Hj1AQAdQaKSCDGLzyOQXJL5V57EMMXnIuRMlbIrrkLYaCYpDYH + bREDgScpBABoZsce4StlcrxnwuQlxjiorwDVTZBxwjOmgOYa5OADgMV4P8fxBAIgaKAO + kbslgWhGXAP4bBkADgyBbOYaxlAlA4d2U7PUnhzHDCaBZJ81VZDXHAqMeYJgugA1plQg + +tyn13eyR0f42xZmVA0wYa2jTKgiW4ObQ6ICQLzHuPjJwFAJI1sEOcrekCxDk6ABoC0E + R5D0RcANcZa1TjsHdT0DoGkTjaQAdbT4ABtjhksAADQNlwgTZhE0f483IOUAAPrtTkd2 + lwFoM4XqpAu3KAMAlU46t/gAHKNU4YDAKsGG6MY/AIgbOyALfUAAIQauyIOO4by+ANpa + 8SB+h5Tht+Xb8C+DJhz6BBAUM5A6CU6DnHcPQq4GAp8go1jaul2aQP2iTCd/jQYmhLB+ + ucdfue2UQLkPL3wABZjaGAAAdYGDaAMAswbMpEh8LQXCPJLwHxyEdCMDcIXocNYvVdjG + XQ7SrBHAgfgAtKiSDMGmZAbg4pLAiA6WUA/43ic5svsE2o+Uc/sLLsCgxJBxDqp6HQA4 + Cu9Uri1qIc5IKcruf260GmGQYGAcngIOkIYclWVGb+W4OENgA8BAqOlYXwkI/2IMkQ9M + AoAqROHEHASeBABCM2HKHKHERIeqRkHsRdAijUHaHacwA2BQOqIOwUQWV89MG6HOJwGq + HeUeqgN+AEBKP4HqHMp6AUHwsgAMH2umAwACRqBYAoM2A6A4QE2OzyxguaHkHa6A/ANY + /GsgJIF+GQVcsoAA/aYEHEHOIyAqswkmHOVGuuCiCKpiJQG6HKjsHaBCC3AGu89ars9g + jOU4RWM4H8RiTAVOSCIqGWHQU+A++vB8SCdoAAE2F2FML4AifKCUA0uOAuAeYEak7QNI + 4iH0HsooCCASnm2YToHYHkMEG8AoKI5DEOPCruAIHscgByAaJxDQVkHIHsI6HMAgmREy + QaikHIqKp8HWpQBeA6BS92a1DCK6e4xCBqAMGmAABOA+euIq2oZA2Y2M2iIZHKXG2q2K + 2dHVHYv2LyGkG+VGH0BSC/EK9Y1svCO0ruAEHmpQB2AgZRGKIOG8HG6A2CI6ApDsIWkO + N+kHAgIMGaGoMgBkBWM3C/IiKKAVIoIis4s8tAykpoihDA+2uaqmH+HWxCAWH4446URI + AmOOkcZQBEA+W41E06AwkUAAHnJkyET8ngHZBxDeA0euG4QkK2j8AAHQ5qAaAYYc3yII + hYN+ApJoSg5odEBSZYHmX7H3AKo8uqhFEShMAUH2KsBqAUUfFmIMGWGmmg5yRy/sRyHo + HsN+HmHqMFI+MoHQHYp6BQBEl6HiPmKK7sRsHWMKBGA8LK0Qs86ZKclAAACwCUB8Pq/o + IPGPGTGXJKm/JOKaGfNFJ+AQdCqmxo9XLFHXH8NOruHyHWNYCEAujs4w3cHLA8AQVOS6 + NwHhL0AAAuAmRqY+LCHjJkAy0y5AsGb9OAl0k2AmsyUAK+NjLuABOGK2A8evC/JEQPJJ + 
GbNAKYG7PCAAGSGsVGBEW+uwYsuuHIG0nmCoCSrdANNYNIx2AAcwBkAOzZC+SCHGHqV4 + HOAjMvO9O+4gGiedN6N/P2JkG8G6NgAM3MA5C6KWoAfK6mYEkiklH7LImXLMU2HmHZBe + B4AamgAkwvPS2eIIyFHSfK2SIRHlOrHOQTRXRc2hRYrqGaHUngH+A8CDM85FQIVkk0Rc + GtSIIQYnFUNHAOKbAShRQ+ZQAEH4PUrGPoAeAgRGHOHMZQA6A8oeHGHEOGAyrOgkMKAK + I2xcHiHgp6rAmAG+G+MgBCBCdCHQHO07SqRqUCNoH27XSqRHKeXxGsuPB6pnM/SAXUGz + UPPTTwOAAeYMA9UcNPSUKYGaGiNYHkAEeuAWAYs0aEHKG2nmCiCM7BUKYqlwmI/GWdE2 + 7Qlk1giLPmNGnUcgGkHCRcAmAyqOZsWkRyG6GeF0AAC8CyQNVGIAAIFA4JBYNB4RCYVC + 4ZBX3DwA0IlBRpFQAAYxDY1G42/48/45IZFDY/EYm/JRIXXKwAFZdI5hCm/MwAHZsAAJ + OYLGACABPPwADaFMaJRaNR6RSaVCZQ/AAz6hF4zFRpS6tAo/IKvW6U0a8ABfYa5HK80Q + AJrQAATa7Hbbdb7hcYFD33T6iArwAKpcpjWb5f4S0sEABdhcBBsE0rPaQRjcPj8hkbjd + LszwBeAFeotkoZfs5f7LYLFktDaBNarZn9Vq9ZGspUMtmM1VdbBs9kH7ub4094ABhv85 + pbTawTteNx9ZlIk0KlPb3yKxJcA1eoAHj1wABe1b393QAN/Bwa+JfIAAP56Psp50PZ7Y + S+vgANDshn9fbt75wtPxPdV22/4AHfATmqOfEDAAHsEv7Bb3PgfT5K++j7PY/C5P01Di + wY/rQrCF8NQ+40HQgs0JBm+7pNAr7TQxED2w40cWxiyURPmvL6xNCkUJicBwGyACPH8A + AQhCFKEtCnJ8gAB4HgsAAOA4ECEu7IJtG0ZoAAkCQPAACwLAu48DHwAB6zIAB8zPH6PJ + wnTGgQAAHThAiGRfD0ZTsx8aQjG0Jqukr1phCqNnGcZvgAdp2lqgQAyCCIICSAAPg+Eq + CluW5YgADYOUKlADgADQMieAAMAwDU0pAbBsGYy4BmK6x4gWAASBGKcsSzPD4muX5eAA + fxxnCAAGHsebzPiAcfgAfQBgIAB8AQBQAHkBwIAAEYiCOAAKAuDEjK/DqlzPJJlmWWlY + hIHNRVJO87RExLLryGV4KWmcfHYdpdqCBlzhQFEcI7HSGHSdJ0AAchylffB5PMBACgAd + x2zcC4LiaACUQedB04OmwDAAAYBsycJw42EQQ1odx3HKAB9n4X4AHsex4S4CwHABgQJJ + 8E4ozfOK5HvnoAGkVpUgAFh4HUAAFvQdp6HqAB1HnYYUgzbZ+N0s1lhEK4ugACOuW4s1 + vKNETeFzTAOHAAFB42EIQVoCm3XVD886snQY7qo5yHJX52HZcgUhVNz/notQECJagRhW + kl/oWiRSp8FB7gAcpyneADqZQI4jhaux+2ABmGLRIJKEnXYiiMFlyyaXZdHsAAbBsCds + Aqupdl2agABEEUmg0DVpm4bYMgBeAnMeeHigAbpVcaFp+zFqkgmSb9fiCFFJmuesxAYJ + 4qpqEIRIbOiYqaADeF92JrgAbxunToYWg59BuuK8mcgh+m4Qa+Mj7puyYYDgeCsHBICQ + nosBYDOAAEkJILgADnHOSBRoSgAAeA8CMhKgSFKoSuAEAQxExj1YSAoBTG2OqdHyPgGy + vB/lOAWAtK55yenaWM3tIIAgAg+AAz1gYGAMjcAAOEcA60nAdAioYdqywJARghBKChkh + 0DmHM00Vop2hgGMyPBnw9B/EgHqDxwoKQbA3JC+AmA1BqGKAYA2DiSyeiaEyMAAAXgvg + 7ABCBjY2hspbBqDWCD9ijGUXAsg+KInxD4H6g+QiSR9j+c5FlII+h9oPG+N5QoBGOlnB + KacAoAljAIAGp0BABFOnaYZKIAA6B0DjAAP0fwwzFnFGUMkbx3wcQUR44If4/gemLBhB + VxRtk1ICcoM4aMbgPAdbOx0nouBoJBAMBkDqvFjLQHENgAAVwcMzHgPJB4vBuMMAeBeZ + 4/x8zMHcOIAASAdJuHMOdJIAR/y5LDCcuRlGnrDHqPZpg7B7uUGyNkaxZxqTUBCAhToq + gDpBeCpMAYCmGSLUUVgghPYbz3jmAlZ4AitACKcZejYBB/GZAUAQ4oCwBKdHwOqWIMwW + OCZMsN3CTRsDXYY/IAD9Fpx8IGSV4rMB3jzHi2geQ52aD2Hcy0ASDx9AGSCPsApIB+AH + qcAMkEGiegDAKsuiMNF3GZJLLdIKaiCj9ZUj+VR2R/LGAImInCD0xjhqED0D7kAXgsAq + AAmbMB5jyl2vAICciDwWh6OOcwzB0z/HWAhpg9QGFOH8OCcyxiQAOByDFVc0SBj8rGPY + ZEBh/lDAcC8FFfiBD3Hm6wfAxRlSpAql8B4CltgYHyA0AAOwQAyWw28mBlByxNAANYdg + 3ZSj7coPcBpIB8gJSCASEJOADMMHuNFQoBRoDkAAAYMQP7qgJU6YezBTpCl1HwPRyAAR + sDamqCsBgABlDWp+PMf6RQbgil2BoCzwIVqwJgM2/UpZTGEMMedTuAI5wgcqdVfdoU2k + hGyN4bZ8h23mHaApMQ+AGzMAacUA12mOSUTuSUd407zA8AGzAYA6iegMAkB+CIA66g1A + 06cDIFltkEPwNZHoABbDtGSm8FKXwCXNIKO8csQAIgbrqW60jrMMqdWVZYuY+UH2NcEu + dSYMAVp1IWPLLQABQC/FUrwFrMwEAVtlkshI4Rn4NAYBNmYEwPJfHrT0AA8x1swluSAD + IKEtkwzisPOmdosgAzznshefbqgKU6PYZhigBAjBCWoCTMx7tKpqOUnoPQLy7ZG94jgr + dPOGgoMPUSTkntNHU0YA2qTzHoC1q0kIxxo2pGOAW4ADAQuwAJVckKUyIiyVaBcEyWwC + gHYYPQdzCdcrGACx65jDB8j0TFnFhOz3IATA+8BH6QQIAaApnMdtPx+D4LqPYeLggVBC + tsQSRTnB9DyaYAQBiz9crLySy0bg7AABGAWiYF4KnNH4EsLYUAAAEBG0IQpXJlh9j4Qe + PXY4AALglfcO0cLAx0DZupi/R4+N3bNznnVJQGGbD7HyXUFQQ7bAQAy7Ahg8RUo+DiFs + NJDRWC4FgAAcAKEkgSA5kche6wAcJZTyVtAz4eAVBIqVJkQwNArSjxR9Y5xszm1ywzoA + CgI2yHUNxlGa2ZgR57oLPXOBm3mHuPFpmbkmjg7Km8C8QwahUuxJrJxTKxi4DGJsAARw + fuFCz39Xh3sBZaYSes3LnDYLoW2mFTAGwNuRck8FeOp2jMRS+yaoowfNAAEWI8RsBxFB + 
eSEDoFVoiG68GcK+VgFQQvAGmLi1IGwW6PAWBC2Q4hoQ8AOAw4oFgRKlswXXs7TM7mXw + 5hljY5BqKFBEDVIvhwAbn3SUUf82mtjGTEFIJgUFTFaIENIbTthdjwgMAsFNdczEEHWN + +oWTQAbGYSP4fiQQHAWWmOgbd1AKAgW3+4eoeBwQfYexJLIrbr+BLDsLNYB4gofIeq8Y + cRyAHwA5xAFQExIohhnpyAWgahlgc4CJpgAYCRNwBIBy/Ag4bYYp2w9YfQfEAgDbbofQ + exMSqxhgfC0qnJrbIxZEAZVYzKsQpwfTohjwzLZYzLhZB6qhZAepJLNa2Q3JIIDYFTFT + ukBkGRZAdhpgBAbhJIYgUZsgBkMAAAQkMbwJII/g66n4CcNQABvbfENR2AekOK6rVQ2T + xhOBmZQ4dq27bsNDnBHgAASsQIAAMINQMjnAALfAeIBwuqmpZYA4BY4oAoBBjYhBPwjI + hjXg2USpNQ9cSyiQoq7pZkG4A4ewnoCAd5hgBgdjS4G6OUO77ohBi5gYZgcoaqBZAL94 + BxzgAoCA4qUBhkX6rYo7XkH5Mx65XgeZB4BgeZYwDof52AGADRIrywmIygdQdZowbQdp + s4cQexowe4BJzjCgkAf4AwnrMyqxZcSRjZP4gcTIvIrboAfpqhlLhiVIfaRYewuoBAe5 + YwBIegnoDQBDboE4CRKIC4CpJrBJ+wyiYByIeRowcgeRgYdwfZYYfKg5ihDIfQAiRcc0 + HskD3cSIA8Sgg8eZzkI7bKr7QIksYsk4nERgAofQzIA4fYzIAsUpLAAxmYDgBZJoCrFC + mp+q+6XisAjRlx1kPocSoJQzs46wfJYYcYeBgYBYGp4DH5hgewdCn4BgehZYAajYAURg + A4ASbwA62QC4BZmwDIBaupJcBcoguTXkOJwUpAACKxhIdgfBmAeUBrOcASG7khQwd8PQ + AgF5mwBoDCIboEGxyAfwdpMQfqfRYABS9JRYgSqQnssQgQA4AZjYBoAxZ4Bz3Zrcs5LA + BEuCFZo81QnKrCnEYY70DJMxNBETcRJIdAdsbIeqVAeoFhNwkofIeR1gA4d6pwcZwQBo + AI4pkbR6TJZaT5ToBMzyOakQ8wApjbVJjbATBJjrupQCXomL6AYgaIZAAAYYdwxQCi+h + H4b5wQIIEwHBSADhLc7o7I7cds14gYmZQobQdxX4ZIdJ2wB4EhJoBIfJYwBYexq4B59w + ETsCuwmhbw2U+s+s/E/JBgUiKJjgDZWAdIekPQCwBp2EtjboD4By+wA5Z5QaVE1o3w4B + HMowo6nYAAbIcq4DMh2DSJaZHhs4n4E5FlC4jYXgYZ8oCICpmwBwBC2S1q2SEBZ8olCy + MVINKYojxiMh2yUhsA6CwAtxC4/lKgw9KVMFMYjbxg3gacOZjdLQ5FLgttLw1NMhCxbp + GFONOohNMw3s7I0TLFNk8FLpFQ4dOFO1P5r9OlQdQ4gVPFNDAQwqBVGD7w/NQA/dQVRA + rdMVStQc2VK7VZTtRpE9GNOQsxFdL9TFS1OdPlUtOtTSMlTi/1R1LdP1N1SVIFVIq1S9 + WtMdVZ21Rgw1R9MI8Y8rAUTtUAhg9US9TFW9XFKlXVVtT1XwwA/7Bshw9dYdSAhrxgHF + bM+xhgyT6FYovLXgmFM9FyXdZVOM2Q6kW1PVZ1WFYhdVN5DI6Y6sPqUghFaomLXg8CMF + c1MldA6rAQFtgNT9axuFeAx9g1flhIpNf0W1gFgVZ6nFhAwFiRdTXgYFi5JRJYABG4pg + lIXNj7UAADA9O5A5SoW4ACPIGrUh91Q9hlVtgJzViCPlilSNUVQNeJGLXgZlnbUxox8R + FYFVoJiglIYtoqDppiP436XZIbjZA7zQYMMpZhA4HNqjxVVRn1dLggxwFlrlgc/NmlUI + xdSdnBBgkpdobltBmhgJmJJrXheC20Nq/hgbwljMBYyllJ9AbyWJMhpga9vwABuqykPo + INwhWpm1ZZnwa1xVVtrh09mR+1sAuNyI91HgAFtCHlPVaM2ZJNkbyhnRmcMC9IV90dVw + mom9vSWIDN1U1ZWAXF1wAAH12NbdVoIF2tMEu1xSf7BNxtrxuBKq8zxIIt4VwwvlTafp + HwId5MoSm5GRVCalTc2UaluNkZesPQeBAZLJmz6FuJbyJyJ82TzCCKCT98OVwgINz925 + ly3txd3drtx5GVFcPxs9jlJwvly6BaBgAFbM+Neo/qnR4z9hX4YAZSDgCoAxaYegfJ1g + bIAKoQAwCpZ6RIpwfYcpYYHgCZDwckqZ9ABDfABoCUBcjozIAEbtjcKKUs3J1oGqE4Fc + C1WlIN3F9lrd91dtglPokAZoZyAwh6Ro+IlaIDxzx4w7xlGZUbGYkaRxB4FoFZIt7IhN + rNvj0whB8QZob1NAcoApygfgCMRwBZNwewb6ooAoB44oBAC62QhCRRIIeobKIAAQCZTo + BYC95gg0Yoeoa6IAf4CiERZQtU4p4ICi0IDYCRbcTQgdbFbV/o5GGN3WGdx2GpBYaAaV + NAbIc5zgEIE1mNMiR5B4cobCVgKwJ4Ixr1sWF4hIUAWwVDoIBCoVJRZ4hAdQbplABIB6 + 9LFC2Tchple42UlIBbNkHZ5ofQpwezOQCwEZUqDWEanJNQfAdJYYJoCwHRBEVogtyY1m + RlrRN13l945AY4ZQ5ge4A5LYBgB0BYggegeSn4br8JoYGM+M+ohmdJymcqIYggbwbUWx + ZxWADRKAhKvJmABpaQmIdAb4swJzdGUlUdSghAQoUIRxZgExNwATDghAb4ZaaklIeTkA + DTsYeYdin4dIbZlDbbXAAxZYeAc6orlRm0rBAIcrfAG4LAIQkjQIDIbAzINIL4M2atWd + UmRd9Vvx87BIFeol3o9wZwaSageAf5JucmcwgYZoZCNwFmdwAAagZ08pAx1iR6dqDSyp + QwdaoSEpMQC4DJ9wBQBa2V67fEwiIGszFWTk+0dgjKDRYweqnzvYJ4LIhIdIcAxQJoIK + yghBdpFchYhYdMbAAAUYYQVppoBhpgAuLwnDYkUR1g9YlEcTjsBUu4czfAB7kRRQnrah + XisoBQB5WGXLkJmyTQzIfiRxjgepIIEIfLboJAGl891TbAgeaw1eKOoJFmohxGbg4+pG + pWphYGcrIIdqIGdZ2xUhLaEpyAcocaWID4ENH4A4BA4odwdh9aVRzhLzx+7m7yRRUxLg + C7x4d4dyIACACLbu8a6u7Q6JWIFGTQg2vuv+wOhNm4kM2Qb5vJn4caagcgep9YeAAZyA + eycQy4ApYzM0dYgsluYSVMejoGNQi4fokABgf5NwCAf5Z4D4Bp4AF4D70oDBL2Uwgmwg + xgxw9m31v4/m4Oow9u4su+4+p1RG/AAGwGwQg+3gkbxieploe51gcodp9YVoYhcgAgFh + 
mwfwe4pwDYfyIYDoBxL5pBZ4BlFJYER5LABpabeJWEojAQmHH4gT6AaIaaf4dDkAAw9A + rYyimMWwCQCbbrx1lgtr6ACwCJWAGIGFV4hVNo1vGupepu5NQ/HQJQH3QAgtcdH3FIq5 + erfAWAYgXCBYfLfADwCJUoHIEaygD4DZLfMgyPM19af4dIfZ2ABYBzbtQ4bAZ4YSN4KV + 84ztWPQepPGxJqdxB4Bwclk4CIBChAgbeI4odAdhygEADZL4bQb5lAEja6UvY8ylsgeg + exyADACiIYbitws4ELx4cAcp9fbCIfapyAgnYiBej66oEWmYeYBYEh44aaDgEgDOV4g9 + zwJHfJjiSthQhHUo0IeIAyCgxtsgmGe8WweIeEPQEQEpxAA2+Q2Snxym9WIADp7w2W6J + JQCLlgoob4bQxQK4I/HvQPWw1nQm44BIfh9YGwBKHmeBGQkoaIcqpAER4fHXHnforunu + hfHwr/gPge+QowZgY5lgDQDrR4cQbxHwc/ACZxKO1xB4cgcRQq1Z4A85Nzqt/QHxa4o3 + j3kHkRxNd24nXHQpMwdZ84IQC7fEklbgkZpZyABkSAzgbgd5YwdwC5wvm+/XnIpA0I8h + SfUYgV7xhpkxAJAYAQC4qvggo+f5AO5YgvOpbYcociWOfjR4eMwgnA7YnJhm7VJ6M4o/ + ryavsHWvsQ43k5JoCAARmAGIAp89CwhgaYbJs4ghlQpwe7KBLGWp44cRgfaxJIBQBJtQ + DhbYFYErFQmAaodJzgeoDqCHvXkfvgolzVadY4gRgJ9amzyBlADIFwJdrXgtMn0XkP6M + Tf0w2v1D94coswIoDZmABKggkKe5MQaIbCWICT228weXavaYgAAfb8fgAA4GAwAC4TCA + ACQQBoAiUTikVizScr6AD4EJNADpcDSABNIIxi0nlEplUrlktl0vmExmQAaM1ADxAwjA + AIBAJADsbTFAATAr2ioLBYKADqdbsAAdDYbADdcDgAAhD4epdNAFJpUUer2owWCoUADf + cLiAAjEAgADjcrlAFksz1sNHpIAdrueAACAhGoAfIHC1nbUiK5Hk0sf+Nf8zyGRijOaT + YADwf+FCACvowAjWAAC0WS0mllDgeUJdQUIMfkMjkum2Wz2m1yU1aM3nMfcjfABKCzer + oIAu240wZ7cdECFRfw2IxUtx2P4/GymWzGFdzkaoAHoQtQXCgRAD98wAAfpAD+9gAAPv + iuO93w+WiAXr9vvAIA+X6+LGtC0bzH69D1PkYRtnqAAEhYKbXJEkjFurCcKQqmLcN0nQ + BvKAAdACYj5v3C0Rpuex9gAbIICY54AMTCSVOnEjTOuy7MgABgHAem5vGSAAPAuhpsm0 + bQABEEIQgAdElAAB8mgAfR9xPKKNAgB6GnKcxzAArKtHEcZxgADQMgym55HiAACAI4qE + ISeB4HeAAMgxMhvG+3wTBKEqzrShQVCAAB+AChKQQg2MZUPRDqwwnCdHOcJtgAJ4NHCn + aDsiZhp0gfh/QIEQOAw9DRyggqDuKBADuKex8HyADxx0yBsHCdIAHkEovRZFzpPlRLIR + o7MbxzXlhIpQjYRfYdkWSllFt2fp8wUHwDGVAsNsgVhcRAfMoPK9oLAnHRqm2tQQA2C4 + AHmu4GAUnwqCQHjInMdp5gAcgMipXDosZXdlJbX0bAOnoAAMdRkPQfV5oopClKYdqng5 + Mhum+tQRA+DqPq5hSKrsowLrKABvHDMARhCrRxnIc65gqCYAY2vClHSdh3AABoSz+fYD + ZXYsI35nmeopZidHedRyAAJIJGzgQDOKyD+vpAD/PkcZznXJgGgWAAHXVELInEdS+nQD + gr3xY6URjnyVX8wp8nZSAhgs5aEaXs7ImMbp6TQFMHZ1Q2577XmgKWczfB4A7cvE8mmv + 2+rRvYf2t8S/7Hvs8rzvTDfG62inIcwb5zzgdwRC3sddQBvyLbS0J5qsHYIaIAoCgJ0y + ZnQfSInEBgdQfY3ZbOu0FFp4GtiZ4YAAV4yPnTWZ3+Xrb/SU5Z7+iACxKMCYUCK0IAvu + Bp2mPrB/Kce58I0COs3odGqBCDs6HFLX1u0eN56VNfYAAd55XnT4KgAbRvTACYETFmTt + UAiA8iKoyBEEAA+UBiSR2JwAWB0GRlwGgtdGvp0rvCJOoAUPs5YNAEjdTQARapMxfDIN + yBg8arXyqhPvAg0R+wBmjAg+ZNMJSZDIG6PI0IKQqu6Z3BpZA+YiAAGDEdAJ93NIAB9E + 0AA04oJFBECJx6AEspaIIQUdUWydgddyTwn0QjbDfMOi1fJK2zRCdQPkdY1wABCAuU5U + pMXMCTFGLMAAIAOGFHePFu4HwNGFGIM40AEoDKVISDMFZOgYApJ0ZAb48TijrAqEJ3QS + gfAuJSf6Th8IxGmGBKFc4815lVKsA2VBQwJsrAdK0vwECGgmlkgsBMYSVjXlwAAag5FW + AdBECs/kGZPkyP8OAbAzAABYCWDl0h1I1GVRqYUCQBEzgvAGaA/zTGntOMe1BAA1huFq + BQp5ahphpDmROPgECKxujTRACQDJXyTn+eiPcAAOJ8AAde3IlURFWT+SePojUWYEkFHs + Pkow+B9kaH6P4gqm3HGPck9pNFFISKmIQ8UAxPk0uxn2QYA4B4jRIo6AAZlJydk8AACy + lkwTHgYpgACWQJpaS2RggAZIyhmgAHoPlx0wzSOTA6BgCQAASgkipBiZ0GnUDpG2wUI4 + HV5kPIigOcrmJskTcWfeqzlj8OOqzTdySAnKnqqsNUdREh8AfCVEBvhKkMAfrkYKIo4x + 2JaHCOtog6B4tUQUqwe4/iND4H/YMgRAh/kFe0fcAUMyKycAEftTlPyTqcQIAFAjAgBH + FpE7EBE+iJWBRuPI+4LwTwWIo8tOARLWAAHDa+mUsyeWfqBbWrS+6mTQV+AsBkDR+Daj + wCYDdRRrjbOEWxiw5x1FOay1dzA9nxAAZUeQcA40tXIAAOJk5ClvLnHogqyJ9112fHWO + 1OBUFQDcYkAAFAJEkDcHCloAYHgcHuAekhvbZCJu+AAKUWgqgADuAIUYdoAV5j7AKY8A + hxE0NKczba/Y7YeAXHTZ88xBR2gKnsCYCiSAMgDPIEgID2HjTywhMONNuTsI2Rwq/E5k + RzjdGepEIgNCKjsxwAATwv8ADoAY3cA4C7aErHKNQ3w+h7qsAkB4wo7VZGhsdYtJgGDy + JqdiPQdzdx+wKcmP89uW0CD3HkUYBADSfAOAseQiTij2j4HoUbMSCgLgkA5Pp1+ayJAG + yEoAfRBR5jpL6DoCMwAvBKbFi/FFuHeDeHApQZA1X0gnk1ogmGF0UDKFqAAMYXYfkUi3 + WkRArxKnoBEA5NCqCWjgGa0gco1SrALIgfwAJjwHgXqK5PMBGx5lGAcBc8g8F466KMBg + 
E7Fh4jnYbgshICAGE+y85Kx2RSrAVBGmQfI9J7AgBoCdG63yWj9IGzMbqBA+hmDrpST+ + KXeOYGUMsZcwYlboJbY0+4KATAkSYk4lA5RzpaF4NEYTHx7soHgAEo2ymBAKpE5o9p87 + HzcwejA/Jo2yzC4i6QgT4z1j4ILh88hUqihDBoD4AFMFQbyg1urlHKzbHyHjy8AA5h1n + LG4SAt47mUDzH6UYewASND1H8PhJ9mD3WRyhvEiQCQHNXJYPvjR8uoD+olQ09Z59ZE7T + QV0ANnylUiAgASBoHQJJkBKBktxD6ipNR05PlkQuVdt7gomepGx8dCoFYMfSrB4D0TPO + AywyB3mgAOBzUw+h69CAIPs/a5kdAiAoxYAwAyEgFsaQajICgDFKAaAhq+ZmrkHpFSH0 + NIaU5D7jyvt/p/VLJGoNc7ozRtkiGsORSAGANKgAqAqooJAMAfKuBcrUsCG+i9X8U4/q + fjfJ+V8v5nFeLfN+h9H6XcSolR+n9f7H2cTkBAAADgEAAAMAAAABAHMAAAEBAAMAAAAB + AHQAAAECAAMAAAAEAAAsBgEDAAMAAAABAAUAAAEGAAMAAAABAAIAAAERAAQAAAABAAAA + CAESAAMAAAABAAEAAAEVAAMAAAABAAQAAAEWAAMAAAABAHQAAAEXAAQAAAABAAArTwEc + AAMAAAABAAEAAAE9AAMAAAABAAIAAAFSAAMAAAABAAEAAAFTAAMAAAAEAAAsDgAAAAAA + CAAIAAgACAABAAEAAQAB + + ReadOnly + NO + Sheets + + + ActiveLayerIndex + 0 + AutoAdjust + + BackgroundGraphic + + Bounds + {{0, 0}, {576, 576}} + Class + SolidGraphic + ID + 2 + Style + + shadow + + Draws + NO + + stroke + + Draws + NO + + + + CanvasOrigin + {0, 0} + CanvasSize + {576, 576} + ColumnAlign + 1 + ColumnSpacing + 36 + DisplayScale + 1 0/72 in = 1.0000 in + GraphicsList + + + Bounds + {{316.728, 320.875}, {31, 14}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + ID + 173 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1038\cocoasubrtf360 +{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc\pardirnatural + +\f0\i\fs24 \cf0 Piped} + VerticalPad + 0 + + Wrap + NO + + + Class + LineGraphic + Head + + ID + 172 + + ID + 39 + Points + + {290.381, 335.905} + {375.476, 335.208} + + Style + + stroke + + HeadArrow + FilledBall + Pattern + 1 + TailArrow + FilledBall + + + Tail + + ID + 132 + + + + Class + Group + Graphics + + + Class + Group + Graphics + + + Bounds + {{495.565, 306.375}, {7, 14}} + Class + ShapedGraphic + FitText + YES + Flow + Resize + FontInfo + + Font + Helvetica-Oblique + Size + 12 + + ID + 170 + Shape + Rectangle + Style + + fill + + Draws + NO + + shadow + + Draws + NO + + stroke + + Draws + NO + + + Text + + Pad + 0 + Text + {\rtf1\ansi\ansicpg1252\cocoartf1038\cocoasubrtf360 +{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc\pardirnatural + +\f0\fs24 \cf0 7} + VerticalPad + 0 + + Wrap + NO + + + Bounds + {{486.315, 299.375}, {25.5, 28}} + Class + ShapedGraphic + ID + 171 + Shape + Diamond + Style + + fill + + Color + + b + 0.486503 + g + 1 + r + 0.975875 + + + + Text + + VerticalPad + 0 + + + + ID + 169 + + + Bounds + {{373.242, 316.625}, {147, 36}} + Class + ShapedGraphic + ID + 172 + Shape + Speech Bubble + Style + + fill + + Color + + b + 0.653558 + g + 1 + r + 0.571443 + + + + Text + + Text + {\rtf1\ansi\ansicpg1252\cocoartf1038\cocoasubrtf360 +{\fonttbl\f0\fswiss\fcharset0 Helvetica;} +{\colortbl;\red255\green255\blue255;} +\pard\tx560\tx1120\tx1680\tx2240\tx2800\tx3360\tx3920\tx4480\tx5040\tx5600\tx6160\tx6720\qc\pardirnatural + +\f0\fs24 \cf0 CurrentCount} + VerticalPad + 0 + + TextPlacement + 0 + TextRelativeArea + {{0.15, 0.23}, {0.7, 0.7}} + + + ID + 168 + + + Bounds + {{339.874, 336.5}, {222.001, 15}} + Class + ShapedGraphic + Head + + ID + 42 + + ID + 27 + Rotation + 270.19351196289062 + Shape + AdjustableArrow + 
[OmniGraffle plist source for the two fault-tolerance sample diagrams elided: "Canvas 1" (normal message flow) and "Canvas 2" (failure flow), drawing the Listener/ProgressListener, Worker, CounterService, Counter and Storage actors, the numbered steps 1-16, and the message labels Start, Do, Increment, GetCurrentCount, StorageException, Service Unavailable, Restart, Stop, Terminated (Storage), Reconnect, UseStorage, Create and ActorRef.]
diff --git a/akka-docs/java/fault-tolerance-sample.rst b/akka-docs/java/fault-tolerance-sample.rst
index 4b359b792d..8e379c5fcc 100644
--- a/akka-docs/java/fault-tolerance-sample.rst
+++ b/akka-docs/java/fault-tolerance-sample.rst
@@ -1,5 +1,51 @@
 .. _fault-tolerance-sample-java:
 
+Diagrams of the Fault Tolerance Sample (Java)
+----------------------------------------------
+
+.. image:: ../images/faulttolerancesample-normal-flow.png
+
+*The above diagram illustrates the normal message flow.*
+
+**Normal flow:**
+
+======= ==================================================================================
+Step    Description
+======= ==================================================================================
+1       The progress ``Listener`` starts the work.
+2       The ``Worker`` schedules work by sending ``Do`` messages periodically to itself.
+3, 4, 5 When receiving ``Do`` the ``Worker`` tells the ``CounterService`` to increment
+        the counter, three times. The ``Increment`` message is forwarded to the
+        ``Counter``, which updates its counter variable and sends the current value
+        to the ``Storage``.
+6, 7    The ``Worker`` asks the ``CounterService`` for the current value of the counter
+        and pipes the result back to the ``Listener``.
+======= ==================================================================================
+
+.. image:: ../images/faulttolerancesample-failure-flow.png
+
+*The above diagram illustrates what happens in case of storage failure.*
+
+**Failure flow:**
+
+=========== ==================================================================================
+Step        Description
+=========== ==================================================================================
+1           The ``Storage`` throws ``StorageException``.
+2           The ``CounterService`` is the supervisor of the ``Storage`` and restarts the
+            ``Storage`` when a ``StorageException`` is thrown.
+3, 4, 5, 6  The ``Storage`` continues to fail and is restarted.
+7           After 3 failures and restarts within 5 seconds the ``Storage`` is stopped by
+            its supervisor, i.e. the ``CounterService``.
+8           The ``CounterService`` is also watching the ``Storage`` for termination and
+            receives the ``Terminated`` message when the ``Storage`` has been stopped ...
+9, 10, 11   ... and tells the ``Counter`` that there is no ``Storage``.
+12          The ``CounterService`` schedules a ``Reconnect`` message to itself.
+13, 14      When it receives the ``Reconnect`` message it creates a new ``Storage`` ...
+15, 16      ... and tells the ``Counter`` to use the new ``Storage``.
+=========== ==================================================================================
+
 Full Source Code of the Fault Tolerance Sample (Java)
 ------------------------------------------------------
diff --git a/akka-docs/scala/fault-tolerance-sample.rst b/akka-docs/scala/fault-tolerance-sample.rst
index 6859d54a8f..ccda303e45 100644
--- a/akka-docs/scala/fault-tolerance-sample.rst
+++ b/akka-docs/scala/fault-tolerance-sample.rst
@@ -1,5 +1,53 @@
 .. _fault-tolerance-sample-scala:
 
+Diagrams of the Fault Tolerance Sample (Scala)
+----------------------------------------------
+
+.. image:: ../images/faulttolerancesample-normal-flow.png
+
+*The above diagram illustrates the normal message flow.*
+
+**Normal flow:**
+
+======= ==================================================================================
+Step    Description
+======= ==================================================================================
+1       The progress ``Listener`` starts the work.
+2       The ``Worker`` schedules work by sending ``Do`` messages periodically to itself.
+3, 4, 5 When receiving ``Do`` the ``Worker`` tells the ``CounterService`` to increment
+        the counter, three times. The ``Increment`` message is forwarded to the
+        ``Counter``, which updates its counter variable and sends the current value
+        to the ``Storage``.
+6, 7    The ``Worker`` asks the ``CounterService`` for the current value of the counter
+        and pipes the result back to the ``Listener``.
+======= ==================================================================================
+
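+Steps 6 and 7 combine the *ask* pattern with ``pipeTo``. A minimal sketch of how
+the ``Worker`` can do this from its ``receive`` block (``counterService`` and
+``listener`` are assumed ``ActorRef`` fields, and the timeout value is an
+assumption)::
+
+  import akka.pattern.{ ask, pipe }
+  import akka.util.Timeout
+  import akka.util.duration._
+
+  implicit val askTimeout = Timeout(5 seconds) // assumed value, required by `?`
+
+  def receive = {
+    case Do ⇒
+      counterService ! Increment(1) // steps 3, 4, 5
+      counterService ! Increment(1)
+      counterService ! Increment(1)
+      // steps 6 and 7: ask for the current count and pipe the reply onwards
+      (counterService ? GetCurrentCount) pipeTo listener
+  }
+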
+.. image:: ../images/faulttolerancesample-failure-flow.png
+
+*The above diagram illustrates what happens in case of storage failure.*
+
+**Failure flow:**
+
+=========== ==================================================================================
+Step        Description
+=========== ==================================================================================
+1           The ``Storage`` throws ``StorageException``.
+2           The ``CounterService`` is the supervisor of the ``Storage`` and restarts the
+            ``Storage`` when a ``StorageException`` is thrown.
+3, 4, 5, 6  The ``Storage`` continues to fail and is restarted.
+7           After 3 failures and restarts within 5 seconds the ``Storage`` is stopped by
+            its supervisor, i.e. the ``CounterService``.
+8           The ``CounterService`` is also watching the ``Storage`` for termination and
+            receives the ``Terminated`` message when the ``Storage`` has been stopped ...
+9, 10, 11   ... and tells the ``Counter`` that there is no ``Storage``.
+12          The ``CounterService`` schedules a ``Reconnect`` message to itself.
+13, 14      When it receives the ``Reconnect`` message it creates a new ``Storage`` ...
+15, 16      ... and tells the ``Counter`` to use the new ``Storage``.
+=========== ==================================================================================
+
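+The failure handling of steps 2, 7, 8 and 12-16 is concentrated in the
+``CounterService``. A condensed sketch, assuming the ``Storage.StorageException``,
+``Reconnect`` and ``UseStorage`` types from the sample (state handling elided)::
+
+  import akka.actor._
+  import akka.actor.SupervisorStrategy._
+  import akka.util.duration._
+
+  class CounterService extends Actor {
+    // steps 2 and 7: restart the Storage on StorageException,
+    // but give up and stop it after 3 restarts within 5 seconds
+    override val supervisorStrategy =
+      OneForOneStrategy(maxNrOfRetries = 3, withinTimeRange = 5 seconds) {
+        case _: Storage.StorageException ⇒ Restart
+      }
+
+    var storage: Option[ActorRef] = None
+
+    override def preStart() { initStorage() }
+
+    def initStorage() {
+      // watch the child so that Terminated arrives when it is stopped (step 8)
+      storage = Some(context.watch(context.actorOf(Props[Storage], name = "storage")))
+    }
+
+    def receive = {
+      case Terminated(ref) if Some(ref) == storage ⇒
+        storage = None // steps 9-11 would also tell the Counter there is no Storage
+        // step 12: schedule a Reconnect so a fresh Storage is created later
+        context.system.scheduler.scheduleOnce(10 seconds, self, Reconnect)
+      case Reconnect ⇒
+        initStorage() // steps 13, 14; 15, 16 would pass it on via UseStorage
+    }
+  }
+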
 Full Source Code of the Fault Tolerance Sample (Scala)
 ------------------------------------------------------

From a7e5da6819623812e36e14fa797753cab683ffaa Mon Sep 17 00:00:00 2001
From: Viktor Klang
Date: Fri, 27 Jan 2012 12:44:40 +0100
Subject: [PATCH 08/94] Packaging the new FJ pool into Akka

---
 .../main/java/akka/jsr166y/ForkJoinPool.java  | 2630 +++++++++++++++++
 .../main/java/akka/jsr166y/ForkJoinTask.java  | 1543 ++++++++++
 .../akka/jsr166y/ForkJoinWorkerThread.java    |  119 +
 .../java/akka/jsr166y/RecursiveAction.java    |  164 +
 .../main/java/akka/jsr166y/RecursiveTask.java |   68 +
 .../akka/dispatch/ThreadPoolBuilder.scala     |    9 +-
 6 files changed, 4532 insertions(+), 1 deletion(-)
 create mode 100644 akka-actor/src/main/java/akka/jsr166y/ForkJoinPool.java
 create mode 100644 akka-actor/src/main/java/akka/jsr166y/ForkJoinTask.java
 create mode 100644 akka-actor/src/main/java/akka/jsr166y/ForkJoinWorkerThread.java
 create mode 100644 akka-actor/src/main/java/akka/jsr166y/RecursiveAction.java
 create mode 100644 akka-actor/src/main/java/akka/jsr166y/RecursiveTask.java

diff --git a/akka-actor/src/main/java/akka/jsr166y/ForkJoinPool.java b/akka-actor/src/main/java/akka/jsr166y/ForkJoinPool.java
new file mode 100644
index 0000000000..e5d7bedb2c
--- /dev/null
+++ b/akka-actor/src/main/java/akka/jsr166y/ForkJoinPool.java
@@ -0,0 +1,2630 @@
+/*
+ * Written by Doug Lea with assistance from members of JCP JSR-166
+ * Expert Group and released to the public domain, as explained at
+ * http://creativecommons.org/publicdomain/zero/1.0/
+ */
+
+package akka.jsr166y;
+
+import akka.util.Unsafe;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collection;
+import java.util.Collections;
+import java.util.List;
+import java.util.Random;
+import java.util.concurrent.AbstractExecutorService;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Future;
+import java.util.concurrent.RejectedExecutionException;
+import java.util.concurrent.RunnableFuture;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicInteger;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.concurrent.locks.ReentrantLock;
+import java.util.concurrent.locks.Condition;
+
+/**
+ * An {@link ExecutorService} for running {@link ForkJoinTask}s.
+ * A {@code ForkJoinPool} provides the entry point for submissions
+ * from non-{@code ForkJoinTask} clients, as well as management and
+ * monitoring operations.
+ *
A {@code ForkJoinPool} differs from other kinds of {@link + * ExecutorService} mainly by virtue of employing + * work-stealing: all threads in the pool attempt to find and + * execute tasks submitted to the pool and/or created by other active + * tasks (eventually blocking waiting for work if none exist). This + * enables efficient processing when most tasks spawn other subtasks + * (as do most {@code ForkJoinTask}s), as well as when many small + * tasks are submitted to the pool from external clients. Especially + * when setting asyncMode to true in constructors, {@code + * ForkJoinPool}s may also be appropriate for use with event-style + * tasks that are never joined. + * + *
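For concreteness, here is a minimal sketch of building such an async-mode pool. It assumes the four-argument constructor and the public defaultForkJoinWorkerThreadFactory field declared later in this file; the class and variable names are invented for illustration.

    import akka.jsr166y.ForkJoinPool;

    public class AsyncModeExample {
        public static void main(String[] args) {
            // asyncMode = true: workers drain their local queues in FIFO order,
            // which suits event-style tasks that are submitted but never joined.
            ForkJoinPool eventPool = new ForkJoinPool(
                Runtime.getRuntime().availableProcessors(),
                ForkJoinPool.defaultForkJoinWorkerThreadFactory,
                null,   // no custom Thread.UncaughtExceptionHandler
                true);  // asyncMode
            eventPool.shutdown();
        }
    }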
+ * <p>A {@code ForkJoinPool} is constructed with a given target
+ * parallelism level; by default, equal to the number of available
+ * processors. The pool attempts to maintain enough active (or
+ * available) threads by dynamically adding, suspending, or resuming
+ * internal worker threads, even if some tasks are stalled waiting to
+ * join others. However, no such adjustments are guaranteed in the
+ * face of blocked IO or other unmanaged synchronization. The nested
+ * {@link ManagedBlocker} interface enables extension of the kinds of
+ * synchronization accommodated.
+ *
+ * <p>In addition to execution and lifecycle control methods, this
+ * class provides status check methods (for example
+ * {@link #getStealCount}) that are intended to aid in developing,
+ * tuning, and monitoring fork/join applications. Also, method
+ * {@link #toString} returns indications of pool state in a
+ * convenient form for informal monitoring.
+ *
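As a rough illustration of those status check methods, a periodic logging hook could read a few of them; logPoolState is an invented name, and this is a sketch rather than anything defined in this patch.

    import akka.jsr166y.ForkJoinPool;

    public class PoolMonitor {
        // Informal snapshot only: the counts are approximate by design and are
        // meant for tuning and monitoring, not for control logic.
        static void logPoolState(ForkJoinPool pool) {
            System.out.println(pool);  // toString() summarizes pool state
            System.out.println("steals=" + pool.getStealCount()
                + " active=" + pool.getActiveThreadCount()
                + " queuedTasks=" + pool.getQueuedTaskCount());
        }
    }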
+ * <p>As is the case with other ExecutorServices, there are three
+ * main task execution methods summarized in the following
+ * table. These are designed to be used primarily by clients not
+ * already engaged in fork/join computations in the current pool. The
+ * main forms of these methods accept instances of {@code
+ * ForkJoinTask}, but overloaded forms also allow mixed execution of
+ * plain {@code Runnable}- or {@code Callable}- based activities as
+ * well. However, tasks that are already executing in a pool should
+ * normally instead use the within-computation forms listed in the
+ * table unless using async event-style tasks that are not usually
+ * joined, in which case there is little difference among choice of
+ * methods.
+ *
+ * <table BORDER CELLPADDING=3 CELLSPACING=1>
+ *  <tr>
+ *    <td></td>
+ *    <td ALIGN=CENTER> <b>Call from non-fork/join clients</b></td>
+ *    <td ALIGN=CENTER> <b>Call from within fork/join computations</b></td>
+ *  </tr>
+ *  <tr>
+ *    <td> <b>Arrange async execution</b></td>
+ *    <td> {@link #execute(ForkJoinTask)}</td>
+ *    <td> {@link ForkJoinTask#fork}</td>
+ *  </tr>
+ *  <tr>
+ *    <td> <b>Await and obtain result</b></td>
+ *    <td> {@link #invoke(ForkJoinTask)}</td>
+ *    <td> {@link ForkJoinTask#invoke}</td>
+ *  </tr>
+ *  <tr>
+ *    <td> <b>Arrange exec and obtain Future</b></td>
+ *    <td> {@link #submit(ForkJoinTask)}</td>
+ *    <td> {@link ForkJoinTask#fork} (ForkJoinTasks <em>are</em> Futures)</td>
+ *  </tr>
+ * </table>
+ *
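To make the table concrete, the following sketch exercises the three client-side entry points with throwaway tasks; every name in it is invented for illustration.

    import akka.jsr166y.ForkJoinPool;
    import akka.jsr166y.ForkJoinTask;
    import akka.jsr166y.RecursiveAction;

    public class EntryPointsExample {
        public static void main(String[] args) {
            ForkJoinPool pool = new ForkJoinPool();

            RecursiveAction a = new RecursiveAction() {
                protected void compute() { System.out.println("async"); }
            };
            pool.execute(a);                     // arrange async execution
            a.join();                            // a ForkJoinTask is itself a Future

            pool.invoke(new RecursiveAction() {  // await completion in the caller
                protected void compute() { System.out.println("invoked"); }
            });

            ForkJoinTask<Void> f = pool.submit(new RecursiveAction() {
                protected void compute() { System.out.println("submitted"); }
            });
            f.join();                            // completion via the returned task
            pool.shutdown();
        }
    }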
+ * <p><b>Sample Usage.</b> Normally a single {@code ForkJoinPool} is
+ * used for all parallel task execution in a program or subsystem.
+ * Otherwise, use would not usually outweigh the construction and
+ * bookkeeping overhead of creating a large set of threads. For
+ * example, a common pool could be used for the {@code SortTasks}
+ * illustrated in {@link RecursiveAction}. Because {@code
+ * ForkJoinPool} uses threads in {@linkplain java.lang.Thread#isDaemon
+ * daemon} mode, there is typically no need to explicitly {@link
+ * #shutdown} such a pool upon program exit.
+ *
+ * <pre> {@code
+ * static final ForkJoinPool mainPool = new ForkJoinPool();
+ * ...
+ * public void sort(long[] array) {
+ *   mainPool.invoke(new SortTask(array, 0, array.length));
+ * }}</pre>
+ *
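In the same spirit as the sort sketch above, here is a slightly fuller, self-contained variant; SumDemo, SumTask, and the sequential threshold are invented for illustration.

    import akka.jsr166y.ForkJoinPool;
    import akka.jsr166y.RecursiveTask;
    import java.util.Arrays;

    public class SumDemo {
        static final ForkJoinPool mainPool = new ForkJoinPool();

        static final class SumTask extends RecursiveTask<Long> {
            final long[] xs; final int lo, hi;   // sums xs[lo, hi)
            SumTask(long[] xs, int lo, int hi) { this.xs = xs; this.lo = lo; this.hi = hi; }
            protected Long compute() {
                if (hi - lo <= 1024) {           // small enough: sum sequentially
                    long s = 0;
                    for (int i = lo; i < hi; i++) s += xs[i];
                    return s;
                }
                int mid = (lo + hi) >>> 1;
                SumTask left = new SumTask(xs, lo, mid);
                left.fork();                                 // async subtask
                long right = new SumTask(xs, mid, hi).compute();
                return right + left.join();                  // await and combine
            }
        }

        public static void main(String[] args) {
            long[] xs = new long[1 << 20];
            Arrays.fill(xs, 1L);
            System.out.println(mainPool.invoke(new SumTask(xs, 0, xs.length)));  // 1048576
        }
    }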
+ * <p><b>Implementation notes</b>: This implementation restricts the
+ * maximum number of running threads to 32767. Attempts to create
+ * pools with greater than the maximum number result in
+ * {@code IllegalArgumentException}.
+ *
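A tiny sketch of that cap, assuming construction-time validation rejects out-of-range parallelism as described; the class name is invented.

    import akka.jsr166y.ForkJoinPool;

    public class ParallelismCapExample {
        public static void main(String[] args) {
            try {
                new ForkJoinPool(1 << 16);  // 65536 > 32767
            } catch (IllegalArgumentException expected) {
                System.out.println("rejected: " + expected.getMessage());
            }
        }
    }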
This implementation rejects submitted tasks (that is, by throwing + * {@link RejectedExecutionException}) only when the pool is shut down + * or internal resources have been exhausted. + * + * @since 1.7 + * @author Doug Lea + */ +public class ForkJoinPool extends AbstractExecutorService { + + /* + * Implementation Overview + * + * This class and its nested classes provide the main + * functionality and control for a set of worker threads: + * Submissions from non-FJ threads enter into submission + * queues. Workers take these tasks and typically split them into + * subtasks that may be stolen by other workers. Preference rules + * give first priority to processing tasks from their own queues + * (LIFO or FIFO, depending on mode), then to randomized FIFO + * steals of tasks in other queues. + * + * WorkQueues. + * ========== + * + * Most operations occur within work-stealing queues (in nested + * class WorkQueue). These are special forms of Deques that + * support only three of the four possible end-operations -- push, + * pop, and poll (aka steal), under the further constraints that + * push and pop are called only from the owning thread (or, as + * extended here, under a lock), while poll may be called from + * other threads. (If you are unfamiliar with them, you probably + * want to read Herlihy and Shavit's book "The Art of + * Multiprocessor programming", chapter 16 describing these in + * more detail before proceeding.) The main work-stealing queue + * design is roughly similar to those in the papers "Dynamic + * Circular Work-Stealing Deque" by Chase and Lev, SPAA 2005 + * (http://research.sun.com/scalable/pubs/index.html) and + * "Idempotent work stealing" by Michael, Saraswat, and Vechev, + * PPoPP 2009 (http://portal.acm.org/citation.cfm?id=1504186). + * The main differences ultimately stem from gc requirements that + * we null out taken slots as soon as we can, to maintain as small + * a footprint as possible even in programs generating huge + * numbers of tasks. To accomplish this, we shift the CAS + * arbitrating pop vs poll (steal) from being on the indices + * ("base" and "top") to the slots themselves. So, both a + * successful pop and poll mainly entail a CAS of a slot from + * non-null to null. Because we rely on CASes of references, we + * do not need tag bits on base or top. They are simple ints as + * used in any circular array-based queue (see for example + * ArrayDeque). Updates to the indices must still be ordered in a + * way that guarantees that top == base means the queue is empty, + * but otherwise may err on the side of possibly making the queue + * appear nonempty when a push, pop, or poll have not fully + * committed. Note that this means that the poll operation, + * considered individually, is not wait-free. One thief cannot + * successfully continue until another in-progress one (or, if + * previously empty, a push) completes. However, in the + * aggregate, we ensure at least probabilistic non-blockingness. + * If an attempted steal fails, a thief always chooses a different + * random victim target to try next. So, in order for one thief to + * progress, it suffices for any in-progress poll or new push on + * any empty queue to complete. + * + * This approach also enables support of a user mode in which local + * task processing is in FIFO, not LIFO order, simply by using + * poll rather than pop. This can be useful in message-passing + * frameworks in which tasks are never joined. 
However neither + * mode considers affinities, loads, cache localities, etc, so + * rarely provide the best possible performance on a given + * machine, but portably provide good throughput by averaging over + * these factors. (Further, even if we did try to use such + * information, we do not usually have a basis for exploiting + * it. For example, some sets of tasks profit from cache + * affinities, but others are harmed by cache pollution effects.) + * + * WorkQueues are also used in a similar way for tasks submitted + * to the pool. We cannot mix these tasks in the same queues used + * for work-stealing (this would contaminate lifo/fifo + * processing). Instead, we loosely associate (via hashing) + * submission queues with submitting threads, and randomly scan + * these queues as well when looking for work. In essence, + * submitters act like workers except that they never take tasks, + * and they are multiplexed on to a finite number of shared work + * queues. However, classes are set up so that future extensions + * could allow submitters to optionally help perform tasks as + * well. Pool submissions from internal workers are also allowed, + * but use randomized rather than thread-hashed queue indices to + * avoid imbalance. Insertion of tasks in shared mode requires a + * lock (mainly to protect in the case of resizing) but we use + * only a simple spinlock (using bits in field runState), because + * submitters encountering a busy queue try or create others so + * never block. + * + * Management. + * ========== + * + * The main throughput advantages of work-stealing stem from + * decentralized control -- workers mostly take tasks from + * themselves or each other. We cannot negate this in the + * implementation of other management responsibilities. The main + * tactic for avoiding bottlenecks is packing nearly all + * essentially atomic control state into two volatile variables + * that are by far most often read (not written) as status and + * consistency checks + * + * Field "ctl" contains 64 bits holding all the information needed + * to atomically decide to add, inactivate, enqueue (on an event + * queue), dequeue, and/or re-activate workers. To enable this + * packing, we restrict maximum parallelism to (1<<15)-1 (which is + * far in excess of normal operating range) to allow ids, counts, + * and their negations (used for thresholding) to fit into 16bit + * fields. + * + * Field "runState" contains 32 bits needed to register and + * deregister WorkQueues, as well as to enable shutdown. It is + * only modified under a lock (normally briefly held, but + * occasionally protecting allocations and resizings) but even + * when locked remains available to check consistency. + * + * Recording WorkQueues. WorkQueues are recorded in the + * "workQueues" array that is created upon pool construction and + * expanded if necessary. Updates to the array while recording + * new workers and unrecording terminated ones are protected from + * each other by a lock but the array is otherwise concurrently + * readable, and accessed directly. To simplify index-based + * operations, the array size is always a power of two, and all + * readers must tolerate null slots. Shared (submission) queues + * are at even indices, worker queues at odd indices. Grouping + * them together in this way simplifies and speeds up task + * scanning. To avoid flailing during start-up, the array is + * presized to hold twice #parallelism workers (which is unlikely + * to need further resizing during execution). 
But to avoid + * dealing with so many null slots, variable runState includes a + * mask for the nearest power of two that contains all current + * workers. All worker thread creation is on-demand, triggered by + * task submissions, replacement of terminated workers, and/or + * compensation for blocked workers. However, all other support + * code is set up to work with other policies. To ensure that we + * do not hold on to worker references that would prevent GC, ALL + * accesses to workQueues are via indices into the workQueues + * array (which is one source of some of the messy code + * constructions here). In essence, the workQueues array serves as + * a weak reference mechanism. Thus for example the wait queue + * field of ctl stores indices, not references. Access to the + * workQueues in associated methods (for example signalWork) must + * both index-check and null-check the IDs. All such accesses + * ignore bad IDs by returning out early from what they are doing, + * since this can only be associated with termination, in which + * case it is OK to give up. + * + * All uses of the workQueues array check that it is non-null + * (even if previously non-null). This allows nulling during + * termination, which is currently not necessary, but remains an + * option for resource-revocation-based shutdown schemes. It also + * helps reduce JIT issuance of uncommon-trap code, which tends to + * unnecessarily complicate control flow in some methods. + * + * Event Queuing. Unlike HPC work-stealing frameworks, we cannot + * let workers spin indefinitely scanning for tasks when none can + * be found immediately, and we cannot start/resume workers unless + * there appear to be tasks available. On the other hand, we must + * quickly prod them into action when new tasks are submitted or + * generated. In many usages, ramp-up time to activate workers is + * the main limiting factor in overall performance (this is + * compounded at program start-up by JIT compilation and + * allocation). So we try to streamline this as much as possible. + * We park/unpark workers after placing in an event wait queue + * when they cannot find work. This "queue" is actually a simple + * Treiber stack, headed by the "id" field of ctl, plus a 15bit + * counter value (that reflects the number of times a worker has + * been inactivated) to avoid ABA effects (we need only as many + * version numbers as worker threads). Successors are held in + * field WorkQueue.nextWait. Queuing deals with several intrinsic + * races, mainly that a task-producing thread can miss seeing (and + * signalling) another thread that gave up looking for work but + * has not yet entered the wait queue. We solve this by requiring + * a full sweep of all workers (via repeated calls to method + * scan()) both before and after a newly waiting worker is added + * to the wait queue. During a rescan, the worker might release + * some other queued worker rather than itself, which has the same + * net effect. Because enqueued workers may actually be rescanning + * rather than waiting, we set and clear the "parker" field of + * Workqueues to reduce unnecessary calls to unpark. (This + * requires a secondary recheck to avoid missed signals.) 
Note + * the unusual conventions about Thread.interrupts surrounding + * parking and other blocking: Because interrupts are used solely + * to alert threads to check termination, which is checked anyway + * upon blocking, we clear status (using Thread.interrupted) + * before any call to park, so that park does not immediately + * return due to status being set via some other unrelated call to + * interrupt in user code. + * + * Signalling. We create or wake up workers only when there + * appears to be at least one task they might be able to find and + * execute. When a submission is added or another worker adds a + * task to a queue that previously had fewer than two tasks, they + * signal waiting workers (or trigger creation of new ones if + * fewer than the given parallelism level -- see signalWork). + * These primary signals are buttressed by signals during rescans; + * together these cover the signals needed in cases when more + * tasks are pushed but untaken, and improve performance compared + * to having one thread wake up all workers. + * + * Trimming workers. To release resources after periods of lack of + * use, a worker starting to wait when the pool is quiescent will + * time out and terminate if the pool has remained quiescent for + * SHRINK_RATE nanosecs. This will slowly propagate, eventually + * terminating all workers after long periods of non-use. + * + * Shutdown and Termination. A call to shutdownNow atomically sets + * a runState bit and then (non-atomically) sets each workers + * runState status, cancels all unprocessed tasks, and wakes up + * all waiting workers. Detecting whether termination should + * commence after a non-abrupt shutdown() call requires more work + * and bookkeeping. We need consensus about quiescence (i.e., that + * there is no more work). The active count provides a primary + * indication but non-abrupt shutdown still requires a rechecking + * scan for any workers that are inactive but not queued. + * + * Joining Tasks. + * ============== + * + * Any of several actions may be taken when one worker is waiting + * to join a task stolen (or always held by) another. Because we + * are multiplexing many tasks on to a pool of workers, we can't + * just let them block (as in Thread.join). We also cannot just + * reassign the joiner's run-time stack with another and replace + * it later, which would be a form of "continuation", that even if + * possible is not necessarily a good idea since we sometimes need + * both an unblocked task and its continuation to + * progress. Instead we combine two tactics: + * + * Helping: Arranging for the joiner to execute some task that it + * would be running if the steal had not occurred. + * + * Compensating: Unless there are already enough live threads, + * method tryCompensate() may create or re-activate a spare + * thread to compensate for blocked joiners until they unblock. + * + * A third form (implemented in tryRemoveAndExec and + * tryPollForAndExec) amounts to helping a hypothetical + * compensator: If we can readily tell that a possible action of a + * compensator is to steal and execute the task being joined, the + * joining thread can do so directly, without the need for a + * compensation thread (although at the expense of larger run-time + * stacks, but the tradeoff is typically worthwhile). + * + * The ManagedBlocker extension API can't use helping so relies + * only on compensation in method awaitBlocker. 
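As an illustration of that compensation-only path, client code can wrap a blocking operation in a ManagedBlocker so the pool may add a spare worker before the thread parks; QueueTaker is an invented name, and this mirrors the interface's intended use rather than any code in this patch.

    import akka.jsr166y.ForkJoinPool;
    import java.util.concurrent.BlockingQueue;

    public final class QueueTaker<E> implements ForkJoinPool.ManagedBlocker {
        final BlockingQueue<E> queue;
        volatile E item;

        public QueueTaker(BlockingQueue<E> queue) { this.queue = queue; }

        // Called once the pool has had its chance to compensate; may block.
        public boolean block() throws InterruptedException {
            if (item == null)
                item = queue.take();
            return true;
        }

        // Checked first, and again before parking, so no compensation is paid
        // for when an element is already available.
        public boolean isReleasable() {
            return item != null || (item = queue.poll()) != null;
        }

        public E take() throws InterruptedException {
            ForkJoinPool.managedBlock(this);  // possibly activates a spare worker
            E e = item;
            item = null;                      // reset so the blocker can be reused
            return e;
        }
    }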
+ * + * The algorithm in tryHelpStealer entails a form of "linear" + * helping: Each worker records (in field currentSteal) the most + * recent task it stole from some other worker. Plus, it records + * (in field currentJoin) the task it is currently actively + * joining. Method tryHelpStealer uses these markers to try to + * find a worker to help (i.e., steal back a task from and execute + * it) that could hasten completion of the actively joined task. + * In essence, the joiner executes a task that would be on its own + * local deque had the to-be-joined task not been stolen. This may + * be seen as a conservative variant of the approach in Wagner & + * Calder "Leapfrogging: a portable technique for implementing + * efficient futures" SIGPLAN Notices, 1993 + * (http://portal.acm.org/citation.cfm?id=155354). It differs in + * that: (1) We only maintain dependency links across workers upon + * steals, rather than use per-task bookkeeping. This sometimes + * requires a linear scan of workers array to locate stealers, but + * often doesn't because stealers leave hints (that may become + * stale/wrong) of where to locate them. A stealHint is only a + * hint because a worker might have had multiple steals and the + * hint records only one of them (usually the most current). + * Hinting isolates cost to when it is needed, rather than adding + * to per-task overhead. (2) It is "shallow", ignoring nesting + * and potentially cyclic mutual steals. (3) It is intentionally + * racy: field currentJoin is updated only while actively joining, + * which means that we miss links in the chain during long-lived + * tasks, GC stalls etc (which is OK since blocking in such cases + * is usually a good idea). (4) We bound the number of attempts + * to find work (see MAX_HELP_DEPTH) and fall back to suspending + * the worker and if necessary replacing it with another. + * + * It is impossible to keep exactly the target parallelism number + * of threads running at any given time. Determining the + * existence of conservatively safe helping targets, the + * availability of already-created spares, and the apparent need + * to create new spares are all racy, so we rely on multiple + * retries of each. Currently, in keeping with on-demand + * signalling policy, we compensate only if blocking would leave + * less than one active (non-waiting, non-blocked) worker. + * Additionally, to avoid some false alarms due to GC, lagging + * counters, system activity, etc, compensated blocking for joins + * is only attempted after rechecks stabilize in + * ForkJoinTask.awaitJoin. (Retries are interspersed with + * Thread.yield, for good citizenship.) + * + * Style notes: There is a lot of representation-level coupling + * among classes ForkJoinPool, ForkJoinWorkerThread, and + * ForkJoinTask. The fields of WorkQueue maintain data structures + * managed by ForkJoinPool, so are directly accessed. There is + * little point trying to reduce this, since any associated future + * changes in representations will need to be accompanied by + * algorithmic changes anyway. All together, these low-level + * implementation choices produce as much as a factor of 4 + * performance improvement compared to naive implementations, and + * enable the processing of billions of tasks per second, at the + * expense of some ugliness. + * + * Methods signalWork() and scan() are the main bottlenecks so are + * especially heavily micro-optimized/mangled. 
There are lots of + * inline assignments (of form "while ((local = field) != 0)") + * which are usually the simplest way to ensure the required read + * orderings (which are sometimes critical). This leads to a + * "C"-like style of listing declarations of these locals at the + * heads of methods or blocks. There are several occurrences of + * the unusual "do {} while (!cas...)" which is the simplest way + * to force an update of a CAS'ed variable. There are also other + * coding oddities that help some methods perform reasonably even + * when interpreted (not compiled). + * + * The order of declarations in this file is: (1) declarations of + * statics (2) fields (along with constants used when unpacking + * some of them), listed in an order that tends to reduce + * contention among them a bit under most JVMs; (3) nested + * classes; (4) internal control methods; (5) callbacks and other + * support for ForkJoinTask methods; (6) exported methods (plus a + * few little helpers); (7) static block initializing all statics + * in a minimally dependent order. + */ + + /** + * Factory for creating new {@link ForkJoinWorkerThread}s. + * A {@code ForkJoinWorkerThreadFactory} must be defined and used + * for {@code ForkJoinWorkerThread} subclasses that extend base + * functionality or initialize threads with different contexts. + */ + public static interface ForkJoinWorkerThreadFactory { + /** + * Returns a new worker thread operating in the given pool. + * + * @param pool the pool this thread works in + * @throws NullPointerException if the pool is null + */ + public ForkJoinWorkerThread newThread(ForkJoinPool pool); + } + + /** + * Default ForkJoinWorkerThreadFactory implementation; creates a + * new ForkJoinWorkerThread. + */ + static class DefaultForkJoinWorkerThreadFactory + implements ForkJoinWorkerThreadFactory { + public ForkJoinWorkerThread newThread(ForkJoinPool pool) { + return new ForkJoinWorkerThread(pool); + } + } + + /** + * Creates a new ForkJoinWorkerThread. This factory is used unless + * overridden in ForkJoinPool constructors. + */ + public static final ForkJoinWorkerThreadFactory + defaultForkJoinWorkerThreadFactory; + + /** + * Permission required for callers of methods that may start or + * kill threads. + */ + private static final RuntimePermission modifyThreadPermission; + + /** + * If there is a security manager, makes sure caller has + * permission to modify threads. + */ + private static void checkPermission() { + SecurityManager security = System.getSecurityManager(); + if (security != null) + security.checkPermission(modifyThreadPermission); + } + + /** + * Generator for assigning sequence numbers as pool names. + */ + private static final AtomicInteger poolNumberGenerator; + + /** + * Bits and masks for control variables + * + * Field ctl is a long packed with: + * AC: Number of active running workers minus target parallelism (16 bits) + * TC: Number of total workers minus target parallelism (16 bits) + * ST: true if pool is terminating (1 bit) + * EC: the wait count of top waiting thread (15 bits) + * ID: ~(poolIndex >>> 1) of top of Treiber stack of waiters (16 bits) + * + * When convenient, we can extract the upper 32 bits of counts and + * the lower 32 bits of queue state, u = (int)(ctl >>> 32) and e = + * (int)ctl. The ec field is never accessed alone, but always + * together with id and st. 
The offsets of counts by the target + * parallelism and the positionings of fields makes it possible to + * perform the most common checks via sign tests of fields: When + * ac is negative, there are not enough active workers, when tc is + * negative, there are not enough total workers, when id is + * negative, there is at least one waiting worker, and when e is + * negative, the pool is terminating. To deal with these possibly + * negative fields, we use casts in and out of "short" and/or + * signed shifts to maintain signedness. + * + * When a thread is queued (inactivated), its eventCount field is + * negative, which is the only way to tell if a worker is + * prevented from executing tasks, even though it must continue to + * scan for them to avoid queuing races. + * + * Field runState is an int packed with: + * SHUTDOWN: true if shutdown is enabled (1 bit) + * SEQ: a sequence number updated upon (de)registering workers (15 bits) + * MASK: mask (power of 2 - 1) covering all registered poolIndexes (16 bits) + * + * The combination of mask and sequence number enables simple + * consistency checks: Staleness of read-only operations on the + * workers and queues arrays can be checked by comparing runState + * before vs after the reads. The low 16 bits (i.e, anding with + * SMASK) hold (the smallest power of two covering all worker + * indices, minus one. The mask for queues (vs workers) is twice + * this value plus 1. + */ + + // bit positions/shifts for fields + private static final int AC_SHIFT = 48; + private static final int TC_SHIFT = 32; + private static final int ST_SHIFT = 31; + private static final int EC_SHIFT = 16; + + // bounds + private static final int MAX_ID = 0x7fff; // max poolIndex + private static final int SMASK = 0xffff; // mask short bits + private static final int SHORT_SIGN = 1 << 15; + private static final int INT_SIGN = 1 << 31; + + // masks + private static final long STOP_BIT = 0x0001L << ST_SHIFT; + private static final long AC_MASK = ((long)SMASK) << AC_SHIFT; + private static final long TC_MASK = ((long)SMASK) << TC_SHIFT; + + // units for incrementing and decrementing + private static final long TC_UNIT = 1L << TC_SHIFT; + private static final long AC_UNIT = 1L << AC_SHIFT; + + // masks and units for dealing with u = (int)(ctl >>> 32) + private static final int UAC_SHIFT = AC_SHIFT - 32; + private static final int UTC_SHIFT = TC_SHIFT - 32; + private static final int UAC_MASK = SMASK << UAC_SHIFT; + private static final int UTC_MASK = SMASK << UTC_SHIFT; + private static final int UAC_UNIT = 1 << UAC_SHIFT; + private static final int UTC_UNIT = 1 << UTC_SHIFT; + + // masks and units for dealing with e = (int)ctl + private static final int E_MASK = 0x7fffffff; // no STOP_BIT + private static final int E_SEQ = 1 << EC_SHIFT; + + // runState bits + private static final int SHUTDOWN = 1 << 31; + private static final int RS_SEQ = 1 << 16; + private static final int RS_SEQ_MASK = 0x7fff0000; + + // access mode for WorkQueue + static final int LIFO_QUEUE = 0; + static final int FIFO_QUEUE = 1; + static final int SHARED_QUEUE = -1; + + /** + * The wakeup interval (in nanoseconds) for a worker waiting for a + * task when the pool is quiescent to instead try to shrink the + * number of workers. The exact value does not matter too + * much. It must be short enough to release resources during + * sustained periods of idleness, but not so short that threads + * are continually re-created. 
+ */ + private static final long SHRINK_RATE = + 4L * 1000L * 1000L * 1000L; // 4 seconds + + /** + * The timeout value for attempted shrinkage, includes + * some slop to cope with system timer imprecision. + */ + private static final long SHRINK_TIMEOUT = SHRINK_RATE - (SHRINK_RATE / 10); + + /** + * The maximum stolen->joining link depth allowed in tryHelpStealer. + * Depths for legitimate chains are unbounded, but we use a fixed + * constant to avoid (otherwise unchecked) cycles and to bound + * staleness of traversal parameters at the expense of sometimes + * blocking when we could be helping. + */ + private static final int MAX_HELP_DEPTH = 16; + + /* + * Field layout order in this class tends to matter more than one + * would like. Runtime layout order is only loosely related to + * declaration order and may differ across JVMs, but the following + * empirically works OK on current JVMs. + */ + + volatile long ctl; // main pool control + final int parallelism; // parallelism level + final int localMode; // per-worker scheduling mode + int nextPoolIndex; // hint used in registerWorker + volatile int runState; // shutdown status, seq, and mask + WorkQueue[] workQueues; // main registry + final ReentrantLock lock; // for registration + final Condition termination; // for awaitTermination + final ForkJoinWorkerThreadFactory factory; // factory for new workers + final Thread.UncaughtExceptionHandler ueh; // per-worker UEH + final AtomicLong stealCount; // collect counts when terminated + final AtomicInteger nextWorkerNumber; // to create worker name string + final String workerNamePrefix; // Prefix for assigning worker names + + /** + * Queues supporting work-stealing as well as external task + * submission. See above for main rationale and algorithms. + * Implementation relies heavily on "Unsafe" intrinsics + * and selective use of "volatile": + * + * Field "base" is the index (mod array.length) of the least valid + * queue slot, which is always the next position to steal (poll) + * from if nonempty. Reads and writes require volatile orderings + * but not CAS, because updates are only performed after slot + * CASes. + * + * Field "top" is the index (mod array.length) of the next queue + * slot to push to or pop from. It is written only by owner thread + * for push, or under lock for trySharedPush, and accessed by + * other threads only after reading (volatile) base. Both top and + * base are allowed to wrap around on overflow, but (top - base) + * (or more commonly -(base - top) to force volatile read of base + * before top) still estimates size. + * + * The array slots are read and written using the emulation of + * volatiles/atomics provided by Unsafe. Insertions must in + * general use putOrderedObject as a form of releasing store to + * ensure that all writes to the task object are ordered before + * its publication in the queue. (Although we can avoid one case + * of this when locked in trySharedPush.) All removals entail a + * CAS to null. The array is always a power of two. To ensure + * safety of Unsafe array operations, all accesses perform + * explicit null checks and implicit bounds checks via + * power-of-two masking. + * + * In addition to basic queuing support, this class contains + * fields described elsewhere to control execution. It turns out + * to work better memory-layout-wise to include them in this + * class rather than a separate class. 
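The slot-CAS protocol sketched in the comment above can be modeled with ordinary atomics, leaving out resizing, signalling, padding, and the Unsafe intrinsics; MiniWorkQueue is an invented, much-simplified analogue of WorkQueue, not a drop-in replacement.

    import java.util.concurrent.atomic.AtomicReferenceArray;

    final class MiniWorkQueue<T> {
        final AtomicReferenceArray<T> slots = new AtomicReferenceArray<T>(1024);
        static final int MASK = 1023;   // fixed capacity: at most 1024 queued tasks
        volatile int base;              // next slot to poll (steal) from
        volatile int top;               // next slot to push to

        // Owner only: store the task, then publish it with the volatile write to top.
        void push(T task) {
            slots.set(top & MASK, task);
            top = top + 1;
        }

        // Owner only, LIFO. The CAS is on the slot, not the index, so it
        // arbitrates directly against a thief polling the same last element.
        T pop() {
            int s;
            while ((s = top - 1) - base >= 0) {
                T t = slots.get(s & MASK);
                if (t == null)
                    break;                       // a thief emptied the slot first
                if (slots.compareAndSet(s & MASK, t, null)) {
                    top = s;
                    return t;
                }
            }
            return null;
        }

        // Any thread, FIFO steal. base only advances after a successful CAS,
        // so top == base reliably means empty, as the comment above requires.
        T poll() {
            int b;
            while ((b = base) - top < 0) {
                T t = slots.get(b & MASK);
                if (t != null && base == b && slots.compareAndSet(b & MASK, t, null)) {
                    base = b + 1;
                    return t;
                }
                if (t == null && base == b)
                    return null;                 // in-progress steal: momentarily "empty"
            }
            return null;
        }
    }

Taken individually, poll is not wait-free here (a thief that loses the slot CAS must retry), which is exactly the trade-off the overview describes: nulling taken slots immediately keeps the footprint small at the cost of occasional retries.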
+ * + * Performance on most platforms is very sensitive to placement of + * instances of both WorkQueues and their arrays -- we absolutely + * do not want multiple WorkQueue instances or multiple queue + * arrays sharing cache lines. (It would be best for queue objects + * and their arrays to share, but there is nothing available to + * help arrange that). Unfortunately, because they are recorded + * in a common array, WorkQueue instances are often moved to be + * adjacent by garbage collectors. To reduce impact, we use field + * padding that works OK on common platforms; this effectively + * trades off slightly slower average field access for the sake of + * avoiding really bad worst-case access. (Until better JVM + * support is in place, this padding is dependent on transient + * properties of JVM field layout rules.) We also take care in + * allocating and sizing and resizing the array. Non-shared queue + * arrays are initialized (via method growArray) by workers before + * use. Others are allocated on first use. + */ + static final class WorkQueue { + /** + * Capacity of work-stealing queue array upon initialization. + * Must be a power of two; at least 4, but set larger to + * reduce cacheline sharing among queues. + */ + static final int INITIAL_QUEUE_CAPACITY = 1 << 8; + + /** + * Maximum size for queue arrays. Must be a power of two less + * than or equal to 1 << (31 - width of array entry) to ensure + * lack of wraparound of index calculations, but defined to a + * value a bit less than this to help users trap runaway + * programs before saturating systems. + */ + static final int MAXIMUM_QUEUE_CAPACITY = 1 << 26; // 64M + + volatile long totalSteals; // cumulative number of steals + int seed; // for random scanning; initialize nonzero + volatile int eventCount; // encoded inactivation count; < 0 if inactive + int nextWait; // encoded record of next event waiter + int rescans; // remaining scans until block + int nsteals; // top-level task executions since last idle + final int mode; // lifo, fifo, or shared + int poolIndex; // index of this queue in pool (or 0) + int stealHint; // index of most recent known stealer + volatile int runState; // 1: locked, -1: terminate; else 0 + volatile int base; // index of next slot for poll + int top; // index of next slot for push + ForkJoinTask[] array; // the elements (initially unallocated) + final ForkJoinWorkerThread owner; // owning thread or null if shared + volatile Thread parker; // == owner during call to park; else null + ForkJoinTask currentJoin; // task being joined in awaitJoin + ForkJoinTask currentSteal; // current non-local task being executed + // Heuristic padding to ameliorate unfortunate memory placements + Object p00, p01, p02, p03, p04, p05, p06, p07, p08, p09, p0a; + + WorkQueue(ForkJoinWorkerThread owner, int mode) { + this.owner = owner; + this.mode = mode; + // Place indices in the center of array (that is not yet allocated) + base = top = INITIAL_QUEUE_CAPACITY >>> 1; + } + + /** + * Returns number of tasks in the queue + */ + final int queueSize() { + int n = base - top; // non-owner callers must read base first + return (n >= 0) ? 0 : -n; + } + + /** + * Pushes a task. Call only by owner in unshared queues. + * + * @param task the task. Caller must ensure non-null. 
+ * @param p, if non-null, pool to signal if necessary + * @throw RejectedExecutionException if array cannot + * be resized + */ + final void push(ForkJoinTask task, ForkJoinPool p) { + ForkJoinTask[] a; + int s = top, m, n; + if ((a = array) != null) { // ignore if queue removed + U.putOrderedObject + (a, (((m = a.length - 1) & s) << ASHIFT) + ABASE, task); + if ((n = (top = s + 1) - base) <= 2) { + if (p != null) + p.signalWork(); + } + else if (n >= m) + growArray(true); + } + } + + /** + * Pushes a task if lock is free and array is either big + * enough or can be resized to be big enough. + * + * @param task the task. Caller must ensure non-null. + * @return true if submitted + */ + final boolean trySharedPush(ForkJoinTask task) { + boolean submitted = false; + if (runState == 0 && U.compareAndSwapInt(this, RUNSTATE, 0, 1)) { + ForkJoinTask[] a = array; + int s = top, n = s - base; + try { + if ((a != null && n < a.length - 1) || + (a = growArray(false)) != null) { // must presize + int j = (((a.length - 1) & s) << ASHIFT) + ABASE; + U.putObject(a, (long)j, task); // don't need "ordered" + top = s + 1; + submitted = true; + } + } finally { + runState = 0; // unlock + } + } + return submitted; + } + + /** + * Takes next task, if one exists, in FIFO order. + */ + final ForkJoinTask poll() { + ForkJoinTask[] a; int b, i; + while ((b = base) - top < 0 && (a = array) != null && + (i = (a.length - 1) & b) >= 0) { + int j = (i << ASHIFT) + ABASE; + ForkJoinTask t = (ForkJoinTask)U.getObjectVolatile(a, j); + if (t != null && base == b && + U.compareAndSwapObject(a, j, t, null)) { + base = b + 1; + return t; + } + } + return null; + } + + /** + * Takes next task, if one exists, in LIFO order. + * Call only by owner in unshared queues. + */ + final ForkJoinTask pop() { + ForkJoinTask t; int m; + ForkJoinTask[] a = array; + if (a != null && (m = a.length - 1) >= 0) { + for (int s; (s = top - 1) - base >= 0;) { + int j = ((m & s) << ASHIFT) + ABASE; + if ((t = (ForkJoinTask)U.getObjectVolatile(a, j)) == null) + break; + if (U.compareAndSwapObject(a, j, t, null)) { + top = s; + return t; + } + } + } + return null; + } + + /** + * Takes next task, if one exists, in order specified by mode. + */ + final ForkJoinTask nextLocalTask() { + return mode == 0 ? pop() : poll(); + } + + /** + * Returns next task, if one exists, in order specified by mode. + */ + final ForkJoinTask peek() { + ForkJoinTask[] a = array; int m; + if (a == null || (m = a.length - 1) < 0) + return null; + int i = mode == 0 ? top - 1 : base; + int j = ((i & m) << ASHIFT) + ABASE; + return (ForkJoinTask)U.getObjectVolatile(a, j); + } + + /** + * Returns task at index b if b is current base of queue. + */ + final ForkJoinTask pollAt(int b) { + ForkJoinTask[] a; int i; + ForkJoinTask task = null; + if ((a = array) != null && (i = ((a.length - 1) & b)) >= 0) { + int j = (i << ASHIFT) + ABASE; + ForkJoinTask t = (ForkJoinTask)U.getObjectVolatile(a, j); + if (t != null && base == b && + U.compareAndSwapObject(a, j, t, null)) { + base = b + 1; + task = t; + } + } + return task; + } + + /** + * Pops the given task only if it is at the current top. + */ + final boolean tryUnpush(ForkJoinTask t) { + ForkJoinTask[] a; int s; + if ((a = array) != null && (s = top) != base && + U.compareAndSwapObject + (a, (((a.length - 1) & --s) << ASHIFT) + ABASE, t, null)) { + top = s; + return true; + } + return false; + } + + /** + * Polls the given task only if it is at the current base. 
+ */ + final boolean pollFor(ForkJoinTask task) { + ForkJoinTask[] a; int b, i; + if ((b = base) - top < 0 && (a = array) != null && + (i = (a.length - 1) & b) >= 0) { + int j = (i << ASHIFT) + ABASE; + if (U.getObjectVolatile(a, j) == task && base == b && + U.compareAndSwapObject(a, j, task, null)) { + base = b + 1; + return true; + } + } + return false; + } + + /** + * If present, removes from queue and executes the given task, or + * any other cancelled task. Returns (true) immediately on any CAS + * or consistency check failure so caller can retry. + * + * @return false if no progress can be made + */ + final boolean tryRemoveAndExec(ForkJoinTask task) { + boolean removed = false, empty = true, progress = true; + ForkJoinTask[] a; int m, s, b, n; + if ((a = array) != null && (m = a.length - 1) >= 0 && + (n = (s = top) - (b = base)) > 0) { + for (ForkJoinTask t;;) { // traverse from s to b + int j = ((--s & m) << ASHIFT) + ABASE; + t = (ForkJoinTask)U.getObjectVolatile(a, j); + if (t == null) // inconsistent length + break; + else if (t == task) { + if (s + 1 == top) { // pop + if (!U.compareAndSwapObject(a, j, task, null)) + break; + top = s; + removed = true; + } + else if (base == b) // replace with proxy + removed = U.compareAndSwapObject(a, j, task, + new EmptyTask()); + break; + } + else if (t.status >= 0) + empty = false; + else if (s + 1 == top) { // pop and throw away + if (U.compareAndSwapObject(a, j, t, null)) + top = s; + break; + } + if (--n == 0) { + if (!empty && base == b) + progress = false; + break; + } + } + } + if (removed) + task.doExec(); + return progress; + } + + /** + * Initializes or doubles the capacity of array. Call either + * by owner or with lock held -- it is OK for base, but not + * top, to move while resizings are in progress. + * + * @param rejectOnFailure if true, throw exception if capacity + * exceeded (relayed ultimately to user); else return null. + */ + final ForkJoinTask[] growArray(boolean rejectOnFailure) { + ForkJoinTask[] oldA = array; + int size = oldA != null ? oldA.length << 1 : INITIAL_QUEUE_CAPACITY; + if (size <= MAXIMUM_QUEUE_CAPACITY) { + int oldMask, t, b; + ForkJoinTask[] a = array = new ForkJoinTask[size]; + if (oldA != null && (oldMask = oldA.length - 1) >= 0 && + (t = top) - (b = base) > 0) { + int mask = size - 1; + do { + ForkJoinTask x; + int oldj = ((b & oldMask) << ASHIFT) + ABASE; + int j = ((b & mask) << ASHIFT) + ABASE; + x = (ForkJoinTask)U.getObjectVolatile(oldA, oldj); + if (x != null && + U.compareAndSwapObject(oldA, oldj, x, null)) + U.putObjectVolatile(a, j, x); + } while (++b != t); + } + return a; + } + else if (!rejectOnFailure) + return null; + else + throw new RejectedExecutionException("Queue capacity exceeded"); + } + + /** + * Removes and cancels all known tasks, ignoring any exceptions + */ + final void cancelAll() { + ForkJoinTask.cancelIgnoringExceptions(currentJoin); + ForkJoinTask.cancelIgnoringExceptions(currentSteal); + for (ForkJoinTask t; (t = poll()) != null; ) + ForkJoinTask.cancelIgnoringExceptions(t); + } + + // Execution methods + + /** + * Removes and runs tasks until empty, using local mode + * ordering. + */ + final void runLocalTasks() { + if (base - top < 0) { + for (ForkJoinTask t; (t = nextLocalTask()) != null; ) + t.doExec(); + } + } + + /** + * Executes a top-level task and any local tasks remaining + * after execution. 
+ * + * @return true unless terminating + */ + final boolean runTask(ForkJoinTask t) { + boolean alive = true; + if (t != null) { + currentSteal = t; + t.doExec(); + runLocalTasks(); + ++nsteals; + currentSteal = null; + } + else if (runState < 0) // terminating + alive = false; + return alive; + } + + /** + * Executes a non-top-level (stolen) task + */ + final void runSubtask(ForkJoinTask t) { + if (t != null) { + ForkJoinTask ps = currentSteal; + currentSteal = t; + t.doExec(); + currentSteal = ps; + } + } + + /** + * Computes next value for random probes. Scans don't require + * a very high quality generator, but also not a crummy one. + * Marsaglia xor-shift is cheap and works well enough. Note: + * This is manually inlined in several usages in ForkJoinPool + * to avoid writes inside busy scan loops. + */ + final int nextSeed() { + int r = seed; + r ^= r << 13; + r ^= r >>> 17; + r ^= r << 5; + return seed = r; + } + + // Unsafe mechanics + private static final sun.misc.Unsafe U; + private static final long RUNSTATE; + private static final int ABASE; + private static final int ASHIFT; + static { + int s; + try { + U = getUnsafe(); + Class k = WorkQueue.class; + Class ak = ForkJoinTask[].class; + RUNSTATE = U.objectFieldOffset + (k.getDeclaredField("runState")); + ABASE = U.arrayBaseOffset(ak); + s = U.arrayIndexScale(ak); + } catch (Exception e) { + throw new Error(e); + } + if ((s & (s-1)) != 0) + throw new Error("data type scale not a power of two"); + ASHIFT = 31 - Integer.numberOfLeadingZeros(s); + } + } + + /** + * Class for artificial tasks that are used to replace the target + * of local joins if they are removed from an interior queue slot + * in WorkQueue.tryRemoveAndExec. We don't need the proxy to + * actually do anything beyond having a unique identity. + */ + static final class EmptyTask extends ForkJoinTask { + EmptyTask() { status = ForkJoinTask.NORMAL; } // force done + public Void getRawResult() { return null; } + public void setRawResult(Void x) {} + public boolean exec() { return true; } + } + + /** + * Computes a hash code for the given thread. This method is + * expected to provide higher-quality hash codes than those using + * method hashCode(). + */ + static final int hashThread(Thread t) { + long id = (t == null) ? 0L : t.getId(); // Use MurmurHash of thread id + int h = (int)id ^ (int)(id >>> 32); + h ^= h >>> 16; + h *= 0x85ebca6b; + h ^= h >>> 13; + h *= 0xc2b2ae35; + return h ^ (h >>> 16); + } + + /** + * Top-level runloop for workers + */ + final void runWorker(ForkJoinWorkerThread wt) { + WorkQueue w = wt.workQueue; + w.growArray(false); // Initialize queue array and seed in this thread + w.seed = hashThread(Thread.currentThread()) | (1 << 31); // force < 0 + + do {} while (w.runTask(scan(w))); + } + + // Creating, registering and deregistering workers + + /** + * Tries to create and start a worker + */ + private void addWorker() { + Throwable ex = null; + ForkJoinWorkerThread w = null; + try { + if ((w = factory.newThread(this)) != null) { + w.start(); + return; + } + } catch (Throwable e) { + ex = e; + } + deregisterWorker(w, ex); + } + + /** + * Callback from ForkJoinWorkerThread constructor to assign a + * public name. This must be separate from registerWorker because + * it is called during the "super" constructor call in + * ForkJoinWorkerThread. 
+ */ + final String nextWorkerName() { + return workerNamePrefix.concat + (Integer.toString(nextWorkerNumber.addAndGet(1))); + } + + /** + * Callback from ForkJoinWorkerThread constructor to establish and + * record its WorkQueue + * + * @param wt the worker thread + */ + final void registerWorker(ForkJoinWorkerThread wt) { + WorkQueue w = wt.workQueue; + ReentrantLock lock = this.lock; + lock.lock(); + try { + int k = nextPoolIndex; + WorkQueue[] ws = workQueues; + if (ws != null) { // ignore on shutdown + int n = ws.length; + if (k < 0 || (k & 1) == 0 || k >= n || ws[k] != null) { + for (k = 1; k < n && ws[k] != null; k += 2) + ; // workers are at odd indices + if (k >= n) // resize + workQueues = ws = Arrays.copyOf(ws, n << 1); + } + w.poolIndex = k; + w.eventCount = ~(k >>> 1) & SMASK; // Set up wait count + ws[k] = w; // record worker + nextPoolIndex = k + 2; + int rs = runState; + int m = rs & SMASK; // recalculate runState mask + if (k > m) + m = (m << 1) + 1; + runState = (rs & SHUTDOWN) | ((rs + RS_SEQ) & RS_SEQ_MASK) | m; + } + } finally { + lock.unlock(); + } + } + + /** + * Final callback from terminating worker, as well as failure to + * construct or start a worker in addWorker. Removes record of + * worker from array, and adjusts counts. If pool is shutting + * down, tries to complete termination. + * + * @param wt the worker thread or null if addWorker failed + * @param ex the exception causing failure, or null if none + */ + final void deregisterWorker(ForkJoinWorkerThread wt, Throwable ex) { + WorkQueue w = null; + if (wt != null && (w = wt.workQueue) != null) { + w.runState = -1; // ensure runState is set + stealCount.getAndAdd(w.totalSteals + w.nsteals); + int idx = w.poolIndex; + ReentrantLock lock = this.lock; + lock.lock(); + try { // remove record from array + WorkQueue[] ws = workQueues; + if (ws != null && idx >= 0 && idx < ws.length && ws[idx] == w) + ws[nextPoolIndex = idx] = null; + } finally { + lock.unlock(); + } + } + + long c; // adjust ctl counts + do {} while (!U.compareAndSwapLong + (this, CTL, c = ctl, (((c - AC_UNIT) & AC_MASK) | + ((c - TC_UNIT) & TC_MASK) | + (c & ~(AC_MASK|TC_MASK))))); + + if (!tryTerminate(false) && w != null) { + w.cancelAll(); // cancel remaining tasks + if (w.array != null) // suppress signal if never ran + signalWork(); // wake up or create replacement + } + + if (ex != null) // rethrow + U.throwException(ex); + } + + + // Maintaining ctl counts + + /** + * Increments active count; mainly called upon return from blocking + */ + final void incrementActiveCount() { + long c; + do {} while (!U.compareAndSwapLong(this, CTL, c = ctl, c + AC_UNIT)); + } + + /** + * Activates or creates a worker + */ + final void signalWork() { + /* + * The while condition is true if: (there is are too few total + * workers OR there is at least one waiter) AND (there are too + * few active workers OR the pool is terminating). The value + * of e distinguishes the remaining cases: zero (no waiters) + * for create, negative if terminating (in which case do + * nothing), else release a waiter. The secondary checks for + * release (non-null array etc) can fail if the pool begins + * terminating after the test, and don't impose any added cost + * because JVMs must perform null and bounds checks anyway. 
+ */ + long c; int e, u; + while ((((e = (int)(c = ctl)) | (u = (int)(c >>> 32))) & + (INT_SIGN|SHORT_SIGN)) == (INT_SIGN|SHORT_SIGN)) { + WorkQueue[] ws = workQueues; int i; WorkQueue w; Thread p; + if (e == 0) { // add a new worker + if (U.compareAndSwapLong + (this, CTL, c, (long)(((u + UTC_UNIT) & UTC_MASK) | + ((u + UAC_UNIT) & UAC_MASK)) << 32)) { + addWorker(); + break; + } + } + else if (e > 0 && ws != null && + (i = ((~e << 1) | 1) & SMASK) < ws.length && + (w = ws[i]) != null && + w.eventCount == (e | INT_SIGN)) { + if (U.compareAndSwapLong + (this, CTL, c, (((long)(w.nextWait & E_MASK)) | + ((long)(u + UAC_UNIT) << 32)))) { + w.eventCount = (e + E_SEQ) & E_MASK; + if ((p = w.parker) != null) + U.unpark(p); // release a waiting worker + break; + } + } + else + break; + } + } + + /** + * Tries to decrement active count (sometimes implicitly) and + * possibly release or create a compensating worker in preparation + * for blocking. Fails on contention or termination. + * + * @return true if the caller can block, else should recheck and retry + */ + final boolean tryCompensate() { + WorkQueue[] ws; WorkQueue w; Thread p; + int pc = parallelism, e, u, ac, tc, i; + long c = ctl; + + if ((e = (int)c) >= 0) { + if ((ac = ((u = (int)(c >>> 32)) >> UAC_SHIFT)) <= 0 && + e != 0 && (ws = workQueues) != null && + (i = ((~e << 1) | 1) & SMASK) < ws.length && + (w = ws[i]) != null) { + if (w.eventCount == (e | INT_SIGN) && + U.compareAndSwapLong + (this, CTL, c, ((long)(w.nextWait & E_MASK) | + (c & (AC_MASK|TC_MASK))))) { + w.eventCount = (e + E_SEQ) & E_MASK; + if ((p = w.parker) != null) + U.unpark(p); + return true; // release an idle worker + } + } + else if ((tc = (short)(u >>> UTC_SHIFT)) >= 0 && ac + pc > 1) { + long nc = ((c - AC_UNIT) & AC_MASK) | (c & ~AC_MASK); + if (U.compareAndSwapLong(this, CTL, c, nc)) + return true; // no compensation needed + } + else if (tc + pc < MAX_ID) { + long nc = ((c + TC_UNIT) & TC_MASK) | (c & ~TC_MASK); + if (U.compareAndSwapLong(this, CTL, c, nc)) { + addWorker(); + return true; // create replacement + } + } + } + return false; + } + + // Submissions + + /** + * Unless shutting down, adds the given task to some submission + * queue; using a randomly chosen queue index if the caller is a + * ForkJoinWorkerThread, else one based on caller thread's hash + * code. If no queue exists at the index, one is created. If the + * queue is busy, another is chosen by sweeping through the queues + * array. + */ + private void doSubmit(ForkJoinTask task) { + if (task == null) + throw new NullPointerException(); + Thread t = Thread.currentThread(); + int r = ((t instanceof ForkJoinWorkerThread) ? + ((ForkJoinWorkerThread)t).workQueue.nextSeed() : hashThread(t)); + for (;;) { + int rs = runState, m = rs & SMASK; + int j = r &= (m & ~1); // even numbered queues + WorkQueue[] ws = workQueues; + if (rs < 0 || ws == null) + throw new RejectedExecutionException(); // shutting down + if (ws.length > m) { // consistency check + for (WorkQueue q;;) { // circular sweep + if (((q = ws[j]) != null || + (q = tryAddSharedQueue(j)) != null) && + q.trySharedPush(task)) { + signalWork(); + return; + } + if ((j = (j + 2) & m) == r) { + Thread.yield(); // all queues busy + break; + } + } + } + } + } + + /** + * Tries to add and register a new queue at the given index. 
+ * + * @param idx the workQueues array index to register the queue + * @return the queue, or null if could not add because could + * not acquire lock or idx is unusable + */ + private WorkQueue tryAddSharedQueue(int idx) { + WorkQueue q = null; + ReentrantLock lock = this.lock; + if (idx >= 0 && (idx & 1) == 0 && !lock.isLocked()) { + // create queue outside of lock but only if apparently free + WorkQueue nq = new WorkQueue(null, SHARED_QUEUE); + if (lock.tryLock()) { + try { + WorkQueue[] ws = workQueues; + if (ws != null && idx < ws.length) { + if ((q = ws[idx]) == null) { + int rs; // update runState seq + ws[idx] = q = nq; + runState = (((rs = runState) & SHUTDOWN) | + ((rs + RS_SEQ) & ~SHUTDOWN)); + } + } + } finally { + lock.unlock(); + } + } + } + return q; + } + + // Scanning for tasks + + /** + * Scans for and, if found, returns one task, else possibly + * inactivates the worker. This method operates on single reads of + * volatile state and is designed to be re-invoked continuously in + * part because it returns upon detecting inconsistencies, + * contention, or state changes that indicate possible success on + * re-invocation. + * + * The scan searches for tasks across queues, randomly selecting + * the first #queues probes, favoring steals 2:1 over submissions + * (by exploiting even/odd indexing), and then performing a + * circular sweep of all queues. The scan terminates upon either + * finding a non-empty queue, or completing a full sweep. If the + * worker is not inactivated, it takes and returns a task from + * this queue. On failure to find a task, we take one of the + * following actions, after which the caller will retry calling + * this method unless terminated. + * + * * If not a complete sweep, try to release a waiting worker. If + * the scan terminated because the worker is inactivated, then the + * released worker will often be the calling worker, and it can + * succeed obtaining a task on the next call. Or maybe it is + * another worker, but with same net effect. Releasing in other + * cases as well ensures that we have enough workers running. + * + * * If the caller has run a task since the the last empty scan, + * return (to allow rescan) if other workers are not also yet + * enqueued. Field WorkQueue.rescans counts down on each scan to + * ensure eventual inactivation, and occasional calls to + * Thread.yield to help avoid interference with more useful + * activities on the system. + * + * * If pool is terminating, terminate the worker + * + * * If not already enqueued, try to inactivate and enqueue the + * worker on wait queue. + * + * * If already enqueued and none of the above apply, either park + * awaiting signal, or if this is the most recent waiter and pool + * is quiescent, relay to idleAwaitWork to check for termination + * and possibly shrink pool. 
+ * + * @param w the worker (via its WorkQueue) + * @return a task or null of none found + */ + private final ForkJoinTask scan(WorkQueue w) { + boolean swept = false; // true after full empty scan + WorkQueue[] ws; // volatile read order matters + int r = w.seed, ec = w.eventCount; // ec is negative if inactive + int rs = runState, m = rs & SMASK; + if ((ws = workQueues) != null && ws.length > m) { + ForkJoinTask task = null; + for (int k = 0, j = -2 - m; ; ++j) { + WorkQueue q; int b; + if (j < 0) { // random probes while j negative + r ^= r << 13; r ^= r >>> 17; k = (r ^= r << 5) | (j & 1); + } // worker (not submit) for odd j + else // cyclic scan when j >= 0 + k += (m >>> 1) | 1; // step by half to reduce bias + + if ((q = ws[k & m]) != null && (b = q.base) - q.top < 0) { + if (ec >= 0) + task = q.pollAt(b); // steal + break; + } + else if (j > m) { + if (rs == runState) // staleness check + swept = true; + break; + } + } + w.seed = r; // save seed for next scan + if (task != null) + return task; + } + + // Decode ctl on empty scan + long c = ctl; int e = (int)c, a = (int)(c >> AC_SHIFT), nr, ns; + if (!swept) { // try to release a waiter + WorkQueue v; Thread p; + if (e > 0 && a < 0 && ws != null && + (v = ws[((~e << 1) | 1) & m]) != null && + v.eventCount == (e | INT_SIGN) && U.compareAndSwapLong + (this, CTL, c, ((long)(v.nextWait & E_MASK) | + ((c + AC_UNIT) & (AC_MASK|TC_MASK))))) { + v.eventCount = (e + E_SEQ) & E_MASK; + if ((p = v.parker) != null) + U.unpark(p); + } + } + else if ((nr = w.rescans) > 0) { // continue rescanning + int ac = a + parallelism; + if ((w.rescans = (ac < nr) ? ac : nr - 1) > 0 && w.seed < 0 && + w.eventCount == ec) + Thread.yield(); // 1 bit randomness for yield call + } + else if (e < 0) // pool is terminating + w.runState = -1; + else if (ec >= 0) { // try to enqueue + long nc = (long)ec | ((c - AC_UNIT) & (AC_MASK|TC_MASK)); + w.nextWait = e; + w.eventCount = ec | INT_SIGN; // mark as inactive + if (!U.compareAndSwapLong(this, CTL, c, nc)) + w.eventCount = ec; // back out on CAS failure + else if ((ns = w.nsteals) != 0) { // set rescans if ran task + if (a <= 0) // ... unless too many active + w.rescans = a + parallelism; + w.nsteals = 0; + w.totalSteals += ns; + } + } + else{ // already queued + if (parallelism == -a) + idleAwaitWork(w); // quiescent + if (w.eventCount == ec) { + Thread.interrupted(); // clear status + ForkJoinWorkerThread wt = w.owner; + U.putObject(wt, PARKBLOCKER, this); + w.parker = wt; // emulate LockSupport.park + if (w.eventCount == ec) // recheck + U.park(false, 0L); // block + w.parker = null; + U.putObject(wt, PARKBLOCKER, null); + } + } + return null; + } + + /** + * If inactivating worker w has caused pool to become quiescent, + * check for pool termination, and, so long as this is not the + * only worker, wait for event for up to SHRINK_RATE nanosecs On + * timeout, if ctl has not changed, terminate the worker, which + * will in turn wake up another worker to possibly repeat this + * process. 
+ * + * @param w the calling worker + */ + private void idleAwaitWork(WorkQueue w) { + long c; int nw, ec; + if (!tryTerminate(false) && + (int)((c = ctl) >> AC_SHIFT) + parallelism == 0 && + (ec = w.eventCount) == ((int)c | INT_SIGN) && + (nw = w.nextWait) != 0) { + long nc = ((long)(nw & E_MASK) | // ctl to restore on timeout + ((c + AC_UNIT) & AC_MASK) | (c & TC_MASK)); + ForkJoinTask.helpExpungeStaleExceptions(); // help clean + ForkJoinWorkerThread wt = w.owner; + while (ctl == c) { + long startTime = System.nanoTime(); + Thread.interrupted(); // timed variant of version in scan() + U.putObject(wt, PARKBLOCKER, this); + w.parker = wt; + if (ctl == c) + U.park(false, SHRINK_RATE); + w.parker = null; + U.putObject(wt, PARKBLOCKER, null); + if (ctl != c) + break; + if (System.nanoTime() - startTime >= SHRINK_TIMEOUT && + U.compareAndSwapLong(this, CTL, c, nc)) { + w.runState = -1; // shrink + w.eventCount = (ec + E_SEQ) | E_MASK; + break; + } + } + } + } + + /** + * Tries to locate and execute tasks for a stealer of the given + * task, or in turn one of its stealers, Traces currentSteal -> + * currentJoin links looking for a thread working on a descendant + * of the given task and with a non-empty queue to steal back and + * execute tasks from. The first call to this method upon a + * waiting join will often entail scanning/search, (which is OK + * because the joiner has nothing better to do), but this method + * leaves hints in workers to speed up subsequent calls. The + * implementation is very branchy to cope with potential + * inconsistencies or loops encountering chains that are stale, + * unknown, or of length greater than MAX_HELP_DEPTH links. All + * of these cases are dealt with by just retrying by caller. + * + * @param joiner the joining worker + * @param task the task to join + * @return true if found or ran a task (and so is immediately retryable) + */ + final boolean tryHelpStealer(WorkQueue joiner, ForkJoinTask task) { + ForkJoinTask subtask; // current target + boolean progress = false; + int depth = 0; // current chain depth + int m = runState & SMASK; + WorkQueue[] ws = workQueues; + + if (ws != null && ws.length > m && (subtask = task).status >= 0) { + outer:for (WorkQueue j = joiner;;) { + // Try to find the stealer of subtask, by first using hint + WorkQueue stealer = null; + WorkQueue v = ws[j.stealHint & m]; + if (v != null && v.currentSteal == subtask) + stealer = v; + else { + for (int i = 1; i <= m; i += 2) { + if ((v = ws[i]) != null && v.currentSteal == subtask) { + stealer = v; + j.stealHint = i; // save hint + break; + } + } + if (stealer == null) + break; + } + + for (WorkQueue q = stealer;;) { // Try to help stealer + ForkJoinTask t; int b; + if (task.status < 0) + break outer; + if ((b = q.base) - q.top < 0) { + progress = true; + if (subtask.status < 0) + break outer; // stale + if ((t = q.pollAt(b)) != null) { + stealer.stealHint = joiner.poolIndex; + joiner.runSubtask(t); + } + } + else { // empty - try to descend to find stealer's stealer + ForkJoinTask next = stealer.currentJoin; + if (++depth == MAX_HELP_DEPTH || subtask.status < 0 || + next == null || next == subtask) + break outer; // max depth, stale, dead-end, cyclic + subtask = next; + j = stealer; + break; + } + } + } + } + return progress; + } + + /** + * If task is at base of some steal queue, steals and executes it. 
+ * + * @param joiner the joining worker + * @param task the task + */ + final void tryPollForAndExec(WorkQueue joiner, ForkJoinTask task) { + WorkQueue[] ws; + int m = runState & SMASK; + if ((ws = workQueues) != null && ws.length > m) { + for (int j = 1; j <= m && task.status >= 0; j += 2) { + WorkQueue q = ws[j]; + if (q != null && q.pollFor(task)) { + joiner.runSubtask(task); + break; + } + } + } + } + + /** + * Returns a non-empty steal queue, if one is found during a random, + * then cyclic scan, else null. This method must be retried by + * caller if, by the time it tries to use the queue, it is empty. + */ + private WorkQueue findNonEmptyStealQueue(WorkQueue w) { + int r = w.seed; // Same idea as scan(), but ignoring submissions + for (WorkQueue[] ws;;) { + int m = runState & SMASK; + if ((ws = workQueues) == null) + return null; + if (ws.length > m) { + WorkQueue q; + for (int n = m << 2, k = r, j = -n;;) { + r ^= r << 13; r ^= r >>> 17; r ^= r << 5; + if ((q = ws[(k | 1) & m]) != null && q.base - q.top < 0) { + w.seed = r; + return q; + } + else if (j > n) + return null; + else + k = (j++ < 0) ? r : k + ((m >>> 1) | 1); + + } + } + } + } + + /** + * Runs tasks until {@code isQuiescent()}. We piggyback on + * active count ctl maintenance, but rather than blocking + * when tasks cannot be found, we rescan until all others cannot + * find tasks either. + */ + final void helpQuiescePool(WorkQueue w) { + for (boolean active = true;;) { + w.runLocalTasks(); // exhaust local queue + WorkQueue q = findNonEmptyStealQueue(w); + if (q != null) { + ForkJoinTask t; + if (!active) { // re-establish active count + long c; + active = true; + do {} while (!U.compareAndSwapLong + (this, CTL, c = ctl, c + AC_UNIT)); + } + if ((t = q.poll()) != null) + w.runSubtask(t); + } + else { + long c; + if (active) { // decrement active count without queuing + active = false; + do {} while (!U.compareAndSwapLong + (this, CTL, c = ctl, c -= AC_UNIT)); + } + else + c = ctl; // re-increment on exit + if ((int)(c >> AC_SHIFT) + parallelism == 0) { + do {} while (!U.compareAndSwapLong + (this, CTL, c = ctl, c + AC_UNIT)); + break; + } + } + } + } + + /** + * Gets and removes a local or stolen task for the given worker + * + * @return a task, if available + */ + final ForkJoinTask nextTaskFor(WorkQueue w) { + for (ForkJoinTask t;;) { + WorkQueue q; + if ((t = w.nextLocalTask()) != null) + return t; + if ((q = findNonEmptyStealQueue(w)) == null) + return null; + if ((t = q.poll()) != null) + return t; + } + } + + /** + * Returns the approximate (non-atomic) number of idle threads per + * active thread to offset steal queue size for method + * ForkJoinTask.getSurplusQueuedTaskCount(). + */ + final int idlePerActive() { + // Approximate at powers of two for small values, saturate past 4 + int p = parallelism; + int a = p + (int)(ctl >> AC_SHIFT); + return (a > (p >>>= 1) ? 0 : + a > (p >>>= 1) ? 1 : + a > (p >>>= 1) ? 2 : + a > (p >>>= 1) ? 4 : + 8); + } + + // Termination + + /** + * Sets SHUTDOWN bit of runState under lock + */ + private void enableShutdown() { + ReentrantLock lock = this.lock; + if (runState >= 0) { + lock.lock(); // don't need try/finally + runState |= SHUTDOWN; + lock.unlock(); + } + } + + /** + * Possibly initiates and/or completes termination. 
Upon + * termination, cancels all queued tasks and then + * + * @param now if true, unconditionally terminate, else only + * if no work and no active workers + * @return true if now terminating or terminated + */ + private boolean tryTerminate(boolean now) { + for (long c;;) { + if (((c = ctl) & STOP_BIT) != 0) { // already terminating + if ((short)(c >>> TC_SHIFT) == -parallelism) { + ReentrantLock lock = this.lock; // signal when no workers + lock.lock(); // don't need try/finally + termination.signalAll(); // signal when 0 workers + lock.unlock(); + } + return true; + } + if (!now) { + if ((int)(c >> AC_SHIFT) != -parallelism || runState >= 0 || + hasQueuedSubmissions()) + return false; + // Check for unqueued inactive workers. One pass suffices. + WorkQueue[] ws = workQueues; WorkQueue w; + if (ws != null) { + int n = ws.length; + for (int i = 1; i < n; i += 2) { + if ((w = ws[i]) != null && w.eventCount >= 0) + return false; + } + } + } + if (U.compareAndSwapLong(this, CTL, c, c | STOP_BIT)) + startTerminating(); + } + } + + /** + * Initiates termination: Runs three passes through workQueues: + * (0) Setting termination status, followed by wakeups of queued + * workers; (1) cancelling all tasks; (2) interrupting lagging + * threads (likely in external tasks, but possibly also blocked in + * joins). Each pass repeats previous steps because of potential + * lagging thread creation. + */ + private void startTerminating() { + for (int pass = 0; pass < 3; ++pass) { + WorkQueue[] ws = workQueues; + if (ws != null) { + WorkQueue w; Thread wt; + int n = ws.length; + for (int j = 0; j < n; ++j) { + if ((w = ws[j]) != null) { + w.runState = -1; + if (pass > 0) { + w.cancelAll(); + if (pass > 1 && (wt = w.owner) != null && + !wt.isInterrupted()) { + try { + wt.interrupt(); + } catch (SecurityException ignore) { + } + } + } + } + } + // Wake up workers parked on event queue + int i, e; long c; Thread p; + while ((i = ((~(e = (int)(c = ctl)) << 1) | 1) & SMASK) < n && + (w = ws[i]) != null && + w.eventCount == (e | INT_SIGN)) { + long nc = ((long)(w.nextWait & E_MASK) | + ((c + AC_UNIT) & AC_MASK) | + (c & (TC_MASK|STOP_BIT))); + if (U.compareAndSwapLong(this, CTL, c, nc)) { + w.eventCount = (e + E_SEQ) & E_MASK; + if ((p = w.parker) != null) + U.unpark(p); + } + } + } + } + } + + // Exported methods + + // Constructors + + /** + * Creates a {@code ForkJoinPool} with parallelism equal to {@link + * java.lang.Runtime#availableProcessors}, using the {@linkplain + * #defaultForkJoinWorkerThreadFactory default thread factory}, + * no UncaughtExceptionHandler, and non-async LIFO processing mode. + * + * @throws SecurityException if a security manager exists and + * the caller is not permitted to modify threads + * because it does not hold {@link + * java.lang.RuntimePermission}{@code ("modifyThread")} + */ + public ForkJoinPool() { + this(Runtime.getRuntime().availableProcessors(), + defaultForkJoinWorkerThreadFactory, null, false); + } + + /** + * Creates a {@code ForkJoinPool} with the indicated parallelism + * level, the {@linkplain + * #defaultForkJoinWorkerThreadFactory default thread factory}, + * no UncaughtExceptionHandler, and non-async LIFO processing mode. 
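+     *
+     * <p>A minimal construction sketch ({@code aTask} stands in for
+     * any already-constructed {@link ForkJoinTask}; it is not part
+     * of this file):
+     *  <pre> {@code
+     * ForkJoinPool pool = new ForkJoinPool(8); // parallelism level 8
+     * pool.invoke(aTask); // runs aTask to completion}</pre>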
+ * + * @param parallelism the parallelism level + * @throws IllegalArgumentException if parallelism less than or + * equal to zero, or greater than implementation limit + * @throws SecurityException if a security manager exists and + * the caller is not permitted to modify threads + * because it does not hold {@link + * java.lang.RuntimePermission}{@code ("modifyThread")} + */ + public ForkJoinPool(int parallelism) { + this(parallelism, defaultForkJoinWorkerThreadFactory, null, false); + } + + /** + * Creates a {@code ForkJoinPool} with the given parameters. + * + * @param parallelism the parallelism level. For default value, + * use {@link java.lang.Runtime#availableProcessors}. + * @param factory the factory for creating new threads. For default value, + * use {@link #defaultForkJoinWorkerThreadFactory}. + * @param handler the handler for internal worker threads that + * terminate due to unrecoverable errors encountered while executing + * tasks. For default value, use {@code null}. + * @param asyncMode if true, + * establishes local first-in-first-out scheduling mode for forked + * tasks that are never joined. This mode may be more appropriate + * than default locally stack-based mode in applications in which + * worker threads only process event-style asynchronous tasks. + * For default value, use {@code false}. + * @throws IllegalArgumentException if parallelism less than or + * equal to zero, or greater than implementation limit + * @throws NullPointerException if the factory is null + * @throws SecurityException if a security manager exists and + * the caller is not permitted to modify threads + * because it does not hold {@link + * java.lang.RuntimePermission}{@code ("modifyThread")} + */ + public ForkJoinPool(int parallelism, + ForkJoinWorkerThreadFactory factory, + Thread.UncaughtExceptionHandler handler, + boolean asyncMode) { + checkPermission(); + if (factory == null) + throw new NullPointerException(); + if (parallelism <= 0 || parallelism > MAX_ID) + throw new IllegalArgumentException(); + this.parallelism = parallelism; + this.factory = factory; + this.ueh = handler; + this.localMode = asyncMode ? FIFO_QUEUE : LIFO_QUEUE; + this.nextPoolIndex = 1; + long np = (long)(-parallelism); // offset ctl counts + this.ctl = ((np << AC_SHIFT) & AC_MASK) | ((np << TC_SHIFT) & TC_MASK); + // initialize workQueues array with room for 2*parallelism if possible + int n = parallelism << 1; + if (n >= MAX_ID) + n = MAX_ID; + else { // See Hackers Delight, sec 3.2, where n < (1 << 16) + n |= n >>> 1; n |= n >>> 2; n |= n >>> 4; n |= n >>> 8; + } + this.workQueues = new WorkQueue[(n + 1) << 1]; + ReentrantLock lck = this.lock = new ReentrantLock(); + this.termination = lck.newCondition(); + this.stealCount = new AtomicLong(); + this.nextWorkerNumber = new AtomicInteger(); + StringBuilder sb = new StringBuilder("ForkJoinPool-"); + sb.append(poolNumberGenerator.incrementAndGet()); + sb.append("-worker-"); + this.workerNamePrefix = sb.toString(); + // Create initial submission queue + WorkQueue sq = tryAddSharedQueue(0); + if (sq != null) + sq.growArray(false); + } + + // Execution methods + + /** + * Performs the given task, returning its result upon completion. + * If the computation encounters an unchecked Exception or Error, + * it is rethrown as the outcome of this invocation. 
Rethrown + * exceptions behave in the same way as regular exceptions, but, + * when possible, contain stack traces (as displayed for example + * using {@code ex.printStackTrace()}) of both the current thread + * as well as the thread actually encountering the exception; + * minimally only the latter. + * + * @param task the task + * @return the task's result + * @throws NullPointerException if the task is null + * @throws RejectedExecutionException if the task cannot be + * scheduled for execution + */ + public T invoke(ForkJoinTask task) { + doSubmit(task); + return task.join(); + } + + /** + * Arranges for (asynchronous) execution of the given task. + * + * @param task the task + * @throws NullPointerException if the task is null + * @throws RejectedExecutionException if the task cannot be + * scheduled for execution + */ + public void execute(ForkJoinTask task) { + doSubmit(task); + } + + // AbstractExecutorService methods + + /** + * @throws NullPointerException if the task is null + * @throws RejectedExecutionException if the task cannot be + * scheduled for execution + */ + public void execute(Runnable task) { + if (task == null) + throw new NullPointerException(); + ForkJoinTask job; + if (task instanceof ForkJoinTask) // avoid re-wrap + job = (ForkJoinTask) task; + else + job = ForkJoinTask.adapt(task, null); + doSubmit(job); + } + + /** + * Submits a ForkJoinTask for execution. + * + * @param task the task to submit + * @return the task + * @throws NullPointerException if the task is null + * @throws RejectedExecutionException if the task cannot be + * scheduled for execution + */ + public ForkJoinTask submit(ForkJoinTask task) { + doSubmit(task); + return task; + } + + /** + * @throws NullPointerException if the task is null + * @throws RejectedExecutionException if the task cannot be + * scheduled for execution + */ + public ForkJoinTask submit(Callable task) { + if (task == null) + throw new NullPointerException(); + ForkJoinTask job = ForkJoinTask.adapt(task); + doSubmit(job); + return job; + } + + /** + * @throws NullPointerException if the task is null + * @throws RejectedExecutionException if the task cannot be + * scheduled for execution + */ + public ForkJoinTask submit(Runnable task, T result) { + if (task == null) + throw new NullPointerException(); + ForkJoinTask job = ForkJoinTask.adapt(task, result); + doSubmit(job); + return job; + } + + /** + * @throws NullPointerException if the task is null + * @throws RejectedExecutionException if the task cannot be + * scheduled for execution + */ + public ForkJoinTask submit(Runnable task) { + if (task == null) + throw new NullPointerException(); + ForkJoinTask job; + if (task instanceof ForkJoinTask) // avoid re-wrap + job = (ForkJoinTask) task; + else + job = ForkJoinTask.adapt(task, null); + doSubmit(job); + return job; + } + + /** + * @throws NullPointerException {@inheritDoc} + * @throws RejectedExecutionException {@inheritDoc} + */ + public List> invokeAll(Collection> tasks) { + ArrayList> forkJoinTasks = + new ArrayList>(tasks.size()); + for (Callable task : tasks) + forkJoinTasks.add(ForkJoinTask.adapt(task)); + invoke(new InvokeAll(forkJoinTasks)); + + @SuppressWarnings({"unchecked", "rawtypes"}) + List> futures = (List>) (List) forkJoinTasks; + return futures; + } + + static final class InvokeAll extends RecursiveAction { + final ArrayList> tasks; + InvokeAll(ArrayList> tasks) { this.tasks = tasks; } + public void compute() { + try { invokeAll(tasks); } + catch (Exception ignore) {} + } + private static final long 
serialVersionUID = -7914297376763021607L; + } + + /** + * Returns the factory used for constructing new workers. + * + * @return the factory used for constructing new workers + */ + public ForkJoinWorkerThreadFactory getFactory() { + return factory; + } + + /** + * Returns the handler for internal worker threads that terminate + * due to unrecoverable errors encountered while executing tasks. + * + * @return the handler, or {@code null} if none + */ + public Thread.UncaughtExceptionHandler getUncaughtExceptionHandler() { + return ueh; + } + + /** + * Returns the targeted parallelism level of this pool. + * + * @return the targeted parallelism level of this pool + */ + public int getParallelism() { + return parallelism; + } + + /** + * Returns the number of worker threads that have started but not + * yet terminated. The result returned by this method may differ + * from {@link #getParallelism} when threads are created to + * maintain parallelism when others are cooperatively blocked. + * + * @return the number of worker threads + */ + public int getPoolSize() { + return parallelism + (short)(ctl >>> TC_SHIFT); + } + + /** + * Returns {@code true} if this pool uses local first-in-first-out + * scheduling mode for forked tasks that are never joined. + * + * @return {@code true} if this pool uses async mode + */ + public boolean getAsyncMode() { + return localMode != 0; + } + + /** + * Returns an estimate of the number of worker threads that are + * not blocked waiting to join tasks or for other managed + * synchronization. This method may overestimate the + * number of running threads. + * + * @return the number of worker threads + */ + public int getRunningThreadCount() { + int rc = 0; + WorkQueue[] ws; WorkQueue w; + if ((ws = workQueues) != null) { + int n = ws.length; + for (int i = 1; i < n; i += 2) { + Thread.State s; ForkJoinWorkerThread wt; + if ((w = ws[i]) != null && (wt = w.owner) != null && + w.eventCount >= 0 && + (s = wt.getState()) != Thread.State.BLOCKED && + s != Thread.State.WAITING && + s != Thread.State.TIMED_WAITING) + ++rc; + } + } + return rc; + } + + /** + * Returns an estimate of the number of threads that are currently + * stealing or executing tasks. This method may overestimate the + * number of active threads. + * + * @return the number of active threads + */ + public int getActiveThreadCount() { + int r = parallelism + (int)(ctl >> AC_SHIFT); + return (r <= 0) ? 0 : r; // suppress momentarily negative values + } + + /** + * Returns {@code true} if all worker threads are currently idle. + * An idle worker is one that cannot obtain a task to execute + * because none are available to steal from other threads, and + * there are no pending submissions to the pool. This method is + * conservative; it might not return {@code true} immediately upon + * idleness of all threads, but will eventually become true if + * threads remain inactive. + * + * @return {@code true} if all threads are currently idle + */ + public boolean isQuiescent() { + return (int)(ctl >> AC_SHIFT) + parallelism == 0; + } + + /** + * Returns an estimate of the total number of tasks stolen from + * one thread's work queue by another. The reported value + * underestimates the actual total number of steals when the pool + * is not quiescent. This value may be useful for monitoring and + * tuning fork/join programs: in general, steal counts should be + * high enough to keep threads busy, but low enough to avoid + * overhead and contention across threads. 
+ * + * @return the number of steals + */ + public long getStealCount() { + long count = stealCount.get(); + WorkQueue[] ws; WorkQueue w; + if ((ws = workQueues) != null) { + int n = ws.length; + for (int i = 1; i < n; i += 2) { + if ((w = ws[i]) != null) + count += w.totalSteals; + } + } + return count; + } + + /** + * Returns an estimate of the total number of tasks currently held + * in queues by worker threads (but not including tasks submitted + * to the pool that have not begun executing). This value is only + * an approximation, obtained by iterating across all threads in + * the pool. This method may be useful for tuning task + * granularities. + * + * @return the number of queued tasks + */ + public long getQueuedTaskCount() { + long count = 0; + WorkQueue[] ws; WorkQueue w; + if ((ws = workQueues) != null) { + int n = ws.length; + for (int i = 1; i < n; i += 2) { + if ((w = ws[i]) != null) + count += w.queueSize(); + } + } + return count; + } + + /** + * Returns an estimate of the number of tasks submitted to this + * pool that have not yet begun executing. This method may take + * time proportional to the number of submissions. + * + * @return the number of queued submissions + */ + public int getQueuedSubmissionCount() { + int count = 0; + WorkQueue[] ws; WorkQueue w; + if ((ws = workQueues) != null) { + int n = ws.length; + for (int i = 0; i < n; i += 2) { + if ((w = ws[i]) != null) + count += w.queueSize(); + } + } + return count; + } + + /** + * Returns {@code true} if there are any tasks submitted to this + * pool that have not yet begun executing. + * + * @return {@code true} if there are any queued submissions + */ + public boolean hasQueuedSubmissions() { + WorkQueue[] ws; WorkQueue w; + if ((ws = workQueues) != null) { + int n = ws.length; + for (int i = 0; i < n; i += 2) { + if ((w = ws[i]) != null && w.queueSize() != 0) + return true; + } + } + return false; + } + + /** + * Removes and returns the next unexecuted submission if one is + * available. This method may be useful in extensions to this + * class that re-assign work in systems with multiple pools. + * + * @return the next submission, or {@code null} if none + */ + protected ForkJoinTask pollSubmission() { + WorkQueue[] ws; WorkQueue w; ForkJoinTask t; + if ((ws = workQueues) != null) { + int n = ws.length; + for (int i = 0; i < n; i += 2) { + if ((w = ws[i]) != null && (t = w.poll()) != null) + return t; + } + } + return null; + } + + /** + * Removes all available unexecuted submitted and forked tasks + * from scheduling queues and adds them to the given collection, + * without altering their execution status. These may include + * artificially generated or wrapped tasks. This method is + * designed to be invoked only when the pool is known to be + * quiescent. Invocations at other times may not remove all + * tasks. A failure encountered while attempting to add elements + * to collection {@code c} may result in elements being in + * neither, either or both collections when the associated + * exception is thrown. The behavior of this operation is + * undefined if the specified collection is modified while the + * operation is in progress. 
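+     *
+     * <p>A hypothetical usage sketch, from within an extending
+     * subclass (this method is {@code protected}):
+     *  <pre> {@code
+     * List<ForkJoinTask<?>> drained = new ArrayList<ForkJoinTask<?>>();
+     * if (isQuiescent())
+     *   drainTasksTo(drained);}</pre>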
+ * + * @param c the collection to transfer elements into + * @return the number of elements transferred + */ + protected int drainTasksTo(Collection> c) { + int count = 0; + WorkQueue[] ws; WorkQueue w; ForkJoinTask t; + if ((ws = workQueues) != null) { + int n = ws.length; + for (int i = 0; i < n; ++i) { + if ((w = ws[i]) != null) { + while ((t = w.poll()) != null) { + c.add(t); + ++count; + } + } + } + } + return count; + } + + /** + * Returns a string identifying this pool, as well as its state, + * including indications of run state, parallelism level, and + * worker and task counts. + * + * @return a string identifying this pool, as well as its state + */ + public String toString() { + long st = getStealCount(); + long qt = getQueuedTaskCount(); + long qs = getQueuedSubmissionCount(); + int rc = getRunningThreadCount(); + int pc = parallelism; + long c = ctl; + int tc = pc + (short)(c >>> TC_SHIFT); + int ac = pc + (int)(c >> AC_SHIFT); + if (ac < 0) // ignore transient negative + ac = 0; + String level; + if ((c & STOP_BIT) != 0) + level = (tc == 0) ? "Terminated" : "Terminating"; + else + level = runState < 0 ? "Shutting down" : "Running"; + return super.toString() + + "[" + level + + ", parallelism = " + pc + + ", size = " + tc + + ", active = " + ac + + ", running = " + rc + + ", steals = " + st + + ", tasks = " + qt + + ", submissions = " + qs + + "]"; + } + + /** + * Initiates an orderly shutdown in which previously submitted + * tasks are executed, but no new tasks will be accepted. + * Invocation has no additional effect if already shut down. + * Tasks that are in the process of being submitted concurrently + * during the course of this method may or may not be rejected. + * + * @throws SecurityException if a security manager exists and + * the caller is not permitted to modify threads + * because it does not hold {@link + * java.lang.RuntimePermission}{@code ("modifyThread")} + */ + public void shutdown() { + checkPermission(); + enableShutdown(); + tryTerminate(false); + } + + /** + * Attempts to cancel and/or stop all tasks, and reject all + * subsequently submitted tasks. Tasks that are in the process of + * being submitted or executed concurrently during the course of + * this method may or may not be rejected. This method cancels + * both existing and unexecuted tasks, in order to permit + * termination in the presence of task dependencies. So the method + * always returns an empty list (unlike the case for some other + * Executors). + * + * @return an empty list + * @throws SecurityException if a security manager exists and + * the caller is not permitted to modify threads + * because it does not hold {@link + * java.lang.RuntimePermission}{@code ("modifyThread")} + */ + public List shutdownNow() { + checkPermission(); + enableShutdown(); + tryTerminate(true); + return Collections.emptyList(); + } + + /** + * Returns {@code true} if all tasks have completed following shut down. + * + * @return {@code true} if all tasks have completed following shut down + */ + public boolean isTerminated() { + long c = ctl; + return ((c & STOP_BIT) != 0L && + (short)(c >>> TC_SHIFT) == -parallelism); + } + + /** + * Returns {@code true} if the process of termination has + * commenced but not yet completed. This method may be useful for + * debugging. A return of {@code true} reported a sufficient + * period after shutdown may indicate that submitted tasks have + * ignored or suppressed interruption, or are waiting for IO, + * causing this executor not to properly terminate. 
(See the + * advisory notes for class {@link ForkJoinTask} stating that + * tasks should not normally entail blocking operations. But if + * they do, they must abort them on interrupt.) + * + * @return {@code true} if terminating but not yet terminated + */ + public boolean isTerminating() { + long c = ctl; + return ((c & STOP_BIT) != 0L && + (short)(c >>> TC_SHIFT) != -parallelism); + } + + /** + * Returns {@code true} if this pool has been shut down. + * + * @return {@code true} if this pool has been shut down + */ + public boolean isShutdown() { + return runState < 0; + } + + /** + * Blocks until all tasks have completed execution after a shutdown + * request, or the timeout occurs, or the current thread is + * interrupted, whichever happens first. + * + * @param timeout the maximum time to wait + * @param unit the time unit of the timeout argument + * @return {@code true} if this executor terminated and + * {@code false} if the timeout elapsed before termination + * @throws InterruptedException if interrupted while waiting + */ + public boolean awaitTermination(long timeout, TimeUnit unit) + throws InterruptedException { + long nanos = unit.toNanos(timeout); + final ReentrantLock lock = this.lock; + lock.lock(); + try { + for (;;) { + if (isTerminated()) + return true; + if (nanos <= 0) + return false; + nanos = termination.awaitNanos(nanos); + } + } finally { + lock.unlock(); + } + } + + /** + * Interface for extending managed parallelism for tasks running + * in {@link ForkJoinPool}s. + * + *

A {@code ManagedBlocker} provides two methods. Method + * {@code isReleasable} must return {@code true} if blocking is + * not necessary. Method {@code block} blocks the current thread + * if necessary (perhaps internally invoking {@code isReleasable} + * before actually blocking). These actions are performed by any + * thread invoking {@link ForkJoinPool#managedBlock}. The + * unusual methods in this API accommodate synchronizers that may, + * but don't usually, block for long periods. Similarly, they + * allow more efficient internal handling of cases in which + * additional workers may be, but usually are not, needed to + * ensure sufficient parallelism. Toward this end, + * implementations of method {@code isReleasable} must be amenable + * to repeated invocation. + * + *

For example, here is a ManagedBlocker based on a + * ReentrantLock: + *

 {@code
+     * class ManagedLocker implements ManagedBlocker {
+     *   final ReentrantLock lock;
+     *   boolean hasLock = false;
+     *   ManagedLocker(ReentrantLock lock) { this.lock = lock; }
+     *   public boolean block() {
+     *     if (!hasLock)
+     *       lock.lock();
+     *     return true;
+     *   }
+     *   public boolean isReleasable() {
+     *     return hasLock || (hasLock = lock.tryLock());
+     *   }
+     * }}</pre>
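+     *
+     * <p>A usage sketch ({@code lock} is an assumed shared
+     * {@code ReentrantLock}; {@code managedBlock} may throw
+     * {@code InterruptedException}):
+     *  <pre> {@code
+     * ManagedLocker locker = new ManagedLocker(lock);
+     * ForkJoinPool.managedBlock(locker); // lock is held on return}</pre>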
+ * + *

Here is a class that possibly blocks waiting for an + * item on a given queue: + *

 {@code
+     * class QueueTaker implements ManagedBlocker {
+     *   final BlockingQueue queue;
+     *   volatile E item = null;
+     *   QueueTaker(BlockingQueue q) { this.queue = q; }
+     *   public boolean block() throws InterruptedException {
+     *     if (item == null)
+     *       item = queue.take();
+     *     return true;
+     *   }
+     *   public boolean isReleasable() {
+     *     return item != null || (item = queue.poll()) != null;
+     *   }
+     *   public E getItem() { // call after pool.managedBlock completes
+     *     return item;
+     *   }
+     * }}</pre>
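+     *
+     * <p>A usage sketch ({@code q} is an assumed
+     * {@code BlockingQueue<E>}; {@code managedBlock} may throw
+     * {@code InterruptedException}):
+     *  <pre> {@code
+     * QueueTaker<E> taker = new QueueTaker<E>(q);
+     * ForkJoinPool.managedBlock(taker);
+     * E item = taker.getItem();}</pre>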
+ */ + public static interface ManagedBlocker { + /** + * Possibly blocks the current thread, for example waiting for + * a lock or condition. + * + * @return {@code true} if no additional blocking is necessary + * (i.e., if isReleasable would return true) + * @throws InterruptedException if interrupted while waiting + * (the method is not required to do so, but is allowed to) + */ + boolean block() throws InterruptedException; + + /** + * Returns {@code true} if blocking is unnecessary. + */ + boolean isReleasable(); + } + + /** + * Blocks in accord with the given blocker. If the current thread + * is a {@link ForkJoinWorkerThread}, this method possibly + * arranges for a spare thread to be activated if necessary to + * ensure sufficient parallelism while the current thread is blocked. + * + *

If the caller is not a {@link ForkJoinTask}, this method is + * behaviorally equivalent to + *

 {@code
+     * while (!blocker.isReleasable())
+     *   if (blocker.block())
+     *     return;
+     * }</pre>
+ * + * If the caller is a {@code ForkJoinTask}, then the pool may + * first be expanded to ensure parallelism, and later adjusted. + * + * @param blocker the blocker + * @throws InterruptedException if blocker.block did so + */ + public static void managedBlock(ManagedBlocker blocker) + throws InterruptedException { + Thread t = Thread.currentThread(); + ForkJoinPool p = ((t instanceof ForkJoinWorkerThread) ? + ((ForkJoinWorkerThread)t).pool : null); + while (!blocker.isReleasable()) { + if (p == null || p.tryCompensate()) { + try { + do {} while (!blocker.isReleasable() && !blocker.block()); + } finally { + if (p != null) + p.incrementActiveCount(); + } + break; + } + } + } + + // AbstractExecutorService overrides. These rely on undocumented + // fact that ForkJoinTask.adapt returns ForkJoinTasks that also + // implement RunnableFuture. + + protected RunnableFuture newTaskFor(Runnable runnable, T value) { + return (RunnableFuture) ForkJoinTask.adapt(runnable, value); + } + + protected RunnableFuture newTaskFor(Callable callable) { + return (RunnableFuture) ForkJoinTask.adapt(callable); + } + + // Unsafe mechanics + private static final sun.misc.Unsafe U; + private static final long CTL; + private static final long RUNSTATE; + private static final long PARKBLOCKER; + + static { + poolNumberGenerator = new AtomicInteger(); + modifyThreadPermission = new RuntimePermission("modifyThread"); + defaultForkJoinWorkerThreadFactory = + new DefaultForkJoinWorkerThreadFactory(); + int s; + try { + U = getUnsafe(); + Class k = ForkJoinPool.class; + Class tk = Thread.class; + CTL = U.objectFieldOffset + (k.getDeclaredField("ctl")); + RUNSTATE = U.objectFieldOffset + (k.getDeclaredField("runState")); + PARKBLOCKER = U.objectFieldOffset + (tk.getDeclaredField("parkBlocker")); + } catch (Exception e) { + throw new Error(e); + } + } + + private static sun.misc.Unsafe getUnsafe() { + return Unsafe.instance; + } +} diff --git a/akka-actor/src/main/java/akka/jsr166y/ForkJoinTask.java b/akka-actor/src/main/java/akka/jsr166y/ForkJoinTask.java new file mode 100644 index 0000000000..fe31c4b165 --- /dev/null +++ b/akka-actor/src/main/java/akka/jsr166y/ForkJoinTask.java @@ -0,0 +1,1543 @@ +/* + * Written by Doug Lea with assistance from members of JCP JSR-166 + * Expert Group and released to the public domain, as explained at + * http://creativecommons.org/publicdomain/zero/1.0/ + */ + +package akka.jsr166y; +import java.io.Serializable; +import java.util.Collection; +import java.util.List; +import java.util.RandomAccess; +import java.lang.ref.WeakReference; +import java.lang.ref.ReferenceQueue; +import java.util.concurrent.Callable; +import java.util.concurrent.CancellationException; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.Future; +import java.util.concurrent.RejectedExecutionException; +import java.util.concurrent.RunnableFuture; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; +import java.util.concurrent.locks.ReentrantLock; +import java.lang.reflect.Constructor; + +/** + * Abstract base class for tasks that run within a {@link ForkJoinPool}. + * A {@code ForkJoinTask} is a thread-like entity that is much + * lighter weight than a normal thread. Huge numbers of tasks and + * subtasks may be hosted by a small number of actual threads in a + * ForkJoinPool, at the price of some usage limitations. + * + *

A "main" {@code ForkJoinTask} begins execution when submitted + * to a {@link ForkJoinPool}. Once started, it will usually in turn + * start other subtasks. As indicated by the name of this class, + * many programs using {@code ForkJoinTask} employ only methods + * {@link #fork} and {@link #join}, or derivatives such as {@link + * #invokeAll(ForkJoinTask...) invokeAll}. However, this class also + * provides a number of other methods that can come into play in + * advanced usages, as well as extension mechanics that allow + * support of new forms of fork/join processing. + * + *

A {@code ForkJoinTask} is a lightweight form of {@link Future}. + * The efficiency of {@code ForkJoinTask}s stems from a set of + * restrictions (that are only partially statically enforceable) + * reflecting their main use as computational tasks calculating pure + * functions or operating on purely isolated objects. The primary + * coordination mechanisms are {@link #fork}, that arranges + * asynchronous execution, and {@link #join}, that doesn't proceed + * until the task's result has been computed. Computations should + * ideally avoid {@code synchronized} methods or blocks, and should + * minimize other blocking synchronization apart from joining other + * tasks or using synchronizers such as Phasers that are advertised to + * cooperate with fork/join scheduling. Subdividable tasks should also + * not perform blocking IO, and should ideally access variables that + * are completely independent of those accessed by other running + * tasks. These guidelines are loosely enforced by not permitting + * checked exceptions such as {@code IOExceptions} to be + * thrown. However, computations may still encounter unchecked + * exceptions, that are rethrown to callers attempting to join + * them. These exceptions may additionally include {@link + * RejectedExecutionException} stemming from internal resource + * exhaustion, such as failure to allocate internal task + * queues. Rethrown exceptions behave in the same way as regular + * exceptions, but, when possible, contain stack traces (as displayed + * for example using {@code ex.printStackTrace()}) of both the thread + * that initiated the computation as well as the thread actually + * encountering the exception; minimally only the latter. + * + *

It is possible to define and use ForkJoinTasks that may block, + * but doing so requires three further considerations: (1) Completion + * of few if any other tasks should be dependent on a task + * that blocks on external synchronization or IO. Event-style async + * tasks that are never joined often fall into this category. (2) To + * minimize resource impact, tasks should be small; ideally performing + * only the (possibly) blocking action. (3) Unless the {@link + * ForkJoinPool.ManagedBlocker} API is used, or the number of possibly + * blocked tasks is known to be less than the pool's {@link + * ForkJoinPool#getParallelism} level, the pool cannot guarantee that + * enough threads will be available to ensure progress or good + * performance. + * + *

The primary method for awaiting completion and extracting + * results of a task is {@link #join}, but there are several variants: + * The {@link Future#get} methods support interruptible and/or timed + * waits for completion and report results using {@code Future} + * conventions. Method {@link #invoke} is semantically + * equivalent to {@code fork(); join()} but always attempts to begin + * execution in the current thread. The "quiet" forms of + * these methods do not extract results or report exceptions. These + * may be useful when a set of tasks are being executed, and you need + * to delay processing of results or exceptions until all complete. + * Method {@code invokeAll} (available in multiple versions) + * performs the most common form of parallel invocation: forking a set + * of tasks and joining them all. + * + *
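+ * <p>For instance (a sketch, assuming {@code a} and {@code b} are
+ * already-constructed {@code RecursiveTask<Integer>}s):
+ * <pre> {@code
+ * a.fork();                        // arrange async execution of a
+ * int sum = b.invoke() + a.join(); // run b here, then await a}</pre>
+ *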

In the most typical usages, a fork-join pair act like a call + * (fork) and return (join) from a parallel recursive function. As is + * the case with other forms of recursive calls, returns (joins) + * should be performed innermost-first. For example, {@code a.fork(); + * b.fork(); b.join(); a.join();} is likely to be substantially more + * efficient than joining {@code a} before {@code b}. + * + *

The execution status of tasks may be queried at several levels + * of detail: {@link #isDone} is true if a task completed in any way + * (including the case where a task was cancelled without executing); + * {@link #isCompletedNormally} is true if a task completed without + * cancellation or encountering an exception; {@link #isCancelled} is + * true if the task was cancelled (in which case {@link #getException} + * returns a {@link java.util.concurrent.CancellationException}); and + * {@link #isCompletedAbnormally} is true if a task was either + * cancelled or encountered an exception, in which case {@link + * #getException} will return either the encountered exception or + * {@link java.util.concurrent.CancellationException}. + * + *
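+ * <p>For example (a sketch; {@code task} is any task known to be
+ * done):
+ * <pre> {@code
+ * if (task.isCancelled())
+ *   System.out.println("cancelled");
+ * else if (task.isCompletedAbnormally())
+ *   System.out.println("failed: " + task.getException());
+ * else
+ *   System.out.println("completed normally");}</pre>
+ *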

The ForkJoinTask class is not usually directly subclassed. + * Instead, you subclass one of the abstract classes that support a + * particular style of fork/join processing, typically {@link + * RecursiveAction} for computations that do not return results, or + * {@link RecursiveTask} for those that do. Normally, a concrete + * ForkJoinTask subclass declares fields comprising its parameters, + * established in a constructor, and then defines a {@code compute} + * method that somehow uses the control methods supplied by this base + * class. While these methods have {@code public} access (to allow + * instances of different task subclasses to call each other's + * methods), some of them may only be called from within other + * ForkJoinTasks (as may be determined using method {@link + * #inForkJoinPool}). Attempts to invoke them in other contexts + * result in exceptions or errors, possibly including + * {@code ClassCastException}. + * + *
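+ * <p>As an illustrative sketch of such a subclass (not part of this
+ * file):
+ * <pre> {@code
+ * class Fib extends RecursiveTask<Integer> {
+ *   final int n; // parameter established in constructor
+ *   Fib(int n) { this.n = n; }
+ *   protected Integer compute() {
+ *     if (n <= 1)
+ *       return n;
+ *     Fib f1 = new Fib(n - 1);
+ *     f1.fork(); // run f1 asynchronously
+ *     return new Fib(n - 2).invoke() + f1.join();
+ *   }
+ * }}</pre>
+ *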

Method {@link #join} and its variants are appropriate for use + * only when completion dependencies are acyclic; that is, the + * parallel computation can be described as a directed acyclic graph + * (DAG). Otherwise, executions may encounter a form of deadlock as + * tasks cyclically wait for each other. However, this framework + * supports other methods and techniques (for example the use of + * {@link Phaser}, {@link #helpQuiesce}, and {@link #complete}) that + * may be of use in constructing custom subclasses for problems that + * are not statically structured as DAGs. To support such usages a + * ForkJoinTask may be atomically marked using {@link + * #markForkJoinTask} and checked for marking using {@link + * #isMarkedForkJoinTask}. The ForkJoinTask implementation does not + * use these {@code protected} methods or marks for any purpose, but + * they may be of use in the construction of specialized subclasses. + * For example, parallel graph traversals can use the supplied methods + * to avoid revisiting nodes/tasks that have already been + * processed. Also, completion based designs can use them to record + * that one subtask has completed. (Method names for marking are bulky + * in part to encourage definition of methods that reflect their usage + * patterns.) + * + *

Most base support methods are {@code final}, to prevent + * overriding of implementations that are intrinsically tied to the + * underlying lightweight task scheduling framework. Developers + * creating new basic styles of fork/join processing should minimally + * implement {@code protected} methods {@link #exec}, {@link + * #setRawResult}, and {@link #getRawResult}, while also introducing + * an abstract computational method that can be implemented in its + * subclasses, possibly relying on other {@code protected} methods + * provided by this class. + * + *

ForkJoinTasks should perform relatively small amounts of + * computation. Large tasks should be split into smaller subtasks, + * usually via recursive decomposition. As a very rough rule of thumb, + * a task should perform more than 100 and less than 10000 basic + * computational steps, and should avoid indefinite looping. If tasks + * are too big, then parallelism cannot improve throughput. If too + * small, then memory and internal task maintenance overhead may + * overwhelm processing. + * + *
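+ * <p>A common way to follow this guideline is to split on a
+ * sequential threshold (a sketch; {@code SubTask}, {@code THRESHOLD}
+ * and {@code computeSequentially} are assumed, not defined here):
+ * <pre> {@code
+ * protected void compute() {
+ *   if (hi - lo < THRESHOLD)
+ *     computeSequentially(); // small enough: run directly
+ *   else {
+ *     int mid = (lo + hi) >>> 1;
+ *     invokeAll(new SubTask(lo, mid), new SubTask(mid, hi));
+ *   }
+ * }}</pre>
+ *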

This class provides {@code adapt} methods for {@link Runnable} + * and {@link Callable}, that may be of use when mixing execution of + * {@code ForkJoinTasks} with other kinds of tasks. When all tasks are + * of this form, consider using a pool constructed in asyncMode. + * + *
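+ * <p>For example (a sketch; {@code pool} and a {@code Callable<String>}
+ * named {@code callable} are assumed to exist):
+ * <pre> {@code
+ * ForkJoinTask<String> task = ForkJoinTask.adapt(callable);
+ * pool.execute(task);}</pre>
+ *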

ForkJoinTasks are {@code Serializable}, which enables them to be + * used in extensions such as remote execution frameworks. It is + * sensible to serialize tasks only before or after, but not during, + * execution. Serialization is not relied on during execution itself. + * + * @since 1.7 + * @author Doug Lea + */ +public abstract class ForkJoinTask implements Future, Serializable { + + /* + * See the internal documentation of class ForkJoinPool for a + * general implementation overview. ForkJoinTasks are mainly + * responsible for maintaining their "status" field amidst relays + * to methods in ForkJoinWorkerThread and ForkJoinPool. + * + * The methods of this class are more-or-less layered into + * (1) basic status maintenance + * (2) execution and awaiting completion + * (3) user-level methods that additionally report results. + * This is sometimes hard to see because this file orders exported + * methods in a way that flows well in javadocs. + */ + + /** + * The number of times to try to help join a task without any + * apparent progress before giving up and blocking. The value is + * arbitrary but should be large enough to cope with transient + * stalls (due to GC etc) that can cause helping methods not to be + * able to proceed because other workers have not progressed to + * the point where subtasks can be found or taken. + */ + private static final int HELP_RETRIES = 32; + + /* + * The status field holds run control status bits packed into a + * single int to minimize footprint and to ensure atomicity (via + * CAS). Status is initially zero, and takes on nonnegative + * values until completed, upon which status holds value + * NORMAL, CANCELLED, or EXCEPTIONAL. Tasks undergoing blocking + * waits by other threads have the SIGNAL bit set. Completion of + * a stolen task with SIGNAL set awakens any waiters via + * notifyAll. Even though suboptimal for some purposes, we use + * basic builtin wait/notify to take advantage of "monitor + * inflation" in JVMs that we would otherwise need to emulate to + * avoid adding further per-task bookkeeping overhead. We want + * these monitors to be "fat", i.e., not use biasing or thin-lock + * techniques, so use some odd coding idioms that tend to avoid + * them. + */ + + /** The run status of this task */ + volatile int status; // accessed directly by pool and workers + static final int NORMAL = 0xfffffffc; // negative with low 2 bits 0 + static final int CANCELLED = 0xfffffff8; // must be < NORMAL + static final int EXCEPTIONAL = 0xfffffff4; // must be < CANCELLED + static final int SIGNAL = 0x00000001; + static final int MARKED = 0x00000002; + + /** + * Marks completion and wakes up threads waiting to join this + * task, also clearing signal request bits. A specialization for + * NORMAL completion is in method doExec. + * + * @param completion one of NORMAL, CANCELLED, EXCEPTIONAL + * @return completion status on exit + */ + private int setCompletion(int completion) { + for (int s;;) { + if ((s = status) < 0) + return s; + if (U.compareAndSwapInt(this, STATUS, s, (s & ~SIGNAL)|completion)) { + if ((s & SIGNAL) != 0) + synchronized (this) { notifyAll(); } + return completion; + } + } + } + + /** + * Primary execution method for stolen tasks. Unless done, calls + * exec and records status if completed, but doesn't wait for + * completion otherwise. 
+ * + * @return status on exit from this method + */ + final int doExec() { + int s; boolean completed; + if ((s = status) >= 0) { + try { + completed = exec(); + } catch (Throwable rex) { + return setExceptionalCompletion(rex); + } + while ((s = status) >= 0 && completed) { + if (U.compareAndSwapInt(this, STATUS, s, (s & ~SIGNAL)|NORMAL)) { + if ((s & SIGNAL) != 0) + synchronized (this) { notifyAll(); } + return NORMAL; + } + } + } + return s; + } + + /** + * Blocks a non-worker-thread until completion. + * @return status upon completion + */ + private int externalAwaitDone() { + int s; + if ((s = status) >= 0) { + boolean interrupted = false; + synchronized (this) { + while ((s = status) >= 0) { + if (U.compareAndSwapInt(this, STATUS, s, s | SIGNAL)) { + try { + wait(); + } catch (InterruptedException ie) { + interrupted = true; + } + } + } + } + if (interrupted) + Thread.currentThread().interrupt(); + } + return s; + } + + /** + * Blocks a non-worker-thread until completion or interruption or timeout. + */ + private int externalInterruptibleAwaitDone(long millis) + throws InterruptedException { + int s; + if (Thread.interrupted()) + throw new InterruptedException(); + if ((s = status) >= 0) { + synchronized (this) { + while ((s = status) >= 0) { + if (U.compareAndSwapInt(this, STATUS, s, s | SIGNAL)) { + wait(millis); + if (millis > 0L) + break; + } + } + } + } + return s; + } + + + /** + * Implementation for join, get, quietlyJoin. Directly handles + * only cases of already-completed, external wait, and + * unfork+exec. Others are relayed to awaitJoin. + * + * @return status upon completion + */ + private int doJoin() { + int s; Thread t; ForkJoinWorkerThread wt; ForkJoinPool.WorkQueue w; + if ((s = status) >= 0) { + if (!((t = Thread.currentThread()) instanceof ForkJoinWorkerThread)) + s = externalAwaitDone(); + else if (!(w = (wt = (ForkJoinWorkerThread)t).workQueue). + tryUnpush(this) || (s = doExec()) >= 0) + s = awaitJoin(w, wt.pool); + } + return s; + } + + /** + * Helps and/or blocks until joined. + * + * @param w the joiner + * @param p the pool + * @return status upon completion + */ + private int awaitJoin(ForkJoinPool.WorkQueue w, ForkJoinPool p) { + int s; + ForkJoinTask prevJoin = w.currentJoin; + w.currentJoin = this; + for (int k = HELP_RETRIES; (s = status) >= 0;) { + if ((w.queueSize() > 0) ? + w.tryRemoveAndExec(this) : // self-help + p.tryHelpStealer(w, this)) // help process tasks + k = HELP_RETRIES; // reset if made progress + else if ((s = status) < 0) // recheck + break; + else if (--k > 0) { + if ((k & 3) == 1) + Thread.yield(); // occasionally yield + } + else if (k == 0) + p.tryPollForAndExec(w, this); // uncommon self-help case + else if (p.tryCompensate()) { // true if can block + try { + int ss = status; + if (ss >= 0 && // assert need signal + U.compareAndSwapInt(this, STATUS, ss, ss | SIGNAL)) { + synchronized (this) { + if (status >= 0) // block + wait(); + } + } + } catch (InterruptedException ignore) { + } finally { + p.incrementActiveCount(); // re-activate + } + } + } + w.currentJoin = prevJoin; + return s; + } + + /** + * Implementation for invoke, quietlyInvoke. 
+ * + * @return status upon completion + */ + private int doInvoke() { + int s; Thread t; + if ((s = doExec()) >= 0) { + if (!((t = Thread.currentThread()) instanceof ForkJoinWorkerThread)) + s = externalAwaitDone(); + else { + ForkJoinWorkerThread wt = (ForkJoinWorkerThread)t; + s = awaitJoin(wt.workQueue, wt.pool); + } + } + return s; + } + + // Exception table support + + /** + * Table of exceptions thrown by tasks, to enable reporting by + * callers. Because exceptions are rare, we don't directly keep + * them with task objects, but instead use a weak ref table. Note + * that cancellation exceptions don't appear in the table, but are + * instead recorded as status values. + * + * Note: These statics are initialized below in static block. + */ + private static final ExceptionNode[] exceptionTable; + private static final ReentrantLock exceptionTableLock; + private static final ReferenceQueue exceptionTableRefQueue; + + /** + * Fixed capacity for exceptionTable. + */ + private static final int EXCEPTION_MAP_CAPACITY = 32; + + /** + * Key-value nodes for exception table. The chained hash table + * uses identity comparisons, full locking, and weak references + * for keys. The table has a fixed capacity because it only + * maintains task exceptions long enough for joiners to access + * them, so should never become very large for sustained + * periods. However, since we do not know when the last joiner + * completes, we must use weak references and expunge them. We do + * so on each operation (hence full locking). Also, some thread in + * any ForkJoinPool will call helpExpungeStaleExceptions when its + * pool becomes isQuiescent. + */ + static final class ExceptionNode extends WeakReference>{ + final Throwable ex; + ExceptionNode next; + final long thrower; // use id not ref to avoid weak cycles + ExceptionNode(ForkJoinTask task, Throwable ex, ExceptionNode next) { + super(task, exceptionTableRefQueue); + this.ex = ex; + this.next = next; + this.thrower = Thread.currentThread().getId(); + } + } + + /** + * Records exception and sets exceptional completion. + * + * @return status on exit + */ + private int setExceptionalCompletion(Throwable ex) { + int h = System.identityHashCode(this); + final ReentrantLock lock = exceptionTableLock; + lock.lock(); + try { + expungeStaleExceptions(); + ExceptionNode[] t = exceptionTable; + int i = h & (t.length - 1); + for (ExceptionNode e = t[i]; ; e = e.next) { + if (e == null) { + t[i] = new ExceptionNode(this, ex, t[i]); + break; + } + if (e.get() == this) // already present + break; + } + } finally { + lock.unlock(); + } + return setCompletion(EXCEPTIONAL); + } + + /** + * Cancels, ignoring any exceptions thrown by cancel. Used during + * worker and pool shutdown. Cancel is spec'ed not to throw any + * exceptions, but if it does anyway, we have no recourse during + * shutdown, so guard against this case. 
+ */ + static final void cancelIgnoringExceptions(ForkJoinTask t) { + if (t != null && t.status >= 0) { + try { + t.cancel(false); + } catch (Throwable ignore) { + } + } + } + + /** + * Removes exception node and clears status + */ + private void clearExceptionalCompletion() { + int h = System.identityHashCode(this); + final ReentrantLock lock = exceptionTableLock; + lock.lock(); + try { + ExceptionNode[] t = exceptionTable; + int i = h & (t.length - 1); + ExceptionNode e = t[i]; + ExceptionNode pred = null; + while (e != null) { + ExceptionNode next = e.next; + if (e.get() == this) { + if (pred == null) + t[i] = next; + else + pred.next = next; + break; + } + pred = e; + e = next; + } + expungeStaleExceptions(); + status = 0; + } finally { + lock.unlock(); + } + } + + /** + * Returns a rethrowable exception for the given task, if + * available. To provide accurate stack traces, if the exception + * was not thrown by the current thread, we try to create a new + * exception of the same type as the one thrown, but with the + * recorded exception as its cause. If there is no such + * constructor, we instead try to use a no-arg constructor, + * followed by initCause, to the same effect. If none of these + * apply, or any fail due to other exceptions, we return the + * recorded exception, which is still correct, although it may + * contain a misleading stack trace. + * + * @return the exception, or null if none + */ + private Throwable getThrowableException() { + if (status != EXCEPTIONAL) + return null; + int h = System.identityHashCode(this); + ExceptionNode e; + final ReentrantLock lock = exceptionTableLock; + lock.lock(); + try { + expungeStaleExceptions(); + ExceptionNode[] t = exceptionTable; + e = t[h & (t.length - 1)]; + while (e != null && e.get() != this) + e = e.next; + } finally { + lock.unlock(); + } + Throwable ex; + if (e == null || (ex = e.ex) == null) + return null; + if (e.thrower != Thread.currentThread().getId()) { + Class ec = ex.getClass(); + try { + Constructor noArgCtor = null; + Constructor[] cs = ec.getConstructors();// public ctors only + for (int i = 0; i < cs.length; ++i) { + Constructor c = cs[i]; + Class[] ps = c.getParameterTypes(); + if (ps.length == 0) + noArgCtor = c; + else if (ps.length == 1 && ps[0] == Throwable.class) + return (Throwable)(c.newInstance(ex)); + } + if (noArgCtor != null) { + Throwable wx = (Throwable)(noArgCtor.newInstance()); + wx.initCause(ex); + return wx; + } + } catch (Exception ignore) { + } + } + return ex; + } + + /** + * Poll stale refs and remove them. Call only while holding lock. + */ + private static void expungeStaleExceptions() { + for (Object x; (x = exceptionTableRefQueue.poll()) != null;) { + if (x instanceof ExceptionNode) { + ForkJoinTask key = ((ExceptionNode)x).get(); + ExceptionNode[] t = exceptionTable; + int i = System.identityHashCode(key) & (t.length - 1); + ExceptionNode e = t[i]; + ExceptionNode pred = null; + while (e != null) { + ExceptionNode next = e.next; + if (e == x) { + if (pred == null) + t[i] = next; + else + pred.next = next; + break; + } + pred = e; + e = next; + } + } + } + } + + /** + * If lock is available, poll stale refs and remove them. + * Called from ForkJoinPool when pools become quiescent. 
+ */ + static final void helpExpungeStaleExceptions() { + final ReentrantLock lock = exceptionTableLock; + if (lock.tryLock()) { + try { + expungeStaleExceptions(); + } finally { + lock.unlock(); + } + } + } + + /** + * Report the result of invoke or join; called only upon + * non-normal return of internal versions. + */ + private V reportResult() { + int s; Throwable ex; + if ((s = status) == CANCELLED) + throw new CancellationException(); + if (s == EXCEPTIONAL && (ex = getThrowableException()) != null) + U.throwException(ex); + return getRawResult(); + } + + // public methods + + /** + * Arranges to asynchronously execute this task. While it is not + * necessarily enforced, it is a usage error to fork a task more + * than once unless it has completed and been reinitialized. + * Subsequent modifications to the state of this task or any data + * it operates on are not necessarily consistently observable by + * any thread other than the one executing it unless preceded by a + * call to {@link #join} or related methods, or a call to {@link + * #isDone} returning {@code true}. + * + *
<p>
This method may be invoked only from within {@code + * ForkJoinPool} computations (as may be determined using method + * {@link #inForkJoinPool}). Attempts to invoke in other contexts + * result in exceptions or errors, possibly including {@code + * ClassCastException}. + * + * @return {@code this}, to simplify usage + */ + public final ForkJoinTask fork() { + ForkJoinWorkerThread wt; + (wt = (ForkJoinWorkerThread)Thread.currentThread()). + workQueue.push(this, wt.pool); + return this; + } + + /** + * Returns the result of the computation when it {@link #isDone is + * done}. This method differs from {@link #get()} in that + * abnormal completion results in {@code RuntimeException} or + * {@code Error}, not {@code ExecutionException}, and that + * interrupts of the calling thread do not cause the + * method to abruptly return by throwing {@code + * InterruptedException}. + * + * @return the computed result + */ + public final V join() { + if (doJoin() != NORMAL) + return reportResult(); + else + return getRawResult(); + } + + /** + * Commences performing this task, awaits its completion if + * necessary, and returns its result, or throws an (unchecked) + * {@code RuntimeException} or {@code Error} if the underlying + * computation did so. + * + * @return the computed result + */ + public final V invoke() { + if (doInvoke() != NORMAL) + return reportResult(); + else + return getRawResult(); + } + + /** + * Forks the given tasks, returning when {@code isDone} holds for + * each task or an (unchecked) exception is encountered, in which + * case the exception is rethrown. If more than one task + * encounters an exception, then this method throws any one of + * these exceptions. If any task encounters an exception, the + * other may be cancelled. However, the execution status of + * individual tasks is not guaranteed upon exceptional return. The + * status of each task may be obtained using {@link + * #getException()} and related methods to check if they have been + * cancelled, completed normally or exceptionally, or left + * unprocessed. + * + *
<p>
This method may be invoked only from within {@code + * ForkJoinPool} computations (as may be determined using method + * {@link #inForkJoinPool}). Attempts to invoke in other contexts + * result in exceptions or errors, possibly including {@code + * ClassCastException}. + * + * @param t1 the first task + * @param t2 the second task + * @throws NullPointerException if any task is null + */ + public static void invokeAll(ForkJoinTask t1, ForkJoinTask t2) { + t2.fork(); + t1.invoke(); + t2.join(); + } + + /** + * Forks the given tasks, returning when {@code isDone} holds for + * each task or an (unchecked) exception is encountered, in which + * case the exception is rethrown. If more than one task + * encounters an exception, then this method throws any one of + * these exceptions. If any task encounters an exception, others + * may be cancelled. However, the execution status of individual + * tasks is not guaranteed upon exceptional return. The status of + * each task may be obtained using {@link #getException()} and + * related methods to check if they have been cancelled, completed + * normally or exceptionally, or left unprocessed. + * + *
<p>
This method may be invoked only from within {@code + * ForkJoinPool} computations (as may be determined using method + * {@link #inForkJoinPool}). Attempts to invoke in other contexts + * result in exceptions or errors, possibly including {@code + * ClassCastException}. + * + * @param tasks the tasks + * @throws NullPointerException if any task is null + */ + public static void invokeAll(ForkJoinTask... tasks) { + Throwable ex = null; + int last = tasks.length - 1; + for (int i = last; i >= 0; --i) { + ForkJoinTask t = tasks[i]; + if (t == null) { + if (ex == null) + ex = new NullPointerException(); + } + else if (i != 0) + t.fork(); + else if (t.doInvoke() < NORMAL && ex == null) + ex = t.getException(); + } + for (int i = 1; i <= last; ++i) { + ForkJoinTask t = tasks[i]; + if (t != null) { + if (ex != null) + t.cancel(false); + else if (t.doJoin() < NORMAL) + ex = t.getException(); + } + } + if (ex != null) + U.throwException(ex); + } + + /** + * Forks all tasks in the specified collection, returning when + * {@code isDone} holds for each task or an (unchecked) exception + * is encountered, in which case the exception is rethrown. If + * more than one task encounters an exception, then this method + * throws any one of these exceptions. If any task encounters an + * exception, others may be cancelled. However, the execution + * status of individual tasks is not guaranteed upon exceptional + * return. The status of each task may be obtained using {@link + * #getException()} and related methods to check if they have been + * cancelled, completed normally or exceptionally, or left + * unprocessed. + * + *
<p>
This method may be invoked only from within {@code + * ForkJoinPool} computations (as may be determined using method + * {@link #inForkJoinPool}). Attempts to invoke in other contexts + * result in exceptions or errors, possibly including {@code + * ClassCastException}. + * + * @param tasks the collection of tasks + * @return the tasks argument, to simplify usage + * @throws NullPointerException if tasks or any element are null + */ + public static > Collection invokeAll(Collection tasks) { + if (!(tasks instanceof RandomAccess) || !(tasks instanceof List)) { + invokeAll(tasks.toArray(new ForkJoinTask[tasks.size()])); + return tasks; + } + @SuppressWarnings("unchecked") + List> ts = + (List>) tasks; + Throwable ex = null; + int last = ts.size() - 1; + for (int i = last; i >= 0; --i) { + ForkJoinTask t = ts.get(i); + if (t == null) { + if (ex == null) + ex = new NullPointerException(); + } + else if (i != 0) + t.fork(); + else if (t.doInvoke() < NORMAL && ex == null) + ex = t.getException(); + } + for (int i = 1; i <= last; ++i) { + ForkJoinTask t = ts.get(i); + if (t != null) { + if (ex != null) + t.cancel(false); + else if (t.doJoin() < NORMAL) + ex = t.getException(); + } + } + if (ex != null) + U.throwException(ex); + return tasks; + } + + /** + * Attempts to cancel execution of this task. This attempt will + * fail if the task has already completed or could not be + * cancelled for some other reason. If successful, and this task + * has not started when {@code cancel} is called, execution of + * this task is suppressed. After this method returns + * successfully, unless there is an intervening call to {@link + * #reinitialize}, subsequent calls to {@link #isCancelled}, + * {@link #isDone}, and {@code cancel} will return {@code true} + * and calls to {@link #join} and related methods will result in + * {@code CancellationException}. + * + *
<p>
This method may be overridden in subclasses, but if so, must + * still ensure that these properties hold. In particular, the + * {@code cancel} method itself must not throw exceptions. + * + *
<p>
This method is designed to be invoked by other + * tasks. To terminate the current task, you can just return or + * throw an unchecked exception from its computation method, or + * invoke {@link #completeExceptionally}. + * + * @param mayInterruptIfRunning this value has no effect in the + * default implementation because interrupts are not used to + * control cancellation. + * + * @return {@code true} if this task is now cancelled + */ + public boolean cancel(boolean mayInterruptIfRunning) { + return setCompletion(CANCELLED) == CANCELLED; + } + + public final boolean isDone() { + return status < 0; + } + + public final boolean isCancelled() { + return status == CANCELLED; + } + + /** + * Returns {@code true} if this task threw an exception or was cancelled. + * + * @return {@code true} if this task threw an exception or was cancelled + */ + public final boolean isCompletedAbnormally() { + return status < NORMAL; + } + + /** + * Returns {@code true} if this task completed without throwing an + * exception and was not cancelled. + * + * @return {@code true} if this task completed without throwing an + * exception and was not cancelled + */ + public final boolean isCompletedNormally() { + return status == NORMAL; + } + + /** + * Returns the exception thrown by the base computation, or a + * {@code CancellationException} if cancelled, or {@code null} if + * none or if the method has not yet completed. + * + * @return the exception, or {@code null} if none + */ + public final Throwable getException() { + int s = status; + return ((s >= NORMAL) ? null : + (s == CANCELLED) ? new CancellationException() : + getThrowableException()); + } + + /** + * Completes this task abnormally, and if not already aborted or + * cancelled, causes it to throw the given exception upon + * {@code join} and related operations. This method may be used + * to induce exceptions in asynchronous tasks, or to force + * completion of tasks that would not otherwise complete. Its use + * in other situations is discouraged. This method is + * overridable, but overridden versions must invoke {@code super} + * implementation to maintain guarantees. + * + * @param ex the exception to throw. If this exception is not a + * {@code RuntimeException} or {@code Error}, the actual exception + * thrown will be a {@code RuntimeException} with cause {@code ex}. + */ + public void completeExceptionally(Throwable ex) { + setExceptionalCompletion((ex instanceof RuntimeException) || + (ex instanceof Error) ? ex : + new RuntimeException(ex)); + } + + /** + * Completes this task, and if not already aborted or cancelled, + * returning the given value as the result of subsequent + * invocations of {@code join} and related operations. This method + * may be used to provide results for asynchronous tasks, or to + * provide alternative handling for tasks that would not otherwise + * complete normally. Its use in other situations is + * discouraged. This method is overridable, but overridden + * versions must invoke {@code super} implementation to maintain + * guarantees. + * + * @param value the result value for this task + */ + public void complete(V value) { + try { + setRawResult(value); + } catch (Throwable rex) { + setExceptionalCompletion(rex); + return; + } + setCompletion(NORMAL); + } + + /** + * Waits if necessary for the computation to complete, and then + * retrieves its result. 
+ * + * @return the computed result + * @throws CancellationException if the computation was cancelled + * @throws ExecutionException if the computation threw an + * exception + * @throws InterruptedException if the current thread is not a + * member of a ForkJoinPool and was interrupted while waiting + */ + public final V get() throws InterruptedException, ExecutionException { + int s = (Thread.currentThread() instanceof ForkJoinWorkerThread) ? + doJoin() : externalInterruptibleAwaitDone(0L); + Throwable ex; + if (s == CANCELLED) + throw new CancellationException(); + if (s == EXCEPTIONAL && (ex = getThrowableException()) != null) + throw new ExecutionException(ex); + return getRawResult(); + } + + /** + * Waits if necessary for at most the given time for the computation + * to complete, and then retrieves its result, if available. + * + * @param timeout the maximum time to wait + * @param unit the time unit of the timeout argument + * @return the computed result + * @throws CancellationException if the computation was cancelled + * @throws ExecutionException if the computation threw an + * exception + * @throws InterruptedException if the current thread is not a + * member of a ForkJoinPool and was interrupted while waiting + * @throws TimeoutException if the wait timed out + */ + public final V get(long timeout, TimeUnit unit) + throws InterruptedException, ExecutionException, TimeoutException { + // Messy in part because we measure in nanos, but wait in millis + int s; long millis, nanos; + Thread t = Thread.currentThread(); + if (!(t instanceof ForkJoinWorkerThread)) { + if ((millis = unit.toMillis(timeout)) > 0L) + s = externalInterruptibleAwaitDone(millis); + else + s = status; + } + else if ((s = status) >= 0 && (nanos = unit.toNanos(timeout)) > 0L) { + long deadline = System.nanoTime() + nanos; + ForkJoinWorkerThread wt = (ForkJoinWorkerThread)t; + ForkJoinPool.WorkQueue w = wt.workQueue; + ForkJoinPool p = wt.pool; + if (w.tryUnpush(this)) + doExec(); + boolean blocking = false; + try { + while ((s = status) >= 0) { + if (w.runState < 0) + cancelIgnoringExceptions(this); + else if (!blocking) + blocking = p.tryCompensate(); + else { + millis = TimeUnit.NANOSECONDS.toMillis(nanos); + if (millis > 0L && + U.compareAndSwapInt(this, STATUS, s, s | SIGNAL)) { + try { + synchronized (this) { + if (status >= 0) + wait(millis); + } + } catch (InterruptedException ie) { + } + } + if ((s = status) < 0 || + (nanos = deadline - System.nanoTime()) <= 0L) + break; + } + } + } finally { + if (blocking) + p.incrementActiveCount(); + } + } + if (s != NORMAL) { + Throwable ex; + if (s == CANCELLED) + throw new CancellationException(); + if (s != EXCEPTIONAL) + throw new TimeoutException(); + if ((ex = getThrowableException()) != null) + throw new ExecutionException(ex); + } + return getRawResult(); + } + + /** + * Joins this task, without returning its result or throwing its + * exception. This method may be useful when processing + * collections of tasks when some have been cancelled or otherwise + * known to have aborted. + */ + public final void quietlyJoin() { + doJoin(); + } + + /** + * Commences performing this task and awaits its completion if + * necessary, without returning its result or throwing its + * exception. + */ + public final void quietlyInvoke() { + doInvoke(); + } + + /** + * Possibly executes tasks until the pool hosting the current task + * {@link ForkJoinPool#isQuiescent is quiescent}. 
This method may + * be of use in designs in which many tasks are forked, but none + * are explicitly joined, instead executing them until all are + * processed. + * + *
<p>
This method may be invoked only from within {@code + * ForkJoinPool} computations (as may be determined using method + * {@link #inForkJoinPool}). Attempts to invoke in other contexts + * result in exceptions or errors, possibly including {@code + * ClassCastException}. + */ + public static void helpQuiesce() { + ForkJoinWorkerThread wt = + (ForkJoinWorkerThread)Thread.currentThread(); + wt.pool.helpQuiescePool(wt.workQueue); + } + + /** + * Resets the internal bookkeeping state of this task, allowing a + * subsequent {@code fork}. This method allows repeated reuse of + * this task, but only if reuse occurs when this task has either + * never been forked, or has been forked, then completed and all + * outstanding joins of this task have also completed. Effects + * under any other usage conditions are not guaranteed. + * This method may be useful when executing + * pre-constructed trees of subtasks in loops. + * + *
<p>
Upon completion of this method, {@code isDone()} reports + * {@code false}, and {@code getException()} reports {@code + * null}. However, the value returned by {@code getRawResult} is + * unaffected. To clear this value, you can invoke {@code + * setRawResult(null)}. + */ + public void reinitialize() { + if (status == EXCEPTIONAL) + clearExceptionalCompletion(); + else + status = 0; + } + + /** + * Returns the pool hosting the current task execution, or null + * if this task is executing outside of any ForkJoinPool. + * + * @see #inForkJoinPool + * @return the pool, or {@code null} if none + */ + public static ForkJoinPool getPool() { + Thread t = Thread.currentThread(); + return (t instanceof ForkJoinWorkerThread) ? + ((ForkJoinWorkerThread) t).pool : null; + } + + /** + * Returns {@code true} if the current thread is a {@link + * ForkJoinWorkerThread} executing as a ForkJoinPool computation. + * + * @return {@code true} if the current thread is a {@link + * ForkJoinWorkerThread} executing as a ForkJoinPool computation, + * or {@code false} otherwise + */ + public static boolean inForkJoinPool() { + return Thread.currentThread() instanceof ForkJoinWorkerThread; + } + + /** + * Tries to unschedule this task for execution. This method will + * typically succeed if this task is the most recently forked task + * by the current thread, and has not commenced executing in + * another thread. This method may be useful when arranging + * alternative local processing of tasks that could have been, but + * were not, stolen. + * + *
<p>
This method may be invoked only from within {@code + * ForkJoinPool} computations (as may be determined using method + * {@link #inForkJoinPool}). Attempts to invoke in other contexts + * result in exceptions or errors, possibly including {@code + * ClassCastException}. + * + * @return {@code true} if unforked + */ + public boolean tryUnfork() { + return ((ForkJoinWorkerThread)Thread.currentThread()) + .workQueue.tryUnpush(this); + } + + /** + * Returns an estimate of the number of tasks that have been + * forked by the current worker thread but not yet executed. This + * value may be useful for heuristic decisions about whether to + * fork other tasks. + * + *
<p>
This method may be invoked only from within {@code + * ForkJoinPool} computations (as may be determined using method + * {@link #inForkJoinPool}). Attempts to invoke in other contexts + * result in exceptions or errors, possibly including {@code + * ClassCastException}. + * + * @return the number of tasks + */ + public static int getQueuedTaskCount() { + return ((ForkJoinWorkerThread) Thread.currentThread()) + .workQueue.queueSize(); + } + + /** + * Returns an estimate of how many more locally queued tasks are + * held by the current worker thread than there are other worker + * threads that might steal them. This value may be useful for + * heuristic decisions about whether to fork other tasks. In many + * usages of ForkJoinTasks, at steady state, each worker should + * aim to maintain a small constant surplus (for example, 3) of + * tasks, and to process computations locally if this threshold is + * exceeded. + * + *
<p>
This method may be invoked only from within {@code + * ForkJoinPool} computations (as may be determined using method + * {@link #inForkJoinPool}). Attempts to invoke in other contexts + * result in exceptions or errors, possibly including {@code + * ClassCastException}. + * + * @return the surplus number of tasks, which may be negative + */ + public static int getSurplusQueuedTaskCount() { + /* + * The aim of this method is to return a cheap heuristic guide + * for task partitioning when programmers, frameworks, tools, + * or languages have little or no idea about task granularity. + * In essence by offering this method, we ask users only about + * tradeoffs in overhead vs expected throughput and its + * variance, rather than how finely to partition tasks. + * + * In a steady state strict (tree-structured) computation, + * each thread makes available for stealing enough tasks for + * other threads to remain active. Inductively, if all threads + * play by the same rules, each thread should make available + * only a constant number of tasks. + * + * The minimum useful constant is just 1. But using a value of + * 1 would require immediate replenishment upon each steal to + * maintain enough tasks, which is infeasible. Further, + * partitionings/granularities of offered tasks should + * minimize steal rates, which in general means that threads + * nearer the top of computation tree should generate more + * than those nearer the bottom. In perfect steady state, each + * thread is at approximately the same level of computation + * tree. However, producing extra tasks amortizes the + * uncertainty of progress and diffusion assumptions. + * + * So, users will want to use values larger, but not much + * larger than 1 to both smooth over transient shortages and + * hedge against uneven progress; as traded off against the + * cost of extra task overhead. We leave the user to pick a + * threshold value to compare with the results of this call to + * guide decisions, but recommend values such as 3. + * + * When all threads are active, it is on average OK to + * estimate surplus strictly locally. In steady-state, if one + * thread is maintaining say 2 surplus tasks, then so are + * others. So we can just use estimated queue length. + * However, this strategy alone leads to serious mis-estimates + * in some non-steady-state conditions (ramp-up, ramp-down, + * other stalls). We can detect many of these by further + * considering the number of "idle" threads, that are known to + * have zero queued tasks, so compensate by a factor of + * (#idle/#active) threads. + */ + ForkJoinWorkerThread wt = + (ForkJoinWorkerThread)Thread.currentThread(); + return wt.workQueue.queueSize() - wt.pool.idlePerActive(); + } + + // Extension methods + + /** + * Returns the result that would be returned by {@link #join}, even + * if this task completed abnormally, or {@code null} if this task + * is not known to have been completed. This method is designed + * to aid debugging, as well as to support extensions. Its use in + * any other context is discouraged. + * + * @return the result, or {@code null} if not completed + */ + public abstract V getRawResult(); + + /** + * Forces the given value to be returned as a result. This method + * is designed to support extensions, and should not in general be + * called otherwise. + * + * @param value the value + */ + protected abstract void setRawResult(V value); + + /** + * Immediately performs the base action of this task. 
This method + * is designed to support extensions, and should not in general be + * called otherwise. The return value controls whether this task + * is considered to be done normally. It may return false in + * asynchronous actions that require explicit invocations of + * {@link #complete} to become joinable. It may also throw an + * (unchecked) exception to indicate abnormal exit. + * + * @return {@code true} if completed normally + */ + protected abstract boolean exec(); + + /** + * Returns, but does not unschedule or execute, a task queued by + * the current thread but not yet executed, if one is immediately + * available. There is no guarantee that this task will actually + * be polled or executed next. Conversely, this method may return + * null even if a task exists but cannot be accessed without + * contention with other threads. This method is designed + * primarily to support extensions, and is unlikely to be useful + * otherwise. + * + *
<p>
This method may be invoked only from within {@code + * ForkJoinPool} computations (as may be determined using method + * {@link #inForkJoinPool}). Attempts to invoke in other contexts + * result in exceptions or errors, possibly including {@code + * ClassCastException}. + * + * @return the next task, or {@code null} if none are available + */ + protected static ForkJoinTask peekNextLocalTask() { + return ((ForkJoinWorkerThread) Thread.currentThread()).workQueue.peek(); + } + + /** + * Unschedules and returns, without executing, the next task + * queued by the current thread but not yet executed. This method + * is designed primarily to support extensions, and is unlikely to + * be useful otherwise. + * + *
<p>
This method may be invoked only from within {@code + * ForkJoinPool} computations (as may be determined using method + * {@link #inForkJoinPool}). Attempts to invoke in other contexts + * result in exceptions or errors, possibly including {@code + * ClassCastException}. + * + * @return the next task, or {@code null} if none are available + */ + protected static ForkJoinTask pollNextLocalTask() { + return ((ForkJoinWorkerThread) Thread.currentThread()) + .workQueue.nextLocalTask(); + } + + /** + * Unschedules and returns, without executing, the next task + * queued by the current thread but not yet executed, if one is + * available, or if not available, a task that was forked by some + * other thread, if available. Availability may be transient, so a + * {@code null} result does not necessarily imply quiescence + * of the pool this task is operating in. This method is designed + * primarily to support extensions, and is unlikely to be useful + * otherwise. + * + *
<p>
This method may be invoked only from within {@code + * ForkJoinPool} computations (as may be determined using method + * {@link #inForkJoinPool}). Attempts to invoke in other contexts + * result in exceptions or errors, possibly including {@code + * ClassCastException}. + * + * @return a task, or {@code null} if none are available + */ + protected static ForkJoinTask pollTask() { + ForkJoinWorkerThread wt = + (ForkJoinWorkerThread)Thread.currentThread(); + return wt.pool.nextTaskFor(wt.workQueue); + } + + // Mark-bit operations + + /** + * Returns true if this task is marked. + * + * @return true if this task is marked + * @since 1.8 + */ + public final boolean isMarkedForkJoinTask() { + return (status & MARKED) != 0; + } + + /** + * Atomically sets the mark on this task. + * + * @return true if this task was previously unmarked + * @since 1.8 + */ + public final boolean markForkJoinTask() { + for (int s;;) { + if (((s = status) & MARKED) != 0) + return false; + if (U.compareAndSwapInt(this, STATUS, s, s | MARKED)) + return true; + } + } + + /** + * Atomically clears the mark on this task. + * + * @return true if this task was previously marked + * @since 1.8 + */ + public final boolean unmarkForkJoinTask() { + for (int s;;) { + if (((s = status) & MARKED) == 0) + return false; + if (U.compareAndSwapInt(this, STATUS, s, s & ~MARKED)) + return true; + } + } + + /** + * Adaptor for Runnables. This implements RunnableFuture + * to be compliant with AbstractExecutorService constraints + * when used in ForkJoinPool. + */ + static final class AdaptedRunnable extends ForkJoinTask + implements RunnableFuture { + final Runnable runnable; + final T resultOnCompletion; + T result; + AdaptedRunnable(Runnable runnable, T result) { + if (runnable == null) throw new NullPointerException(); + this.runnable = runnable; + this.resultOnCompletion = result; + } + public T getRawResult() { return result; } + public void setRawResult(T v) { result = v; } + public boolean exec() { + runnable.run(); + result = resultOnCompletion; + return true; + } + public void run() { invoke(); } + private static final long serialVersionUID = 5232453952276885070L; + } + + /** + * Adaptor for Callables + */ + static final class AdaptedCallable extends ForkJoinTask + implements RunnableFuture { + final Callable callable; + T result; + AdaptedCallable(Callable callable) { + if (callable == null) throw new NullPointerException(); + this.callable = callable; + } + public T getRawResult() { return result; } + public void setRawResult(T v) { result = v; } + public boolean exec() { + try { + result = callable.call(); + return true; + } catch (Error err) { + throw err; + } catch (RuntimeException rex) { + throw rex; + } catch (Exception ex) { + throw new RuntimeException(ex); + } + } + public void run() { invoke(); } + private static final long serialVersionUID = 2838392045355241008L; + } + + /** + * Returns a new {@code ForkJoinTask} that performs the {@code run} + * method of the given {@code Runnable} as its action, and returns + * a null result upon {@link #join}. + * + * @param runnable the runnable action + * @return the task + */ + public static ForkJoinTask adapt(Runnable runnable) { + return new AdaptedRunnable(runnable, null); + } + + /** + * Returns a new {@code ForkJoinTask} that performs the {@code run} + * method of the given {@code Runnable} as its action, and returns + * the given result upon {@link #join}. 
+ * + * @param runnable the runnable action + * @param result the result upon completion + * @return the task + */ + public static ForkJoinTask adapt(Runnable runnable, T result) { + return new AdaptedRunnable(runnable, result); + } + + /** + * Returns a new {@code ForkJoinTask} that performs the {@code call} + * method of the given {@code Callable} as its action, and returns + * its result upon {@link #join}, translating any checked exceptions + * encountered into {@code RuntimeException}. + * + * @param callable the callable action + * @return the task + */ + public static ForkJoinTask adapt(Callable callable) { + return new AdaptedCallable(callable); + } + + // Serialization support + + private static final long serialVersionUID = -7721805057305804111L; + + /** + * Saves this task to a stream (that is, serializes it). + * + * @serialData the current run status and the exception thrown + * during execution, or {@code null} if none + */ + private void writeObject(java.io.ObjectOutputStream s) + throws java.io.IOException { + s.defaultWriteObject(); + s.writeObject(getException()); + } + + /** + * Reconstitutes this task from a stream (that is, deserializes it). + */ + private void readObject(java.io.ObjectInputStream s) + throws java.io.IOException, ClassNotFoundException { + s.defaultReadObject(); + Object ex = s.readObject(); + if (ex != null) + setExceptionalCompletion((Throwable)ex); + } + + // Unsafe mechanics + private static final sun.misc.Unsafe U; + private static final long STATUS; + static { + exceptionTableLock = new ReentrantLock(); + exceptionTableRefQueue = new ReferenceQueue(); + exceptionTable = new ExceptionNode[EXCEPTION_MAP_CAPACITY]; + try { + U = getUnsafe(); + STATUS = U.objectFieldOffset + (ForkJoinTask.class.getDeclaredField("status")); + } catch (Exception e) { + throw new Error(e); + } + } + + /** + * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package. + * Replace with a simple call to Unsafe.getUnsafe when integrating + * into a jdk. + * + * @return a sun.misc.Unsafe + */ + private static sun.misc.Unsafe getUnsafe() { + try { + return sun.misc.Unsafe.getUnsafe(); + } catch (SecurityException se) { + try { + return java.security.AccessController.doPrivileged + (new java.security + .PrivilegedExceptionAction() { + public sun.misc.Unsafe run() throws Exception { + java.lang.reflect.Field f = sun.misc + .Unsafe.class.getDeclaredField("theUnsafe"); + f.setAccessible(true); + return (sun.misc.Unsafe) f.get(null); + }}); + } catch (java.security.PrivilegedActionException e) { + throw new RuntimeException("Could not initialize intrinsics", + e.getCause()); + } + } + } +} diff --git a/akka-actor/src/main/java/akka/jsr166y/ForkJoinWorkerThread.java b/akka-actor/src/main/java/akka/jsr166y/ForkJoinWorkerThread.java new file mode 100644 index 0000000000..61b0cce979 --- /dev/null +++ b/akka-actor/src/main/java/akka/jsr166y/ForkJoinWorkerThread.java @@ -0,0 +1,119 @@ +/* + * Written by Doug Lea with assistance from members of JCP JSR-166 + * Expert Group and released to the public domain, as explained at + * http://creativecommons.org/publicdomain/zero/1.0/ + */ + +package akka.jsr166y; + +/** + * A thread managed by a {@link ForkJoinPool}, which executes + * {@link ForkJoinTask}s. + * This class is subclassable solely for the sake of adding + * functionality -- there are no overridable methods dealing with + * scheduling or execution. However, you can override initialization + * and termination methods surrounding the main task processing loop. 
+ * If you do create such a subclass, you will also need to supply a + * custom {@link ForkJoinPool.ForkJoinWorkerThreadFactory} to use it + * in a {@code ForkJoinPool}. + * + * @since 1.7 + * @author Doug Lea + */ +public class ForkJoinWorkerThread extends Thread { + /* + * ForkJoinWorkerThreads are managed by ForkJoinPools and perform + * ForkJoinTasks. For explanation, see the internal documentation + * of class ForkJoinPool. + */ + + final ForkJoinPool.WorkQueue workQueue; // Work-stealing mechanics + final ForkJoinPool pool; // the pool this thread works in + + /** + * Creates a ForkJoinWorkerThread operating in the given pool. + * + * @param pool the pool this thread works in + * @throws NullPointerException if pool is null + */ + protected ForkJoinWorkerThread(ForkJoinPool pool) { + super(pool.nextWorkerName()); + setDaemon(true); + Thread.UncaughtExceptionHandler ueh = pool.ueh; + if (ueh != null) + setUncaughtExceptionHandler(ueh); + this.pool = pool; + this.workQueue = new ForkJoinPool.WorkQueue(this, pool.localMode); + pool.registerWorker(this); + } + + /** + * Returns the pool hosting this thread. + * + * @return the pool + */ + public ForkJoinPool getPool() { + return pool; + } + + /** + * Returns the index number of this thread in its pool. The + * returned value ranges from zero to the maximum number of + * threads (minus one) that have ever been created in the pool. + * This method may be useful for applications that track status or + * collect results per-worker rather than per-task. + * + * @return the index number + */ + public int getPoolIndex() { + return workQueue.poolIndex; + } + + /** + * Initializes internal state after construction but before + * processing any tasks. If you override this method, you must + * invoke {@code super.onStart()} at the beginning of the method. + * Initialization requires care: Most fields must have legal + * default values, to ensure that attempted accesses from other + * threads work correctly even before this thread starts + * processing tasks. + */ + protected void onStart() { + } + + /** + * Performs cleanup associated with termination of this worker + * thread. If you override this method, you must invoke + * {@code super.onTermination} at the end of the overridden method. + * + * @param exception the exception causing this thread to abort due + * to an unrecoverable error, or {@code null} if completed normally + */ + protected void onTermination(Throwable exception) { + } + + /** + * This method is required to be public, but should never be + * called explicitly. It performs the main run loop to execute + * {@link ForkJoinTask}s. + */ + public void run() { + Throwable exception = null; + try { + onStart(); + pool.runWorker(this); + } catch (Throwable ex) { + exception = ex; + } finally { + try { + onTermination(exception); + } catch (Throwable ex) { + if (exception == null) + exception = ex; + } finally { + pool.deregisterWorker(this, exception); + } + } + } +} + diff --git a/akka-actor/src/main/java/akka/jsr166y/RecursiveAction.java b/akka-actor/src/main/java/akka/jsr166y/RecursiveAction.java new file mode 100644 index 0000000000..c13c513171 --- /dev/null +++ b/akka-actor/src/main/java/akka/jsr166y/RecursiveAction.java @@ -0,0 +1,164 @@ +/* + * Written by Doug Lea with assistance from members of JCP JSR-166 + * Expert Group and released to the public domain, as explained at + * http://creativecommons.org/publicdomain/zero/1.0/ + */ + +package akka.jsr166y; + +/** + * A recursive resultless {@link ForkJoinTask}. 
This class + * establishes conventions to parameterize resultless actions as + * {@code Void} {@code ForkJoinTask}s. Because {@code null} is the + * only valid value of type {@code Void}, methods such as {@code join} + * always return {@code null} upon completion. + * + *
<p>
<b>Sample Usages.</b> Here is a simple but complete ForkJoin + * sort that sorts a given {@code long[]} array: + * + *
<pre> {@code
+ * static class SortTask extends RecursiveAction {
+ *   final long[] array; final int lo, hi;
+ *   SortTask(long[] array, int lo, int hi) {
+ *     this.array = array; this.lo = lo; this.hi = hi;
+ *   }
+ *   SortTask(long[] array) { this(array, 0, array.length); }
+ *   protected void compute() {
+ *     if (hi - lo < THRESHOLD)
+ *       sortSequentially(lo, hi);
+ *     else {
+ *       int mid = (lo + hi) >>> 1;
+ *       invokeAll(new SortTask(array, lo, mid),
+ *                 new SortTask(array, mid, hi));
+ *       merge(lo, mid, hi);
+ *     }
+ *   }
+ *   // implementation details follow:
+ *   final static int THRESHOLD = 1000;
+ *   void sortSequentially(int lo, int hi) {
+ *     Arrays.sort(array, lo, hi);
+ *   }
+ *   void merge(int lo, int mid, int hi) {
+ *     long[] buf = Arrays.copyOfRange(array, lo, mid);
+ *     for (int i = 0, j = lo, k = mid; i < buf.length; j++)
+ *       array[j] = (k == hi || buf[i] < array[k]) ?
+ *         buf[i++] : array[k++];
+ *   }
+ * }}</pre>
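+ *
+ * (Editor's sketch, not part of the original javadoc: one way to run
+ * the sort above; {@code anArray} is assumed to be an existing
+ * {@code long[]}.)
+ * <pre> {@code
+ * ForkJoinPool pool = new ForkJoinPool();
+ * pool.invoke(new SortTask(anArray));}</pre>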
+ * + * You could then sort {@code anArray} by creating {@code new + * SortTask(anArray)} and invoking it in a ForkJoinPool. As a more + * concrete simple example, the following task increments each element + * of an array: + *
+ * <pre> {@code
+ * class IncrementTask extends RecursiveAction {
+ *   final long[] array; final int lo, hi;
+ *   IncrementTask(long[] array, int lo, int hi) {
+ *     this.array = array; this.lo = lo; this.hi = hi;
+ *   }
+ *   protected void compute() {
+ *     if (hi - lo < THRESHOLD) {
+ *       for (int i = lo; i < hi; ++i)
+ *         array[i]++;
+ *     }
+ *     else {
+ *       int mid = (lo + hi) >>> 1;
+ *       invokeAll(new IncrementTask(array, lo, mid),
+ *                 new IncrementTask(array, mid, hi));
+ *     }
+ *   }
+ * }}</pre>
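+ *
+ * (Editor's sketch, not part of the original javadoc: running the
+ * increment task over a whole array; assumes a THRESHOLD constant
+ * as in the sort example above.)
+ * <pre> {@code
+ * long[] data = new long[1000000];
+ * new ForkJoinPool().invoke(new IncrementTask(data, 0, data.length));}</pre>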
+ * + *
<p>
The following example illustrates some refinements and idioms + * that may lead to better performance: RecursiveActions need not be + * fully recursive, so long as they maintain the basic + * divide-and-conquer approach. Here is a class that sums the squares + * of each element of a double array, by subdividing out only the + * right-hand-sides of repeated divisions by two, and keeping track of + * them with a chain of {@code next} references. It uses a dynamic + * threshold based on method {@code getSurplusQueuedTaskCount}, but + * counterbalances potential excess partitioning by directly + * performing leaf actions on unstolen tasks rather than further + * subdividing. + * + *
<pre> {@code
+ * double sumOfSquares(ForkJoinPool pool, double[] array) {
+ *   int n = array.length;
+ *   Applyer a = new Applyer(array, 0, n, null);
+ *   pool.invoke(a);
+ *   return a.result;
+ * }
+ *
+ * class Applyer extends RecursiveAction {
+ *   final double[] array;
+ *   final int lo, hi;
+ *   double result;
+ *   Applyer next; // keeps track of right-hand-side tasks
+ *   Applyer(double[] array, int lo, int hi, Applyer next) {
+ *     this.array = array; this.lo = lo; this.hi = hi;
+ *     this.next = next;
+ *   }
+ *
+ *   double atLeaf(int l, int h) {
+ *     double sum = 0;
+ *     for (int i = l; i < h; ++i) // perform leftmost base step
+ *       sum += array[i] * array[i];
+ *     return sum;
+ *   }
+ *
+ *   protected void compute() {
+ *     int l = lo;
+ *     int h = hi;
+ *     Applyer right = null;
+ *     while (h - l > 1 && getSurplusQueuedTaskCount() <= 3) {
+ *        int mid = (l + h) >>> 1;
+ *        right = new Applyer(array, mid, h, right);
+ *        right.fork();
+ *        h = mid;
+ *     }
+ *     double sum = atLeaf(l, h);
+ *     while (right != null) {
+ *       if (right.tryUnfork()) // directly calculate if not stolen
+ *         sum += right.atLeaf(right.lo, right.hi);
+ *       else {
+ *         right.join();
+ *         sum += right.result;
+ *       }
+ *       right = right.next;
+ *     }
+ *     result = sum;
+ *   }
+ * }}</pre>
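+ *
+ * (Editor's sketch, not part of the original javadoc: driving the
+ * {@code sumOfSquares} method above.)
+ * <pre> {@code
+ * double[] xs = {1.0, 2.0, 3.0};
+ * double ss = sumOfSquares(new ForkJoinPool(), xs); // 14.0}</pre>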
+ * + * @since 1.7 + * @author Doug Lea + */ +public abstract class RecursiveAction extends ForkJoinTask { + private static final long serialVersionUID = 5232453952276485070L; + + /** + * The main computation performed by this task. + */ + protected abstract void compute(); + + /** + * Always returns {@code null}. + * + * @return {@code null} always + */ + public final Void getRawResult() { return null; } + + /** + * Requires null completion value. + */ + protected final void setRawResult(Void mustBeNull) { } + + /** + * Implements execution conventions for RecursiveActions. + */ + protected final boolean exec() { + compute(); + return true; + } + +} diff --git a/akka-actor/src/main/java/akka/jsr166y/RecursiveTask.java b/akka-actor/src/main/java/akka/jsr166y/RecursiveTask.java new file mode 100644 index 0000000000..12378ee6c8 --- /dev/null +++ b/akka-actor/src/main/java/akka/jsr166y/RecursiveTask.java @@ -0,0 +1,68 @@ +/* + * Written by Doug Lea with assistance from members of JCP JSR-166 + * Expert Group and released to the public domain, as explained at + * http://creativecommons.org/publicdomain/zero/1.0/ + */ + +package akka.jsr166y; + +/** + * A recursive result-bearing {@link ForkJoinTask}. + * + *
<p>
For a classic example, here is a task computing Fibonacci numbers: + * + *
<pre> {@code
+ * class Fibonacci extends RecursiveTask<Integer> {
+ *   final int n;
+ *   Fibonacci(int n) { this.n = n; }
+ *   Integer compute() {
+ *     if (n <= 1)
+ *        return n;
+ *     Fibonacci f1 = new Fibonacci(n - 1);
+ *     f1.fork();
+ *     Fibonacci f2 = new Fibonacci(n - 2);
+ *     return f2.compute() + f1.join();
+ *   }
+ * }}</pre>
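+ *
+ * (Editor's sketch, not part of the original javadoc: the same task
+ * with a sequential cutoff, as recommended in the note that follows;
+ * {@code seqFib} stands for any plain sequential implementation.)
+ * <pre> {@code
+ * Integer compute() {
+ *   if (n <= 10)
+ *     return seqFib(n);       // solve small inputs sequentially
+ *   Fibonacci f1 = new Fibonacci(n - 1);
+ *   f1.fork();
+ *   Fibonacci f2 = new Fibonacci(n - 2);
+ *   return f2.compute() + f1.join();
+ * }}</pre>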
+ * + * However, besides being a dumb way to compute Fibonacci functions + * (there is a simple fast linear algorithm that you'd use in + * practice), this is likely to perform poorly because the smallest + * subtasks are too small to be worthwhile splitting up. Instead, as + * is the case for nearly all fork/join applications, you'd pick some + * minimum granularity size (for example 10 here) for which you always + * sequentially solve rather than subdividing. + * + * @since 1.7 + * @author Doug Lea + */ +public abstract class RecursiveTask extends ForkJoinTask { + private static final long serialVersionUID = 5232453952276485270L; + + /** + * The result of the computation. + */ + V result; + + /** + * The main computation performed by this task. + */ + protected abstract V compute(); + + public final V getRawResult() { + return result; + } + + protected final void setRawResult(V value) { + result = value; + } + + /** + * Implements execution conventions for RecursiveTask. + */ + protected final boolean exec() { + result = compute(); + return true; + } + +} diff --git a/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala b/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala index f8927f667a..687c3d8191 100644 --- a/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala +++ b/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala @@ -8,6 +8,7 @@ import java.util.Collection import java.util.concurrent.atomic.AtomicLong import akka.util.Duration import java.util.concurrent._ +import akka.jsr166y._ object ThreadPoolConfig { type QueueFactory = () ⇒ BlockingQueue[Runnable] @@ -160,9 +161,15 @@ object MonitorableThreadFactory { case class MonitorableThreadFactory(name: String, daemonic: Boolean, exceptionHandler: Thread.UncaughtExceptionHandler = MonitorableThreadFactory.doNothing) - extends ThreadFactory { + extends ThreadFactory with ForkJoinPool.ForkJoinWorkerThreadFactory { protected val counter = new AtomicLong + def newThread(pool: ForkJoinPool): ForkJoinWorkerThread = { + val t = ForkJoinPool.defaultForkJoinWorkerThreadFactory.newThread(pool) + t.setDaemon(daemonic) + t + } + def newThread(runnable: Runnable) = { val t = new Thread(runnable, name + counter.incrementAndGet()) t.setUncaughtExceptionHandler(exceptionHandler) From 9318f700c568c5e4af34e59eb58e58de9d3f61fd Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Fri, 27 Jan 2012 14:58:17 +0100 Subject: [PATCH 09/94] Fixing so that it's possible to add the FJ config to MessageDispatcherConfigurator --- .../akka/dispatch/AbstractDispatcher.scala | 2 +- .../akka/dispatch/BalancingDispatcher.scala | 4 ++-- .../scala/akka/dispatch/Dispatchers.scala | 19 ++++++++++++++----- 3 files changed, 17 insertions(+), 8 deletions(-) diff --git a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala index 29be04fe40..7b17a0e2d7 100644 --- a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala @@ -335,7 +335,7 @@ abstract class MessageDispatcherConfigurator(val config: Config, val prerequisit def configureThreadPool( config: Config, - createDispatcher: ⇒ (ThreadPoolConfig) ⇒ MessageDispatcher): ThreadPoolConfigDispatcherBuilder = { + createDispatcher: ⇒ (ExecutorServiceFactoryProvider) ⇒ MessageDispatcher): ThreadPoolConfigDispatcherBuilder = { import ThreadPoolConfigDispatcherBuilder.conf_? 
//Apply the following options to the config if they are present in the config diff --git a/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala index c4742df81a..8542ac69c8 100644 --- a/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala @@ -31,9 +31,9 @@ class BalancingDispatcher( throughput: Int, throughputDeadlineTime: Duration, mailboxType: MailboxType, - config: ThreadPoolConfig, + _executorServiceFactoryProvider: ExecutorServiceFactoryProvider, _shutdownTimeout: Duration) - extends Dispatcher(_prerequisites, _id, throughput, throughputDeadlineTime, mailboxType, config, _shutdownTimeout) { + extends Dispatcher(_prerequisites, _id, throughput, throughputDeadlineTime, mailboxType, _executorServiceFactoryProvider, _shutdownTimeout) { val buddies = new ConcurrentSkipListSet[ActorCell](akka.util.Helpers.IdentityHashComparator) val rebalance = new AtomicBoolean(false) diff --git a/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala b/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala index 3871905905..0996551f9c 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala @@ -205,10 +205,19 @@ class PinnedDispatcherConfigurator(config: Config, prerequisites: DispatcherPrer /** * Creates new dispatcher for each invocation. */ - override def dispatcher(): MessageDispatcher = configureThreadPool(config, - threadPoolConfig ⇒ - new PinnedDispatcher(prerequisites, null, config.getString("id"), mailboxType, - Duration(config.getMilliseconds("shutdown-timeout"), TimeUnit.MILLISECONDS), - threadPoolConfig)).build + override def dispatcher(): MessageDispatcher = configureThreadPool(config, { + case t: ThreadPoolConfig ⇒ new PinnedDispatcher( + prerequisites, null, config.getString("id"), mailboxType, + Duration(config.getMilliseconds("shutdown-timeout"), TimeUnit.MILLISECONDS), t) + case other ⇒ + prerequisites.eventStream.publish( + Warning("PinnedDispatcherConfigurator", + this.getClass, + "PinnedDispatcher [%s] not configured to use ThreadPoolExecutor, falling back to default config.".format( + config.getString("id")))) + new PinnedDispatcher( + prerequisites, null, config.getString("id"), mailboxType, + Duration(config.getMilliseconds("shutdown-timeout"), TimeUnit.MILLISECONDS), ThreadPoolConfig()) + }).build } From b61ade5edaf2112fe25db1db24172d75083ae88b Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Fri, 27 Jan 2012 15:19:48 +0100 Subject: [PATCH 10/94] Fixing so that PinnedDispatcher always uses the correct ThreadPool --- .../src/main/scala/akka/dispatch/Dispatchers.scala | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala b/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala index 0996551f9c..ae1ac40606 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala @@ -202,22 +202,24 @@ class BalancingDispatcherConfigurator(config: Config, prerequisites: DispatcherP */ class PinnedDispatcherConfigurator(config: Config, prerequisites: DispatcherPrerequisites) extends MessageDispatcherConfigurator(config, prerequisites) { + + def createPinnedDispatcherWith(tpc: ThreadPoolConfig): PinnedDispatcher = + new PinnedDispatcher( + prerequisites, null, config.getString("id"), 
mailboxType, + Duration(config.getMilliseconds("shutdown-timeout"), TimeUnit.MILLISECONDS), tpc) + /** * Creates new dispatcher for each invocation. */ override def dispatcher(): MessageDispatcher = configureThreadPool(config, { - case t: ThreadPoolConfig ⇒ new PinnedDispatcher( - prerequisites, null, config.getString("id"), mailboxType, - Duration(config.getMilliseconds("shutdown-timeout"), TimeUnit.MILLISECONDS), t) + case t: ThreadPoolConfig ⇒ createPinnedDispatcherWith(t) case other ⇒ prerequisites.eventStream.publish( Warning("PinnedDispatcherConfigurator", this.getClass, "PinnedDispatcher [%s] not configured to use ThreadPoolExecutor, falling back to default config.".format( config.getString("id")))) - new PinnedDispatcher( - prerequisites, null, config.getString("id"), mailboxType, - Duration(config.getMilliseconds("shutdown-timeout"), TimeUnit.MILLISECONDS), ThreadPoolConfig()) + createPinnedDispatcherWith(ThreadPoolConfig()) }).build } From b045383a728a716094fe8f92097a4e758cbd28a9 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Fri, 27 Jan 2012 15:30:24 +0100 Subject: [PATCH 11/94] Changing signature of the createThreadPool in MDC to return the MD --- .../src/test/scala/akka/actor/dispatch/ActorModelSpec.scala | 4 ++-- .../src/main/scala/akka/dispatch/AbstractDispatcher.scala | 4 ++-- akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala | 6 +++--- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala index e2b697a08f..57c3567c4e 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala @@ -456,7 +456,7 @@ object DispatcherModelSpec { Duration(config.getNanoseconds("throughput-deadline-time"), TimeUnit.NANOSECONDS), mailboxType, threadPoolConfig, - Duration(config.getMilliseconds("shutdown-timeout"), TimeUnit.MILLISECONDS)) with MessageDispatcherInterceptor).build + Duration(config.getMilliseconds("shutdown-timeout"), TimeUnit.MILLISECONDS)) with MessageDispatcherInterceptor) } override def dispatcher(): MessageDispatcher = instance @@ -530,7 +530,7 @@ object BalancingDispatcherModelSpec { Duration(config.getNanoseconds("throughput-deadline-time"), TimeUnit.NANOSECONDS), mailboxType, threadPoolConfig, - Duration(config.getMilliseconds("shutdown-timeout"), TimeUnit.MILLISECONDS)) with MessageDispatcherInterceptor).build + Duration(config.getMilliseconds("shutdown-timeout"), TimeUnit.MILLISECONDS)) with MessageDispatcherInterceptor) } override def dispatcher(): MessageDispatcher = instance diff --git a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala index 7b17a0e2d7..bb4e3e42e7 100644 --- a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala @@ -335,7 +335,7 @@ abstract class MessageDispatcherConfigurator(val config: Config, val prerequisit def configureThreadPool( config: Config, - createDispatcher: ⇒ (ExecutorServiceFactoryProvider) ⇒ MessageDispatcher): ThreadPoolConfigDispatcherBuilder = { + createDispatcher: ⇒ (ExecutorServiceFactoryProvider) ⇒ MessageDispatcher): MessageDispatcher = { import ThreadPoolConfigDispatcherBuilder.conf_? 
//Apply the following options to the config if they are present in the config @@ -354,6 +354,6 @@ abstract class MessageDispatcherConfigurator(val config: Config, val prerequisit case x ⇒ throw new IllegalArgumentException("[%s] is not a valid task-queue-type [array|linked]!" format x) } case _ ⇒ None - })(queueFactory ⇒ _.setQueueFactory(queueFactory))) + })(queueFactory ⇒ _.setQueueFactory(queueFactory))).build } } diff --git a/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala b/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala index ae1ac40606..29ddc6c495 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala @@ -164,7 +164,7 @@ class DispatcherConfigurator(config: Config, prerequisites: DispatcherPrerequisi Duration(config.getNanoseconds("throughput-deadline-time"), TimeUnit.NANOSECONDS), mailboxType, threadPoolConfig, - Duration(config.getMilliseconds("shutdown-timeout"), TimeUnit.MILLISECONDS))).build + Duration(config.getMilliseconds("shutdown-timeout"), TimeUnit.MILLISECONDS))) /** * Returns the same dispatcher instance for each invocation @@ -187,7 +187,7 @@ class BalancingDispatcherConfigurator(config: Config, prerequisites: DispatcherP config.getInt("throughput"), Duration(config.getNanoseconds("throughput-deadline-time"), TimeUnit.NANOSECONDS), mailboxType, threadPoolConfig, - Duration(config.getMilliseconds("shutdown-timeout"), TimeUnit.MILLISECONDS))).build + Duration(config.getMilliseconds("shutdown-timeout"), TimeUnit.MILLISECONDS))) /** * Returns the same dispatcher instance for each invocation @@ -220,6 +220,6 @@ class PinnedDispatcherConfigurator(config: Config, prerequisites: DispatcherPrer "PinnedDispatcher [%s] not configured to use ThreadPoolExecutor, falling back to default config.".format( config.getString("id")))) createPinnedDispatcherWith(ThreadPoolConfig()) - }).build + }) } From dcde34f8a015dabf91c1ec68aa782fb6202a5863 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Fri, 27 Jan 2012 19:57:24 +0100 Subject: [PATCH 12/94] Updating to latest version of FJP --- .../main/java/akka/jsr166y/ForkJoinPool.java | 198 ++++++++++-------- 1 file changed, 112 insertions(+), 86 deletions(-) diff --git a/akka-actor/src/main/java/akka/jsr166y/ForkJoinPool.java b/akka-actor/src/main/java/akka/jsr166y/ForkJoinPool.java index e5d7bedb2c..f6eb5de94e 100644 --- a/akka-actor/src/main/java/akka/jsr166y/ForkJoinPool.java +++ b/akka-actor/src/main/java/akka/jsr166y/ForkJoinPool.java @@ -6,8 +6,6 @@ package akka.jsr166y; -import akka.util.Unsafe; - import java.util.ArrayList; import java.util.Arrays; import java.util.Collection; @@ -25,6 +23,7 @@ import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; import java.util.concurrent.locks.ReentrantLock; import java.util.concurrent.locks.Condition; +import akka.util.Unsafe; /** * An {@link ExecutorService} for running {@link ForkJoinTask}s. @@ -197,20 +196,22 @@ public class ForkJoinPool extends AbstractExecutorService { * WorkQueues are also used in a similar way for tasks submitted * to the pool. We cannot mix these tasks in the same queues used * for work-stealing (this would contaminate lifo/fifo - * processing). Instead, we loosely associate (via hashing) - * submission queues with submitting threads, and randomly scan - * these queues as well when looking for work. 
In essence, - * submitters act like workers except that they never take tasks, - * and they are multiplexed on to a finite number of shared work - * queues. However, classes are set up so that future extensions - * could allow submitters to optionally help perform tasks as - * well. Pool submissions from internal workers are also allowed, - * but use randomized rather than thread-hashed queue indices to - * avoid imbalance. Insertion of tasks in shared mode requires a - * lock (mainly to protect in the case of resizing) but we use - * only a simple spinlock (using bits in field runState), because - * submitters encountering a busy queue try or create others so - * never block. + * processing). Instead, we loosely associate submission queues + * with submitting threads, using a form of hashing. The + * ThreadLocal Submitter class contains a value initially used as + * a hash code for choosing existing queues, but may be randomly + * repositioned upon contention with other submitters. In + * essence, submitters act like workers except that they never + * take tasks, and they are multiplexed on to a finite number of + * shared work queues. However, classes are set up so that future + * extensions could allow submitters to optionally help perform + * tasks as well. Pool submissions from internal workers are also + * allowed, but use randomized rather than thread-hashed queue + * indices to avoid imbalance. Insertion of tasks in shared mode + * requires a lock (mainly to protect in the case of resizing) but + * we use only a simple spinlock (using bits in field runState), + * because submitters encountering a busy queue try or create + * others so never block. * * Management. * ========== @@ -1087,27 +1088,58 @@ public class ForkJoinPool extends AbstractExecutorService { } /** - * Computes a hash code for the given thread. This method is - * expected to provide higher-quality hash codes than those using - * method hashCode(). +<<<<<<< ForkJoinPool.java + * Per-thread records for (typically non-FJ) threads that submit + * to pools. Cureently holds only psuedo-random seed / index that + * is used to chose submission queues in method doSubmit. In the + * future, this may incorporate a means to implement different + * task rejection and resubmission policies. */ - static final int hashThread(Thread t) { - long id = (t == null) ? 0L : t.getId(); // Use MurmurHash of thread id - int h = (int)id ^ (int)(id >>> 32); - h ^= h >>> 16; - h *= 0x85ebca6b; - h ^= h >>> 13; - h *= 0xc2b2ae35; - return h ^ (h >>> 16); + static final class Submitter { + int seed; // seed for random submission queue selection + + // Heuristic padding to ameliorate unfortunate memory placements + int p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pa, pb, pc, pd, pe; + + Submitter() { + // Use identityHashCode, forced negative, for seed + seed = System.identityHashCode(Thread.currentThread()) | (1 << 31); + } + + /** + * Computes next value for random probes. Like method + * WorkQueue.nextSeed, this is manually inlined in several + * usages to avoid writes inside busy loops. + */ + final int nextSeed() { + int r = seed; + r ^= r << 13; + r ^= r >>> 17; + return seed = r ^= r << 5; + } } + /** ThreadLocal class for Submitters */ + static final class ThreadSubmitter extends ThreadLocal { + public Submitter initialValue() { return new Submitter(); } + } + + /** + * Per-thread submission bookeeping. 
Shared across all pools + * to reduce ThreadLocal pollution and because random motion + * to avoid contention in one pool is likely to hold for others. + */ + static final ThreadSubmitter submitters = new ThreadSubmitter(); + /** * Top-level runloop for workers */ final void runWorker(ForkJoinWorkerThread wt) { + // Initialize queue array and seed in this thread WorkQueue w = wt.workQueue; - w.growArray(false); // Initialize queue array and seed in this thread - w.seed = hashThread(Thread.currentThread()) | (1 << 31); // force < 0 + w.growArray(false); + // Same initial hash as Submitters + w.seed = System.identityHashCode(Thread.currentThread()) | (1 << 31); do {} while (w.runTask(scan(w))); } @@ -1220,6 +1252,37 @@ public class ForkJoinPool extends AbstractExecutorService { U.throwException(ex); } + /** + * Tries to add and register a new queue at the given index. + * + * @param idx the workQueues array index to register the queue + * @return the queue, or null if could not add because could + * not acquire lock or idx is unusable + */ + private WorkQueue tryAddSharedQueue(int idx) { + WorkQueue q = null; + ReentrantLock lock = this.lock; + if (idx >= 0 && (idx & 1) == 0 && !lock.isLocked()) { + // create queue outside of lock but only if apparently free + WorkQueue nq = new WorkQueue(null, SHARED_QUEUE); + if (lock.tryLock()) { + try { + WorkQueue[] ws = workQueues; + if (ws != null && idx < ws.length) { + if ((q = ws[idx]) == null) { + int rs; // update runState seq + ws[idx] = q = nq; + runState = (((rs = runState) & SHUTDOWN) | + ((rs + RS_SEQ) & ~SHUTDOWN)); + } + } + } finally { + lock.unlock(); + } + } + } + return q; + } // Maintaining ctl counts @@ -1322,73 +1385,35 @@ public class ForkJoinPool extends AbstractExecutorService { // Submissions /** - * Unless shutting down, adds the given task to some submission - * queue; using a randomly chosen queue index if the caller is a - * ForkJoinWorkerThread, else one based on caller thread's hash - * code. If no queue exists at the index, one is created. If the - * queue is busy, another is chosen by sweeping through the queues - * array. + * Unless shutting down, adds the given task to a submission queue + * at submitter's current queue index. If no queue exists at the + * index, one is created unless pool lock is busy. If the queue + * and/or lock are busy, another index is randomly chosen. */ private void doSubmit(ForkJoinTask task) { if (task == null) throw new NullPointerException(); - Thread t = Thread.currentThread(); - int r = ((t instanceof ForkJoinWorkerThread) ? 
- ((ForkJoinWorkerThread)t).workQueue.nextSeed() : hashThread(t)); - for (;;) { + Submitter s = submitters.get(); + for (int r = s.seed;;) { + WorkQueue q; int k; int rs = runState, m = rs & SMASK; - int j = r &= (m & ~1); // even numbered queues WorkQueue[] ws = workQueues; - if (rs < 0 || ws == null) - throw new RejectedExecutionException(); // shutting down - if (ws.length > m) { // consistency check - for (WorkQueue q;;) { // circular sweep - if (((q = ws[j]) != null || - (q = tryAddSharedQueue(j)) != null) && - q.trySharedPush(task)) { - signalWork(); - return; - } - if ((j = (j + 2) & m) == r) { - Thread.yield(); // all queues busy - break; - } - } + if (rs < 0 || ws == null) // shutting down + throw new RejectedExecutionException(); + if (ws.length > m && // k must be at index + ((q = ws[k = (r << 1) & m]) != null || + (q = tryAddSharedQueue(k)) != null) && + q.trySharedPush(task)) { + signalWork(); + return; } + r ^= r << 13; // xorshift seed to new position + r ^= r >>> 17; + if (((s.seed = r ^= r << 5) & m) == 0) + Thread.yield(); // occasionally yield if busy } } - /** - * Tries to add and register a new queue at the given index. - * - * @param idx the workQueues array index to register the queue - * @return the queue, or null if could not add because could - * not acquire lock or idx is unusable - */ - private WorkQueue tryAddSharedQueue(int idx) { - WorkQueue q = null; - ReentrantLock lock = this.lock; - if (idx >= 0 && (idx & 1) == 0 && !lock.isLocked()) { - // create queue outside of lock but only if apparently free - WorkQueue nq = new WorkQueue(null, SHARED_QUEUE); - if (lock.tryLock()) { - try { - WorkQueue[] ws = workQueues; - if (ws != null && idx < ws.length) { - if ((q = ws[idx]) == null) { - int rs; // update runState seq - ws[idx] = q = nq; - runState = (((rs = runState) & SHUTDOWN) | - ((rs + RS_SEQ) & ~SHUTDOWN)); - } - } - } finally { - lock.unlock(); - } - } - } - return q; - } // Scanning for tasks @@ -2627,4 +2652,5 @@ public class ForkJoinPool extends AbstractExecutorService { private static sun.misc.Unsafe getUnsafe() { return Unsafe.instance; } + } From 1a122986a1474dcc7c0a1698f4d02e737d0a92b9 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 30 Jan 2012 10:08:10 +0100 Subject: [PATCH 13/94] I really dislike final var --- akka-actor/src/main/scala/akka/actor/ActorCell.scala | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index 5aaf4ae8d5..b9f0d70d43 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -209,10 +209,10 @@ private[akka] class ActorCell( /** * In milliseconds */ - final var receiveTimeoutData: (Long, Cancellable) = + var receiveTimeoutData: (Long, Cancellable) = if (_receiveTimeout.isDefined) (_receiveTimeout.get.toMillis, emptyCancellable) else emptyReceiveTimeoutData - final var childrenRefs: TreeMap[String, ChildRestartStats] = emptyChildrenRefs + var childrenRefs: TreeMap[String, ChildRestartStats] = emptyChildrenRefs private def _actorOf(props: Props, name: String): ActorRef = { if (system.settings.SerializeAllCreators && !props.creator.isInstanceOf[NoSerializationVerificationNeeded]) { @@ -255,16 +255,16 @@ private[akka] class ActorCell( a.stop() } - final var currentMessage: Envelope = null + var currentMessage: Envelope = null - final var actor: Actor = _ + var actor: Actor = _ - final var stopping = false + var stopping 
= false @volatile //This must be volatile since it isn't protected by the mailbox status var mailbox: Mailbox = _ - final var nextNameSequence: Long = 0 + var nextNameSequence: Long = 0 //Not thread safe, so should only be used inside the actor that inhabits this ActorCell final protected def randomName(): String = { From d12d56a1aefaa3dcfdb0e838081acce975ad9f77 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Mon, 30 Jan 2012 13:27:48 +0100 Subject: [PATCH 14/94] Move hotswap stack into Actor trait. See #1717 --- akka-actor/src/main/scala/Errtest.scala | 49 +++++++++++++++++++ .../src/main/scala/akka/actor/Actor.scala | 19 ++++++- .../src/main/scala/akka/actor/ActorCell.scala | 17 +++---- .../src/main/scala/akka/actor/ActorRef.scala | 10 ++-- .../src/main/scala/akka/actor/Props.scala | 1 - .../scala/akka/testkit/TestActorRef.scala | 5 +- 6 files changed, 78 insertions(+), 23 deletions(-) create mode 100644 akka-actor/src/main/scala/Errtest.scala diff --git a/akka-actor/src/main/scala/Errtest.scala b/akka-actor/src/main/scala/Errtest.scala new file mode 100644 index 0000000000..c778e17ebe --- /dev/null +++ b/akka-actor/src/main/scala/Errtest.scala @@ -0,0 +1,49 @@ + +import akka.actor._ +import com.typesafe.config.ConfigFactory +import akka.event.LoggingReceive +import scala.annotation.tailrec + +object Errtest extends App { + + val config = ConfigFactory.parseString(""" + akka.loglevel = DEBUG + akka.actor.debug { + receive = on + lifecycle = on + } + """) + + val sys = ActorSystem("ErrSys", config) + val top = sys.actorOf(Props[Top], name = "top") + + for (n ← 1 to 100) { + top ! "run " + n + Thread.sleep(1000) + } +} + +class Top extends Actor { + var c: ActorRef = _ + def receive = LoggingReceive { + case x ⇒ + c = context.actorOf(Props[Child]); + c ! "ok" + } +} + +class Child extends Actor { + + //throw new Error("Simulated ERR") + blowUp(0) + + //not @tailrec + private final def blowUp(n: Long): Long = { + blowUp(n + 1) + 1 + } + + def receive = LoggingReceive { + case x ⇒ + //context.system.shutdown(); + } +} \ No newline at end of file diff --git a/akka-actor/src/main/scala/akka/actor/Actor.scala b/akka-actor/src/main/scala/akka/actor/Actor.scala index 94aef4bdef..386989f346 100644 --- a/akka-actor/src/main/scala/akka/actor/Actor.scala +++ b/akka-actor/src/main/scala/akka/actor/Actor.scala @@ -7,6 +7,7 @@ package akka.actor import akka.AkkaException import scala.reflect.BeanProperty import scala.util.control.NoStackTrace +import scala.collection.immutable.Stack import java.util.regex.Pattern /** @@ -112,6 +113,8 @@ object Actor { def isDefinedAt(x: Any) = false def apply(x: Any) = throw new UnsupportedOperationException("Empty behavior apply()") } + + private final val emptyBehaviourStack: Stack[Actor.Receive] = Stack.empty } /** @@ -172,7 +175,7 @@ trait Actor { type Receive = Actor.Receive /** - * Stores the context for this actor, including self, sender, and hotswap. + * Stores the context for this actor, including self, and sender. * It is implicit to support operations such as `forward`. * * [[akka.actor.ActorContext]] is the Scala API. 
`getContext` returns a @@ -282,7 +285,6 @@ trait Actor { // ========================================= private[akka] final def apply(msg: Any) = { - val behaviorStack = context.asInstanceOf[ActorCell].hotswap msg match { case msg if behaviorStack.nonEmpty && behaviorStack.head.isDefinedAt(msg) ⇒ behaviorStack.head.apply(msg) case msg if behaviorStack.isEmpty && processingBehavior.isDefinedAt(msg) ⇒ processingBehavior.apply(msg) @@ -291,5 +293,18 @@ trait Actor { } private[this] val processingBehavior = receive //ProcessingBehavior is the original behavior + + private[akka] def pushBehavior(behavior: Receive): Unit = { + behaviorStack = behaviorStack.push(behavior) + } + + private[akka] def popBehavior(): Unit = { + val stack = behaviorStack + if (stack.nonEmpty) behaviorStack = stack.pop + } + + private[akka] def clearBehaviorStack(): Unit = { behaviorStack = emptyBehaviourStack } + + private var behaviorStack: Stack[PartialFunction[Any, Unit]] = emptyBehaviourStack } diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index 5aaf4ae8d5..2f8eee04ef 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -174,8 +174,7 @@ private[akka] class ActorCell( val self: InternalActorRef, val props: Props, @volatile var parent: InternalActorRef, - /*no member*/ _receiveTimeout: Option[Duration], - var hotswap: Stack[PartialFunction[Any, Unit]]) extends UntypedActorContext { + /*no member*/ _receiveTimeout: Option[Duration]) extends UntypedActorContext { import ActorCell._ @@ -389,7 +388,6 @@ private[akka] class ActorCell( } } actor = freshActor // assign it here so if preStart fails, we can null out the sef-refs next call - hotswap = Props.noHotSwap // Reset the behavior freshActor.postRestart(cause) if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, clazz(freshActor), "restarted")) @@ -509,9 +507,9 @@ private[akka] class ActorCell( } } - def become(behavior: Actor.Receive, discardOld: Boolean = true) { + def become(behavior: Actor.Receive, discardOld: Boolean = true): Unit = { if (discardOld) unbecome() - hotswap = hotswap.push(behavior) + actor.pushBehavior(behavior) } /** @@ -527,10 +525,7 @@ private[akka] class ActorCell( become(newReceive, discardOld) } - def unbecome() { - val h = hotswap - if (h.nonEmpty) hotswap = h.pop - } + def unbecome(): Unit = actor.popBehavior() def autoReceiveMessage(msg: Envelope) { if (system.settings.DebugAutoReceive) @@ -547,9 +542,9 @@ private[akka] class ActorCell( } private def doTerminate() { + val a = actor try { try { - val a = actor if (a ne null) a.postStop() } finally { dispatcher.detach(this) @@ -563,7 +558,7 @@ private[akka] class ActorCell( } finally { currentMessage = null clearActorFields() - hotswap = Props.noHotSwap + if (a ne null) a.clearBehaviorStack() } } } diff --git a/akka-actor/src/main/scala/akka/actor/ActorRef.scala b/akka-actor/src/main/scala/akka/actor/ActorRef.scala index fa6c9962e7..1199a35152 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRef.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRef.scala @@ -224,8 +224,7 @@ private[akka] class LocalActorRef private[akka] ( _supervisor: InternalActorRef, val path: ActorPath, val systemService: Boolean = false, - _receiveTimeout: Option[Duration] = None, - _hotswap: Stack[PartialFunction[Any, Unit]] = Props.noHotSwap) + _receiveTimeout: Option[Duration] = None) extends InternalActorRef with LocalRef { /* 
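
Patch 14 moves the hotswap stack into the Actor trait without changing the user-facing become/unbecome contract. As a reminder of the behavior this refactoring has to preserve, here is a minimal round trip of that contract; the actor and message names are illustrative only, not part of the patch:

import akka.actor.{ Actor, ActorSystem, Props }

// Illustrative sketch only: swaps behavior at runtime via the hotswap stack.
class Toggler extends Actor {
  def receive = { // bottom of the behavior stack, never popped away
    case "swap" ⇒ context.become(angry, discardOld = false) // push a new behavior
    case msg    ⇒ println("calm: " + msg)
  }
  def angry: Receive = {
    case "swap" ⇒ context.unbecome() // pop back down to the original receive
    case msg    ⇒ println("angry: " + msg)
  }
}

object TogglerDemo extends App {
  val system = ActorSystem("TogglerDemo")
  val toggler = system.actorOf(Props[Toggler], name = "toggler")
  toggler ! "hi"   // prints: calm: hi
  toggler ! "swap"
  toggler ! "hi"   // prints: angry: hi
  toggler ! "swap"
  toggler ! "hi"   // prints: calm: hi
  Thread.sleep(1000) // let the asynchronous prints happen before shutdown
  system.shutdown()
}
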
@@ -238,7 +237,7 @@ private[akka] class LocalActorRef private[akka] (
    * us to use purely factory methods for creating LocalActorRefs.
    */
   @volatile
-  private var actorCell = newActorCell(_system, this, _props, _supervisor, _receiveTimeout, _hotswap)
+  private var actorCell = newActorCell(_system, this, _props, _supervisor, _receiveTimeout)
   actorCell.start()
 
   protected def newActorCell(
@@ -246,9 +245,8 @@ private[akka] class LocalActorRef private[akka] (
     ref: InternalActorRef,
     props: Props,
     supervisor: InternalActorRef,
-    receiveTimeout: Option[Duration],
-    hotswap: Stack[PartialFunction[Any, Unit]]): ActorCell =
-    new ActorCell(system, ref, props, supervisor, receiveTimeout, hotswap)
+    receiveTimeout: Option[Duration]): ActorCell =
+    new ActorCell(system, ref, props, supervisor, receiveTimeout)
 
   protected def actorContext: ActorContext = actorCell
 
diff --git a/akka-actor/src/main/scala/akka/actor/Props.scala b/akka-actor/src/main/scala/akka/actor/Props.scala
index cd9a62abe7..2362c4c255 100644
--- a/akka-actor/src/main/scala/akka/actor/Props.scala
+++ b/akka-actor/src/main/scala/akka/actor/Props.scala
@@ -22,7 +22,6 @@ object Props {
 
   final val defaultRoutedProps: RouterConfig = NoRouter
 
-  final val noHotSwap: Stack[Actor.Receive] = Stack.empty
   final val empty = new Props(() ⇒ new Actor { def receive = Actor.emptyBehavior })
 
   /**
diff --git a/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala b/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala
index 18618a8f0c..3cfbf0ce1b 100644
--- a/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala
+++ b/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala
@@ -41,9 +41,8 @@ class TestActorRef[T <: Actor](
     ref: InternalActorRef,
     props: Props,
     supervisor: InternalActorRef,
-    receiveTimeout: Option[Duration],
-    hotswap: Stack[PartialFunction[Any, Unit]]): ActorCell =
-    new ActorCell(system, ref, props, supervisor, receiveTimeout, hotswap) {
+    receiveTimeout: Option[Duration]): ActorCell =
+    new ActorCell(system, ref, props, supervisor, receiveTimeout) {
       override def autoReceiveMessage(msg: Envelope) {
         msg.message match {
           case InternalGetActor ⇒ sender ! actor
From bd29df5eb047133aefdd25c15d5ef2072bcd9ca9 Mon Sep 17 00:00:00 2001
From: Patrik Nordwall
Date: Mon, 30 Jan 2012 13:40:57 +0100
Subject: [PATCH 15/94] Removed file that was added by mistake

---
 akka-actor/src/main/scala/Errtest.scala | 49 -------------------------
 1 file changed, 49 deletions(-)
 delete mode 100644 akka-actor/src/main/scala/Errtest.scala

diff --git a/akka-actor/src/main/scala/Errtest.scala b/akka-actor/src/main/scala/Errtest.scala
deleted file mode 100644
index c778e17ebe..0000000000
--- a/akka-actor/src/main/scala/Errtest.scala
+++ /dev/null
@@ -1,49 +0,0 @@
-
-import akka.actor._
-import com.typesafe.config.ConfigFactory
-import akka.event.LoggingReceive
-import scala.annotation.tailrec
-
-object Errtest extends App {
-
-  val config = ConfigFactory.parseString("""
-    akka.loglevel = DEBUG
-    akka.actor.debug {
-      receive = on
-      lifecycle = on
-    }
-    """)
-
-  val sys = ActorSystem("ErrSys", config)
-  val top = sys.actorOf(Props[Top], name = "top")
-
-  for (n ← 1 to 100) {
-    top ! "run " + n
-    Thread.sleep(1000)
-  }
-}
-
-class Top extends Actor {
-  var c: ActorRef = _
-  def receive = LoggingReceive {
-    case x ⇒
-      c = context.actorOf(Props[Child]);
-      c !
"ok" - } -} - -class Child extends Actor { - - //throw new Error("Simulated ERR") - blowUp(0) - - //not @tailrec - private final def blowUp(n: Long): Long = { - blowUp(n + 1) + 1 - } - - def receive = LoggingReceive { - case x ⇒ - //context.system.shutdown(); - } -} \ No newline at end of file From c1dd4463b940187dd49f92f5c566dbe7f687363d Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 30 Jan 2012 13:44:56 +0100 Subject: [PATCH 16/94] Restructuring how executors are configured and making sure people can plug in their own --- .../akka/actor/dispatch/ActorModelSpec.scala | 36 +++++----- .../actor/dispatch/DispatcherActorSpec.scala | 14 ++-- .../test/scala/akka/config/ConfigSpec.scala | 60 ++++++++++------- akka-actor/src/main/resources/reference.conf | 65 ++++++++++++------- .../akka/dispatch/AbstractDispatcher.scala | 37 +++++++++-- .../scala/akka/dispatch/Dispatchers.scala | 55 ++++++++-------- .../akka/dispatch/ThreadPoolBuilder.scala | 41 +++++------- 7 files changed, 180 insertions(+), 128 deletions(-) diff --git a/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala index 57c3567c4e..45e1954486 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala @@ -448,16 +448,14 @@ object DispatcherModelSpec { class MessageDispatcherInterceptorConfigurator(config: Config, prerequisites: DispatcherPrerequisites) extends MessageDispatcherConfigurator(config, prerequisites) { - private val instance: MessageDispatcher = { - configureThreadPool(config, - threadPoolConfig ⇒ new Dispatcher(prerequisites, - config.getString("id"), - config.getInt("throughput"), - Duration(config.getNanoseconds("throughput-deadline-time"), TimeUnit.NANOSECONDS), - mailboxType, - threadPoolConfig, - Duration(config.getMilliseconds("shutdown-timeout"), TimeUnit.MILLISECONDS)) with MessageDispatcherInterceptor) - } + private val instance: MessageDispatcher = + new Dispatcher(prerequisites, + config.getString("id"), + config.getInt("throughput"), + Duration(config.getNanoseconds("throughput-deadline-time"), TimeUnit.NANOSECONDS), + mailboxType, + configureExecutor(), + Duration(config.getMilliseconds("shutdown-timeout"), TimeUnit.MILLISECONDS)) with MessageDispatcherInterceptor override def dispatcher(): MessageDispatcher = instance } @@ -522,16 +520,14 @@ object BalancingDispatcherModelSpec { class BalancingMessageDispatcherInterceptorConfigurator(config: Config, prerequisites: DispatcherPrerequisites) extends MessageDispatcherConfigurator(config, prerequisites) { - private val instance: MessageDispatcher = { - configureThreadPool(config, - threadPoolConfig ⇒ new BalancingDispatcher(prerequisites, - config.getString("id"), - config.getInt("throughput"), - Duration(config.getNanoseconds("throughput-deadline-time"), TimeUnit.NANOSECONDS), - mailboxType, - threadPoolConfig, - Duration(config.getMilliseconds("shutdown-timeout"), TimeUnit.MILLISECONDS)) with MessageDispatcherInterceptor) - } + private val instance: MessageDispatcher = + new BalancingDispatcher(prerequisites, + config.getString("id"), + config.getInt("throughput"), + Duration(config.getNanoseconds("throughput-deadline-time"), TimeUnit.NANOSECONDS), + mailboxType, + configureExecutor(), + Duration(config.getMilliseconds("shutdown-timeout"), TimeUnit.MILLISECONDS)) with MessageDispatcherInterceptor override def dispatcher(): MessageDispatcher = instance } 
diff --git a/akka-actor-tests/src/test/scala/akka/actor/dispatch/DispatcherActorSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/dispatch/DispatcherActorSpec.scala index 2dce8346db..4b3dd4a5b3 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/dispatch/DispatcherActorSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/dispatch/DispatcherActorSpec.scala @@ -16,14 +16,20 @@ object DispatcherActorSpec { } test-throughput-dispatcher { throughput = 101 - core-pool-size-min = 1 - core-pool-size-max = 1 + executor = "thread-pool-executor" + thread-pool-executor { + core-pool-size-min = 1 + core-pool-size-max = 1 + } } test-throughput-deadline-dispatcher { throughput = 2 throughput-deadline-time = 100 milliseconds - core-pool-size-min = 1 - core-pool-size-max = 1 + executor = "thread-pool-executor" + thread-pool-executor { + core-pool-size-min = 1 + core-pool-size-max = 1 + } } """ diff --git a/akka-actor-tests/src/test/scala/akka/config/ConfigSpec.scala b/akka-actor-tests/src/test/scala/akka/config/ConfigSpec.scala index 67c7a51b60..a29ee517a3 100644 --- a/akka-actor-tests/src/test/scala/akka/config/ConfigSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/config/ConfigSpec.scala @@ -18,35 +18,49 @@ class ConfigSpec extends AkkaSpec(ConfigFactory.defaultReference) { val settings = system.settings val config = settings.config - import config._ - getString("akka.version") must equal("2.0-SNAPSHOT") - settings.ConfigVersion must equal("2.0-SNAPSHOT") + { + import config._ - getBoolean("akka.daemonic") must equal(false) + getString("akka.version") must equal("2.0-SNAPSHOT") + settings.ConfigVersion must equal("2.0-SNAPSHOT") - getString("akka.actor.default-dispatcher.type") must equal("Dispatcher") - getMilliseconds("akka.actor.default-dispatcher.keep-alive-time") must equal(60 * 1000) - getDouble("akka.actor.default-dispatcher.core-pool-size-factor") must equal(3.0) - getDouble("akka.actor.default-dispatcher.max-pool-size-factor") must equal(3.0) - getInt("akka.actor.default-dispatcher.task-queue-size") must equal(-1) - getString("akka.actor.default-dispatcher.task-queue-type") must equal("linked") - getBoolean("akka.actor.default-dispatcher.allow-core-timeout") must equal(true) - getInt("akka.actor.default-dispatcher.mailbox-capacity") must equal(-1) - getMilliseconds("akka.actor.default-dispatcher.mailbox-push-timeout-time") must equal(10 * 1000) - getString("akka.actor.default-dispatcher.mailboxType") must be("") - getMilliseconds("akka.actor.default-dispatcher.shutdown-timeout") must equal(1 * 1000) - getInt("akka.actor.default-dispatcher.throughput") must equal(5) - getMilliseconds("akka.actor.default-dispatcher.throughput-deadline-time") must equal(0) + getBoolean("akka.daemonic") must equal(false) + getBoolean("akka.actor.serialize-messages") must equal(false) + settings.SerializeAllMessages must equal(false) - getBoolean("akka.actor.serialize-messages") must equal(false) - settings.SerializeAllMessages must equal(false) + getInt("akka.scheduler.ticksPerWheel") must equal(512) + settings.SchedulerTicksPerWheel must equal(512) - getInt("akka.scheduler.ticksPerWheel") must equal(512) - settings.SchedulerTicksPerWheel must equal(512) + getMilliseconds("akka.scheduler.tickDuration") must equal(100) + settings.SchedulerTickDuration must equal(100 millis) + } - getMilliseconds("akka.scheduler.tickDuration") must equal(100) - settings.SchedulerTickDuration must equal(100 millis) + { + val c = config.getConfig("akka.actor.default-dispatcher") + + { + c.getString("type") must 
equal("Dispatcher") + c.getString("executor") must equal("thread-pool-executor") + c.getInt("mailbox-capacity") must equal(-1) + c.getMilliseconds("mailbox-push-timeout-time") must equal(10 * 1000) + c.getString("mailboxType") must be("") + c.getMilliseconds("shutdown-timeout") must equal(1 * 1000) + c.getInt("throughput") must equal(5) + c.getMilliseconds("throughput-deadline-time") must equal(0) + } + + { + val pool = c.getConfig("thread-pool-executor") + import pool._ + getMilliseconds("keep-alive-time") must equal(60 * 1000) + getDouble("core-pool-size-factor") must equal(3.0) + getDouble("max-pool-size-factor") must equal(3.0) + getInt("task-queue-size") must equal(-1) + getString("task-queue-type") must equal("linked") + getBoolean("allow-core-timeout") must equal(true) + } + } } } } diff --git a/akka-actor/src/main/resources/reference.conf b/akka-actor/src/main/resources/reference.conf index 999c4286c2..ffaedde045 100644 --- a/akka-actor/src/main/resources/reference.conf +++ b/akka-actor/src/main/resources/reference.conf @@ -158,37 +158,58 @@ akka { # parameters type = "Dispatcher" - # Keep alive time for threads - keep-alive-time = 60s + #Which kind of ExecutorService to use for this dispatcher + #Valid options: "thread-pool-executor" requires a "thread-pool-executor" section + # "fork-join-executor" requires a "fork-join-executor" section + # A FQCN of a class extending ExecutorServiceConfigurator + executor = "thread-pool-executor" - # minimum number of threads to cap factor-based core number to - core-pool-size-min = 8 + # This will be used if you have set "executor = "thread-pool-executor"" + thread-pool-executor { + # Keep alive time for threads + keep-alive-time = 60s - # No of core threads ... ceil(available processors * factor) - core-pool-size-factor = 3.0 + # minimum number of threads to cap factor-based core number to + core-pool-size-min = 8 - # maximum number of threads to cap factor-based number to - core-pool-size-max = 64 + # No of core threads ... ceil(available processors * factor) + core-pool-size-factor = 3.0 - # Hint: max-pool-size is only used for bounded task queues - # minimum number of threads to cap factor-based max number to - max-pool-size-min = 8 + # maximum number of threads to cap factor-based number to + core-pool-size-max = 64 - # Max no of threads ... ceil(available processors * factor) - max-pool-size-factor = 3.0 + # Hint: max-pool-size is only used for bounded task queues + # minimum number of threads to cap factor-based max number to + max-pool-size-min = 8 - # maximum number of threads to cap factor-based max number to - max-pool-size-max = 64 + # Max no of threads ... 
ceil(available processors * factor) + max-pool-size-factor = 3.0 - # Specifies the bounded capacity of the task queue (< 1 == unbounded) - task-queue-size = -1 + # maximum number of threads to cap factor-based max number to + max-pool-size-max = 64 - # Specifies which type of task queue will be used, can be "array" or - # "linked" (default) - task-queue-type = "linked" + # Specifies the bounded capacity of the task queue (< 1 == unbounded) + task-queue-size = -1 - # Allow core threads to time out - allow-core-timeout = on + # Specifies which type of task queue will be used, can be "array" or + # "linked" (default) + task-queue-type = "linked" + + # Allow core threads to time out + allow-core-timeout = on + } + + # This will be used if you have set "executor = "fork-join-executor"" + fork-join-executor { + # minimum number of threads to cap factor-based parallelism number to + parallelism-min = 8 + + # Parallelism (threads) ... ceil(available processors * factor) + parallelism-factor = 3.0 + + # maximum number of threads to cap factor-based parallelism number to + parallelism-max = 64 + } # How long time the dispatcher will wait for new actors until it shuts down shutdown-timeout = 1s diff --git a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala index bb4e3e42e7..77b272d1a5 100644 --- a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala @@ -292,6 +292,8 @@ abstract class MessageDispatcher(val prerequisites: DispatcherPrerequisites) ext protected[akka] def shutdown(): Unit } +abstract class ExecutorServiceConfigurator(config: Config, prerequisites: DispatcherPrerequisites) extends ExecutorServiceFactoryProvider + /** * Base class to be used for hooking in new dispatchers into Dispatchers. */ @@ -333,14 +335,32 @@ abstract class MessageDispatcherConfigurator(val config: Config, val prerequisit } } - def configureThreadPool( - config: Config, - createDispatcher: ⇒ (ExecutorServiceFactoryProvider) ⇒ MessageDispatcher): MessageDispatcher = { - import ThreadPoolConfigDispatcherBuilder.conf_? 
+ def configureExecutor(): ExecutorServiceConfigurator = { + config.getString("executor") match { + case null | "" ⇒ throw new IllegalArgumentException("""Missing "executor" in config file for dispatcher [%s]""".format(config.getString("id"))) + case "thread-pool-executor" ⇒ new ThreadPoolExecutorConfigurator(config.getConfig("thread-pool-executor"), prerequisites) + //case "fork-join-executor" => new ForkJoinExecutorConfigurator(config.getConfig("fork-join-executor"), prerequisites) + case fqcn ⇒ + val constructorSignature = Array[Class[_]](classOf[Config], classOf[DispatcherPrerequisites]) + ReflectiveAccess.createInstance[ExecutorServiceConfigurator](fqcn, constructorSignature, Array[AnyRef](config, prerequisites)) match { + case Right(instance) ⇒ instance + case Left(exception) ⇒ + throw new IllegalArgumentException( + ("Cannot instantiate ExecutorServiceConfigurator (\"executor = [%s]\"), defined in [%s], " + + "make sure it has an accessible constructor with a [%s,%s] signature") + .format(fqcn, config.getString("id"), classOf[Config], classOf[DispatcherPrerequisites]), exception) + } + } + } +} - //Apply the following options to the config if they are present in the config +class ThreadPoolExecutorConfigurator(config: Config, prerequisites: DispatcherPrerequisites) extends ExecutorServiceConfigurator(config, prerequisites) { + import ThreadPoolConfigBuilder.conf_? - ThreadPoolConfigDispatcherBuilder(createDispatcher, ThreadPoolConfig()) + val threadPoolConfig: ThreadPoolConfig = createThreadPoolConfigBuilder(config, prerequisites).config + + def createThreadPoolConfigBuilder(config: Config, prerequisites: DispatcherPrerequisites): ThreadPoolConfigBuilder = { + ThreadPoolConfigBuilder(ThreadPoolConfig()) .setKeepAliveTime(Duration(config getMilliseconds "keep-alive-time", TimeUnit.MILLISECONDS)) .setAllowCoreThreadTimeout(config getBoolean "allow-core-timeout") .setCorePoolSizeFromFactor(config getInt "core-pool-size-min", config getDouble "core-pool-size-factor", config getInt "core-pool-size-max") @@ -354,6 +374,9 @@ abstract class MessageDispatcherConfigurator(val config: Config, val prerequisit case x ⇒ throw new IllegalArgumentException("[%s] is not a valid task-queue-type [array|linked]!" 
format x) } case _ ⇒ None - })(queueFactory ⇒ _.setQueueFactory(queueFactory))).build + })(queueFactory ⇒ _.setQueueFactory(queueFactory))) } + + def createExecutorServiceFactory(name: String, threadFactory: ThreadFactory): ExecutorServiceFactory = + threadPoolConfig.createExecutorServiceFactory(name, threadFactory) } diff --git a/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala b/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala index 29ddc6c495..fd58346955 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala @@ -156,15 +156,14 @@ class Dispatchers(val settings: ActorSystem.Settings, val prerequisites: Dispatc class DispatcherConfigurator(config: Config, prerequisites: DispatcherPrerequisites) extends MessageDispatcherConfigurator(config, prerequisites) { - private val instance = - configureThreadPool(config, - threadPoolConfig ⇒ new Dispatcher(prerequisites, - config.getString("id"), - config.getInt("throughput"), - Duration(config.getNanoseconds("throughput-deadline-time"), TimeUnit.NANOSECONDS), - mailboxType, - threadPoolConfig, - Duration(config.getMilliseconds("shutdown-timeout"), TimeUnit.MILLISECONDS))) + private val instance = new Dispatcher( + prerequisites, + config.getString("id"), + config.getInt("throughput"), + Duration(config.getNanoseconds("throughput-deadline-time"), TimeUnit.NANOSECONDS), + mailboxType, + configureExecutor(), + Duration(config.getMilliseconds("shutdown-timeout"), TimeUnit.MILLISECONDS)) /** * Returns the same dispatcher instance for each invocation @@ -180,14 +179,13 @@ class DispatcherConfigurator(config: Config, prerequisites: DispatcherPrerequisi class BalancingDispatcherConfigurator(config: Config, prerequisites: DispatcherPrerequisites) extends MessageDispatcherConfigurator(config, prerequisites) { - private val instance = - configureThreadPool(config, - threadPoolConfig ⇒ new BalancingDispatcher(prerequisites, - config.getString("id"), - config.getInt("throughput"), - Duration(config.getNanoseconds("throughput-deadline-time"), TimeUnit.NANOSECONDS), - mailboxType, threadPoolConfig, - Duration(config.getMilliseconds("shutdown-timeout"), TimeUnit.MILLISECONDS))) + private val instance = new BalancingDispatcher( + prerequisites, + config.getString("id"), + config.getInt("throughput"), + Duration(config.getNanoseconds("throughput-deadline-time"), TimeUnit.NANOSECONDS), + mailboxType, configureExecutor(), + Duration(config.getMilliseconds("shutdown-timeout"), TimeUnit.MILLISECONDS)) /** * Returns the same dispatcher instance for each invocation @@ -203,23 +201,22 @@ class BalancingDispatcherConfigurator(config: Config, prerequisites: DispatcherP class PinnedDispatcherConfigurator(config: Config, prerequisites: DispatcherPrerequisites) extends MessageDispatcherConfigurator(config, prerequisites) { - def createPinnedDispatcherWith(tpc: ThreadPoolConfig): PinnedDispatcher = - new PinnedDispatcher( - prerequisites, null, config.getString("id"), mailboxType, - Duration(config.getMilliseconds("shutdown-timeout"), TimeUnit.MILLISECONDS), tpc) - - /** - * Creates new dispatcher for each invocation. 
- */ - override def dispatcher(): MessageDispatcher = configureThreadPool(config, { - case t: ThreadPoolConfig ⇒ createPinnedDispatcherWith(t) + val threadPoolConfig: ThreadPoolConfig = configureExecutor() match { + case e: ThreadPoolExecutorConfigurator ⇒ e.threadPoolConfig case other ⇒ prerequisites.eventStream.publish( Warning("PinnedDispatcherConfigurator", this.getClass, "PinnedDispatcher [%s] not configured to use ThreadPoolExecutor, falling back to default config.".format( config.getString("id")))) - createPinnedDispatcherWith(ThreadPoolConfig()) - }) + ThreadPoolConfig() + } + /** + * Creates new dispatcher for each invocation. + */ + override def dispatcher(): MessageDispatcher = + new PinnedDispatcher( + prerequisites, null, config.getString("id"), mailboxType, + Duration(config.getMilliseconds("shutdown-timeout"), TimeUnit.MILLISECONDS), threadPoolConfig) } diff --git a/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala b/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala index 687c3d8191..e9430340fa 100644 --- a/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala +++ b/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala @@ -87,70 +87,65 @@ case class ThreadPoolConfig(allowCorePoolTimeout: Boolean = ThreadPoolConfig.def new ThreadPoolExecutorServiceFactory(threadFactory) } -trait DispatcherBuilder { - def build: MessageDispatcher -} - -object ThreadPoolConfigDispatcherBuilder { - def conf_?[T](opt: Option[T])(fun: (T) ⇒ ThreadPoolConfigDispatcherBuilder ⇒ ThreadPoolConfigDispatcherBuilder): Option[(ThreadPoolConfigDispatcherBuilder) ⇒ ThreadPoolConfigDispatcherBuilder] = opt map fun +object ThreadPoolConfigBuilder { + def conf_?[T](opt: Option[T])(fun: (T) ⇒ ThreadPoolConfigBuilder ⇒ ThreadPoolConfigBuilder): Option[(ThreadPoolConfigBuilder) ⇒ ThreadPoolConfigBuilder] = opt map fun } /** * A DSL to configure and create a MessageDispatcher with a ThreadPoolExecutor */ -case class ThreadPoolConfigDispatcherBuilder(dispatcherFactory: (ThreadPoolConfig) ⇒ MessageDispatcher, config: ThreadPoolConfig) extends DispatcherBuilder { +case class ThreadPoolConfigBuilder(config: ThreadPoolConfig) { import ThreadPoolConfig._ - def build: MessageDispatcher = dispatcherFactory(config) - def withNewThreadPoolWithCustomBlockingQueue(newQueueFactory: QueueFactory): ThreadPoolConfigDispatcherBuilder = + def withNewThreadPoolWithCustomBlockingQueue(newQueueFactory: QueueFactory): ThreadPoolConfigBuilder = this.copy(config = config.copy(queueFactory = newQueueFactory)) - def withNewThreadPoolWithCustomBlockingQueue(queue: BlockingQueue[Runnable]): ThreadPoolConfigDispatcherBuilder = + def withNewThreadPoolWithCustomBlockingQueue(queue: BlockingQueue[Runnable]): ThreadPoolConfigBuilder = withNewThreadPoolWithCustomBlockingQueue(reusableQueue(queue)) - def withNewThreadPoolWithLinkedBlockingQueueWithUnboundedCapacity: ThreadPoolConfigDispatcherBuilder = + def withNewThreadPoolWithLinkedBlockingQueueWithUnboundedCapacity: ThreadPoolConfigBuilder = this.copy(config = config.copy(queueFactory = linkedBlockingQueue())) - def withNewThreadPoolWithLinkedBlockingQueueWithCapacity(capacity: Int): ThreadPoolConfigDispatcherBuilder = + def withNewThreadPoolWithLinkedBlockingQueueWithCapacity(capacity: Int): ThreadPoolConfigBuilder = this.copy(config = config.copy(queueFactory = linkedBlockingQueue(capacity))) - def withNewThreadPoolWithSynchronousQueueWithFairness(fair: Boolean): ThreadPoolConfigDispatcherBuilder = + def 
withNewThreadPoolWithSynchronousQueueWithFairness(fair: Boolean): ThreadPoolConfigBuilder = this.copy(config = config.copy(queueFactory = synchronousQueue(fair))) - def withNewThreadPoolWithArrayBlockingQueueWithCapacityAndFairness(capacity: Int, fair: Boolean): ThreadPoolConfigDispatcherBuilder = + def withNewThreadPoolWithArrayBlockingQueueWithCapacityAndFairness(capacity: Int, fair: Boolean): ThreadPoolConfigBuilder = this.copy(config = config.copy(queueFactory = arrayBlockingQueue(capacity, fair))) - def setCorePoolSize(size: Int): ThreadPoolConfigDispatcherBuilder = + def setCorePoolSize(size: Int): ThreadPoolConfigBuilder = if (config.maxPoolSize < size) this.copy(config = config.copy(corePoolSize = size, maxPoolSize = size)) else this.copy(config = config.copy(corePoolSize = size)) - def setMaxPoolSize(size: Int): ThreadPoolConfigDispatcherBuilder = + def setMaxPoolSize(size: Int): ThreadPoolConfigBuilder = if (config.corePoolSize > size) this.copy(config = config.copy(corePoolSize = size, maxPoolSize = size)) else this.copy(config = config.copy(maxPoolSize = size)) - def setCorePoolSizeFromFactor(min: Int, multiplier: Double, max: Int): ThreadPoolConfigDispatcherBuilder = + def setCorePoolSizeFromFactor(min: Int, multiplier: Double, max: Int): ThreadPoolConfigBuilder = setCorePoolSize(scaledPoolSize(min, multiplier, max)) - def setMaxPoolSizeFromFactor(min: Int, multiplier: Double, max: Int): ThreadPoolConfigDispatcherBuilder = + def setMaxPoolSizeFromFactor(min: Int, multiplier: Double, max: Int): ThreadPoolConfigBuilder = setMaxPoolSize(scaledPoolSize(min, multiplier, max)) - def setKeepAliveTimeInMillis(time: Long): ThreadPoolConfigDispatcherBuilder = + def setKeepAliveTimeInMillis(time: Long): ThreadPoolConfigBuilder = setKeepAliveTime(Duration(time, TimeUnit.MILLISECONDS)) - def setKeepAliveTime(time: Duration): ThreadPoolConfigDispatcherBuilder = + def setKeepAliveTime(time: Duration): ThreadPoolConfigBuilder = this.copy(config = config.copy(threadTimeout = time)) - def setAllowCoreThreadTimeout(allow: Boolean): ThreadPoolConfigDispatcherBuilder = + def setAllowCoreThreadTimeout(allow: Boolean): ThreadPoolConfigBuilder = this.copy(config = config.copy(allowCorePoolTimeout = allow)) - def setQueueFactory(newQueueFactory: QueueFactory): ThreadPoolConfigDispatcherBuilder = + def setQueueFactory(newQueueFactory: QueueFactory): ThreadPoolConfigBuilder = this.copy(config = config.copy(queueFactory = newQueueFactory)) - def configure(fs: Option[Function[ThreadPoolConfigDispatcherBuilder, ThreadPoolConfigDispatcherBuilder]]*): ThreadPoolConfigDispatcherBuilder = fs.foldLeft(this)((c, f) ⇒ f.map(_(c)).getOrElse(c)) + def configure(fs: Option[Function[ThreadPoolConfigBuilder, ThreadPoolConfigBuilder]]*): ThreadPoolConfigBuilder = fs.foldLeft(this)((c, f) ⇒ f.map(_(c)).getOrElse(c)) } object MonitorableThreadFactory { From dc0fc349cb55919b56a45651cacafc938b3eb1fb Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Mon, 30 Jan 2012 14:22:19 +0100 Subject: [PATCH 17/94] Removed processingBehavior, everything in behaviorStack. 
See #1717 --- .../src/main/scala/akka/actor/Actor.scala | 30 ++++++++++++------- 1 file changed, 20 insertions(+), 10 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/Actor.scala b/akka-actor/src/main/scala/akka/actor/Actor.scala index 386989f346..9ed73d8945 100644 --- a/akka-actor/src/main/scala/akka/actor/Actor.scala +++ b/akka-actor/src/main/scala/akka/actor/Actor.scala @@ -9,6 +9,7 @@ import scala.reflect.BeanProperty import scala.util.control.NoStackTrace import scala.collection.immutable.Stack import java.util.regex.Pattern +import scala.annotation.tailrec /** * Marker trait to show which Messages are automatically handled by Akka @@ -114,7 +115,6 @@ object Actor { def apply(x: Any) = throw new UnsupportedOperationException("Empty behavior apply()") } - private final val emptyBehaviourStack: Stack[Actor.Receive] = Stack.empty } /** @@ -284,27 +284,37 @@ trait Actor { // ==== INTERNAL IMPLEMENTATION DETAILS ==== // ========================================= + /** + * For Akka internal use only. + */ private[akka] final def apply(msg: Any) = { msg match { - case msg if behaviorStack.nonEmpty && behaviorStack.head.isDefinedAt(msg) ⇒ behaviorStack.head.apply(msg) - case msg if behaviorStack.isEmpty && processingBehavior.isDefinedAt(msg) ⇒ processingBehavior.apply(msg) - case unknown ⇒ unhandled(unknown) + case msg if behaviorStack.head.isDefinedAt(msg) ⇒ behaviorStack.head.apply(msg) + case unknown ⇒ unhandled(unknown) } } - private[this] val processingBehavior = receive //ProcessingBehavior is the original behavior - + /** + * For Akka internal use only. + */ private[akka] def pushBehavior(behavior: Receive): Unit = { behaviorStack = behaviorStack.push(behavior) } + /** + * For Akka internal use only. + */ private[akka] def popBehavior(): Unit = { - val stack = behaviorStack - if (stack.nonEmpty) behaviorStack = stack.pop + val original = behaviorStack + val popped = original.pop + behaviorStack = if (popped.isEmpty) original else popped } - private[akka] def clearBehaviorStack(): Unit = { behaviorStack = emptyBehaviourStack } + /** + * For Akka internal use only. 
+   */
+  private[akka] def clearBehaviorStack(): Unit = behaviorStack = Stack.empty[Receive].push(behaviorStack.last)
 
-  private var behaviorStack: Stack[PartialFunction[Any, Unit]] = emptyBehaviourStack
+  private var behaviorStack: Stack[Receive] = Stack.empty[Receive].push(receive)
 }
 
From 0c4e6cbe1d2466cacbf1245fef7bd34cc148b4a9 Mon Sep 17 00:00:00 2001
From: Patrik Nordwall
Date: Mon, 30 Jan 2012 14:49:50 +0100
Subject: [PATCH 18/94] removed unused import

---
 akka-actor/src/main/scala/akka/actor/Actor.scala | 1 -
 1 file changed, 1 deletion(-)

diff --git a/akka-actor/src/main/scala/akka/actor/Actor.scala b/akka-actor/src/main/scala/akka/actor/Actor.scala
index 9ed73d8945..1e6ea485fc 100644
--- a/akka-actor/src/main/scala/akka/actor/Actor.scala
+++ b/akka-actor/src/main/scala/akka/actor/Actor.scala
@@ -9,7 +9,6 @@ import scala.reflect.BeanProperty
 import scala.util.control.NoStackTrace
 import scala.collection.immutable.Stack
 import java.util.regex.Pattern
-import scala.annotation.tailrec
 
 /**
  * Marker trait to show which Messages are automatically handled by Akka
From 847a2e1885a9df36bf1f88677c3fc52b3a003316 Mon Sep 17 00:00:00 2001
From: Viktor Klang
Date: Mon, 30 Jan 2012 15:34:56 +0100
Subject: [PATCH 19/94] Adding support for the ForkJoinPool

---
 .../akka/dispatch/AbstractDispatcher.scala    | 30 +++++++++++++++++--
 1 file changed, 28 insertions(+), 2 deletions(-)

diff --git a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala
index 77b272d1a5..5bc5b7dc94 100644
--- a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala
+++ b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala
@@ -14,6 +14,7 @@ import akka.event.EventStream
 import com.typesafe.config.Config
 import akka.util.ReflectiveAccess
 import akka.serialization.SerializationExtension
+import akka.jsr166y.ForkJoinPool
 
 final case class Envelope(val message: Any, val sender: ActorRef)(system: ActorSystem) {
   if (message.isInstanceOf[AnyRef]) {
@@ -339,7 +340,7 @@ abstract class MessageDispatcherConfigurator(val config: Config, val prerequisit
     config.getString("executor") match {
       case null | "" ⇒ throw new IllegalArgumentException("""Missing "executor" in config file for dispatcher [%s]""".format(config.getString("id")))
       case "thread-pool-executor" ⇒ new ThreadPoolExecutorConfigurator(config.getConfig("thread-pool-executor"), prerequisites)
-      //case "fork-join-executor" => new ForkJoinExecutorConfigurator(config.getConfig("fork-join-executor"), prerequisites)
+      case "fork-join-executor" ⇒ new ForkJoinExecutorConfigurator(config.getConfig("fork-join-executor"), prerequisites)
       case fqcn ⇒
         val constructorSignature = Array[Class[_]](classOf[Config], classOf[DispatcherPrerequisites])
         ReflectiveAccess.createInstance[ExecutorServiceConfigurator](fqcn, constructorSignature, Array[AnyRef](config, prerequisites)) match {
@@ -359,7 +360,7 @@ class ThreadPoolExecutorConfigurator(config: Config, prerequisites: DispatcherPr
 
   val threadPoolConfig: ThreadPoolConfig = createThreadPoolConfigBuilder(config, prerequisites).config
 
-  def createThreadPoolConfigBuilder(config: Config, prerequisites: DispatcherPrerequisites): ThreadPoolConfigBuilder = {
+  protected def createThreadPoolConfigBuilder(config: Config, prerequisites: DispatcherPrerequisites): ThreadPoolConfigBuilder = {
     ThreadPoolConfigBuilder(ThreadPoolConfig())
       .setKeepAliveTime(Duration(config getMilliseconds "keep-alive-time", TimeUnit.MILLISECONDS))
       .setAllowCoreThreadTimeout(config getBoolean "allow-core-timeout")
@@ -380,3 +381,28 @@ class ThreadPoolExecutorConfigurator(config: Config, prerequisites: DispatcherPr
   def createExecutorServiceFactory(name: String, threadFactory: ThreadFactory): ExecutorServiceFactory =
     threadPoolConfig.createExecutorServiceFactory(name, threadFactory)
 }
+
+/*int parallelism,
+ ForkJoinWorkerThreadFactory factory,
+ Thread.UncaughtExceptionHandler handler,
+ boolean asyncMode*/
+
+class ForkJoinExecutorConfigurator(config: Config, prerequisites: DispatcherPrerequisites) extends ExecutorServiceConfigurator(config, prerequisites) {
+
+  def validate(t: ThreadFactory): ForkJoinPool.ForkJoinWorkerThreadFactory = t match {
+    case correct: ForkJoinPool.ForkJoinWorkerThreadFactory ⇒ correct
+    case x ⇒ throw new IllegalStateException("The prerequisite for the ForkJoinExecutorConfigurator is a ForkJoinPool.ForkJoinWorkerThreadFactory!")
+  }
+
+  class ForkJoinExecutorServiceFactory(val threadFactory: ForkJoinPool.ForkJoinWorkerThreadFactory,
+                                       val parallelism: Int) extends ExecutorServiceFactory {
+    def createExecutorService: ExecutorService = new ForkJoinPool(parallelism, threadFactory, MonitorableThreadFactory.doNothing, true)
+  }
+  final def createExecutorServiceFactory(name: String, threadFactory: ThreadFactory): ExecutorServiceFactory =
+    new ForkJoinExecutorServiceFactory(
+      validate(threadFactory),
+      ThreadPoolConfig.scaledPoolSize(
+        config.getInt("parallelism-min"),
+        config.getDouble("parallelism-factor"),
+        config.getInt("parallelism-max")))
+}
From 118202f8187aae431bcd268f5a2892efcf84b503 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jonas=20Bone=CC=81r?=
Date: Mon, 30 Jan 2012 16:12:11 +0100
Subject: [PATCH 20/94] Enabled zeromq extension for the zeromq tests

---
 .../src/test/scala/akka/zeromq/ConcurrentSocketActorSpec.scala | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/akka-zeromq/src/test/scala/akka/zeromq/ConcurrentSocketActorSpec.scala b/akka-zeromq/src/test/scala/akka/zeromq/ConcurrentSocketActorSpec.scala
index 983a2951c1..633e598e02 100644
--- a/akka-zeromq/src/test/scala/akka/zeromq/ConcurrentSocketActorSpec.scala
+++ b/akka-zeromq/src/test/scala/akka/zeromq/ConcurrentSocketActorSpec.scala
@@ -11,7 +11,7 @@ import akka.actor.{ Cancellable, Actor, Props, ActorRef }
 object ConcurrentSocketActorSpec {
   val config = """
     akka {
-      extensions = []
+      extensions = ["akka.zeromq.ZeroMQExtension"]
     }
   """
 }
From 12d8b5bf4bcf8e7535084e337b069b5a9ea6b482 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jonas=20Bone=CC=81r?=
Date: Tue, 24 Jan 2012 11:59:57 +0100
Subject: [PATCH 21/94] Changed copyright header to Typesafe.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Jonas Bonér
---
 .../src/main/scala/akka/actor/Scheduler.scala | 14 +++-----------
 1 file changed, 3 insertions(+), 11 deletions(-)

diff --git a/akka-actor/src/main/scala/akka/actor/Scheduler.scala b/akka-actor/src/main/scala/akka/actor/Scheduler.scala
index 72d429b450..eed0060e52 100644
--- a/akka-actor/src/main/scala/akka/actor/Scheduler.scala
+++ b/akka-actor/src/main/scala/akka/actor/Scheduler.scala
@@ -1,15 +1,7 @@
-/*
- * Copyright 2007 WorldWide Conferencing, LLC
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
+/**
+ * Copyright (C) 2009-2011 Typesafe Inc.
  */
+
 package akka.actor
 
 import akka.util.Duration
From 6cb887e1a58f1fcfb21dcd02bb950eab7fff4ba3 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jonas=20Bone=CC=81r?=
Date: Tue, 24 Jan 2012 12:00:53 +0100
Subject: [PATCH 22/94] Changed akka.util.Timer to use nanos and added a 'timeLeft' method.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Jonas Bonér
---
 .../src/main/scala/akka/util/Duration.scala | 17 +++++++++++------
 1 file changed, 11 insertions(+), 6 deletions(-)

diff --git a/akka-actor/src/main/scala/akka/util/Duration.scala b/akka-actor/src/main/scala/akka/util/Duration.scala
index 65d6e6148c..312d733904 100644
--- a/akka-actor/src/main/scala/akka/util/Duration.scala
+++ b/akka-actor/src/main/scala/akka/util/Duration.scala
@@ -17,21 +17,26 @@ class TimerException(message: String) extends RuntimeException(message)
  * import akka.util.duration._
  * import akka.util.Timer
  *
- * val timer = Timer(30.seconds)
+ * val timer = Timer(30 seconds)
  * while (timer.isTicking) { ... }
  *
 */
-case class Timer(duration: Duration, throwExceptionOnTimeout: Boolean = false) {
-  val startTimeInMillis = System.currentTimeMillis
-  val timeoutInMillis = duration.toMillis
+case class Timer(timeout: Duration, throwExceptionOnTimeout: Boolean = false) {
+  val startTime = Duration(System.nanoTime, NANOSECONDS)
+
+  def timeLeft: Duration = {
+    val time = timeout.toNanos - (System.nanoTime - startTime.toNanos)
+    if (time <= 0) Duration(0, NANOSECONDS)
+    else Duration(time, NANOSECONDS)
+  }
 
   /**
    * Returns true while the timer is ticking. After that it either throws an exception or
    * returns false, depending on whether the 'throwExceptionOnTimeout' argument is true or false.
    */
   def isTicking: Boolean = {
-    if (!(timeoutInMillis > (System.currentTimeMillis - startTimeInMillis))) {
-      if (throwExceptionOnTimeout) throw new TimerException("Time out after " + duration)
+    if (!(timeout.toNanos > (System.nanoTime - startTime.toNanos))) {
+      if (throwExceptionOnTimeout) throw new TimerException("Time out after " + timeout)
       else false
     } else true
   }
From cef742c5dbb1bde60d16aee6d8a30f58051d3e33 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jonas=20Bone=CC=81r?=
Date: Tue, 24 Jan 2012 12:01:42 +0100
Subject: [PATCH 23/94] Fixed ugly logging in NettyRemoteSupport (plus misc minor formatting).
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Jonas Bonér
---
 .../main/scala/akka/remote/netty/NettyRemoteSupport.scala | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala
index e9fe83dd7e..a225dd7aa8 100644
--- a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala
+++ b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala
@@ -157,7 +157,10 @@ class NettyRemoteTransport(val remoteSettings: RemoteSettings, val system: Actor
   def unbindClient(remoteAddress: Address): Unit = {
     clientsLock.writeLock().lock()
     try {
-      remoteClients.foreach { case (k, v) ⇒ if (v.isBoundTo(remoteAddress)) { v.shutdown(); remoteClients.remove(k) } }
+      remoteClients foreach {
+        case (k, v) ⇒
+          if (v.isBoundTo(remoteAddress)) { v.shutdown(); remoteClients.remove(k) }
+      }
     } finally {
       clientsLock.writeLock().unlock()
     }
@@ -227,7 +230,8 @@ class DefaultDisposableChannelGroup(name: String) extends DefaultChannelGroup(na
   override def close(): ChannelGroupFuture = {
     guard.writeLock().lock()
     try {
-      if (open.getAndSet(false)) super.close() else throw new IllegalStateException("ChannelGroup already closed, cannot add new channel")
+      if (open.getAndSet(false)) super.close()
+      else throw new IllegalStateException("ChannelGroup already closed, cannot add new channel")
     } finally {
       guard.writeLock().unlock()
     }
From d7ae9f275c618c64354b232e27dfb58b3dbbce85 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jonas=20Bone=CC=81r?=
Date: Tue, 24 Jan 2012 12:03:38 +0100
Subject: [PATCH 24/94] Minor code and ScalaDoc formatting changes.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Jonas Bonér
---
 .../src/main/scala/akka/actor/ActorSystem.scala  |  1 +
 .../src/main/scala/akka/remote/VectorClock.scala | 12 +++++++-----
 2 files changed, 8 insertions(+), 5 deletions(-)

diff --git a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala
index e3235a5cec..c7a868ffd9 100644
--- a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala
+++ b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala
@@ -1,6 +1,7 @@
 /**
  * Copyright (C) 2009-2012 Typesafe Inc.
  */
+
 package akka.actor
 
 import akka.config.ConfigurationException
diff --git a/akka-remote/src/main/scala/akka/remote/VectorClock.scala b/akka-remote/src/main/scala/akka/remote/VectorClock.scala
index 9da70111e9..fde9bb84e7 100644
--- a/akka-remote/src/main/scala/akka/remote/VectorClock.scala
+++ b/akka-remote/src/main/scala/akka/remote/VectorClock.scala
@@ -12,8 +12,8 @@ class VectorClockException(message: String) extends AkkaException(message)
  * Representation of a Vector-based clock (counting clock), inspired by Lamport logical clocks.
  *
  * Reference:
- *   Leslie Lamport (1978). "Time, clocks, and the ordering of events in a distributed system". Communications of the ACM 21 (7): 558-565.
- *   Friedemann Mattern (1988). "Virtual Time and Global States of Distributed Systems". Workshop on Parallel and Distributed Algorithms: pp. 215-226
+ *   1) Leslie Lamport (1978). "Time, clocks, and the ordering of events in a distributed system". Communications of the ACM 21 (7): 558-565.
+ *   2) Friedemann Mattern (1988). "Virtual Time and Global States of Distributed Systems". Workshop on Parallel and Distributed Algorithms: pp. 215-226
  */
 case class VectorClock(
   versions: Vector[VectorClock.Entry] = Vector.empty[VectorClock.Entry],
@@ -55,9 +55,11 @@ object VectorClock {
   /**
    * The result of comparing two vector clocks.
    * Either:
-   * 1) v1 is BEFORE v2
-   * 2) v1 is AFTER t2
-   * 3) v1 happens CONCURRENTLY to v2
+   * {{
+   * 1) v1 is BEFORE v2
+   * 2) v1 is AFTER v2
+   * 3) v1 happens CONCURRENTLY to v2
+   * }}
    */
   sealed trait Ordering
   case object Before extends Ordering
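
The three outcomes documented in the Ordering scaladoc above are easy to reproduce on a toy model. The following self-contained sketch compares two version maps; it deliberately does not use akka.remote.VectorClock's actual API:

// Toy model over Map[String, Long] illustrating Before/After/Concurrent.
object VectorClockDemo extends App {
  type Versions = Map[String, Long]

  def compare(v1: Versions, v2: Versions): String = {
    val nodes = v1.keySet ++ v2.keySet
    val someLess    = nodes.exists(n ⇒ v1.getOrElse(n, 0L) < v2.getOrElse(n, 0L))
    val someGreater = nodes.exists(n ⇒ v1.getOrElse(n, 0L) > v2.getOrElse(n, 0L))
    (someLess, someGreater) match {
      case (true, false)  ⇒ "Before"     // every entry of v1 <= v2, some strictly less
      case (false, true)  ⇒ "After"      // every entry of v1 >= v2, some strictly greater
      case (true, true)   ⇒ "Concurrent" // causally unrelated updates on both sides
      case (false, false) ⇒ "Same"       // identical histories
    }
  }

  println(compare(Map("A" -> 1L), Map("A" -> 2L)))            // Before
  println(compare(Map("A" -> 2L, "B" -> 1L), Map("A" -> 2L))) // After
  println(compare(Map("A" -> 1L), Map("B" -> 1L)))            // Concurrent
}
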
From 465c29107d04445a52a35b3ba4097b6ab4aa2e09 Mon Sep 17 00:00:00 2001
From: Viktor Klang
Date: Mon, 30 Jan 2012 16:34:25 +0100
Subject: [PATCH 25/94] Migrating tests to use the new config for dispatchers

---
 .../scala/akka/actor/ConsistencySpec.scala    | 15 ++++++----
 .../actor/LocalActorRefProviderSpec.scala     |  7 +++--
 .../scala/akka/actor/TypedActorSpec.scala     | 11 ++++---
 .../workbench/BenchmarkConfig.scala           | 28 +++++++++++++-----
 .../routing/ConfiguredLocalRoutingSpec.scala  |  7 +++--
 akka-actor/src/main/resources/reference.conf  | 14 ++++-----
 .../akka/dispatch/AbstractDispatcher.scala    |  9 +++---
 .../akka/dispatch/ThreadPoolBuilder.scala     | 11 +++----
 .../docs/dispatcher/DispatcherDocSpec.scala   | 29 ++++++++++++++-----
 .../test/scala/akka/testkit/AkkaSpec.scala    | 15 ++++++----
 .../transactor/CoordinatedIncrementSpec.scala |  7 +++--
 11 files changed, 96 insertions(+), 57 deletions(-)

diff --git a/akka-actor-tests/src/test/scala/akka/actor/ConsistencySpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ConsistencySpec.scala
index 981ce89ef6..6f8639f4a4 100644
--- a/akka-actor-tests/src/test/scala/akka/actor/ConsistencySpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/actor/ConsistencySpec.scala
@@ -9,12 +9,15 @@ object ConsistencySpec {
     consistency-dispatcher {
       throughput = 1
       keep-alive-time = 1 ms
-      core-pool-size-min = 10
-      core-pool-size-max = 10
-      max-pool-size-min = 10
-      max-pool-size-max = 10
-      task-queue-type = array
-      task-queue-size = 7
+      executor = "thread-pool-executor"
+      thread-pool-executor {
+        core-pool-size-min = 10
+        core-pool-size-max = 10
+        max-pool-size-min = 10
+        max-pool-size-max = 10
+        task-queue-type = array
+        task-queue-size = 7
+      }
     }
   """
   class CacheMisaligned(var value: Long, var padding1: Long, var padding2: Long, var padding3: Int) //Vars, no final fences
diff --git a/akka-actor-tests/src/test/scala/akka/actor/LocalActorRefProviderSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/LocalActorRefProviderSpec.scala
index 82cd08fa77..5ebd8ff565 100644
--- a/akka-actor-tests/src/test/scala/akka/actor/LocalActorRefProviderSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/actor/LocalActorRefProviderSpec.scala
@@ -14,8 +14,11 @@ object LocalActorRefProviderSpec {
   akka {
     actor {
       default-dispatcher {
-        core-pool-size-min = 16
-        core-pool-size-max = 16
+        executor = "thread-pool-executor"
+        thread-pool-executor {
+          core-pool-size-min = 16
+          core-pool-size-max = 16
+        }
       }
     }
   }
diff --git a/akka-actor-tests/src/test/scala/akka/actor/TypedActorSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/TypedActorSpec.scala
index 49b37cc506..b83fe78338 100644
--- a/akka-actor-tests/src/test/scala/akka/actor/TypedActorSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/actor/TypedActorSpec.scala
@@ -25,10 +25,13 @@ object TypedActorSpec {
   val config = """
     pooled-dispatcher {
       type = BalancingDispatcher
-      core-pool-size-min = 60
-      core-pool-size-max = 60
-      max-pool-size-min = 60
-      max-pool-size-max = 60
+      executor = "thread-pool-executor"
+      thread-pool-executor {
+        core-pool-size-min =
60 + max-pool-size-max = 60 + } } """ diff --git a/akka-actor-tests/src/test/scala/akka/performance/workbench/BenchmarkConfig.scala b/akka-actor-tests/src/test/scala/akka/performance/workbench/BenchmarkConfig.scala index 11ed21c9aa..65294d014a 100644 --- a/akka-actor-tests/src/test/scala/akka/performance/workbench/BenchmarkConfig.scala +++ b/akka-actor-tests/src/test/scala/akka/performance/workbench/BenchmarkConfig.scala @@ -21,19 +21,28 @@ object BenchmarkConfig { useDummyOrderbook = false client-dispatcher { - core-pool-size-min = ${benchmark.maxClients} - core-pool-size-max = ${benchmark.maxClients} + executor = "thread-pool-executor" + thread-pool-executor { + core-pool-size-min = ${benchmark.maxClients} + core-pool-size-max = ${benchmark.maxClients} + } } destination-dispatcher { - core-pool-size-min = ${benchmark.maxClients} - core-pool-size-max = ${benchmark.maxClients} + executor = "thread-pool-executor" + thread-pool-executor { + core-pool-size-min = ${benchmark.maxClients} + core-pool-size-max = ${benchmark.maxClients} + } } high-throughput-dispatcher { throughput = 10000 - core-pool-size-min = ${benchmark.maxClients} - core-pool-size-max = ${benchmark.maxClients} + executor = "thread-pool-executor" + thread-pool-executor { + core-pool-size-min = ${benchmark.maxClients} + core-pool-size-max = ${benchmark.maxClients} + } } pinned-dispatcher { @@ -42,8 +51,11 @@ object BenchmarkConfig { latency-dispatcher { throughput = 1 - core-pool-size-min = ${benchmark.maxClients} - core-pool-size-max = ${benchmark.maxClients} + executor = "thread-pool-executor" + thread-pool-executor { + core-pool-size-min = ${benchmark.maxClients} + core-pool-size-max = ${benchmark.maxClients} + } } } """) diff --git a/akka-actor-tests/src/test/scala/akka/routing/ConfiguredLocalRoutingSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/ConfiguredLocalRoutingSpec.scala index f2707e042c..62800b8830 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/ConfiguredLocalRoutingSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/ConfiguredLocalRoutingSpec.scala @@ -13,8 +13,11 @@ object ConfiguredLocalRoutingSpec { akka { actor { default-dispatcher { - core-pool-size-min = 8 - core-pool-size-max = 16 + executor = "thread-pool-executor" + thread-pool-executor { + core-pool-size-min = 8 + core-pool-size-max = 16 + } } } } diff --git a/akka-actor/src/main/resources/reference.conf b/akka-actor/src/main/resources/reference.conf index ffaedde045..74f7b5b245 100644 --- a/akka-actor/src/main/resources/reference.conf +++ b/akka-actor/src/main/resources/reference.conf @@ -158,8 +158,8 @@ akka { # parameters type = "Dispatcher" - #Which kind of ExecutorService to use for this dispatcher - #Valid options: "thread-pool-executor" requires a "thread-pool-executor" section + # Which kind of ExecutorService to use for this dispatcher + # Valid options: "thread-pool-executor" requires a "thread-pool-executor" section # "fork-join-executor" requires a "fork-join-executor" section # A FQCN of a class extending ExecutorServiceConfigurator executor = "thread-pool-executor" @@ -169,13 +169,13 @@ akka { # Keep alive time for threads keep-alive-time = 60s - # minimum number of threads to cap factor-based core number to + # Min number of threads to cap factor-based core number to core-pool-size-min = 8 # No of core threads ... 
ceil(available processors * factor) core-pool-size-factor = 3.0 - # maximum number of threads to cap factor-based number to + # Max number of threads to cap factor-based number to core-pool-size-max = 64 # Hint: max-pool-size is only used for bounded task queues @@ -185,7 +185,7 @@ akka { # Max no of threads ... ceil(available processors * factor) max-pool-size-factor = 3.0 - # maximum number of threads to cap factor-based max number to + # Max number of threads to cap factor-based max number to max-pool-size-max = 64 # Specifies the bounded capacity of the task queue (< 1 == unbounded) @@ -201,13 +201,13 @@ akka { # This will be used if you have set "executor = "fork-join-executor"" fork-join-executor { - # minimum number of threads to cap factor-based parallelism number to + # Min number of threads to cap factor-based parallelism number to parallelism-min = 8 # Parallelism (threads) ... ceil(available processors * factor) parallelism-factor = 3.0 - # maximum number of threads to cap factor-based parallelism number to + # Max number of threads to cap factor-based parallelism number to parallelism-max = 64 } diff --git a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala index 66dd0385c9..e3e312b720 100644 --- a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala @@ -338,17 +338,16 @@ abstract class MessageDispatcherConfigurator(val config: Config, val prerequisit def configureExecutor(): ExecutorServiceConfigurator = { config.getString("executor") match { - case null | "" ⇒ throw new IllegalArgumentException("""Missing "executor" in config file for dispatcher [%s]""".format(config.getString("id"))) - case "thread-pool-executor" ⇒ new ThreadPoolExecutorConfigurator(config.getConfig("thread-pool-executor"), prerequisites) - case "fork-join-executor" ⇒ new ForkJoinExecutorConfigurator(config.getConfig("fork-join-executor"), prerequisites) + case null | "" | "thread-pool-executor" ⇒ new ThreadPoolExecutorConfigurator(config.getConfig("thread-pool-executor"), prerequisites) + case "fork-join-executor" ⇒ new ForkJoinExecutorConfigurator(config.getConfig("fork-join-executor"), prerequisites) case fqcn ⇒ val constructorSignature = Array[Class[_]](classOf[Config], classOf[DispatcherPrerequisites]) ReflectiveAccess.createInstance[ExecutorServiceConfigurator](fqcn, constructorSignature, Array[AnyRef](config, prerequisites)) match { case Right(instance) ⇒ instance case Left(exception) ⇒ throw new IllegalArgumentException( - ("Cannot instantiate ExecutorServiceConfigurator (\"executor = [%s]\"), defined in [%s], " + - "make sure it has an accessible constructor with a [%s,%s] signature") + ("""Cannot instantiate ExecutorServiceConfigurator ("executor = [%s]"), defined in [%s], + make sure it has an accessible constructor with a [%s,%s] signature""") .format(fqcn, config.getString("id"), classOf[Config], classOf[DispatcherPrerequisites]), exception) } } diff --git a/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala b/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala index 369f3cdaf2..4612fdca1f 100644 --- a/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala +++ b/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala @@ -160,14 +160,11 @@ case class MonitorableThreadFactory(name: String, extends ThreadFactory with ForkJoinPool.ForkJoinWorkerThreadFactory { protected val counter = new 
AtomicLong - def newThread(pool: ForkJoinPool): ForkJoinWorkerThread = { - val t = ForkJoinPool.defaultForkJoinWorkerThreadFactory.newThread(pool) - t.setDaemon(daemonic) - t - } + def newThread(pool: ForkJoinPool): ForkJoinWorkerThread = wire(ForkJoinPool.defaultForkJoinWorkerThreadFactory.newThread(pool)) - def newThread(runnable: Runnable) = { - val t = new Thread(runnable, name + counter.incrementAndGet()) + def newThread(runnable: Runnable): Thread = wire(new Thread(runnable, name + counter.incrementAndGet())) + + protected def wire[T <: Thread](t: T): T = { t.setUncaughtExceptionHandler(exceptionHandler) t.setDaemon(daemonic) contextClassLoader foreach (t.setContextClassLoader(_)) diff --git a/akka-docs/scala/code/akka/docs/dispatcher/DispatcherDocSpec.scala b/akka-docs/scala/code/akka/docs/dispatcher/DispatcherDocSpec.scala index d0e0945fe8..0df4e3ca5b 100644 --- a/akka-docs/scala/code/akka/docs/dispatcher/DispatcherDocSpec.scala +++ b/akka-docs/scala/code/akka/docs/dispatcher/DispatcherDocSpec.scala @@ -22,12 +22,17 @@ object DispatcherDocSpec { my-dispatcher { # Dispatcher is the name of the event-based dispatcher type = Dispatcher - # minimum number of threads to cap factor-based core number to - core-pool-size-min = 2 - # No of core threads ... ceil(available processors * factor) - core-pool-size-factor = 2.0 - # maximum number of threads to cap factor-based number to - core-pool-size-max = 10 + # What kind of ExecutorService to use + executor = "thread-pool-executor" + # Configuration for the thread pool + thread-pool-executor { + # minimum number of threads to cap factor-based core number to + core-pool-size-min = 2 + # No of core threads ... ceil(available processors * factor) + core-pool-size-factor = 2.0 + # maximum number of threads to cap factor-based number to + core-pool-size-max = 10 + } # Throughput defines the number of messages that are processed in a batch before the # thread is returned to the pool. Set to 1 for as fair as possible. 
throughput = 100 @@ -37,8 +42,11 @@ object DispatcherDocSpec { //#my-bounded-config my-dispatcher-bounded-queue { type = Dispatcher - core-pool-size-factor = 8.0 - max-pool-size-factor = 16.0 + executor = "thread-pool-executor" + thread-pool-executor { + core-pool-size-factor = 8.0 + max-pool-size-factor = 16.0 + } # Specifies the bounded capacity of the mailbox queue mailbox-capacity = 100 throughput = 3 @@ -48,6 +56,11 @@ object DispatcherDocSpec { //#my-balancing-config my-balancing-dispatcher { type = BalancingDispatcher + executor = "thread-pool-executor" + thread-pool-executor { + core-pool-size-factor = 8.0 + max-pool-size-factor = 16.0 + } } //#my-balancing-config diff --git a/akka-testkit/src/test/scala/akka/testkit/AkkaSpec.scala b/akka-testkit/src/test/scala/akka/testkit/AkkaSpec.scala index c8db05b171..20f7e8b16a 100644 --- a/akka-testkit/src/test/scala/akka/testkit/AkkaSpec.scala +++ b/akka-testkit/src/test/scala/akka/testkit/AkkaSpec.scala @@ -29,12 +29,15 @@ object AkkaSpec { stdout-loglevel = "WARNING" actor { default-dispatcher { - core-pool-size-factor = 2 - core-pool-size-min = 8 - core-pool-size-max = 8 - max-pool-size-factor = 2 - max-pool-size-min = 8 - max-pool-size-max = 8 + executor = "thread-pool-executor" + thread-pool-executor { + core-pool-size-factor = 2 + core-pool-size-min = 8 + core-pool-size-max = 8 + max-pool-size-factor = 2 + max-pool-size-min = 8 + max-pool-size-max = 8 + } } } } diff --git a/akka-transactor/src/test/scala/akka/transactor/CoordinatedIncrementSpec.scala b/akka-transactor/src/test/scala/akka/transactor/CoordinatedIncrementSpec.scala index 265d4a9eaf..9c019a56a5 100644 --- a/akka-transactor/src/test/scala/akka/transactor/CoordinatedIncrementSpec.scala +++ b/akka-transactor/src/test/scala/akka/transactor/CoordinatedIncrementSpec.scala @@ -20,8 +20,11 @@ object CoordinatedIncrement { akka { actor { default-dispatcher { - core-pool-size-min = 5 - core-pool-size-max = 16 + executor = "thread-pool-executor" + thread-pool-executor { + core-pool-size-min = 5 + core-pool-size-max = 16 + } } } } From 98d4864e0498b4694bbdd1fb10398cbc50d7edb5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Tue, 24 Jan 2012 12:09:32 +0100 Subject: [PATCH 26/94] Added initial join cluster through seed nodes phase to Gossiper plus misc other fixes and additions. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - Added JoinCluster phase (connect and get initial data from seed nodes) to Gossiper. - Added '/system/cluster' daemon actor to Gossiper responsible for gossip communication. - Added various config options to Gossiper. - Fixed misc bugs in Gossiper. 
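For illustration, the join phase added here is driven entirely by the new configuration keys read in RemoteSettings. A minimal sketch of enabling it on a node (the seed address is made up; the 30s/5s/1s values are only the defaults that a later patch in this series adds to reference.conf):

  akka.cluster {
    use-cluster = on
    seed-nodes = ["akka://localhost:5551"]
    seed-node-connection-timeout = 30s
    max-time-to-retry-joining-cluster = 30s
    gossip {
      initialDelay = 5s
      frequency = 1s
    }
  }

On startup the Gossiper then connects to one of the seed nodes, asks it to join via the '/system/cluster' daemon, and seeds its own state with the Gossip it gets back.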
Signed-off-by: Jonas Bonér --- .../src/main/scala/akka/remote/Gossiper.scala | 249 ++++++++++++++---- .../scala/akka/remote/RemoteSettings.scala | 17 +- 2 files changed, 206 insertions(+), 60 deletions(-) diff --git a/akka-remote/src/main/scala/akka/remote/Gossiper.scala b/akka-remote/src/main/scala/akka/remote/Gossiper.scala index d99414f9c9..20e803c7eb 100644 --- a/akka-remote/src/main/scala/akka/remote/Gossiper.scala +++ b/akka-remote/src/main/scala/akka/remote/Gossiper.scala @@ -7,21 +7,24 @@ package akka.remote import akka.actor._ import akka.actor.Status._ import akka.event.Logging -import akka.util.Duration +import akka.util._ +import akka.dispatch.Await import akka.config.ConfigurationException import java.util.concurrent.atomic.AtomicReference -import java.util.concurrent.TimeUnit.SECONDS +import java.util.concurrent.TimeUnit._ +import java.util.concurrent.TimeoutException import java.security.SecureRandom import System.{ currentTimeMillis ⇒ newTimestamp } import scala.collection.immutable.Map import scala.annotation.tailrec -import java.util.concurrent.TimeoutException import akka.dispatch.Await import akka.pattern.ask +import com.google.protobuf.ByteString + /** * Interface for node membership change listener. */ @@ -78,6 +81,37 @@ case class Gossip( */ // ====== END - NEW GOSSIP IMPLEMENTATION ====== +/** + * Interface for node membership change listener. + */ +trait NodeMembershipChangeListener { + def nodeConnected(node: ParsedTransportAddress) + def nodeDisconnected(node: ParsedTransportAddress) +} + +sealed trait ClusterMessage extends Serializable + +case object JoinCluster extends ClusterMessage + +/** + * Represents the node state to gossip, versioned by a vector clock. + */ +case class Gossip( + version: VectorClock, + node: ParsedTransportAddress, + availableNodes: Set[ParsedTransportAddress] = Set.empty[ParsedTransportAddress], + unavailableNodes: Set[ParsedTransportAddress] = Set.empty[ParsedTransportAddress]) extends ClusterMessage + +class ClusterDaemon(system: ActorSystem, gossiper: Gossiper) extends Actor { + val log = Logging(system, "ClusterDaemon") + + def receive = { + case JoinCluster ⇒ sender ! gossiper.latestGossip + case gossip: Gossip ⇒ gossiper.tell(gossip) + case unknown ⇒ log.error("Unknown message sent to cluster daemon [" + unknown + "]") + } +} + /** * This module is responsible for Gossiping cluster information. The abstraction maintains the list of live * and dead nodes. Periodically i.e. every 1 second this module chooses a random node and initiates a round * of Gossip with it. Whenever it gets gossip updates it updates the Failure Detector with the liveness * information. *<p/> * During each of these runs the node initiates gossip exchange according to following rules (as defined in the * Cassandra documentation [http://wiki.apache.org/cassandra/ArchitectureGossip]: *<p/> *<pre> *   1) Gossip to random live node (if any)<br/> *   2) Gossip to random unreachable node with certain probability depending on number of unreachable and live nodes<br/> *   3) If the node gossiped to at (1) was not seed, or the number of live nodes is less than number of seeds,<br/> *       gossip to random seed with certain probability depending on number of unreachable, seed and live nodes.<br/> * </pre> */ -class Gossiper(remote: RemoteActorRefProvider, system: ActorSystemImpl) { +case class Gossiper(remote: Remote, system: ActorSystemImpl) { /** * Represents the state for this Gossiper. 
Implemented using optimistic lockless concurrency, @@ -103,35 +137,63 @@ class Gossiper(remote: RemoteActorRefProvider, system: ActorSystemImpl) { currentGossip: Gossip, nodeMembershipChangeListeners: Set[NodeMembershipChangeListener] = Set.empty[NodeMembershipChangeListener]) + // configuration private val remoteSettings = remote.remoteSettings private val serialization = remote.serialization - private val log = Logging(system, "Gossiper") private val failureDetector = remote.failureDetector - private val connectionManager = new RemoteConnectionManager(system, remote, Map.empty[Address, ActorRef]) - private val seeds = { + private val initalDelayForGossip = remoteSettings.InitalDelayForGossip + private val gossipFrequency = remoteSettings.GossipFrequency + + implicit val seedNodeConnectionTimeout = remoteSettings.SeedNodeConnectionTimeout + implicit val defaultTimeout = Timeout(remoteSettings.RemoteSystemDaemonAckTimeout) + + // seed nodes + private val seeds: Set[ParsedTransportAddress] = { + val seeds = remoteSettings.SeedNodes flatMap { + case uta: UnparsedTransportAddress ⇒ + uta.parse(remote.transports) match { + case pta: ParsedTransportAddress ⇒ Some(pta) + case _ ⇒ None + } + case _ ⇒ None + } if (remoteSettings.SeedNodes.isEmpty) throw new ConfigurationException( "At least one seed node must be defined in the configuration [akka.cluster.seed-nodes]") else remoteSettings.SeedNodes } - private val address = remote.transport.address - private val nodeFingerprint = address.## - + private val log = Logging(system, "Gossiper") private val random = SecureRandom.getInstance("SHA1PRNG") - private val initalDelayForGossip = remoteSettings.InitialDelayForGossip - private val gossipFrequency = remoteSettings.GossipFrequency - + private val connectionManager = new RemoteConnectionManager(system, remote, Map.empty[Address, ActorRef]) + private val clusterDaemon = system.systemActorOf(Props(new ClusterDaemon(system, this)), "cluster") private val state = new AtomicReference[State](State(currentGossip = newGossip())) - { - // start periodic gossip and cluster scrutinization - default is run them every second with 1/2 second in between - system.scheduler.schedule(Duration(initalDelayForGossip.toSeconds, SECONDS), Duration(gossipFrequency.toSeconds, SECONDS))(initateGossip()) - system.scheduler.schedule(Duration(initalDelayForGossip.toSeconds, SECONDS), Duration(gossipFrequency.toSeconds, SECONDS))(scrutinize()) - } + log.info("Starting cluster Gossiper...") + + // join the cluster by connecting to one of the seed nodes and retrieving the current cluster state (Gossip) + joinCluster(Timer(remoteSettings.MaxTimeToRetryJoiningCluster)) + + // start periodic gossip and cluster scrutinization - default is run them every second with 1/2 second in between + val initateGossipCanceller = system.scheduler.schedule( + Duration(initalDelayForGossip.toSeconds, SECONDS), Duration(gossipFrequency.toSeconds, SECONDS))(initateGossip()) + val scrutinizeCanceller = system.scheduler.schedule( + Duration(initalDelayForGossip.toSeconds, SECONDS), Duration(gossipFrequency.toSeconds, SECONDS))(scrutinize()) /** - * Tell the gossiper some gossip news. + * Shuts down all connections to other nodes, the cluster daemon and the periodic gossip and cleanup tasks. + */ + def shutdown() { + connectionManager.shutdown() + system.stop(clusterDaemon) + initateGossipCanceller.cancel() + scrutinizeCanceller.cancel() + } + + def latestGossip: Gossip = state.get.currentGossip + + /** + * Tell the gossiper some gossip. 
*/ @tailrec final def tell(newGossip: Gossip) { val gossipingNode = newGossip.node failureDetector heartbeat gossipingNode // update heartbeat in failure detector val oldState = state.get val latestGossip = latestVersionOf(newGossip, oldState.currentGossip) - val oldAvailableNodes = latestGossip.availableNodes - val oldUnavailableNodes = latestGossip.unavailableNodes + val latestAvailableNodes = latestGossip.availableNodes + val latestUnavailableNodes = latestGossip.unavailableNodes - if (!(oldAvailableNodes contains gossipingNode) && !(oldUnavailableNodes contains gossipingNode)) { + if (!(latestAvailableNodes contains gossipingNode) && !(latestUnavailableNodes contains gossipingNode)) { // we have a new node - val newGossip = latestGossip copy (availableNodes = oldAvailableNodes + gossipingNode) + val newGossip = latestGossip copy (availableNodes = latestAvailableNodes + gossipingNode) val newState = oldState copy (currentGossip = incrementVersionForGossip(newGossip)) // if we won the race then update else try again if (!state.compareAndSet(oldState, newState)) tell(newGossip) // recur else { // create connections for all new nodes in the latest gossip - for { - node ← oldAvailableNodes - if connectionManager.connectionFor(node).isEmpty - } { - val connectionFactory = () ⇒ system.actorFor(RootActorPath(gossipingNode) / "remote") - connectionManager.putIfAbsent(node, connectionFactory) // create a new remote connection to the new node + (latestAvailableNodes + gossipingNode) foreach { node ⇒ + setUpConnectionToNode(node) oldState.nodeMembershipChangeListeners foreach (_ nodeConnected node) // notify listeners about the new nodes } } - } else if (oldUnavailableNodes contains gossipingNode) { + } else if (latestUnavailableNodes contains gossipingNode) { // gossip from an old former dead node - val newUnavailableNodes = oldUnavailableNodes - gossipingNode - val newAvailableNodes = oldAvailableNodes + gossipingNode + val newUnavailableNodes = latestUnavailableNodes - gossipingNode + val newAvailableNodes = latestAvailableNodes + gossipingNode val newGossip = latestGossip copy (availableNodes = newAvailableNodes, unavailableNodes = newUnavailableNodes) val newState = oldState copy (currentGossip = incrementVersionForGossip(newGossip)) @@ -178,6 +236,9 @@ } } + /** + * Registers a listener to subscribe to cluster membership changes. + */ @tailrec final def registerListener(listener: NodeMembershipChangeListener) { val oldState = state.get @@ -186,6 +247,9 @@ + /** + * Unsubscribes from cluster membership changes. + */ @tailrec final def unregisterListener(listener: NodeMembershipChangeListener) { val oldState = state.get @@ -194,6 +258,67 @@ + /** + * Sets up remote connections to all the nodes in the argument list. 
+ */ + private def connectToNodes(nodes: Seq[ParsedTransportAddress]) { + nodes foreach { node ⇒ + setUpConnectionToNode(node) + state.get.nodeMembershipChangeListeners foreach (_ nodeConnected node) // notify listeners about the new nodes + } + } + + // FIXME should shuffle list randomly before start traversing to avoid connecting to some node on every node + @tailrec + final private def connectToRandomNodeOf(nodes: Seq[ParsedTransportAddress]): ActorRef = { + nodes match { + case node :: rest ⇒ + setUpConnectionToNode(node) match { + case Some(connection) ⇒ connection + case None ⇒ connectToRandomNodeOf(rest) // recur and try the next node + } + case Nil ⇒ + throw new RemoteConnectionException( + "Could not establish connection to any of the nodes in the argument list") + } + } + + /** + * Joins the cluster by connecting to one of the seed nodes and retrieving the current cluster state (Gossip). + */ + private def joinCluster(timer: Timer) { + val seedNodes = seedNodesWithoutMyself // filter out myself + + if (!seedNodes.isEmpty) { // if we have seed nodes to contact + connectToNodes(seedNodes) + + try { + log.info("Trying to join cluster through one of the seed nodes [{}]", seedNodes.mkString(", ")) + + Await.result(connectToRandomNodeOf(seedNodes) ? JoinCluster, seedNodeConnectionTimeout) match { + case initialGossip: Gossip ⇒ + // just sets/overwrites the state/gossip regardless of what it was before + // since it should be treated as the initial state + state.set(state.get copy (currentGossip = initialGossip)) + log.debug("Received initial gossip [{}] from seed node", initialGossip) + + case unknown ⇒ + throw new IllegalStateException("Expected initial gossip from seed, received [" + unknown + "]") + } + } catch { + case e: Exception ⇒ + log.error( + "Could not join cluster through any of the seed nodes - retrying for another {} seconds", + timer.timeLeft.toSeconds) + + if (timer.timeLeft.toMillis > 0) joinCluster(timer) // recur - retry joining the cluster + else throw new RemoteConnectionException( + "Could not join cluster (any of the seed nodes) - giving up after trying for " + + timer.timeout.toSeconds + " seconds") + } + } + } + /** * Initates a new round of gossip. */ @@ -209,47 +334,49 @@ class Gossiper(remote: RemoteActorRefProvider, system: ActorSystemImpl) { // 1. gossip to alive nodes val gossipedToSeed = - if (oldAvailableNodesSize > 0) gossipTo(oldAvailableNodes) + if (oldAvailableNodesSize > 0) gossipToRandomNodeOf(oldAvailableNodes) else false // 2. gossip to dead nodes if (oldUnavailableNodesSize > 0) { val probability: Double = oldUnavailableNodesSize / (oldAvailableNodesSize + 1) - if (random.nextDouble() < probability) gossipTo(oldUnavailableNodes) + if (random.nextDouble() < probability) gossipToRandomNodeOf(oldUnavailableNodes) } // 3. gossip to a seed for facilitating partition healing - if ((!gossipedToSeed || oldAvailableNodesSize < 1) && (seeds.head != address)) { - if (oldAvailableNodesSize == 0) gossipTo(seeds) + if ((!gossipedToSeed || oldAvailableNodesSize < 1) && (seeds.head != remoteAddress)) { + if (oldAvailableNodesSize == 0) gossipToRandomNodeOf(seeds) else { val probability = 1.0 / oldAvailableNodesSize + oldUnavailableNodesSize - if (random.nextDouble() <= probability) gossipTo(seeds) + if (random.nextDouble() <= probability) gossipToRandomNodeOf(seeds) } } } /** - * Gossips set of nodes passed in as argument. Returns 'true' if it gossiped to a "seed" node. + * Gossips to a random node in the set of nodes passed in as argument. 
+ * + * @return 'true' if it gossiped to a "seed" node. */ - private def gossipTo(nodes: Set[Address]): Boolean = { - val peers = nodes filter (_ != address) // filter out myself + private def gossipToRandomNodeOf(nodes: Set[ParsedTransportAddress]): Boolean = { + val peers = nodes filter (_ != remoteAddress) // filter out myself val peer = selectRandomNode(peers) val oldState = state.get val oldGossip = oldState.currentGossip - val connection = connectionManager.connectionFor(peer).getOrElse( - throw new IllegalStateException("Connection for [" + peer + "] is not set up")) - - try { - val t = remoteSettings.RemoteSystemDaemonAckTimeout - Await.result(connection.?(newGossip)(t), t) match { - case Success(receiver) ⇒ log.debug("Gossip sent to [{}] was successfully received", receiver) - case Failure(cause) ⇒ log.error(cause, cause.toString) - } - } catch { - case e: TimeoutException ⇒ log.error(e, "Gossip to [%s] timed out".format(connection.path)) - case e: Exception ⇒ - log.error(e, "Could not gossip to [{}] due to: {}", connection.path, e.toString) + setUpConnectionToNode(peer) match { + case Some(connection) ⇒ + try { + Await.result(connection ? newGossip, seedNodeConnectionTimeout) match { + case Success(receiver) ⇒ log.debug("Gossip sent to [{}] was successfully received", receiver) + case Failure(cause) ⇒ log.error(cause, cause.toString) + } + } catch { + case e: TimeoutException ⇒ log.error(e, "Gossip to [%s] timed out".format(connection.path)) + case e: Exception ⇒ log.error(e, "Could not gossip to [{}] due to: {}", connection.path, e.toString) + } + case None ⇒ + // FIXME what to do if the node can't be reached for gossiping - mark as unavailable in failure detector? } seeds exists (peer == _) @@ -287,6 +414,20 @@ class Gossiper(remote: RemoteActorRefProvider, system: ActorSystemImpl) { } } + private def setUpConnectionToNode(node: ParsedTransportAddress): Option[ActorRef] = { + //connectionManager.newConnection(node, RootActorPath(RemoteSystemAddress(system.name, node)) / "system" / "cluster") + try { + Some( + connectionManager.putIfAbsent( + node, + () ⇒ system.actorFor(RootActorPath(RemoteSystemAddress(system.name, node)) / "system" / "cluster"))) + // connectionManager.connectionFor(node).getOrElse( + // throw new RemoteConnectionException("Could not set up connection to node [" + node + "]")) + } catch { + case e: Exception ⇒ None + } + } + private def newGossip(): Gossip = Gossip( version = VectorClock(), node = address, @@ -305,6 +446,8 @@ class Gossiper(remote: RemoteActorRefProvider, system: ActorSystemImpl) { } } + private def seedNodesWithoutMyself: List[Address] = seeds.filter(_ != remoteAddress.transport).toList + private def selectRandomNode(nodes: Set[Address]): Address = { nodes.toList(random.nextInt(nodes.size)) } diff --git a/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala b/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala index 6509d19383..a2ca0435b9 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala @@ -24,14 +24,17 @@ class RemoteSettings(val config: Config, val systemName: String) { val FailureDetectorThreshold = getInt("akka.remote.failure-detector.threshold") val FailureDetectorMaxSampleSize = getInt("akka.remote.failure-detector.max-sample-size") - // Gossiper - val RemoteSystemDaemonAckTimeout = Duration(getMilliseconds("akka.remote.remote-daemon-ack-timeout"), MILLISECONDS) - val InitialDelayForGossip = 
Duration(getMilliseconds("akka.remote.gossip.initialDelay"), MILLISECONDS) - val GossipFrequency = Duration(getMilliseconds("akka.remote.gossip.frequency"), MILLISECONDS) // TODO cluster config will go into akka-cluster/reference.conf when we enable that module - val SeedNodes = Set.empty[Address] ++ getStringList("akka.cluster.seed-nodes").asScala.collect { - case AddressExtractor(addr) ⇒ addr + // cluster config section + val UseCluster = getBoolean("akka.cluster.use-cluster") + val SeedNodeConnectionTimeout = Duration(config.getMilliseconds("akka.cluster.seed-node-connection-timeout"), MILLISECONDS) + val MaxTimeToRetryJoiningCluster = Duration(config.getMilliseconds("akka.cluster.max-time-to-retry-joining-cluster"), MILLISECONDS) + val InitalDelayForGossip = Duration(getMilliseconds("akka.cluster.gossip.initialDelay"), MILLISECONDS) + val GossipFrequency = Duration(getMilliseconds("akka.cluster.gossip.frequency"), MILLISECONDS) + val SeedNodes = Set.empty[RemoteNettyAddress] ++ getStringList("akka.cluster.seed-nodes").asScala.collect { + case RemoteAddressExtractor(addr) ⇒ addr.transport } + val RemoteSystemDaemonAckTimeout = Duration(getMilliseconds("akka.remote.remote-daemon-ack-timeout"), MILLISECONDS) val UntrustedMode = getBoolean("akka.remote.untrusted-mode") -} \ No newline at end of file +} From c944dafda401d440a44aaf5f1ae2eadea4f735b7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Fri, 27 Jan 2012 14:50:33 +0100 Subject: [PATCH 27/94] Added 'Versioned' abstraction which is versioned through a VectorClock (including tests). MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- .../main/scala/akka/remote/VectorClock.scala | 14 ++++ .../scala/akka/remote/VectorClockSpec.scala | 84 ++++++++++++++++++- 2 files changed, 95 insertions(+), 3 deletions(-) diff --git a/akka-remote/src/main/scala/akka/remote/VectorClock.scala b/akka-remote/src/main/scala/akka/remote/VectorClock.scala index fde9bb84e7..42ea917669 100644 --- a/akka-remote/src/main/scala/akka/remote/VectorClock.scala +++ b/akka-remote/src/main/scala/akka/remote/VectorClock.scala @@ -8,6 +8,20 @@ import akka.AkkaException class VectorClockException(message: String) extends AkkaException(message) +trait Versioned { + def version: VectorClock +} + +object Versioned { + def latestVersionOf[T <: Versioned](versioned1: T, versioned2: T): T = { + (versioned1.version compare versioned2.version) match { + case VectorClock.Before ⇒ versioned2 // version 1 is BEFORE (older), use version 2 + case VectorClock.After ⇒ versioned1 // version 1 is AFTER (newer), use version 1 + case VectorClock.Concurrent ⇒ versioned1 // can't establish a causal relationship between versions => conflict - keeping version 1 + } + } +} + /** * Representation of a Vector-based clock (counting clock), inspired by Lamport logical clocks. 
* diff --git a/akka-remote/src/test/scala/akka/remote/VectorClockSpec.scala b/akka-remote/src/test/scala/akka/remote/VectorClockSpec.scala index 5bfda16666..03e4109423 100644 --- a/akka-remote/src/test/scala/akka/remote/VectorClockSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/VectorClockSpec.scala @@ -6,7 +6,7 @@ import akka.testkit.AkkaSpec class VectorClockSpec extends AkkaSpec { import VectorClock._ - "An VectorClock" must { + "A VectorClock" must { "have zero versions when created" in { val clock = VectorClock() @@ -40,7 +40,7 @@ class VectorClockSpec extends AkkaSpec { clock1.compare(clock2) must not be (Concurrent) } - "A clock should not happen before an identical clock" in { + "not happen before an identical clock" in { val clock1_1 = VectorClock() val clock2_1 = clock1_1.increment(1, System.currentTimeMillis) val clock3_1 = clock2_1.increment(2, System.currentTimeMillis) @@ -54,7 +54,7 @@ class VectorClockSpec extends AkkaSpec { clock4_1.compare(clock4_2) must not be (Concurrent) } - "A clock should happen before an identical clock with a single additional event" in { + "happen before an identical clock with a single additional event" in { val clock1_1 = VectorClock() val clock2_1 = clock1_1.increment(1, System.currentTimeMillis) val clock3_1 = clock2_1.increment(2, System.currentTimeMillis) @@ -121,4 +121,82 @@ class VectorClockSpec extends AkkaSpec { clock5_1.compare(clock3_2) must be(After) } } + + "A Versioned" must { + class TestVersioned(val version: VectorClock = VectorClock()) extends Versioned { + def increment(v: Int, time: Long) = new TestVersioned(version.increment(v, time)) + } + + "have zero versions when created" in { + val versioned = new TestVersioned() + versioned.version.versions must be(Vector()) + } + + "happen before an identical versioned with a single additional event" in { + val versioned1_1 = new TestVersioned() + val versioned2_1 = versioned1_1.increment(1, System.currentTimeMillis) + val versioned3_1 = versioned2_1.increment(2, System.currentTimeMillis) + val versioned4_1 = versioned3_1.increment(1, System.currentTimeMillis) + + val versioned1_2 = new TestVersioned() + val versioned2_2 = versioned1_2.increment(1, System.currentTimeMillis) + val versioned3_2 = versioned2_2.increment(2, System.currentTimeMillis) + val versioned4_2 = versioned3_2.increment(1, System.currentTimeMillis) + val versioned5_2 = versioned4_2.increment(3, System.currentTimeMillis) + + Versioned.latestVersionOf[TestVersioned](versioned4_1, versioned5_2) must be(versioned5_2) + } + + "Two versioneds with different events should be concurrent: 1" in { + val versioned1_1 = new TestVersioned() + val versioned2_1 = versioned1_1.increment(1, System.currentTimeMillis) + + val versioned1_2 = new TestVersioned() + val versioned2_2 = versioned1_2.increment(2, System.currentTimeMillis) + + Versioned.latestVersionOf[TestVersioned](versioned2_1, versioned2_2) must be(versioned2_1) + } + + "Two versioneds with different events should be concurrent: 2" in { + val versioned1_3 = new TestVersioned() + val versioned2_3 = versioned1_3.increment(1, System.currentTimeMillis) + val versioned3_3 = versioned2_3.increment(2, System.currentTimeMillis) + val versioned4_3 = versioned3_3.increment(1, System.currentTimeMillis) + + val versioned1_4 = new TestVersioned() + val versioned2_4 = versioned1_4.increment(1, System.currentTimeMillis) + val versioned3_4 = versioned2_4.increment(1, System.currentTimeMillis) + val versioned4_4 = versioned3_4.increment(3, System.currentTimeMillis) + + 
Versioned.latestVersionOf[TestVersioned](versioned4_3, versioned4_4) must be(versioned4_3) + } + + "be earlier than another versioned if it has an older version" in { + val versioned1_1 = new TestVersioned() + val versioned2_1 = versioned1_1.increment(2, System.currentTimeMillis) + val versioned3_1 = versioned2_1.increment(2, System.currentTimeMillis) + + val versioned1_2 = new TestVersioned() + val versioned2_2 = versioned1_2.increment(1, System.currentTimeMillis) + val versioned3_2 = versioned2_2.increment(2, System.currentTimeMillis) + val versioned4_2 = versioned3_2.increment(2, System.currentTimeMillis) + val versioned5_2 = versioned4_2.increment(3, System.currentTimeMillis) + + Versioned.latestVersionOf[TestVersioned](versioned3_1, versioned5_2) must be(versioned5_2) + } + + "be later than another versioned if it has a newer version" in { + val versioned1_1 = new TestVersioned() + val versioned2_1 = versioned1_1.increment(1, System.currentTimeMillis) + val versioned3_1 = versioned2_1.increment(2, System.currentTimeMillis) + val versioned4_1 = versioned3_1.increment(2, System.currentTimeMillis) + val versioned5_1 = versioned4_1.increment(3, System.currentTimeMillis) + + val versioned1_2 = new TestVersioned() + val versioned2_2 = versioned1_2.increment(2, System.currentTimeMillis) + val versioned3_2 = versioned2_2.increment(2, System.currentTimeMillis) + + Versioned.latestVersionOf[TestVersioned](versioned5_1, versioned3_2) must be(versioned5_1) + } + } } From 5f107e7b9fd89f2381a5bf7c001aa8f515da30d3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Sat, 28 Jan 2012 15:33:24 +0100 Subject: [PATCH 28/94] Added logging to AccrualFailureDetector MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- .../akka/remote/AccrualFailureDetector.scala | 23 +++++++++++++------ .../remote/AccrualFailureDetectorSpec.scala | 12 ++++++---- 2 files changed, 23 insertions(+), 12 deletions(-) diff --git a/akka-remote/src/main/scala/akka/remote/AccrualFailureDetector.scala b/akka-remote/src/main/scala/akka/remote/AccrualFailureDetector.scala index 2d7a831b9d..1c9cb45c08 100644 --- a/akka-remote/src/main/scala/akka/remote/AccrualFailureDetector.scala +++ b/akka-remote/src/main/scala/akka/remote/AccrualFailureDetector.scala @@ -10,6 +10,9 @@ import scala.annotation.tailrec import System.{ currentTimeMillis ⇒ newTimestamp } import akka.actor.{ ActorSystem, Address } +import akka.actor.ActorSystem +import akka.event.Logging + /** * Implementation of 'The Phi Accrual Failure Detector' by Hayashibara et al. as defined in their paper: * [http://ddg.jaist.ac.jp/pub/HDY+04.pdf] *<p/>

* Default threshold is 8, but can be configured in the Akka config. */ -class AccrualFailureDetector(val threshold: Int = 8, val maxSampleSize: Int = 1000) { +class AccrualFailureDetector(val threshold: Int = 8, val maxSampleSize: Int = 1000, system: ActorSystem) { private final val PhiFactor = 1.0 / math.log(10.0) private case class FailureStats(mean: Double = 0.0D, variance: Double = 0.0D, deviation: Double = 0.0D) + private val log = Logging(system, "FailureDetector") + /** * Implement using optimistic lockless concurrency, all state is represented * by this immutable case class and managed by an AtomicReference. @@ -49,6 +54,7 @@ class AccrualFailureDetector(val threshold: Int = 8, val maxSampleSize: Int = 10 */ @tailrec final def heartbeat(connection: Address) { + log.info("Heartbeat from connection [{}] ", connection) val oldState = state.get val latestTimestamp = oldState.timestamps.get(connection) @@ -132,12 +138,15 @@ class AccrualFailureDetector(val threshold: Int = 8, val maxSampleSize: Int = 10 def phi(connection: Address): Double = { val oldState = state.get val oldTimestamp = oldState.timestamps.get(connection) - if (oldTimestamp.isEmpty) 0.0D // treat unmanaged connections, e.g. with zero heartbeats, as healthy connections - else { - val timestampDiff = newTimestamp - oldTimestamp.get - val mean = oldState.failureStats.get(connection).getOrElse(FailureStats()).mean - PhiFactor * timestampDiff / mean - } + val phi = + if (oldTimestamp.isEmpty) 0.0D // treat unmanaged connections, e.g. with zero heartbeats, as healthy connections + else { + val timestampDiff = newTimestamp - oldTimestamp.get + val mean = oldState.failureStats.get(connection).getOrElse(FailureStats()).mean + PhiFactor * timestampDiff / mean + } + log.debug("Phi value [{}] and threshold [{}] for connection [{}] ", phi, threshold, connection) + phi } /** diff --git a/akka-remote/src/test/scala/akka/remote/AccrualFailureDetectorSpec.scala b/akka-remote/src/test/scala/akka/remote/AccrualFailureDetectorSpec.scala index 17a848b8d3..cffc424408 100644 --- a/akka-remote/src/test/scala/akka/remote/AccrualFailureDetectorSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/AccrualFailureDetectorSpec.scala @@ -4,13 +4,15 @@ import java.net.InetSocketAddress import akka.testkit.AkkaSpec import akka.actor.Address -class AccrualFailureDetectorSpec extends AkkaSpec { +class AccrualFailureDetectorSpec extends AkkaSpec(""" + akka.loglevel = "DEBUG" +""") { "An AccrualFailureDetector" must { val conn = Address("akka", "", Some("localhost"), Some(2552)) "mark node as available after a series of successful heartbeats" in { - val fd = new AccrualFailureDetector() + val fd = new AccrualFailureDetector(system = system) fd.heartbeat(conn) @@ -25,7 +27,7 @@ class AccrualFailureDetectorSpec extends AkkaSpec { // FIXME how should we deal with explicit removal of connection? 
- if triggered as failure then we have a problem in bootstrap - see line 142 in AccrualFailureDetector "mark node as dead after explicit removal of connection" ignore { - val fd = new AccrualFailureDetector + val fd = new AccrualFailureDetector(system = system) fd.heartbeat(conn) @@ -43,7 +45,7 @@ } "mark node as dead if heartbeat are missed" in { - val fd = new AccrualFailureDetector(threshold = 3) + val fd = new AccrualFailureDetector(threshold = 3, system = system) fd.heartbeat(conn) @@ -61,7 +63,7 @@ } "mark node as available if it starts heartbeat again after being marked dead due to detection of failure" in { - val fd = new AccrualFailureDetector(threshold = 3) + val fd = new AccrualFailureDetector(threshold = 3, system = system) fd.heartbeat(conn)
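A note on the phi value that this patch instruments with logging (our reading, not spelled out in the patch itself): with PhiFactor = 1 / ln 10, the detector computes, for a connection last heard from Δt milliseconds ago with observed mean heartbeat interval `mean`,

  \[ \phi(\Delta t) = \frac{\Delta t}{\mathrm{mean} \cdot \ln 10} = -\log_{10}\!\big(e^{-\Delta t/\mathrm{mean}}\big) \]

that is, minus log10 of the probability that an exponentially distributed inter-arrival time with that mean exceeds Δt - a simplification of the normal-distribution estimate in the Hayashibara et al. paper. Crossing the default threshold of 8 therefore corresponds to roughly a one-in-10^8 chance that the silence is ordinary heartbeat jitter.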
From f73bee3dcda626b7264a22cdece9b1cc8b1d3cb2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Sat, 28 Jan 2012 15:34:46 +0100 Subject: [PATCH 29/94] Added test for the Failure Detector when used together with Gossiper and a set of remote cluster nodes. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- .../test/scala/akka/remote/GossiperSpec.scala | 13 --- .../GossipingAccrualFailureDetectorSpec.scala | 95 +++++++++++++++++++ 2 files changed, 95 insertions(+), 13 deletions(-) delete mode 100644 akka-remote/src/test/scala/akka/remote/GossiperSpec.scala create mode 100644 akka-remote/src/test/scala/akka/remote/GossipingAccrualFailureDetectorSpec.scala diff --git a/akka-remote/src/test/scala/akka/remote/GossiperSpec.scala b/akka-remote/src/test/scala/akka/remote/GossiperSpec.scala deleted file mode 100644 index 12e2925b26..0000000000 --- a/akka-remote/src/test/scala/akka/remote/GossiperSpec.scala +++ /dev/null @@ -1,13 +0,0 @@ -package akka.remote - -import java.net.InetSocketAddress -import akka.testkit.AkkaSpec - -class GossiperSpec extends AkkaSpec { - - "An Gossiper" must { - - "..." in { - } - } -} diff --git a/akka-remote/src/test/scala/akka/remote/GossipingAccrualFailureDetectorSpec.scala b/akka-remote/src/test/scala/akka/remote/GossipingAccrualFailureDetectorSpec.scala new file mode 100644 index 0000000000..85f1c5a084 --- /dev/null +++ b/akka-remote/src/test/scala/akka/remote/GossipingAccrualFailureDetectorSpec.scala @@ -0,0 +1,95 @@ +/** + * Copyright (C) 2009-2011 Typesafe Inc. 
+ */ +package akka.remote + +import java.net.InetSocketAddress + +import akka.testkit._ +import akka.dispatch._ +import akka.actor._ +import com.typesafe.config._ + +class GossipingAccrualFailureDetectorSpec extends AkkaSpec(""" + akka { + loglevel = "INFO" + actor.provider = "akka.remote.RemoteActorRefProvider" + + remote.server.hostname = localhost + remote.server.port = 5550 + remote.failure-detector.threshold = 3 + cluster.seed-nodes = ["akka://localhost:5551"] + } + """) with ImplicitSender { + + val conn1 = RemoteNettyAddress("localhost", 5551) + val node1 = ActorSystem("GossiperSpec", ConfigFactory + .parseString("akka { remote.server.port=5551, cluster.use-cluster = on }") + .withFallback(system.settings.config)) + val remote1 = + node1.asInstanceOf[ActorSystemImpl] + .provider.asInstanceOf[RemoteActorRefProvider] + .remote + val gossiper1 = remote1.gossiper + val fd1 = remote1.failureDetector + gossiper1 must be('defined) + + val conn2 = RemoteNettyAddress("localhost", 5552) + val node2 = ActorSystem("GossiperSpec", ConfigFactory + .parseString("akka { remote.server.port=5552, cluster.use-cluster = on }") + .withFallback(system.settings.config)) + val remote2 = + node2.asInstanceOf[ActorSystemImpl] + .provider.asInstanceOf[RemoteActorRefProvider] + .remote + val gossiper2 = remote2.gossiper + val fd2 = remote2.failureDetector + gossiper2 must be('defined) + + val conn3 = RemoteNettyAddress("localhost", 5553) + val node3 = ActorSystem("GossiperSpec", ConfigFactory + .parseString("akka { remote.server.port=5553, cluster.use-cluster = on }") + .withFallback(system.settings.config)) + val remote3 = + node3.asInstanceOf[ActorSystemImpl] + .provider.asInstanceOf[RemoteActorRefProvider] + .remote + val gossiper3 = remote3.gossiper + val fd3 = remote3.failureDetector + gossiper3 must be('defined) + + "A Gossip-driven Failure Detector" must { + + "receive gossip heartbeats so that all healthy nodes in the cluster are marked 'available'" ignore { + Thread.sleep(5000) // let them gossip for 5 seconds + fd1.isAvailable(conn2) must be(true) + fd1.isAvailable(conn3) must be(true) + fd2.isAvailable(conn1) must be(true) + fd2.isAvailable(conn3) must be(true) + fd3.isAvailable(conn1) must be(true) + fd3.isAvailable(conn2) must be(true) + } + + "mark node as 'unavailable' if a node in the cluster is shut down and its heartbeats stop" ignore { + // kill node 3 + gossiper3.get.shutdown() + node3.shutdown() + Thread.sleep(5000) // let them gossip for 5 seconds + + fd1.isAvailable(conn2) must be(true) + fd1.isAvailable(conn3) must be(false) + fd2.isAvailable(conn1) must be(true) + fd2.isAvailable(conn3) must be(false) + } + } + + override def atTermination() { + gossiper1.get.shutdown() + gossiper2.get.shutdown() + gossiper3.get.shutdown() + node1.shutdown() + node2.shutdown() + node3.shutdown() + // FIXME Ordering problem - If we shut down the ActorSystem before the Gossiper then we get an IllegalStateException + } +} From b012a9a7beb6b9af6cd94eca782e8e440c1504d0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Mon, 30 Jan 2012 11:37:02 +0100 Subject: [PATCH 30/94] Added some options to the cluster config section. Moved gossip config from remote to cluster section. 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- akka-remote/src/main/resources/reference.conf | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/akka-remote/src/main/resources/reference.conf b/akka-remote/src/main/resources/reference.conf index 86de93527c..f9c6430f6f 100644 --- a/akka-remote/src/main/resources/reference.conf +++ b/akka-remote/src/main/resources/reference.conf @@ -131,11 +131,6 @@ akka { max-sample-size = 1000 } - gossip { - initialDelay = 5s - frequency = 1s - } - # The dispatcher used for remote system messages compute-grid-dispatcher { # defaults to same settings as default-dispatcher @@ -150,6 +145,13 @@ akka { } cluster { + use-cluster = off seed-nodes = [] + max-time-to-retry-joining-cluster = 30s + seed-node-connection-timeout = 30s + gossip { + initialDelay = 5s + frequency = 1s + } } } From e166c74f8de201c20516f1bd28ed186af694c485 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Mon, 30 Jan 2012 11:41:41 +0100 Subject: [PATCH 31/94] Enhanced the Gossip state with member status, ring convergence flags etc. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Added member status, ring convergence flags etc to Gossip state. * Updated Gossiper to use Member throughout instead of ParsedTransportAddress. * Commented out cluster membership updating to be replaced by the one in the cluster specification. Signed-off-by: Jonas Bonér --- .../src/main/scala/akka/remote/Gossiper.scala | 396 +++++++++--------- 1 file changed, 191 insertions(+), 205 deletions(-) diff --git a/akka-remote/src/main/scala/akka/remote/Gossiper.scala b/akka-remote/src/main/scala/akka/remote/Gossiper.scala index 20e803c7eb..e5b6e938bc 100644 --- a/akka-remote/src/main/scala/akka/remote/Gossiper.scala +++ b/akka-remote/src/main/scala/akka/remote/Gossiper.scala @@ -11,13 +11,13 @@ import akka.util._ import akka.dispatch.Await import akka.config.ConfigurationException -import java.util.concurrent.atomic.AtomicReference +import java.util.concurrent.atomic.{ AtomicReference, AtomicBoolean } import java.util.concurrent.TimeUnit._ import java.util.concurrent.TimeoutException import java.security.SecureRandom import System.{ currentTimeMillis ⇒ newTimestamp } -import scala.collection.immutable.Map +import scala.collection.immutable.{ Map, SortedSet } import scala.annotation.tailrec import akka.dispatch.Await @@ -26,105 +26,94 @@ import akka.pattern.ask import com.google.protobuf.ByteString /** - * Interface for node membership change listener. + * Interface for member membership change listener. */ trait NodeMembershipChangeListener { - def nodeConnected(node: Address) - def nodeDisconnected(node: Address) + def memberConnected(member: Member) + def memberDisconnected(member: Member) } /** - * Represents the node state of to gossip, versioned by a vector clock. + * Base trait for all cluster messages. All ClusterMessage's are serializable. 
*/ -case class Gossip( - version: VectorClock, - node: Address, - availableNodes: Set[Address] = Set.empty[Address], - unavailableNodes: Set[Address] = Set.empty[Address]) - -// ====== START - NEW GOSSIP IMPLEMENTATION ====== -/* - case class Gossip( - version: VectorClock, - node: ParsedTransportAddress, - leader: ParsedTransportAddress, // FIXME leader is always head of 'members', so we probably don't need this field - members: SortedSet[Member] = SortetSet.empty[Member](Ordering.fromLessThan[String](_ > _)), // sorted set of members with their status, sorted by name - seen: Map[Member, VectorClock] = Map.empty[Member, VectorClock], // for ring convergence - pendingChanges: Option[Vector[PendingPartitioningChange]] = None, // for handoff - meta: Option[Map[String, Array[Byte]]] = None) // misc meta-data - - case class Member(address: ParsedTransportAddress, status: MemberStatus) - - sealed trait MemberStatus - object MemberStatus { - case class Joining(version: VectorClock) extends MemberStatus - case class Up(version: VectorClock) extends MemberStatus - case class Leaving(version: VectorClock) extends MemberStatus - case class Exiting(version: VectorClock) extends MemberStatus - case class Down(version: VectorClock) extends MemberStatus - } - - sealed trait PendingPartitioningStatus - object PendingPartitioningStatus { - case object Complete extends PendingPartitioningStatus - case object Awaiting extends PendingPartitioningStatus - } - - // FIXME what is this? - type VNodeMod = AnyRef - - case class PendingPartitioningChange( - owner: ParsedTransportAddress, - nextOwner: ParsedTransportAddress, - changes: Vector[VNodeMod], - status: PendingPartitioningStatus) -*/ -// ====== END - NEW GOSSIP IMPLEMENTATION ====== - /** - * Interface for node membership change listener. + * Command to join the cluster. */ -trait NodeMembershipChangeListener { - def nodeConnected(node: ParsedTransportAddress) - def nodeDisconnected(node: ParsedTransportAddress) -} - -sealed trait ClusterMessage extends Serializable - case object JoinCluster extends ClusterMessage /** - * Represents the node state of to gossip, versioned by a vector clock. + * Represents the state of the cluster; cluster ring membership, ring convergence, meta data - all versioned by a vector clock. */ case class Gossip( - version: VectorClock, - node: ParsedTransportAddress, - availableNodes: Set[ParsedTransportAddress] = Set.empty[ParsedTransportAddress], - unavailableNodes: Set[ParsedTransportAddress] = Set.empty[ParsedTransportAddress]) extends ClusterMessage + version: VectorClock = VectorClock(), + member: Address, + // sorted set of members with their status, sorted by name + members: SortedSet[Member] = SortedSet.empty[Member](Ordering.fromLessThan[Member](_.address.toString > _.address.toString)), + unavailableMembers: Set[Member] = Set.empty[Member], + // for ring convergence + seen: Map[Member, VectorClock] = Map.empty[Member, VectorClock], + // for handoff + //pendingChanges: Option[Vector[PendingPartitioningChange]] = None, + meta: Option[Map[String, Array[Byte]]] = None) + extends ClusterMessage // is a serializable cluster message + with Versioned // has a vector clock as version -class ClusterDaemon(system: ActorSystem, gossiper: Gossiper) extends Actor { +/** + * Represents the address and the current status of a cluster member node. 
+ */ +case class Member(address: Address, status: MemberStatus) extends ClusterMessage + +/** + * Defines the current status of a cluster member node + * + * Can be one of: Joining, Up, Leaving, Exiting and Down. + */ +sealed trait MemberStatus extends ClusterMessage with Versioned +object MemberStatus { + case class Joining(version: VectorClock = VectorClock()) extends MemberStatus + case class Up(version: VectorClock = VectorClock()) extends MemberStatus + case class Leaving(version: VectorClock = VectorClock()) extends MemberStatus + case class Exiting(version: VectorClock = VectorClock()) extends MemberStatus + case class Down(version: VectorClock = VectorClock()) extends MemberStatus +} + +// sealed trait PendingPartitioningStatus +// object PendingPartitioningStatus { +// case object Complete extends PendingPartitioningStatus +// case object Awaiting extends PendingPartitioningStatus +// } + +// case class PendingPartitioningChange( +// owner: Address, +// nextOwner: Address, +// changes: Vector[VNodeMod], +// status: PendingPartitioningStatus) + +final class ClusterDaemon(system: ActorSystem, gossiper: Gossiper) extends Actor { val log = Logging(system, "ClusterDaemon") def receive = { - case JoinCluster ⇒ sender ! gossiper.latestGossip - case gossip: Gossip ⇒ gossiper.tell(gossip) - case unknown ⇒ log.error("Unknown message sent to cluster daemon [" + unknown + "]") + case JoinCluster ⇒ sender ! gossiper.latestGossip + case gossip: Gossip ⇒ + gossiper.tell(gossip) + + case unknown ⇒ log.error("Unknown message sent to cluster daemon [" + unknown + "]") } } /** * This module is responsible for Gossiping cluster information. The abstraction maintains the list of live - * and dead nodes. Periodically i.e. every 1 second this module chooses a random node and initiates a round + * and dead members. Periodically i.e. every 1 second this module chooses a random member and initiates a round * of Gossip with it. Whenever it gets gossip updates it updates the Failure Detector with the liveness * information. *

- * During each of these runs the node initiates gossip exchange according to following rules (as defined in the + * During each of these runs the member initiates gossip exchange according to the following rules (as defined in the * Cassandra documentation [http://wiki.apache.org/cassandra/ArchitectureGossip]: *<p/>

- *   1) Gossip to random live node (if any)
- *   2) Gossip to random unreachable node with certain probability depending on number of unreachable and live nodes
- *   3) If the node gossiped to at (1) was not seed, or the number of live nodes is less than number of seeds,
- *       gossip to random seed with certain probability depending on number of unreachable, seed and live nodes.
+ *   1) Gossip to random live member (if any)
+ *   2) Gossip to random unreachable member with certain probability depending on number of unreachable and live members
+ *   3) If the member gossiped to at (1) was not a seed, or the number of live members is less than the number of seeds,<br/>
+ *       gossip to random seed with certain probability depending on number of unreachable, seed and live members.
  * 
*/ case class Gossiper(remote: Remote, system: ActorSystemImpl) { @@ -135,7 +124,7 @@ case class Gossiper(remote: Remote, system: ActorSystemImpl) { */ private case class State( currentGossip: Gossip, - nodeMembershipChangeListeners: Set[NodeMembershipChangeListener] = Set.empty[NodeMembershipChangeListener]) + memberMembershipChangeListeners: Set[NodeMembershipChangeListener] = Set.empty[NodeMembershipChangeListener]) // configuration private val remoteSettings = remote.remoteSettings @@ -148,46 +137,53 @@ case class Gossiper(remote: Remote, system: ActorSystemImpl) { implicit val seedNodeConnectionTimeout = remoteSettings.SeedNodeConnectionTimeout implicit val defaultTimeout = Timeout(remoteSettings.RemoteSystemDaemonAckTimeout) - // seed members - private val seeds: Set[ParsedTransportAddress] = { + // seed members + private val seeds: Set[Member] = { val seeds = remoteSettings.SeedNodes flatMap { case uta: UnparsedTransportAddress ⇒ uta.parse(remote.transports) match { - case pta: ParsedTransportAddress ⇒ Some(pta) + case pta: Address ⇒ Some(Member(pta, MemberStatus.Up())) case _ ⇒ None } case _ ⇒ None } if (remoteSettings.SeedNodes.isEmpty) throw new ConfigurationException( - "At least one seed node must be defined in the configuration [akka.cluster.seed-nodes]") + "At least one seed member must be defined in the configuration [akka.cluster.seed-nodes]") else remoteSettings.SeedNodes } + private val isRunning = new AtomicBoolean(true) private val log = Logging(system, "Gossiper") private val random = SecureRandom.getInstance("SHA1PRNG") private val connectionManager = new RemoteConnectionManager(system, remote, Map.empty[Address, ActorRef]) + + // Is it right to put this guy under the /system path or should we have a top-level /cluster or something else...? private val clusterDaemon = system.systemActorOf(Props(new ClusterDaemon(system, this)), "cluster") private val state = new AtomicReference[State](State(currentGossip = newGossip())) log.info("Starting cluster Gossiper...") - // join the cluster by connecting to one of the seed members and retrieving the current cluster state (Gossip) joinCluster(Timer(remoteSettings.MaxTimeToRetryJoiningCluster)) - // start periodic gossip and cluster scrutinization - default is run them every second with 1/2 second in between + // start periodic gossip and cluster scrutinization val initateGossipCanceller = system.scheduler.schedule( Duration(initalDelayForGossip.toSeconds, SECONDS), Duration(gossipFrequency.toSeconds, SECONDS))(initateGossip()) + val scrutinizeCanceller = system.scheduler.schedule( Duration(initalDelayForGossip.toSeconds, SECONDS), Duration(gossipFrequency.toSeconds, SECONDS))(scrutinize()) /** - * Shuts down all connections to other members, the cluster daemon and the periodic gossip and cleanup tasks. 
*/ def shutdown() { - connectionManager.shutdown() - system.stop(clusterDaemon) - initateGossipCanceller.cancel() - scrutinizeCanceller.cancel() + if (isRunning.compareAndSet(true, false)) { + log.info("Shutting down Gossiper for [{}]", remoteAddress) + connectionManager.shutdown() + system.stop(clusterDaemon) + initateGossipCanceller.cancel() + scrutinizeCanceller.cancel() + } } def latestGossip: Gossip = state.get.currentGossip @@ -195,45 +191,56 @@ case class Gossiper(remote: Remote, system: ActorSystemImpl) { /** * Tell the gossiper some gossip. */ - @tailrec + //@tailrec final def tell(newGossip: Gossip) { - val gossipingNode = newGossip.node + val gossipingNode = newGossip.member failureDetector heartbeat gossipingNode // update heartbeat in failure detector - val oldState = state.get - val latestGossip = latestVersionOf(newGossip, oldState.currentGossip) - val latestAvailableNodes = latestGossip.availableNodes - val latestUnavailableNodes = latestGossip.unavailableNodes + // FIXME all below here is WRONG - redesign with cluster convergence in mind - if (!(latestAvailableNodes contains gossipingNode) && !(latestUnavailableNodes contains gossipingNode)) { - // we have a new node - val newGossip = latestGossip copy (availableNodes = latestAvailableNodes + gossipingNode) - val newState = oldState copy (currentGossip = incrementVersionForGossip(newGossip)) + // val oldState = state.get + // println("-------- NEW VERSION " + newGossip) + // println("-------- OLD VERSION " + oldState.currentGossip) + // val latestGossip = VectorClock.latestVersionOf(newGossip, oldState.currentGossip) + // println("-------- WINNING VERSION " + latestGossip) - // if we won the race then update else try again - if (!state.compareAndSet(oldState, newState)) tell(newGossip) // recur - else { - // create connections for all new nodes in the latest gossip - (latestAvailableNodes + gossipingNode) foreach { node ⇒ - setUpConnectionToNode(node) - oldState.nodeMembershipChangeListeners foreach (_ nodeConnected node) // notify listeners about the new nodes - } - } + // val latestAvailableNodes = latestGossip.members + // val latestUnavailableNodes = latestGossip.unavailableMembers + // println("=======>>> gossipingNode: " + gossipingNode) + // println("=======>>> latestAvailableNodes: " + latestAvailableNodes) + // if (!(latestAvailableNodes contains gossipingNode) && !(latestUnavailableNodes contains gossipingNode)) { + // println("-------- NEW NODE") + // // we have a new member + // val newGossip = latestGossip copy (availableNodes = latestAvailableNodes + gossipingNode) + // val newState = oldState copy (currentGossip = incrementVersionForGossip(newGossip)) - } else if (latestUnavailableNodes contains gossipingNode) { - // gossip from an old former dead node + // println("--------- new GOSSIP " + newGossip.members) + // println("--------- new STATE " + newState) + // // if we won the race then update else try again + // if (!state.compareAndSet(oldState, newState)) tell(newGossip) // recur + // else { + // println("---------- WON RACE - setting state") + // // create connections for all new members in the latest gossip + // (latestAvailableNodes + gossipingNode) foreach { member ⇒ + // setUpConnectionToNode(member) + // oldState.memberMembershipChangeListeners foreach (_ memberConnected member) // notify listeners about the new members + // } + // } - val newUnavailableNodes = latestUnavailableNodes - gossipingNode - val newAvailableNodes = latestAvailableNodes + gossipingNode + // } else if 
(latestUnavailableNodes contains gossipingNode) { + // // gossip from an old former dead member - val newGossip = latestGossip copy (availableNodes = newAvailableNodes, unavailableNodes = newUnavailableNodes) - val newState = oldState copy (currentGossip = incrementVersionForGossip(newGossip)) + // val newUnavailableMembers = latestUnavailableNodes - gossipingNode + // val newMembers = latestAvailableNodes + gossipingNode - // if we won the race then update else try again - if (!state.compareAndSet(oldState, newState)) tell(newGossip) // recur - else oldState.nodeMembershipChangeListeners foreach (_ nodeConnected gossipingNode) // notify listeners on successful update of state - } + // val newGossip = latestGossip copy (availableNodes = newMembers, unavailableNodes = newUnavailableMembers) + // val newState = oldState copy (currentGossip = incrementVersionForGossip(newGossip)) + + // // if we won the race then update else try again + // if (!state.compareAndSet(oldState, newState)) tell(newGossip) // recur + // else oldState.memberMembershipChangeListeners foreach (_ memberConnected gossipingNode) // notify listeners on successful update of state + // } } /** @@ -242,8 +249,8 @@ case class Gossiper(remote: Remote, system: ActorSystemImpl) { @tailrec final def registerListener(listener: NodeMembershipChangeListener) { val oldState = state.get - val newListeners = oldState.nodeMembershipChangeListeners + listener - val newState = oldState copy (nodeMembershipChangeListeners = newListeners) + val newListeners = oldState.memberMembershipChangeListeners + listener + val newState = oldState copy (memberMembershipChangeListeners = newListeners) if (!state.compareAndSet(oldState, newState)) registerListener(listener) // recur } @@ -253,54 +260,54 @@ case class Gossiper(remote: Remote, system: ActorSystemImpl) { @tailrec final def unregisterListener(listener: NodeMembershipChangeListener) { val oldState = state.get - val newListeners = oldState.nodeMembershipChangeListeners - listener - val newState = oldState copy (nodeMembershipChangeListeners = newListeners) + val newListeners = oldState.memberMembershipChangeListeners - listener + val newState = oldState copy (memberMembershipChangeListeners = newListeners) if (!state.compareAndSet(oldState, newState)) unregisterListener(listener) // recur } /** - * Sets up remote connections to all the nodes in the argument list. + * Sets up remote connections to all the members in the argument list. 
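+ * Each registered NodeMembershipChangeListener is notified (memberConnected) for every member
+ * that a connection is set up to.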
*/ - private def connectToNodes(nodes: Seq[ParsedTransportAddress]) { - nodes foreach { node ⇒ - setUpConnectionToNode(node) - state.get.nodeMembershipChangeListeners foreach (_ nodeConnected node) // notify listeners about the new nodes + private def connectToNodes(members: Seq[Member]) { + members foreach { member ⇒ + setUpConnectionToNode(member) + state.get.memberMembershipChangeListeners foreach (_ memberConnected member) // notify listeners about the new members } } - // FIXME should shuffle list randomly before start traversing to avoid connecting to some node on every node + // FIXME should shuffle list randomly before start traversing to avoid connecting to some member on every member @tailrec - final private def connectToRandomNodeOf(nodes: Seq[ParsedTransportAddress]): ActorRef = { - nodes match { - case node :: rest ⇒ - setUpConnectionToNode(node) match { + final private def connectToRandomNodeOf(members: Seq[Member]): ActorRef = { + members match { + case member :: rest ⇒ + setUpConnectionToNode(member) match { case Some(connection) ⇒ connection case None ⇒ connectToRandomNodeOf(rest) // recur if } case Nil ⇒ throw new RemoteConnectionException( - "Could not establish connection to any of the nodes in the argument list") + "Could not establish connection to any of the members in the argument list") } } /** - * Joins the cluster by connecting to one of the seed nodes and retrieve current cluster state (Gossip). + * Joins the cluster by connecting to one of the seed members and retrieve current cluster state (Gossip). */ private def joinCluster(timer: Timer) { val seedNodes = seedNodesWithoutMyself // filter out myself - if (!seedNodes.isEmpty) { // if we have seed nodes to contact + if (!seedNodes.isEmpty) { // if we have seed members to contact connectToNodes(seedNodes) try { - log.info("Trying to join cluster through one of the seed nodes [{}]", seedNodes.mkString(", ")) + log.info("Trying to join cluster through one of the seed members [{}]", seedNodes.mkString(", ")) Await.result(connectToRandomNodeOf(seedNodes) ? JoinCluster, seedNodeConnectionTimeout) match { case initialGossip: Gossip ⇒ // just sets/overwrites the state/gossip regardless of what it was before // since it should be treated as the initial state state.set(state.get copy (currentGossip = initialGossip)) - log.debug("Received initial gossip [{}] from seed node", initialGossip) + log.debug("Received initial gossip [{}] from seed member", initialGossip) case unknown ⇒ throw new IllegalStateException("Expected initial gossip from seed, received [" + unknown + "]") @@ -308,13 +315,19 @@ case class Gossiper(remote: Remote, system: ActorSystemImpl) { } catch { case e: Exception ⇒ log.error( - "Could not join cluster through any of the seed nodes - retrying for another {} seconds", + "Could not join cluster through any of the seed members - retrying for another {} seconds", timer.timeLeft.toSeconds) - if (timer.timeLeft.toMillis > 0) joinCluster(timer) // recur - retry joining the cluster - else throw new RemoteConnectionException( - "Could not join cluster (any of the seed nodes) - giving up after trying for " + - timer.timeout.toSeconds + " seconds") + // retry joining the cluster unless + // 1. Gossiper is shut down + // 2. 
The connection time window has expired + if (isRunning.get) { + println("=======>>> isRun: " + isRunning.get + " " + remoteAddress) + if (timer.timeLeft.toMillis > 0) joinCluster(timer) // recur + else throw new RemoteConnectionException( + "Could not join cluster (any of the seed members) - giving up after trying for " + + timer.timeout.toSeconds + " seconds") + } } } } @@ -326,64 +339,50 @@ case class Gossiper(remote: Remote, system: ActorSystemImpl) { val oldState = state.get val oldGossip = oldState.currentGossip - val oldAvailableNodes = oldGossip.availableNodes - val oldUnavailableNodes = oldGossip.unavailableNodes + val oldMembers = oldGossip.members + val oldMembersSize = oldMembers.size - val oldAvailableNodesSize = oldAvailableNodes.size - val oldUnavailableNodesSize = oldUnavailableNodes.size + val oldUnavailableMembers = oldGossip.unavailableMembers + val oldUnavailableMembersSize = oldUnavailableMembers.size - // 1. gossip to alive nodes + // 1. gossip to alive members val gossipedToSeed = - if (oldAvailableNodesSize > 0) gossipToRandomNodeOf(oldAvailableNodes) + if (oldUnavailableMembersSize > 0) gossipToRandomNodeOf(oldMembers) else false - // 2. gossip to dead nodes - if (oldUnavailableNodesSize > 0) { - val probability: Double = oldUnavailableNodesSize / (oldAvailableNodesSize + 1) - if (random.nextDouble() < probability) gossipToRandomNodeOf(oldUnavailableNodes) + // 2. gossip to dead members + if (oldUnavailableMembersSize > 0) { + val probability: Double = oldUnavailableMembersSize / (oldMembersSize + 1) + if (random.nextDouble() < probability) gossipToRandomNodeOf(oldUnavailableMembers) } // 3. gossip to a seed for facilitating partition healing - if ((!gossipedToSeed || oldAvailableNodesSize < 1) && (seeds.head != remoteAddress)) { - if (oldAvailableNodesSize == 0) gossipToRandomNodeOf(seeds) + if ((!gossipedToSeed || oldMembersSize < 1) && (seeds.head != remoteAddress)) { + if (oldMembersSize == 0) gossipToRandomNodeOf(seeds) else { - val probability = 1.0 / oldAvailableNodesSize + oldUnavailableNodesSize + val probability = 1.0 / oldMembersSize + oldUnavailableMembersSize if (random.nextDouble() <= probability) gossipToRandomNodeOf(seeds) } } } /** - * Gossips to a random node in the set of nodes passed in as argument. + * Gossips to a random member in the set of members passed in as argument. * - * @returns 'true' if it gossiped to a "seed" node. + * @returns 'true' if it gossiped to a "seed" member. */ - private def gossipToRandomNodeOf(nodes: Set[ParsedTransportAddress]): Boolean = { - val peers = nodes filter (_ != remoteAddress) // filter out myself + private def gossipToRandomNodeOf(members: Set[Member]): Boolean = { + val peers = members filter (_.address != remoteAddress) // filter out myself val peer = selectRandomNode(peers) val oldState = state.get val oldGossip = oldState.currentGossip - - setUpConnectionToNode(peer) match { - case Some(connection) ⇒ - try { - Await.result(connection ? newGossip, seedNodeConnectionTimeout) match { - case Success(receiver) ⇒ log.debug("Gossip sent to [{}] was successfully received", receiver) - case Failure(cause) ⇒ log.error(cause, cause.toString) - } - } catch { - case e: TimeoutException ⇒ log.error(e, "Gossip to [%s] timed out".format(connection.path)) - case e: Exception ⇒ log.error(e, "Could not gossip to [{}] due to: {}", connection.path, e.toString) - } - case None ⇒ - // FIXME what to do if the node can't be reached for gossiping - mark as unavailable in failure detector? 
- } - + // if connection can't be established/found => ignore it since the failure detector will take care of the potential problem + setUpConnectionToNode(peer) foreach { _ ! newGossip } seeds exists (peer == _) } /** - * Scrutinizes the cluster; marks nodes detected by the failure detector as unavailable, and notifies all listeners + * Scrutinizes the cluster; marks members detected by the failure detector as unavailable, and notifies all listeners * of the change in the cluster membership. */ @tailrec @@ -391,15 +390,15 @@ case class Gossiper(remote: Remote, system: ActorSystemImpl) { val oldState = state.get val oldGossip = oldState.currentGossip - val oldAvailableNodes = oldGossip.availableNodes - val oldUnavailableNodes = oldGossip.unavailableNodes - val newlyDetectedUnavailableNodes = oldAvailableNodes filterNot failureDetector.isAvailable + val oldMembers = oldGossip.members + val oldUnavailableMembers = oldGossip.unavailableMembers + val newlyDetectedUnavailableMembers = oldMembers filterNot (member ⇒ failureDetector.isAvailable(member.address)) - if (!newlyDetectedUnavailableNodes.isEmpty) { // we have newly detected nodes marked as unavailable - val newAvailableNodes = oldAvailableNodes diff newlyDetectedUnavailableNodes - val newUnavailableNodes = oldUnavailableNodes ++ newlyDetectedUnavailableNodes + if (!newlyDetectedUnavailableMembers.isEmpty) { // we have newly detected members marked as unavailable + val newMembers = oldMembers diff newlyDetectedUnavailableMembers + val newUnavailableMembers = oldUnavailableMembers ++ newlyDetectedUnavailableMembers - val newGossip = oldGossip copy (availableNodes = newAvailableNodes, unavailableNodes = newUnavailableNodes) + val newGossip = oldGossip copy (members = newMembers, unavailableMembers = newUnavailableMembers) val newState = oldState copy (currentGossip = incrementVersionForGossip(newGossip)) // if we won the race then update else try again @@ -407,48 +406,35 @@ case class Gossiper(remote: Remote, system: ActorSystemImpl) { else { // notify listeners on successful update of state for { - deadNode ← newUnavailableNodes - listener ← oldState.nodeMembershipChangeListeners - } listener nodeDisconnected deadNode + deadNode ← newUnavailableMembers + listener ← oldState.memberMembershipChangeListeners + } listener memberDisconnected deadNode } } } - private def setUpConnectionToNode(node: ParsedTransportAddress): Option[ActorRef] = { - //connectionManager.newConnection(node, RootActorPath(RemoteSystemAddress(system.name, node)) / "system" / "cluster") + private def setUpConnectionToNode(member: Member): Option[ActorRef] = { + val address = member.address try { Some( connectionManager.putIfAbsent( - node, - () ⇒ system.actorFor(RootActorPath(RemoteSystemAddress(system.name, node)) / "system" / "cluster"))) - // connectionManager.connectionFor(node).getOrElse( - // throw new RemoteConnectionException("Could not set up connection to node [" + node + "]")) + address, + () ⇒ system.actorFor(RootActorPath(RemoteSystemAddress(system.name, address)) / "system" / "cluster"))) } catch { case e: Exception ⇒ None } } - private def newGossip(): Gossip = Gossip( - version = VectorClock(), - node = address, - availableNodes = Set(address)) + private def newGossip(): Gossip = Gossip(member = address) private def incrementVersionForGossip(from: Gossip): Gossip = { - val newVersion = from.version.increment(nodeFingerprint, newTimestamp) + val newVersion = from.version.increment(memberFingerprint, newTimestamp) from copy (version = newVersion) } - 
private def latestVersionOf(newGossip: Gossip, oldGossip: Gossip): Gossip = { - (newGossip.version compare oldGossip.version) match { - case VectorClock.After ⇒ newGossip // gossiped version is newer, use new version - case VectorClock.Before ⇒ oldGossip // gossiped version is older, use old version - case VectorClock.Concurrent ⇒ oldGossip // can't establish a causal relationship between two versions => conflict - } - } + private def seedNodesWithoutMyself: List[Member] = seeds.filter(_ != remoteAddress.transport).toList - private def seedNodesWithoutMyself: List[Address] = seeds.filter(_ != remoteAddress.transport).toList - - private def selectRandomNode(nodes: Set[Address]): Address = { - nodes.toList(random.nextInt(nodes.size)) + private def selectRandomNode(members: Set[Member]): Member = { + members.toList(random.nextInt(members.size)) } } From 269ff0aa96a246b7be2695eff8212700235b01c8 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Mon, 30 Jan 2012 16:47:33 +0100 Subject: [PATCH 32/94] ZeroMQ module's ConcurrentSocketActorSpec fails. * Fixed usage of extension. See #1746 * Clarified usage of load extensions in docs. See #1745 --- akka-docs/java/extending-akka.rst | 6 ++++++ .../akka/docs/extension/ExtensionDocSpec.scala | 17 ++++++++++++++++- akka-docs/scala/extending-akka.rst | 5 +++++ .../akka/zeromq/ConcurrentSocketActorSpec.scala | 8 ++------ 4 files changed, 29 insertions(+), 7 deletions(-) diff --git a/akka-docs/java/extending-akka.rst b/akka-docs/java/extending-akka.rst index ac60147881..0d88248cc8 100644 --- a/akka-docs/java/extending-akka.rst +++ b/akka-docs/java/extending-akka.rst @@ -54,6 +54,12 @@ Loading from Configuration To be able to load extensions from your Akka configuration you must add FQCNs of implementations of either ``ExtensionId`` or ``ExtensionIdProvider`` in the "akka.extensions" section of the config you provide to your ``ActorSystem``. 
+:: + + akka { + extensions = ["akka.docs.extension.ExtensionDocTestBase.CountExtension"] + } + Applicability ============= diff --git a/akka-docs/scala/code/akka/docs/extension/ExtensionDocSpec.scala b/akka-docs/scala/code/akka/docs/extension/ExtensionDocSpec.scala index 0c778a4812..05baa28ecb 100644 --- a/akka-docs/scala/code/akka/docs/extension/ExtensionDocSpec.scala +++ b/akka-docs/scala/code/akka/docs/extension/ExtensionDocSpec.scala @@ -41,6 +41,15 @@ object CountExtension //#extensionid object ExtensionDocSpec { + + val config = """ + //#config + akka { + extensions = ["akka.docs.extension.CountExtension$"] + } + //#config + """ + //#extension-usage-actor class MyActor extends Actor { @@ -64,7 +73,7 @@ object ExtensionDocSpec { //#extension-usage-actor-trait } -class ExtensionDocSpec extends AkkaSpec { +class ExtensionDocSpec extends AkkaSpec(ExtensionDocSpec.config) { import ExtensionDocSpec._ "demonstrate how to create an extension in Scala" in { @@ -73,4 +82,10 @@ class ExtensionDocSpec extends AkkaSpec { //#extension-usage } + "demonstrate how to lookup a configured extension in Scala" in { + //#extension-lookup + system.extension(CountExtension) + //#extension-lookup + } + } diff --git a/akka-docs/scala/extending-akka.rst b/akka-docs/scala/extending-akka.rst index 0fe149e0f2..7627326767 100644 --- a/akka-docs/scala/extending-akka.rst +++ b/akka-docs/scala/extending-akka.rst @@ -48,6 +48,11 @@ Loading from Configuration To be able to load extensions from your Akka configuration you must add FQCNs of implementations of either ``ExtensionId`` or ``ExtensionIdProvider`` in the ``akka.extensions`` section of the config you provide to your ``ActorSystem``. +.. includecode:: code/akka/docs/extension/ExtensionDocSpec.scala + :include: config + +Note that in this case ``CountExtension`` is an object and therefore the class name ends with ``$``. 
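+
+Once loaded, the extension can be looked up on the ``ActorSystem`` like any other
+extension (a minimal sketch, mirroring the lookup test above)::
+
+  system.extension(CountExtension)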
+ Applicability ============= diff --git a/akka-zeromq/src/test/scala/akka/zeromq/ConcurrentSocketActorSpec.scala b/akka-zeromq/src/test/scala/akka/zeromq/ConcurrentSocketActorSpec.scala index 633e598e02..7c498bd653 100644 --- a/akka-zeromq/src/test/scala/akka/zeromq/ConcurrentSocketActorSpec.scala +++ b/akka-zeromq/src/test/scala/akka/zeromq/ConcurrentSocketActorSpec.scala @@ -9,11 +9,7 @@ import akka.util.duration._ import akka.actor.{ Cancellable, Actor, Props, ActorRef } object ConcurrentSocketActorSpec { - val config = """ -akka { - extensions = ["akka.zeromq.ZeroMQExtension"] -} -""" + val config = "" } class ConcurrentSocketActorSpec @@ -23,7 +19,7 @@ class ConcurrentSocketActorSpec val endpoint = "tcp://127.0.0.1:%s" format { val s = new java.net.ServerSocket(0); try s.getLocalPort finally s.close() } - def zmq = system.extension(ZeroMQExtension) + def zmq = ZeroMQExtension(system) "ConcurrentSocketActor" should { "support pub-sub connections" in { From 1a5e7590ea719988ca45c6c01379e8d1d8bca712 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 30 Jan 2012 17:32:24 +0100 Subject: [PATCH 33/94] Adding tests for tryRecover and andThen --- .../test/scala/akka/dispatch/FutureSpec.scala | 28 +++++++++- .../src/main/scala/akka/dispatch/Future.scala | 51 +++++++++++++------ 2 files changed, 63 insertions(+), 16 deletions(-) diff --git a/akka-actor-tests/src/test/scala/akka/dispatch/FutureSpec.scala b/akka-actor-tests/src/test/scala/akka/dispatch/FutureSpec.scala index 0e162708d3..62e3cb8e22 100644 --- a/akka-actor-tests/src/test/scala/akka/dispatch/FutureSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/dispatch/FutureSpec.scala @@ -13,10 +13,10 @@ import akka.testkit.AkkaSpec import org.scalatest.junit.JUnitSuite import akka.testkit.DefaultTimeout import akka.testkit.TestLatch -import java.util.concurrent.{ TimeoutException, TimeUnit, CountDownLatch } import scala.runtime.NonLocalReturnControl import akka.pattern.ask import java.lang.{ IllegalStateException, ArithmeticException } +import java.util.concurrent._ object FutureSpec { class TestActor extends Actor { @@ -302,6 +302,32 @@ class FutureSpec extends AkkaSpec with Checkers with BeforeAndAfterAll with Defa } } + "tryRecover from exceptions" in { + val o = new IllegalStateException("original") + val r = new IllegalStateException("recovered") + + intercept[IllegalStateException] { + Await.result(Promise.failed[String](o) tryRecover { case _ if false == true ⇒ Promise.successful("yay!") }, timeout.duration) + } must be(o) + + Await.result(Promise.failed[String](o) tryRecover { case _ ⇒ Promise.successful("yay!") }, timeout.duration) must equal("yay!") + + intercept[IllegalStateException] { + Await.result(Promise.failed[String](o) tryRecover { case _ ⇒ Promise.failed[String](r) }, timeout.duration) + } must be(r) + } + + "andThen like a boss" in { + val q = new LinkedBlockingQueue[Int] + for (i ← 1 to 1000) { + Await.result(Future { q.add(1); 3 } andThen { case _ ⇒ q.add(2) } andThen { case Right(0) ⇒ q.add(Int.MaxValue) } andThen { case _ ⇒ q.add(3); }, timeout.duration) must be(3) + q.poll() must be(1) + q.poll() must be(2) + q.poll() must be(3) + q.clear() + } + } + "firstCompletedOf" in { val futures = Vector.fill[Future[Int]](10)(Promise[Int]()) :+ Promise.successful[Int](5) Await.result(Future.firstCompletedOf(futures), timeout.duration) must be(5) diff --git a/akka-actor/src/main/scala/akka/dispatch/Future.scala b/akka-actor/src/main/scala/akka/dispatch/Future.scala index 4918253002..90eb6a0b8d 100644 --- 
a/akka-actor/src/main/scala/akka/dispatch/Future.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Future.scala @@ -362,7 +362,7 @@ sealed trait Future[+T] extends Await.Awaitable[T] { case Right(r) ⇒ that onSuccess { case r2 ⇒ p success ((r, r2)) } } that onFailure { case f ⇒ p failure f } - p + p.future } /** @@ -435,7 +435,7 @@ sealed trait Future[+T] extends Await.Awaitable[T] { case Left(t) ⇒ p success t case Right(r) ⇒ p failure new NoSuchElementException("Future.failed not completed with a throwable. Instead completed with: " + r) } - p + p.future } /** @@ -448,7 +448,7 @@ sealed trait Future[+T] extends Await.Awaitable[T] { case r @ Right(_) ⇒ p complete r case _ ⇒ p completeWith that } - p + p.future } /** @@ -463,26 +463,26 @@ sealed trait Future[+T] extends Await.Awaitable[T] { * */ final def recover[A >: T](pf: PartialFunction[Throwable, A]): Future[A] = { - val future = Promise[A]() + val p = Promise[A]() onComplete { - case Left(e) if pf isDefinedAt e ⇒ future.complete(try { Right(pf(e)) } catch { case x: Exception ⇒ Left(x) }) - case otherwise ⇒ future complete otherwise + case Left(e) if pf isDefinedAt e ⇒ p.complete(try { Right(pf(e)) } catch { case x: Exception ⇒ Left(x) }) + case otherwise ⇒ p complete otherwise } - future + p.future } /** - * Creates a new future that will handle any matching throwable that this - * future might contain by assigning it a value of another future. + * Returns a new Future that will, in case this future fails, + * be completed with the resulting Future of the given PartialFunction, + * if the given PartialFunction matches the failure of the original Future. * - * If there is no match, or if this future contains - * a valid result then the new future will contain the same result. + * If the PartialFunction throws, that Throwable will be propagated to the returned Future. * * Example: * * {{{ * val f = Future { Int.MaxValue } - * future (6 / 0) tryRecover { case e: ArithmeticException => f } // result: Int.MaxValue + * Future (6 / 0) tryRecover { case e: ArithmeticException => f } // result: Int.MaxValue * }}} */ def tryRecover[U >: T](pf: PartialFunction[Throwable, Future[U]]): Future[U] = { @@ -497,6 +497,27 @@ sealed trait Future[+T] extends Await.Awaitable[T] { p.future } + /** + * Returns a new Future that will contain the completed result of this Future, + * and which will invoke the supplied PartialFunction when completed. + * + * This allows for establishing order of side-effects. + * + * {{{ + * Future { 5 } andThen { + * case something => assert(something is awesome) + * } andThen { + * case Left(t) => handleProblem(t) + * case Right(v) => dealWithSuccess(v) + * } + * }}} + */ + def andThen[U](pf: PartialFunction[Either[Throwable, T], U]): Future[T] = { + val p = Promise[T]() + onComplete { case r ⇒ try if (pf isDefinedAt r) pf(r) finally p complete r } + p.future + } + /** * Creates a new Future by applying a function to the successful result of * this Future. 
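 * Example (a minimal sketch; the value is purely illustrative):
 *
 * {{{
 * Future { "HelloWorld" } map { _.length } // eventually contains 10
 * }}}
 *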
If this Future is completed with an exception then the new @@ -545,7 +566,7 @@ sealed trait Future[+T] extends Await.Awaitable[T] { case e: ClassCastException ⇒ Left(e) }) } - fa + fa.future } /** @@ -576,7 +597,7 @@ sealed trait Future[+T] extends Await.Awaitable[T] { logError("Future.flatMap", e) } } - p + p.future } /** @@ -616,7 +637,7 @@ sealed trait Future[+T] extends Await.Awaitable[T] { Left(e) }) } - p + p.future } protected def logError(msg: String, problem: Throwable): Unit = { From 32b5e5314600f8339d48754ad7d5d2433b09b7ce Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Mon, 30 Jan 2012 17:42:50 +0100 Subject: [PATCH 34/94] Renaming tests to reflect current APIs --- .../src/test/scala/akka/dispatch/FutureSpec.scala | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/akka-actor-tests/src/test/scala/akka/dispatch/FutureSpec.scala b/akka-actor-tests/src/test/scala/akka/dispatch/FutureSpec.scala index 62e3cb8e22..89735d1a94 100644 --- a/akka-actor-tests/src/test/scala/akka/dispatch/FutureSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/dispatch/FutureSpec.scala @@ -881,7 +881,7 @@ class FutureSpec extends AkkaSpec with Checkers with BeforeAndAfterAll with Defa "be completed" in { f((future, _) ⇒ future must be('completed)) } "contain a value" in { f((future, result) ⇒ future.value must be(Some(Right(result)))) } "return result with 'get'" in { f((future, result) ⇒ Await.result(future, timeout.duration) must be(result)) } - "return result with 'Await.sync'" in { f((future, result) ⇒ Await.result(future, timeout.duration) must be(result)) } + "return result with 'Await.result'" in { f((future, result) ⇒ Await.result(future, timeout.duration) must be(result)) } "not timeout" in { f((future, _) ⇒ Await.ready(future, 0 millis)) } "filter result" in { f { (future, result) ⇒ @@ -932,7 +932,7 @@ class FutureSpec extends AkkaSpec with Checkers with BeforeAndAfterAll with Defa }) } "throw exception with 'get'" in { f((future, message) ⇒ (evaluating { Await.result(future, timeout.duration) } must produce[E]).getMessage must be(message)) } - "throw exception with 'Await.sync'" in { f((future, message) ⇒ (evaluating { Await.result(future, timeout.duration) } must produce[E]).getMessage must be(message)) } + "throw exception with 'Await.result'" in { f((future, message) ⇒ (evaluating { Await.result(future, timeout.duration) } must produce[E]).getMessage must be(message)) } "retain exception with filter" in { f { (future, message) ⇒ (evaluating { Await.result(future filter (_ ⇒ true), timeout.duration) } must produce[E]).getMessage must be(message) From ad1c84b950101240818a12ceb913d69d22627493 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Mon, 30 Jan 2012 19:40:28 +0100 Subject: [PATCH 35/94] Merged with master. 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- .../src/main/scala/akka/remote/Gossiper.scala | 40 ++--- .../akka/remote/RemoteActorRefProvider.scala | 13 +- .../scala/akka/remote/RemoteSettings.scala | 6 +- .../GossipingAccrualFailureDetectorSpec.scala | 166 +++++++++--------- 4 files changed, 115 insertions(+), 110 deletions(-) diff --git a/akka-remote/src/main/scala/akka/remote/Gossiper.scala b/akka-remote/src/main/scala/akka/remote/Gossiper.scala index e5b6e938bc..55165f0891 100644 --- a/akka-remote/src/main/scala/akka/remote/Gossiper.scala +++ b/akka-remote/src/main/scala/akka/remote/Gossiper.scala @@ -36,6 +36,8 @@ trait NodeMembershipChangeListener { /** * Base trait for all cluster messages. All ClusterMessage's are serializable. */ +sealed trait ClusterMessage extends Serializable + /** * Command to join the cluster. */ @@ -116,7 +118,7 @@ final class ClusterDaemon(system: ActorSystem, gossiper: Gossiper) extends Actor * gossip to random seed with certain probability depending on number of unreachable, seed and live members. * */ -case class Gossiper(remote: Remote, system: ActorSystemImpl) { +case class Gossiper(remote: RemoteActorRefProvider, system: ActorSystemImpl) { /** * Represents the state for this Gossiper. Implemented using optimistic lockless concurrency, @@ -128,10 +130,15 @@ case class Gossiper(remote: Remote, system: ActorSystemImpl) { // configuration private val remoteSettings = remote.remoteSettings + + private val protocol = "akka" // TODO should this be hardcoded? + private val address = remote.transport.address + private val memberFingerprint = address.## + private val serialization = remote.serialization private val failureDetector = remote.failureDetector - private val initalDelayForGossip = remoteSettings.InitalDelayForGossip + private val initialDelayForGossip = remoteSettings.InitialDelayForGossip private val gossipFrequency = remoteSettings.GossipFrequency implicit val seedNodeConnectionTimeout = remoteSettings.SeedNodeConnectionTimeout @@ -139,17 +146,9 @@ case class Gossiper(remote: Remote, system: ActorSystemImpl) { // seed members private val seeds: Set[Member] = { - val seeds = remoteSettings.SeedNodes flatMap { - case uta: UnparsedTransportAddress ⇒ - uta.parse(remote.transports) match { - case pta: Address ⇒ Some(Member(pta, MemberStatus.Up())) - case _ ⇒ None - } - case _ ⇒ None - } if (remoteSettings.SeedNodes.isEmpty) throw new ConfigurationException( "At least one seed member must be defined in the configuration [akka.cluster.seed-members]") - else remoteSettings.SeedNodes + else remoteSettings.SeedNodes map (address ⇒ Member(address, MemberStatus.Up())) } private val isRunning = new AtomicBoolean(true) @@ -168,17 +167,17 @@ case class Gossiper(remote: Remote, system: ActorSystemImpl) { // start periodic gossip and cluster scrutinization val initateGossipCanceller = system.scheduler.schedule( - Duration(initalDelayForGossip.toSeconds, SECONDS), Duration(gossipFrequency.toSeconds, SECONDS))(initateGossip()) + Duration(initialDelayForGossip.toSeconds, SECONDS), Duration(gossipFrequency.toSeconds, SECONDS))(initateGossip()) val scrutinizeCanceller = system.scheduler.schedule( - Duration(initalDelayForGossip.toSeconds, SECONDS), Duration(gossipFrequency.toSeconds, SECONDS))(scrutinize()) + Duration(initialDelayForGossip.toSeconds, SECONDS), Duration(gossipFrequency.toSeconds, SECONDS))(scrutinize()) /** * Shuts down all connections to other members, the cluster daemon and the 
periodic gossip and cleanup tasks. */ def shutdown() { if (isRunning.compareAndSet(true, false)) { - log.info("Shutting down Gossiper for [{}]", remoteAddress) + log.info("Shutting down Gossiper for [{}]", address) connectionManager.shutdown() system.stop(clusterDaemon) initateGossipCanceller.cancel() @@ -322,7 +321,6 @@ case class Gossiper(remote: Remote, system: ActorSystemImpl) { // 1. Gossiper is shut down // 2. The connection time window has expired if (isRunning.get) { - println("=======>>> isRun: " + isRunning.get + " " + remoteAddress) if (timer.timeLeft.toMillis > 0) joinCluster(timer) // recur else throw new RemoteConnectionException( "Could not join cluster (any of the seed members) - giving up after trying for " + @@ -357,7 +355,7 @@ case class Gossiper(remote: Remote, system: ActorSystemImpl) { } // 3. gossip to a seed for facilitating partition healing - if ((!gossipedToSeed || oldMembersSize < 1) && (seeds.head != remoteAddress)) { + if ((!gossipedToSeed || oldMembersSize < 1) && (seeds.head != address)) { if (oldMembersSize == 0) gossipToRandomNodeOf(seeds) else { val probability = 1.0 / oldMembersSize + oldUnavailableMembersSize @@ -372,7 +370,7 @@ case class Gossiper(remote: Remote, system: ActorSystemImpl) { * @returns 'true' if it gossiped to a "seed" member. */ private def gossipToRandomNodeOf(members: Set[Member]): Boolean = { - val peers = members filter (_.address != remoteAddress) // filter out myself + val peers = members filter (_.address != address) // filter out myself val peer = selectRandomNode(peers) val oldState = state.get val oldGossip = oldState.currentGossip @@ -419,7 +417,7 @@ case class Gossiper(remote: Remote, system: ActorSystemImpl) { Some( connectionManager.putIfAbsent( address, - () ⇒ system.actorFor(RootActorPath(RemoteSystemAddress(system.name, address)) / "system" / "cluster"))) + () ⇒ system.actorFor(RootActorPath(Address(protocol, system.name)) / "system" / "cluster"))) } catch { case e: Exception ⇒ None } @@ -432,9 +430,7 @@ case class Gossiper(remote: Remote, system: ActorSystemImpl) { from copy (version = newVersion) } - private def seedNodesWithoutMyself: List[Member] = seeds.filter(_ != remoteAddress.transport).toList + private def seedNodesWithoutMyself: List[Member] = seeds.filter(_.address != address).toList - private def selectRandomNode(members: Set[Member]): Member = { - members.toList(random.nextInt(members.size)) - } + private def selectRandomNode(members: Set[Member]): Member = members.toList(random.nextInt(members.size)) } diff --git a/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala b/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala index 6081372e6b..a169f9e9b5 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala @@ -4,6 +4,7 @@ package akka.remote +import akka.AkkaException import akka.actor._ import akka.dispatch._ import akka.event.{ DeathWatch, Logging, LoggingAdapter } @@ -15,6 +16,10 @@ import akka.util.ReflectiveAccess import akka.serialization.Serialization import akka.serialization.SerializationExtension +class RemoteException(msg: String) extends AkkaException(msg) +class RemoteCommunicationException(msg: String) extends RemoteException(msg) +class RemoteConnectionException(msg: String) extends RemoteException(msg) + /** * Remote ActorRefProvider. Starts up actor on remote node and creates a RemoteActorRef representing it. 
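 * Note: the failure detector is not created until init() is called with the ActorSystemImpl (see below).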
*/ @@ -41,8 +46,6 @@ class RemoteActorRefProvider( val deathWatch = new RemoteDeathWatch(local.deathWatch, this) - val failureDetector = new AccrualFailureDetector(remoteSettings.FailureDetectorThreshold, remoteSettings.FailureDetectorMaxSampleSize) - // these are only available after init() def rootGuardian = local.rootGuardian def guardian = local.guardian @@ -54,6 +57,10 @@ class RemoteActorRefProvider( def tempPath() = local.tempPath() def tempContainer = local.tempContainer + @volatile + private var _failureDetector: AccrualFailureDetector = _ + def failureDetector: AccrualFailureDetector = _failureDetector + @volatile private var _transport: RemoteTransport = _ def transport: RemoteTransport = _transport @@ -73,6 +80,8 @@ class RemoteActorRefProvider( def init(system: ActorSystemImpl) { local.init(system) + _failureDetector = new AccrualFailureDetector(remoteSettings.FailureDetectorThreshold, remoteSettings.FailureDetectorMaxSampleSize, system) + _remoteDaemon = new RemoteSystemDaemon(system, rootPath / "remote", rootGuardian, log) local.registerExtraNames(Map(("remote", remoteDaemon))) diff --git a/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala b/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala index a2ca0435b9..84428d739b 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala @@ -29,10 +29,10 @@ class RemoteSettings(val config: Config, val systemName: String) { val UseCluster = getBoolean("akka.cluster.use-cluster") val SeedNodeConnectionTimeout = Duration(config.getMilliseconds("akka.cluster.seed-node-connection-timeout"), MILLISECONDS) val MaxTimeToRetryJoiningCluster = Duration(config.getMilliseconds("akka.cluster.max-time-to-retry-joining-cluster"), MILLISECONDS) - val InitalDelayForGossip = Duration(getMilliseconds("akka.cluster.gossip.initialDelay"), MILLISECONDS) + val InitialDelayForGossip = Duration(getMilliseconds("akka.cluster.gossip.initialDelay"), MILLISECONDS) val GossipFrequency = Duration(getMilliseconds("akka.cluster.gossip.frequency"), MILLISECONDS) - val SeedNodes = Set.empty[RemoteNettyAddress] ++ getStringList("akka.cluster.seed-nodes").asScala.collect { - case RemoteAddressExtractor(addr) ⇒ addr.transport + val SeedNodes = Set.empty[Address] ++ getStringList("akka.cluster.seed-nodes").asScala.collect { + case AddressExtractor(addr) ⇒ addr } val RemoteSystemDaemonAckTimeout = Duration(getMilliseconds("akka.remote.remote-daemon-ack-timeout"), MILLISECONDS) diff --git a/akka-remote/src/test/scala/akka/remote/GossipingAccrualFailureDetectorSpec.scala b/akka-remote/src/test/scala/akka/remote/GossipingAccrualFailureDetectorSpec.scala index 85f1c5a084..1e954b34fb 100644 --- a/akka-remote/src/test/scala/akka/remote/GossipingAccrualFailureDetectorSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/GossipingAccrualFailureDetectorSpec.scala @@ -1,95 +1,95 @@ -/** - * Copyright (C) 2009-2011 Typesafe Inc. - */ -package akka.remote +// /** +// * Copyright (C) 2009-2011 Typesafe Inc. 
+// */ +// package akka.remote -import java.net.InetSocketAddress +// import java.net.InetSocketAddress -import akka.testkit._ -import akka.dispatch._ -import akka.actor._ -import com.typesafe.config._ +// import akka.testkit._ +// import akka.dispatch._ +// import akka.actor._ +// import com.typesafe.config._ -class GossipingAccrualFailureDetectorSpec extends AkkaSpec(""" - akka { - loglevel = "INFO" - actor.provider = "akka.remote.RemoteActorRefProvider" +// class GossipingAccrualFailureDetectorSpec extends AkkaSpec(""" +// akka { +// loglevel = "INFO" +// actor.provider = "akka.remote.RemoteActorRefProvider" - remote.server.hostname = localhost - remote.server.port = 5550 - remote.failure-detector.threshold = 3 - cluster.seed-nodes = ["akka://localhost:5551"] - } - """) with ImplicitSender { +// remote.server.hostname = localhost +// remote.server.port = 5550 +// remote.failure-detector.threshold = 3 +// cluster.seed-nodes = ["akka://localhost:5551"] +// } +// """) with ImplicitSender { - val conn1 = RemoteNettyAddress("localhost", 5551) - val node1 = ActorSystem("GossiperSpec", ConfigFactory - .parseString("akka { remote.server.port=5551, cluster.use-cluster = on }") - .withFallback(system.settings.config)) - val remote1 = - node1.asInstanceOf[ActorSystemImpl] - .provider.asInstanceOf[RemoteActorRefProvider] - .remote - val gossiper1 = remote1.gossiper - val fd1 = remote1.failureDetector - gossiper1 must be('defined) +// val conn1 = Address("akka", system.systemName, Some("localhost"), Some(5551)) +// val node1 = ActorSystem("GossiperSpec", ConfigFactory +// .parseString("akka { remote.server.port=5551, cluster.use-cluster = on }") +// .withFallback(system.settings.config)) +// val remote1 = +// node1.asInstanceOf[ActorSystemImpl] +// .provider.asInstanceOf[RemoteActorRefProvider] +// .remote +// val gossiper1 = remote1.gossiper +// val fd1 = remote1.failureDetector +// gossiper1 must be('defined) - val conn2 = RemoteNettyAddress("localhost", 5552) - val node2 = ActorSystem("GossiperSpec", ConfigFactory - .parseString("akka { remote.server.port=5552, cluster.use-cluster = on }") - .withFallback(system.settings.config)) - val remote2 = - node2.asInstanceOf[ActorSystemImpl] - .provider.asInstanceOf[RemoteActorRefProvider] - .remote - val gossiper2 = remote2.gossiper - val fd2 = remote2.failureDetector - gossiper2 must be('defined) +// val conn2 = RemoteNettyAddress("localhost", 5552) +// val node2 = ActorSystem("GossiperSpec", ConfigFactory +// .parseString("akka { remote.server.port=5552, cluster.use-cluster = on }") +// .withFallback(system.settings.config)) +// val remote2 = +// node2.asInstanceOf[ActorSystemImpl] +// .provider.asInstanceOf[RemoteActorRefProvider] +// .remote +// val gossiper2 = remote2.gossiper +// val fd2 = remote2.failureDetector +// gossiper2 must be('defined) - val conn3 = RemoteNettyAddress("localhost", 5553) - val node3 = ActorSystem("GossiperSpec", ConfigFactory - .parseString("akka { remote.server.port=5553, cluster.use-cluster = on }") - .withFallback(system.settings.config)) - val remote3 = - node3.asInstanceOf[ActorSystemImpl] - .provider.asInstanceOf[RemoteActorRefProvider] - .remote - val gossiper3 = remote3.gossiper - val fd3 = remote3.failureDetector - gossiper3 must be('defined) +// val conn3 = RemoteNettyAddress("localhost", 5553) +// val node3 = ActorSystem("GossiperSpec", ConfigFactory +// .parseString("akka { remote.server.port=5553, cluster.use-cluster = on }") +// .withFallback(system.settings.config)) +// val remote3 = +// 
node3.asInstanceOf[ActorSystemImpl] +// .provider.asInstanceOf[RemoteActorRefProvider] +// .remote +// val gossiper3 = remote3.gossiper +// val fd3 = remote3.failureDetector +// gossiper3 must be('defined) - "A Gossip-driven Failure Detector" must { +// "A Gossip-driven Failure Detector" must { - "receive gossip heartbeats so that all healthy nodes in the cluster are marked 'available'" ignore { - Thread.sleep(5000) // let them gossip for 10 seconds - fd1.isAvailable(conn2) must be(true) - fd1.isAvailable(conn3) must be(true) - fd2.isAvailable(conn1) must be(true) - fd2.isAvailable(conn3) must be(true) - fd3.isAvailable(conn1) must be(true) - fd3.isAvailable(conn2) must be(true) - } +// "receive gossip heartbeats so that all healthy nodes in the cluster are marked 'available'" ignore { +// Thread.sleep(5000) // let them gossip for 10 seconds +// fd1.isAvailable(conn2) must be(true) +// fd1.isAvailable(conn3) must be(true) +// fd2.isAvailable(conn1) must be(true) +// fd2.isAvailable(conn3) must be(true) +// fd3.isAvailable(conn1) must be(true) +// fd3.isAvailable(conn2) must be(true) +// } - "mark node as 'unavailable' if a node in the cluster is shut down and its heartbeats stops" ignore { - // kill node 3 - gossiper3.get.shutdown() - node3.shutdown() - Thread.sleep(5000) // let them gossip for 10 seconds +// "mark node as 'unavailable' if a node in the cluster is shut down and its heartbeats stops" ignore { +// // kill node 3 +// gossiper3.get.shutdown() +// node3.shutdown() +// Thread.sleep(5000) // let them gossip for 10 seconds - fd1.isAvailable(conn2) must be(true) - fd1.isAvailable(conn3) must be(false) - fd2.isAvailable(conn1) must be(true) - fd2.isAvailable(conn3) must be(false) - } - } +// fd1.isAvailable(conn2) must be(true) +// fd1.isAvailable(conn3) must be(false) +// fd2.isAvailable(conn1) must be(true) +// fd2.isAvailable(conn3) must be(false) +// } +// } - override def atTermination() { - gossiper1.get.shutdown() - gossiper2.get.shutdown() - gossiper3.get.shutdown() - node1.shutdown() - node2.shutdown() - node3.shutdown() - // FIXME Ordering problem - If we shut down the ActorSystem before the Gossiper then we get an IllegalStateException - } -} +// override def atTermination() { +// gossiper1.get.shutdown() +// gossiper2.get.shutdown() +// gossiper3.get.shutdown() +// node1.shutdown() +// node2.shutdown() +// node3.shutdown() +// // FIXME Ordering problem - If we shut down the ActorSystem before the Gossiper then we get an IllegalStateException +// } +// } From e8892eb387814dd528a690a09f7e69b68fcc592b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Mon, 30 Jan 2012 19:57:05 +0100 Subject: [PATCH 36/94] Moved failure detector config from 'remote' to 'cluster' --- akka-remote/src/main/resources/reference.conf | 38 ++++++++++--------- .../scala/akka/remote/RemoteSettings.scala | 6 +-- 2 files changed, 22 insertions(+), 22 deletions(-) diff --git a/akka-remote/src/main/resources/reference.conf b/akka-remote/src/main/resources/reference.conf index f9c6430f6f..76f1980615 100644 --- a/akka-remote/src/main/resources/reference.conf +++ b/akka-remote/src/main/resources/reference.conf @@ -61,23 +61,23 @@ akka { # it reuses inbound connections for replies, which is called a passive client connection (i.e. from server # to client). netty { - + # (O) In case of increased latency / overflow how long # should we wait (blocking the sender) until we deem the send to be cancelled? # 0 means "never backoff", any positive number will indicate time to block at most. 
backoff-timeout = 0ms - + # (I&O) Generate your own with '$AKKA_HOME/scripts/generate_config_with_secure_cookie.sh' # or using 'akka.util.Crypt.generateSecureCookie' secure-cookie = "" - + # (I) Should the remote server require that it peers share the same secure-cookie # (defined in the 'remote' section)? require-cookie = off # (I) Reuse inbound connections for outbound messages use-passive-connections = on - + # (I) The hostname or ip to bind the remoting to, # InetAddress.getLocalHost.getHostAddress is used if empty hostname = "" @@ -118,19 +118,6 @@ akka { reconnection-time-window = 600s } - # accrual failure detection config - failure-detector { - - # defines the failure detector threshold - # A low threshold is prone to generate many wrong suspicions but ensures - # a quick detection in the event of a real crash. Conversely, a high - # threshold generates fewer mistakes but needs more time to detect - # actual crashes - threshold = 8 - - max-sample-size = 1000 - } - # The dispatcher used for remote system messages compute-grid-dispatcher { # defaults to same settings as default-dispatcher @@ -146,9 +133,24 @@ akka { cluster { use-cluster = off + seed-nodes = [] - max-time-to-retry-joining-cluster = 30s seed-node-connection-timeout = 30s + max-time-to-retry-joining-cluster = 30s + + # accrual failure detection config + failure-detector { + + # defines the failure detector threshold + # A low threshold is prone to generate many wrong suspicions but ensures + # a quick detection in the event of a real crash. Conversely, a high + # threshold generates fewer mistakes but needs more time to detect + # actual crashes + threshold = 8 + + max-sample-size = 1000 + } + gossip { initialDelay = 5s frequency = 1s diff --git a/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala b/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala index 84428d739b..0060233246 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala @@ -20,13 +20,11 @@ class RemoteSettings(val config: Config, val systemName: String) { val LogReceive = getBoolean("akka.remote.log-received-messages") val LogSend = getBoolean("akka.remote.log-sent-messages") - // AccrualFailureDetector - val FailureDetectorThreshold = getInt("akka.remote.failure-detector.threshold") - val FailureDetectorMaxSampleSize = getInt("akka.remote.failure-detector.max-sample-size") - // TODO cluster config will go into akka-cluster/reference.conf when we enable that module // cluster config section val UseCluster = getBoolean("akka.cluster.use-cluster") + val FailureDetectorThreshold = getInt("akka.cluster.failure-detector.threshold") + val FailureDetectorMaxSampleSize = getInt("akka.cluster.failure-detector.max-sample-size") val SeedNodeConnectionTimeout = Duration(config.getMilliseconds("akka.cluster.seed-node-connection-timeout"), MILLISECONDS) val MaxTimeToRetryJoiningCluster = Duration(config.getMilliseconds("akka.cluster.max-time-to-retry-joining-cluster"), MILLISECONDS) val InitialDelayForGossip = Duration(getMilliseconds("akka.cluster.gossip.initialDelay"), MILLISECONDS) From 1f47e93088a327a2f57900267902a0c0f385cdc0 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Mon, 30 Jan 2012 20:37:52 +0100 Subject: [PATCH 37/94] Adjusted performance benchmarks for fjpool. 
See #1728 --- .../TellThroughput10000PerformanceSpec.scala | 169 ----------------- ...ThroughputComputationPerformanceSpec.scala | 7 +- .../TellThroughputPerformanceSpec.scala | 59 +++++- ...hputPinnedDispatchersPerformanceSpec.scala | 171 ------------------ .../TradingLatencyPerformanceSpec.scala | 4 +- .../trading/system/TradingSystem.scala | 6 +- .../TradingThroughputPerformanceSpec.scala | 4 +- .../workbench/BenchmarkConfig.scala | 52 +++--- .../workbench/PerformanceSpec.scala | 3 +- 9 files changed, 85 insertions(+), 390 deletions(-) delete mode 100644 akka-actor-tests/src/test/scala/akka/performance/microbench/TellThroughput10000PerformanceSpec.scala delete mode 100644 akka-actor-tests/src/test/scala/akka/performance/microbench/TellThroughputPinnedDispatchersPerformanceSpec.scala diff --git a/akka-actor-tests/src/test/scala/akka/performance/microbench/TellThroughput10000PerformanceSpec.scala b/akka-actor-tests/src/test/scala/akka/performance/microbench/TellThroughput10000PerformanceSpec.scala deleted file mode 100644 index 1ef92549c2..0000000000 --- a/akka-actor-tests/src/test/scala/akka/performance/microbench/TellThroughput10000PerformanceSpec.scala +++ /dev/null @@ -1,169 +0,0 @@ -package akka.performance.microbench - -import akka.performance.workbench.PerformanceSpec -import org.apache.commons.math.stat.descriptive.DescriptiveStatistics -import akka.actor._ -import java.util.concurrent.{ ThreadPoolExecutor, CountDownLatch, TimeUnit } -import akka.dispatch._ -import java.util.concurrent.ThreadPoolExecutor.AbortPolicy -import java.util.concurrent.BlockingQueue -import java.util.concurrent.LinkedBlockingQueue -import akka.util.Duration -import akka.util.duration._ - -// -server -Xms512M -Xmx1024M -XX:+UseParallelGC -Dbenchmark=true -Dbenchmark.repeatFactor=500 -@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) -class TellThroughput10000PerformanceSpec extends PerformanceSpec { - import TellThroughput10000PerformanceSpec._ - - val repeat = 30000L * repeatFactor - - "Tell" must { - "warmup" in { - runScenario(4, warmup = true) - } - "warmup more" in { - runScenario(4, warmup = true) - } - "perform with load 1" in { - runScenario(1) - } - "perform with load 2" in { - runScenario(2) - } - "perform with load 4" in { - runScenario(4) - } - "perform with load 6" in { - runScenario(6) - } - "perform with load 8" in { - runScenario(8) - } - "perform with load 10" in { - runScenario(10) - } - "perform with load 12" in { - runScenario(12) - } - "perform with load 14" in { - runScenario(14) - } - "perform with load 16" in { - runScenario(16) - } - "perform with load 18" in { - runScenario(18) - } - "perform with load 20" in { - runScenario(20) - } - "perform with load 22" in { - runScenario(22) - } - "perform with load 24" in { - runScenario(24) - } - "perform with load 26" in { - runScenario(26) - } - "perform with load 28" in { - runScenario(28) - } - "perform with load 30" in { - runScenario(30) - } - "perform with load 32" in { - runScenario(32) - } - "perform with load 34" in { - runScenario(34) - } - "perform with load 36" in { - runScenario(36) - } - "perform with load 38" in { - runScenario(38) - } - "perform with load 40" in { - runScenario(40) - } - "perform with load 42" in { - runScenario(42) - } - "perform with load 44" in { - runScenario(44) - } - "perform with load 46" in { - runScenario(46) - } - "perform with load 48" in { - runScenario(48) - } - - def runScenario(numberOfClients: Int, warmup: Boolean = false) { - if (acceptClients(numberOfClients)) { - - val 
dispatcherKey = "benchmark.high-throughput-dispatcher" - val latch = new CountDownLatch(numberOfClients) - val repeatsPerClient = repeat / numberOfClients - val destinations = for (i ← 0 until numberOfClients) - yield system.actorOf(Props(new Destination).withDispatcher(dispatcherKey)) - val clients = for ((dest, j) ← destinations.zipWithIndex) - yield system.actorOf(Props(new Client(dest, latch, repeatsPerClient)).withDispatcher(dispatcherKey)) - - val start = System.nanoTime - clients.foreach(_ ! Run) - val ok = latch.await(maxRunDuration.toMillis, TimeUnit.MILLISECONDS) - val durationNs = (System.nanoTime - start) - - if (!warmup) { - ok must be(true) - logMeasurement(numberOfClients, durationNs, repeat) - } - clients.foreach(system.stop(_)) - destinations.foreach(system.stop(_)) - - } - } - } -} - -object TellThroughput10000PerformanceSpec { - - case object Run - case object Msg - - class Destination extends Actor { - def receive = { - case Msg ⇒ sender ! Msg - } - } - - class Client( - actor: ActorRef, - latch: CountDownLatch, - repeat: Long) extends Actor { - - var sent = 0L - var received = 0L - - def receive = { - case Msg ⇒ - received += 1 - if (sent < repeat) { - actor ! Msg - sent += 1 - } else if (received >= repeat) { - latch.countDown() - } - case Run ⇒ - for (i ← 0L until math.min(20000L, repeat)) { - actor ! Msg - sent += 1 - } - } - - } - -} diff --git a/akka-actor-tests/src/test/scala/akka/performance/microbench/TellThroughputComputationPerformanceSpec.scala b/akka-actor-tests/src/test/scala/akka/performance/microbench/TellThroughputComputationPerformanceSpec.scala index 0b47a1f722..4bee0c8655 100644 --- a/akka-actor-tests/src/test/scala/akka/performance/microbench/TellThroughputComputationPerformanceSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/performance/microbench/TellThroughputComputationPerformanceSpec.scala @@ -100,15 +100,14 @@ class TellThroughputComputationPerformanceSpec extends PerformanceSpec { def runScenario(numberOfClients: Int, warmup: Boolean = false) { if (acceptClients(numberOfClients)) { - val clientDispatcher = "benchmark.client-dispatcher" - val destinationDispatcher = "benchmark.destination-dispatcher" + val throughputDispatcher = "benchmark.throughput-dispatcher" val latch = new CountDownLatch(numberOfClients) val repeatsPerClient = repeat / numberOfClients val destinations = for (i ← 0 until numberOfClients) - yield system.actorOf(Props(new Destination).withDispatcher(destinationDispatcher)) + yield system.actorOf(Props(new Destination).withDispatcher(throughputDispatcher)) val clients = for (dest ← destinations) - yield system.actorOf(Props(new Client(dest, latch, repeatsPerClient)).withDispatcher(clientDispatcher)) + yield system.actorOf(Props(new Client(dest, latch, repeatsPerClient)).withDispatcher(throughputDispatcher)) val start = System.nanoTime clients.foreach(_ ! 
Run) diff --git a/akka-actor-tests/src/test/scala/akka/performance/microbench/TellThroughputPerformanceSpec.scala b/akka-actor-tests/src/test/scala/akka/performance/microbench/TellThroughputPerformanceSpec.scala index 552dbf62e9..f028fec6b0 100644 --- a/akka-actor-tests/src/test/scala/akka/performance/microbench/TellThroughputPerformanceSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/performance/microbench/TellThroughputPerformanceSpec.scala @@ -16,10 +16,10 @@ class TellThroughputPerformanceSpec extends PerformanceSpec { "Tell" must { "warmup" in { - runScenario(4, warmup = true) + runScenario(8, warmup = true) } "warmup more" in { - runScenario(4, warmup = true) + runScenario(8, warmup = true) } "perform with load 1" in { runScenario(1) @@ -48,19 +48,66 @@ class TellThroughputPerformanceSpec extends PerformanceSpec { "perform with load 16" in { runScenario(16) } + "perform with load 18" in { + runScenario(18) + } + "perform with load 20" in { + runScenario(20) + } + "perform with load 22" in { + runScenario(22) + } + "perform with load 24" in { + runScenario(24) + } + "perform with load 26" in { + runScenario(26) + } + "perform with load 28" in { + runScenario(28) + } + "perform with load 30" in { + runScenario(30) + } + "perform with load 32" in { + runScenario(32) + } + "perform with load 34" in { + runScenario(34) + } + "perform with load 36" in { + runScenario(36) + } + "perform with load 38" in { + runScenario(38) + } + "perform with load 40" in { + runScenario(40) + } + "perform with load 42" in { + runScenario(42) + } + "perform with load 44" in { + runScenario(44) + } + "perform with load 46" in { + runScenario(46) + } + "perform with load 48" in { + runScenario(48) + } def runScenario(numberOfClients: Int, warmup: Boolean = false) { if (acceptClients(numberOfClients)) { - val clientDispatcher = "benchmark.client-dispatcher" - val destinationDispatcher = "benchmark.destination-dispatcher" + val throughputDispatcher = "benchmark.throughput-dispatcher" val latch = new CountDownLatch(numberOfClients) val repeatsPerClient = repeat / numberOfClients val destinations = for (i ← 0 until numberOfClients) - yield system.actorOf(Props(new Destination).withDispatcher(destinationDispatcher)) + yield system.actorOf(Props(new Destination).withDispatcher(throughputDispatcher)) val clients = for (dest ← destinations) - yield system.actorOf(Props(new Client(dest, latch, repeatsPerClient)).withDispatcher(clientDispatcher)) + yield system.actorOf(Props(new Client(dest, latch, repeatsPerClient)).withDispatcher(throughputDispatcher)) val start = System.nanoTime clients.foreach(_ ! 
Run) diff --git a/akka-actor-tests/src/test/scala/akka/performance/microbench/TellThroughputPinnedDispatchersPerformanceSpec.scala b/akka-actor-tests/src/test/scala/akka/performance/microbench/TellThroughputPinnedDispatchersPerformanceSpec.scala deleted file mode 100644 index 4d9ad3eef1..0000000000 --- a/akka-actor-tests/src/test/scala/akka/performance/microbench/TellThroughputPinnedDispatchersPerformanceSpec.scala +++ /dev/null @@ -1,171 +0,0 @@ -package akka.performance.microbench - -import akka.performance.workbench.PerformanceSpec -import org.apache.commons.math.stat.descriptive.DescriptiveStatistics -import akka.actor._ -import java.util.concurrent.{ ThreadPoolExecutor, CountDownLatch, TimeUnit } -import akka.dispatch._ -import java.util.concurrent.ThreadPoolExecutor.AbortPolicy -import java.util.concurrent.BlockingQueue -import java.util.concurrent.LinkedBlockingQueue -import akka.util.Duration -import akka.util.duration._ - -// -server -Xms512M -Xmx1024M -XX:+UseParallelGC -Dbenchmark=true -Dbenchmark.repeatFactor=500 -@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) -class TellThroughputPinnedDispatchersPerformanceSpec extends PerformanceSpec { - import TellThroughputPinnedDispatchersPerformanceSpec._ - - val repeat = 30000L * repeatFactor - - "Tell" must { - "warmup" in { - runScenario(4, warmup = true) - } - "warmup more" in { - runScenario(4, warmup = true) - } - "perform with load 1" in { - runScenario(1) - } - "perform with load 2" in { - runScenario(2) - } - "perform with load 4" in { - runScenario(4) - } - "perform with load 6" in { - runScenario(6) - } - "perform with load 8" in { - runScenario(8) - } - "perform with load 10" in { - runScenario(10) - } - "perform with load 12" in { - runScenario(12) - } - "perform with load 14" in { - runScenario(14) - } - "perform with load 16" in { - runScenario(16) - } - "perform with load 18" in { - runScenario(18) - } - "perform with load 20" in { - runScenario(20) - } - "perform with load 22" in { - runScenario(22) - } - "perform with load 24" in { - runScenario(24) - } - "perform with load 26" in { - runScenario(26) - } - "perform with load 28" in { - runScenario(28) - } - "perform with load 30" in { - runScenario(30) - } - "perform with load 32" in { - runScenario(32) - } - "perform with load 34" in { - runScenario(34) - } - "perform with load 36" in { - runScenario(36) - } - "perform with load 38" in { - runScenario(38) - } - "perform with load 40" in { - runScenario(40) - } - "perform with load 42" in { - runScenario(42) - } - "perform with load 44" in { - runScenario(44) - } - "perform with load 46" in { - runScenario(46) - } - "perform with load 48" in { - runScenario(48) - } - - def runScenario(numberOfClients: Int, warmup: Boolean = false) { - if (acceptClients(numberOfClients)) { - - val pinnedDispatcher = "benchmark.pinned-dispatcher" - - val latch = new CountDownLatch(numberOfClients) - val repeatsPerClient = repeat / numberOfClients - - val destinations = for (i ← 0 until numberOfClients) - yield system.actorOf(Props(new Destination).withDispatcher(pinnedDispatcher)) - val clients = for ((dest, j) ← destinations.zipWithIndex) - yield system.actorOf(Props(new Client(dest, latch, repeatsPerClient)).withDispatcher(pinnedDispatcher)) - - val start = System.nanoTime - clients.foreach(_ ! 
Run) - val ok = latch.await(maxRunDuration.toMillis, TimeUnit.MILLISECONDS) - val durationNs = (System.nanoTime - start) - - if (!warmup) { - ok must be(true) - logMeasurement(numberOfClients, durationNs, repeat) - } - clients.foreach(system.stop(_)) - destinations.foreach(system.stop(_)) - - } - } - } -} - -object TellThroughputPinnedDispatchersPerformanceSpec { - - case object Run - case object Msg - - class Destination extends Actor { - def receive = { - case Msg ⇒ sender ! Msg - } - } - - class Client( - actor: ActorRef, - latch: CountDownLatch, - repeat: Long) extends Actor { - - var sent = 0L - var received = 0L - - def receive = { - case Msg ⇒ - received += 1 - if (sent < repeat) { - actor ! Msg - sent += 1 - } else if (received >= repeat) { - latch.countDown() - } - case Run ⇒ - for (i ← 0L until math.min(1000L, repeat)) { - actor ! Msg - sent += 1 - } - } - - } - -} diff --git a/akka-actor-tests/src/test/scala/akka/performance/trading/system/TradingLatencyPerformanceSpec.scala b/akka-actor-tests/src/test/scala/akka/performance/trading/system/TradingLatencyPerformanceSpec.scala index 9ba77e71e8..58b2e7e315 100644 --- a/akka-actor-tests/src/test/scala/akka/performance/trading/system/TradingLatencyPerformanceSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/performance/trading/system/TradingLatencyPerformanceSpec.scala @@ -84,7 +84,7 @@ class TradingLatencyPerformanceSpec extends PerformanceSpec { } yield Bid(s + i, 100 - i, 1000) val orders = askOrders.zip(bidOrders).map(x ⇒ Seq(x._1, x._2)).flatten - val clientDispatcher = "benchmark.client-dispatcher" + val latencyDispatcher = "benchmark.trading-dispatcher" val ordersPerClient = repeat * orders.size / numberOfClients val totalNumberOfOrders = ordersPerClient * numberOfClients @@ -93,7 +93,7 @@ class TradingLatencyPerformanceSpec extends PerformanceSpec { val start = System.nanoTime val clients = (for (i ← 0 until numberOfClients) yield { val receiver = receivers(i % receivers.size) - val props = Props(new Client(receiver, orders, latch, ordersPerClient, clientDelay.toMicros.toInt)).withDispatcher(clientDispatcher) + val props = Props(new Client(receiver, orders, latch, ordersPerClient, clientDelay.toMicros.toInt)).withDispatcher(latencyDispatcher) system.actorOf(props) }) diff --git a/akka-actor-tests/src/test/scala/akka/performance/trading/system/TradingSystem.scala b/akka-actor-tests/src/test/scala/akka/performance/trading/system/TradingSystem.scala index 7fe2783a9a..1adb2ecbc7 100644 --- a/akka-actor-tests/src/test/scala/akka/performance/trading/system/TradingSystem.scala +++ b/akka-actor-tests/src/test/scala/akka/performance/trading/system/TradingSystem.scala @@ -39,11 +39,9 @@ class AkkaTradingSystem(val system: ActorSystem) extends TradingSystem { val orDispatcher = orderReceiverDispatcher val meDispatcher = matchingEngineDispatcher - // by default we use default-dispatcher - def orderReceiverDispatcher: Option[String] = None + def orderReceiverDispatcher: Option[String] = Some("benchmark.trading-dispatcher") - // by default we use default-dispatcher - def matchingEngineDispatcher: Option[String] = None + def matchingEngineDispatcher: Option[String] = Some("benchmark.trading-dispatcher") override val orderbooksGroupedByMatchingEngine: List[List[Orderbook]] = for (groupOfSymbols: List[String] ← OrderbookRepository.orderbookSymbolsGroupedByMatchingEngine) diff --git a/akka-actor-tests/src/test/scala/akka/performance/trading/system/TradingThroughputPerformanceSpec.scala 
b/akka-actor-tests/src/test/scala/akka/performance/trading/system/TradingThroughputPerformanceSpec.scala index 7092f87666..a1033d7682 100644 --- a/akka-actor-tests/src/test/scala/akka/performance/trading/system/TradingThroughputPerformanceSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/performance/trading/system/TradingThroughputPerformanceSpec.scala @@ -81,7 +81,7 @@ class TradingThroughputPerformanceSpec extends PerformanceSpec { } yield Bid(s + i, 100 - i, 1000) val orders = askOrders.zip(bidOrders).map(x ⇒ Seq(x._1, x._2)).flatten - val clientDispatcher = "benchmark.client-dispatcher" + val throughputDispatcher = "benchmark.trading-dispatcher" val ordersPerClient = repeat * orders.size / numberOfClients val totalNumberOfOrders = ordersPerClient * numberOfClients @@ -90,7 +90,7 @@ class TradingThroughputPerformanceSpec extends PerformanceSpec { val start = System.nanoTime val clients = (for (i ← 0 until numberOfClients) yield { val receiver = receivers(i % receivers.size) - val props = Props(new Client(receiver, orders, latch, ordersPerClient)).withDispatcher(clientDispatcher) + val props = Props(new Client(receiver, orders, latch, ordersPerClient)).withDispatcher(throughputDispatcher) system.actorOf(props) }) diff --git a/akka-actor-tests/src/test/scala/akka/performance/workbench/BenchmarkConfig.scala b/akka-actor-tests/src/test/scala/akka/performance/workbench/BenchmarkConfig.scala index 65294d014a..e31e667678 100644 --- a/akka-actor-tests/src/test/scala/akka/performance/workbench/BenchmarkConfig.scala +++ b/akka-actor-tests/src/test/scala/akka/performance/workbench/BenchmarkConfig.scala @@ -20,41 +20,30 @@ object BenchmarkConfig { resultDir = "target/benchmark" useDummyOrderbook = false - client-dispatcher { - executor = "thread-pool-executor" - thread-pool-executor { - core-pool-size-min = ${benchmark.maxClients} - core-pool-size-max = ${benchmark.maxClients} + throughput-dispatcher { + throughput = 5 + executor = "fork-join-executor" + fork-join-executor { + parallelism-min = ${benchmark.maxClients} + parallelism-max = ${benchmark.maxClients} } } - destination-dispatcher { - executor = "thread-pool-executor" - thread-pool-executor { - core-pool-size-min = ${benchmark.maxClients} - core-pool-size-max = ${benchmark.maxClients} - } - } - - high-throughput-dispatcher { - throughput = 10000 - executor = "thread-pool-executor" - thread-pool-executor { - core-pool-size-min = ${benchmark.maxClients} - core-pool-size-max = ${benchmark.maxClients} - } - } - - pinned-dispatcher { - type = PinnedDispatcher - } - latency-dispatcher { throughput = 1 - executor = "thread-pool-executor" - thread-pool-executor { - core-pool-size-min = ${benchmark.maxClients} - core-pool-size-max = ${benchmark.maxClients} + executor = "fork-join-executor" + fork-join-executor { + parallelism-min = ${benchmark.maxClients} + parallelism-max = ${benchmark.maxClients} + } + } + + trading-dispatcher { + throughput = 5 + executor = "fork-join-executor" + fork-join-executor { + parallelism-min = ${benchmark.maxClients} + parallelism-max = ${benchmark.maxClients} } } } @@ -62,8 +51,9 @@ object BenchmarkConfig { private val longRunningBenchmarkConfig = ConfigFactory.parseString(""" benchmark { longRunning = true + minClients = 4 maxClients = 48 - repeatFactor = 150 + repeatFactor = 2000 maxRunDuration = 120 seconds useDummyOrderbook = true } diff --git a/akka-actor-tests/src/test/scala/akka/performance/workbench/PerformanceSpec.scala b/akka-actor-tests/src/test/scala/akka/performance/workbench/PerformanceSpec.scala 
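
The throughput specs above all share one ping-pong shape: each Client floods its Destination with Msg, the Destination echoes every message back, and a CountDownLatch plus System.nanoTime bracket the run. A minimal standalone sketch of that pattern, assuming the 2.0-era akka-actor API used throughout this series (object names and counts here are illustrative, not from the patch):

    import akka.actor._
    import java.util.concurrent.{ CountDownLatch, TimeUnit }

    object PingPongBench extends App {
      case object Run
      case object Msg

      class Destination extends Actor {
        def receive = { case Msg ⇒ sender ! Msg } // echo every message back
      }

      class Client(dest: ActorRef, latch: CountDownLatch, repeat: Long) extends Actor {
        var sent, received = 0L
        def receive = {
          case Msg ⇒
            received += 1
            if (sent < repeat) { dest ! Msg; sent += 1 }
            else if (received >= repeat) latch.countDown()
          case Run ⇒ // prime a window of in-flight messages, as the specs do
            while (sent < math.min(1000L, repeat)) { dest ! Msg; sent += 1 }
        }
      }

      val system = ActorSystem("bench")
      val repeat = 100000L
      val latch = new CountDownLatch(1)
      val dest = system.actorOf(Props(new Destination))
      val client = system.actorOf(Props(new Client(dest, latch, repeat)))
      val start = System.nanoTime
      client ! Run
      latch.await(60, TimeUnit.SECONDS)
      println("roundtrips/s: " + (repeat * 1000000000L / (System.nanoTime - start)))
      system.shutdown()
    }
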
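The rewritten BenchmarkConfig above relies on HOCON substitution: both fork-join parallelism bounds point at ${benchmark.maxClients} and are expanded when the configuration is resolved. A small sketch of that resolution step with the Typesafe Config library (the value 4 is illustrative):

    import com.typesafe.config.ConfigFactory

    object DispatcherConfigDemo extends App {
      val cfg = ConfigFactory.parseString("""
        benchmark {
          maxClients = 4
          throughput-dispatcher {
            throughput = 5
            executor = "fork-join-executor"
            fork-join-executor {
              parallelism-min = ${benchmark.maxClients}
              parallelism-max = ${benchmark.maxClients}
            }
          }
        }
        """).resolve() // resolve() expands the ${...} substitutions

      // prints 4: both parallelism bounds track benchmark.maxClients
      println(cfg.getInt("benchmark.throughput-dispatcher.fork-join-executor.parallelism-min"))
    }
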
index 3d27f8a303..ca6e42d67f 100644 --- a/akka-actor-tests/src/test/scala/akka/performance/workbench/PerformanceSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/performance/workbench/PerformanceSpec.scala @@ -31,7 +31,8 @@ abstract class PerformanceSpec(cfg: Config = BenchmarkConfig.config) extends Akk def compareResultWith: Option[String] = None def acceptClients(numberOfClients: Int): Boolean = { - (minClients <= numberOfClients && numberOfClients <= maxClients) + (minClients <= numberOfClients && numberOfClients <= maxClients && + (maxClients <= 16 || numberOfClients % 4 == 0)) } def logMeasurement(numberOfClients: Int, durationNs: Long, n: Long) { From 799ab8b482c9bc3748efa87bf5ddc905d54ec859 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Mon, 30 Jan 2012 20:38:40 +0100 Subject: [PATCH 38/94] Fixed import clash when using java7, fj in java.util.concurrent._. See #1728 --- .../scala/akka/dispatch/ThreadPoolBuilder.scala | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala b/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala index 4612fdca1f..5be5f1b0e1 100644 --- a/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala +++ b/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala @@ -5,10 +5,20 @@ package akka.dispatch import java.util.Collection -import java.util.concurrent.atomic.AtomicLong import akka.util.Duration -import java.util.concurrent._ import akka.jsr166y._ +import java.util.concurrent.atomic.AtomicLong +import java.util.concurrent.ArrayBlockingQueue +import java.util.concurrent.BlockingQueue +import java.util.concurrent.Callable +import java.util.concurrent.ExecutorService +import java.util.concurrent.LinkedBlockingQueue +import java.util.concurrent.RejectedExecutionHandler +import java.util.concurrent.RejectedExecutionException +import java.util.concurrent.SynchronousQueue +import java.util.concurrent.TimeUnit +import java.util.concurrent.ThreadFactory +import java.util.concurrent.ThreadPoolExecutor object ThreadPoolConfig { type QueueFactory = () ⇒ BlockingQueue[Runnable] From 6fbd72591e8a936f7da43ae95798cd223bf8d537 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Tue, 31 Jan 2012 08:34:17 +0100 Subject: [PATCH 39/94] Review comments. See #1717 --- akka-actor/src/main/scala/akka/actor/Actor.scala | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/Actor.scala b/akka-actor/src/main/scala/akka/actor/Actor.scala index 1e6ea485fc..fd16ed3f39 100644 --- a/akka-actor/src/main/scala/akka/actor/Actor.scala +++ b/akka-actor/src/main/scala/akka/actor/Actor.scala @@ -287,10 +287,9 @@ trait Actor { * For Akka internal use only. */ private[akka] final def apply(msg: Any) = { - msg match { - case msg if behaviorStack.head.isDefinedAt(msg) ⇒ behaviorStack.head.apply(msg) - case unknown ⇒ unhandled(unknown) - } + // TODO would it be more efficient to assume that most messages are matched and catch MatchError instead of using isDefinedAt? + val head = behaviorStack.head + if (head.isDefinedAt(msg)) head.apply(msg) else unhandled(msg) } /** @@ -312,7 +311,8 @@ trait Actor { /** * For Akka internal use only. 
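
The TODO in Actor.apply above asks whether the double pattern match (isDefinedAt, then apply) can be avoided. PartialFunction.applyOrElse, added in Scala 2.10 and so not available to this 2.9-era code, does the match and the application in a single traversal, which is why catching MatchError is floated as the alternative here. A sketch of the applyOrElse shape (names are illustrative):

    object DispatchSketch extends App {
      type Receive = PartialFunction[Any, Unit]

      // applyOrElse matches and applies in one traversal; the default is
      // only invoked when no case matches, replacing the isDefinedAt probe
      def dispatch(behavior: Receive, unhandled: Any ⇒ Unit)(msg: Any): Unit =
        behavior.applyOrElse(msg, unhandled)

      val echo: Receive = { case s: String ⇒ println("got " + s) }
      dispatch(echo, m ⇒ println("unhandled: " + m))("hello") // got hello
      dispatch(echo, m ⇒ println("unhandled: " + m))(42)      // unhandled: 42
    }
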
*/ - private[akka] def clearBehaviorStack(): Unit = Stack.empty[Receive].push(behaviorStack.last) + private[akka] def clearBehaviorStack(): Unit = + behaviorStack = Stack.empty[Receive].push(behaviorStack.last) private var behaviorStack: Stack[Receive] = Stack.empty[Receive].push(receive) } From 7f684456f2ff26a5c8aa1901c48e1e5eb199e346 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Tue, 31 Jan 2012 09:27:21 +0100 Subject: [PATCH 40/94] Rewrite ActorTimeoutSpec not to be iffy. See #1593 --- .../scala/akka/actor/ActorTimeoutSpec.scala | 41 ++++--------------- 1 file changed, 9 insertions(+), 32 deletions(-) diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorTimeoutSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorTimeoutSpec.scala index c8df739b48..6800033d4c 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ActorTimeoutSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ActorTimeoutSpec.scala @@ -3,53 +3,30 @@ */ package akka.actor -import org.scalatest.BeforeAndAfterAll import akka.util.duration._ -import akka.testkit.AkkaSpec -import akka.testkit.DefaultTimeout -import java.util.concurrent.TimeoutException +import akka.testkit._ import akka.dispatch.Await import akka.util.Timeout import akka.pattern.{ ask, AskTimeoutException } @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) -class ActorTimeoutSpec extends AkkaSpec with BeforeAndAfterAll with DefaultTimeout { +class ActorTimeoutSpec extends AkkaSpec { - val defaultTimeout = system.settings.ActorTimeout.duration - val testTimeout = if (system.settings.ActorTimeout.duration < 400.millis) 500 millis else 100 millis + val testTimeout = 200.millis.dilated "An Actor-based Future" must { - "use the global default timeout if no implicit in scope" in { - within(defaultTimeout - 100.millis, defaultTimeout + 400.millis) { - val echo = system.actorOf(Props.empty) - try { - val d = system.settings.ActorTimeout.duration - val f = echo ? "hallo" - intercept[AskTimeoutException] { Await.result(f, d + d) } - } finally { system.stop(echo) } - } - } - "use implicitly supplied timeout" in { implicit val timeout = Timeout(testTimeout) - within(testTimeout - 100.millis, testTimeout + 300.millis) { - val echo = system.actorOf(Props.empty) - try { - val f = (echo ? "hallo").mapTo[String] - intercept[AskTimeoutException] { Await.result(f, testTimeout + testTimeout) } - } finally { system.stop(echo) } - } + val echo = system.actorOf(Props.empty) + val f = (echo ? 
"hallo") + intercept[AskTimeoutException] { Await.result(f, testTimeout * 2) } } "use explicitly supplied timeout" in { - within(testTimeout - 100.millis, testTimeout + 300.millis) { - val echo = system.actorOf(Props.empty) - val f = echo.?("hallo")(testTimeout) - try { - intercept[AskTimeoutException] { Await.result(f, testTimeout + 300.millis) } - } finally { system.stop(echo) } - } + val echo = system.actorOf(Props.empty) + val f = echo.?("hallo")(testTimeout) + intercept[AskTimeoutException] { Await.result(f, testTimeout * 2) } } } } From 3fb62e1d9cd76234a36e89a94a4a14620f3e8a5b Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 31 Jan 2012 10:01:29 +0100 Subject: [PATCH 41/94] Updated to latest jsr166y FJ --- .../main/java/akka/jsr166y/ForkJoinPool.java | 1312 +++++++++-------- .../main/java/akka/jsr166y/ForkJoinTask.java | 12 +- 2 files changed, 671 insertions(+), 653 deletions(-) diff --git a/akka-actor/src/main/java/akka/jsr166y/ForkJoinPool.java b/akka-actor/src/main/java/akka/jsr166y/ForkJoinPool.java index f6eb5de94e..f92e5541f4 100644 --- a/akka-actor/src/main/java/akka/jsr166y/ForkJoinPool.java +++ b/akka-actor/src/main/java/akka/jsr166y/ForkJoinPool.java @@ -21,7 +21,7 @@ import java.util.concurrent.RunnableFuture; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; import java.util.concurrent.atomic.AtomicLong; -import java.util.concurrent.locks.ReentrantLock; +import java.util.concurrent.locks.AbstractQueuedSynchronizer; import java.util.concurrent.locks.Condition; import akka.util.Unsafe; @@ -61,17 +61,16 @@ import akka.util.Unsafe; * convenient form for informal monitoring. * *
As is the case with other ExecutorServices, there are three - * main task execution methods summarized in the following - * table. These are designed to be used primarily by clients not - * already engaged in fork/join computations in the current pool. The - * main forms of these methods accept instances of {@code - * ForkJoinTask}, but overloaded forms also allow mixed execution of - * plain {@code Runnable}- or {@code Callable}- based activities as - * well. However, tasks that are already executing in a pool should - * normally instead use the within-computation forms listed in the - * table unless using async event-style tasks that are not usually - * joined, in which case there is little difference among choice of - * methods. + * main task execution methods summarized in the following table. + * These are designed to be used primarily by clients not already + * engaged in fork/join computations in the current pool. The main + * forms of these methods accept instances of {@code ForkJoinTask}, + * but overloaded forms also allow mixed execution of plain {@code + * Runnable}- or {@code Callable}- based activities as well. However, + * tasks that are already executing in a pool should normally instead + * use the within-computation forms listed in the table unless using + * async event-style tasks that are not usually joined, in which case + * there is little difference among choice of methods. * * * @@ -132,14 +131,14 @@ public class ForkJoinPool extends AbstractExecutorService { * * This class and its nested classes provide the main * functionality and control for a set of worker threads: - * Submissions from non-FJ threads enter into submission - * queues. Workers take these tasks and typically split them into - * subtasks that may be stolen by other workers. Preference rules - * give first priority to processing tasks from their own queues - * (LIFO or FIFO, depending on mode), then to randomized FIFO - * steals of tasks in other queues. + * Submissions from non-FJ threads enter into submission queues. + * Workers take these tasks and typically split them into subtasks + * that may be stolen by other workers. Preference rules give + * first priority to processing tasks from their own queues (LIFO + * or FIFO, depending on mode), then to randomized FIFO steals of + * tasks in other queues. * - * WorkQueues. + * WorkQueues * ========== * * Most operations occur within work-stealing queues (in nested @@ -157,7 +156,7 @@ public class ForkJoinPool extends AbstractExecutorService { * (http://research.sun.com/scalable/pubs/index.html) and * "Idempotent work stealing" by Michael, Saraswat, and Vechev, * PPoPP 2009 (http://portal.acm.org/citation.cfm?id=1504186). - * The main differences ultimately stem from gc requirements that + * The main differences ultimately stem from GC requirements that * we null out taken slots as soon as we can, to maintain as small * a footprint as possible even in programs generating huge * numbers of tasks. To accomplish this, we shift the CAS @@ -189,9 +188,9 @@ public class ForkJoinPool extends AbstractExecutorService { * rarely provide the best possible performance on a given * machine, but portably provide good throughput by averaging over * these factors. (Further, even if we did try to use such - * information, we do not usually have a basis for exploiting - * it. For example, some sets of tasks profit from cache - * affinities, but others are harmed by cache pollution effects.) + * information, we do not usually have a basis for exploiting it. 
+ * For example, some sets of tasks profit from cache affinities, + * but others are harmed by cache pollution effects.) * * WorkQueues are also used in a similar way for tasks submitted * to the pool. We cannot mix these tasks in the same queues used @@ -205,15 +204,13 @@ public class ForkJoinPool extends AbstractExecutorService { * take tasks, and they are multiplexed on to a finite number of * shared work queues. However, classes are set up so that future * extensions could allow submitters to optionally help perform - * tasks as well. Pool submissions from internal workers are also - * allowed, but use randomized rather than thread-hashed queue - * indices to avoid imbalance. Insertion of tasks in shared mode - * requires a lock (mainly to protect in the case of resizing) but - * we use only a simple spinlock (using bits in field runState), - * because submitters encountering a busy queue try or create - * others so never block. + * tasks as well. Insertion of tasks in shared mode requires a + * lock (mainly to protect in the case of resizing) but we use + * only a simple spinlock (using bits in field runState), because + * submitters encountering a busy queue move on to try or create + * other queues, so never block. * - * Management. + * Management * ========== * * The main throughput advantages of work-stealing stem from @@ -223,7 +220,7 @@ public class ForkJoinPool extends AbstractExecutorService { * tactic for avoiding bottlenecks is packing nearly all * essentially atomic control state into two volatile variables * that are by far most often read (not written) as status and - * consistency checks + * consistency checks. * * Field "ctl" contains 64 bits holding all the information needed * to atomically decide to add, inactivate, enqueue (on an event @@ -237,7 +234,10 @@ public class ForkJoinPool extends AbstractExecutorService { * deregister WorkQueues, as well as to enable shutdown. It is * only modified under a lock (normally briefly held, but * occasionally protecting allocations and resizings) but even - * when locked remains available to check consistency. + * when locked remains available to check consistency. An + * auxiliary field "growHints", also only modified under lock, + * contains a candidate index for the next WorkQueue and + * a mask for submission queue indices. * * Recording WorkQueues. WorkQueues are recorded in the * "workQueues" array that is created upon pool construction and @@ -253,9 +253,11 @@ public class ForkJoinPool extends AbstractExecutorService { * presized to hold twice #parallelism workers (which is unlikely * to need further resizing during execution). But to avoid * dealing with so many null slots, variable runState includes a - * mask for the nearest power of two that contains all current - * workers. All worker thread creation is on-demand, triggered by - * task submissions, replacement of terminated workers, and/or + * mask for the nearest power of two that contains all currently + * used indices. + * + * All worker thread creation is on-demand, triggered by task + * submissions, replacement of terminated workers, and/or * compensation for blocked workers. However, all other support * code is set up to work with other policies. To ensure that we * do not hold on to worker references that would prevent GC, ALL @@ -268,13 +270,12 @@ public class ForkJoinPool extends AbstractExecutorService { * both index-check and null-check the IDs. 
All such accesses * ignore bad IDs by returning out early from what they are doing, * since this can only be associated with termination, in which - * case it is OK to give up. - * - * All uses of the workQueues array check that it is non-null - * (even if previously non-null). This allows nulling during - * termination, which is currently not necessary, but remains an - * option for resource-revocation-based shutdown schemes. It also - * helps reduce JIT issuance of uncommon-trap code, which tends to + * case it is OK to give up. All uses of the workQueues array + * also check that it is non-null (even if previously + * non-null). This allows nulling during termination, which is + * currently not necessary, but remains an option for + * resource-revocation-based shutdown schemes. It also helps + * reduce JIT issuance of uncommon-trap code, which tends to * unnecessarily complicate control flow in some methods. * * Event Queuing. Unlike HPC work-stealing frameworks, we cannot @@ -302,7 +303,7 @@ public class ForkJoinPool extends AbstractExecutorService { * some other queued worker rather than itself, which has the same * net effect. Because enqueued workers may actually be rescanning * rather than waiting, we set and clear the "parker" field of - * Workqueues to reduce unnecessary calls to unpark. (This + * WorkQueues to reduce unnecessary calls to unpark. (This * requires a secondary recheck to avoid missed signals.) Note * the unusual conventions about Thread.interrupts surrounding * parking and other blocking: Because interrupts are used solely @@ -330,7 +331,7 @@ public class ForkJoinPool extends AbstractExecutorService { * terminating all workers after long periods of non-use. * * Shutdown and Termination. A call to shutdownNow atomically sets - * a runState bit and then (non-atomically) sets each workers + * a runState bit and then (non-atomically) sets each worker's * runState status, cancels all unprocessed tasks, and wakes up * all waiting workers. Detecting whether termination should * commence after a non-abrupt shutdown() call requires more work @@ -339,18 +340,18 @@ public class ForkJoinPool extends AbstractExecutorService { * indication but non-abrupt shutdown still requires a rechecking * scan for any workers that are inactive but not queued. * - * Joining Tasks. - * ============== + * Joining Tasks + * ============= * * Any of several actions may be taken when one worker is waiting - * to join a task stolen (or always held by) another. Because we + * to join a task stolen (or always held) by another. Because we * are multiplexing many tasks on to a pool of workers, we can't * just let them block (as in Thread.join). We also cannot just * reassign the joiner's run-time stack with another and replace * it later, which would be a form of "continuation", that even if * possible is not necessarily a good idea since we sometimes need - * both an unblocked task and its continuation to - * progress. Instead we combine two tactics: + * both an unblocked task and its continuation to progress. + * Instead we combine two tactics: * * Helping: Arranging for the joiner to execute some task that it * would be running if the steal had not occurred. @@ -385,7 +386,7 @@ public class ForkJoinPool extends AbstractExecutorService { * (http://portal.acm.org/citation.cfm?id=155354). It differs in * that: (1) We only maintain dependency links across workers upon * steals, rather than use per-task bookkeeping. 
This sometimes - * requires a linear scan of workers array to locate stealers, but + * requires a linear scan of workQueues array to locate stealers, but * often doesn't because stealers leave hints (that may become * stale/wrong) of where to locate them. A stealHint is only a * hint because a worker might have had multiple steals and the @@ -420,34 +421,59 @@ public class ForkJoinPool extends AbstractExecutorService { * managed by ForkJoinPool, so are directly accessed. There is * little point trying to reduce this, since any associated future * changes in representations will need to be accompanied by - * algorithmic changes anyway. All together, these low-level - * implementation choices produce as much as a factor of 4 - * performance improvement compared to naive implementations, and - * enable the processing of billions of tasks per second, at the - * expense of some ugliness. + * algorithmic changes anyway. Several methods intrinsically + * sprawl because they must accumulate sets of consistent reads of + * volatiles held in local variables. Methods signalWork() and + * scan() are the main bottlenecks, so are especially heavily + * micro-optimized/mangled. There are lots of inline assignments + * (of form "while ((local = field) != 0)") which are usually the + * simplest way to ensure the required read orderings (which are + * sometimes critical). This leads to a "C"-like style of listing + * declarations of these locals at the heads of methods or blocks. + * There are several occurrences of the unusual "do {} while + * (!cas...)" which is the simplest way to force an update of a + * CAS'ed variable. There are also other coding oddities that help + * some methods perform reasonably even when interpreted (not + * compiled). * - * Methods signalWork() and scan() are the main bottlenecks so are - * especially heavily micro-optimized/mangled. There are lots of - * inline assignments (of form "while ((local = field) != 0)") - * which are usually the simplest way to ensure the required read - * orderings (which are sometimes critical). This leads to a - * "C"-like style of listing declarations of these locals at the - * heads of methods or blocks. There are several occurrences of - * the unusual "do {} while (!cas...)" which is the simplest way - * to force an update of a CAS'ed variable. There are also other - * coding oddities that help some methods perform reasonably even - * when interpreted (not compiled). - * - * The order of declarations in this file is: (1) declarations of - * statics (2) fields (along with constants used when unpacking - * some of them), listed in an order that tends to reduce - * contention among them a bit under most JVMs; (3) nested - * classes; (4) internal control methods; (5) callbacks and other - * support for ForkJoinTask methods; (6) exported methods (plus a - * few little helpers); (7) static block initializing all statics - * in a minimally dependent order. + * The order of declarations in this file is: + * (1) Static utility functions + * (2) Nested (static) classes + * (3) Static fields + * (4) Fields, along with constants used when unpacking some of them + * (5) Internal control methods + * (6) Callbacks and other support for ForkJoinTask methods + * (7) Exported methods + * (8) Static block initializing statics in minimally dependent order */ + // Static utilities + + /** + * Computes an initial hash code (also serving as a non-zero + * random seed) for a thread id. 
This method is expected to + * provide higher-quality hash codes than using method hashCode(). + */ + static final int hashId(long id) { + int h = (int)id ^ (int)(id >>> 32); // Use MurmurHash of thread id + h ^= h >>> 16; h *= 0x85ebca6b; + h ^= h >>> 13; h *= 0xc2b2ae35; + h ^= h >>> 16; + return (h == 0) ? 1 : h; // ensure nonzero + } + + /** + * If there is a security manager, makes sure caller has + * permission to modify threads. + */ + private static void checkPermission() { + SecurityManager security = System.getSecurityManager(); + if (security != null) + security.checkPermission(modifyThreadPermission); + } + + // Nested classes + /** * Factory for creating new {@link ForkJoinWorkerThread}s. * A {@code ForkJoinWorkerThreadFactory} must be defined and used @@ -476,164 +502,40 @@ public class ForkJoinPool extends AbstractExecutorService { } /** - * Creates a new ForkJoinWorkerThread. This factory is used unless - * overridden in ForkJoinPool constructors. + * A simple non-reentrant lock used for exclusion when managing + * queues and workers. We use a custom lock so that we can readily + * probe lock state in constructions that check among alternative + * actions. The lock is normally only very briefly held, and + * sometimes treated as a spinlock, but other usages block to + * reduce overall contention in those cases where locked code + * bodies perform allocation/resizing. */ - public static final ForkJoinWorkerThreadFactory - defaultForkJoinWorkerThreadFactory; - - /** - * Permission required for callers of methods that may start or - * kill threads. - */ - private static final RuntimePermission modifyThreadPermission; - - /** - * If there is a security manager, makes sure caller has - * permission to modify threads. - */ - private static void checkPermission() { - SecurityManager security = System.getSecurityManager(); - if (security != null) - security.checkPermission(modifyThreadPermission); + static final class Mutex extends AbstractQueuedSynchronizer { + public final boolean tryAcquire(int ignore) { + return compareAndSetState(0, 1); + } + public final boolean tryRelease(int ignore) { + setState(0); + return true; + } + public final void lock() { acquire(0); } + public final void unlock() { release(0); } + public final boolean isHeldExclusively() { return getState() == 1; } + public final Condition newCondition() { return new ConditionObject(); } } /** - * Generator for assigning sequence numbers as pool names. + * Class for artificial tasks that are used to replace the target + * of local joins if they are removed from an interior queue slot + * in WorkQueue.tryRemoveAndExec. We don't need the proxy to + * actually do anything beyond having a unique identity. */ - private static final AtomicInteger poolNumberGenerator; - - /** - * Bits and masks for control variables - * - * Field ctl is a long packed with: - * AC: Number of active running workers minus target parallelism (16 bits) - * TC: Number of total workers minus target parallelism (16 bits) - * ST: true if pool is terminating (1 bit) - * EC: the wait count of top waiting thread (15 bits) - * ID: ~(poolIndex >>> 1) of top of Treiber stack of waiters (16 bits) - * - * When convenient, we can extract the upper 32 bits of counts and - * the lower 32 bits of queue state, u = (int)(ctl >>> 32) and e = - * (int)ctl. The ec field is never accessed alone, but always - * together with id and st. 
The offsets of counts by the target - * parallelism and the positionings of fields makes it possible to - * perform the most common checks via sign tests of fields: When - * ac is negative, there are not enough active workers, when tc is - * negative, there are not enough total workers, when id is - * negative, there is at least one waiting worker, and when e is - * negative, the pool is terminating. To deal with these possibly - * negative fields, we use casts in and out of "short" and/or - * signed shifts to maintain signedness. - * - * When a thread is queued (inactivated), its eventCount field is - * negative, which is the only way to tell if a worker is - * prevented from executing tasks, even though it must continue to - * scan for them to avoid queuing races. - * - * Field runState is an int packed with: - * SHUTDOWN: true if shutdown is enabled (1 bit) - * SEQ: a sequence number updated upon (de)registering workers (15 bits) - * MASK: mask (power of 2 - 1) covering all registered poolIndexes (16 bits) - * - * The combination of mask and sequence number enables simple - * consistency checks: Staleness of read-only operations on the - * workers and queues arrays can be checked by comparing runState - * before vs after the reads. The low 16 bits (i.e, anding with - * SMASK) hold (the smallest power of two covering all worker - * indices, minus one. The mask for queues (vs workers) is twice - * this value plus 1. - */ - - // bit positions/shifts for fields - private static final int AC_SHIFT = 48; - private static final int TC_SHIFT = 32; - private static final int ST_SHIFT = 31; - private static final int EC_SHIFT = 16; - - // bounds - private static final int MAX_ID = 0x7fff; // max poolIndex - private static final int SMASK = 0xffff; // mask short bits - private static final int SHORT_SIGN = 1 << 15; - private static final int INT_SIGN = 1 << 31; - - // masks - private static final long STOP_BIT = 0x0001L << ST_SHIFT; - private static final long AC_MASK = ((long)SMASK) << AC_SHIFT; - private static final long TC_MASK = ((long)SMASK) << TC_SHIFT; - - // units for incrementing and decrementing - private static final long TC_UNIT = 1L << TC_SHIFT; - private static final long AC_UNIT = 1L << AC_SHIFT; - - // masks and units for dealing with u = (int)(ctl >>> 32) - private static final int UAC_SHIFT = AC_SHIFT - 32; - private static final int UTC_SHIFT = TC_SHIFT - 32; - private static final int UAC_MASK = SMASK << UAC_SHIFT; - private static final int UTC_MASK = SMASK << UTC_SHIFT; - private static final int UAC_UNIT = 1 << UAC_SHIFT; - private static final int UTC_UNIT = 1 << UTC_SHIFT; - - // masks and units for dealing with e = (int)ctl - private static final int E_MASK = 0x7fffffff; // no STOP_BIT - private static final int E_SEQ = 1 << EC_SHIFT; - - // runState bits - private static final int SHUTDOWN = 1 << 31; - private static final int RS_SEQ = 1 << 16; - private static final int RS_SEQ_MASK = 0x7fff0000; - - // access mode for WorkQueue - static final int LIFO_QUEUE = 0; - static final int FIFO_QUEUE = 1; - static final int SHARED_QUEUE = -1; - - /** - * The wakeup interval (in nanoseconds) for a worker waiting for a - * task when the pool is quiescent to instead try to shrink the - * number of workers. The exact value does not matter too - * much. It must be short enough to release resources during - * sustained periods of idleness, but not so short that threads - * are continually re-created. 
- */ - private static final long SHRINK_RATE = - 4L * 1000L * 1000L * 1000L; // 4 seconds - - /** - * The timeout value for attempted shrinkage, includes - * some slop to cope with system timer imprecision. - */ - private static final long SHRINK_TIMEOUT = SHRINK_RATE - (SHRINK_RATE / 10); - - /** - * The maximum stolen->joining link depth allowed in tryHelpStealer. - * Depths for legitimate chains are unbounded, but we use a fixed - * constant to avoid (otherwise unchecked) cycles and to bound - * staleness of traversal parameters at the expense of sometimes - * blocking when we could be helping. - */ - private static final int MAX_HELP_DEPTH = 16; - - /* - * Field layout order in this class tends to matter more than one - * would like. Runtime layout order is only loosely related to - * declaration order and may differ across JVMs, but the following - * empirically works OK on current JVMs. - */ - - volatile long ctl; // main pool control - final int parallelism; // parallelism level - final int localMode; // per-worker scheduling mode - int nextPoolIndex; // hint used in registerWorker - volatile int runState; // shutdown status, seq, and mask - WorkQueue[] workQueues; // main registry - final ReentrantLock lock; // for registration - final Condition termination; // for awaitTermination - final ForkJoinWorkerThreadFactory factory; // factory for new workers - final Thread.UncaughtExceptionHandler ueh; // per-worker UEH - final AtomicLong stealCount; // collect counts when terminated - final AtomicInteger nextWorkerNumber; // to create worker name string - final String workerNamePrefix; // Prefix for assigning worker names + static final class EmptyTask extends ForkJoinTask { + EmptyTask() { status = ForkJoinTask.NORMAL; } // force done + public final Void getRawResult() { return null; } + public final void setRawResult(Void x) {} + public final boolean exec() { return true; } + } /** * Queues supporting work-stealing as well as external task @@ -684,7 +586,7 @@ public class ForkJoinPool extends AbstractExecutorService { * avoiding really bad worst-case access. (Until better JVM * support is in place, this padding is dependent on transient * properties of JVM field layout rules.) We also take care in - * allocating and sizing and resizing the array. Non-shared queue + * allocating, sizing and resizing the array. Non-shared queue * arrays are initialized (via method growArray) by workers before * use. Others are allocated on first use. */ @@ -733,7 +635,7 @@ public class ForkJoinPool extends AbstractExecutorService { } /** - * Returns number of tasks in the queue + * Returns number of tasks in the queue. */ final int queueSize() { int n = base - top; // non-owner callers must read base first @@ -744,9 +646,8 @@ public class ForkJoinPool extends AbstractExecutorService { * Pushes a task. Call only by owner in unshared queues. * * @param task the task. Caller must ensure non-null. 
- * @param p, if non-null, pool to signal if necessary - * @throw RejectedExecutionException if array cannot - * be resized + * @param p if non-null, pool to signal if necessary + * @throw RejectedExecutionException if array cannot be resized */ final void push(ForkJoinTask task, ForkJoinPool p) { ForkJoinTask[] a; @@ -774,9 +675,9 @@ public class ForkJoinPool extends AbstractExecutorService { boolean submitted = false; if (runState == 0 && U.compareAndSwapInt(this, RUNSTATE, 0, 1)) { ForkJoinTask[] a = array; - int s = top, n = s - base; + int s = top; try { - if ((a != null && n < a.length - 1) || + if ((a != null && a.length > s + 1 - base) || (a = growArray(false)) != null) { // must presize int j = (((a.length - 1) & s) << ASHIFT) + ABASE; U.putObject(a, (long)j, task); // don't need "ordered" @@ -794,12 +695,11 @@ public class ForkJoinPool extends AbstractExecutorService { * Takes next task, if one exists, in FIFO order. */ final ForkJoinTask poll() { - ForkJoinTask[] a; int b, i; - while ((b = base) - top < 0 && (a = array) != null && - (i = (a.length - 1) & b) >= 0) { - int j = (i << ASHIFT) + ABASE; - ForkJoinTask t = (ForkJoinTask)U.getObjectVolatile(a, j); - if (t != null && base == b && + ForkJoinTask[] a; int b; ForkJoinTask t; + while ((b = base) - top < 0 && (a = array) != null) { + int j = (((a.length - 1) & b) << ASHIFT) + ABASE; + if ((t = (ForkJoinTask)U.getObjectVolatile(a, j)) != null && + base == b && U.compareAndSwapObject(a, j, t, null)) { base = b + 1; return t; @@ -809,8 +709,9 @@ public class ForkJoinPool extends AbstractExecutorService { } /** - * Takes next task, if one exists, in LIFO order. - * Call only by owner in unshared queues. + * Takes next task, if one exists, in LIFO order. Call only + * by owner in unshared queues. (We do not have a shared + * version of this method because it is never needed.) */ final ForkJoinTask pop() { ForkJoinTask t; int m; @@ -852,18 +753,17 @@ public class ForkJoinPool extends AbstractExecutorService { * Returns task at index b if b is current base of queue. */ final ForkJoinTask pollAt(int b) { - ForkJoinTask[] a; int i; - ForkJoinTask task = null; - if ((a = array) != null && (i = ((a.length - 1) & b)) >= 0) { - int j = (i << ASHIFT) + ABASE; - ForkJoinTask t = (ForkJoinTask)U.getObjectVolatile(a, j); - if (t != null && base == b && + ForkJoinTask t; ForkJoinTask[] a; + if ((a = array) != null) { + int j = (((a.length - 1) & b) << ASHIFT) + ABASE; + if ((t = (ForkJoinTask)U.getObjectVolatile(a, j)) != null && + base == b && U.compareAndSwapObject(a, j, t, null)) { base = b + 1; - task = t; + return t; } } - return task; + return null; } /** @@ -884,10 +784,9 @@ public class ForkJoinPool extends AbstractExecutorService { * Polls the given task only if it is at the current base. */ final boolean pollFor(ForkJoinTask task) { - ForkJoinTask[] a; int b, i; - if ((b = base) - top < 0 && (a = array) != null && - (i = (a.length - 1) & b) >= 0) { - int j = (i << ASHIFT) + ABASE; + ForkJoinTask[] a; int b; + if ((b = base) - top < 0 && (a = array) != null) { + int j = (((a.length - 1) & b) << ASHIFT) + ABASE; if (U.getObjectVolatile(a, j) == task && base == b && U.compareAndSwapObject(a, j, task, null)) { base = b + 1; @@ -981,7 +880,7 @@ public class ForkJoinPool extends AbstractExecutorService { } /** - * Removes and cancels all known tasks, ignoring any exceptions + * Removes and cancels all known tasks, ignoring any exceptions. 
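
The poll() above is the heart of stealing: read base, read the slot, claim the task by CASing the slot to null, then publish the advanced base. A much-simplified sketch of that claim protocol using java.util.concurrent atomics in place of Unsafe (fixed capacity, no index masking, no owner-side pop, names illustrative):

    import java.util.concurrent.atomic.{ AtomicInteger, AtomicReferenceArray }

    // Simplified take-side of the WorkQueue protocol: stealers claim a
    // task by nulling its slot with a CAS, and only the winner advances
    // base. Slots are never reused here, so there is no ABA concern.
    final class MiniQueue[T <: AnyRef](capacity: Int) {
      private val slots = new AtomicReferenceArray[T](capacity)
      private val base  = new AtomicInteger(0) // next slot to take
      @volatile private var top = 0            // next free slot, owner only

      def push(t: T): Boolean = {              // owner thread only
        val s = top
        if (s >= capacity) false
        else { slots.set(s, t); top = s + 1; true }
      }

      def poll(): T = {                        // safe for multiple stealers
        var b = base.get
        while (b < top) {
          val t = slots.get(b)
          if (t != null && slots.compareAndSet(b, t, null.asInstanceOf[T])) {
            base.compareAndSet(b, b + 1)       // publish the claimed slot
            return t
          }
          b = base.get                         // raced; reread and retry
        }
        null.asInstanceOf[T]
      }
    }
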
*/ final void cancelAll() { ForkJoinTask.cancelIgnoringExceptions(currentJoin); @@ -990,6 +889,20 @@ public class ForkJoinPool extends AbstractExecutorService { ForkJoinTask.cancelIgnoringExceptions(t); } + /** + * Computes next value for random probes. Scans don't require + * a very high quality generator, but also not a crummy one. + * Marsaglia xor-shift is cheap and works well enough. Note: + * This is manually inlined in several usages in ForkJoinPool + * to avoid writes inside busy scan loops. + */ + final int nextSeed() { + int r = seed; + r ^= r << 13; + r ^= r >>> 17; + return seed = r ^= r << 5; + } + // Execution methods /** @@ -1024,7 +937,7 @@ public class ForkJoinPool extends AbstractExecutorService { } /** - * Executes a non-top-level (stolen) task + * Executes a non-top-level (stolen) task. */ final void runSubtask(ForkJoinTask t) { if (t != null) { @@ -1036,18 +949,31 @@ public class ForkJoinPool extends AbstractExecutorService { } /** - * Computes next value for random probes. Scans don't require - * a very high quality generator, but also not a crummy one. - * Marsaglia xor-shift is cheap and works well enough. Note: - * This is manually inlined in several usages in ForkJoinPool - * to avoid writes inside busy scan loops. + * Returns true if owned and not known to be blocked. */ - final int nextSeed() { - int r = seed; - r ^= r << 13; - r ^= r >>> 17; - r ^= r << 5; - return seed = r; + final boolean isApparentlyUnblocked() { + Thread wt; Thread.State s; + return (eventCount >= 0 && + (wt = owner) != null && + (s = wt.getState()) != Thread.State.BLOCKED && + s != Thread.State.WAITING && + s != Thread.State.TIMED_WAITING); + } + + /** + * If this owned and is not already interrupted, try to + * interrupt and/or unpark, ignoring exceptions. + */ + final void interruptOwner() { + Thread wt, p; + if ((wt = owner) != null && !wt.isInterrupted()) { + try { + wt.interrupt(); + } catch (SecurityException ignore) { + } + } + if ((p = parker) != null) + U.unpark(p); } // Unsafe mechanics @@ -1075,48 +1001,15 @@ public class ForkJoinPool extends AbstractExecutorService { } /** - * Class for artificial tasks that are used to replace the target - * of local joins if they are removed from an interior queue slot - * in WorkQueue.tryRemoveAndExec. We don't need the proxy to - * actually do anything beyond having a unique identity. - */ - static final class EmptyTask extends ForkJoinTask { - EmptyTask() { status = ForkJoinTask.NORMAL; } // force done - public Void getRawResult() { return null; } - public void setRawResult(Void x) {} - public boolean exec() { return true; } - } - - /** -<<<<<<< ForkJoinPool.java - * Per-thread records for (typically non-FJ) threads that submit - * to pools. Cureently holds only psuedo-random seed / index that - * is used to chose submission queues in method doSubmit. In the - * future, this may incorporate a means to implement different - * task rejection and resubmission policies. + * Per-thread records for threads that submit to pools. Currently + * holds only pseudo-random seed / index that is used to choose + * submission queues in method doSubmit. In the future, this may + * also incorporate a means to implement different task rejection + * and resubmission policies. 
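
nextSeed() above is Marsaglia's 32-bit xorshift with the classic (13, 17, 5) shift triple: three cheap shift/xor steps whose sequence visits every non-zero 32-bit value, which is why the seed must be kept non-zero. The same step as a standalone function:

    object XorShiftDemo extends App {
      // one xorshift step; full period over the non-zero 32-bit ints
      def xorShift32(seed: Int): Int = {
        var r = seed
        r ^= r << 13
        r ^= r >>> 17
        r ^= r << 5
        r
      }

      var s = 1 // any non-zero seed works; zero is a fixed point
      for (_ ← 1 to 3) { s = xorShift32(s); println(s) }
    }
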
*/ static final class Submitter { - int seed; // seed for random submission queue selection - - // Heuristic padding to ameliorate unfortunate memory placements - int p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, pa, pb, pc, pd, pe; - - Submitter() { - // Use identityHashCode, forced negative, for seed - seed = System.identityHashCode(Thread.currentThread()) | (1 << 31); - } - - /** - * Computes next value for random probes. Like method - * WorkQueue.nextSeed, this is manually inlined in several - * usages to avoid writes inside busy loops. - */ - final int nextSeed() { - int r = seed; - r ^= r << 13; - r ^= r >>> 17; - return seed = r ^= r << 5; - } + int seed; + Submitter() { seed = hashId(Thread.currentThread().getId()); } } /** ThreadLocal class for Submitters */ @@ -1124,43 +1017,186 @@ public class ForkJoinPool extends AbstractExecutorService { public Submitter initialValue() { return new Submitter(); } } + // static fields (initialized in static initializer below) + + /** + * Creates a new ForkJoinWorkerThread. This factory is used unless + * overridden in ForkJoinPool constructors. + */ + public static final ForkJoinWorkerThreadFactory + defaultForkJoinWorkerThreadFactory; + + /** + * Generator for assigning sequence numbers as pool names. + */ + private static final AtomicInteger poolNumberGenerator; + + /** + * Permission required for callers of methods that may start or + * kill threads. + */ + private static final RuntimePermission modifyThreadPermission; + /** * Per-thread submission bookeeping. Shared across all pools * to reduce ThreadLocal pollution and because random motion * to avoid contention in one pool is likely to hold for others. */ - static final ThreadSubmitter submitters = new ThreadSubmitter(); + private static final ThreadSubmitter submitters; + + // static constants /** - * Top-level runloop for workers + * The wakeup interval (in nanoseconds) for a worker waiting for a + * task when the pool is quiescent to instead try to shrink the + * number of workers. The exact value does not matter too + * much. It must be short enough to release resources during + * sustained periods of idleness, but not so short that threads + * are continually re-created. */ - final void runWorker(ForkJoinWorkerThread wt) { - // Initialize queue array and seed in this thread - WorkQueue w = wt.workQueue; - w.growArray(false); - // Same initial hash as Submitters - w.seed = System.identityHashCode(Thread.currentThread()) | (1 << 31); + private static final long SHRINK_RATE = + 4L * 1000L * 1000L * 1000L; // 4 seconds - do {} while (w.runTask(scan(w))); - } + /** + * The timeout value for attempted shrinkage, includes + * some slop to cope with system timer imprecision. + */ + private static final long SHRINK_TIMEOUT = SHRINK_RATE - (SHRINK_RATE / 10); - // Creating, registering and deregistering workers + /** + * The maximum stolen->joining link depth allowed in tryHelpStealer. + * Depths for legitimate chains are unbounded, but we use a fixed + * constant to avoid (otherwise unchecked) cycles and to bound + * staleness of traversal parameters at the expense of sometimes + * blocking when we could be helping. 
+ */ + private static final int MAX_HELP_DEPTH = 16; + + /** + * Bits and masks for control variables + * + * Field ctl is a long packed with: + * AC: Number of active running workers minus target parallelism (16 bits) + * TC: Number of total workers minus target parallelism (16 bits) + * ST: true if pool is terminating (1 bit) + * EC: the wait count of top waiting thread (15 bits) + * ID: poolIndex of top of Treiber stack of waiters (16 bits) + * + * When convenient, we can extract the upper 32 bits of counts and + * the lower 32 bits of queue state, u = (int)(ctl >>> 32) and e = + * (int)ctl. The ec field is never accessed alone, but always + * together with id and st. The offsets of counts by the target + * parallelism and the positionings of fields makes it possible to + * perform the most common checks via sign tests of fields: When + * ac is negative, there are not enough active workers, when tc is + * negative, there are not enough total workers, and when e is + * negative, the pool is terminating. To deal with these possibly + * negative fields, we use casts in and out of "short" and/or + * signed shifts to maintain signedness. + * + * When a thread is queued (inactivated), its eventCount field is + * set negative, which is the only way to tell if a worker is + * prevented from executing tasks, even though it must continue to + * scan for them to avoid queuing races. Note however that + * eventCount updates lag releases so usage requires care. + * + * Field runState is an int packed with: + * SHUTDOWN: true if shutdown is enabled (1 bit) + * SEQ: a sequence number updated upon (de)registering workers (15 bits) + * MASK: mask (power of 2 - 1) covering all registered poolIndexes (16 bits) + * + * The combination of mask and sequence number enables simple + * consistency checks: Staleness of read-only operations on the + * workQueues array can be checked by comparing runState before vs + * after the reads. The low 16 bits (i.e, anding with SMASK) hold + * the smallest power of two covering all indices, minus + * one. 
+ */ + + // bit positions/shifts for fields + private static final int AC_SHIFT = 48; + private static final int TC_SHIFT = 32; + private static final int ST_SHIFT = 31; + private static final int EC_SHIFT = 16; + + // bounds + private static final int POOL_MAX = 0x7fff; // max #workers - 1 + private static final int SMASK = 0xffff; // short bits + private static final int SQMASK = 0xfffe; // even short bits + private static final int SHORT_SIGN = 1 << 15; + private static final int INT_SIGN = 1 << 31; + + // masks + private static final long STOP_BIT = 0x0001L << ST_SHIFT; + private static final long AC_MASK = ((long)SMASK) << AC_SHIFT; + private static final long TC_MASK = ((long)SMASK) << TC_SHIFT; + + // units for incrementing and decrementing + private static final long TC_UNIT = 1L << TC_SHIFT; + private static final long AC_UNIT = 1L << AC_SHIFT; + + // masks and units for dealing with u = (int)(ctl >>> 32) + private static final int UAC_SHIFT = AC_SHIFT - 32; + private static final int UTC_SHIFT = TC_SHIFT - 32; + private static final int UAC_MASK = SMASK << UAC_SHIFT; + private static final int UTC_MASK = SMASK << UTC_SHIFT; + private static final int UAC_UNIT = 1 << UAC_SHIFT; + private static final int UTC_UNIT = 1 << UTC_SHIFT; + + // masks and units for dealing with e = (int)ctl + private static final int E_MASK = 0x7fffffff; // no STOP_BIT + private static final int E_SEQ = 1 << EC_SHIFT; + + // runState bits + private static final int SHUTDOWN = 1 << 31; + private static final int RS_SEQ = 1 << 16; + private static final int RS_SEQ_MASK = 0x7fff0000; + + // access mode for WorkQueue + static final int LIFO_QUEUE = 0; + static final int FIFO_QUEUE = 1; + static final int SHARED_QUEUE = -1; + + // Instance fields + + /* + * Field layout order in this class tends to matter more than one + * would like. Runtime layout order is only loosely related to + * declaration order and may differ across JVMs, but the following + * empirically works OK on current JVMs. + */ + + volatile long ctl; // main pool control + final int parallelism; // parallelism level + final int localMode; // per-worker scheduling mode + int growHints; // for expanding indices/ranges + volatile int runState; // shutdown status, seq, and mask + WorkQueue[] workQueues; // main registry + final Mutex lock; // for registration + final Condition termination; // for awaitTermination + final ForkJoinWorkerThreadFactory factory; // factory for new workers + final Thread.UncaughtExceptionHandler ueh; // per-worker UEH + final AtomicLong stealCount; // collect counts when terminated + final AtomicInteger nextWorkerNumber; // to create worker name string + final String workerNamePrefix; // to create worker name string + + // Creating, registering, deregistering and running workers /** * Tries to create and start a worker */ private void addWorker() { Throwable ex = null; - ForkJoinWorkerThread w = null; + ForkJoinWorkerThread wt = null; try { - if ((w = factory.newThread(this)) != null) { - w.start(); + if ((wt = factory.newThread(this)) != null) { + wt.start(); return; } } catch (Throwable e) { ex = e; } - deregisterWorker(w, ex); + deregisterWorker(wt, ex); // adjust counts etc on failure } /** @@ -1176,29 +1212,28 @@ public class ForkJoinPool extends AbstractExecutorService { /** * Callback from ForkJoinWorkerThread constructor to establish and - * record its WorkQueue + * record its WorkQueue. 
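
The ctl encoding above is easier to see with a worked example: the active and total counts sit in the top two 16-bit fields, stored offset by the parallelism target, so "not enough workers" collapses to a sign test after a shift. A sketch of the packing and the signed extraction (shift constants copied from above, everything else illustrative):

    object CtlDemo extends App {
      val AC_SHIFT = 48
      val TC_SHIFT = 32
      val SMASK    = 0xffff

      def pack(ac: Int, tc: Int, lower: Long): Long =
        ((ac.toLong & SMASK) << AC_SHIFT) |
        ((tc.toLong & SMASK) << TC_SHIFT) |
        (lower & 0xffffffffL)

      // extract as signed shorts, mirroring the sign-test trick
      def ac(ctl: Long): Int = (ctl >> AC_SHIFT).toShort.toInt
      def tc(ctl: Long): Int = (ctl >> TC_SHIFT).toShort.toInt

      val parallelism = 4
      // 1 active and 3 total workers, both stored minus parallelism
      val ctl = pack(1 - parallelism, 3 - parallelism, 0L)
      println(ac(ctl) < 0) // true: too few active workers
      println(tc(ctl) < 0) // true: too few total workers
    }
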
* * @param wt the worker thread */ final void registerWorker(ForkJoinWorkerThread wt) { WorkQueue w = wt.workQueue; - ReentrantLock lock = this.lock; + Mutex lock = this.lock; lock.lock(); try { - int k = nextPoolIndex; + int g = growHints, k = g & SMASK; WorkQueue[] ws = workQueues; if (ws != null) { // ignore on shutdown int n = ws.length; - if (k < 0 || (k & 1) == 0 || k >= n || ws[k] != null) { + if ((k & 1) == 0 || k >= n || ws[k] != null) { for (k = 1; k < n && ws[k] != null; k += 2) ; // workers are at odd indices if (k >= n) // resize workQueues = ws = Arrays.copyOf(ws, n << 1); } - w.poolIndex = k; - w.eventCount = ~(k >>> 1) & SMASK; // Set up wait count - ws[k] = w; // record worker - nextPoolIndex = k + 2; + w.eventCount = w.poolIndex = k; // establish before recording + ws[k] = w; + growHints = (g & ~SMASK) | ((k + 2) & SMASK); int rs = runState; int m = rs & SMASK; // recalculate runState mask if (k > m) @@ -1211,8 +1246,8 @@ public class ForkJoinPool extends AbstractExecutorService { } /** - * Final callback from terminating worker, as well as failure to - * construct or start a worker in addWorker. Removes record of + * Final callback from terminating worker, as well as upon failure + * to construct or start a worker in addWorker. Removes record of * worker from array, and adjusts counts. If pool is shutting * down, tries to complete termination. * @@ -1225,12 +1260,14 @@ public class ForkJoinPool extends AbstractExecutorService { w.runState = -1; // ensure runState is set stealCount.getAndAdd(w.totalSteals + w.nsteals); int idx = w.poolIndex; - ReentrantLock lock = this.lock; + Mutex lock = this.lock; lock.lock(); try { // remove record from array WorkQueue[] ws = workQueues; - if (ws != null && idx >= 0 && idx < ws.length && ws[idx] == w) - ws[nextPoolIndex = idx] = null; + if (ws != null && idx >= 0 && idx < ws.length && ws[idx] == w) { + ws[idx] = null; + growHints = (growHints & ~SMASK) | idx; + } } finally { lock.unlock(); } @@ -1242,10 +1279,12 @@ public class ForkJoinPool extends AbstractExecutorService { ((c - TC_UNIT) & TC_MASK) | (c & ~(AC_MASK|TC_MASK))))); - if (!tryTerminate(false) && w != null) { + if (!tryTerminate(false, false) && w != null) { w.cancelAll(); // cancel remaining tasks if (w.array != null) // suppress signal if never ran signalWork(); // wake up or create replacement + if (ex == null) // help clean refs on way out + ForkJoinTask.helpExpungeStaleExceptions(); } if (ex != null) // rethrow @@ -1253,41 +1292,79 @@ public class ForkJoinPool extends AbstractExecutorService { } /** - * Tries to add and register a new queue at the given index. - * - * @param idx the workQueues array index to register the queue - * @return the queue, or null if could not add because could - * not acquire lock or idx is unusable + * Top-level runloop for workers, called by ForkJoinWorkerThread.run. 
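The registerWorker scan above relies on a simple convention: worker queues occupy the odd slots of workQueues, while doSubmit (further down) masks submitter indices with SQMASK so external submission queues land only on even slots. A small self-contained illustration (an editorial sketch with hypothetical names, not part of the patch):

    // Sketch: odd slots for workers, even slots for submissions.
    public class SlotDemo {
        public static void main(String[] args) {
            int n = 16, m = n - 1;               // workQueues.length and its mask
            boolean[] used = new boolean[n];     // stand-in for non-null slots
            int k;
            for (k = 1; k < n && used[k]; k += 2)
                ;                                // first free odd (worker) slot
            int r = 0x9e3779b9;                  // some submitter's seed
            int submitSlot = r & m & 0xfffe;     // SQMASK clears bit 0 -> even
            System.out.println("worker=" + k + " submit=" + submitSlot);
        }
    }

Because the two index sets are disjoint, workers and external submitters never contend for the same slot.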
*/ - private WorkQueue tryAddSharedQueue(int idx) { - WorkQueue q = null; - ReentrantLock lock = this.lock; - if (idx >= 0 && (idx & 1) == 0 && !lock.isLocked()) { - // create queue outside of lock but only if apparently free - WorkQueue nq = new WorkQueue(null, SHARED_QUEUE); - if (lock.tryLock()) { - try { - WorkQueue[] ws = workQueues; - if (ws != null && idx < ws.length) { - if ((q = ws[idx]) == null) { - int rs; // update runState seq - ws[idx] = q = nq; - runState = (((rs = runState) & SHUTDOWN) | - ((rs + RS_SEQ) & ~SHUTDOWN)); - } + final void runWorker(ForkJoinWorkerThread wt) { + // Initialize queue array and seed in this thread + WorkQueue w = wt.workQueue; + w.growArray(false); + w.seed = hashId(Thread.currentThread().getId()); + + do {} while (w.runTask(scan(w))); + } + + // Submissions + + /** + * Unless shutting down, adds the given task to a submission queue + * at submitter's current queue index (modulo submission + * range). If no queue exists at the index, one is created unless + * pool lock is busy. If the queue and/or lock are busy, another + * index is randomly chosen. The mask in growHints controls the + * effective index range of queues considered. The mask is + * expanded, up to the current workerQueue mask, upon any detected + * contention but otherwise remains small to avoid needlessly + * creating queues when there is no contention. + */ + private void doSubmit(ForkJoinTask task) { + if (task == null) + throw new NullPointerException(); + Submitter s = submitters.get(); + for (int r = s.seed, m = growHints >>> 16;;) { + WorkQueue[] ws; WorkQueue q; Mutex lk; + int k = r & m & SQMASK; // use only even indices + if (runState < 0 || (ws = workQueues) == null || ws.length <= k) + throw new RejectedExecutionException(); // shutting down + if ((q = ws[k]) == null && (lk = lock).tryAcquire(0)) { + try { // try to create new queue + if (ws == workQueues && (q = ws[k]) == null) { + int rs; // update runState seq + ws[k] = q = new WorkQueue(null, SHARED_QUEUE); + runState = (((rs = runState) & SHUTDOWN) | + ((rs + RS_SEQ) & ~SHUTDOWN)); } } finally { - lock.unlock(); + lk.unlock(); } } + if (q != null) { + if (q.trySharedPush(task)) { + signalWork(); + return; + } + else if (m < parallelism - 1 && m < (runState & SMASK)) { + Mutex lock = this.lock; + lock.lock(); // block until lock free + int g = growHints; + if (g >>> 16 == m) // expand range + growHints = (((m << 1) + 1) << 16) | (g & SMASK); + lock.unlock(); // no need for try/finally + } + else if ((r & m) == 0) + Thread.yield(); // occasionally yield if busy + } + if (m == (m = growHints >>> 16)) { + r ^= r << 13; // update seed unless new range + r ^= r >>> 17; // same xorshift as WorkQueues + s.seed = r ^= r << 5; + } } - return q; } // Maintaining ctl counts /** - * Increments active count; mainly called upon return from blocking + * Increments active count; mainly called upon return from blocking. */ final void incrementActiveCount() { long c; @@ -1295,42 +1372,32 @@ public class ForkJoinPool extends AbstractExecutorService { } /** - * Activates or creates a worker + * Tries to activate or create a worker if too few are active. */ final void signalWork() { - /* - * The while condition is true if: (there is are too few total - * workers OR there is at least one waiter) AND (there are too - * few active workers OR the pool is terminating). The value - * of e distinguishes the remaining cases: zero (no waiters) - * for create, negative if terminating (in which case do - * nothing), else release a waiter. 
The secondary checks for - * release (non-null array etc) can fail if the pool begins - * terminating after the test, and don't impose any added cost - * because JVMs must perform null and bounds checks anyway. - */ - long c; int e, u; - while ((((e = (int)(c = ctl)) | (u = (int)(c >>> 32))) & - (INT_SIGN|SHORT_SIGN)) == (INT_SIGN|SHORT_SIGN)) { - WorkQueue[] ws = workQueues; int i; WorkQueue w; Thread p; - if (e == 0) { // add a new worker - if (U.compareAndSwapLong - (this, CTL, c, (long)(((u + UTC_UNIT) & UTC_MASK) | - ((u + UAC_UNIT) & UAC_MASK)) << 32)) { - addWorker(); - break; + long c; int u; + while ((u = (int)((c = ctl) >>> 32)) < 0) { // too few active + WorkQueue[] ws = workQueues; int e, i; WorkQueue w; Thread p; + if ((e = (int)c) > 0) { // at least one waiting + if (ws != null && (i = e & SMASK) < ws.length && + (w = ws[i]) != null && w.eventCount == (e | INT_SIGN)) { + long nc = (((long)(w.nextWait & E_MASK)) | + ((long)(u + UAC_UNIT) << 32)); + if (U.compareAndSwapLong(this, CTL, c, nc)) { + w.eventCount = (e + E_SEQ) & E_MASK; + if ((p = w.parker) != null) + U.unpark(p); // activate and release + break; + } } + else + break; } - else if (e > 0 && ws != null && - (i = ((~e << 1) | 1) & SMASK) < ws.length && - (w = ws[i]) != null && - w.eventCount == (e | INT_SIGN)) { - if (U.compareAndSwapLong - (this, CTL, c, (((long)(w.nextWait & E_MASK)) | - ((long)(u + UAC_UNIT) << 32)))) { - w.eventCount = (e + E_SEQ) & E_MASK; - if ((p = w.parker) != null) - U.unpark(p); // release a waiting worker + else if (e == 0 && (u & SHORT_SIGN) != 0) { // too few total + long nc = (long)(((u + UTC_UNIT) & UTC_MASK) | + ((u + UAC_UNIT) & UAC_MASK)) << 32; + if (U.compareAndSwapLong(this, CTL, c, nc)) { + addWorker(); break; } } @@ -1347,19 +1414,17 @@ public class ForkJoinPool extends AbstractExecutorService { * @return true if the caller can block, else should recheck and retry */ final boolean tryCompensate() { - WorkQueue[] ws; WorkQueue w; Thread p; + WorkQueue w; Thread p; int pc = parallelism, e, u, ac, tc, i; long c = ctl; - + WorkQueue[] ws = workQueues; if ((e = (int)c) >= 0) { if ((ac = ((u = (int)(c >>> 32)) >> UAC_SHIFT)) <= 0 && - e != 0 && (ws = workQueues) != null && - (i = ((~e << 1) | 1) & SMASK) < ws.length && + e != 0 && ws != null && (i = e & SMASK) < ws.length && (w = ws[i]) != null) { + long nc = (long)(w.nextWait & E_MASK) | (c & (AC_MASK|TC_MASK)); if (w.eventCount == (e | INT_SIGN) && - U.compareAndSwapLong - (this, CTL, c, ((long)(w.nextWait & E_MASK) | - (c & (AC_MASK|TC_MASK))))) { + U.compareAndSwapLong(this, CTL, c, nc)) { w.eventCount = (e + E_SEQ) & E_MASK; if ((p = w.parker) != null) U.unpark(p); @@ -1371,7 +1436,7 @@ public class ForkJoinPool extends AbstractExecutorService { if (U.compareAndSwapLong(this, CTL, c, nc)) return true; // no compensation needed } - else if (tc + pc < MAX_ID) { + else if (tc + pc < POOL_MAX) { long nc = ((c + TC_UNIT) & TC_MASK) | (c & ~TC_MASK); if (U.compareAndSwapLong(this, CTL, c, nc)) { addWorker(); @@ -1382,39 +1447,6 @@ public class ForkJoinPool extends AbstractExecutorService { return false; } - // Submissions - - /** - * Unless shutting down, adds the given task to a submission queue - * at submitter's current queue index. If no queue exists at the - * index, one is created unless pool lock is busy. If the queue - * and/or lock are busy, another index is randomly chosen. 
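Both the new doSubmit above and scan below advance a per-thread seed with the same shift triple. As a self-contained aside (not part of the patch), this is Marsaglia's 32-bit xorshift, a full-period generator over the nonzero 32-bit values:

    // Sketch: the (13, 17, 5) xorshift step used for probe/submit indices.
    public class XorShiftDemo {
        public static void main(String[] args) {
            int r = 1, m = 15;                   // any nonzero seed; 16-slot mask
            for (int i = 0; i < 4; ++i) {
                r ^= r << 13;
                r ^= r >>> 17;
                r ^= r << 5;
                System.out.println("probe slot " + (r & m));
            }
        }
    }

It is cheap, carries no state beyond the seed, and spreads submitters and stealers across the table without any shared random source.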
- */ - private void doSubmit(ForkJoinTask task) { - if (task == null) - throw new NullPointerException(); - Submitter s = submitters.get(); - for (int r = s.seed;;) { - WorkQueue q; int k; - int rs = runState, m = rs & SMASK; - WorkQueue[] ws = workQueues; - if (rs < 0 || ws == null) // shutting down - throw new RejectedExecutionException(); - if (ws.length > m && // k must be at index - ((q = ws[k = (r << 1) & m]) != null || - (q = tryAddSharedQueue(k)) != null) && - q.trySharedPush(task)) { - signalWork(); - return; - } - r ^= r << 13; // xorshift seed to new position - r ^= r >>> 17; - if (((s.seed = r ^= r << 5) & m) == 0) - Thread.yield(); // occasionally yield if busy - } - } - - // Scanning for tasks /** @@ -1426,7 +1458,7 @@ public class ForkJoinPool extends AbstractExecutorService { * re-invocation. * * The scan searches for tasks across queues, randomly selecting - * the first #queues probes, favoring steals 2:1 over submissions + * the first #queues probes, favoring steals over submissions * (by exploiting even/odd indexing), and then performing a * circular sweep of all queues. The scan terminates upon either * finding a non-empty queue, or completing a full sweep. If the @@ -1435,6 +1467,8 @@ public class ForkJoinPool extends AbstractExecutorService { * following actions, after which the caller will retry calling * this method unless terminated. * + * * If pool is terminating, terminate the worker. + * * * If not a complete sweep, try to release a waiting worker. If * the scan terminated because the worker is inactivated, then the * released worker will often be the calling worker, and it can @@ -1442,14 +1476,10 @@ public class ForkJoinPool extends AbstractExecutorService { * another worker, but with same net effect. Releasing in other * cases as well ensures that we have enough workers running. * - * * If the caller has run a task since the the last empty scan, + * * If the caller has run a task since the last empty scan, * return (to allow rescan) if other workers are not also yet * enqueued. Field WorkQueue.rescans counts down on each scan to - * ensure eventual inactivation, and occasional calls to - * Thread.yield to help avoid interference with more useful - * activities on the system. - * - * * If pool is terminating, terminate the worker + * ensure eventual inactivation and blocking. * * * If not already enqueued, try to inactivate and enqueue the * worker on wait queue. 
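One detail of the circular sweep is worth spelling out (an illustrative sketch, not part of the patch): the scan body below advances the probe index by 7, and because 7 is odd it is coprime to the power-of-two table length, so a full sweep still visits every slot exactly once, just in a scrambled order:

    // Sketch: an odd stride modulo a power-of-two size is a permutation.
    public class SweepDemo {
        public static void main(String[] args) {
            int m = 15;                              // mask for a 16-slot array
            java.util.Set<Integer> seen = new java.util.TreeSet<Integer>();
            for (int j = 0, k = 5; j <= m; ++j, k += 7)
                seen.add(k & m);
            System.out.println(seen.size() + " slots: " + seen);  // 16 slots
        }
    }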
@@ -1463,83 +1493,80 @@ public class ForkJoinPool extends AbstractExecutorService { * @return a task or null of none found */ private final ForkJoinTask scan(WorkQueue w) { - boolean swept = false; // true after full empty scan - WorkQueue[] ws; // volatile read order matters - int r = w.seed, ec = w.eventCount; // ec is negative if inactive + boolean swept = false; // true after full empty scan + WorkQueue[] ws; // volatile read order matters + int r = w.seed, ec = w.eventCount; // ec is negative if inactive int rs = runState, m = rs & SMASK; - if ((ws = workQueues) != null && ws.length > m) { - ForkJoinTask task = null; - for (int k = 0, j = -2 - m; ; ++j) { + if ((ws = workQueues) != null && ws.length > m) { // consistency check + for (int k = 0, j = -1 - m; ; ++j) { WorkQueue q; int b; - if (j < 0) { // random probes while j negative + if (j < 0) { // random probes while j negative r ^= r << 13; r ^= r >>> 17; k = (r ^= r << 5) | (j & 1); - } // worker (not submit) for odd j - else // cyclic scan when j >= 0 - k += (m >>> 1) | 1; // step by half to reduce bias - + } // worker (not submit) for odd j + else // cyclic scan when j >= 0 + k += 7; // step 7 reduces array packing bias if ((q = ws[k & m]) != null && (b = q.base) - q.top < 0) { - if (ec >= 0) - task = q.pollAt(b); // steal + ForkJoinTask t = (ec >= 0) ? q.pollAt(b) : null; + w.seed = r; // save seed for next scan + if (t != null) + return t; break; } - else if (j > m) { - if (rs == runState) // staleness check + else if (j - m > m) { + if (rs == runState) // staleness check swept = true; break; } } - w.seed = r; // save seed for next scan - if (task != null) - return task; - } - // Decode ctl on empty scan - long c = ctl; int e = (int)c, a = (int)(c >> AC_SHIFT), nr, ns; - if (!swept) { // try to release a waiter - WorkQueue v; Thread p; - if (e > 0 && a < 0 && ws != null && - (v = ws[((~e << 1) | 1) & m]) != null && - v.eventCount == (e | INT_SIGN) && U.compareAndSwapLong - (this, CTL, c, ((long)(v.nextWait & E_MASK) | - ((c + AC_UNIT) & (AC_MASK|TC_MASK))))) { - v.eventCount = (e + E_SEQ) & E_MASK; - if ((p = v.parker) != null) - U.unpark(p); + // Decode ctl on empty scan + long c = ctl; int e = (int)c, a = (int)(c >> AC_SHIFT), nr, ns; + if (e < 0) // pool is terminating + w.runState = -1; + else if (!swept) { // try to release a waiter + WorkQueue v; Thread p; + if (e > 0 && a < 0 && (v = ws[e & m]) != null && + v.eventCount == (e | INT_SIGN)) { + long nc = ((long)(v.nextWait & E_MASK) | + ((c + AC_UNIT) & (AC_MASK|TC_MASK))); + if (U.compareAndSwapLong(this, CTL, c, nc)) { + v.eventCount = (e + E_SEQ) & E_MASK; + if ((p = v.parker) != null) + U.unpark(p); + } + } } - } - else if ((nr = w.rescans) > 0) { // continue rescanning - int ac = a + parallelism; - if ((w.rescans = (ac < nr) ? ac : nr - 1) > 0 && w.seed < 0 && - w.eventCount == ec) - Thread.yield(); // 1 bit randomness for yield call - } - else if (e < 0) // pool is terminating - w.runState = -1; - else if (ec >= 0) { // try to enqueue - long nc = (long)ec | ((c - AC_UNIT) & (AC_MASK|TC_MASK)); - w.nextWait = e; - w.eventCount = ec | INT_SIGN; // mark as inactive - if (!U.compareAndSwapLong(this, CTL, c, nc)) - w.eventCount = ec; // back out on CAS failure - else if ((ns = w.nsteals) != 0) { // set rescans if ran task - if (a <= 0) // ... unless too many active + else if ((nr = w.rescans) > 0) { // continue rescanning + int ac = a + parallelism; + if (((w.rescans = (ac < nr) ? 
ac : nr - 1) & 3) == 0 && + w.eventCount == ec) + Thread.yield(); // occasionally yield + } + else if (ec >= 0) { // try to enqueue + long nc = (long)ec | ((c - AC_UNIT) & (AC_MASK|TC_MASK)); + w.nextWait = e; + w.eventCount = ec | INT_SIGN;// mark as inactive + if (!U.compareAndSwapLong(this, CTL, c, nc)) + w.eventCount = ec; // unmark on CAS failure + else if ((ns = w.nsteals) != 0) { + w.nsteals = 0; // set rescans if ran task w.rescans = a + parallelism; - w.nsteals = 0; - w.totalSteals += ns; + w.totalSteals += ns; + } } - } - else{ // already queued - if (parallelism == -a) - idleAwaitWork(w); // quiescent - if (w.eventCount == ec) { - Thread.interrupted(); // clear status - ForkJoinWorkerThread wt = w.owner; - U.putObject(wt, PARKBLOCKER, this); - w.parker = wt; // emulate LockSupport.park - if (w.eventCount == ec) // recheck - U.park(false, 0L); // block - w.parker = null; - U.putObject(wt, PARKBLOCKER, null); + else { // already queued + if (parallelism == -a) + idleAwaitWork(w); // quiescent + if (w.eventCount == ec) { + Thread.interrupted(); // clear status + ForkJoinWorkerThread wt = w.owner; + U.putObject(wt, PARKBLOCKER, this); + w.parker = wt; // emulate LockSupport.park + if (w.eventCount == ec) // recheck + U.park(false, 0L); // block + w.parker = null; + U.putObject(wt, PARKBLOCKER, null); + } } } return null; @@ -1547,23 +1574,22 @@ public class ForkJoinPool extends AbstractExecutorService { /** * If inactivating worker w has caused pool to become quiescent, - * check for pool termination, and, so long as this is not the - * only worker, wait for event for up to SHRINK_RATE nanosecs On - * timeout, if ctl has not changed, terminate the worker, which - * will in turn wake up another worker to possibly repeat this - * process. + * checks for pool termination, and, so long as this is not the + * only worker, waits for event for up to SHRINK_RATE nanosecs. + * On timeout, if ctl has not changed, terminates the worker, + * which will in turn wake up another worker to possibly repeat + * this process. * * @param w the calling worker */ private void idleAwaitWork(WorkQueue w) { long c; int nw, ec; - if (!tryTerminate(false) && + if (!tryTerminate(false, false) && (int)((c = ctl) >> AC_SHIFT) + parallelism == 0 && (ec = w.eventCount) == ((int)c | INT_SIGN) && (nw = w.nextWait) != 0) { long nc = ((long)(nw & E_MASK) | // ctl to restore on timeout ((c + AC_UNIT) & AC_MASK) | (c & TC_MASK)); - ForkJoinTask.helpExpungeStaleExceptions(); // help clean ForkJoinWorkerThread wt = w.owner; while (ctl == c) { long startTime = System.nanoTime(); @@ -1578,8 +1604,8 @@ public class ForkJoinPool extends AbstractExecutorService { break; if (System.nanoTime() - startTime >= SHRINK_TIMEOUT && U.compareAndSwapLong(this, CTL, c, nc)) { - w.runState = -1; // shrink w.eventCount = (ec + E_SEQ) | E_MASK; + w.runState = -1; // shrink break; } } @@ -1691,17 +1717,18 @@ public class ForkJoinPool extends AbstractExecutorService { return null; if (ws.length > m) { WorkQueue q; - for (int n = m << 2, k = r, j = -n;;) { - r ^= r << 13; r ^= r >>> 17; r ^= r << 5; + for (int k = 0, j = -1 - m;; ++j) { + if (j < 0) { + r ^= r << 13; r ^= r >>> 17; k = r ^= r << 5; + } + else + k += 7; if ((q = ws[(k | 1) & m]) != null && q.base - q.top < 0) { w.seed = r; return q; } - else if (j > n) + else if (j - m > m) return null; - else - k = (j++ < 0) ? 
r : k + ((m >>> 1) | 1); - } } } @@ -1747,7 +1774,7 @@ public class ForkJoinPool extends AbstractExecutorService { } /** - * Gets and removes a local or stolen task for the given worker + * Gets and removes a local or stolen task for the given worker. * * @return a task, if available */ @@ -1779,99 +1806,85 @@ public class ForkJoinPool extends AbstractExecutorService { 8); } - // Termination + // Termination /** - * Sets SHUTDOWN bit of runState under lock - */ - private void enableShutdown() { - ReentrantLock lock = this.lock; - if (runState >= 0) { - lock.lock(); // don't need try/finally - runState |= SHUTDOWN; - lock.unlock(); - } - } - - /** - * Possibly initiates and/or completes termination. Upon - * termination, cancels all queued tasks and then + * Possibly initiates and/or completes termination. The caller + * triggering termination runs three passes through workQueues: + * (0) Setting termination status, followed by wakeups of queued + * workers; (1) cancelling all tasks; (2) interrupting lagging + * threads (likely in external tasks, but possibly also blocked in + * joins). Each pass repeats previous steps because of potential + * lagging thread creation. * * @param now if true, unconditionally terminate, else only * if no work and no active workers + * @param enable if true, enable shutdown when next possible * @return true if now terminating or terminated */ - private boolean tryTerminate(boolean now) { + private boolean tryTerminate(boolean now, boolean enable) { + Mutex lock = this.lock; for (long c;;) { if (((c = ctl) & STOP_BIT) != 0) { // already terminating if ((short)(c >>> TC_SHIFT) == -parallelism) { - ReentrantLock lock = this.lock; // signal when no workers lock.lock(); // don't need try/finally termination.signalAll(); // signal when 0 workers lock.unlock(); } return true; } - if (!now) { - if ((int)(c >> AC_SHIFT) != -parallelism || runState >= 0 || + if (runState >= 0) { // not yet enabled + if (!enable) + return false; + lock.lock(); + runState |= SHUTDOWN; + lock.unlock(); + } + if (!now) { // check if idle & no tasks + if ((int)(c >> AC_SHIFT) != -parallelism || hasQueuedSubmissions()) return false; // Check for unqueued inactive workers. One pass suffices. WorkQueue[] ws = workQueues; WorkQueue w; if (ws != null) { - int n = ws.length; - for (int i = 1; i < n; i += 2) { + for (int i = 1; i < ws.length; i += 2) { if ((w = ws[i]) != null && w.eventCount >= 0) return false; } } } - if (U.compareAndSwapLong(this, CTL, c, c | STOP_BIT)) - startTerminating(); - } - } - - /** - * Initiates termination: Runs three passes through workQueues: - * (0) Setting termination status, followed by wakeups of queued - * workers; (1) cancelling all tasks; (2) interrupting lagging - * threads (likely in external tasks, but possibly also blocked in - * joins). Each pass repeats previous steps because of potential - * lagging thread creation. 
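The three passes described above can be pictured with the following control-flow sketch (an editorial illustration only; FakeQueue is a hypothetical stand-in for WorkQueue, and this is not the patch's code):

    // Sketch of the three-pass termination sweep: pass 0 marks queues
    // terminated, pass 1 also cancels tasks, pass 2 also interrupts owners.
    public class TerminationDemo {
        static class FakeQueue {
            volatile int runState;
            void cancelAll()      { System.out.println("cancel tasks"); }
            void interruptOwner() { System.out.println("interrupt owner"); }
        }
        public static void main(String[] args) {
            FakeQueue[] ws = { new FakeQueue(), null, new FakeQueue() };
            for (int pass = 0; pass < 3; ++pass)        // repeat to catch laggards
                for (FakeQueue w : ws)
                    if (w != null) {
                        w.runState = -1;                // pass 0+: mark terminated
                        if (pass > 0) w.cancelAll();    // pass 1+: drop queued tasks
                        if (pass > 1) w.interruptOwner(); // pass 2: interrupt owner
                    }
        }
    }

Repeating the earlier steps on each pass is what catches workers created after a previous pass already swept the array.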
- */ - private void startTerminating() { - for (int pass = 0; pass < 3; ++pass) { - WorkQueue[] ws = workQueues; - if (ws != null) { - WorkQueue w; Thread wt; - int n = ws.length; - for (int j = 0; j < n; ++j) { - if ((w = ws[j]) != null) { - w.runState = -1; - if (pass > 0) { - w.cancelAll(); - if (pass > 1 && (wt = w.owner) != null && - !wt.isInterrupted()) { - try { - wt.interrupt(); - } catch (SecurityException ignore) { + if (U.compareAndSwapLong(this, CTL, c, c | STOP_BIT)) { + for (int pass = 0; pass < 3; ++pass) { + WorkQueue[] ws = workQueues; + if (ws != null) { + WorkQueue w; + int n = ws.length; + for (int i = 0; i < n; ++i) { + if ((w = ws[i]) != null) { + w.runState = -1; + if (pass > 0) { + w.cancelAll(); + if (pass > 1) + w.interruptOwner(); } } } - } - } - // Wake up workers parked on event queue - int i, e; long c; Thread p; - while ((i = ((~(e = (int)(c = ctl)) << 1) | 1) & SMASK) < n && - (w = ws[i]) != null && - w.eventCount == (e | INT_SIGN)) { - long nc = ((long)(w.nextWait & E_MASK) | - ((c + AC_UNIT) & AC_MASK) | - (c & (TC_MASK|STOP_BIT))); - if (U.compareAndSwapLong(this, CTL, c, nc)) { - w.eventCount = (e + E_SEQ) & E_MASK; - if ((p = w.parker) != null) - U.unpark(p); + // Wake up workers parked on event queue + int i, e; long cc; Thread p; + while ((e = (int)(cc = ctl) & E_MASK) != 0 && + (i = e & SMASK) < n && + (w = ws[i]) != null) { + long nc = ((long)(w.nextWait & E_MASK) | + ((cc + AC_UNIT) & AC_MASK) | + (cc & (TC_MASK|STOP_BIT))); + if (w.eventCount == (e | INT_SIGN) && + U.compareAndSwapLong(this, CTL, cc, nc)) { + w.eventCount = (e + E_SEQ) & E_MASK; + w.runState = -1; + if ((p = w.parker) != null) + U.unpark(p); + } + } } } } @@ -1947,35 +1960,30 @@ public class ForkJoinPool extends AbstractExecutorService { checkPermission(); if (factory == null) throw new NullPointerException(); - if (parallelism <= 0 || parallelism > MAX_ID) + if (parallelism <= 0 || parallelism > POOL_MAX) throw new IllegalArgumentException(); this.parallelism = parallelism; this.factory = factory; this.ueh = handler; this.localMode = asyncMode ? 
FIFO_QUEUE : LIFO_QUEUE; - this.nextPoolIndex = 1; + this.growHints = 1; long np = (long)(-parallelism); // offset ctl counts this.ctl = ((np << AC_SHIFT) & AC_MASK) | ((np << TC_SHIFT) & TC_MASK); // initialize workQueues array with room for 2*parallelism if possible int n = parallelism << 1; - if (n >= MAX_ID) - n = MAX_ID; + if (n >= POOL_MAX) + n = POOL_MAX; else { // See Hackers Delight, sec 3.2, where n < (1 << 16) n |= n >>> 1; n |= n >>> 2; n |= n >>> 4; n |= n >>> 8; } - this.workQueues = new WorkQueue[(n + 1) << 1]; - ReentrantLock lck = this.lock = new ReentrantLock(); - this.termination = lck.newCondition(); + this.workQueues = new WorkQueue[(n + 1) << 1]; // #slots = 2 * #workers + this.termination = (this.lock = new Mutex()).newCondition(); this.stealCount = new AtomicLong(); this.nextWorkerNumber = new AtomicInteger(); StringBuilder sb = new StringBuilder("ForkJoinPool-"); sb.append(poolNumberGenerator.incrementAndGet()); sb.append("-worker-"); this.workerNamePrefix = sb.toString(); - // Create initial submission queue - WorkQueue sq = tryAddSharedQueue(0); - if (sq != null) - sq.growArray(false); } // Execution methods @@ -2093,25 +2101,31 @@ public class ForkJoinPool extends AbstractExecutorService { * @throws RejectedExecutionException {@inheritDoc} */ public List> invokeAll(Collection> tasks) { - ArrayList> forkJoinTasks = - new ArrayList>(tasks.size()); - for (Callable task : tasks) - forkJoinTasks.add(ForkJoinTask.adapt(task)); - invoke(new InvokeAll(forkJoinTasks)); - + // In previous versions of this class, this method constructed + // a task to run ForkJoinTask.invokeAll, but now external + // invocation of multiple tasks is at least as efficient. + List> fs = new ArrayList>(tasks.size()); + // Workaround needed because method wasn't declared with + // wildcards in return type but should have been. 
@SuppressWarnings({"unchecked", "rawtypes"}) - List> futures = (List>) (List) forkJoinTasks; - return futures; - } + List> futures = (List>) (List) fs; - static final class InvokeAll extends RecursiveAction { - final ArrayList> tasks; - InvokeAll(ArrayList> tasks) { this.tasks = tasks; } - public void compute() { - try { invokeAll(tasks); } - catch (Exception ignore) {} + boolean done = false; + try { + for (Callable t : tasks) { + ForkJoinTask f = ForkJoinTask.adapt(t); + doSubmit(f); + fs.add(f); + } + for (ForkJoinTask f : fs) + f.quietlyJoin(); + done = true; + return futures; + } finally { + if (!done) + for (ForkJoinTask f : fs) + f.cancel(false); } - private static final long serialVersionUID = -7914297376763021607L; } /** @@ -2176,14 +2190,8 @@ public class ForkJoinPool extends AbstractExecutorService { int rc = 0; WorkQueue[] ws; WorkQueue w; if ((ws = workQueues) != null) { - int n = ws.length; - for (int i = 1; i < n; i += 2) { - Thread.State s; ForkJoinWorkerThread wt; - if ((w = ws[i]) != null && (wt = w.owner) != null && - w.eventCount >= 0 && - (s = wt.getState()) != Thread.State.BLOCKED && - s != Thread.State.WAITING && - s != Thread.State.TIMED_WAITING) + for (int i = 1; i < ws.length; i += 2) { + if ((w = ws[i]) != null && w.isApparentlyUnblocked()) ++rc; } } @@ -2232,8 +2240,7 @@ public class ForkJoinPool extends AbstractExecutorService { long count = stealCount.get(); WorkQueue[] ws; WorkQueue w; if ((ws = workQueues) != null) { - int n = ws.length; - for (int i = 1; i < n; i += 2) { + for (int i = 1; i < ws.length; i += 2) { if ((w = ws[i]) != null) count += w.totalSteals; } @@ -2255,8 +2262,7 @@ public class ForkJoinPool extends AbstractExecutorService { long count = 0; WorkQueue[] ws; WorkQueue w; if ((ws = workQueues) != null) { - int n = ws.length; - for (int i = 1; i < n; i += 2) { + for (int i = 1; i < ws.length; i += 2) { if ((w = ws[i]) != null) count += w.queueSize(); } @@ -2275,8 +2281,7 @@ public class ForkJoinPool extends AbstractExecutorService { int count = 0; WorkQueue[] ws; WorkQueue w; if ((ws = workQueues) != null) { - int n = ws.length; - for (int i = 0; i < n; i += 2) { + for (int i = 0; i < ws.length; i += 2) { if ((w = ws[i]) != null) count += w.queueSize(); } @@ -2293,8 +2298,7 @@ public class ForkJoinPool extends AbstractExecutorService { public boolean hasQueuedSubmissions() { WorkQueue[] ws; WorkQueue w; if ((ws = workQueues) != null) { - int n = ws.length; - for (int i = 0; i < n; i += 2) { + for (int i = 0; i < ws.length; i += 2) { if ((w = ws[i]) != null && w.queueSize() != 0) return true; } @@ -2312,8 +2316,7 @@ public class ForkJoinPool extends AbstractExecutorService { protected ForkJoinTask pollSubmission() { WorkQueue[] ws; WorkQueue w; ForkJoinTask t; if ((ws = workQueues) != null) { - int n = ws.length; - for (int i = 0; i < n; i += 2) { + for (int i = 0; i < ws.length; i += 2) { if ((w = ws[i]) != null && (t = w.poll()) != null) return t; } @@ -2342,8 +2345,7 @@ public class ForkJoinPool extends AbstractExecutorService { int count = 0; WorkQueue[] ws; WorkQueue w; ForkJoinTask t; if ((ws = workQueues) != null) { - int n = ws.length; - for (int i = 0; i < n; ++i) { + for (int i = 0; i < ws.length; ++i) { if ((w = ws[i]) != null) { while ((t = w.poll()) != null) { c.add(t); @@ -2363,12 +2365,27 @@ public class ForkJoinPool extends AbstractExecutorService { * @return a string identifying this pool, as well as its state */ public String toString() { - long st = getStealCount(); - long qt = getQueuedTaskCount(); - long qs = 
getQueuedSubmissionCount(); - int rc = getRunningThreadCount(); - int pc = parallelism; + // Use a single pass through workQueues to collect counts + long qt = 0L, qs = 0L; int rc = 0; + long st = stealCount.get(); long c = ctl; + WorkQueue[] ws; WorkQueue w; + if ((ws = workQueues) != null) { + for (int i = 0; i < ws.length; ++i) { + if ((w = ws[i]) != null) { + int size = w.queueSize(); + if ((i & 1) == 0) + qs += size; + else { + qt += size; + st += w.totalSteals; + if (w.isApparentlyUnblocked()) + ++rc; + } + } + } + } + int pc = parallelism; int tc = pc + (short)(c >>> TC_SHIFT); int ac = pc + (int)(c >> AC_SHIFT); if (ac < 0) // ignore transient negative @@ -2404,8 +2421,7 @@ public class ForkJoinPool extends AbstractExecutorService { */ public void shutdown() { checkPermission(); - enableShutdown(); - tryTerminate(false); + tryTerminate(false, true); } /** @@ -2426,8 +2442,7 @@ public class ForkJoinPool extends AbstractExecutorService { */ public List shutdownNow() { checkPermission(); - enableShutdown(); - tryTerminate(true); + tryTerminate(true, true); return Collections.emptyList(); } @@ -2484,7 +2499,7 @@ public class ForkJoinPool extends AbstractExecutorService { public boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException { long nanos = unit.toNanos(timeout); - final ReentrantLock lock = this.lock; + final Mutex lock = this.lock; lock.lock(); try { for (;;) { @@ -2625,7 +2640,6 @@ public class ForkJoinPool extends AbstractExecutorService { // Unsafe mechanics private static final sun.misc.Unsafe U; private static final long CTL; - private static final long RUNSTATE; private static final long PARKBLOCKER; static { @@ -2633,15 +2647,13 @@ public class ForkJoinPool extends AbstractExecutorService { modifyThreadPermission = new RuntimePermission("modifyThread"); defaultForkJoinWorkerThreadFactory = new DefaultForkJoinWorkerThreadFactory(); - int s; + submitters = new ThreadSubmitter(); try { U = getUnsafe(); Class k = ForkJoinPool.class; - Class tk = Thread.class; CTL = U.objectFieldOffset (k.getDeclaredField("ctl")); - RUNSTATE = U.objectFieldOffset - (k.getDeclaredField("runState")); + Class tk = Thread.class; PARKBLOCKER = U.objectFieldOffset (tk.getDeclaredField("parkBlocker")); } catch (Exception e) { @@ -2649,8 +2661,14 @@ public class ForkJoinPool extends AbstractExecutorService { } } + /** + * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package. + * Replace with a simple call to Unsafe.getUnsafe when integrating + * into a jdk. + * + * @return a sun.misc.Unsafe + */ private static sun.misc.Unsafe getUnsafe() { return Unsafe.instance; } - } diff --git a/akka-actor/src/main/java/akka/jsr166y/ForkJoinTask.java b/akka-actor/src/main/java/akka/jsr166y/ForkJoinTask.java index fe31c4b165..996d05e647 100644 --- a/akka-actor/src/main/java/akka/jsr166y/ForkJoinTask.java +++ b/akka-actor/src/main/java/akka/jsr166y/ForkJoinTask.java @@ -93,7 +93,7 @@ import java.lang.reflect.Constructor; * performs the most common form of parallel invocation: forking a set * of tasks and joining them all. * - *

In the most typical usages, a fork-join pair act like a a call + *

In the most typical usages, a fork-join pair act like a call * (fork) and return (join) from a parallel recursive function. As is * the case with other forms of recursive calls, returns (joins) * should be performed innermost-first. For example, {@code a.fork(); @@ -143,10 +143,10 @@ import java.lang.reflect.Constructor; * use these {@code protected} methods or marks for any purpose, but * they may be of use in the construction of specialized subclasses. * For example, parallel graph traversals can use the supplied methods - * to avoid revisiting nodes/tasks that have already been - * processed. Also, completion based designs can use them to record - * that one subtask has completed. (Method names for marking are bulky - * in part to encourage definition of methods that reflect their usage + * to avoid revisiting nodes/tasks that have already been processed. + * Also, completion based designs can use them to record that one + * subtask has completed. (Method names for marking are bulky in part + * to encourage definition of methods that reflect their usage * patterns.) * *

Most base support methods are {@code final}, to prevent @@ -439,7 +439,7 @@ public abstract class ForkJoinTask implements Future, Serializable { * any ForkJoinPool will call helpExpungeStaleExceptions when its * pool becomes isQuiescent. */ - static final class ExceptionNode extends WeakReference>{ + static final class ExceptionNode extends WeakReference> { final Throwable ex; ExceptionNode next; final long thrower; // use id not ref to avoid weak cycles From 3d226cb8efb60228277b9df61145c3fc9c006b3d Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 31 Jan 2012 10:12:45 +0100 Subject: [PATCH 42/94] Switching to fork join as default dispatcher and adding tests for it --- .../test/scala/akka/config/ConfigSpec.scala | 15 +++++++++- akka-actor/src/main/resources/reference.conf | 30 ++++++++++--------- .../akka/dispatch/AbstractDispatcher.scala | 11 ++++--- 3 files changed, 35 insertions(+), 21 deletions(-) diff --git a/akka-actor-tests/src/test/scala/akka/config/ConfigSpec.scala b/akka-actor-tests/src/test/scala/akka/config/ConfigSpec.scala index a29ee517a3..ad39057d1d 100644 --- a/akka-actor-tests/src/test/scala/akka/config/ConfigSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/config/ConfigSpec.scala @@ -39,9 +39,11 @@ class ConfigSpec extends AkkaSpec(ConfigFactory.defaultReference) { { val c = config.getConfig("akka.actor.default-dispatcher") + //General dispatcher config + { c.getString("type") must equal("Dispatcher") - c.getString("executor") must equal("thread-pool-executor") + c.getString("executor") must equal("fork-join-executor") c.getInt("mailbox-capacity") must equal(-1) c.getMilliseconds("mailbox-push-timeout-time") must equal(10 * 1000) c.getString("mailboxType") must be("") @@ -50,6 +52,17 @@ class ConfigSpec extends AkkaSpec(ConfigFactory.defaultReference) { c.getMilliseconds("throughput-deadline-time") must equal(0) } + //Fork join executor config + + { + val pool = c.getConfig("fork-join-executor") + pool.getInt("parallelism-min") must equal(8) + pool.getDouble("parallelism-factor") must equal(3.0) + pool.getInt("parallelism-max") must equal(64) + } + + //Thread pool executor config + { val pool = c.getConfig("thread-pool-executor") import pool._ diff --git a/akka-actor/src/main/resources/reference.conf b/akka-actor/src/main/resources/reference.conf index 74f7b5b245..23d573e794 100644 --- a/akka-actor/src/main/resources/reference.conf +++ b/akka-actor/src/main/resources/reference.conf @@ -159,10 +159,24 @@ akka { type = "Dispatcher" # Which kind of ExecutorService to use for this dispatcher - # Valid options: "thread-pool-executor" requires a "thread-pool-executor" section + # Valid options: # "fork-join-executor" requires a "fork-join-executor" section + # "thread-pool-executor" requires a "thread-pool-executor" section + # or # A FQCN of a class extending ExecutorServiceConfigurator - executor = "thread-pool-executor" + executor = "fork-join-executor" + + # This will be used if you have set "executor = "fork-join-executor"" + fork-join-executor { + # Min number of threads to cap factor-based parallelism number to + parallelism-min = 8 + + # Parallelism (threads) ... 
ceil(available processors * factor) + parallelism-factor = 3.0 + + # Max number of threads to cap factor-based parallelism number to + parallelism-max = 64 + } # This will be used if you have set "executor = "thread-pool-executor"" thread-pool-executor { @@ -199,18 +213,6 @@ akka { allow-core-timeout = on } - # This will be used if you have set "executor = "fork-join-executor"" - fork-join-executor { - # Min number of threads to cap factor-based parallelism number to - parallelism-min = 8 - - # Parallelism (threads) ... ceil(available processors * factor) - parallelism-factor = 3.0 - - # Max number of threads to cap factor-based parallelism number to - parallelism-max = 64 - } - # How long time the dispatcher will wait for new actors until it shuts down shutdown-timeout = 1s diff --git a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala index e3e312b720..d4c8f5f560 100644 --- a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala @@ -338,17 +338,16 @@ abstract class MessageDispatcherConfigurator(val config: Config, val prerequisit def configureExecutor(): ExecutorServiceConfigurator = { config.getString("executor") match { - case null | "" | "thread-pool-executor" ⇒ new ThreadPoolExecutorConfigurator(config.getConfig("thread-pool-executor"), prerequisites) - case "fork-join-executor" ⇒ new ForkJoinExecutorConfigurator(config.getConfig("fork-join-executor"), prerequisites) + case null | "" | "fork-join-executor" ⇒ new ForkJoinExecutorConfigurator(config.getConfig("fork-join-executor"), prerequisites) + case "thread-pool-executor" ⇒ new ThreadPoolExecutorConfigurator(config.getConfig("thread-pool-executor"), prerequisites) case fqcn ⇒ val constructorSignature = Array[Class[_]](classOf[Config], classOf[DispatcherPrerequisites]) ReflectiveAccess.createInstance[ExecutorServiceConfigurator](fqcn, constructorSignature, Array[AnyRef](config, prerequisites)) match { case Right(instance) ⇒ instance - case Left(exception) ⇒ - throw new IllegalArgumentException( - ("""Cannot instantiate ExecutorServiceConfigurator ("executor = [%s]"), defined in [%s], + case Left(exception) ⇒ throw new IllegalArgumentException( + ("""Cannot instantiate ExecutorServiceConfigurator ("executor = [%s]"), defined in [%s], make sure it has an accessible constructor with a [%s,%s] signature""") - .format(fqcn, config.getString("id"), classOf[Config], classOf[DispatcherPrerequisites]), exception) + .format(fqcn, config.getString("id"), classOf[Config], classOf[DispatcherPrerequisites]), exception) } } } From d0d2bee5c6bc3eb3fe74ce6efa7ee5df5c99c8e7 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 31 Jan 2012 10:22:20 +0100 Subject: [PATCH 43/94] Adding correct classloader to executor config --- .../src/main/scala/akka/dispatch/AbstractDispatcher.scala | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala index d4c8f5f560..943eeb2b33 100644 --- a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala @@ -342,7 +342,7 @@ abstract class MessageDispatcherConfigurator(val config: Config, val prerequisit case "thread-pool-executor" ⇒ new ThreadPoolExecutorConfigurator(config.getConfig("thread-pool-executor"), prerequisites) case fqcn ⇒ 
val constructorSignature = Array[Class[_]](classOf[Config], classOf[DispatcherPrerequisites]) - ReflectiveAccess.createInstance[ExecutorServiceConfigurator](fqcn, constructorSignature, Array[AnyRef](config, prerequisites)) match { + ReflectiveAccess.createInstance[ExecutorServiceConfigurator](fqcn, constructorSignature, Array[AnyRef](config, prerequisites), prerequisites.classloader) match { case Right(instance) ⇒ instance case Left(exception) ⇒ throw new IllegalArgumentException( ("""Cannot instantiate ExecutorServiceConfigurator ("executor = [%s]"), defined in [%s], @@ -380,11 +380,6 @@ class ThreadPoolExecutorConfigurator(config: Config, prerequisites: DispatcherPr threadPoolConfig.createExecutorServiceFactory(name, threadFactory) } -/*int parallelism, - ForkJoinWorkerThreadFactory factory, - Thread.UncaughtExceptionHandler handler, - boolean asyncMode*/ - class ForkJoinExecutorConfigurator(config: Config, prerequisites: DispatcherPrerequisites) extends ExecutorServiceConfigurator(config, prerequisites) { def validate(t: ThreadFactory): ForkJoinPool.ForkJoinWorkerThreadFactory = prerequisites.threadFactory match { From 4c2a44ec7a49903f8fb5e135bec1de132138e699 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 31 Jan 2012 11:02:56 +0100 Subject: [PATCH 44/94] Removing JMX from akka-actor --- akka-actor/src/main/scala/akka/util/JMX.scala | 34 ------------------- 1 file changed, 34 deletions(-) delete mode 100644 akka-actor/src/main/scala/akka/util/JMX.scala diff --git a/akka-actor/src/main/scala/akka/util/JMX.scala b/akka-actor/src/main/scala/akka/util/JMX.scala deleted file mode 100644 index 44d1410d6b..0000000000 --- a/akka-actor/src/main/scala/akka/util/JMX.scala +++ /dev/null @@ -1,34 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -package akka.util - -import akka.event.Logging.Error -import java.lang.management.ManagementFactory -import javax.management.{ ObjectInstance, ObjectName, InstanceAlreadyExistsException, InstanceNotFoundException } -import akka.actor.ActorSystem - -object JMX { - private val mbeanServer = ManagementFactory.getPlatformMBeanServer - - def nameFor(hostname: String, service: String, bean: String): ObjectName = - new ObjectName("akka.%s:type=%s,name=%s".format(hostname, service, bean.replace(":", "_"))) - - def register(name: ObjectName, mbean: AnyRef)(implicit system: ActorSystem): Option[ObjectInstance] = try { - Some(mbeanServer.registerMBean(mbean, name)) - } catch { - case e: InstanceAlreadyExistsException ⇒ - Some(mbeanServer.getObjectInstance(name)) - case e: Exception ⇒ - system.eventStream.publish(Error(e, "JMX", this.getClass, "Error when registering mbean [%s]".format(mbean))) - None - } - - def unregister(mbean: ObjectName)(implicit system: ActorSystem) = try { - mbeanServer.unregisterMBean(mbean) - } catch { - case e: InstanceNotFoundException ⇒ {} - case e: Exception ⇒ system.eventStream.publish(Error(e, "JMX", this.getClass, "Error while unregistering mbean [%s]".format(mbean))) - } -} From 418ab3f6f6d18dd67bdf361fc81189452f43a077 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Tue, 31 Jan 2012 13:33:04 +0100 Subject: [PATCH 45/94] Moved Gossiper, FailureDetector and VectorClock (with tests) to the akka-cluster module. Deleted all old unused cluster code (ZooKeeper-based stuff). 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- .../java/akka/cluster/LocalBookKeeper.java | 187 -- .../cluster/zookeeper/DistributedQueue.java | 312 --- .../cluster/zookeeper/ZooKeeperQueue.java | 173 -- .../cluster}/AccrualFailureDetector.scala | 12 +- .../scala/akka/cluster/BookKeeperServer.scala | 35 - .../src/main/scala/akka/cluster/Cluster.scala | 1876 ----------------- .../scala/akka/cluster/ClusterActorRef.scala | 129 -- .../scala/akka/cluster/ClusterDeployer.scala | 205 -- .../main/scala/akka/cluster}/Gossiper.scala | 15 +- .../scala/akka/cluster/LocalCluster.scala | 105 - .../cluster}/RemoteConnectionManager.scala | 8 +- .../scala/akka/cluster/TransactionLog.scala | 604 ------ .../scala/akka/cluster}/VectorClock.scala | 2 +- .../metrics/LocalNodeMetricsManager.scala | 226 -- .../cluster/metrics/MetricsProvider.scala | 154 -- .../scala/akka/cluster/storage/Storage.scala | 366 ---- .../akka/cluster/zookeeper/AkkaZkClient.scala | 34 - .../cluster/zookeeper/AkkaZooKeeper.scala | 32 - .../cluster/zookeeper/ZooKeeperBarrier.scala | 104 - .../GossipMembershipMultiJvmSpec.scala | 2 +- .../NewLeaderChangeListenerMultiJvmNode1.conf | 2 - .../NewLeaderChangeListenerMultiJvmNode1.opts | 1 - .../NewLeaderChangeListenerMultiJvmNode2.conf | 2 - .../NewLeaderChangeListenerMultiJvmNode2.opts | 1 - .../NewLeaderChangeListenerMultiJvmSpec.scala | 63 - ...eConnectedChangeListenerMultiJvmNode1.conf | 2 - ...eConnectedChangeListenerMultiJvmNode1.opts | 1 - ...eConnectedChangeListenerMultiJvmNode2.conf | 2 - ...eConnectedChangeListenerMultiJvmNode2.opts | 1 - ...eConnectedChangeListenerMultiJvmSpec.scala | 65 - ...sconnectedChangeListenerMultiJvmNode1.conf | 2 - ...sconnectedChangeListenerMultiJvmNode1.opts | 1 - ...sconnectedChangeListenerMultiJvmNode2.conf | 2 - ...sconnectedChangeListenerMultiJvmNode2.opts | 1 - ...sconnectedChangeListenerMultiJvmSpec.scala | 65 - .../ConfigurationStorageMultiJvmNode1.conf | 2 - .../ConfigurationStorageMultiJvmNode1.opts | 1 - .../ConfigurationStorageMultiJvmNode2.conf | 2 - .../ConfigurationStorageMultiJvmNode2.opts | 1 - .../ConfigurationStorageMultiJvmSpec.scala | 89 - .../election/LeaderElectionMultiJvmNode1.conf | 2 - .../election/LeaderElectionMultiJvmNode1.opts | 1 - .../election/LeaderElectionMultiJvmNode2.conf | 2 - .../election/LeaderElectionMultiJvmNode2.opts | 1 - .../election/LeaderElectionMultiJvmSpec.scala | 71 - .../registry/RegistryStoreMultiJvmNode1.conf | 2 - .../registry/RegistryStoreMultiJvmNode1.opts | 1 - .../registry/RegistryStoreMultiJvmNode2.conf | 2 - .../registry/RegistryStoreMultiJvmNode2.opts | 1 - .../registry/RegistryStoreMultiJvmSpec.scala | 116 - .../deployment/DeploymentMultiJvmNode1.conf | 4 - .../deployment/DeploymentMultiJvmNode1.opts | 1 - .../deployment/DeploymentMultiJvmNode2.conf | 4 - .../deployment/DeploymentMultiJvmNode2.opts | 1 - .../deployment/DeploymentMultiJvmSpec.scala | 75 - .../local/LocalMetricsMultiJvmNode1.conf | 4 - .../local/LocalMetricsMultiJvmNode1.opts | 1 - .../local/LocalMetricsMultiJvmSpec.scala | 134 -- .../remote/RemoteMetricsMultiJvmNode1.conf | 3 - .../remote/RemoteMetricsMultiJvmNode1.opts | 1 - .../remote/RemoteMetricsMultiJvmNode2.conf | 3 - .../remote/RemoteMetricsMultiJvmNode2.opts | 1 - .../remote/RemoteMetricsMultiJvmSpec.scala | 133 -- .../MigrationExplicitMultiJvmNode1.conf | 2 - .../MigrationExplicitMultiJvmNode1.opts | 1 - .../MigrationExplicitMultiJvmNode2.conf | 2 - .../MigrationExplicitMultiJvmNode2.opts | 1 - 
.../MigrationExplicitMultiJvmSpec.scala | 112 - .../ClusterActorRefCleanupMultiJvmNode1.conf | 6 - .../ClusterActorRefCleanupMultiJvmNode1.opts | 1 - .../ClusterActorRefCleanupMultiJvmNode2.conf | 5 - .../ClusterActorRefCleanupMultiJvmNode2.opts | 1 - .../ClusterActorRefCleanupMultiJvmNode3.conf | 5 - .../ClusterActorRefCleanupMultiJvmNode3.opts | 1 - .../ClusterActorRefCleanupMultiJvmSpec.scala | 154 -- ...LogWriteBehindNoSnapshotMultiJvmNode1.conf | 7 - ...LogWriteBehindNoSnapshotMultiJvmNode1.opts | 1 - ...LogWriteBehindNoSnapshotMultiJvmNode2.conf | 7 - ...LogWriteBehindNoSnapshotMultiJvmNode2.opts | 1 - ...LogWriteBehindNoSnapshotMultiJvmSpec.scala | 99 - ...onLogWriteBehindSnapshotMultiJvmNode1.conf | 7 - ...onLogWriteBehindSnapshotMultiJvmNode1.opts | 1 - ...onLogWriteBehindSnapshotMultiJvmNode2.conf | 7 - ...onLogWriteBehindSnapshotMultiJvmNode2.opts | 1 - ...onLogWriteBehindSnapshotMultiJvmSpec.scala | 118 -- ...ogWriteThroughNoSnapshotMultiJvmNode1.conf | 7 - ...ogWriteThroughNoSnapshotMultiJvmNode1.opts | 1 - ...ogWriteThroughNoSnapshotMultiJvmNode2.conf | 7 - ...ogWriteThroughNoSnapshotMultiJvmNode2.opts | 1 - ...ogWriteThroughNoSnapshotMultiJvmSpec.scala | 99 - ...nLogWriteThroughSnapshotMultiJvmNode1.conf | 7 - ...nLogWriteThroughSnapshotMultiJvmNode1.opts | 1 - ...nLogWriteThroughSnapshotMultiJvmNode2.conf | 7 - ...nLogWriteThroughSnapshotMultiJvmNode2.opts | 1 - ...nLogWriteThroughSnapshotMultiJvmSpec.scala | 116 - .../DirectRoutingFailoverMultiJvmNode1.conf | 5 - .../DirectRoutingFailoverMultiJvmNode1.opts | 1 - .../DirectRoutingFailoverMultiJvmNode2.conf | 5 - .../DirectRoutingFailoverMultiJvmNode2.opts | 1 - .../DirectRoutingFailoverMultiJvmSpec.scala | 90 - .../homenode/HomeNode1MultiJvmSpec.scala | 60 - .../homenode/HomeNodeMultiJvmNode1.conf | 6 - .../homenode/HomeNodeMultiJvmNode1.opts | 1 - .../homenode/HomeNodeMultiJvmNode2.conf | 6 - .../homenode/HomeNodeMultiJvmNode2.opts | 1 - ...ngleReplicaDirectRoutingMultiJvmNode1.conf | 4 - ...ngleReplicaDirectRoutingMultiJvmNode1.opts | 1 - ...ngleReplicaDirectRoutingMultiJvmNode2.conf | 4 - ...ngleReplicaDirectRoutingMultiJvmNode2.opts | 1 - ...ngleReplicaDirectRoutingMultiJvmSpec.scala | 62 - .../failover/RandomFailoverMultiJvmNode1.conf | 8 - .../failover/RandomFailoverMultiJvmNode1.opts | 1 - .../failover/RandomFailoverMultiJvmNode2.conf | 8 - .../failover/RandomFailoverMultiJvmNode2.opts | 1 - .../failover/RandomFailoverMultiJvmNode3.conf | 8 - .../failover/RandomFailoverMultiJvmNode3.opts | 1 - .../failover/RandomFailoverMultiJvmSpec.scala | 145 -- .../homenode/HomeNodeMultiJvmNode1.conf | 8 - .../homenode/HomeNodeMultiJvmNode1.opts | 1 - .../homenode/HomeNodeMultiJvmNode2.conf | 8 - .../homenode/HomeNodeMultiJvmNode2.opts | 1 - .../homenode/HomeNodeMultiJvmSpec.scala | 60 - .../Random1ReplicaMultiJvmNode1.conf | 4 - .../Random1ReplicaMultiJvmNode1.opts | 1 - .../Random1ReplicaMultiJvmSpec.scala | 51 - .../Random3ReplicasMultiJvmNode1.conf | 4 - .../Random3ReplicasMultiJvmNode1.opts | 1 - .../Random3ReplicasMultiJvmNode2.conf | 4 - .../Random3ReplicasMultiJvmNode2.opts | 1 - .../Random3ReplicasMultiJvmNode3.conf | 4 - .../Random3ReplicasMultiJvmNode3.opts | 1 - .../Random3ReplicasMultiJvmSpec.scala | 119 -- .../RoundRobinFailoverMultiJvmNode1.conf | 8 - .../RoundRobinFailoverMultiJvmNode1.opts | 1 - .../RoundRobinFailoverMultiJvmNode2.conf | 8 - .../RoundRobinFailoverMultiJvmNode2.opts | 1 - .../RoundRobinFailoverMultiJvmNode3.conf | 8 - .../RoundRobinFailoverMultiJvmNode3.opts | 1 - 
.../RoundRobinFailoverMultiJvmSpec.scala | 146 -- .../homenode/HomeNodeMultiJvmNode1.conf | 8 - .../homenode/HomeNodeMultiJvmNode1.opts | 1 - .../homenode/HomeNodeMultiJvmNode2.conf | 5 - .../homenode/HomeNodeMultiJvmNode2.opts | 1 - .../homenode/HomeNodeMultiJvmSpec.scala | 63 - .../RoundRobin1ReplicaMultiJvmNode1.conf | 4 - .../RoundRobin1ReplicaMultiJvmNode1.opts | 1 - .../RoundRobin1ReplicaMultiJvmSpec.scala | 49 - .../RoundRobin2ReplicasMultiJvmNode1.conf | 4 - .../RoundRobin2ReplicasMultiJvmNode1.opts | 1 - .../RoundRobin2ReplicasMultiJvmNode2.conf | 4 - .../RoundRobin2ReplicasMultiJvmNode2.opts | 1 - .../RoundRobin2ReplicasMultiJvmSpec.scala | 121 -- .../RoundRobin3ReplicasMultiJvmNode1.conf | 4 - .../RoundRobin3ReplicasMultiJvmNode1.opts | 1 - .../RoundRobin3ReplicasMultiJvmNode2.conf | 4 - .../RoundRobin3ReplicasMultiJvmNode2.opts | 1 - .../RoundRobin3ReplicasMultiJvmNode3.conf | 4 - .../RoundRobin3ReplicasMultiJvmNode3.opts | 1 - .../RoundRobin3ReplicasMultiJvmSpec.scala | 158 -- .../ScatterGatherFailoverMultiJvmNode1.conf | 6 - .../ScatterGatherFailoverMultiJvmNode1.opts | 1 - .../ScatterGatherFailoverMultiJvmNode2.conf | 6 - .../ScatterGatherFailoverMultiJvmNode2.opts | 1 - .../ScatterGatherFailoverMultiJvmSpec.scala | 114 - .../sample/PingPongMultiJvmExample.scala | 227 -- .../cluster}/AccrualFailureDetectorSpec.scala | 2 +- .../AsynchronousTransactionLogSpec.scala | 230 -- .../GossipingAccrualFailureDetectorSpec.scala | 2 +- .../SynchronousTransactionLogSpec.scala | 190 -- .../scala/akka/cluster}/VectorClockSpec.scala | 2 +- .../sample/ClusteredPingPongSample.scala | 134 -- .../cluster/sample/ComputeGridSample.scala | 91 - .../cluster/storage/InMemoryStorageSpec.scala | 241 --- .../cluster/storage/StorageTestUtils.scala | 15 - .../storage/ZooKeeperStorageSpec.scala | 132 -- .../akka/remote/RemoteActorRefProvider.scala | 6 - .../src/test/resources/log4j.properties | 58 - .../src/test/resources/logback-test.xml | 26 - akka-remote/src/test/resources/zoo.cfg | 12 - project/AkkaBuild.scala | 28 +- 180 files changed, 45 insertions(+), 9014 deletions(-) delete mode 100644 akka-cluster/src/main/java/akka/cluster/LocalBookKeeper.java delete mode 100644 akka-cluster/src/main/java/akka/cluster/zookeeper/DistributedQueue.java delete mode 100644 akka-cluster/src/main/java/akka/cluster/zookeeper/ZooKeeperQueue.java rename {akka-remote/src/main/scala/akka/remote => akka-cluster/src/main/scala/akka/cluster}/AccrualFailureDetector.scala (99%) delete mode 100644 akka-cluster/src/main/scala/akka/cluster/BookKeeperServer.scala delete mode 100644 akka-cluster/src/main/scala/akka/cluster/Cluster.scala delete mode 100644 akka-cluster/src/main/scala/akka/cluster/ClusterActorRef.scala delete mode 100644 akka-cluster/src/main/scala/akka/cluster/ClusterDeployer.scala rename {akka-remote/src/main/scala/akka/remote => akka-cluster/src/main/scala/akka/cluster}/Gossiper.scala (97%) delete mode 100644 akka-cluster/src/main/scala/akka/cluster/LocalCluster.scala rename {akka-remote/src/main/scala/akka/remote => akka-cluster/src/main/scala/akka/cluster}/RemoteConnectionManager.scala (96%) delete mode 100644 akka-cluster/src/main/scala/akka/cluster/TransactionLog.scala rename {akka-remote/src/main/scala/akka/remote => akka-cluster/src/main/scala/akka/cluster}/VectorClock.scala (99%) delete mode 100644 akka-cluster/src/main/scala/akka/cluster/metrics/LocalNodeMetricsManager.scala delete mode 100644 akka-cluster/src/main/scala/akka/cluster/metrics/MetricsProvider.scala delete mode 100644 
akka-cluster/src/main/scala/akka/cluster/storage/Storage.scala delete mode 100644 akka-cluster/src/main/scala/akka/cluster/zookeeper/AkkaZkClient.scala delete mode 100644 akka-cluster/src/main/scala/akka/cluster/zookeeper/AkkaZooKeeper.scala delete mode 100644 akka-cluster/src/main/scala/akka/cluster/zookeeper/ZooKeeperBarrier.scala rename {akka-remote/src/multi-jvm/scala/akka/remote => akka-cluster/src/multi-jvm/scala/akka/cluster}/GossipMembershipMultiJvmSpec.scala (99%) delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmNode1.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmNode1.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmNode2.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmNode2.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmSpec.scala delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmNode1.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmNode1.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmNode2.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmNode2.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmSpec.scala delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmNode1.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmNode1.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmNode2.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmNode2.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmSpec.scala delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmNode1.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmNode1.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmNode2.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmNode2.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmSpec.scala delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmNode1.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmNode1.opts delete mode 100644 
akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmNode2.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmNode2.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmSpec.scala delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmNode1.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmNode1.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmNode2.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmNode2.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmSpec.scala delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmNode1.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmNode1.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmNode2.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmNode2.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmSpec.scala delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/local/LocalMetricsMultiJvmNode1.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/local/LocalMetricsMultiJvmNode1.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/local/LocalMetricsMultiJvmSpec.scala delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmNode1.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmNode1.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmNode2.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmNode2.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmSpec.scala delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmNode1.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmNode1.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmNode2.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmNode2.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmSpec.scala delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode1.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode1.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode2.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode2.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode3.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode3.opts delete 
mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmSpec.scala delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmNode1.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmNode1.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmNode2.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmNode2.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmSpec.scala delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmNode1.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmNode1.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmNode2.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmNode2.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmSpec.scala delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmNode1.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmNode1.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmNode2.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmNode2.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmSpec.scala delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmNode1.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmNode1.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmNode2.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmNode2.opts delete mode 100644 
akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmSpec.scala delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmNode1.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmNode1.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmNode2.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmNode2.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmSpec.scala delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNode1MultiJvmSpec.scala delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNodeMultiJvmNode1.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNodeMultiJvmNode1.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNodeMultiJvmNode2.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNodeMultiJvmNode2.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmNode1.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmNode1.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmNode2.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmNode2.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmSpec.scala delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode1.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode1.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode2.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode2.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode3.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode3.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmSpec.scala delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmNode1.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmNode1.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmNode2.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmNode2.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmSpec.scala delete mode 100644 
akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_1/Random1ReplicaMultiJvmNode1.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_1/Random1ReplicaMultiJvmNode1.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_1/Random1ReplicaMultiJvmSpec.scala delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode1.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode1.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode2.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode2.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode3.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode3.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmSpec.scala delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode1.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode1.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode2.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode2.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode3.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode3.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmSpec.scala delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmNode1.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmNode1.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmNode2.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmNode2.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmSpec.scala delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_1/RoundRobin1ReplicaMultiJvmNode1.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_1/RoundRobin1ReplicaMultiJvmNode1.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_1/RoundRobin1ReplicaMultiJvmSpec.scala delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmNode1.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmNode1.opts delete mode 100644 
akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmNode2.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmNode2.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmSpec.scala delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode1.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode1.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode2.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode2.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode3.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode3.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmSpec.scala delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmNode1.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmNode1.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmNode2.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmNode2.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmSpec.scala delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/sample/PingPongMultiJvmExample.scala rename {akka-remote/src/test/scala/akka/remote => akka-cluster/src/test/scala/akka/cluster}/AccrualFailureDetectorSpec.scala (99%) delete mode 100644 akka-cluster/src/test/scala/akka/cluster/AsynchronousTransactionLogSpec.scala rename {akka-remote/src/test/scala/akka/remote => akka-cluster/src/test/scala/akka/cluster}/GossipingAccrualFailureDetectorSpec.scala (99%) delete mode 100644 akka-cluster/src/test/scala/akka/cluster/SynchronousTransactionLogSpec.scala rename {akka-remote/src/test/scala/akka/remote => akka-cluster/src/test/scala/akka/cluster}/VectorClockSpec.scala (99%) delete mode 100644 akka-cluster/src/test/scala/akka/cluster/sample/ClusteredPingPongSample.scala delete mode 100644 akka-cluster/src/test/scala/akka/cluster/sample/ComputeGridSample.scala delete mode 100644 akka-cluster/src/test/scala/akka/cluster/storage/InMemoryStorageSpec.scala delete mode 100644 akka-cluster/src/test/scala/akka/cluster/storage/StorageTestUtils.scala delete mode 100644 akka-cluster/src/test/scala/akka/cluster/storage/ZooKeeperStorageSpec.scala delete mode 100644 akka-remote/src/test/resources/log4j.properties delete mode 100644 akka-remote/src/test/resources/logback-test.xml delete mode 100644 akka-remote/src/test/resources/zoo.cfg diff --git a/akka-cluster/src/main/java/akka/cluster/LocalBookKeeper.java 
b/akka-cluster/src/main/java/akka/cluster/LocalBookKeeper.java deleted file mode 100644 index 413b9a3154..0000000000 --- a/akka-cluster/src/main/java/akka/cluster/LocalBookKeeper.java +++ /dev/null @@ -1,187 +0,0 @@ -package akka.cluster; - -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import java.io.BufferedReader; -import java.io.File; -import java.io.IOException; -import java.io.InputStreamReader; -import java.io.OutputStream; -import java.net.InetAddress; -import java.net.InetSocketAddress; -import java.net.Socket; - -import org.apache.bookkeeper.proto.BookieServer; -import org.apache.zookeeper.CreateMode; -import org.apache.zookeeper.KeeperException; -import org.apache.zookeeper.WatchedEvent; -import org.apache.zookeeper.Watcher; -import org.apache.zookeeper.ZooKeeper; -import org.apache.zookeeper.ZooDefs.Ids; -import org.apache.zookeeper.server.NIOServerCnxnFactory; -import org.apache.zookeeper.server.ZooKeeperServer; - -public class LocalBookKeeper { - public static final int CONNECTION_TIMEOUT = 30000; - - int numberOfBookies; - - public LocalBookKeeper() { - numberOfBookies = 3; - } - - public LocalBookKeeper(int numberOfBookies) { - this(); - this.numberOfBookies = numberOfBookies; - } - - private final String HOSTPORT = "127.0.0.1:2181"; - NIOServerCnxnFactory serverFactory; - ZooKeeperServer zks; - ZooKeeper zkc; - int ZooKeeperDefaultPort = 2181; - File ZkTmpDir; - - //BookKeeper variables - File tmpDirs[]; - BookieServer bs[]; - Integer initialPort = 5000; - - /** - * @param args - */ - - public void runZookeeper(int maxCC) throws IOException{ - // create a ZooKeeper server(dataDir, dataLogDir, port) - //ServerStats.registerAsConcrete(); - //ClientBase.setupTestEnv(); - ZkTmpDir = File.createTempFile("zookeeper", "test"); - ZkTmpDir.delete(); - ZkTmpDir.mkdir(); - - try { - zks = new ZooKeeperServer(ZkTmpDir, ZkTmpDir, ZooKeeperDefaultPort); - serverFactory = new NIOServerCnxnFactory(); - serverFactory.configure(new InetSocketAddress(ZooKeeperDefaultPort), maxCC); - serverFactory.startup(zks); - } catch (Exception e) { - // TODO Auto-generated catch block - } - - boolean b = waitForServerUp(HOSTPORT, CONNECTION_TIMEOUT); - } - - public void initializeZookeper() { - //initialize the zk client with values - try { - zkc = new ZooKeeper("127.0.0.1", ZooKeeperDefaultPort, new emptyWatcher()); - zkc.create("/ledgers", new byte[0], Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); - zkc.create("/ledgers/available", new byte[0], Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); - // No need to create an entry for each requested bookie anymore as the - // BookieServers will register themselves with ZooKeeper on startup. 
- } catch (KeeperException e) { - } catch (InterruptedException e) { - } catch (IOException e) { - } - } - - public void runBookies() throws IOException{ - // Create Bookie Servers (B1, B2, B3) - - tmpDirs = new File[numberOfBookies]; - bs = new BookieServer[numberOfBookies]; - - for(int i = 0; i < numberOfBookies; i++) { - tmpDirs[i] = File.createTempFile("bookie" + Integer.toString(i), "test"); - tmpDirs[i].delete(); - tmpDirs[i].mkdir(); - - bs[i] = new BookieServer(initialPort + i, InetAddress.getLocalHost().getHostAddress() + ":" - + ZooKeeperDefaultPort, tmpDirs[i], new File[]{tmpDirs[i]}); - bs[i].start(); - } - } - - public static void main(String[] args) throws IOException, InterruptedException { - if(args.length < 1) { - usage(); - System.exit(-1); - } - LocalBookKeeper lb = new LocalBookKeeper(Integer.parseInt(args[0])); - lb.runZookeeper(1000); - lb.initializeZookeper(); - lb.runBookies(); - while (true) { - Thread.sleep(5000); - } - } - - private static void usage() { - System.err.println("Usage: LocalBookKeeper number-of-bookies"); - } - - /* User for testing purposes, void */ - class emptyWatcher implements Watcher{ - public void process(WatchedEvent event) {} - } - - public static boolean waitForServerUp(String hp, long timeout) { - long start = System.currentTimeMillis(); - String split[] = hp.split(":"); - String host = split[0]; - int port = Integer.parseInt(split[1]); - while (true) { - try { - Socket sock = new Socket(host, port); - BufferedReader reader = null; - try { - OutputStream outstream = sock.getOutputStream(); - outstream.write("stat".getBytes()); - outstream.flush(); - - reader = - new BufferedReader( - new InputStreamReader(sock.getInputStream())); - String line = reader.readLine(); - if (line != null && line.startsWith("Zookeeper version:")) { - return true; - } - } finally { - sock.close(); - if (reader != null) { - reader.close(); - } - } - } catch (IOException e) { - // ignore as this is expected - } - - if (System.currentTimeMillis() > start + timeout) { - break; - } - try { - Thread.sleep(250); - } catch (InterruptedException e) { - // ignore - } - } - return false; - } - -} diff --git a/akka-cluster/src/main/java/akka/cluster/zookeeper/DistributedQueue.java b/akka-cluster/src/main/java/akka/cluster/zookeeper/DistributedQueue.java deleted file mode 100644 index 7bb87bc414..0000000000 --- a/akka-cluster/src/main/java/akka/cluster/zookeeper/DistributedQueue.java +++ /dev/null @@ -1,312 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package akka.cluster.zookeeper; - -import java.util.List; -import java.util.NoSuchElementException; -import java.util.TreeMap; -import java.util.concurrent.CountDownLatch; - -import org.apache.log4j.Logger; -import org.apache.zookeeper.CreateMode; -import org.apache.zookeeper.KeeperException; -import org.apache.zookeeper.WatchedEvent; -import org.apache.zookeeper.Watcher; -import org.apache.zookeeper.ZooDefs; -import org.apache.zookeeper.ZooKeeper; -import org.apache.zookeeper.data.ACL; -import org.apache.zookeeper.data.Stat; - -/** - * - * A protocol to implement a distributed queue. - * - */ - -public class DistributedQueue { - private static final Logger LOG = Logger.getLogger(DistributedQueue.class); - - private final String dir; - - private ZooKeeper zookeeper; - private List<ACL> acl = ZooDefs.Ids.OPEN_ACL_UNSAFE; - - private final String prefix = "qn-"; - - - public DistributedQueue(ZooKeeper zookeeper, String dir, List<ACL> acl) { - this.dir = dir; - - if(acl != null) { - this.acl = acl; - } - this.zookeeper = zookeeper; - - } - - - - /** - * Returns a Map of the children, ordered by id. - * @param watcher optional watcher on getChildren() operation. - * @return map from id to child name for all children - */ - private TreeMap<Long,String> orderedChildren(Watcher watcher) throws KeeperException, InterruptedException { - TreeMap<Long,String> orderedChildren = new TreeMap<Long,String>(); - - List<String> childNames = null; - try{ - childNames = zookeeper.getChildren(dir, watcher); - }catch (KeeperException.NoNodeException e) { - throw e; - } - - for(String childName : childNames) { - try{ - //Check format - if(!childName.regionMatches(0, prefix, 0, prefix.length())) { - LOG.warn("Found child node with improper name: " + childName); - continue; - } - String suffix = childName.substring(prefix.length()); - Long childId = new Long(suffix); - orderedChildren.put(childId,childName); - }catch(NumberFormatException e) { - LOG.warn("Found child node with improper format : " + childName + " " + e,e); - } - } - - return orderedChildren; - } - - /** - * Find the smallest child node. - * @return The name of the smallest child node. - */ - private String smallestChildName() throws KeeperException, InterruptedException { - long minId = Long.MAX_VALUE; - String minName = ""; - - List<String> childNames = null; - - try{ - childNames = zookeeper.getChildren(dir, false); - }catch(KeeperException.NoNodeException e) { - LOG.warn("Caught: " +e,e); - return null; - } - - for(String childName : childNames) { - try{ - //Check format - if(!childName.regionMatches(0, prefix, 0, prefix.length())) { - LOG.warn("Found child node with improper name: " + childName); - continue; - } - String suffix = childName.substring(prefix.length()); - long childId = Long.parseLong(suffix); - if(childId < minId) { - minId = childId; - minName = childName; - } - }catch(NumberFormatException e) { - LOG.warn("Found child node with improper format : " + childName + " " + e,e); - } - } - - - if(minId < Long.MAX_VALUE) { - return minName; - }else{ - return null; - } - } - - /** - * Return the head of the queue without modifying the queue. - * @return the data at the head of the queue. - * @throws NoSuchElementException - * @throws KeeperException - * @throws InterruptedException - */ - public byte[] element() throws NoSuchElementException, KeeperException, InterruptedException { - TreeMap<Long,String> orderedChildren; - - // element, take, and remove follow the same pattern. - // We want to return the child node with the smallest sequence number.
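[Editor's note: the smallest-sequence-number lookup with retry that this comment describes can be sketched in Scala roughly as follows. The sketch is illustrative only; zk, dir and the "qn-" prefix stand in for the fields of DistributedQueue above, and none of this is part of the patch:

    import java.util.NoSuchElementException
    import org.apache.zookeeper.{ KeeperException, ZooKeeper }
    import scala.collection.JavaConversions._

    def element(zk: ZooKeeper, dir: String, prefix: String = "qn-"): Array[Byte] = {
      while (true) {
        // re-list the children; PERSISTENT_SEQUENTIAL appends an increasing numeric suffix
        val children =
          try zk.getChildren(dir, false).toList
          catch { case e: KeeperException.NoNodeException ⇒ throw new NoSuchElementException }
        val ordered = children.filter(_.startsWith(prefix)).sortBy(_.stripPrefix(prefix).toLong)
        if (ordered.isEmpty) throw new NoSuchElementException
        for (head ← ordered) {
          try return zk.getData(dir + "/" + head, false, null)   // smallest live child wins
          catch { case e: KeeperException.NoNodeException ⇒ () } // consumed concurrently, try next
        }
      }
      throw new IllegalStateException("unreachable")
    }

The comment continues below with the reasoning behind the retry loop.]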
- // Since other clients are remove()ing and take()ing nodes concurrently, - // the child with the smallest sequence number in orderedChildren might be gone by the time we check. - // We don't call getChildren again until we have tried the rest of the nodes in sequence order. - while(true) { - try{ - orderedChildren = orderedChildren(null); - }catch(KeeperException.NoNodeException e) { - throw new NoSuchElementException(); - } - if(orderedChildren.size() == 0 ) throw new NoSuchElementException(); - - for(String headNode : orderedChildren.values()) { - if(headNode != null) { - try{ - return zookeeper.getData(dir+"/"+headNode, false, null); - }catch(KeeperException.NoNodeException e) { - //Another client removed the node first, try next - } - } - } - - } - } - - - /** - * Attempts to remove the head of the queue and return it. - * @return The former head of the queue - * @throws NoSuchElementException - * @throws KeeperException - * @throws InterruptedException - */ - public byte[] remove() throws NoSuchElementException, KeeperException, InterruptedException { - TreeMap<Long,String> orderedChildren; - // Same as for element. Should refactor this. - while(true) { - try{ - orderedChildren = orderedChildren(null); - }catch(KeeperException.NoNodeException e) { - throw new NoSuchElementException(); - } - if(orderedChildren.size() == 0) throw new NoSuchElementException(); - - for(String headNode : orderedChildren.values()) { - String path = dir +"/"+headNode; - try{ - byte[] data = zookeeper.getData(path, false, null); - zookeeper.delete(path, -1); - return data; - }catch(KeeperException.NoNodeException e) { - // Another client deleted the node first. - } - } - - } - } - - private class LatchChildWatcher implements Watcher { - - CountDownLatch latch; - - public LatchChildWatcher() { - latch = new CountDownLatch(1); - } - - public void process(WatchedEvent event) { - LOG.debug("Watcher fired on path: " + event.getPath() + " state: " + - event.getState() + " type " + event.getType()); - latch.countDown(); - } - public void await() throws InterruptedException { - latch.await(); - } - } - - /** - * Removes the head of the queue and returns it, blocks until it succeeds. - * @return The former head of the queue - * @throws NoSuchElementException - * @throws KeeperException - * @throws InterruptedException - */ - public byte[] take() throws KeeperException, InterruptedException { - TreeMap<Long,String> orderedChildren; - // Same as for element. Should refactor this. - while(true) { - LatchChildWatcher childWatcher = new LatchChildWatcher(); - try{ - orderedChildren = orderedChildren(childWatcher); - }catch(KeeperException.NoNodeException e) { - zookeeper.create(dir, new byte[0], acl, CreateMode.PERSISTENT); - continue; - } - if(orderedChildren.size() == 0) { - childWatcher.await(); - continue; - } - - for(String headNode : orderedChildren.values()) { - String path = dir +"/"+headNode; - try{ - byte[] data = zookeeper.getData(path, false, null); - zookeeper.delete(path, -1); - return data; - }catch(KeeperException.NoNodeException e) { - // Another client deleted the node first. - } - } - } - } - - /** - * Inserts data into queue.
- * @param data - * @return true if data was successfully added - */ - public boolean offer(byte[] data) throws KeeperException, InterruptedException{ - for(;;) { - try{ - zookeeper.create(dir+"/"+prefix, data, acl, CreateMode.PERSISTENT_SEQUENTIAL); - return true; - }catch(KeeperException.NoNodeException e) { - zookeeper.create(dir, new byte[0], acl, CreateMode.PERSISTENT); - } - } - - } - - /** - * Returns the data at the first element of the queue, or null if the queue is empty. - * @return data at the first element of the queue, or null. - * @throws KeeperException - * @throws InterruptedException - */ - public byte[] peek() throws KeeperException, InterruptedException{ - try{ - return element(); - }catch(NoSuchElementException e) { - return null; - } - } - - - /** - * Attempts to remove the head of the queue and return it. Returns null if the queue is empty. - * @return Head of the queue or null. - * @throws KeeperException - * @throws InterruptedException - */ - public byte[] poll() throws KeeperException, InterruptedException { - try{ - return remove(); - }catch(NoSuchElementException e) { - return null; - } - } - - - -} diff --git a/akka-cluster/src/main/java/akka/cluster/zookeeper/ZooKeeperQueue.java b/akka-cluster/src/main/java/akka/cluster/zookeeper/ZooKeeperQueue.java deleted file mode 100644 index 8867d97e00..0000000000 --- a/akka-cluster/src/main/java/akka/cluster/zookeeper/ZooKeeperQueue.java +++ /dev/null @@ -1,173 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -package akka.cluster.zookeeper; - -import java.io.Serializable; -import java.util.List; -import java.util.ArrayList; - -import org.I0Itec.zkclient.ExceptionUtil; -import org.I0Itec.zkclient.IZkChildListener; -import org.I0Itec.zkclient.ZkClient; -import org.I0Itec.zkclient.exception.ZkNoNodeException; - -public class ZooKeeperQueue<T extends Serializable> { - - protected static class Element<T> { - private String _name; - private T _data; - - public Element(String name, T data) { - _name = name; - _data = data; - } - - public String getName() { - return _name; - } - - public T getData() { - return _data; - } - } - - protected final ZkClient _zkClient; - private final String _elementsPath; - private final String _rootPath; - private final boolean _isBlocking; - - public ZooKeeperQueue(ZkClient zkClient, String rootPath, boolean isBlocking) { - _zkClient = zkClient; - _rootPath = rootPath; - _isBlocking = isBlocking; - _elementsPath = rootPath + "/queue"; - if (!_zkClient.exists(rootPath)) { - _zkClient.createPersistent(rootPath, true); - _zkClient.createPersistent(_elementsPath, true); - } - } - - public String enqueue(T element) { - try { - String sequential = _zkClient.createPersistentSequential(getElementRoughPath(), element); - String elementId = sequential.substring(sequential.lastIndexOf('/') + 1); - return elementId; - } catch (Exception e) { - throw ExceptionUtil.convertToRuntimeException(e); - } - } - - public T dequeue() throws InterruptedException { - if (_isBlocking) { - Element<T> element = getFirstElement(); - _zkClient.delete(getElementPath(element.getName())); - return element.getData(); - } else { - throw new UnsupportedOperationException("Non-blocking ZooKeeperQueue is not yet supported"); - /* FIXME DOES NOT WORK - try { - String headName = getSmallestElement(_zkClient.getChildren(_elementsPath)); - String headPath = getElementPath(headName); - return (T) _zkClient.readData(headPath); - } catch (ZkNoNodeException e) { - return null; - } - */ - } - } - - public boolean containsElement(String elementId) { - String
zkPath = getElementPath(elementId); - return _zkClient.exists(zkPath); - } - - public T peek() throws InterruptedException { - Element<T> element = getFirstElement(); - if (element == null) { - return null; - } - return element.getData(); - } - - @SuppressWarnings("unchecked") - public List<T> getElements() { - List<String> paths = _zkClient.getChildren(_elementsPath); - List<T> elements = new ArrayList<T>(); - for (String path: paths) { - elements.add((T)_zkClient.readData(path)); - } - return elements; - } - - public int size() { - return _zkClient.getChildren(_elementsPath).size(); - } - - public void clear() { - _zkClient.deleteRecursive(_rootPath); - } - - public boolean isEmpty() { - return size() == 0; - } - - private String getElementRoughPath() { - return getElementPath("item" + "-"); - } - - private String getElementPath(String elementId) { - return _elementsPath + "/" + elementId; - } - - private String getSmallestElement(List<String> list) { - String smallestElement = list.get(0); - for (String element : list) { - if (element.compareTo(smallestElement) < 0) { - smallestElement = element; - } - } - return smallestElement; - } - - @SuppressWarnings("unchecked") - protected Element<T> getFirstElement() throws InterruptedException { - final Object mutex = new Object(); - IZkChildListener notifyListener = new IZkChildListener() { - @Override - public void handleChildChange(String parentPath, List<String> currentChilds) throws Exception { - synchronized (mutex) { - mutex.notify(); - } - } - }; - try { - while (true) { - List<String> elementNames; - synchronized (mutex) { - elementNames = _zkClient.subscribeChildChanges(_elementsPath, notifyListener); - while (elementNames == null || elementNames.isEmpty()) { - mutex.wait(); - elementNames = _zkClient.getChildren(_elementsPath); - } - } - String elementName = getSmallestElement(elementNames); - try { - String elementPath = getElementPath(elementName); - return new Element<T>(elementName, (T) _zkClient.readData(elementPath)); - } catch (ZkNoNodeException e) { - // somebody else picked up the element first, so we have to - // retry with the new first element - } - } - } catch (InterruptedException e) { - throw e; - } catch (Exception e) { - throw ExceptionUtil.convertToRuntimeException(e); - } finally { - _zkClient.unsubscribeChildChanges(_elementsPath, notifyListener); - } - } - -} diff --git a/akka-remote/src/main/scala/akka/remote/AccrualFailureDetector.scala b/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala similarity index 99% rename from akka-remote/src/main/scala/akka/remote/AccrualFailureDetector.scala rename to akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala index 1c9cb45c08..892f7a026d 100644 --- a/akka-remote/src/main/scala/akka/remote/AccrualFailureDetector.scala +++ b/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala @@ -2,16 +2,16 @@ * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.remote +package akka.cluster + +import akka.actor.{ ActorSystem, Address } +import akka.event.Logging -import java.util.concurrent.atomic.AtomicReference import scala.collection.immutable.Map import scala.annotation.tailrec -import System.{ currentTimeMillis ⇒ newTimestamp } -import akka.actor.{ ActorSystem, Address } -import akka.actor.ActorSystem -import akka.event.Logging +import java.util.concurrent.atomic.AtomicReference +import System.{ currentTimeMillis ⇒ newTimestamp } /** * Implementation of 'The Phi Accrual Failure Detector' by Hayashibara et al.
as defined in their paper: diff --git a/akka-cluster/src/main/scala/akka/cluster/BookKeeperServer.scala b/akka-cluster/src/main/scala/akka/cluster/BookKeeperServer.scala deleted file mode 100644 index 679af24d03..0000000000 --- a/akka-cluster/src/main/scala/akka/cluster/BookKeeperServer.scala +++ /dev/null @@ -1,35 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ -package akka.cluster - -import org.apache.bookkeeper.proto.BookieServer - -import java.io.File - -/* -A simple use of BookKeeper is to implement a write-ahead transaction log. A server maintains an in-memory data structure -(with periodic snapshots for example) and logs changes to that structure before it applies the change. The system -server creates a ledger at startup and stores the ledger id and password in a well-known place (ZooKeeper maybe). When -it needs to make a change, the server adds an entry with the change information to a ledger and applies the change once -BookKeeper has added the entry successfully. The server can even use asyncAddEntry to queue up many changes for high change -throughput. BookKeeper meticulously logs the changes in order and calls the completion functions in order. - -When the system server dies, a backup server will come online, fetch the last snapshot and then open the -ledger of the old server and read all the entries from the time the snapshot was taken. (Since it doesn't know the last -entry number it will use MAX_INTEGER). Once all the entries have been processed, it will close the ledger and start a -new one for its use. -*/ - -object BookKeeperServer { - val port = 3181 - val zkServers = "localhost:2181" - val journal = new File("./bk/journal") - val ledgers = Array(new File("./bk/ledger")) - val bookie = new BookieServer(port, zkServers, journal, ledgers) - - def start() { - bookie.start() - bookie.join() - } -}
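[Editor's note: the write-ahead-log pattern described in the comment above can be sketched as follows. This is an illustrative sketch only; the client calls reflect an assumed org.apache.bookkeeper.client API of that era and are not part of the patch:

    import org.apache.bookkeeper.client.{ BookKeeper, LedgerHandle }
    import org.apache.bookkeeper.client.BookKeeper.DigestType

    val bk = new BookKeeper("localhost:2181")
    val passwd = "secret".getBytes

    // on startup: create a ledger and publish its id in a well-known place (e.g. ZooKeeper)
    val ledger: LedgerHandle = bk.createLedger(DigestType.MAC, passwd)
    val ledgerId = ledger.getId

    // log each change before applying it; apply only once the add has succeeded
    def logThenApply(change: Array[Byte])(apply: Array[Byte] ⇒ Unit): Unit = {
      ledger.addEntry(change) // synchronous; asyncAddEntry queues up many changes for throughput
      apply(change)
    }

    // failover: a backup opens the old server's ledger and replays all logged entries
    def replay(oldLedgerId: Long)(apply: Array[Byte] ⇒ Unit): Unit = {
      val lh = bk.openLedger(oldLedgerId, DigestType.MAC, passwd)
      val entries = lh.readEntries(0, lh.getLastAddConfirmed)
      while (entries.hasMoreElements) apply(entries.nextElement().getEntry)
      lh.close()
    }
]

diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala deleted file mode 100644 index 130149b491..0000000000 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ /dev/null @@ -1,1876 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc.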
- */ - -package akka.cluster - -import org.apache.zookeeper._ -import org.apache.zookeeper.Watcher.Event._ -import org.apache.zookeeper.data.Stat -import org.apache.zookeeper.recipes.lock.{ WriteLock, LockListener } - -import org.I0Itec.zkclient._ -import org.I0Itec.zkclient.serialize._ -import org.I0Itec.zkclient.exception._ - -import java.util.{ List ⇒ JList } -import java.util.concurrent.atomic.{ AtomicBoolean, AtomicReference } -import java.util.concurrent.{ CopyOnWriteArrayList, Callable, ConcurrentHashMap } -import javax.management.StandardMBean -import java.net.InetSocketAddress - -import scala.collection.mutable.ConcurrentMap -import scala.collection.JavaConversions._ -import scala.annotation.tailrec - -import akka.util._ -import duration._ -import Helpers._ - -import akka.actor._ -import Actor._ -import Status._ -import DeploymentConfig._ - -import akka.event.EventHandler -import akka.config.Config -import akka.config.Config._ - -import akka.serialization.{ Serialization, Serializer, ActorSerialization, Compression } -import ActorSerialization._ -import Compression.LZF - -import akka.routing._ -import akka.cluster._ -import akka.cluster.metrics._ -import akka.cluster.zookeeper._ -import ChangeListener._ -import RemoteProtocol._ -import RemoteSystemDaemonMessageType._ - -import com.eaio.uuid.UUID - -import com.google.protobuf.ByteString -import akka.dispatch.{Await, Dispatchers, Future, PinnedDispatcher} - -// FIXME add watch for each node that when the entry for the node is removed then the node shuts itself down - -/** - * JMX MBean for the cluster service. - */ -trait ClusterNodeMBean { - - def stop() - - def disconnect() - - def reconnect() - - def resign() - - def getRemoteServerHostname: String - - def getRemoteServerPort: Int - - def getNodeName: String - - def getClusterName: String - - def getZooKeeperServerAddresses: String - - def getMemberNodes: Array[String] - - def getNodeAddress(): NodeAddress - - def getLeaderLockName: String - - def isLeader: Boolean - - def getUuidsForClusteredActors: Array[String] - - def getAddressesForClusteredActors: Array[String] - - def getUuidsForActorsInUse: Array[String] - - def getAddressesForActorsInUse: Array[String] - - def getNodesForActorInUseWithAddress(address: String): Array[String] - - def getUuidsForActorsInUseOnNode(nodeName: String): Array[String] - - def getAddressesForActorsInUseOnNode(nodeName: String): Array[String] - - def setConfigElement(key: String, value: String) - - def getConfigElement(key: String): AnyRef - - def removeConfigElement(key: String) - - def getConfigElementKeys: Array[String] - - def getMembershipPathFor(node: String): String - - def getConfigurationPathFor(key: String): String - - def getActorAddresstoNodesPathFor(actorAddress: String): String - - def getActorAddressToNodesPathForWithNodeName(actorAddress: String, nodeName: String): String - - def getNodeToUuidsPathFor(node: String): String - - // FIXME All MBean methods that take a UUID are useless, change to String - def getNodeToUuidsPathFor(node: String, uuid: UUID): String - - def getActorAddressRegistryPathFor(actorAddress: String): String - - def getActorAddressRegistrySerializerPathFor(actorAddress: String): String - - def getActorAddressRegistryUuidPathFor(actorAddress: String): String - - def getActorUuidRegistryNodePathFor(uuid: UUID): String - - def getActorUuidRegistryRemoteAddressPathFor(uuid: UUID): String - - def getActorAddressToUuidsPathFor(actorAddress: String): String - - def getActorAddressToUuidsPathForWithNodeName(actorAddress: 
String, uuid: UUID): String -} - -/** - * Module for the Cluster. Also holds global state such as configuration data etc. - */ -object Cluster { - val EMPTY_STRING = "".intern - - // config options - val name = Config.clusterName - val zooKeeperServers = config.getString("akka.cluster.zookeeper-server-addresses", "localhost:2181") - val remoteServerPort = config.getInt("akka.remote.server.port", 2552) - val sessionTimeout = Duration(config.getInt("akka.cluster.session-timeout", 60), TIME_UNIT).toMillis.toInt - val metricsRefreshInterval = Duration(config.getInt("akka.cluster.metrics-refresh-timeout", 2), TIME_UNIT) - val connectionTimeout = Duration(config.getInt("akka.cluster.connection-timeout", 60), TIME_UNIT).toMillis.toInt - val maxTimeToWaitUntilConnected = Duration(config.getInt("akka.cluster.max-time-to-wait-until-connected", 30), TIME_UNIT).toMillis.toInt - val shouldCompressData = config.getBool("akka.remote.use-compression", false) - val enableJMX = config.getBool("akka.enable-jmx", true) - val remoteDaemonAckTimeout = Duration(config.getInt("akka.remote.remote-daemon-ack-timeout", 30), TIME_UNIT).toMillis.toInt - val includeRefNodeInReplicaSet = config.getBool("akka.cluster.include-ref-node-in-replica-set", true) - - @volatile - private var properties = Map.empty[String, String] - - /** - * Use to override JVM options such as -Dakka.cluster.nodename=node1 etc. - * Currently supported options are: - *

-   *   Cluster setProperty ("akka.cluster.nodename", "node1")
-   *   Cluster setProperty ("akka.remote.hostname", "darkstar.lan")
-   *   Cluster setProperty ("akka.remote.port", "1234")
-   * 
- */ - def setProperty(property: (String, String)) { - properties = properties + property - } - - private def nodename: String = properties.get("akka.cluster.nodename") match { - case Some(uberride) ⇒ uberride - case None ⇒ Config.nodename - } - - private def hostname: String = properties.get("akka.remote.hostname") match { - case Some(uberride) ⇒ uberride - case None ⇒ Config.hostname - } - - private def port: Int = properties.get("akka.remote.port") match { - case Some(uberride) ⇒ uberride.toInt - case None ⇒ Config.remoteServerPort - } - - val defaultZooKeeperSerializer = new SerializableSerializer - - /** - * The node address. - */ - val nodeAddress = NodeAddress(name, nodename) - - /** - * The reference to the running ClusterNode. - */ - val node = { - if (nodeAddress eq null) throw new IllegalArgumentException("NodeAddress can't be null") - new DefaultClusterNode(nodeAddress, hostname, port, zooKeeperServers, defaultZooKeeperSerializer) - } - - /** - * Creates a new AkkaZkClient. - */ - def newZkClient(): AkkaZkClient = new AkkaZkClient(zooKeeperServers, sessionTimeout, connectionTimeout, defaultZooKeeperSerializer) - - def uuidToString(uuid: UUID): String = uuid.toString - - def stringToUuid(uuid: String): UUID = { - if (uuid eq null) throw new ClusterException("UUID is null") - if (uuid == "") throw new ClusterException("UUID is an empty string") - try { - new UUID(uuid) - } catch { - case e: StringIndexOutOfBoundsException ⇒ - val error = new ClusterException("UUID not valid [" + uuid + "]") - EventHandler.error(error, this, "") - throw error - } - } - - def uuidProtocolToUuid(uuid: UuidProtocol): UUID = new UUID(uuid.getHigh, uuid.getLow) - - def uuidToUuidProtocol(uuid: UUID): UuidProtocol = - UuidProtocol.newBuilder - .setHigh(uuid.getTime) - .setLow(uuid.getClockSeqAndNode) - .build -} - -/** - * A Cluster is made up of a number of JVMs, each running a ClusterNode. - * - * This is the path tree holding the cluster meta-data in ZooKeeper. - * - * Syntax: foo means a variable string, 'foo' means a symbol that does not change, and "data" in foo[data] means the value (in bytes) for the node "foo" - * - *
- *   /clusterName/'members'/nodeName
- *   /clusterName/'config'/key[bytes]
- *
- *   /clusterName/'actor-address-to-nodes'/actorAddress/nodeName
- *   /clusterName/'actors-node-to-uuids'/nodeName/actorUuid
- *
- *   /clusterName/'actor-address-registry'/actorAddress/'serializer'[serializerName]
- *   /clusterName/'actor-address-registry'/actorAddress/'uuid'[actorUuid]
- *
- *   /clusterName/'actor-uuid-registry'/actorUuid/'node'[nodeName]
- *   /clusterName/'actor-uuid-registry'/actorUuid/'node'/ip:port
- *   /clusterName/'actor-uuid-registry'/actorUuid/'address'[actorAddress]
- *
- *   /clusterName/'actor-address-to-uuids'/actorAddress/actorUuid
- * 
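- *
- *   (Editor's illustration, not present in the original source: in a cluster named "test",
- *   a node "node1" hosting an actor with address "hello" would produce paths such as
- *
- *     /test/members/node1
- *     /test/actor-address-to-nodes/hello/node1
- *     /test/actor-address-registry/hello/'uuid'[actorUuid]
- *
- *   following the syntax convention above.)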
- */ -class DefaultClusterNode private[akka] ( - val nodeAddress: NodeAddress, - val hostname: String = Config.hostname, - val port: Int = Config.remoteServerPort, - val zkServerAddresses: String, - val serializer: ZkSerializer) extends ErrorHandler with ClusterNode { - self ⇒ - - if ((hostname eq null) || hostname == "") throw new NullPointerException("Host name must not be null or empty string") - if (port < 1) throw new NullPointerException("Port can not be negative") - if (nodeAddress eq null) throw new IllegalArgumentException("'nodeAddress' can not be 'null'") - - val clusterJmxObjectName = JMX.nameFor(hostname, "monitoring", "cluster") - - import Cluster._ - - // private val connectToAllNewlyArrivedMembershipNodesInClusterLock = new AtomicBoolean(false) - - private[cluster] lazy val remoteClientLifeCycleHandler = actorOf(Props(new Actor { - def receive = { - case RemoteClientError(cause, client, address) ⇒ client.shutdownClientModule() - case RemoteClientDisconnected(client, address) ⇒ client.shutdownClientModule() - case _ ⇒ //ignore other - } - }), "akka.cluster.RemoteClientLifeCycleListener") - - private[cluster] lazy val remoteDaemon = new LocalActorRef(Props(new RemoteClusterDaemon(this)).copy(dispatcher = new PinnedDispatcher()), RemoteClusterDaemon.Address, systemService = true) - - private[cluster] lazy val remoteDaemonSupervisor = Supervisor( - SupervisorConfig( - OneForOneStrategy(List(classOf[Exception]), Int.MaxValue, Int.MaxValue), // is infinite restart what we want? - Supervise( - remoteDaemon, - Permanent) - :: Nil)).start() - - lazy val remoteService: RemoteSupport = { - val remote = new akka.remote.netty.NettyRemoteSupport - remote.start(hostname, port) - remote.register(RemoteClusterDaemon.Address, remoteDaemon) - remote.addListener(RemoteFailureDetector.sender) - remote.addListener(remoteClientLifeCycleHandler) - remote - } - - lazy val remoteServerAddress: InetSocketAddress = remoteService.address - - lazy val metricsManager: NodeMetricsManager = new LocalNodeMetricsManager(zkClient, Cluster.metricsRefreshInterval).start() - - // static nodes - val CLUSTER_PATH = "/" + nodeAddress.clusterName - val MEMBERSHIP_PATH = CLUSTER_PATH + "/members" - val CONFIGURATION_PATH = CLUSTER_PATH + "/config" - val PROVISIONING_PATH = CLUSTER_PATH + "/provisioning" - val ACTOR_ADDRESS_NODES_TO_PATH = CLUSTER_PATH + "/actor-address-to-nodes" - val ACTOR_ADDRESS_REGISTRY_PATH = CLUSTER_PATH + "/actor-address-registry" - val ACTOR_UUID_REGISTRY_PATH = CLUSTER_PATH + "/actor-uuid-registry" - val ACTOR_ADDRESS_TO_UUIDS_PATH = CLUSTER_PATH + "/actor-address-to-uuids" - val NODE_TO_ACTOR_UUIDS_PATH = CLUSTER_PATH + "/node-to-actors-uuids" - val NODE_METRICS = CLUSTER_PATH + "/metrics" - - val basePaths = List( - CLUSTER_PATH, - MEMBERSHIP_PATH, - ACTOR_ADDRESS_REGISTRY_PATH, - ACTOR_UUID_REGISTRY_PATH, - ACTOR_ADDRESS_NODES_TO_PATH, - NODE_TO_ACTOR_UUIDS_PATH, - ACTOR_ADDRESS_TO_UUIDS_PATH, - CONFIGURATION_PATH, - PROVISIONING_PATH, - NODE_METRICS) - - val LEADER_ELECTION_PATH = CLUSTER_PATH + "/leader" // should NOT be part of 'basePaths' only used by 'leaderLock' - - private val membershipNodePath = membershipPathFor(nodeAddress.nodeName) - - def membershipNodes: Array[String] = locallyCachedMembershipNodes.toList.toArray.asInstanceOf[Array[String]] - - // zookeeper listeners - private val stateListener = new StateListener(this) - private val membershipListener = new MembershipChildListener(this) - - // cluster node listeners - private val changeListeners = new 
CopyOnWriteArrayList[ChangeListener]() - - // Address -> ClusterActorRef - private[akka] val clusterActorRefs = new Index[InetSocketAddress, ClusterActorRef] - - case class VersionedConnectionState(version: Long, connections: Map[String, Tuple2[InetSocketAddress, ActorRef]]) - - // all the connections to other nodes - private[akka] val nodeConnections = { - var conns = Map.empty[String, Tuple2[InetSocketAddress, ActorRef]] - // add the remote connection to 'this' node as well, but as a 'local' actor - if (includeRefNodeInReplicaSet) conns += (nodeAddress.nodeName -> (remoteServerAddress, remoteDaemon)) - new AtomicReference[VersionedConnectionState](VersionedConnectionState(0, conns)) - } - - private val isShutdownFlag = new AtomicBoolean(false) - - // ZooKeeper client - private[cluster] val zkClient = new AkkaZkClient(zkServerAddresses, sessionTimeout, connectionTimeout, serializer) - - // leader election listener, registered to the 'leaderLock' below - private[cluster] val leaderElectionCallback = new LockListener { - override def lockAcquired() { - EventHandler.info(this, "Node [%s] is the new leader".format(self.nodeAddress.nodeName)) - self.publish(NewLeader(self.nodeAddress.nodeName)) - } - - override def lockReleased() { - EventHandler.info(this, "Node [%s] is *NOT* the leader anymore".format(self.nodeAddress.nodeName)) - } - } - - // leader election lock in ZooKeeper - private[cluster] val leaderLock = new WriteLock( - zkClient.connection.getZookeeper, - LEADER_ELECTION_PATH, null, - leaderElectionCallback) - - if (enableJMX) createMBean - - boot() - - // ======================================= - // Node - // ======================================= - - private[cluster] def boot() { - EventHandler.info(this, - ("\nCreating cluster node with" + - "\n\tcluster name = [%s]" + - "\n\tnode name = [%s]" + - "\n\tport = [%s]" + - "\n\tzookeeper server addresses = [%s]" + - "\n\tserializer = [%s]") - .format(nodeAddress.clusterName, nodeAddress.nodeName, port, zkServerAddresses, serializer)) - EventHandler.info(this, "Starting up remote server [%s]".format(remoteServerAddress.toString)) - createZooKeeperPathStructureIfNeeded() - registerListeners() - joinCluster() - joinLeaderElection() - fetchMembershipNodes() - EventHandler.info(this, "Cluster node [%s] started successfully".format(nodeAddress)) - } - - def isShutdown = isShutdownFlag.get - - def start() {} - - def shutdown() { - isShutdownFlag.set(true) - - def shutdownNode() { - ignore[ZkNoNodeException](zkClient.deleteRecursive(membershipNodePath)) - - locallyCachedMembershipNodes.clear() - - nodeConnections.get.connections.toList.foreach({ - case (_, (address, _)) ⇒ - Actor.remote.shutdownClientConnection(address) // shut down client connections - }) - - remoteService.shutdown() // shutdown server - - RemoteFailureDetector.sender.stop() - remoteClientLifeCycleHandler.stop() - remoteDaemon.stop() - - // for monitoring remote listener - registry.local.actors.filter(remoteService.hasListener).foreach(_.stop()) - - nodeConnections.set(VersionedConnectionState(0, Map.empty[String, Tuple2[InetSocketAddress, ActorRef]])) - - disconnect() - EventHandler.info(this, "Cluster node shut down [%s]".format(nodeAddress)) - } - - shutdownNode() - } - - def disconnect(): ClusterNode = { - zkClient.unsubscribeAll() - zkClient.close() - this - } - - def reconnect(): ClusterNode = { - zkClient.reconnect() - this - } - - // ======================================= - // Change notification - // ======================================= - - /** - * Registers a 
cluster change listener. - */ - def register(listener: ChangeListener): ClusterNode = { - changeListeners.add(listener) - this - } - - private[cluster] def publish(change: ChangeNotification) { - changeListeners.iterator.foreach(_.notify(change, this)) - } - - // ======================================= - // Leader - // ======================================= - - /** - * Returns the name of the current leader lock. - */ - def leader: String = leaderLock.getId - - /** - * Returns true if 'this' node is the current leader. - */ - def isLeader: Boolean = leaderLock.isOwner - - /** - * Explicitly resign from being a leader. If this node is not a leader then this operation is a no-op. - */ - def resign() { - if (isLeader) leaderLock.unlock() - } - - // ======================================= - // Actor - // ======================================= - - /** - * Clusters an actor of a specific type. If the actor is already clustered then the clustered version will be updated - * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly - * available durable store. - */ - def store[T <: Actor](actorAddress: String, actorClass: Class[T], serializer: Serializer): ClusterNode = - store(actorAddress, () ⇒ Actor.actorOf(actorClass, actorAddress), 0, Transient, false, serializer) - - /** - * Clusters an actor of a specific type. If the actor is already clustered then the clustered version will be updated - * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly - * available durable store. - */ - def store[T <: Actor](actorAddress: String, actorClass: Class[T], replicationScheme: ReplicationScheme, serializer: Serializer): ClusterNode = - store(actorAddress, () ⇒ Actor.actorOf(actorClass, actorAddress), 0, replicationScheme, false, serializer) - - /** - * Clusters an actor of a specific type. If the actor is already clustered then the clustered version will be updated - * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly - * available durable store. - */ - def store[T <: Actor](actorAddress: String, actorClass: Class[T], nrOfInstances: Int, serializer: Serializer): ClusterNode = - store(actorAddress, () ⇒ Actor.actorOf(actorClass, actorAddress), nrOfInstances, Transient, false, serializer) - - /** - * Clusters an actor of a specific type. If the actor is already clustered then the clustered version will be updated - * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly - * available durable store. - */ - def store[T <: Actor](actorAddress: String, actorClass: Class[T], nrOfInstances: Int, replicationScheme: ReplicationScheme, serializer: Serializer): ClusterNode = - store(actorAddress, () ⇒ Actor.actorOf(actorClass, actorAddress), nrOfInstances, replicationScheme, false, serializer) - - /** - * Clusters an actor of a specific type. If the actor is already clustered then the clustered version will be updated - * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly - * available durable store. - */ - def store[T <: Actor](actorAddress: String, actorClass: Class[T], serializeMailbox: Boolean, serializer: Serializer): ClusterNode = - store(actorAddress, () ⇒ Actor.actorOf(actorClass, actorAddress), 0, Transient, serializeMailbox, serializer) - - /** - * Clusters an actor of a specific type. 
If the actor is already clustered then the clustered version will be updated - * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly - * available durable store. - */ - def store[T <: Actor](actorAddress: String, actorClass: Class[T], replicationScheme: ReplicationScheme, serializeMailbox: Boolean, serializer: Serializer): ClusterNode = - store(actorAddress, () ⇒ Actor.actorOf(actorClass, actorAddress), 0, replicationScheme, serializeMailbox, serializer) - - /** - * Clusters an actor of a specific type. If the actor is already clustered then the clustered version will be updated - * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly - * available durable store. - */ - def store[T <: Actor](actorAddress: String, actorClass: Class[T], nrOfInstances: Int, serializeMailbox: Boolean, serializer: Serializer): ClusterNode = - store(actorAddress, () ⇒ Actor.actorOf(actorClass, actorAddress), nrOfInstances, Transient, serializeMailbox, serializer) - - /** - * Clusters an actor of a specific type. If the actor is already clustered then the clustered version will be updated - * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly - * available durable store. - */ - def store[T <: Actor](actorAddress: String, actorClass: Class[T], nrOfInstances: Int, replicationScheme: ReplicationScheme, serializeMailbox: Boolean, serializer: Serializer): ClusterNode = - store(actorAddress, () ⇒ Actor.actorOf(actorClass, actorAddress), nrOfInstances, replicationScheme, serializeMailbox, serializer) - - /** - * Clusters an actor with a specific address. If the actor is already clustered then the clustered version will be updated - * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly - * available durable store. - */ - def store(actorAddress: String, actorFactory: () ⇒ ActorRef, serializer: Serializer): ClusterNode = - store(actorAddress, actorFactory, 0, Transient, false, serializer) - - /** - * Clusters an actor with a specific address. If the actor is already clustered then the clustered version will be updated - * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly - * available durable store. - */ - def store(actorAddress: String, actorFactory: () ⇒ ActorRef, serializeMailbox: Boolean, serializer: Serializer): ClusterNode = - store(actorAddress, actorFactory, 0, Transient, serializeMailbox, serializer) - - /** - * Clusters an actor with a specific address. If the actor is already clustered then the clustered version will be updated - * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly - * available durable store. - */ - def store(actorAddress: String, actorFactory: () ⇒ ActorRef, replicationScheme: ReplicationScheme, serializer: Serializer): ClusterNode = - store(actorAddress, actorFactory, 0, replicationScheme, false, serializer) - - /** - * Clusters an actor with a specific address. If the actor is already clustered then the clustered version will be updated - * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly - * available durable store. - */ - def store(actorAddress: String, actorFactory: () ⇒ ActorRef, nrOfInstances: Int, serializer: Serializer): ClusterNode = - store(actorAddress, actorFactory, nrOfInstances, Transient, false, serializer)
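For orientation, a hypothetical call site exercising two of the overloads above (the actor class "HelloActor", the address and the serializer value are illustrative, not part of this patch; "node" is assumed to be a started ClusterNode):

    // class-based: the node creates instances from the Class, three in the cluster
    node.store("hello-service", classOf[HelloActor], 3, serializer)
    // factory-based equivalent, delegating to the same six-argument overload
    node.store("hello-service", () ⇒ Actor.actorOf(classOf[HelloActor], "hello-service"), 3, serializer)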
- - /** - * Clusters an actor with a specific address. If the actor is already clustered then the clustered version will be updated - * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly - * available durable store. - */ - def store(actorAddress: String, actorFactory: () ⇒ ActorRef, nrOfInstances: Int, replicationScheme: ReplicationScheme, serializer: Serializer): ClusterNode = - store(actorAddress, actorFactory, nrOfInstances, replicationScheme, false, serializer) - - /** - * Clusters an actor with a specific address. If the actor is already clustered then the clustered version will be updated - * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly - * available durable store. - */ - def store(actorAddress: String, actorFactory: () ⇒ ActorRef, nrOfInstances: Int, serializeMailbox: Boolean, serializer: Serializer): ClusterNode = - store(actorAddress, actorFactory, nrOfInstances, Transient, serializeMailbox, serializer) - - /** - * Clusters an actor with a specific address. If the actor is already clustered then the clustered version will be updated - * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly - * available durable store. - */ - def store(actorAddress: String, actorFactory: () ⇒ ActorRef, replicationScheme: ReplicationScheme, serializeMailbox: Boolean, serializer: Serializer): ClusterNode = - store(actorAddress, actorFactory, 0, replicationScheme, serializeMailbox, serializer) - - /** - * Needed to make reflection through structural typing work. - */ - def store(actorAddress: String, actorFactory: () ⇒ ActorRef, nrOfInstances: Int, replicationScheme: ReplicationScheme, serializeMailbox: Boolean, serializer: AnyRef): ClusterNode = - store(actorAddress, actorFactory, nrOfInstances, replicationScheme, serializeMailbox, serializer.asInstanceOf[Serializer]) - - /** - * Needed to make reflection through structural typing work. - */ - def store(actorAddress: String, actorFactory: () ⇒ ActorRef, nrOfInstances: Int, serializeMailbox: Boolean, serializer: AnyRef): ClusterNode = - store(actorAddress, actorFactory, nrOfInstances, Transient, serializeMailbox, serializer)
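The "structural typing" remark above refers to callers that only hold a ClusterNode behind a structural type, where the serializer parameter cannot be typed as Serializer. A rough sketch of such a caller (the type alias and helper are illustrative):

    type Storage = {
      def store(actorAddress: String, actorFactory: () ⇒ ActorRef, nrOfInstances: Int,
                serializeMailbox: Boolean, serializer: AnyRef): ClusterNode
    }
    def storeVia(node: Storage, serializer: AnyRef) =
      node.store("hello-service", () ⇒ Actor.actorOf(classOf[HelloActor], "hello-service"), 3, false, serializer)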
- - /** - * Clusters an actor. If the actor is already clustered then the clustered version will be updated - * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly - * available durable store. - */ - def store( - actorAddress: String, - actorFactory: () ⇒ ActorRef, - nrOfInstances: Int, - replicationScheme: ReplicationScheme, - serializeMailbox: Boolean, - serializer: Serializer): ClusterNode = { - - EventHandler.debug(this, - "Storing actor with address [%s] in cluster".format(actorAddress)) - - val actorFactoryBytes = - Serialization.serialize(actorFactory) match { - case Left(error) ⇒ throw error - case Right(bytes) ⇒ - if (shouldCompressData) LZF.compress(bytes) - else bytes - } - - val actorAddressRegistryPath = actorAddressRegistryPathFor(actorAddress) - - // create ADDRESS -> Array[Byte] for actor registry - try { - zkClient.writeData(actorAddressRegistryPath, actorFactoryBytes) - } catch { - case e: ZkNoNodeException ⇒ // if not stored yet, store the actor - zkClient.retryUntilConnected(new Callable[Either[String, Exception]]() { - def call: Either[String, Exception] = { - try { - Left(zkClient.connection.create(actorAddressRegistryPath, actorFactoryBytes, CreateMode.PERSISTENT)) - } catch { - case e: KeeperException.NodeExistsException ⇒ Right(e) - } - } - }) match { - case Left(path) ⇒ path - case Right(exception) ⇒ actorAddressRegistryPath - } - } - - // create ADDRESS -> SERIALIZER CLASS NAME mapping - try { - zkClient.createPersistent(actorAddressRegistrySerializerPathFor(actorAddress), serializer.identifier.toString) - } catch { - case e: ZkNodeExistsException ⇒ zkClient.writeData(actorAddressRegistrySerializerPathFor(actorAddress), serializer.identifier.toString) - } - - // create ADDRESS -> NODE mapping - ignore[ZkNodeExistsException](zkClient.createPersistent(actorAddressToNodesPathFor(actorAddress))) - - // create ADDRESS -> UUIDs mapping - ignore[ZkNodeExistsException](zkClient.createPersistent(actorAddressToUuidsPathFor(actorAddress))) - - useActorOnNodes(nodesForNrOfInstances(nrOfInstances, Some(actorAddress)).toArray, actorAddress) - - this - } - - /** - * Removes actor from the cluster. - */ - // def remove(actorRef: ActorRef) { - // remove(actorRef.address) - // } - - /** - * Removes actor with a specific address from the cluster. - */ - // def remove(actorAddress: String) { - // releaseActorOnAllNodes(actorAddress) - // // warning: ordering matters here - // // FIXME remove ADDRESS to UUID mapping? - // ignore[ZkNoNodeException](zkClient.deleteRecursive(actorAddressToUuidsPathFor(actorAddress))) - // ignore[ZkNoNodeException](zkClient.deleteRecursive(actorAddressRegistryPathFor(actorAddress))) - // ignore[ZkNoNodeException](zkClient.deleteRecursive(actorAddressToNodesPathFor(actorAddress))) - // } - - /** - * Is the actor with a specific address clustered or not? - */ - def isClustered(actorAddress: String): Boolean = zkClient.exists(actorAddressRegistryPathFor(actorAddress)) - - /** - * Is the actor with a specific address in use on 'this' node or not? - */ - def isInUseOnNode(actorAddress: String): Boolean = isInUseOnNode(actorAddress, nodeAddress) - - /** - * Is the actor with a specific address in use on the specified node or not? - */ - def isInUseOnNode(actorAddress: String, node: NodeAddress): Boolean = zkClient.exists(actorAddressToNodesPathFor(actorAddress, node.nodeName)) - - /** - * Is the actor with a specific address in use on the specified node or not? - */ - def isInUseOnNode(actorAddress: String, nodeName: String): Boolean = zkClient.exists(actorAddressToNodesPathFor(actorAddress, nodeName)) - - /** - * Checks out an actor for use on this node, i.e. it is checked out as a 'LocalActorRef' and made available - * for remote access through lookup by its UUID.
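Taken together with store(..) above, the intended check-out/check-in cycle looks roughly like this (a sketch; "HelloActor" and the serializer are assumed names, and error handling is elided):

    node.store("hello-service", () ⇒ Actor.actorOf(classOf[HelloActor], "hello-service"), 3, serializer)
    val ref = node.use[HelloActor]("hello-service") // Option[LocalActorRef], checked out on this node
    ref foreach (_ ! "ping")                        // use it locally (also reachable remotely by UUID)
    node.release("hello-service")                   // check it back in when done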
- */ - def use[T <: Actor](actorAddress: String): Option[LocalActorRef] = { - val nodeName = nodeAddress.nodeName - - val actorFactoryPath = actorAddressRegistryPathFor(actorAddress) - zkClient.retryUntilConnected(new Callable[Either[Exception, () ⇒ LocalActorRef]]() { - def call: Either[Exception, () ⇒ LocalActorRef] = { - try { - - val actorFactoryBytes = - if (shouldCompressData) LZF.uncompress(zkClient.connection.readData(actorFactoryPath, new Stat, false)) - else zkClient.connection.readData(actorFactoryPath, new Stat, false) - - val actorFactory = - Serialization.deserialize(actorFactoryBytes, classOf[() ⇒ LocalActorRef], None) match { - case Left(error) ⇒ throw error - case Right(instance) ⇒ instance.asInstanceOf[() ⇒ LocalActorRef] - } - - Right(actorFactory) - } catch { - case e: KeeperException.NoNodeException ⇒ Left(e) - } - } - }) match { - case Left(exception) ⇒ throw exception - case Right(actorFactory) ⇒ - val actorRef = actorFactory() - - EventHandler.debug(this, - "Checking out actor [%s] to be used on node [%s] as local actor" - .format(actorAddress, nodeName)) - - val uuid = actorRef.uuid - - // create UUID registry - ignore[ZkNodeExistsException](zkClient.createPersistent(actorUuidRegistryPathFor(uuid))) - - // create UUID -> NODE mapping - try { - zkClient.createPersistent(actorUuidRegistryNodePathFor(uuid), nodeName) - } catch { - case e: ZkNodeExistsException ⇒ zkClient.writeData(actorUuidRegistryNodePathFor(uuid), nodeName) - } - - // create UUID -> ADDRESS - try { - zkClient.createPersistent(actorUuidRegistryAddressPathFor(uuid), actorAddress) - } catch { - case e: ZkNodeExistsException ⇒ zkClient.writeData(actorUuidRegistryAddressPathFor(uuid), actorAddress) - } - - // create UUID -> REMOTE ADDRESS (InetSocketAddress) mapping - try { - zkClient.createPersistent(actorUuidRegistryRemoteAddressPathFor(uuid), remoteServerAddress) - } catch { - case e: ZkNodeExistsException ⇒ zkClient.writeData(actorUuidRegistryRemoteAddressPathFor(uuid), remoteServerAddress) - } - - // create ADDRESS -> UUID mapping - try { - zkClient.createPersistent(actorAddressRegistryUuidPathFor(actorAddress), uuid) - } catch { - case e: ZkNodeExistsException ⇒ zkClient.writeData(actorAddressRegistryUuidPathFor(actorAddress), uuid) - } - - // create NODE -> UUID mapping - ignore[ZkNodeExistsException](zkClient.createPersistent(nodeToUuidsPathFor(nodeName, uuid), true)) - - // create ADDRESS -> UUIDs mapping - ignore[ZkNodeExistsException](zkClient.createPersistent(actorAddressToUuidsPathFor(actorAddress, uuid))) - - // create ADDRESS -> NODE mapping - ignore[ZkNodeExistsException](zkClient.createPersistent(actorAddressToNodesPathFor(actorAddress, nodeName))) - - actorRef - } - } - - /** - * Using (checking out) actor on a specific set of nodes. 
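The sequence of ZooKeeper writes above amounts to the following registry layout (a sketch derived from the path helpers further down, abbreviated and not authoritative):

    // actor-address-registry/<address>              -> serialized actor factory bytes
    // actor-address-registry/<address>/serializer   -> serializer id
    // actor-address-registry/<address>/uuid         -> uuid of the checked-out instance
    // actor-uuid-registry/<uuid>/node               -> node name hosting the instance
    // actor-uuid-registry/<uuid>/address            -> actor address
    // actor-uuid-registry/<uuid>/remote-address     -> InetSocketAddress of the hosting node
    // node-to-actor-uuids/<node>/<uuid>             -> instance checked out on node
    // actor-address-to-nodes/<address>/<node>       -> address in use on node
    // actor-address-to-uuids/<address>/<uuid>       -> instances per address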
- */ - def useActorOnNodes(nodes: Array[String], actorAddress: String, replicateFromUuid: Option[UUID] = None) { - EventHandler.debug(this, - "Sending command to nodes [%s] for checking out actor [%s]".format(nodes.mkString(", "), actorAddress)) - - val builder = RemoteSystemDaemonMessageProtocol.newBuilder - .setMessageType(USE) - .setActorAddress(actorAddress) - - // set the UUID to replicate from - if available - replicateFromUuid foreach (uuid ⇒ builder.setReplicateActorFromUuid(uuidToUuidProtocol(uuid))) - - val command = builder.build - - nodes foreach { node ⇒ - nodeConnections.get.connections.get(node) foreach { - case (address, connection) ⇒ - sendCommandToNode(connection, command, async = false) - } - } - } - - /** - * Using (checking out) actor on all nodes in the cluster. - */ - def useActorOnAllNodes(actorAddress: String, replicateFromUuid: Option[UUID] = None) { - useActorOnNodes(membershipNodes, actorAddress, replicateFromUuid) - } - - /** - * Using (checking out) actor on a specific node. - */ - def useActorOnNode(node: String, actorAddress: String, replicateFromUuid: Option[UUID] = None) { - useActorOnNodes(Array(node), actorAddress, replicateFromUuid) - } - - /** - * Checks in an actor when done using it on this node. - */ - def release(actorRef: ActorRef) { - release(actorRef.address) - } - - /** - * Checks in an actor when done using it on this node. - */ - def release(actorAddress: String) { - - // FIXME 'Cluster.release' needs to notify all existing ClusterActorRef's that are using the instance that it is no - // longer available. Then what to do? Should we even remove this method? - - ignore[ZkNoNodeException](zkClient.delete(actorAddressToNodesPathFor(actorAddress, nodeAddress.nodeName))) - - uuidsForActorAddress(actorAddress) foreach { uuid ⇒ - EventHandler.debug(this, - "Releasing actor [%s] with UUID [%s] after usage".format(actorAddress, uuid)) - - ignore[ZkNoNodeException](zkClient.deleteRecursive(nodeToUuidsPathFor(nodeAddress.nodeName, uuid))) - ignore[ZkNoNodeException](zkClient.delete(actorUuidRegistryRemoteAddressPathFor(uuid))) - } - } - - /** - * Releases (checking in) all actors with a specific address on all nodes in the cluster where the actor is in 'use'. - */ - private[akka] def releaseActorOnAllNodes(actorAddress: String) { - EventHandler.debug(this, - "Releasing (checking in) all actors with address [%s] on all nodes in cluster".format(actorAddress)) - - val command = RemoteSystemDaemonMessageProtocol.newBuilder - .setMessageType(RELEASE) - .setActorAddress(actorAddress) - .build - - nodesForActorsInUseWithAddress(actorAddress) foreach { node ⇒ - nodeConnections.get.connections.get(node) foreach { - case (_, connection) ⇒ sendCommandToNode(connection, command, async = true) - } - } - } - - /** - * Creates an ActorRef with a Router to a set of clustered actors. - */ - def ref(actorAddress: String, router: RouterType, failureDetector: FailureDetectorType): ActorRef = - ClusterActorRef.newRef(actorAddress, router, failureDetector, Actor.TIMEOUT) - - /** - * Returns the UUIDs of all actors checked out on this node. - */ - private[akka] def uuidsForActorsInUse: Array[UUID] = uuidsForActorsInUseOnNode(nodeAddress.nodeName) - - /** - * Returns the addresses of all actors checked out on this node. - */ - def addressesForActorsInUse: Array[String] = actorAddressForUuids(uuidsForActorsInUse)
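A hypothetical client of the ref(..) factory above (the router and failure-detector choices are just examples; both types appear in ClusterActorRef further down):

    val actor = node.ref("hello-service", RouterType.RoundRobin,
      FailureDetectorType.RemoveConnectionOnFirstFailureLocalFailureDetector)
    actor ! "ping" // routed to one of the clustered instances, failing over on node loss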
- - /** - * Returns the UUIDs of all actors registered in this cluster. - */ - private[akka] def uuidsForClusteredActors: Array[UUID] = - zkClient.getChildren(ACTOR_UUID_REGISTRY_PATH).toList.map(new UUID(_)).toArray.asInstanceOf[Array[UUID]] - - /** - * Returns the addresses of all actors registered in this cluster. - */ - def addressesForClusteredActors: Array[String] = actorAddressForUuids(uuidsForClusteredActors) - - /** - * Returns the actor address for the actor with a specific UUID. - */ - private[akka] def actorAddressForUuid(uuid: UUID): Option[String] = { - try { - Some(zkClient.readData(actorUuidRegistryAddressPathFor(uuid)).asInstanceOf[String]) - } catch { - case e: ZkNoNodeException ⇒ None - } - } - - /** - * Returns the actor addresses for all the actors with the specified UUIDs. - */ - private[akka] def actorAddressForUuids(uuids: Array[UUID]): Array[String] = - uuids map (actorAddressForUuid(_)) filter (_.isDefined) map (_.get) - - /** - * Returns the actor UUIDs for a specific actor address. - */ - private[akka] def uuidsForActorAddress(actorAddress: String): Array[UUID] = { - try { - zkClient.getChildren(actorAddressToUuidsPathFor(actorAddress)).toList.toArray map { - case c: CharSequence ⇒ new UUID(c) - } filter (_ ne null) - } catch { - case e: ZkNoNodeException ⇒ Array[UUID]() - } - } - - /** - * Returns the node names of all actors in use with a specific address. - */ - private[akka] def nodesForActorsInUseWithAddress(actorAddress: String): Array[String] = { - try { - zkClient.getChildren(actorAddressToNodesPathFor(actorAddress)).toList.toArray.asInstanceOf[Array[String]] - } catch { - case e: ZkNoNodeException ⇒ Array[String]() - } - } - - /** - * Returns the UUIDs of all actors in use registered on a specific node. - */ - private[akka] def uuidsForActorsInUseOnNode(nodeName: String): Array[UUID] = { - try { - zkClient.getChildren(nodeToUuidsPathFor(nodeName)).toList.toArray map { - case c: CharSequence ⇒ new UUID(c) - } filter (_ ne null) - } catch { - case e: ZkNoNodeException ⇒ Array[UUID]() - } - } - - /** - * Returns the addresses of all actors in use registered on a specific node. - */ - def addressesForActorsInUseOnNode(nodeName: String): Array[String] = { - val uuids = - try { - zkClient.getChildren(nodeToUuidsPathFor(nodeName)).toList.toArray map { - case c: CharSequence ⇒ new UUID(c) - } filter (_ ne null) - } catch { - case e: ZkNoNodeException ⇒ Array[UUID]() - } - actorAddressForUuids(uuids) - } - - /** - * Returns the Serializer for the actor with a specific address. - */ - def serializerForActor(actorAddress: String): Serializer = try { - Serialization.serializerByIdentity(zkClient.readData(actorAddressRegistrySerializerPathFor(actorAddress), new Stat).asInstanceOf[String].toByte) - } catch { - case e: ZkNoNodeException ⇒ throw new IllegalStateException("No serializer found for actor with address [%s]".format(actorAddress)) - }
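These lookups compose with the public queries; for example (a sketch, the node and address names are illustrative):

    val onNode1 = node.addressesForActorsInUseOnNode("node1") // addresses checked out on "node1"
    val ser     = node.serializerForActor("hello-service")    // serializer registered for the address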
- - /** - * Returns addresses for nodes that the clustered actor is in use on. - */ - def inetSocketAddressesForActor(actorAddress: String): Array[(UUID, InetSocketAddress)] = { - try { - for { - uuid ← uuidsForActorAddress(actorAddress) - } yield { - val remoteAddress = zkClient.readData(actorUuidRegistryRemoteAddressPathFor(uuid)).asInstanceOf[InetSocketAddress] - (uuid, remoteAddress) - } - } catch { - case e: ZkNoNodeException ⇒ - EventHandler.warning(this, - "Could not retrieve remote socket address for node hosting actor [%s] due to: %s" - .format(actorAddress, e.toString)) - Array[(UUID, InetSocketAddress)]() - } - } - - // ======================================= - // Compute Grid - // ======================================= - - /** - * Send a function 'Function0[Unit]' to be invoked on a random set of nodes (of size defined by the 'nrOfInstances' argument). - */ - def send(f: Function0[Unit], nrOfInstances: Int) { - Serialization.serialize(f) match { - case Left(error) ⇒ throw error - case Right(bytes) ⇒ - val message = RemoteSystemDaemonMessageProtocol.newBuilder - .setMessageType(FUNCTION_FUN0_UNIT) - .setPayload(ByteString.copyFrom(bytes)) - .build - nodeConnectionsForNrOfInstances(nrOfInstances) foreach (_ ! message) - } - } - - /** - * Send a function 'Function0[Any]' to be invoked on a random set of nodes (of size defined by the 'nrOfInstances' argument). - * Returns a 'List' with all the 'Future's from the computation. - */ - def send(f: Function0[Any], nrOfInstances: Int): List[Future[Any]] = { - Serialization.serialize(f) match { - case Left(error) ⇒ throw error - case Right(bytes) ⇒ - val message = RemoteSystemDaemonMessageProtocol.newBuilder - .setMessageType(FUNCTION_FUN0_ANY) - .setPayload(ByteString.copyFrom(bytes)) - .build - val results = nodeConnectionsForNrOfInstances(nrOfInstances) map (_ ? message) - results.toList.asInstanceOf[List[Future[Any]]] - } - } - - /** - * Send a function 'Function1[Any, Unit]' to be invoked on a random set of nodes (of size defined by the 'nrOfInstances' argument) - * with the argument specified. - */ - def send(f: Function1[Any, Unit], arg: Any, nrOfInstances: Int) { - Serialization.serialize((f, arg)) match { - case Left(error) ⇒ throw error - case Right(bytes) ⇒ - val message = RemoteSystemDaemonMessageProtocol.newBuilder - .setMessageType(FUNCTION_FUN1_ARG_UNIT) - .setPayload(ByteString.copyFrom(bytes)) - .build - nodeConnectionsForNrOfInstances(nrOfInstances) foreach (_ ! message) - } - } - - /** - * Send a function 'Function1[Any, Any]' to be invoked on a random set of nodes (of size defined by the 'nrOfInstances' argument) - * with the argument specified. - * Returns a 'List' with all the 'Future's from the computation. - */ - def send(f: Function1[Any, Any], arg: Any, nrOfInstances: Int): List[Future[Any]] = { - Serialization.serialize((f, arg)) match { - case Left(error) ⇒ throw error - case Right(bytes) ⇒ - val message = RemoteSystemDaemonMessageProtocol.newBuilder - .setMessageType(FUNCTION_FUN1_ARG_ANY) - .setPayload(ByteString.copyFrom(bytes)) - .build - val results = nodeConnectionsForNrOfInstances(nrOfInstances) map (_ ? message) - results.toList.asInstanceOf[List[Future[Any]]] - } - }
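A sketch of the compute-grid entry points above (the function literals and counts are illustrative; the Unit variant fires and forgets, the Any variant returns one Future per node):

    node.send(() ⇒ println("runs on two random nodes"), 2)
    val futures: List[Future[Any]] = node.send(() ⇒ 21 * 2, 3)
    // e.g. futures map (Await.result(_, 5 seconds)) would yield three 42s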
- - // ======================================= - // Config - // ======================================= - - /** - * Stores a configuration element under a specific key. - * If the key already exists then it will be overwritten. - */ - def setConfigElement(key: String, bytes: Array[Byte]) { - val compressedBytes = if (shouldCompressData) LZF.compress(bytes) else bytes - EventHandler.debug(this, - "Adding config value under key [%s] in cluster registry".format(key)) - zkClient.retryUntilConnected(new Callable[Either[Unit, Exception]]() { - def call: Either[Unit, Exception] = { - try { - Left(zkClient.connection.create(configurationPathFor(key), compressedBytes, CreateMode.PERSISTENT)) - } catch { - case e: KeeperException.NodeExistsException ⇒ - try { - Left(zkClient.connection.writeData(configurationPathFor(key), compressedBytes)) - } catch { - case e: Exception ⇒ Right(e) - } - } - } - }) match { - case Left(_) ⇒ /* do nothing */ - case Right(exception) ⇒ throw exception - } - } - - /** - * Returns the config element for the key: Some(bytes) if an element exists under the key, else None. - */ - def getConfigElement(key: String): Option[Array[Byte]] = try { - Some(zkClient.connection.readData(configurationPathFor(key), new Stat, true)) - } catch { - case e: KeeperException.NoNodeException ⇒ None - } - - /** - * Removes configuration element for a specific key. - * Does nothing if the key does not exist. - */ - def removeConfigElement(key: String) { - ignore[ZkNoNodeException] { - EventHandler.debug(this, - "Removing config element with key [%s] from cluster registry".format(key)) - zkClient.deleteRecursive(configurationPathFor(key)) - } - } - - /** - * Returns a list with all config element keys. - */ - def getConfigElementKeys: Array[String] = zkClient.getChildren(CONFIGURATION_PATH).toList.toArray.asInstanceOf[Array[String]] - - // ======================================= - // Private - // ======================================= - - private def sendCommandToNode(connection: ActorRef, command: RemoteSystemDaemonMessageProtocol, async: Boolean = true) { - if (async) { - connection ! command - } else { - try { - Await.result(connection ? 
(command, remoteDaemonAckTimeout), 10 seconds).asInstanceOf[Status] match { - case Success(status) ⇒ - EventHandler.debug(this, "Remote command successfully received by [%s]".format(status)) - case Failure(cause) ⇒ - EventHandler.error(cause, this, cause.toString) - throw cause - } - } catch { - case e: TimeoutException ⇒ - EventHandler.error(e, this, "Remote command to [%s] timed out".format(connection.address)) - throw e - case e: Exception ⇒ - EventHandler.error(e, this, "Could not send remote command to [%s] due to: %s".format(connection.address, e.toString)) - throw e - } - } - } - - private[cluster] def membershipPathFor(node: String): String = "%s/%s".format(MEMBERSHIP_PATH, node) - - private[cluster] def configurationPathFor(key: String): String = "%s/%s".format(CONFIGURATION_PATH, key) - - private[cluster] def actorAddressToNodesPathFor(actorAddress: String): String = "%s/%s".format(ACTOR_ADDRESS_NODES_TO_PATH, actorAddress) - - private[cluster] def actorAddressToNodesPathFor(actorAddress: String, nodeName: String): String = "%s/%s".format(actorAddressToNodesPathFor(actorAddress), nodeName) - - private[cluster] def nodeToUuidsPathFor(node: String): String = "%s/%s".format(NODE_TO_ACTOR_UUIDS_PATH, node) - - private[cluster] def nodeToUuidsPathFor(node: String, uuid: UUID): String = "%s/%s/%s".format(NODE_TO_ACTOR_UUIDS_PATH, node, uuid) - - private[cluster] def actorAddressRegistryPathFor(actorAddress: String): String = "%s/%s".format(ACTOR_ADDRESS_REGISTRY_PATH, actorAddress) - - private[cluster] def actorAddressRegistrySerializerPathFor(actorAddress: String): String = "%s/%s".format(actorAddressRegistryPathFor(actorAddress), "serializer") - - private[cluster] def actorAddressRegistryUuidPathFor(actorAddress: String): String = "%s/%s".format(actorAddressRegistryPathFor(actorAddress), "uuid") - - private[cluster] def actorUuidRegistryPathFor(uuid: UUID): String = "%s/%s".format(ACTOR_UUID_REGISTRY_PATH, uuid) - - private[cluster] def actorUuidRegistryNodePathFor(uuid: UUID): String = "%s/%s".format(actorUuidRegistryPathFor(uuid), "node") - - private[cluster] def actorUuidRegistryAddressPathFor(uuid: UUID): String = "%s/%s".format(actorUuidRegistryPathFor(uuid), "address") - - private[cluster] def actorUuidRegistryRemoteAddressPathFor(uuid: UUID): String = "%s/%s".format(actorUuidRegistryPathFor(uuid), "remote-address") - - private[cluster] def actorAddressToUuidsPathFor(actorAddress: String): String = "%s/%s".format(ACTOR_ADDRESS_TO_UUIDS_PATH, actorAddress.replace('.', '_')) - - private[cluster] def actorAddressToUuidsPathFor(actorAddress: String, uuid: UUID): String = "%s/%s".format(actorAddressToUuidsPathFor(actorAddress), uuid) - - /** - * Returns a random set with node names of size 'nrOfInstances'. - * Default nrOfInstances is 0, which returns the empty Set.
- */ - private def nodesForNrOfInstances(nrOfInstances: Int = 0, actorAddress: Option[String] = None): Set[String] = { - var replicaNames = Set.empty[String] - val nrOfClusterNodes = nodeConnections.get.connections.size - - if (nrOfInstances < 1) return replicaNames - if (nrOfClusterNodes < nrOfInstances) throw new IllegalArgumentException( - "Replication factor [" + nrOfInstances + - "] is greater than the number of available nodeNames [" + nrOfClusterNodes + "]") - - val preferredNodes = - if (actorAddress.isDefined) { - // use 'preferred-nodes' in deployment config for the actor - Deployer.deploymentFor(actorAddress.get) match { - case Deploy(_, _, _, _, Cluster(nodes, _, _)) ⇒ - nodes map (node ⇒ DeploymentConfig.nodeNameFor(node)) take nrOfInstances - case _ ⇒ - throw new ClusterException("Actor [" + actorAddress.get + "] is not configured as clustered") - } - } else Vector.empty[String] - - for { - nodeName ← preferredNodes - key ← nodeConnections.get.connections.keys - if key == nodeName - } replicaNames = replicaNames + nodeName - - val nrOfCurrentReplicaNames = replicaNames.size - - val replicaSet = - if (nrOfCurrentReplicaNames > nrOfInstances) throw new IllegalStateException("Replica set is larger than replication factor") - else if (nrOfCurrentReplicaNames == nrOfInstances) replicaNames - else { - val random = new java.util.Random(System.currentTimeMillis) - while (replicaNames.size < nrOfInstances) { - replicaNames = replicaNames + membershipNodes(random.nextInt(nrOfClusterNodes)) - } - replicaNames - } - - EventHandler.debug(this, - "Picked out replica set [%s] for actor [%s]".format(replicaSet.mkString(", "), actorAddress)) - - replicaSet - } - - /** - * Returns a random set with replica connections of size 'nrOfInstances'. - * Default nrOfInstances is 0, which returns the empty Set. - */ - private def nodeConnectionsForNrOfInstances(nrOfInstances: Int = 0, actorAddress: Option[String] = None): Set[ActorRef] = { - for { - node ← nodesForNrOfInstances(nrOfInstances, actorAddress) - connection ← nodeConnections.get.connections.get(node) - } yield connection._2 - }
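The connection-update method that follows relies on the standard lock-free read-transform-CAS loop over the AtomicReference holding the VersionedConnectionState. Stripped of the domain logic, the bare pattern is (a generic sketch, not code from this module):

    import java.util.concurrent.atomic.AtomicReference
    import scala.annotation.tailrec

    @tailrec
    def transform[T](ref: AtomicReference[T])(f: T ⇒ T): T = {
      val oldState = ref.get
      val newState = f(oldState)
      if (ref.compareAndSet(oldState, newState)) newState // won the race
      else transform(ref)(f)                              // lost it, retry against fresh state
    }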
- - /** - * Update the list of connections to other nodes in the cluster. - * Tail recursive, using lockless optimistic concurrency. - * - * @return a Map with the remote socket addresses of the disconnected node connections - */ - @tailrec - final private[cluster] def connectToAllNewlyArrivedMembershipNodesInCluster( - newlyConnectedMembershipNodes: Traversable[String], - newlyDisconnectedMembershipNodes: Traversable[String]): Map[String, InetSocketAddress] = { - - var change = false - val oldState = nodeConnections.get - - var newConnections = oldState.connections //Map.empty[String, Tuple2[InetSocketAddress, ActorRef]] - - // cache the disconnected connections in a map, needed for fail-over of these connections later - var disconnectedConnections = Map.empty[String, InetSocketAddress] - newlyDisconnectedMembershipNodes foreach { node ⇒ - disconnectedConnections = disconnectedConnections + (node -> (oldState.connections(node) match { - case (address, _) ⇒ address - })) - } - - // remove connections to failed nodes - newlyDisconnectedMembershipNodes foreach { node ⇒ - newConnections = newConnections - node - change = true - } - - // add connections to newly arrived nodes - newlyConnectedMembershipNodes foreach { node ⇒ - if (!newConnections.contains(node)) { - - // only connect to each replica once - remoteSocketAddressForNode(node) foreach { address ⇒ - EventHandler.debug(this, "Setting up connection to node with nodename [%s] and address [%s]".format(node, address)) - - val clusterDaemon = remoteService.actorFor( - RemoteClusterDaemon.Address, address.getHostName, address.getPort) - newConnections = newConnections + (node -> (address, clusterDaemon)) - change = true - } - } - } - - // add the remote connection to 'this' node as well, but as a 'local' actor - if (includeRefNodeInReplicaSet) - newConnections = newConnections + (nodeAddress.nodeName -> (remoteServerAddress, remoteDaemon)) - - // update the state with the new set of connections - val newState = new VersionedConnectionState(oldState.version + 1, newConnections) - - if (!nodeConnections.compareAndSet(oldState, newState)) { - // we failed to set the state, try again - connectToAllNewlyArrivedMembershipNodesInCluster( - newlyConnectedMembershipNodes, newlyDisconnectedMembershipNodes) - } else { - // we succeeded in setting the state, return - EventHandler.info(this, "Connected to nodes [\n\t%s]".format(newConnections.mkString("\n\t"))) - disconnectedConnections - } - }
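joinCluster() below relies on ZooKeeper's ephemeral-node semantics for liveness: the membership znode exists only as long as this node's session, so a crashed node disappears without explicit cleanup. In isolation (a sketch using the same client API):

    // creates the membership znode tied to this node's ZooKeeper session
    zkClient.createEphemeral(membershipNodePath, remoteServerAddress)
    // if the session expires, ZooKeeper deletes the znode; every subscribed
    // MembershipChildListener then sees the shrunken child list and triggers failover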
- - private[cluster] def joinCluster() { - try { - EventHandler.info(this, - "Joining cluster as membership node [%s] on [%s]".format(nodeAddress, membershipNodePath)) - zkClient.createEphemeral(membershipNodePath, remoteServerAddress) - } catch { - case e: ZkNodeExistsException ⇒ - e.printStackTrace - val error = new ClusterException( - "Can't join the cluster. The node name [" + nodeAddress.nodeName + "] is already in use by another node.") - EventHandler.error(error, this, error.toString) - throw error - } - ignore[ZkNodeExistsException](zkClient.createPersistent(nodeToUuidsPathFor(nodeAddress.nodeName))) - } - - private[cluster] def joinLeaderElection(): Boolean = { - EventHandler.info(this, "Node [%s] is joining leader election".format(nodeAddress.nodeName)) - try { - leaderLock.lock - } catch { - case e: KeeperException.NodeExistsException ⇒ false - } - } - - private[cluster] def remoteSocketAddressForNode(node: String): Option[InetSocketAddress] = { - try { - Some(zkClient.readData(membershipPathFor(node), new Stat).asInstanceOf[InetSocketAddress]) - } catch { - case e: ZkNoNodeException ⇒ None - } - } - - private[cluster] def failOverClusterActorRefConnections(from: InetSocketAddress, to: InetSocketAddress) { - EventHandler.info(this, "Failing over ClusterActorRef from %s to %s".format(from, to)) - clusterActorRefs.valueIterator(from) foreach (_.failOver(from, to)) - } - - private[cluster] def migrateActorsOnFailedNodes( - failedNodes: List[String], - currentClusterNodes: List[String], - oldClusterNodes: List[String], - disconnectedConnections: Map[String, InetSocketAddress]) { - - failedNodes.foreach { failedNodeName ⇒ - - val failedNodeAddress = NodeAddress(nodeAddress.clusterName, failedNodeName) - - val myIndex = oldClusterNodes.indexWhere(_.endsWith(nodeAddress.nodeName)) - val failedNodeIndex = oldClusterNodes.indexWhere(_ == failedNodeName) - - // Migrate to the successor of the failed node (using a sorted circular list of the node names) - if ((failedNodeIndex == 0 && myIndex == oldClusterNodes.size - 1) || // No leftmost successor exists, check the tail - (failedNodeIndex == myIndex + 1)) { - // Am I the leftmost successor? - - // Take the lead in migrating the actors: migrate each actor to this node, - // unless it already resides here, in which case pick another node it is not already on. - - // Yes, I am the node to migrate the actor to (can only be one in the cluster) - val actorUuidsForFailedNode = zkClient.getChildren(nodeToUuidsPathFor(failedNodeName)).toList - - actorUuidsForFailedNode.foreach { uuidAsString ⇒ - EventHandler.debug(this, - "Cluster node [%s] has failed, migrating actor with UUID [%s] to [%s]" - .format(failedNodeName, uuidAsString, nodeAddress.nodeName)) - - val uuid = uuidFrom(uuidAsString) - val actorAddress = actorAddressForUuid(uuid).getOrElse( - throw new IllegalStateException("No actor address found for UUID [" + uuidAsString + "]")) - - val migrateToNodeAddress = - if (!isShutdown && isInUseOnNode(actorAddress)) { - // already in use on this node, pick another node to instantiate the actor on - val replicaNodesForActor = nodesForActorsInUseWithAddress(actorAddress) - val nodesAvailableForMigration = (currentClusterNodes.toSet diff failedNodes.toSet) diff replicaNodesForActor.toSet - - if (nodesAvailableForMigration.isEmpty) throw new ClusterException( - "Cannot migrate actor to a new node since there are no available nodes left. " + - "(However, the actor already has >1 replica in cluster, so we are ok)") - - NodeAddress(nodeAddress.clusterName, nodesAvailableForMigration.head) - } else { - // actor is not in use on this node, migrate it here - nodeAddress - } - - // if actor is replicated => pass along the UUID for the actor to replicate from (replay transaction log etc.)
- val replicateFromUuid = - if (isReplicated(actorAddress)) Some(uuid) - else None - - migrateWithoutCheckingThatActorResidesOnItsHomeNode( - failedNodeAddress, - migrateToNodeAddress, - actorAddress, - replicateFromUuid) - } - - // notify all available nodes that they should fail-over all connections from 'from' to 'to' - val from = disconnectedConnections(failedNodeName) - val to = remoteServerAddress - - Serialization.serialize((from, to)) match { - case Left(error) ⇒ throw error - case Right(bytes) ⇒ - - val command = RemoteSystemDaemonMessageProtocol.newBuilder - .setMessageType(FAIL_OVER_CONNECTIONS) - .setPayload(ByteString.copyFrom(bytes)) - .build - - // FIXME now we are broadcasting to ALL nodes in the cluster even though a fraction might have a reference to the actors - should that be fixed? - nodeConnections.get.connections.values foreach { - case (_, connection) ⇒ sendCommandToNode(connection, command, async = true) - } - } - } - } - } - - /** - * Used when the ephemeral "home" node is already gone, so we can't check if it is available. - */ - private def migrateWithoutCheckingThatActorResidesOnItsHomeNode( - from: NodeAddress, to: NodeAddress, actorAddress: String, replicateFromUuid: Option[UUID]) { - - EventHandler.debug(this, "Migrating actor [%s] from node [%s] to node [%s]".format(actorAddress, from, to)) - if (!isInUseOnNode(actorAddress, to) && !isShutdown) { - release(actorAddress) - - val remoteAddress = remoteSocketAddressForNode(to.nodeName).getOrElse(throw new ClusterException("No remote address registered for [" + to.nodeName + "]")) - - ignore[ZkNoNodeException](zkClient.delete(actorAddressToNodesPathFor(actorAddress, from.nodeName))) - - // FIXME who takes care of this line? - //ignore[ZkNoNodeException](zkClient.delete(nodeToUuidsPathFor(from.nodeName, uuid))) - - // 'use' (check out) actor on the remote 'to' node - useActorOnNode(to.nodeName, actorAddress, replicateFromUuid) - } - } - - private def createZooKeeperPathStructureIfNeeded() { - ignore[ZkNodeExistsException] { - zkClient.create(CLUSTER_PATH, null, CreateMode.PERSISTENT) - EventHandler.info(this, "Created node [%s]".format(CLUSTER_PATH)) - } - - basePaths.foreach { path ⇒ - try { - ignore[ZkNodeExistsException](zkClient.create(path, null, CreateMode.PERSISTENT)) - EventHandler.debug(this, "Created node [%s]".format(path)) - } catch { - case e ⇒ - val error = new ClusterException(e.toString) - EventHandler.error(error, this) - throw error - } - } - } - - private def registerListeners() = { - zkClient.subscribeStateChanges(stateListener) - zkClient.subscribeChildChanges(MEMBERSHIP_PATH, membershipListener) - } - - private def unregisterListeners() = { - zkClient.unsubscribeStateChanges(stateListener) - zkClient.unsubscribeChildChanges(MEMBERSHIP_PATH, membershipListener) - } - - private def fetchMembershipNodes() { - val membershipChildren = zkClient.getChildren(MEMBERSHIP_PATH) - locallyCachedMembershipNodes.clear() - membershipChildren.iterator.foreach(locallyCachedMembershipNodes.add) - connectToAllNewlyArrivedMembershipNodesInCluster(membershipNodes, Nil) - } - - private def isReplicated(actorAddress: String): Boolean = DeploymentConfig.isReplicated(Deployer.deploymentFor(actorAddress)) - - private def createMBean = { - val clusterMBean = new StandardMBean(classOf[ClusterNodeMBean]) with ClusterNodeMBean { - - override def stop() = self.shutdown() - - override def disconnect() = self.disconnect() - - override def reconnect() = self.reconnect() - - override def resign() = self.resign() - - override 
def getNodeAddress = self.nodeAddress - - override def getRemoteServerHostname = self.hostname - - override def getRemoteServerPort = self.port - - override def getNodeName = self.nodeAddress.nodeName - - override def getClusterName = self.nodeAddress.clusterName - - override def getZooKeeperServerAddresses = self.zkServerAddresses - - override def getMemberNodes = self.locallyCachedMembershipNodes.iterator.map(_.toString).toArray - - override def getLeaderLockName = self.leader.toString - - override def isLeader = self.isLeader - - override def getUuidsForActorsInUse = self.uuidsForActorsInUse.map(_.toString).toArray - - override def getAddressesForActorsInUse = self.addressesForActorsInUse.map(_.toString).toArray - - override def getUuidsForClusteredActors = self.uuidsForClusteredActors.map(_.toString).toArray - - override def getAddressesForClusteredActors = self.addressesForClusteredActors.map(_.toString).toArray - - override def getNodesForActorInUseWithAddress(address: String) = self.nodesForActorsInUseWithAddress(address) - - override def getUuidsForActorsInUseOnNode(nodeName: String) = self.uuidsForActorsInUseOnNode(nodeName).map(_.toString).toArray - - override def getAddressesForActorsInUseOnNode(nodeName: String) = self.addressesForActorsInUseOnNode(nodeName).map(_.toString).toArray - - override def setConfigElement(key: String, value: String): Unit = self.setConfigElement(key, value.getBytes("UTF-8")) - - override def getConfigElement(key: String) = new String(self.getConfigElement(key).getOrElse(Array[Byte]()), "UTF-8") - - override def removeConfigElement(key: String): Unit = self.removeConfigElement(key) - - override def getConfigElementKeys = self.getConfigElementKeys.toArray - - override def getMembershipPathFor(node: String) = self.membershipPathFor(node) - - override def getConfigurationPathFor(key: String) = self.configurationPathFor(key) - - override def getActorAddresstoNodesPathFor(actorAddress: String) = self.actorAddressToNodesPathFor(actorAddress) - - override def getActorAddressToNodesPathForWithNodeName(actorAddress: String, nodeName: String) = self.actorAddressToNodesPathFor(actorAddress, nodeName) - - override def getNodeToUuidsPathFor(node: String) = self.nodeToUuidsPathFor(node) - - override def getNodeToUuidsPathFor(node: String, uuid: UUID) = self.nodeToUuidsPathFor(node, uuid) - - override def getActorAddressRegistryPathFor(actorAddress: String) = self.actorAddressRegistryPathFor(actorAddress) - - override def getActorAddressRegistrySerializerPathFor(actorAddress: String) = self.actorAddressRegistrySerializerPathFor(actorAddress) - - override def getActorAddressRegistryUuidPathFor(actorAddress: String) = self.actorAddressRegistryUuidPathFor(actorAddress) - - override def getActorUuidRegistryNodePathFor(uuid: UUID) = self.actorUuidRegistryNodePathFor(uuid) - - override def getActorUuidRegistryRemoteAddressPathFor(uuid: UUID) = self.actorUuidRegistryRemoteAddressPathFor(uuid) - - override def getActorAddressToUuidsPathFor(actorAddress: String) = self.actorAddressToUuidsPathFor(actorAddress) - - override def getActorAddressToUuidsPathForWithNodeName(actorAddress: String, uuid: UUID) = self.actorAddressToUuidsPathFor(actorAddress, uuid) - } - - JMX.register(clusterJmxObjectName, clusterMBean) - - // FIXME need monitoring to lookup the cluster MBean dynamically - // Monitoring.registerLocalMBean(clusterJmxObjectName, clusterMBean) - } -} - -class MembershipChildListener(self: ClusterNode) extends IZkChildListener with ErrorHandler { - def handleChildChange(parentPath: 
String, currentChilds: JList[String]) { - withErrorHandler { - if (!self.isShutdown) { - if (currentChilds ne null) { - val currentClusterNodes = currentChilds.toList - if (!currentClusterNodes.isEmpty) EventHandler.debug(this, - "MembershipChildListener at [%s] has children [%s]" - .format(self.nodeAddress.nodeName, currentClusterNodes.mkString(" "))) - - // take a snapshot of the old cluster nodes and then update the list with the current connected nodes in the cluster - val oldClusterNodes = self.locallyCachedMembershipNodes.toArray.toSet.asInstanceOf[Set[String]] - self.locallyCachedMembershipNodes.clear() - currentClusterNodes foreach (self.locallyCachedMembershipNodes.add) - - val newlyConnectedMembershipNodes = (Set(currentClusterNodes: _*) diff oldClusterNodes).toList - val newlyDisconnectedMembershipNodes = (oldClusterNodes diff Set(currentClusterNodes: _*)).toList - - // update the connections with the new set of cluster nodes - val disconnectedConnections = self.connectToAllNewlyArrivedMembershipNodesInCluster(newlyConnectedMembershipNodes, newlyDisconnectedMembershipNodes) - - // if node(s) left cluster then migrate actors residing on the failed node - if (!newlyDisconnectedMembershipNodes.isEmpty) { - self.migrateActorsOnFailedNodes(newlyDisconnectedMembershipNodes, currentClusterNodes, oldClusterNodes.toList, disconnectedConnections) - } - - // publish NodeConnected and NodeDisconnect events to the listeners - newlyConnectedMembershipNodes foreach (node ⇒ self.publish(NodeConnected(node))) - newlyDisconnectedMembershipNodes foreach { node ⇒ - self.publish(NodeDisconnected(node)) - // remove metrics of a disconnected node from ZK and local cache - self.metricsManager.removeNodeMetrics(node) - } - } - } - } - } -} - -class StateListener(self: ClusterNode) extends IZkStateListener { - def handleStateChanged(state: KeeperState) { - state match { - case KeeperState.SyncConnected ⇒ - EventHandler.debug(this, "Cluster node [%s] - Connected".format(self.nodeAddress)) - self.publish(ThisNode.Connected) - case KeeperState.Disconnected ⇒ - EventHandler.debug(this, "Cluster node [%s] - Disconnected".format(self.nodeAddress)) - self.publish(ThisNode.Disconnected) - case KeeperState.Expired ⇒ - EventHandler.debug(this, "Cluster node [%s] - Expired".format(self.nodeAddress)) - self.publish(ThisNode.Expired) - } - } - - /** - * Re-initialize after the zookeeper session has expired and a new session has been created. - */ - def handleNewSession() { - EventHandler.debug(this, "Session expired re-initializing node [%s]".format(self.nodeAddress)) - self.boot() - self.publish(NewSession) - } -} - -trait ErrorHandler { - def withErrorHandler[T](body: ⇒ T) = { - try { - ignore[ZkInterruptedException](body) // FIXME Is it good to ignore ZkInterruptedException? If not, how should we handle it? - } catch { - case e: Throwable ⇒ - EventHandler.error(e, this, e.toString) - throw e - } - } -} - -object RemoteClusterDaemon { - val Address = "akka-cluster-daemon".intern - - // FIXME configure computeGridDispatcher to what? - val computeGridDispatcher = Dispatchers.newDispatcher("akka:compute-grid").build -} - -/** - * Internal "daemon" actor for cluster internal communication. - * - * It acts as the brain of the cluster that responds to cluster events (messages) and undertakes action. 
- */ -class RemoteClusterDaemon(cluster: ClusterNode) extends Actor { - - import RemoteClusterDaemon._ - import Cluster._ - - override def preRestart(reason: Throwable, msg: Option[Any]) { - EventHandler.debug(this, "RemoteClusterDaemon failed due to [%s], restarting...".format(reason)) - } - - def receive: Receive = { - case message: RemoteSystemDaemonMessageProtocol ⇒ - EventHandler.debug(this, - "Received command [\n%s] to RemoteClusterDaemon on node [%s]".format(message, cluster.nodeAddress.nodeName)) - - message.getMessageType match { - case USE ⇒ handleUse(message) - case RELEASE ⇒ handleRelease(message) - case STOP ⇒ cluster.shutdown() - case DISCONNECT ⇒ cluster.disconnect() - case RECONNECT ⇒ cluster.reconnect() - case RESIGN ⇒ cluster.resign() - case FAIL_OVER_CONNECTIONS ⇒ handleFailover(message) - case FUNCTION_FUN0_UNIT ⇒ handle_fun0_unit(message) - case FUNCTION_FUN0_ANY ⇒ handle_fun0_any(message) - case FUNCTION_FUN1_ARG_UNIT ⇒ handle_fun1_arg_unit(message) - case FUNCTION_FUN1_ARG_ANY ⇒ handle_fun1_arg_any(message) - //TODO: should we not deal with unrecognized message types? - } - - case unknown ⇒ EventHandler.warning(this, "Unknown message [%s]".format(unknown)) - } - - def handleRelease(message: RemoteProtocol.RemoteSystemDaemonMessageProtocol) { - if (message.hasActorUuid) { - cluster.actorAddressForUuid(uuidProtocolToUuid(message.getActorUuid)) foreach { address ⇒ - cluster.release(address) - } - } else if (message.hasActorAddress) { - cluster release message.getActorAddress - } else { - EventHandler.warning(this, - "Neither 'uuid' nor 'actorAddress' is specified, ignoring remote cluster daemon command [%s]".format(message)) - } - } - - def handleUse(message: RemoteProtocol.RemoteSystemDaemonMessageProtocol) { - def deserializeMessages(entriesAsBytes: Vector[Array[Byte]]): Vector[AnyRef] = { - import akka.cluster.RemoteProtocol._ - import akka.cluster.MessageSerializer - - entriesAsBytes map { bytes ⇒ - val messageBytes = - if (Cluster.shouldCompressData) LZF.uncompress(bytes) - else bytes - MessageSerializer.deserialize(MessageProtocol.parseFrom(messageBytes), None) - } - } - - def actorOfRefToUseForReplay(snapshotAsBytes: Option[Array[Byte]], actorAddress: String, newActorRef: LocalActorRef): ActorRef = { - snapshotAsBytes match { - - // we have a new actor ref - the snapshot - case Some(bytes) ⇒ - // stop the new actor ref and use the snapshot instead - //TODO: What if that actor already has been retrieved and is being used?? - //So do we have a race here?
- cluster.remoteService.unregister(actorAddress) - - // deserialize the snapshot actor ref and register it as remote actor - val uncompressedBytes = - if (Cluster.shouldCompressData) LZF.uncompress(bytes) - else bytes - - val snapshotActorRef = fromBinary(uncompressedBytes, newActorRef.uuid) - cluster.remoteService.register(actorAddress, snapshotActorRef) - - // FIXME we should call 'stop()' here (to GC the actor), but can't since that will currently - //shut down the TransactionLog for this UUID - since both this actor and the new snapshotActorRef - //have the same UUID (which they should) - //newActorRef.stop() - - snapshotActorRef - - // we have no snapshot - use the new actor ref - case None ⇒ - newActorRef - } - } - - try { - if (message.hasActorAddress) { - val actorAddress = message.getActorAddress - cluster.serializerForActor(actorAddress) foreach { serializer ⇒ - cluster.use(actorAddress, serializer) foreach { newActorRef ⇒ - cluster.remoteService.register(actorAddress, newActorRef) - - if (message.hasReplicateActorFromUuid) { - // replication is used - fetch the messages and replay them - val replicateFromUuid = uuidProtocolToUuid(message.getReplicateActorFromUuid) - val deployment = Deployer.deploymentFor(actorAddress) - val replicationScheme = DeploymentConfig.replicationSchemeFor(deployment).getOrElse( - throw new IllegalStateException( - "Actor [" + actorAddress + "] should have been configured as a replicated actor but could not find its ReplicationScheme")) - val isWriteBehind = DeploymentConfig.isWriteBehindReplication(replicationScheme) - - try { - // get the transaction log for the actor UUID - val readonlyTxLog = TransactionLog.logFor(replicateFromUuid.toString, isWriteBehind, replicationScheme) - - // get the latest snapshot (Option[Array[Byte]]) and all the subsequent messages (Array[Byte]) - val (snapshotAsBytes, entriesAsBytes) = readonlyTxLog.latestSnapshotAndSubsequentEntries - - // deserialize and restore actor snapshot. This call will automatically recreate a transaction log. - val actorRef = actorOfRefToUseForReplay(snapshotAsBytes, actorAddress, newActorRef) - - // deserialize the messages - val messages: Vector[AnyRef] = deserializeMessages(entriesAsBytes) - - EventHandler.info(this, "Replaying [%s] messages to actor [%s]".format(messages.size, actorAddress)) - - // replay all messages - messages foreach { message ⇒ - EventHandler.debug(this, "Replaying message [%s] to actor [%s]".format(message, actorAddress)) - - // FIXME how to handle '?' messages? - // We can *not* replay them with the correct semantics. Should we: - // 1. Ignore/drop them and log warning? - // 2. Throw exception when about to log them? - // 3. Other? - actorRef ! message - } - - } catch { - case e: Throwable ⇒ - EventHandler.error(e, this, e.toString) - throw e - } - } - } - } - } else { - EventHandler.error(this, "Actor 'address' is not defined, ignoring remote cluster daemon command [%s]".format(message)) - } - - self.reply(Success(cluster.remoteServerAddress.toString)) - } catch { - case error: Throwable ⇒ - self.reply(Failure(error)) - throw error - } - } - - def handle_fun0_unit(message: RemoteProtocol.RemoteSystemDaemonMessageProtocol) { - new LocalActorRef( - Props( - self ⇒ { - case f: Function0[_] ⇒ try { f() } finally { self.stop() } - }).copy(dispatcher = computeGridDispatcher), Props.randomName, systemService = true) ! 
payloadFor(message, classOf[Function0[Unit]]) - } - - def handle_fun0_any(message: RemoteProtocol.RemoteSystemDaemonMessageProtocol) { - new LocalActorRef( - Props( - self ⇒ { - case f: Function0[_] ⇒ try { self.reply(f()) } finally { self.stop() } - }).copy(dispatcher = computeGridDispatcher), Props.randomName, systemService = true) forward payloadFor(message, classOf[Function0[Any]]) - } - - def handle_fun1_arg_unit(message: RemoteProtocol.RemoteSystemDaemonMessageProtocol) { - new LocalActorRef( - Props( - self ⇒ { - case (fun: Function[_, _], param: Any) ⇒ try { fun.asInstanceOf[Any ⇒ Unit].apply(param) } finally { self.stop() } - }).copy(dispatcher = computeGridDispatcher), Props.randomName, systemService = true) ! payloadFor(message, classOf[Tuple2[Function1[Any, Unit], Any]]) - } - - def handle_fun1_arg_any(message: RemoteProtocol.RemoteSystemDaemonMessageProtocol) { - new LocalActorRef( - Props( - self ⇒ { - case (fun: Function[_, _], param: Any) ⇒ try { self.reply(fun.asInstanceOf[Any ⇒ Any](param)) } finally { self.stop() } - }).copy(dispatcher = computeGridDispatcher), Props.randomName, systemService = true) forward payloadFor(message, classOf[Tuple2[Function1[Any, Any], Any]]) - } - - def handleFailover(message: RemoteProtocol.RemoteSystemDaemonMessageProtocol) { - val (from, to) = payloadFor(message, classOf[(InetSocketAddress, InetSocketAddress)]) - cluster.failOverClusterActorRefConnections(from, to) - } - - private def payloadFor[T](message: RemoteSystemDaemonMessageProtocol, clazz: Class[T]): T = { - Serialization.deserialize(message.getPayload.toByteArray, clazz, None) match { - case Left(error) ⇒ throw error - case Right(instance) ⇒ instance.asInstanceOf[T] - } - } -} diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterActorRef.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterActorRef.scala deleted file mode 100644 index 29f56a5966..0000000000 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterActorRef.scala +++ /dev/null @@ -1,129 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -package akka.cluster - -import akka.actor._ -import akka.util._ -import ReflectiveAccess._ -import akka.routing._ -import akka.cluster._ -import FailureDetector._ -import akka.event.EventHandler -import akka.config.ConfigurationException - -import java.net.InetSocketAddress -import java.util.concurrent.atomic.AtomicReference - -import collection.immutable.Map -import annotation.tailrec - -/** - * ClusterActorRef factory and locator. 
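The companion object that follows provides both construction (newRef) and registry lookup (actorFor); a hypothetical lookup by a client:

    val existing: Option[ActorRef] = ClusterActorRef.actorFor("hello-service")
    existing foreach (_ ! "ping")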
- */ -object ClusterActorRef { - import FailureDetectorType._ - import RouterType._ - - def newRef( - actorAddress: String, - routerType: RouterType, - failureDetectorType: FailureDetectorType, - timeout: Long): ClusterActorRef = { - - val routerFactory: () ⇒ Router = routerType match { - case Direct ⇒ () ⇒ new DirectRouter - case Random ⇒ () ⇒ new RandomRouter - case RoundRobin ⇒ () ⇒ new RoundRobinRouter - case LeastCPU ⇒ sys.error("Router LeastCPU not supported yet") - case LeastRAM ⇒ sys.error("Router LeastRAM not supported yet") - case LeastMessages ⇒ sys.error("Router LeastMessages not supported yet") - case Custom ⇒ sys.error("Router Custom not supported yet") - } - - val failureDetectorFactory: (Map[InetSocketAddress, ActorRef]) ⇒ FailureDetector = failureDetectorType match { - case RemoveConnectionOnFirstFailureLocalFailureDetector ⇒ - (connections: Map[InetSocketAddress, ActorRef]) ⇒ new RemoveConnectionOnFirstFailureLocalFailureDetector(connections.values) - - case RemoveConnectionOnFirstFailureRemoteFailureDetector ⇒ - (connections: Map[InetSocketAddress, ActorRef]) ⇒ new RemoveConnectionOnFirstFailureRemoteFailureDetector(connections) - - case CustomFailureDetector(implClass) ⇒ - (connections: Map[InetSocketAddress, ActorRef]) ⇒ FailureDetector.createCustomFailureDetector(implClass, connections) - } - - new ClusterActorRef( - RoutedProps() - .withTimeout(timeout) - .withRouter(routerFactory) - .withFailureDetector(failureDetectorFactory), - actorAddress) - } - - /** - * Finds the cluster actor reference that has a specific address. - */ - def actorFor(address: String): Option[ActorRef] = - Actor.registry.local.actorFor(Address.clusterActorRefPrefix + address) - - private[cluster] def createRemoteActorRef(actorAddress: String, inetSocketAddress: InetSocketAddress) = { - RemoteActorRef(inetSocketAddress, actorAddress, Actor.TIMEOUT, None) - } -} - -/** - * ActorRef representing one or many instances of a clustered, load-balanced and sometimes replicated actor - * where the instances can reside on other nodes in the cluster.
- */ -private[akka] class ClusterActorRef(props: RoutedProps, val address: String) extends AbstractRoutedActorRef(props) { - - import ClusterActorRef._ - - ClusterModule.ensureEnabled() - - val addresses = Cluster.node.inetSocketAddressesForActor(address) - - EventHandler.debug(this, - "Checking out cluster actor ref with address [%s] and router [%s] on [%s] connected to [\n\t%s]" - .format(address, router, Cluster.node.remoteServerAddress, addresses.map(_._2).mkString("\n\t"))) - - addresses foreach { - case (_, address) ⇒ Cluster.node.clusterActorRefs.put(address, this) - } - - val connections: FailureDetector = { - val remoteConnections = (Map[InetSocketAddress, ActorRef]() /: addresses) { - case (map, (uuid, inetSocketAddress)) ⇒ - map + (inetSocketAddress -> createRemoteActorRef(address, inetSocketAddress)) - } - props.failureDetectorFactory(remoteConnections) - } - - router.init(connections) - - def nrOfConnections: Int = connections.size - - private[akka] def failOver(from: InetSocketAddress, to: InetSocketAddress) { - connections.failOver(from, to) - } - - def stop() { - synchronized { - if (_status == ActorRefInternals.RUNNING) { - Actor.registry.local.unregisterClusterActorRef(this) - _status = ActorRefInternals.SHUTDOWN - postMessageToMailbox(Terminate, None) - - // FIXME here we need to fire off Actor.cluster.remove(address) (which needs to be properly implemented first, see ticket) - connections.stopAll() - } - } - } - - /* If you start me up */ - if (_status == ActorRefInternals.UNSTARTED) { - _status = ActorRefInternals.RUNNING - Actor.registry.local.registerClusterActorRef(this) - } -} diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterDeployer.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterDeployer.scala deleted file mode 100644 index 61a393360c..0000000000 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterDeployer.scala +++ /dev/null @@ -1,205 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -package akka.cluster - -import akka.actor.DeploymentConfig._ -import akka.actor._ -import akka.event.EventHandler -import akka.config.Config -import akka.util.Switch -import akka.util.Helpers._ -import akka.cluster.zookeeper.AkkaZkClient - -import org.apache.zookeeper.CreateMode -import org.apache.zookeeper.recipes.lock.{ WriteLock, LockListener } - -import org.I0Itec.zkclient.exception.{ ZkNoNodeException, ZkNodeExistsException } - -import scala.collection.immutable.Seq -import scala.collection.JavaConversions.collectionAsScalaIterable - -import java.util.concurrent.{ CountDownLatch, TimeUnit } - -/** - * A ClusterDeployer is responsible for deploying a Deploy. 
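A typical interaction with the deployer that follows (the address is illustrative; the call consults the local cache first, then the cluster):

    val plan: Option[Deploy] = ClusterDeployer.lookupDeploymentFor("hello-service")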
- */ -object ClusterDeployer extends ActorDeployer { - val clusterName = Cluster.name - val nodeName = Config.nodename - val clusterPath = "/%s" format clusterName - - val deploymentPath = clusterPath + "/deployment" - val deploymentAddressPath = deploymentPath + "/%s" - - val deploymentCoordinationPath = clusterPath + "/deployment-coordination" - val deploymentInProgressLockPath = deploymentCoordinationPath + "/in-progress" - val isDeploymentCompletedInClusterLockPath = deploymentCoordinationPath + "/completed" // should not be part of basePaths - - val basePaths = List(clusterPath, deploymentPath, deploymentCoordinationPath, deploymentInProgressLockPath) - - private val isConnected = new Switch(false) - private val deploymentCompleted = new CountDownLatch(1) - - private val zkClient = new AkkaZkClient( - Cluster.zooKeeperServers, - Cluster.sessionTimeout, - Cluster.connectionTimeout, - Cluster.defaultZooKeeperSerializer) - - private val deploymentInProgressLockListener = new LockListener { - def lockAcquired() { - EventHandler.info(this, "Clustered deployment started") - } - - def lockReleased() { - EventHandler.info(this, "Clustered deployment completed") - deploymentCompleted.countDown() - } - } - - private val deploymentInProgressLock = new WriteLock( - zkClient.connection.getZookeeper, - deploymentInProgressLockPath, - null, - deploymentInProgressLockListener) - - private val systemDeployments: List[Deploy] = Nil - - def shutdown() { - isConnected switchOff { - // undeploy all - try { - for { - child ← collectionAsScalaIterable(zkClient.getChildren(deploymentPath)) - deployment ← zkClient.readData(deploymentAddressPath.format(child)).asInstanceOf[Deploy] - } zkClient.delete(deploymentAddressPath.format(deployment.address)) - - invalidateDeploymentInCluster() - } catch { - case e: Exception ⇒ - handleError(new DeploymentException("Could not undeploy all deployment data in ZooKeeper due to: " + e)) - } - - // shut down ZooKeeper client - zkClient.close() - EventHandler.info(this, "ClusterDeployer shut down successfully") - } - } - - def lookupDeploymentFor(address: String): Option[Deploy] = ensureRunning { - LocalDeployer.lookupDeploymentFor(address) match { // try local cache - case Some(deployment) ⇒ // in local cache - deployment - case None ⇒ // not in cache, check cluster - val deployment = - try { - Some(zkClient.readData(deploymentAddressPath.format(address)).asInstanceOf[Deploy]) - } catch { - case e: ZkNoNodeException ⇒ None - case e: Exception ⇒ - EventHandler.warning(this, e.toString) - None - } - deployment foreach (LocalDeployer.deploy(_)) // cache it in local cache - deployment - } - } - - def fetchDeploymentsFromCluster: List[Deploy] = ensureRunning { - val addresses = - try { - zkClient.getChildren(deploymentPath).toList - } catch { - case e: ZkNoNodeException ⇒ List[String]() - } - val deployments = addresses map { address ⇒ - zkClient.readData(deploymentAddressPath.format(address)).asInstanceOf[Deploy] - } - EventHandler.info(this, "Fetched deployment plans from cluster [\n\t%s\n]" format deployments.mkString("\n\t")) - deployments - } - - private[akka] def init(deployments: Seq[Deploy]) { - isConnected switchOn { - EventHandler.info(this, "Initializing ClusterDeployer") - - basePaths foreach { path ⇒ - try { - ignore[ZkNodeExistsException](zkClient.create(path, null, CreateMode.PERSISTENT)) - EventHandler.debug(this, "Created ZooKeeper path for deployment [%s]".format(path)) - } catch { - case e ⇒ - val error = new DeploymentException(e.toString) - 
EventHandler.error(error, this) - throw error - } - } - - val allDeployments = deployments ++ systemDeployments - - if (!isDeploymentCompletedInCluster) { - if (deploymentInProgressLock.lock()) { - // try to be the one doing the clustered deployment - EventHandler.info(this, "Pushing clustered deployment plans [\n\t" + allDeployments.mkString("\n\t") + "\n]") - allDeployments foreach (deploy(_)) // deploy - markDeploymentCompletedInCluster() - deploymentInProgressLock.unlock() // signal deployment complete - - } else { - deploymentCompleted.await(30, TimeUnit.SECONDS) // wait until deployment is completed by other "master" node - } - } - - // fetch clustered deployments and deploy them locally - fetchDeploymentsFromCluster foreach (LocalDeployer.deploy(_)) - } - } - - private[akka] def deploy(deployment: Deploy) { - ensureRunning { - LocalDeployer.deploy(deployment) - deployment match { - case Deploy(_, _, _, _, Local) | Deploy(_, _, _, _, _: Local) ⇒ //TODO LocalDeployer.deploy(deployment)?? - case Deploy(address, recipe, routing, _, _) ⇒ // cluster deployment - /*TODO recipe foreach { r ⇒ - Deployer.newClusterActorRef(() ⇒ Actor.actorOf(r.implementationClass), address, deployment) - }*/ - val path = deploymentAddressPath.format(address) - try { - ignore[ZkNodeExistsException](zkClient.create(path, null, CreateMode.PERSISTENT)) - zkClient.writeData(path, deployment) - } catch { - case e: NullPointerException ⇒ - handleError(new DeploymentException( - "Could not store deployment data [" + deployment + "] in ZooKeeper since client session is closed")) - case e: Exception ⇒ - handleError(new DeploymentException( - "Could not store deployment data [" + deployment + "] in ZooKeeper due to: " + e)) - } - } - } - } - - private def markDeploymentCompletedInCluster() { - ignore[ZkNodeExistsException](zkClient.create(isDeploymentCompletedInClusterLockPath, null, CreateMode.PERSISTENT)) - } - - private def isDeploymentCompletedInCluster = zkClient.exists(isDeploymentCompletedInClusterLockPath) - - // FIXME in future - add watch to this path to be able to trigger redeployment, and use this method to trigger redeployment - private def invalidateDeploymentInCluster() { - ignore[ZkNoNodeException](zkClient.delete(isDeploymentCompletedInClusterLockPath)) - } - - private def ensureRunning[T](body: ⇒ T): T = { - if (isConnected.isOn) body - else throw new IllegalStateException("ClusterDeployer is not running") - } - - private[akka] def handleError(e: Throwable): Nothing = { - EventHandler.error(e, this, e.toString) - throw e - } -} diff --git a/akka-remote/src/main/scala/akka/remote/Gossiper.scala b/akka-cluster/src/main/scala/akka/cluster/Gossiper.scala similarity index 97% rename from akka-remote/src/main/scala/akka/remote/Gossiper.scala rename to akka-cluster/src/main/scala/akka/cluster/Gossiper.scala index 55165f0891..e234d6e158 100644 --- a/akka-remote/src/main/scala/akka/remote/Gossiper.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Gossiper.scala @@ -2,13 +2,15 @@ * Copyright (C) 2009-2012 Typesafe Inc. 
*/ -package akka.remote +package akka.cluster import akka.actor._ import akka.actor.Status._ +import akka.remote._ import akka.event.Logging -import akka.util._ import akka.dispatch.Await +import akka.pattern.ask +import akka.util._ import akka.config.ConfigurationException import java.util.concurrent.atomic.{ AtomicReference, AtomicBoolean } @@ -20,9 +22,6 @@ import System.{ currentTimeMillis ⇒ newTimestamp } import scala.collection.immutable.{ Map, SortedSet } import scala.annotation.tailrec -import akka.dispatch.Await -import akka.pattern.ask - import com.google.protobuf.ByteString /** @@ -136,7 +135,7 @@ case class Gossiper(remote: RemoteActorRefProvider, system: ActorSystemImpl) { private val memberFingerprint = address.## private val serialization = remote.serialization - private val failureDetector = remote.failureDetector + private val failureDetector = new AccrualFailureDetector(remoteSettings.FailureDetectorThreshold, remoteSettings.FailureDetectorMaxSampleSize, system) private val initialDelayForGossip = remoteSettings.InitialDelayForGossip private val gossipFrequency = remoteSettings.GossipFrequency @@ -154,12 +153,14 @@ case class Gossiper(remote: RemoteActorRefProvider, system: ActorSystemImpl) { private val isRunning = new AtomicBoolean(true) private val log = Logging(system, "Gossiper") private val random = SecureRandom.getInstance("SHA1PRNG") - private val connectionManager = new RemoteConnectionManager(system, remote, Map.empty[Address, ActorRef]) // Is it right to put this guy under the /system path or should we have a top-level /cluster or something else...? private val clusterDaemon = system.systemActorOf(Props(new ClusterDaemon(system, this)), "cluster") private val state = new AtomicReference[State](State(currentGossip = newGossip())) + // FIXME manage connections in some other way so we can delete the RemoteConnectionManager (SINCE IT SUCKS!!!) + private val connectionManager = new RemoteConnectionManager(system, remote, failureDetector, Map.empty[Address, ActorRef]) + log.info("Starting cluster Gossiper...") // join the cluster by connecting to one of the seed members and retrieve current cluster state (Gossip) diff --git a/akka-cluster/src/main/scala/akka/cluster/LocalCluster.scala b/akka-cluster/src/main/scala/akka/cluster/LocalCluster.scala deleted file mode 100644 index d8a0ac6027..0000000000 --- a/akka-cluster/src/main/scala/akka/cluster/LocalCluster.scala +++ /dev/null @@ -1,105 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. 
- */ -package akka.cluster - -import akka.config.Config -import Config._ -import akka.util._ -import Helpers._ -import akka.actor._ -import Actor._ -import akka.event.EventHandler -import akka.cluster.zookeeper._ - -import org.apache.zookeeper._ -import org.apache.zookeeper.Watcher.Event._ -import org.apache.zookeeper.data.Stat -import org.apache.zookeeper.recipes.lock.{ WriteLock, LockListener } - -import org.I0Itec.zkclient._ -import org.I0Itec.zkclient.serialize._ -import org.I0Itec.zkclient.exception._ - -import java.util.concurrent.atomic.{ AtomicBoolean, AtomicReference } - -object LocalCluster { - val clusterDirectory = config.getString("akka.cluster.log-directory", "_akka_cluster") - val clusterDataDirectory = clusterDirectory + "/data" - val clusterLogDirectory = clusterDirectory + "/log" - - val clusterName = Config.clusterName - val nodename = Config.nodename - val zooKeeperServers = config.getString("akka.cluster.zookeeper-server-addresses", "localhost:2181") - val sessionTimeout = Duration(config.getInt("akka.cluster.session-timeout", 60), TIME_UNIT).toMillis.toInt - val connectionTimeout = Duration(config.getInt("akka.cluster.connection-timeout", 60), TIME_UNIT).toMillis.toInt - val defaultZooKeeperSerializer = new SerializableSerializer - - val zkServer = new AtomicReference[Option[ZkServer]](None) - - lazy val zkClient = new AkkaZkClient(zooKeeperServers, sessionTimeout, connectionTimeout, defaultZooKeeperSerializer) - - /** - * Looks up the local hostname. - */ - def lookupLocalhostName = NetworkUtil.getLocalhostName - - /** - * Starts up a local ZooKeeper server. Should only be used for testing purposes. - */ - def startLocalCluster(): ZkServer = - startLocalCluster(clusterDataDirectory, clusterLogDirectory, 2181, 5000) - - /** - * Starts up a local ZooKeeper server. Should only be used for testing purposes. - */ - def startLocalCluster(port: Int, tickTime: Int): ZkServer = - startLocalCluster(clusterDataDirectory, clusterLogDirectory, port, tickTime) - - /** - * Starts up a local ZooKeeper server. Should only be used for testing purposes. - */ - def startLocalCluster(tickTime: Int): ZkServer = - startLocalCluster(clusterDataDirectory, clusterLogDirectory, 2181, tickTime) - - /** - * Starts up a local ZooKeeper server. Should only be used for testing purposes. - */ - def startLocalCluster(dataPath: String, logPath: String): ZkServer = - startLocalCluster(dataPath, logPath, 2181, 500) - - /** - * Starts up a local ZooKeeper server. Should only be used for testing purposes. - */ - def startLocalCluster(dataPath: String, logPath: String, port: Int, tickTime: Int): ZkServer = { - try { - val zk = AkkaZooKeeper.startLocalServer(dataPath, logPath, port, tickTime) - zkServer.set(Some(zk)) - zk - } catch { - case e: Throwable ⇒ - EventHandler.error(e, this, "Could not start local ZooKeeper cluster") - throw e - } - } - - /** - * Shut down the local ZooKeeper server. 
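- *
- * Typical test usage (illustrative only; runTests() is a stand-in):
- * {{{
- * val zkServer = LocalCluster.startLocalCluster()
- * try runTests() finally LocalCluster.shutdownLocalCluster()
- * }}}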
- */ - def shutdownLocalCluster() { - withPrintStackTraceOnError { - EventHandler.debug(this, "Shuts down local cluster") - zkServer.getAndSet(None).foreach(_.shutdown()) - } - } - - def createQueue(rootPath: String, blocking: Boolean = true) = - new ZooKeeperQueue(zkClient, rootPath, blocking) - - def barrier(name: String, count: Int): ZooKeeperBarrier = - ZooKeeperBarrier(zkClient, clusterName, name, nodename, count) - - def barrier(name: String, count: Int, timeout: Duration): ZooKeeperBarrier = - ZooKeeperBarrier(zkClient, clusterName, name, nodename, count, timeout) -} - diff --git a/akka-remote/src/main/scala/akka/remote/RemoteConnectionManager.scala b/akka-cluster/src/main/scala/akka/cluster/RemoteConnectionManager.scala similarity index 96% rename from akka-remote/src/main/scala/akka/remote/RemoteConnectionManager.scala rename to akka-cluster/src/main/scala/akka/cluster/RemoteConnectionManager.scala index fd2a9135d7..63020367a5 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteConnectionManager.scala +++ b/akka-cluster/src/main/scala/akka/cluster/RemoteConnectionManager.scala @@ -2,9 +2,10 @@ * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.remote +package akka.cluster import akka.actor._ +import akka.remote._ import akka.routing._ import akka.event.Logging @@ -19,6 +20,7 @@ import java.util.concurrent.atomic.AtomicReference class RemoteConnectionManager( system: ActorSystemImpl, remote: RemoteActorRefProvider, + failureDetector: AccrualFailureDetector, initialConnections: Map[Address, ActorRef] = Map.empty[Address, ActorRef]) extends ConnectionManager { @@ -30,8 +32,6 @@ class RemoteConnectionManager( def iterable: Iterable[ActorRef] = connections.values } - def failureDetector = remote.failureDetector - private val state: AtomicReference[State] = new AtomicReference[State](newState()) /** @@ -145,6 +145,6 @@ class RemoteConnectionManager( } } - private[remote] def newConnection(remoteAddress: Address, actorPath: ActorPath) = + private[cluster] def newConnection(remoteAddress: Address, actorPath: ActorPath) = new RemoteActorRef(remote, remote.transport, actorPath, Nobody) } diff --git a/akka-cluster/src/main/scala/akka/cluster/TransactionLog.scala b/akka-cluster/src/main/scala/akka/cluster/TransactionLog.scala deleted file mode 100644 index ce9eb300f5..0000000000 --- a/akka-cluster/src/main/scala/akka/cluster/TransactionLog.scala +++ /dev/null @@ -1,604 +0,0 @@ -package akka.cluster - -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -import org.apache.bookkeeper.client.{ BookKeeper, LedgerHandle, LedgerEntry, BKException, AsyncCallback } -import org.apache.zookeeper.CreateMode - -import org.I0Itec.zkclient.exception._ - -import akka.AkkaException -import akka.config._ -import Config._ -import akka.util._ -import akka.actor._ -import DeploymentConfig.ReplicationScheme -import akka.event.EventHandler -import akka.dispatch.{ DefaultPromise, Promise, MessageInvocation } -import akka.cluster.zookeeper._ -import akka.serialization.ActorSerialization._ -import akka.serialization.Compression.LZF - -import java.util.Enumeration - -// FIXME allow user to choose dynamically between 'async' and 'sync' tx logging (asyncAddEntry(byte[] data, AddCallback cb, Object ctx)) -// FIXME clean up old entries in log after doing a snapshot - -class ReplicationException(message: String, cause: Throwable = null) extends AkkaException(message) { - def this(msg: String) = this(msg, null) -} - -/** - * A TransactionLog makes chunks of data durable. 
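- *
- * Write-path sketch (illustrative only; the id is made up, and 'replicationScheme' and 'bytes'
- * stand for whatever ReplicationScheme and payload are in use):
- * {{{
- * TransactionLog.start()
- * val txLog = TransactionLog.newLogFor("actor-uuid", isAsync = false, replicationScheme)
- * txLog.recordEntry(bytes)
- * txLog.close()
- * }}}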
- */ -class TransactionLog private ( - ledger: LedgerHandle, - val id: String, - val isAsync: Boolean, - replicationScheme: ReplicationScheme) { - - import TransactionLog._ - - val logId = ledger.getId - val txLogPath = transactionLogPath(id) - val snapshotPath = txLogPath + "/snapshot" - - private val isOpen = new Switch(true) - - /** - * Record an Actor message invocation. - * - * @param invocation the MessageInvocation to record - * @param actorRef the LocalActorRef that received the message. - * @throws ReplicationException if the TransactionLog already is closed. - */ - def recordEntry(invocation: MessageInvocation, actorRef: LocalActorRef) { - val entryId = ledger.getLastAddPushed + 1 - val needsSnapshot = entryId != 0 && (entryId % snapshotFrequency) == 0 - - if (needsSnapshot) { - //todo: could it be that the message is never persisted when a snapshot is added? - val bytes = toBinary(actorRef, false, replicationScheme) - recordSnapshot(bytes) - } else { - val bytes = MessageSerializer.serialize(invocation.message.asInstanceOf[AnyRef]).toByteArray - recordEntry(bytes) - } - } - - /** - * Record an entry. - * - * @param entry the entry in byte form to record. - * @throws ReplicationException if the TransactionLog already is closed. - */ - def recordEntry(entry: Array[Byte]) { - if (isOpen.isOn) { - val entryBytes = - if (shouldCompressData) LZF.compress(entry) - else entry - - try { - if (isAsync) { - ledger.asyncAddEntry( - entryBytes, - new AsyncCallback.AddCallback { - def addComplete(returnCode: Int, ledgerHandle: LedgerHandle, entryId: Long, ctx: AnyRef) { - handleReturnCode(returnCode) - EventHandler.debug(this, "Writing entry [%s] to log [%s]".format(entryId, logId)) - } - }, - null) - } else { - handleReturnCode(ledger.addEntry(entryBytes)) - val entryId = ledger.getLastAddPushed - EventHandler.debug(this, "Writing entry [%s] to log [%s]".format(entryId, logId)) - } - } catch { - case e: Throwable ⇒ handleError(e) - } - } else transactionClosedError - } - - /** - * Record a snapshot. - * - * @param snapshot the snapshot in byteform to record. - * @throws ReplicationException if the TransactionLog already is closed. - */ - def recordSnapshot(snapshot: Array[Byte]) { - if (isOpen.isOn) { - val snapshotBytes = - if (shouldCompressData) LZF.compress(snapshot) - else snapshot - - try { - if (isAsync) { - ledger.asyncAddEntry( - snapshotBytes, - new AsyncCallback.AddCallback { - def addComplete(returnCode: Int, ledgerHandle: LedgerHandle, snapshotId: Long, ctx: AnyRef) { - handleReturnCode(returnCode) - EventHandler.debug(this, "Writing snapshot to log [%s]".format(snapshotId)) - storeSnapshotMetaDataInZooKeeper(snapshotId) - } - }, - null) - } else { - //todo: could this be racy, since writing the snapshot itself and storing the snapsnot id, is not - //an atomic operation? - - //first store the snapshot. - handleReturnCode(ledger.addEntry(snapshotBytes)) - val snapshotId = ledger.getLastAddPushed - - //this is the location where all previous entries can be removed. - //TODO: how to remove data? - - EventHandler.debug(this, "Writing snapshot to log [%s]".format(snapshotId)) - //and now store the snapshot metadata. - storeSnapshotMetaDataInZooKeeper(snapshotId) - } - } catch { - case e: Throwable ⇒ handleError(e) - } - } else transactionClosedError - } - - /** - * Get all the entries for this transaction log. - * - * @throws ReplicationException if the TransactionLog already is closed. 
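- *
- * Recovery sketch (illustrative only; the two 'replay' helpers are stand-ins):
- * {{{
- * val (snapshot, entries) = txLog.latestSnapshotAndSubsequentEntries
- * snapshot foreach replaySnapshot
- * entries foreach replayEntry
- * }}}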
- */
-  def entries: Vector[Array[Byte]] = entriesInRange(0, ledger.getLastAddConfirmed)
-
-  /**
-   * Get the latest snapshot and all subsequent entries from this snapshot.
-   */
-  def latestSnapshotAndSubsequentEntries: (Option[Array[Byte]], Vector[Array[Byte]]) = {
-    latestSnapshotId match {
-      case Some(snapshotId) ⇒
-        EventHandler.debug(this, "Reading entries from snapshot id [%s] for log [%s]".format(snapshotId, logId))
-
-        val cursor = snapshotId + 1
-        val lastIndex = ledger.getLastAddConfirmed
-
-        val snapshot = Some(entriesInRange(snapshotId, snapshotId).head)
-
-        val entries =
-          if ((cursor - lastIndex) == 0) Vector.empty[Array[Byte]]
-          else entriesInRange(cursor, lastIndex)
-
-        (snapshot, entries)
-
-      case None ⇒
-        (None, entries)
-    }
-  }
-
-  /**
-   * Get a range of entries from 'from' to 'to' for this transaction log.
-   *
-   * @param from the first index of the range
-   * @param to the last index of the range (inclusive).
-   * @return a Vector containing Byte Arrays. Each element in the vector is a record.
-   * @throws IllegalArgumentException if 'from' or 'to' is negative, or if 'from' is bigger than 'to'.
-   * @throws ReplicationException if the TransactionLog already is closed.
-   */
-  def entriesInRange(from: Long, to: Long): Vector[Array[Byte]] = if (isOpen.isOn) {
-    try {
-      if (from < 0) throw new IllegalArgumentException("'from' index can't be negative [" + from + "]")
-      if (to < 0) throw new IllegalArgumentException("'to' index can't be negative [" + to + "]")
-      if (to < from) throw new IllegalArgumentException("'to' index can't be smaller than 'from' index [" + from + "," + to + "]")
-      EventHandler.debug(this, "Reading entries [%s -> %s] for log [%s]".format(from, to, logId))
-
-      if (isAsync) {
-        val future = Promise[Vector[Array[Byte]]]()
-        ledger.asyncReadEntries(
-          from, to,
-          new AsyncCallback.ReadCallback {
-            def readComplete(returnCode: Int, ledgerHandle: LedgerHandle, enumeration: Enumeration[LedgerEntry], ctx: AnyRef) {
-              val future = ctx.asInstanceOf[Promise[Vector[Array[Byte]]]]
-              val entries = toByteArrays(enumeration)
-
-              if (returnCode == BKException.Code.OK) future.success(entries)
-              else future.failure(BKException.create(returnCode))
-            }
-          },
-          future)
-        await(future)
-      } else {
-        toByteArrays(ledger.readEntries(from, to))
-      }
-    } catch {
-      case e: Throwable ⇒ handleError(e)
-    }
-  } else transactionClosedError
-
-  /**
-   * Get the last entry written to this transaction log.
-   *
-   * Returns -1 if there has never been an entry.
-   */
-  def latestEntryId: Long = ledger.getLastAddConfirmed
-
-  /**
-   * Get the id for the last snapshot written to this transaction log.
-   */
-  def latestSnapshotId: Option[Long] = {
-    try {
-      val snapshotId = zkClient.readData(snapshotPath).asInstanceOf[Long]
-      EventHandler.debug(this, "Retrieved latest snapshot id [%s] from transaction log [%s]".format(snapshotId, logId))
-      Some(snapshotId)
-    } catch {
-      case e: ZkNoNodeException ⇒ None
-      case e: Throwable ⇒ handleError(e)
-    }
-  }
-
-  /**
-   * Delete this transaction log: all entries as well as all metadata will be removed.
-   *
-   * TODO: Behavior unclear what happens when already deleted (what happens to the ledger).
-   * TODO: Behavior unclear what happens when already closed.
- */
-  def delete() {
-    if (isOpen.isOn) {
-      EventHandler.debug(this, "Deleting transaction log [%s]".format(logId))
-      try {
-        if (isAsync) {
-          bookieClient.asyncDeleteLedger(
-            logId,
-            new AsyncCallback.DeleteCallback {
-              def deleteComplete(returnCode: Int, ctx: AnyRef) {
-                handleReturnCode(returnCode)
-              }
-            },
-            null)
-        } else {
-          bookieClient.deleteLedger(logId)
-        }
-
-        //also remove everything else that belongs to this TransactionLog.
-        zkClient.delete(snapshotPath)
-        zkClient.delete(txLogPath)
-      } catch {
-        case e: Throwable ⇒ handleError(e)
-      }
-    }
-  }
-
-  /**
-   * Close this transaction log.
-   *
-   * If already closed, the call is ignored.
-   */
-  def close() {
-    isOpen switchOff {
-      EventHandler.debug(this, "Closing transaction log [%s]".format(logId))
-      try {
-        if (isAsync) {
-          ledger.asyncClose(
-            new AsyncCallback.CloseCallback {
-              def closeComplete(
-                returnCode: Int,
-                ledgerHandle: LedgerHandle,
-                ctx: AnyRef) {
-                handleReturnCode(returnCode)
-              }
-            },
-            null)
-        } else {
-          ledger.close()
-        }
-      } catch {
-        case e: Throwable ⇒ handleError(e)
-      }
-    }
-  }
-
-  private def toByteArrays(enumeration: Enumeration[LedgerEntry]): Vector[Array[Byte]] = {
-    var entries = Vector[Array[Byte]]()
-    while (enumeration.hasMoreElements) {
-      val bytes = enumeration.nextElement.getEntry
-      val entry =
-        if (shouldCompressData) LZF.uncompress(bytes)
-        else bytes
-      entries = entries :+ entry
-    }
-    entries
-  }
-
-  private def storeSnapshotMetaDataInZooKeeper(snapshotId: Long) {
-    if (isOpen.isOn) {
-      try {
-        zkClient.create(snapshotPath, null, CreateMode.PERSISTENT)
-      } catch {
-        case e: ZkNodeExistsException ⇒ {} // do nothing
-        case e: Throwable ⇒ handleError(e)
-      }
-
-      try {
-        zkClient.writeData(snapshotPath, snapshotId)
-      } catch {
-        case e: Throwable ⇒
-          handleError(new ReplicationException(
-            "Could not store transaction log snapshot meta-data in ZooKeeper for UUID [" + id + "]"))
-      }
-      EventHandler.debug(this, "Writing snapshot [%s] to log [%s]".format(snapshotId, logId))
-    } else transactionClosedError
-  }
-
-  private def handleReturnCode(block: ⇒ Long) {
-    val code = block.toInt
-    if (code == BKException.Code.OK) {} // all fine
-    else handleError(BKException.create(code))
-  }
-
-  private def transactionClosedError: Nothing = {
-    handleError(new ReplicationException(
-      "Transaction log [" + logId +
-        "] is closed. You need to open a new one with 'TransactionLog.logFor(id)'"))
-  }
-}
-
-/**
- * Manages the BookKeeper client, the ZooKeeper metadata and the lifecycle of transaction logs.
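- *
- * The settings read below, with their defaults (in akka.conf):
- * {{{
- * akka.cluster.replication.digest-type        = "CRC32"  # or "MAC"
- * akka.cluster.replication.password           = "secret"
- * akka.cluster.replication.ensemble-size     = 3
- * akka.cluster.replication.quorum-size       = 2
- * akka.cluster.replication.snapshot-frequency = 1000
- * akka.cluster.replication.timeout            = 30
- * }}}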
- */ -object TransactionLog { - - val zooKeeperServers = config.getString("akka.cluster.zookeeper-server-addresses", "localhost:2181") - val sessionTimeout = Duration(config.getInt("akka.cluster.session-timeout", 60), TIME_UNIT).toMillis.toInt - val connectionTimeout = Duration(config.getInt("akka.cluster.connection-timeout", 60), TIME_UNIT).toMillis.toInt - - val digestType = config.getString("akka.cluster.replication.digest-type", "CRC32") match { - case "CRC32" ⇒ BookKeeper.DigestType.CRC32 - case "MAC" ⇒ BookKeeper.DigestType.MAC - case unknown ⇒ throw new ConfigurationException( - "akka.cluster.replication.digest-type is invalid [" + unknown + "], must be either 'CRC32' or 'MAC'") - } - val password = config.getString("akka.cluster.replication.password", "secret").getBytes("UTF-8") - val ensembleSize = config.getInt("akka.cluster.replication.ensemble-size", 3) - val quorumSize = config.getInt("akka.cluster.replication.quorum-size", 2) - val snapshotFrequency = config.getInt("akka.cluster.replication.snapshot-frequency", 1000) - val timeout = Duration(config.getInt("akka.cluster.replication.timeout", 30), TIME_UNIT).toMillis - val shouldCompressData = config.getBool("akka.remote.use-compression", false) - - private[akka] val transactionLogNode = "/transaction-log-ids" - - private val isConnected = new Switch(false) - - @volatile - private[akka] var bookieClient: BookKeeper = _ - - @volatile - private[akka] var zkClient: AkkaZkClient = _ - - private[akka] def apply( - ledger: LedgerHandle, - id: String, - isAsync: Boolean, - replicationScheme: ReplicationScheme) = - new TransactionLog(ledger, id, isAsync, replicationScheme) - - /** - * Starts up the transaction log. - */ - def start() { - isConnected switchOn { - bookieClient = new BookKeeper(zooKeeperServers) - zkClient = new AkkaZkClient(zooKeeperServers, sessionTimeout, connectionTimeout) - - try { - zkClient.create(transactionLogNode, null, CreateMode.PERSISTENT) - } catch { - case e: ZkNodeExistsException ⇒ {} // do nothing - case e: Throwable ⇒ handleError(e) - } - - EventHandler.info(this, - ("Transaction log service started with" + - "\n\tdigest type [%s]" + - "\n\tensemble size [%s]" + - "\n\tquorum size [%s]" + - "\n\tlogging time out [%s]").format( - digestType, - ensembleSize, - quorumSize, - timeout)) - } - } - - /** - * Shuts down the transaction log. - */ - def shutdown() { - isConnected switchOff { - try { - EventHandler.info(this, "Shutting down transaction log...") - zkClient.close() - bookieClient.halt() - EventHandler.info(this, "Transaction log shut down successfully") - } catch { - case e: Throwable ⇒ handleError(e) - } - } - } - - def transactionLogPath(id: String): String = transactionLogNode + "/" + id - - /** - * Checks if a TransactionLog for the given id already exists. - */ - def exists(id: String): Boolean = { - val txLogPath = transactionLogPath(id) - zkClient.exists(txLogPath) - } - - /** - * Creates a new transaction log for the 'id' specified. If a TransactionLog already exists for the id, - * it will be overwritten. - */ - def newLogFor(id: String, isAsync: Boolean, replicationScheme: ReplicationScheme): TransactionLog = { - val txLogPath = transactionLogPath(id) - - val ledger = try { - if (exists(id)) { - //if it exists, we need to delete it first. This gives it the overwrite semantics we are looking for. 
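-        //note: delete-then-create is not atomic, so two concurrent newLogFor calls for the
-        //same id could interleave here; nothing in this method guards against that.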
- try { - val ledger = bookieClient.createLedger(ensembleSize, quorumSize, digestType, password) - val txLog = TransactionLog(ledger, id, false, null) - txLog.delete() - txLog.close() - } catch { - case e: Throwable ⇒ handleError(e) - } - } - - val future = Promise[LedgerHandle]() - if (isAsync) { - bookieClient.asyncCreateLedger( - ensembleSize, quorumSize, digestType, password, - new AsyncCallback.CreateCallback { - def createComplete( - returnCode: Int, - ledgerHandle: LedgerHandle, - ctx: AnyRef) { - val future = ctx.asInstanceOf[Promise[LedgerHandle]] - if (returnCode == BKException.Code.OK) future.success(ledgerHandle) - else future.failure(BKException.create(returnCode)) - } - }, - future) - await(future) - } else { - bookieClient.createLedger(ensembleSize, quorumSize, digestType, password) - } - } catch { - case e: Throwable ⇒ handleError(e) - } - - val logId = ledger.getId - try { - zkClient.create(txLogPath, null, CreateMode.PERSISTENT) - zkClient.writeData(txLogPath, logId) - logId //TODO: does this have any effect? - } catch { - case e: Throwable ⇒ - bookieClient.deleteLedger(logId) // clean up - handleError(new ReplicationException( - "Could not store transaction log [" + logId + - "] meta-data in ZooKeeper for UUID [" + id + "]", e)) - } - - EventHandler.info(this, "Created new transaction log [%s] for UUID [%s]".format(logId, id)) - TransactionLog(ledger, id, isAsync, replicationScheme) - } - - /** - * Fetches an existing transaction log for the 'id' specified. - * - * @throws ReplicationException if the log with the given id doesn't exist. - */ - def logFor(id: String, isAsync: Boolean, replicationScheme: ReplicationScheme): TransactionLog = { - val txLogPath = transactionLogPath(id) - - val logId = try { - val logId = zkClient.readData(txLogPath).asInstanceOf[Long] - EventHandler.debug(this, - "Retrieved transaction log [%s] for UUID [%s]".format(logId, id)) - logId - } catch { - case e: ZkNoNodeException ⇒ - handleError(new ReplicationException( - "Transaction log for UUID [" + id + "] does not exist in ZooKeeper")) - case e: Throwable ⇒ handleError(e) - } - - val ledger = try { - if (isAsync) { - val future = Promise[LedgerHandle]() - bookieClient.asyncOpenLedger( - logId, digestType, password, - new AsyncCallback.OpenCallback { - def openComplete(returnCode: Int, ledgerHandle: LedgerHandle, ctx: AnyRef) { - val future = ctx.asInstanceOf[Promise[LedgerHandle]] - if (returnCode == BKException.Code.OK) future.success(ledgerHandle) - else future.failure(BKException.create(returnCode)) - } - }, - future) - await(future) - } else { - bookieClient.openLedger(logId, digestType, password) - } - } catch { - case e: Throwable ⇒ handleError(e) - } - - TransactionLog(ledger, id, isAsync, replicationScheme) - } - - private[akka] def await[T](future: Promise[T]): T = { - future.await.value.get match { - case Right(result) => result - case Left(throwable) => handleError(throwable) - } - } - - private[akka] def handleError(e: Throwable): Nothing = { - EventHandler.error(e, this, e.toString) - throw e - } -} - -/** - * TODO: Documentation. - */ -object LocalBookKeeperEnsemble { - private val isRunning = new Switch(false) - - //TODO: should probably come from the config file. - private val port = 5555 - - @volatile - private var localBookKeeper: LocalBookKeeper = _ - - /** - * Starts the LocalBookKeeperEnsemble. - * - * Call can safely be made when already started. - * - * This call will block until it is started. 
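- *
- * Illustrative test usage (runReplicationTests() is a stand-in):
- * {{{
- * LocalBookKeeperEnsemble.start()
- * try runReplicationTests() finally LocalBookKeeperEnsemble.shutdown()
- * }}}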
- */
-  def start() {
-    isRunning switchOn {
-      EventHandler.info(this, "Starting up LocalBookKeeperEnsemble...")
-      localBookKeeper = new LocalBookKeeper(TransactionLog.ensembleSize)
-      localBookKeeper.runZookeeper(port)
-      localBookKeeper.initializeZookeper()
-      localBookKeeper.runBookies()
-      EventHandler.info(this, "LocalBookKeeperEnsemble started up successfully")
-    }
-  }
-
-  /**
-   * Shuts down the LocalBookKeeperEnsemble.
-   *
-   * Call can safely be made when already shut down.
-   *
-   * This call will block until the shutdown completes.
-   */
-  def shutdown() {
-    isRunning switchOff {
-      EventHandler.info(this, "Shutting down LocalBookKeeperEnsemble...")
-      localBookKeeper.bs.foreach(_.shutdown()) // stop bookies
-      localBookKeeper.zkc.close() // stop zk client
-      localBookKeeper.zks.shutdown() // stop zk server
-      localBookKeeper.serverFactory.shutdown() // stop zk NIOServer
-      EventHandler.info(this, "LocalBookKeeperEnsemble shut down successfully")
-    }
-  }
-}
diff --git a/akka-remote/src/main/scala/akka/remote/VectorClock.scala b/akka-cluster/src/main/scala/akka/cluster/VectorClock.scala
similarity index 99%
rename from akka-remote/src/main/scala/akka/remote/VectorClock.scala
rename to akka-cluster/src/main/scala/akka/cluster/VectorClock.scala
index 42ea917669..a6a54de1d9 100644
--- a/akka-remote/src/main/scala/akka/remote/VectorClock.scala
+++ b/akka-cluster/src/main/scala/akka/cluster/VectorClock.scala
@@ -2,7 +2,7 @@
  * Copyright (C) 2009-2012 Typesafe Inc.
  */
-package akka.remote
+package akka.cluster
 
 import akka.AkkaException
 
diff --git a/akka-cluster/src/main/scala/akka/cluster/metrics/LocalNodeMetricsManager.scala b/akka-cluster/src/main/scala/akka/cluster/metrics/LocalNodeMetricsManager.scala
deleted file mode 100644
index c366ed598c..0000000000
--- a/akka-cluster/src/main/scala/akka/cluster/metrics/LocalNodeMetricsManager.scala
+++ /dev/null
@@ -1,226 +0,0 @@
-/**
- * Copyright (C) 2009-2012 Typesafe Inc.
- */
-
-package akka.cluster.metrics
-
-import akka.cluster._
-import Cluster._
-import akka.cluster.zookeeper._
-import akka.actor._
-import Actor._
-import scala.collection.JavaConversions._
-import scala.collection.JavaConverters._
-import java.util.concurrent.{ ConcurrentHashMap, ConcurrentSkipListSet }
-import java.util.concurrent.atomic.AtomicReference
-import akka.util.{ Duration, Switch }
-import akka.util.Helpers._
-import akka.util.duration._
-import org.I0Itec.zkclient.exception.ZkNoNodeException
-import akka.event.EventHandler
-
-/*
- * Instance of the metrics manager running on the node. To keep performance up, metrics of all the
- * nodes in the cluster are cached internally, and refreshed from monitoring MBeans / Sigar (for the
- * local node) or from ZooKeeper (for the other nodes in the cluster) after a specified timeout -
- * metricsRefreshTimeout.
- * metricsRefreshTimeout defaults to 2 seconds, and can be declaratively defined through
- * akka.conf:
- *
- * @example {{{
- * akka.cluster.metrics-refresh-timeout = 2
- * }}}
- */
-class LocalNodeMetricsManager(zkClient: AkkaZkClient, private val metricsRefreshTimeout: Duration)
-  extends NodeMetricsManager {
-
-  /*
-   * Provides metrics of the system that the node is running on, through monitoring MBeans, Hyperic Sigar
-   * and other systems
-   */
-  lazy private val metricsProvider = SigarMetricsProvider(refreshTimeout.toMillis.toInt) fold ((thrw) ⇒ {
-    EventHandler.warning(this, """Hyperic Sigar library failed to load due to %s: %s.
-All the metrics will be retrieved from monitoring MBeans, and may be incorrect on some platforms.
-In order to get better metrics, please put "sigar.jar" on the classpath, and add the platform-specific native library to "java.library.path"."""
-      .format(thrw.getClass.getName, thrw.getMessage))
-    new JMXMetricsProvider
-  },
-    sigar ⇒ sigar)
-
-  /*
-   * Metrics of all nodes in the cluster
-   */
-  private val localNodeMetricsCache = new ConcurrentHashMap[String, NodeMetrics]
-
-  @volatile
-  private var _refreshTimeout = metricsRefreshTimeout
-
-  /*
-   * Plugged monitors (both local and cluster-wide)
-   */
-  private val alterationMonitors = new ConcurrentSkipListSet[MetricsAlterationMonitor]
-
-  private val _isRunning = new Switch(false)
-
-  /*
-   * If the value is true, the metrics manager is started and running; otherwise it is stopped
-   */
-  def isRunning = _isRunning.isOn
-
-  /*
-   * Starts the metrics manager. When started, it refreshes the cache from ZooKeeper
-   * after refreshTimeout, and invokes the plugged-in monitors
-   */
-  def start() = {
-    _isRunning.switchOn { refresh() }
-    this
-  }
-
-  private[cluster] def metricsForNode(nodeName: String): String = "%s/%s".format(node.NODE_METRICS, nodeName)
-
-  /*
-   * Adds a monitor that reacts when specific conditions are satisfied
-   */
-  def addMonitor(monitor: MetricsAlterationMonitor) = alterationMonitors add monitor
-
-  def removeMonitor(monitor: MetricsAlterationMonitor) = alterationMonitors remove monitor
-
-  def refreshTimeout_=(newValue: Duration) = _refreshTimeout = newValue
-
-  /*
-   * Timeout after which metrics, cached in the metrics manager, will be refreshed from ZooKeeper
-   */
-  def refreshTimeout = _refreshTimeout
-
-  /*
-   * Stores metrics of the node in ZooKeeper
-   */
-  private[akka] def storeMetricsInZK(metrics: NodeMetrics) = {
-    val metricsPath = metricsForNode(metrics.nodeName)
-    if (zkClient.exists(metricsPath)) {
-      zkClient.writeData(metricsPath, metrics)
-    } else {
-      ignore[ZkNoNodeException](zkClient.createEphemeral(metricsPath, metrics))
-    }
-  }
-
-  /*
-   * Gets metrics of the node from ZooKeeper
-   */
-  private[akka] def getMetricsFromZK(nodeName: String) = {
-    zkClient.readData[NodeMetrics](metricsForNode(nodeName))
-  }
-
-  /*
-   * Removes metrics of the node from the local cache and ZooKeeper
-   */
-  def removeNodeMetrics(nodeName: String) = {
-    val metricsPath = metricsForNode(nodeName)
-    if (zkClient.exists(metricsPath)) {
-      ignore[ZkNoNodeException](zkClient.delete(metricsPath))
-    }
-
-    localNodeMetricsCache.remove(nodeName)
-  }
-
-  /*
-   * Gets metrics of a local node directly from JMX monitoring beans/Hyperic Sigar
-   */
-  def getLocalMetrics = metricsProvider.getLocalMetrics
-
-  /*
-   * Gets metrics of the node, specified by the name. 
If useCached is true (default value), - * metrics snapshot is taken from the local cache; otherwise, it's retreived from ZooKeeper' - */ - def getMetrics(nodeName: String, useCached: Boolean = true): Option[NodeMetrics] = - if (useCached) - Option(localNodeMetricsCache.get(nodeName)) - else - try { - Some(getMetricsFromZK(nodeName)) - } catch { - case ex: ZkNoNodeException ⇒ None - } - - /* - * Return metrics of all nodes in the cluster from ZooKeeper - */ - private[akka] def getAllMetricsFromZK: Map[String, NodeMetrics] = { - val metricsPaths = zkClient.getChildren(node.NODE_METRICS).toList.toArray.asInstanceOf[Array[String]] - metricsPaths.flatMap { nodeName ⇒ getMetrics(nodeName, false).map((nodeName, _)) } toMap - } - - /* - * Gets cached metrics of all nodes in the cluster - */ - def getAllMetrics: Array[NodeMetrics] = localNodeMetricsCache.values.asScala.toArray - - /* - * Refreshes locally cached metrics from ZooKeeper, and invokes plugged monitors - */ - private[akka] def refresh() { - - storeMetricsInZK(getLocalMetrics) - refreshMetricsCacheFromZK() - - if (isRunning) { - Scheduler.schedule({ () ⇒ refresh() }, refreshTimeout.length, refreshTimeout.length, refreshTimeout.unit) - invokeMonitors() - } - } - - /* - * Refreshes metrics manager cache from ZooKeeper - */ - private def refreshMetricsCacheFromZK() { - val allMetricsFromZK = getAllMetricsFromZK - - localNodeMetricsCache.keySet.foreach { key ⇒ - if (!allMetricsFromZK.contains(key)) - localNodeMetricsCache.remove(key) - } - - // RACY: metrics for the node might have been removed both from ZK and local cache by the moment, - // but will be re-cached, since they're still present in allMetricsFromZK snapshot. Not important, because - // cache will be fixed soon, at the next iteration of refresh - allMetricsFromZK map { - case (node, metrics) ⇒ - localNodeMetricsCache.put(node, metrics) - } - } - - /* - * Invokes monitors with the cached metrics - */ - private def invokeMonitors(): Unit = if (!alterationMonitors.isEmpty) { - // RACY: metrics for some nodes might have been removed/added by that moment. Not important, - // because monitors will be fed with up-to-date metrics shortly, at the next iteration of refresh - val clusterNodesMetrics = getAllMetrics - val localNodeMetrics = clusterNodesMetrics.find(_.nodeName == nodeAddress.nodeName) - val iterator = alterationMonitors.iterator - - // RACY: there might be new monitors added after the iterator has been obtained. Not important, - // becuse refresh interval is meant to be very short, and all the new monitors will be called ad the - // next refresh iteration - while (iterator.hasNext) { - - val monitor = iterator.next - - monitor match { - case localMonitor: LocalMetricsAlterationMonitor ⇒ - localNodeMetrics.map { metrics ⇒ - if (localMonitor reactsOn metrics) - localMonitor react metrics - } - - case clusterMonitor: ClusterMetricsAlterationMonitor ⇒ - if (clusterMonitor reactsOn clusterNodesMetrics) - clusterMonitor react clusterNodesMetrics - } - - } - } - - def stop() = _isRunning.switchOff - -} diff --git a/akka-cluster/src/main/scala/akka/cluster/metrics/MetricsProvider.scala b/akka-cluster/src/main/scala/akka/cluster/metrics/MetricsProvider.scala deleted file mode 100644 index 0b366ef9c8..0000000000 --- a/akka-cluster/src/main/scala/akka/cluster/metrics/MetricsProvider.scala +++ /dev/null @@ -1,154 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. 
- */ - -package akka.cluster.metrics - -import akka.cluster._ -import akka.event.EventHandler -import java.lang.management.ManagementFactory -import akka.util.ReflectiveAccess._ -import akka.util.Switch - -/* - * Snapshot of the JVM / system that's the node is running on - * - * @param nodeName name of the node, where metrics are gathered at - * @param usedHeapMemory amount of heap memory currently used - * @param committedHeapMemory amount of heap memory guaranteed to be available - * @param maxHeapMemory maximum amount of heap memory that can be used - * @param avaiableProcessors number of the processors avalable to the JVM - * @param systemLoadAverage system load average. If OS-specific Sigar's native library is plugged, - * it's used to calculate average load on the CPUs in the system. Otherwise, value is retreived from monitoring - * MBeans. Hyperic Sigar provides more precise values, and, thus, if the library is provided, it's used by default. - * - */ -case class DefaultNodeMetrics(nodeName: String, - usedHeapMemory: Long, - committedHeapMemory: Long, - maxHeapMemory: Long, - avaiableProcessors: Int, - systemLoadAverage: Double) extends NodeMetrics - -object MetricsProvider { - - /* - * Maximum value of system load average - */ - val MAX_SYS_LOAD_AVG = 1 - - /* - * Minimum value of system load average - */ - val MIN_SYS_LOAD_AVG = 0 - - /* - * Default value of system load average - */ - val DEF_SYS_LOAD_AVG = 0.5 - -} - -/* - * Abstracts metrics provider that returns metrics of the system the node is running at - */ -trait MetricsProvider { - - /* - * Gets metrics of the local system - */ - def getLocalMetrics: NodeMetrics - -} - -/* - * Loads JVM metrics through JMX monitoring beans - */ -class JMXMetricsProvider extends MetricsProvider { - - import MetricsProvider._ - - private val memoryMXBean = ManagementFactory.getMemoryMXBean - - private val osMXBean = ManagementFactory.getOperatingSystemMXBean - - /* - * Validates and calculates system load average - * - * @param avg system load average obtained from a specific monitoring provider (may be incorrect) - * @return system load average, or default value(0.5), if passed value was out of permitted - * bounds (0.0 to 1.0) - */ - @inline - protected final def calcSystemLoadAverage(avg: Double) = - if (avg >= MIN_SYS_LOAD_AVG && avg <= MAX_SYS_LOAD_AVG) avg else DEF_SYS_LOAD_AVG - - protected def systemLoadAverage = calcSystemLoadAverage(osMXBean.getSystemLoadAverage) - - def getLocalMetrics = - DefaultNodeMetrics(Cluster.nodeAddress.nodeName, - memoryMXBean.getHeapMemoryUsage.getUsed, - memoryMXBean.getHeapMemoryUsage.getCommitted, - memoryMXBean.getHeapMemoryUsage.getMax, - osMXBean.getAvailableProcessors, - systemLoadAverage) - -} - -/* - * Loads wider range of metrics of a better quality with Hyperic Sigar (native library) - * - * @param refreshTimeout Sigar gathers metrics during this interval - */ -class SigarMetricsProvider private (private val sigarInstance: AnyRef) extends JMXMetricsProvider { - - private val reportErrors = new Switch(true) - - private val getCpuPercMethod = sigarInstance.getClass.getMethod("getCpuPerc") - private val sigarCpuCombinedMethod = getCpuPercMethod.getReturnType.getMethod("getCombined") - - /* - * Wraps reflective calls to Hyperic Sigar - * - * @param f reflective call to Hyperic Sigar - * @param fallback function, which is invoked, if call to Sigar has been finished with exception - */ - private def callSigarMethodOrElse[T](callSigar: ⇒ T, fallback: ⇒ T): T = - try callSigar catch { - case thrw ⇒ - 
reportErrors.switchOff { - EventHandler.warning(this, "Failed to get metrics from Hyperic Sigar. %s: %s" - .format(thrw.getClass.getName, thrw.getMessage)) - } - fallback - } - - /* - * Obtains system load average from Sigar - * If the value cannot be obtained, falls back to system load average taken from JMX - */ - override def systemLoadAverage = callSigarMethodOrElse( - calcSystemLoadAverage(sigarCpuCombinedMethod - .invoke(getCpuPercMethod.invoke(sigarInstance)).asInstanceOf[Double]), - super.systemLoadAverage) - -} - -object SigarMetricsProvider { - - /* - * Instantiates Sigar metrics provider through reflections, in order to avoid creating dependencies to - * Hiperic Sigar library - */ - def apply(refreshTimeout: Int): Either[Throwable, MetricsProvider] = try { - for { - sigarInstance ← createInstance[AnyRef]("org.hyperic.sigar.Sigar", noParams, noArgs).right - sigarProxyCacheClass: Class[_] ← getClassFor("org.hyperic.sigar.SigarProxyCache").right - } yield new SigarMetricsProvider(sigarProxyCacheClass - .getMethod("newInstance", Array(sigarInstance.getClass, classOf[Int]): _*) - .invoke(null, sigarInstance, new java.lang.Integer(refreshTimeout))) - } catch { - case thrw ⇒ Left(thrw) - } - -} diff --git a/akka-cluster/src/main/scala/akka/cluster/storage/Storage.scala b/akka-cluster/src/main/scala/akka/cluster/storage/Storage.scala deleted file mode 100644 index a402f2def1..0000000000 --- a/akka-cluster/src/main/scala/akka/cluster/storage/Storage.scala +++ /dev/null @@ -1,366 +0,0 @@ -package akka.cluster.storage - -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ -import akka.cluster.zookeeper.AkkaZkClient -import akka.AkkaException -import org.apache.zookeeper.{ KeeperException, CreateMode } -import org.apache.zookeeper.data.Stat -import java.util.concurrent.ConcurrentHashMap -import annotation.tailrec -import java.lang.{ RuntimeException, UnsupportedOperationException } - -/** - * Simple abstraction to store an Array of bytes based on some String key. - * - * Nothing is being said about ACID, transactions etc. It depends on the implementation - * of this Storage interface of what is and isn't done on the lowest level. - * - * The amount of data that is allowed to be insert/updated is implementation specific. The InMemoryStorage - * has no limits, but the ZooKeeperStorage has a maximum size of 1 mb. - * - * TODO: Class is up for better names. - * TODO: Instead of a String as key, perhaps also a byte-array. - */ -trait Storage { - - /** - * Loads the VersionedData for the given key. - * - * This call doesn't care about the actual version of the data. - * - * @param key: the key of the VersionedData to load. - * @return the VersionedData for the given entry. - * @throws MissingDataException if the entry with the given key doesn't exist. - * @throws StorageException if anything goes wrong while accessing the storage - */ - def load(key: String): VersionedData - - /** - * Loads the VersionedData for the given key and expectedVersion. - * - * This call can be used for optimistic locking since the version is included. - * - * @param key: the key of the VersionedData to load - * @param expectedVersion the version the data to load should have. - * @throws MissingDataException if the data with the given key doesn't exist. - * @throws BadVersionException if the version is not the expected version. 
- * @throws StorageException if anything goes wrong while accessing the storage
- */
-  def load(key: String, expectedVersion: Long): VersionedData
-
-  /**
-   * Checks if a VersionedData with the given key exists.
-   *
-   * @param key the key to check the existence for.
-   * @return true if exists, false if not.
-   * @throws StorageException if anything goes wrong while accessing the storage
-   */
-  def exists(key: String): Boolean
-
-  /**
-   * Inserts a byte-array based on some key.
-   *
-   * @param key the key of the Data to insert.
-   * @param bytes the data to insert.
-   * @return the version of the written data (can be used for optimistic locking).
-   * @throws DataExistsException when a VersionedData entry with the given key already exists.
-   * @throws StorageException if anything goes wrong while accessing the storage
-   */
-  def insert(key: String, bytes: Array[Byte]): Long
-
-  /**
-   * Inserts the data if there is no data for that key, or overwrites it if it is there.
-   *
-   * This is the method you want to call if you just want to save something and don't
-   * care about any lost-update issues.
-   *
-   * @param key the key of the data
-   * @param bytes the data to insert
-   * @return the version of the written data (can be used for optimistic locking).
-   * @throws StorageException if anything goes wrong while accessing the storage
-   */
-  def insertOrOverwrite(key: String, bytes: Array[Byte]): Long
-
-  /**
-   * Overwrites the current data for the given key. This call doesn't care about the version of the existing data.
-   *
-   * @param key the key of the data to overwrite
-   * @param bytes the data to insert.
-   * @return the version of the written data (can be used for optimistic locking).
-   * @throws MissingDataException when the entry with the given key doesn't exist.
-   * @throws StorageException if anything goes wrong while accessing the storage
-   */
-  def overwrite(key: String, bytes: Array[Byte]): Long
-
-  /**
-   * Updates an existing value using an optimistic lock: it expects the current data to have
-   * expectedVersion, and only then will it perform the update.
-   *
-   * @param key the key of the data to update
-   * @param bytes the content to write for the given key
-   * @param expectedVersion the version of the content that is expected to be there.
-   * @return the version of the written data (can be used for optimistic locking).
-   * @throws MissingDataException if no data for the given key exists
-   * @throws BadVersionException if the version of the found data doesn't match the expected version, i.e.
-   *         essentially if another update was already done.
-   * @throws StorageException if anything goes wrong while accessing the storage
-   */
-  def update(key: String, bytes: Array[Byte], expectedVersion: Long): Long
-}
-
-/**
- * The VersionedData is a container of data (some bytes) and a version (a Long).
- */
-class VersionedData(val data: Array[Byte], val version: Long) {}
-
-/**
- * An AkkaException thrown by the Storage module.
- */
-class StorageException(msg: String = null, cause: java.lang.Throwable = null) extends AkkaException(msg, cause) {
  def this(msg: String) = this(msg, null);
-}
-
-/**
- * A StorageException thrown when an operation is done on a non-existing node.
- */
-class MissingDataException(msg: String = null, cause: java.lang.Throwable = null) extends StorageException(msg, cause) {
-  def this(msg: String) = this(msg, null);
-}
-
-/**
- * A StorageException thrown when an operation is done on an existing node, but no node was expected.
- */ -class DataExistsException(msg: String = null, cause: java.lang.Throwable = null) extends StorageException(msg, cause) { - def this(msg: String) = this(msg, null); -} - -/** - * A StorageException thrown when an operation causes an optimistic locking failure. - */ -class BadVersionException(msg: String = null, cause: java.lang.Throwable = null) extends StorageException(msg, cause) { - def this(msg: String) = this(msg, null); -} - -/** - * A Storage implementation based on ZooKeeper. - * - * The store method is atomic: - * - so everything is written or nothing is written - * - is isolated, so threadsafe, - * but it will not participate in any transactions. - * - */ -class ZooKeeperStorage(zkClient: AkkaZkClient, root: String = "/peter/storage") extends Storage { - - var path = "" - - //makes sure that the complete root exists on zookeeper. - root.split("/").foreach( - item ⇒ if (item.size > 0) { - - path = path + "/" + item - - if (!zkClient.exists(path)) { - //it could be that another thread is going to create this root node as well, so ignore it when it happens. - try { - zkClient.create(path, "".getBytes, CreateMode.PERSISTENT) - } catch { - case ignore: KeeperException.NodeExistsException ⇒ - } - } - }) - - def toZkPath(key: String): String = { - root + "/" + key - } - - def load(key: String) = try { - val stat = new Stat - val arrayOfBytes = zkClient.connection.readData(root + "/" + key, stat, false) - new VersionedData(arrayOfBytes, stat.getVersion) - } catch { - case e: KeeperException.NoNodeException ⇒ throw new MissingDataException( - String.format("Failed to load key [%s]: no data was found", key), e) - case e: KeeperException ⇒ throw new StorageException( - String.format("Failed to load key [%s]", key), e) - } - - def load(key: String, expectedVersion: Long) = try { - val stat = new Stat - val arrayOfBytes = zkClient.connection.readData(root + "/" + key, stat, false) - - if (stat.getVersion != expectedVersion) throw new BadVersionException( - "Failed to update key [" + key + "]: version mismatch, expected [" + expectedVersion + "]" + - " but found [" + stat.getVersion + "]") - - new VersionedData(arrayOfBytes, stat.getVersion) - } catch { - case e: KeeperException.NoNodeException ⇒ throw new MissingDataException( - String.format("Failed to load key [%s]: no data was found", key), e) - case e: KeeperException ⇒ throw new StorageException( - String.format("Failed to load key [%s]", key), e) - } - - def insertOrOverwrite(key: String, bytes: Array[Byte]) = { - try { - throw new UnsupportedOperationException() - } catch { - case e: KeeperException.NodeExistsException ⇒ throw new DataExistsException( - String.format("Failed to insert key [%s]: an entry already exists with the same key", key), e) - case e: KeeperException ⇒ throw new StorageException( - String.format("Failed to insert key [%s]", key), e) - } - } - - def insert(key: String, bytes: Array[Byte]): Long = { - try { - zkClient.connection.create(root + "/" + key, bytes, CreateMode.PERSISTENT) - //todo: how to get hold of the version. 
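-      //note (sketch, not wired up): ZooKeeper exposes versions via a Stat, as load() does above
-      //with 'zkClient.connection.readData(path, stat, false)'; reading the node back after the
-      //create would yield the real version instead of the constant below.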
-      val version: Long = 0
-      version
-    } catch {
-      case e: KeeperException.NodeExistsException ⇒ throw new DataExistsException(
-        String.format("Failed to insert key [%s]: an entry already exists with the same key", key), e)
-      case e: KeeperException ⇒ throw new StorageException(
-        String.format("Failed to insert key [%s]", key), e)
-    }
-  }
-
-  def exists(key: String) = try {
-    zkClient.connection.exists(toZkPath(key), false)
-  } catch {
-    case e: KeeperException ⇒ throw new StorageException(
-      String.format("Failed to check existence for key [%s]", key), e)
-  }
-
-  def update(key: String, bytes: Array[Byte], expectedVersion: Long): Long = {
-    try {
-      zkClient.connection.writeData(root + "/" + key, bytes, expectedVersion.asInstanceOf[Int])
-      throw new RuntimeException() //todo: unfinished stub - the new version is not returned by writeData, so this always throws
-    } catch {
-      case e: KeeperException.BadVersionException ⇒ throw new BadVersionException(
-        String.format("Failed to update key [%s]: version mismatch", key), e)
-      case e: KeeperException ⇒ throw new StorageException(
-        String.format("Failed to update key [%s]", key), e)
-    }
-  }
-
-  def overwrite(key: String, bytes: Array[Byte]): Long = {
-    try {
-      zkClient.connection.writeData(root + "/" + key, bytes)
-      -1L //todo: the new version is not reported back here
-    } catch {
-      case e: KeeperException.NoNodeException ⇒ throw new MissingDataException(
-        String.format("Failed to overwrite key [%s]: no previous entry exists", key), e)
-      case e: KeeperException ⇒ throw new StorageException(
-        String.format("Failed to overwrite key [%s]", key), e)
-    }
-  }
-}
-
-object InMemoryStorage {
-  val InitialVersion = 0;
-}
-
-/**
- * An in-memory {@link Storage} implementation. Useful for testing purposes.
- */
-final class InMemoryStorage extends Storage {
-
-  private val map = new ConcurrentHashMap[String, VersionedData]()
-
-  def load(key: String) = {
-    val result = map.get(key)
-
-    if (result == null) throw new MissingDataException(
-      String.format("Failed to load key [%s]: no data was found", key))
-
-    result
-  }
-
-  def load(key: String, expectedVersion: Long) = {
-    val result = load(key)
-
-    if (result.version != expectedVersion) throw new BadVersionException(
-      "Failed to load key [" + key + "]: version mismatch, expected [" + expectedVersion + "] " +
-        "but found [" + result.version + "]")
-
-    result
-  }
-
-  def exists(key: String) = map.containsKey(key)
-
-  def insert(key: String, bytes: Array[Byte]): Long = {
-    val version: Long = InMemoryStorage.InitialVersion
-    val result = new VersionedData(bytes, version)
-
-    val previous = map.putIfAbsent(key, result)
-    if (previous != null) throw new DataExistsException(
-      String.format("Failed to insert key [%s]: the key already has been inserted previously", key))
-
-    version
-  }
-
-  @tailrec
-  def update(key: String, bytes: Array[Byte], expectedVersion: Long): Long = {
-    val found = map.get(key)
-
-    if (found == null) throw new MissingDataException(
-      String.format("Failed to update key [%s], no previous entry exists", key))
-
-    if (expectedVersion != found.version) throw new BadVersionException(
-      "Failed to update key [" + key + "]: version mismatch, expected [" + expectedVersion + "]" +
-        " but found [" + found.version + "]")
-
-    val newVersion: Long = expectedVersion + 1
-
-    if (map.replace(key, found, new VersionedData(bytes, newVersion))) newVersion
-    else update(key, bytes, expectedVersion)
-  }
-
-  @tailrec
-  def overwrite(key: String, bytes: Array[Byte]): Long = {
-    val current = map.get(key)
-
-    if (current == null) throw new MissingDataException(
-      String.format("Failed to overwrite key [%s], no previous entry exists", key))
-
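-    // Optimistic concurrency: if another writer replaced the entry between 'get' and 'replace',
-    // map.replace returns false and the @tailrec call retries with a fresh read.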
-    val update = new VersionedData(bytes, current.version + 1)
-
-    if (map.replace(key, current, update)) update.version
-    else overwrite(key, bytes)
-  }
-
-  def insertOrOverwrite(key: String, bytes: Array[Byte]): Long = {
-    val version = InMemoryStorage.InitialVersion
-    val result = new VersionedData(bytes, version)
-
-    val previous = map.putIfAbsent(key, result)
-
-    if (previous == null) result.version
-    else overwrite(key, bytes)
-  }
-}
-
-//TODO: To minimize the number of dependencies, should the Storage not be placed in a separate module?
-//class VoldemortRawStorage(storeClient: StoreClient) extends Storage {
-//
-//  def load(key: String) = {
-//    try {
-//
-//    } catch {
-//      case
-//    }
-//  }
-//
-//  override def insert(key: String, bytes: Array[Byte]) {
-//    throw new UnsupportedOperationException()
-//  }
-//
-//  def update(key: String, bytes: Array[Byte]) {
-//    throw new UnsupportedOperationException()
-//  }
-//}
diff --git a/akka-cluster/src/main/scala/akka/cluster/zookeeper/AkkaZkClient.scala b/akka-cluster/src/main/scala/akka/cluster/zookeeper/AkkaZkClient.scala
deleted file mode 100644
index 9137959877..0000000000
--- a/akka-cluster/src/main/scala/akka/cluster/zookeeper/AkkaZkClient.scala
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Copyright (C) 2009-2012 Typesafe Inc.
- */
-package akka.cluster.zookeeper
-
-import org.I0Itec.zkclient._
-import org.I0Itec.zkclient.serialize._
-import org.I0Itec.zkclient.exception._
-
-/**
- * ZooKeeper client. Holds the ZooKeeper connection and manages its session.
- */
-class AkkaZkClient(zkServers: String,
-                   sessionTimeout: Int,
-                   connectionTimeout: Int,
-                   zkSerializer: ZkSerializer = new SerializableSerializer)
-  extends ZkClient(zkServers, sessionTimeout, connectionTimeout, zkSerializer) {
-
-  def connection: ZkConnection = _connection.asInstanceOf[ZkConnection]
-
-  def reconnect() {
-    val zkLock = getEventLock
-
-    zkLock.lock()
-    try {
-      _connection.close()
-      _connection.connect(this)
-    } catch {
-      case e: InterruptedException ⇒ throw new ZkInterruptedException(e)
-    } finally {
-      zkLock.unlock()
-    }
-  }
-}
diff --git a/akka-cluster/src/main/scala/akka/cluster/zookeeper/AkkaZooKeeper.scala b/akka-cluster/src/main/scala/akka/cluster/zookeeper/AkkaZooKeeper.scala
deleted file mode 100644
index b5165ffb72..0000000000
--- a/akka-cluster/src/main/scala/akka/cluster/zookeeper/AkkaZooKeeper.scala
+++ /dev/null
@@ -1,32 +0,0 @@
-/**
- * Copyright (C) 2009-2012 Typesafe Inc.
- */
-package akka.cluster.zookeeper
-
-import org.I0Itec.zkclient._
-import org.apache.commons.io.FileUtils
-import java.io.File
-
-object AkkaZooKeeper {
-  /**
-   * Starts up a local ZooKeeper server. Should only be used for testing purposes.
-   */
-  def startLocalServer(dataPath: String, logPath: String): ZkServer =
-    startLocalServer(dataPath, logPath, 2181, 500)
-
-  /**
-   * Starts up a local ZooKeeper server. Should only be used for testing purposes.
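The in-memory store above resolves racing writers optimistically: read the current entry, build the successor, and let ConcurrentHashMap.replace arbitrate, retrying on interference. A minimal standalone sketch of that compare-and-swap loop (VersionedData here is a local stand-in for the class used above):

import java.util.concurrent.ConcurrentHashMap
import scala.annotation.tailrec

object CasLoopSketch {
  final case class VersionedData(bytes: Array[Byte], version: Long)

  private val map = new ConcurrentHashMap[String, VersionedData]()

  // Returns the version assigned to this write; retries while other
  // writers win the race, re-reading the current entry each time.
  @tailrec
  def put(key: String, bytes: Array[Byte]): Long = {
    val current = map.get(key)
    if (current == null) {
      if (map.putIfAbsent(key, VersionedData(bytes, 0L)) == null) 0L
      else put(key, bytes) // another writer inserted first, retry
    } else {
      val next = VersionedData(bytes, current.version + 1)
      if (map.replace(key, current, next)) next.version
      else put(key, bytes) // entry changed underneath us, retry
    }
  }
}

Nothing blocks here; the cost of a lost race is one extra read-modify-write round.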
-   */
-  def startLocalServer(dataPath: String, logPath: String, port: Int, tickTime: Int): ZkServer = {
-    FileUtils.deleteDirectory(new File(dataPath))
-    FileUtils.deleteDirectory(new File(logPath))
-    val zkServer = new ZkServer(
-      dataPath, logPath,
-      new IDefaultNameSpace() {
-        def createDefaultNameSpace(zkClient: ZkClient) {}
-      },
-      port, tickTime)
-    zkServer.start()
-    zkServer
-  }
-}
diff --git a/akka-cluster/src/main/scala/akka/cluster/zookeeper/ZooKeeperBarrier.scala b/akka-cluster/src/main/scala/akka/cluster/zookeeper/ZooKeeperBarrier.scala
deleted file mode 100644
index c1f51ceb96..0000000000
--- a/akka-cluster/src/main/scala/akka/cluster/zookeeper/ZooKeeperBarrier.scala
+++ /dev/null
@@ -1,104 +0,0 @@
-/**
- * Copyright (C) 2009-2012 Typesafe Inc.
- */
-package akka.cluster.zookeeper
-
-import akka.util.Duration
-import akka.util.duration._
-
-import org.I0Itec.zkclient._
-import org.I0Itec.zkclient.exception._
-
-import java.util.{ List ⇒ JList }
-import java.util.concurrent.CountDownLatch
-
-class BarrierTimeoutException(message: String) extends RuntimeException(message)
-
-/**
- * Barrier based on the ZooKeeper barrier tutorial.
- */
-object ZooKeeperBarrier {
-  val BarriersNode = "/barriers"
-  val DefaultTimeout = 60 seconds
-
-  def apply(zkClient: ZkClient, name: String, node: String, count: Int) =
-    new ZooKeeperBarrier(zkClient, name, node, count, DefaultTimeout)
-
-  def apply(zkClient: ZkClient, name: String, node: String, count: Int, timeout: Duration) =
-    new ZooKeeperBarrier(zkClient, name, node, count, timeout)
-
-  def apply(zkClient: ZkClient, cluster: String, name: String, node: String, count: Int) =
-    new ZooKeeperBarrier(zkClient, cluster + "-" + name, node, count, DefaultTimeout)
-
-  def apply(zkClient: ZkClient, cluster: String, name: String, node: String, count: Int, timeout: Duration) =
-    new ZooKeeperBarrier(zkClient, cluster + "-" + name, node, count, timeout)
-
-  def ignore[E: Manifest](body: ⇒ Unit) {
-    try {
-      body
-    } catch {
-      case e if manifest[E].erasure.isAssignableFrom(e.getClass) ⇒ ()
-    }
-  }
-}
-
-/**
- * Barrier based on the ZooKeeper barrier tutorial.
- */
-class ZooKeeperBarrier(zkClient: ZkClient, name: String, node: String, count: Int, timeout: Duration)
-  extends IZkChildListener {
-
-  import ZooKeeperBarrier.{ BarriersNode, ignore }
-
-  val barrier = BarriersNode + "/" + name
-  val entry = barrier + "/" + node
-  val ready = barrier + "/ready"
-
-  val exitBarrier = new CountDownLatch(1)
-
-  ignore[ZkNodeExistsException](zkClient.createPersistent(BarriersNode))
-  ignore[ZkNodeExistsException](zkClient.createPersistent(barrier))
-
-  def apply(body: ⇒ Unit) {
-    enter()
-    body
-    leave()
-  }
-
-  /**
-   * An await does an enter/leave, making this a 'single' barrier instead of a double barrier.
- */ - def await() { - enter() - leave() - } - - def enter() = { - zkClient.createEphemeral(entry) - if (zkClient.countChildren(barrier) >= count) - ignore[ZkNodeExistsException](zkClient.createPersistent(ready)) - else - zkClient.waitUntilExists(ready, timeout.unit, timeout.length) - if (!zkClient.exists(ready)) { - throw new BarrierTimeoutException("Timeout (%s) while waiting for entry barrier" format timeout) - } - zkClient.subscribeChildChanges(barrier, this) - } - - def leave() { - zkClient.delete(entry) - exitBarrier.await(timeout.length, timeout.unit) - if (zkClient.countChildren(barrier) > 0) { - zkClient.unsubscribeChildChanges(barrier, this) - throw new BarrierTimeoutException("Timeout (%s) while waiting for exit barrier" format timeout) - } - zkClient.unsubscribeChildChanges(barrier, this) - } - - def handleChildChange(path: String, children: JList[String]) { - if (children.size <= 1) { - ignore[ZkNoNodeException](zkClient.delete(ready)) - exitBarrier.countDown() - } - } -} diff --git a/akka-remote/src/multi-jvm/scala/akka/remote/GossipMembershipMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipMembershipMultiJvmSpec.scala similarity index 99% rename from akka-remote/src/multi-jvm/scala/akka/remote/GossipMembershipMultiJvmSpec.scala rename to akka-cluster/src/multi-jvm/scala/akka/cluster/GossipMembershipMultiJvmSpec.scala index 418f6f385b..c380d3e5eb 100644 --- a/akka-remote/src/multi-jvm/scala/akka/remote/GossipMembershipMultiJvmSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipMembershipMultiJvmSpec.scala @@ -1,4 +1,4 @@ -// package akka.remote +// package akka.cluster // import akka.actor.Actor // import akka.remote._ diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmNode1.conf deleted file mode 100644 index 2f642a20f0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmNode1.conf +++ /dev/null @@ -1,2 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmNode1.opts deleted file mode 100644 index dc86c1c9c0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmNode2.conf deleted file mode 100644 index 2f642a20f0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmNode2.conf +++ /dev/null @@ -1,2 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmNode2.opts deleted file mode 
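The class deleted here is ZooKeeper's classic double barrier: enter() blocks until count ephemeral children have registered, and leave() blocks until they are all gone again, so every participant both starts and finishes the guarded section together. Against the API above, a two-node rendezvous would look roughly like this (server address and timeouts are illustrative):

val zkClient = new AkkaZkClient("localhost:2181", sessionTimeout = 60000, connectionTimeout = 30000)
val barrier = ZooKeeperBarrier(zkClient, "test-cluster", "startup", "node1", 2)

barrier { // enter(): blocks until both nodes have created their entry node
  // work that must not start before everyone arrives,
  // and that nobody may abandon before everyone finishes
} // leave(): blocks until both entry nodes are deleted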
100644 index bb140941a5..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmNode2.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node2 -Dakka.remote.port=9992 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmSpec.scala deleted file mode 100644 index f1b9f5a7ae..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmSpec.scala +++ /dev/null @@ -1,63 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -package akka.cluster.api.changelisteners.newleader - -import org.scalatest.WordSpec -import org.scalatest.matchers.MustMatchers -import org.scalatest.BeforeAndAfterAll - -import akka.cluster._ -import ChangeListener._ -import Cluster._ -import akka.cluster.LocalCluster._ - -import java.util.concurrent._ - -object NewLeaderChangeListenerMultiJvmSpec { - var NrOfNodes = 2 -} - -class NewLeaderChangeListenerMultiJvmNode1 extends MasterClusterTestNode { - import NewLeaderChangeListenerMultiJvmSpec._ - - val testNodes = NrOfNodes - - "A NewLeader change listener" must { - - "be invoked after leader election is completed" ignore { - barrier("start-node1", NrOfNodes) { - Cluster.node.start() - } - - barrier("start-node2", NrOfNodes).await() - - System.exit(0) - } - } -} - -class NewLeaderChangeListenerMultiJvmNode2 extends ClusterTestNode { - import NewLeaderChangeListenerMultiJvmSpec._ - - "A NewLeader change listener" must { - - "be invoked after leader election is completed" ignore { - val latch = new CountDownLatch(1) - - barrier("start-node1", NrOfNodes).await() - - barrier("start-node2", NrOfNodes) { - node.register(new ChangeListener { - override def newLeader(node: String, client: ClusterNode) { - latch.countDown - } - }) - } - latch.await(10, TimeUnit.SECONDS) must be === true - - node.shutdown() - } - } -} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmNode1.conf deleted file mode 100644 index 2f642a20f0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmNode1.conf +++ /dev/null @@ -1,2 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmNode1.opts deleted file mode 100644 index dc86c1c9c0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmNode2.conf deleted file mode 100644 index 2f642a20f0..0000000000 --- 
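These multi-JVM specs all coordinate through the same idiom: each node declares an identical sequence of named barriers, the node responsible for a step runs it inside the barrier block, and every other node merely synchronizes on it:

// Node 1 drives the step:
barrier("start-node1", NrOfNodes) {
  Cluster.node.start()
}

// Node 2 only waits for it to complete:
barrier("start-node1", NrOfNodes).await()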
a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmNode2.conf +++ /dev/null @@ -1,2 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmNode2.opts deleted file mode 100644 index bb140941a5..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmNode2.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node2 -Dakka.remote.port=9992 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmSpec.scala deleted file mode 100644 index deec5c19e6..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmSpec.scala +++ /dev/null @@ -1,65 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -package akka.cluster.api.changelisteners.nodeconnected - -import org.scalatest.WordSpec -import org.scalatest.matchers.MustMatchers -import org.scalatest.BeforeAndAfterAll - -import akka.cluster._ -import ChangeListener._ -import Cluster._ -import akka.cluster.LocalCluster._ - -import java.util.concurrent._ - -object NodeConnectedChangeListenerMultiJvmSpec { - var NrOfNodes = 2 -} - -class NodeConnectedChangeListenerMultiJvmNode1 extends MasterClusterTestNode { - import NodeConnectedChangeListenerMultiJvmSpec._ - - val testNodes = NrOfNodes - - "A NodeConnected change listener" must { - - "be invoked when a new node joins the cluster" in { - val latch = new CountDownLatch(1) - node.register(new ChangeListener { - override def nodeConnected(node: String, client: ClusterNode) { - latch.countDown - } - }) - - barrier("start-node1", NrOfNodes) { - Cluster.node.start() - } - - barrier("start-node2", NrOfNodes) { - latch.await(5, TimeUnit.SECONDS) must be === true - } - - node.shutdown() - } - } -} - -class NodeConnectedChangeListenerMultiJvmNode2 extends ClusterTestNode { - import NodeConnectedChangeListenerMultiJvmSpec._ - - "A NodeConnected change listener" must { - - "be invoked when a new node joins the cluster" in { - barrier("start-node1", NrOfNodes).await() - - barrier("start-node2", NrOfNodes) { - Cluster.node.start() - } - - node.shutdown() - } - } -} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmNode1.conf deleted file mode 100644 index 2f642a20f0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmNode1.conf +++ /dev/null @@ -1,2 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmNode1.opts deleted file 
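The listener specs share one assertion shape: register a ChangeListener that trips a CountDownLatch, provoke the event from the other JVM, then await the latch with a timeout so a missed callback fails the test instead of hanging it. Condensed from the spec above:

import java.util.concurrent.{ CountDownLatch, TimeUnit }

val latch = new CountDownLatch(1)
node.register(new ChangeListener {
  override def nodeConnected(node: String, client: ClusterNode) {
    latch.countDown()
  }
})
// ... the other node starts and joins the cluster ...
latch.await(5, TimeUnit.SECONDS) must be === true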
mode 100644 index dc86c1c9c0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmNode2.conf deleted file mode 100644 index 2f642a20f0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmNode2.conf +++ /dev/null @@ -1,2 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmNode2.opts deleted file mode 100644 index bb140941a5..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmNode2.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node2 -Dakka.remote.port=9992 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmSpec.scala deleted file mode 100644 index 54a327126e..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmSpec.scala +++ /dev/null @@ -1,65 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. 
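Each MultiJvmNodeN class in these tests is paired with a .conf and a .opts file: the conf enables the cluster module and quiets logging, while the opts pins the node identity and remoting port that the barriers and deployment sections rely on. The recurring pair is:

akka.enabled-modules = ["cluster"]
akka.event-handler-level = "WARNING"

-Dakka.cluster.nodename=node1 -Dakka.remote.port=9991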
- */ - -package akka.cluster.api.changelisteners.nodedisconnected - -import org.scalatest.WordSpec -import org.scalatest.matchers.MustMatchers -import org.scalatest.BeforeAndAfterAll - -import akka.cluster._ -import ChangeListener._ -import Cluster._ -import akka.cluster.LocalCluster._ - -import java.util.concurrent._ - -object NodeDisconnectedChangeListenerMultiJvmSpec { - var NrOfNodes = 2 -} - -class NodeDisconnectedChangeListenerMultiJvmNode1 extends MasterClusterTestNode { - import NodeDisconnectedChangeListenerMultiJvmSpec._ - - val testNodes = NrOfNodes - - "A NodeDisconnected change listener" must { - - "be invoked when a new node leaves the cluster" in { - val latch = new CountDownLatch(1) - node.register(new ChangeListener { - override def nodeDisconnected(node: String, client: ClusterNode) { - latch.countDown - } - }) - - barrier("start-node1", NrOfNodes) { - Cluster.node.start() - } - - barrier("start-node2", NrOfNodes).await() - - latch.await(10, TimeUnit.SECONDS) must be === true - - node.shutdown() - } - } -} - -class NodeDisconnectedChangeListenerMultiJvmNode2 extends ClusterTestNode { - import NodeDisconnectedChangeListenerMultiJvmSpec._ - - "A NodeDisconnected change listener" must { - - "be invoked when a new node leaves the cluster" in { - barrier("start-node1", NrOfNodes).await() - - barrier("start-node2", NrOfNodes) { - Cluster.node.start() - } - - node.shutdown() - } - } -} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmNode1.conf deleted file mode 100644 index 2f642a20f0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmNode1.conf +++ /dev/null @@ -1,2 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmNode1.opts deleted file mode 100644 index dc86c1c9c0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmNode2.conf deleted file mode 100644 index 2f642a20f0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmNode2.conf +++ /dev/null @@ -1,2 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmNode2.opts deleted file mode 100644 index bb140941a5..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmNode2.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node2 -Dakka.remote.port=9992 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmSpec.scala 
deleted file mode 100644 index f9aabbb004..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmSpec.scala +++ /dev/null @@ -1,89 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -package akka.cluster.api.configuration - -import org.scalatest.WordSpec -import org.scalatest.matchers.MustMatchers -import org.scalatest.BeforeAndAfterAll - -import akka.cluster._ -import Cluster._ -import akka.cluster.LocalCluster._ - -object ConfigurationStorageMultiJvmSpec { - var NrOfNodes = 2 -} - -class ConfigurationStorageMultiJvmNode1 extends MasterClusterTestNode { - import ConfigurationStorageMultiJvmSpec._ - - val testNodes = NrOfNodes - - "A cluster" must { - - "be able to store, read and remove custom configuration data" in { - - barrier("start-node-1", NrOfNodes) { - Cluster.node.start() - } - - barrier("start-node-2", NrOfNodes).await() - - barrier("store-config-data-node-1", NrOfNodes) { - node.setConfigElement("key1", "value1".getBytes) - } - - barrier("read-config-data-node-2", NrOfNodes).await() - - barrier("remove-config-data-node-2", NrOfNodes).await() - - barrier("try-read-config-data-node-1", NrOfNodes) { - val option = node.getConfigElement("key1") - option.isDefined must be(false) - - val elements = node.getConfigElementKeys - elements.size must be(0) - } - - node.shutdown() - } - } -} - -class ConfigurationStorageMultiJvmNode2 extends ClusterTestNode { - import ConfigurationStorageMultiJvmSpec._ - - "A cluster" must { - - "be able to store, read and remove custom configuration data" in { - - barrier("start-node-1", NrOfNodes).await() - - barrier("start-node-2", NrOfNodes) { - Cluster.node.start() - } - - barrier("store-config-data-node-1", NrOfNodes).await() - - barrier("read-config-data-node-2", NrOfNodes) { - val option = node.getConfigElement("key1") - option.isDefined must be(true) - option.get must be("value1".getBytes) - - val elements = node.getConfigElementKeys - elements.size must be(1) - elements.head must be("key1") - } - - barrier("remove-config-data-node-2", NrOfNodes) { - node.removeConfigElement("key1") - } - - barrier("try-read-config-data-node-1", NrOfNodes).await() - - node.shutdown() - } - } -} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmNode1.conf deleted file mode 100644 index 2f642a20f0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmNode1.conf +++ /dev/null @@ -1,2 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmNode1.opts deleted file mode 100644 index dc86c1c9c0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmNode2.conf deleted file mode 100644 index 2f642a20f0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmNode2.conf +++ /dev/null @@ -1,2 +0,0 
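Stripped of its barriers, the configuration-store contract exercised above is a byte-oriented key/value round trip. Using the same method names as the spec:

node.setConfigElement("key1", "value1".getBytes)

val stored = node.getConfigElement("key1") // Option[Array[Byte]]
stored.isDefined must be(true)

node.removeConfigElement("key1")
node.getConfigElement("key1").isDefined must be(false)
node.getConfigElementKeys.size must be(0)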
@@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmNode2.opts deleted file mode 100644 index bb140941a5..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmNode2.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node2 -Dakka.remote.port=9992 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmSpec.scala deleted file mode 100644 index 479f77e0d3..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmSpec.scala +++ /dev/null @@ -1,71 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -package akka.cluster.api.leader.election - -import org.scalatest.WordSpec -import org.scalatest.matchers.MustMatchers -import org.scalatest.BeforeAndAfterAll - -import akka.cluster._ -import ChangeListener._ -import Cluster._ -import akka.cluster.LocalCluster._ - -import java.util.concurrent._ - -object LeaderElectionMultiJvmSpec { - var NrOfNodes = 2 -} -/* -class LeaderElectionMultiJvmNode1 extends MasterClusterTestNode { - import LeaderElectionMultiJvmSpec._ - - val testNodes = NrOfNodes - - "A cluster" must { - - "be able to elect a single leader in the cluster and perform re-election if leader resigns" in { - - barrier("start-node1", NrOfNodes) { - Cluster.node.start() - } - node.isLeader must be === true - - barrier("start-node2", NrOfNodes) { - } - node.isLeader must be === true - - barrier("stop-node1", NrOfNodes) { - node.resign() - } - } - } -} - -class LeaderElectionMultiJvmNode2 extends ClusterTestNode { - import LeaderElectionMultiJvmSpec._ - - "A cluster" must { - - "be able to elect a single leader in the cluster and perform re-election if leader resigns" in { - - barrier("start-node1", NrOfNodes) { - } - node.isLeader must be === false - - barrier("start-node2", NrOfNodes) { - Cluster.node.start() - } - node.isLeader must be === false - - barrier("stop-node1", NrOfNodes) { - } - Thread.sleep(1000) // wait for re-election - - node.isLeader must be === true - } - } -} -*/ diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmNode1.conf deleted file mode 100644 index 2f642a20f0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmNode1.conf +++ /dev/null @@ -1,2 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmNode1.opts deleted file mode 100644 index dc86c1c9c0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmNode2.conf deleted file mode 100644 index 2f642a20f0..0000000000 --- 
a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmNode2.conf +++ /dev/null @@ -1,2 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmNode2.opts deleted file mode 100644 index bb140941a5..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmNode2.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node2 -Dakka.remote.port=9992 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmSpec.scala deleted file mode 100644 index c20bf9269c..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmSpec.scala +++ /dev/null @@ -1,116 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -package akka.cluster.api.registry - -import org.scalatest.WordSpec -import org.scalatest.matchers.MustMatchers -import org.scalatest.BeforeAndAfterAll - -import akka.actor._ -import Actor._ -import akka.cluster._ -import ChangeListener._ -import Cluster._ -import akka.config.Config -import akka.serialization.Serialization -import akka.cluster.LocalCluster._ - -import java.util.concurrent._ - -object RegistryStoreMultiJvmSpec { - var NrOfNodes = 2 - - class HelloWorld1 extends Actor with Serializable { - def receive = { - case "Hello" ⇒ - reply("World from node [" + Config.nodename + "]") - } - } - - class HelloWorld2 extends Actor with Serializable { - var counter = 0 - def receive = { - case "Hello" ⇒ - Thread.sleep(1000) - counter += 1 - case "Count" ⇒ - reply(counter) - } - } -} - -class RegistryStoreMultiJvmNode1 extends MasterClusterTestNode { - import RegistryStoreMultiJvmSpec._ - - val testNodes = NrOfNodes - - "A cluster" must { - - "be able to store an ActorRef in the cluster without a replication strategy and retrieve it with 'use'" in { - - barrier("start-node-1", NrOfNodes) { - Cluster.node.start() - } - - barrier("start-node-2", NrOfNodes).await() - - barrier("store-1-in-node-1", NrOfNodes) { - node.store("hello-world-1", classOf[HelloWorld1], Serialization.serializerFor(classOf[HelloWorld1])) - } - - barrier("use-1-in-node-2", NrOfNodes).await() - - barrier("store-2-in-node-1", NrOfNodes) { - node.store("hello-world-2", classOf[HelloWorld1], false, Serialization.serializerFor(classOf[HelloWorld1])) - } - - barrier("use-2-in-node-2", NrOfNodes).await() - - node.shutdown() - } - } -} - -class RegistryStoreMultiJvmNode2 extends ClusterTestNode { - import RegistryStoreMultiJvmSpec._ - - "A cluster" must { - - "be able to store an actor in the cluster with 'store' and retrieve it with 'use'" in { - - barrier("start-node-1", NrOfNodes).await() - - barrier("start-node-2", NrOfNodes) { - Cluster.node.start() - } - - barrier("store-1-in-node-1", NrOfNodes).await() - - barrier("use-1-in-node-2", NrOfNodes) { - val actorOrOption = node.use("hello-world-1") - if (actorOrOption.isEmpty) fail("Actor could not be retrieved") - - val actorRef = actorOrOption.get - actorRef.address must be("hello-world-1") - - (actorRef ? 
"Hello").as[String].get must be("World from node [node2]") - } - - barrier("store-2-in-node-1", NrOfNodes).await() - - barrier("use-2-in-node-2", NrOfNodes) { - val actorOrOption = node.use("hello-world-2") - if (actorOrOption.isEmpty) fail("Actor could not be retrieved") - - val actorRef = actorOrOption.get - actorRef.address must be("hello-world-2") - - (actorRef ? "Hello").as[String].get must be("World from node [node2]") - } - - node.shutdown() - } - } -} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmNode1.conf deleted file mode 100644 index 88df1a6421..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmNode1.conf +++ /dev/null @@ -1,4 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "round-robin" -akka.actor.deployment.service-hello.nr-of-instances = 1 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmNode1.opts deleted file mode 100644 index dc86c1c9c0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmNode2.conf deleted file mode 100644 index 88df1a6421..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmNode2.conf +++ /dev/null @@ -1,4 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "round-robin" -akka.actor.deployment.service-hello.nr-of-instances = 1 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmNode2.opts deleted file mode 100644 index bb140941a5..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmNode2.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node2 -Dakka.remote.port=9992 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmSpec.scala deleted file mode 100644 index ef0b79b4a7..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmSpec.scala +++ /dev/null @@ -1,75 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. 
- */ - -package akka.cluster.deployment - -import org.scalatest.WordSpec -import org.scalatest.matchers.MustMatchers -import org.scalatest.BeforeAndAfterAll - -import akka.actor._ -import Actor._ -import akka.cluster._ -import Cluster._ -import akka.cluster.LocalCluster._ - -object DeploymentMultiJvmSpec { - var NrOfNodes = 2 -} - -class DeploymentMultiJvmNode1 extends MasterClusterTestNode { - import DeploymentMultiJvmSpec._ - - val testNodes = NrOfNodes - - "A ClusterDeployer" must { - - "be able to deploy deployments in akka.conf and lookup the deployments by 'address'" in { - - barrier("start-node-1", NrOfNodes) { - Cluster.node.start() - } - - barrier("start-node-2", NrOfNodes).await() - - barrier("perform-deployment-on-node-1", NrOfNodes) { - Deployer.start() - } - - barrier("lookup-deployment-node-2", NrOfNodes).await() - - node.shutdown() - } - } -} - -class DeploymentMultiJvmNode2 extends ClusterTestNode { - import DeploymentMultiJvmSpec._ - - "A cluster" must { - - "be able to store, read and remove custom configuration data" in { - - barrier("start-node-1", NrOfNodes).await() - - barrier("start-node-2", NrOfNodes) { - Cluster.node.start() - } - - barrier("perform-deployment-on-node-1", NrOfNodes).await() - - barrier("lookup-deployment-node-2", NrOfNodes) { - Deployer.start() - val deployments = Deployer.deploymentsInConfig - deployments map { oldDeployment ⇒ - val newDeployment = ClusterDeployer.lookupDeploymentFor(oldDeployment.address) - newDeployment must be('defined) - oldDeployment must equal(newDeployment.get) - } - } - - node.shutdown() - } - } -} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/local/LocalMetricsMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/local/LocalMetricsMultiJvmNode1.conf deleted file mode 100644 index 8d5284be46..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/local/LocalMetricsMultiJvmNode1.conf +++ /dev/null @@ -1,4 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handlers = ["akka.testkit.TestEventListener"] -akka.event-handler-level = "WARNING" -akka.cluster.metrics-refresh-timeout = 1 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/local/LocalMetricsMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/local/LocalMetricsMultiJvmNode1.opts deleted file mode 100644 index dc86c1c9c0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/local/LocalMetricsMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/local/LocalMetricsMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/local/LocalMetricsMultiJvmSpec.scala deleted file mode 100644 index 380d68d8ef..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/local/LocalMetricsMultiJvmSpec.scala +++ /dev/null @@ -1,134 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. 
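The deployment check itself is a one-to-one comparison between what Deployer parsed from akka.conf and what ClusterDeployer persisted into ZooKeeper; isolated from the barriers it is just:

Deployer.start()
Deployer.deploymentsInConfig foreach { oldDeployment ⇒
  val newDeployment = ClusterDeployer.lookupDeploymentFor(oldDeployment.address)
  newDeployment must be('defined)
  oldDeployment must equal(newDeployment.get)
}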
- */ - -package akka.cluster.metrics.local - -import akka.cluster._ -import akka.actor._ -import Actor._ -import Cluster._ -import akka.dispatch._ -import akka.util.Duration -import akka.util.duration._ -import akka.cluster.metrics._ -import java.util.concurrent.atomic.AtomicInteger - -object LocalMetricsMultiJvmSpec { - val NrOfNodes = 1 -} - -class LocalMetricsMultiJvmNode1 extends MasterClusterTestNode { - - import LocalMetricsMultiJvmSpec._ - - val testNodes = NrOfNodes - - override def beforeAll = { - super.beforeAll() - node - } - - override def afterAll = { - node.shutdown() - super.afterAll() - } - - "Metrics manager" must { - - def timeout = node.metricsManager.refreshTimeout - - "be initialized with refresh timeout value, specified in akka.conf" in { - timeout must be(1.second) - } - - "return up-to-date local node metrics straight from MBeans/Sigar" in { - node.metricsManager.getLocalMetrics must not be (null) - - node.metricsManager.getLocalMetrics.systemLoadAverage must be(0.5 plusOrMinus 0.5) - } - - "return metrics cached in the MetricsManagerLocalMetrics" in { - node.metricsManager.getMetrics(nodeAddress.nodeName) must not be (null) - } - - "return local node metrics from ZNode" in { - node.metricsManager.getMetrics(nodeAddress.nodeName, false) must not be (null) - } - - "return cached metrics of all nodes in the cluster" in { - node.metricsManager.getAllMetrics.size must be(1) - node.metricsManager.getAllMetrics.find(_.nodeName == "node1") must not be (null) - } - - "throw no exceptions, when user attempts to get metrics of a non-existing node" in { - node.metricsManager.getMetrics("nonexisting") must be(None) - node.metricsManager.getMetrics("nonexisting", false) must be(None) - } - - "regularly update cached metrics" in { - val oldMetrics = node.metricsManager.getLocalMetrics - Thread sleep timeout.toMillis - node.metricsManager.getLocalMetrics must not be (oldMetrics) - } - - "allow to track JVM state and bind handles through MetricsAlterationMonitors" in { - val monitorReponse = Promise[String]() - - node.metricsManager.addMonitor(new LocalMetricsAlterationMonitor { - - val id = "heapMemoryThresholdMonitor" - - def reactsOn(metrics: NodeMetrics) = metrics.usedHeapMemory > 1 - - def react(metrics: NodeMetrics) = monitorReponse.success("Too much memory is used!") - - }) - - Await.result(monitorReponse, 5 seconds) must be("Too much memory is used!") - - } - - class FooMonitor(monitorWorked: AtomicInteger) extends LocalMetricsAlterationMonitor { - val id = "fooMonitor" - def reactsOn(metrics: NodeMetrics) = true - def react(metrics: NodeMetrics) = monitorWorked.set(monitorWorked.get + 1) - } - - "allow to unregister the monitor" in { - - val monitorWorked = new AtomicInteger(0) - val fooMonitor = new FooMonitor(monitorWorked) - - node.metricsManager.addMonitor(fooMonitor) - node.metricsManager.removeMonitor(fooMonitor) - - val oldValue = monitorWorked.get - Thread sleep timeout.toMillis - monitorWorked.get must be(oldValue) - - } - - "stop notifying monitors, when stopped" in { - - node.metricsManager.stop() - - val monitorWorked = new AtomicInteger(0) - - node.metricsManager.addMonitor(new LocalMetricsAlterationMonitor { - val id = "fooMonitor" - def reactsOn(metrics: NodeMetrics) = true - def react(metrics: NodeMetrics) = monitorWorked.set(monitorWorked.get + 1) - }) - - monitorWorked.get must be(0) - - node.metricsManager.start() - Thread sleep (timeout.toMillis * 2) - monitorWorked.get must be > (1) - - } - - } - -} diff --git 
a/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmNode1.conf deleted file mode 100644 index 172e980612..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmNode1.conf +++ /dev/null @@ -1,3 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handlers = ["akka.testkit.TestEventListener"] -akka.event-handler-level = "WARNING" diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmNode1.opts deleted file mode 100644 index dc86c1c9c0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmNode2.conf deleted file mode 100644 index 172e980612..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmNode2.conf +++ /dev/null @@ -1,3 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handlers = ["akka.testkit.TestEventListener"] -akka.event-handler-level = "WARNING" diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmNode2.opts deleted file mode 100644 index bb140941a5..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmNode2.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node2 -Dakka.remote.port=9992 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmSpec.scala deleted file mode 100644 index 8c4730dc90..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmSpec.scala +++ /dev/null @@ -1,133 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. 
- */ - -package akka.cluster.metrics.remote - -import akka.cluster._ -import akka.actor._ -import Actor._ -import Cluster._ -import akka.dispatch._ -import akka.util.Duration -import akka.util.duration._ -import akka.cluster.metrics._ -import java.util.concurrent._ -import atomic.AtomicInteger - -object RemoteMetricsMultiJvmSpec { - val NrOfNodes = 2 - - val MetricsRefreshTimeout = 100.millis -} - -class AllMetricsAvailableMonitor(_id: String, completionLatch: CountDownLatch, clusterSize: Int) extends ClusterMetricsAlterationMonitor { - - val id = _id - - def reactsOn(allMetrics: Array[NodeMetrics]) = allMetrics.size == clusterSize - - def react(allMetrics: Array[NodeMetrics]) = completionLatch.countDown - -} - -class RemoteMetricsMultiJvmNode1 extends MasterClusterTestNode { - - import RemoteMetricsMultiJvmSpec._ - - val testNodes = NrOfNodes - - "Metrics manager" must { - "provide metrics of all nodes in the cluster" in { - - val allMetricsAvaiable = new CountDownLatch(1) - - node.metricsManager.refreshTimeout = MetricsRefreshTimeout - node.metricsManager.addMonitor(new AllMetricsAvailableMonitor("all-metrics-available", allMetricsAvaiable, NrOfNodes)) - - LocalCluster.barrier("node-start", NrOfNodes).await() - - allMetricsAvaiable.await() - - LocalCluster.barrier("check-all-remote-metrics", NrOfNodes) { - node.metricsManager.getAllMetrics.size must be(2) - } - - val cachedMetrics = node.metricsManager.getMetrics("node2") - val metricsFromZnode = node.metricsManager.getMetrics("node2", false) - - LocalCluster.barrier("check-single-remote-metrics", NrOfNodes) { - cachedMetrics must not be (null) - metricsFromZnode must not be (null) - } - - Thread sleep MetricsRefreshTimeout.toMillis - - LocalCluster.barrier("remote-metrics-is-updated", NrOfNodes) { - node.metricsManager.getMetrics("node2") must not be (cachedMetrics) - node.metricsManager.getMetrics("node2", false) must not be (metricsFromZnode) - } - - val someMetricsGone = new CountDownLatch(1) - node.metricsManager.addMonitor(new AllMetricsAvailableMonitor("some-metrics-gone", someMetricsGone, 1)) - - LocalCluster.barrier("some-nodes-leave", NrOfNodes).await() - - someMetricsGone.await(10, TimeUnit.SECONDS) must be(true) - - node.metricsManager.getMetrics("node2") must be(None) - node.metricsManager.getMetrics("node2", false) must be(None) - node.metricsManager.getAllMetrics.size must be(1) - - node.shutdown() - - } - } - -} - -class RemoteMetricsMultiJvmNode2 extends ClusterTestNode { - - import RemoteMetricsMultiJvmSpec._ - - val testNodes = NrOfNodes - - "Metrics manager" must { - "provide metrics of all nodes in the cluster" in { - - val allMetricsAvaiable = new CountDownLatch(1) - - node.metricsManager.refreshTimeout = MetricsRefreshTimeout - node.metricsManager.addMonitor(new AllMetricsAvailableMonitor("all-metrics-available", allMetricsAvaiable, NrOfNodes)) - - LocalCluster.barrier("node-start", NrOfNodes).await() - - allMetricsAvaiable.await() - - LocalCluster.barrier("check-all-remote-metrics", NrOfNodes) { - node.metricsManager.getAllMetrics.size must be(2) - } - - val cachedMetrics = node.metricsManager.getMetrics("node1") - val metricsFromZnode = node.metricsManager.getMetrics("node1", false) - - LocalCluster.barrier("check-single-remote-metrics", NrOfNodes) { - cachedMetrics must not be (null) - metricsFromZnode must not be (null) - } - - Thread sleep MetricsRefreshTimeout.toMillis - - LocalCluster.barrier("remote-metrics-is-updated", NrOfNodes) { - node.metricsManager.getMetrics("node1") must not be (cachedMetrics) - 
node.metricsManager.getMetrics("node1", false) must not be (metricsFromZnode) - } - - LocalCluster.barrier("some-nodes-leave", NrOfNodes) { - node.shutdown() - } - } - } - -} - diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmNode1.conf deleted file mode 100644 index 2f642a20f0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmNode1.conf +++ /dev/null @@ -1,2 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmNode1.opts deleted file mode 100644 index dc86c1c9c0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmNode2.conf deleted file mode 100644 index 2f642a20f0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmNode2.conf +++ /dev/null @@ -1,2 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmNode2.opts deleted file mode 100644 index bb140941a5..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmNode2.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node2 -Dakka.remote.port=9992 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmSpec.scala deleted file mode 100644 index 7dfdec2f7c..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmSpec.scala +++ /dev/null @@ -1,112 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. 
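A metrics monitor in these specs is just a predicate plus a callback: the manager evaluates reactsOn against fresh NodeMetrics on every refresh tick and invokes react when it holds. A minimal one-shot local monitor in the same style (the load threshold is an arbitrary illustration):

import java.util.concurrent.CountDownLatch

val overloaded = new CountDownLatch(1)

node.metricsManager.addMonitor(new LocalMetricsAlterationMonitor {
  val id = "high-load-monitor" // identifies the monitor, e.g. for removeMonitor
  def reactsOn(metrics: NodeMetrics) = metrics.systemLoadAverage > 0.8
  def react(metrics: NodeMetrics) = overloaded.countDown()
})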
- * - * - * package akka.cluster.migration - * - * import org.scalatest.WordSpec - * import org.scalatest.matchers.MustMatchers - * import org.scalatest.BeforeAndAfterAll - * - * import akka.actor._ - * import Actor._ - * import akka.cluster._ - * import ChangeListener._ - * import Cluster._ - * import akka.config.Config - * import akka.serialization.Serialization - * import akka.cluster.LocalCluster._ - * - * import java.util.concurrent._ - * - * object MigrationExplicitMultiJvmSpec { - * var NrOfNodes = 2 - * - * class HelloWorld extends Actor with Serializable { - * def receive = { - * case "Hello" ⇒ - * reply("World from node [" + Config.nodename + "]") - * } - * } - * } - * - * class MigrationExplicitMultiJvmNode1 extends MasterClusterTestNode { - * import MigrationExplicitMultiJvmSpec._ - * - * val testNodes = NrOfNodes - * - * "A cluster" must { - * - * "be able to migrate an actor from one node to another" in { - * - * barrier("start-node-1", NrOfNodes) { - * Cluster.node.start() - * } - * - * barrier("start-node-2", NrOfNodes) { - * } - * - * barrier("store-1-in-node-1", NrOfNodes) { - * val serializer = Serialization.serializerFor(classOf[HelloWorld]).fold(x ⇒ fail("No serializer found"), s ⇒ s) - * node.store("hello-world", classOf[HelloWorld], serializer) - * } - * - * barrier("use-1-in-node-2", NrOfNodes) { - * } - * - * barrier("migrate-from-node2-to-node1", NrOfNodes) { - * } - * - * barrier("check-actor-is-moved-to-node1", NrOfNodes) { - * node.isInUseOnNode("hello-world") must be(true) - * - * val actorRef = Actor.registry.local.actorFor("hello-world").getOrElse(fail("Actor should have been in the local actor registry")) - * actorRef.address must be("hello-world") - * (actorRef ? "Hello").as[String].get must be("World from node [node1]") - * } - * - * node.shutdown() - * } - * } - * } - * - * class MigrationExplicitMultiJvmNode2 extends ClusterTestNode { - * import MigrationExplicitMultiJvmSpec._ - * - * "A cluster" must { - * - * "be able to migrate an actor from one node to another" in { - * - * barrier("start-node-1", NrOfNodes) { - * } - * - * barrier("start-node-2", NrOfNodes) { - * Cluster.node.start() - * } - * - * barrier("store-1-in-node-1", NrOfNodes) { - * } - * - * barrier("use-1-in-node-2", NrOfNodes) { - * val actorOrOption = node.use("hello-world") - * if (actorOrOption.isEmpty) fail("Actor could not be retrieved") - * - * val actorRef = actorOrOption.get - * actorRef.address must be("hello-world") - * - * (actorRef ? 
"Hello").as[String].get must be("World from node [node2]") - * } - * - * barrier("migrate-from-node2-to-node1", NrOfNodes) { - * node.migrate(NodeAddress(node.nodeAddress.clusterName, "node1"), "hello-world") - * Thread.sleep(2000) - * } - * - * barrier("check-actor-is-moved-to-node1", NrOfNodes) { - * } - * - * node.shutdown() - * } - * } - * } - */ diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode1.conf deleted file mode 100644 index f510c5253c..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode1.conf +++ /dev/null @@ -1,6 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handlers = ["akka.testkit.TestEventListener"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-test.router = "round-robin" -akka.actor.deployment.service-test.cluster.preferred-nodes = ["node:node2","node:node3"] -akka.actor.deployment.service-test.nr-of-instances = 2 \ No newline at end of file diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode1.opts deleted file mode 100644 index dc86c1c9c0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode2.conf deleted file mode 100644 index b7c3e53e6f..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode2.conf +++ /dev/null @@ -1,5 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-test.router = "round-robin" -akka.actor.deployment.service-test.cluster.preferred-nodes = ["node:node2","node:node3"] -akka.actor.deployment.service-test.nr-of-instances = 2 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode2.opts deleted file mode 100644 index bb140941a5..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode2.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node2 -Dakka.remote.port=9992 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode3.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode3.conf deleted file mode 100644 index b7c3e53e6f..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode3.conf +++ /dev/null @@ -1,5 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-test.router = "round-robin" -akka.actor.deployment.service-test.cluster.preferred-nodes = ["node:node2","node:node3"] -akka.actor.deployment.service-test.nr-of-instances = 2 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode3.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode3.opts 
deleted file mode 100644
index 089e3b7776..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode3.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node3 -Dakka.remote.port=9993
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmSpec.scala
deleted file mode 100644
index 98d2aaf394..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmSpec.scala
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- * Copyright (C) 2009-2012 Typesafe Inc.
- */
-
-package akka.cluster.reflogic
-
-import akka.cluster._
-import akka.cluster.Cluster._
-import akka.actor.Actor
-import akka.event.EventHandler
-import akka.testkit.{ EventFilter, TestEvent }
-import akka.routing.RoutingException
-import java.net.ConnectException
-import java.nio.channels.{ ClosedChannelException, NotYetConnectedException }
-import akka.cluster.LocalCluster._
-
-object ClusterActorRefCleanupMultiJvmSpec {
-
-  val NrOfNodes = 3
-
-  class TestActor extends Actor with Serializable {
-    def receive = {
-      case _ ⇒ {}
-    }
-  }
-
-}
-
-class ClusterActorRefCleanupMultiJvmNode1 extends MasterClusterTestNode {
-
-  import ClusterActorRefCleanupMultiJvmSpec._
-
-  val testNodes = NrOfNodes
-
-  "ClusterActorRef" must {
-    "clean itself up" ignore {
-      Cluster.node.start()
-      barrier("awaitStarted", NrOfNodes).await()
-
-      val ref = Actor.actorOf(Props[ClusterActorRefCleanupMultiJvmSpec.TestActor]("service-test"))
-
-      ref.isInstanceOf[ClusterActorRef] must be(true)
-
-      val clusteredRef = ref.asInstanceOf[ClusterActorRef]
-
-      barrier("awaitActorCreated", NrOfNodes).await()
-
-      //verify that all remote actors are there.
-      clusteredRef.nrOfConnections must be(2)
-
-      // ignore exceptions from killing nodes
-      val ignoreExceptions = Seq(
-        EventFilter[ClosedChannelException],
-        EventFilter[NotYetConnectedException],
-        EventFilter[RoutingException],
-        EventFilter[ConnectException])
-
-      EventHandler.notify(TestEvent.Mute(ignoreExceptions))
-
-      //just some waiting to make sure that the node has died.
-      Thread.sleep(5000)
-
-      //send some requests; this should trigger the cleanup
-      try {
-        clusteredRef ! "hello"
-        clusteredRef ! "hello"
-      } catch {
-        case e: ClosedChannelException ⇒
-        case e: NotYetConnectedException ⇒
-        case e: RoutingException ⇒
-      }
-
-      //node 3 has already exited at this point, so only two JVMs reach this barrier.
-      barrier("node-3-dead", NrOfNodes - 1).await()
-
-      //since the call to the node failed, the node must have been removed from the list.
-      clusteredRef.nrOfConnections must be(1)
-
-      //just some waiting to make sure that the node has died.
-      Thread.sleep(5000)
-
-      //trigger the cleanup.
-      try {
-        clusteredRef ! "hello"
-        clusteredRef ! "hello"
-      } catch {
-        case e: ClosedChannelException ⇒
-        case e: NotYetConnectedException ⇒
-        case e: RoutingException ⇒
-      }
-
-      //now there must not be any remaining connections after the death of the last actor.
-      clusteredRef.nrOfConnections must be(0)
-
-      //and let's make sure we now get the correct exception if we try to use the ref.
-      intercept[RoutingException] {
-        clusteredRef ! "Hello"
-      }
-
-      node.shutdown()
-    }
-  }
-}
-
-class ClusterActorRefCleanupMultiJvmNode2 extends ClusterTestNode {
-
-  import ClusterActorRefCleanupMultiJvmSpec._
-
-  val testNodes = NrOfNodes
-
-  //we are only using the nodes for their capacity, not for testing on this node itself.
- "___" must { - "___" ignore { - Runtime.getRuntime.addShutdownHook(new Thread() { - override def run() { - ClusterTestNode.exit(classOf[ClusterActorRefCleanupMultiJvmNode2].getName) - } - }) - - Cluster.node.start() - barrier("awaitStarted", NrOfNodes).await() - - barrier("awaitActorCreated", NrOfNodes).await() - - barrier("node-3-dead", NrOfNodes - 1).await() - - System.exit(0) - } - } -} - -class ClusterActorRefCleanupMultiJvmNode3 extends ClusterTestNode { - - import ClusterActorRefCleanupMultiJvmSpec._ - - val testNodes = NrOfNodes - - //we are only using the nodes for their capacity, not for testing on this node itself. - "___" must { - "___" ignore { - Runtime.getRuntime.addShutdownHook(new Thread() { - override def run() { - ClusterTestNode.exit(classOf[ClusterActorRefCleanupMultiJvmNode3].getName) - } - }) - - Cluster.node.start() - barrier("awaitStarted", NrOfNodes).await() - - barrier("awaitActorCreated", NrOfNodes).await() - - System.exit(0) - } - } -} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmNode1.conf deleted file mode 100644 index dca432f404..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmNode1.conf +++ /dev/null @@ -1,7 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.hello-world-write-behind-nosnapshot.router = "direct" -akka.actor.deployment.hello-world-write-behind-nosnapshot.nr-of-instances = 1 -akka.actor.deployment.hello-world-write-behind-nosnapshot.cluster.replication.storage = "transaction-log" -akka.actor.deployment.hello-world-write-behind-nosnapshot.cluster.replication.strategy = "write-behind" -akka.cluster.replication.snapshot-frequency = 1000 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmNode1.opts deleted file mode 100644 index dc86c1c9c0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmNode2.conf deleted file mode 100644 index dca432f404..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmNode2.conf +++ /dev/null @@ -1,7 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.hello-world-write-behind-nosnapshot.router = "direct" -akka.actor.deployment.hello-world-write-behind-nosnapshot.nr-of-instances = 1 
-akka.actor.deployment.hello-world-write-behind-nosnapshot.cluster.replication.storage = "transaction-log" -akka.actor.deployment.hello-world-write-behind-nosnapshot.cluster.replication.strategy = "write-behind" -akka.cluster.replication.snapshot-frequency = 1000 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmNode2.opts deleted file mode 100644 index bb140941a5..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmNode2.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node2 -Dakka.remote.port=9992 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmSpec.scala deleted file mode 100644 index a90d26ad8d..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmSpec.scala +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -// package akka.cluster.replication.transactionlog.writebehind.nosnapshot - -// import akka.actor._ -// import akka.cluster._ -// import Cluster._ -// import akka.config.Config -// import akka.cluster.LocalCluster._ - -// object ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmSpec { -// var NrOfNodes = 2 - -// sealed trait TransactionLogMessage extends Serializable -// case class Count(nr: Int) extends TransactionLogMessage -// case class Log(full: String) extends TransactionLogMessage -// case object GetLog extends TransactionLogMessage - -// class HelloWorld extends Actor with Serializable { -// var log = "" -// def receive = { -// case Count(nr) ⇒ -// log += nr.toString -// reply("World from node [" + Config.nodename + "]") -// case GetLog ⇒ -// reply(Log(log)) -// } -// } -// } - -// class ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmNode1 extends ClusterTestNode { -// import ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmSpec._ - -// "A cluster" must { - -// "be able to replicate an actor with a transaction log and replay transaction log after actor migration" ignore { - -// barrier("start-node1", NrOfNodes) { -// Cluster.node.start() -// } - -// barrier("create-actor-on-node1", NrOfNodes) { -// val actorRef = Actor.actorOf(Props[HelloWorld]("hello-world-write-behind-nosnapshot") -// // node.isInUseOnNode("hello-world") must be(true) -// actorRef.address must be("hello-world-write-behind-nosnapshot") -// for (i ← 0 until 10) { -// (actorRef ? 
Count(i)).as[String] must be(Some("World from node [node1]")) -// } -// } - -// barrier("start-node2", NrOfNodes).await() - -// node.shutdown() -// } -// } -// } - -// class ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmNode2 extends MasterClusterTestNode { -// import ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmSpec._ - -// val testNodes = NrOfNodes - -// "A cluster" must { - -// "be able to replicate an actor with a transaction log and replay transaction log after actor migration" ignore { - -// barrier("start-node1", NrOfNodes).await() - -// barrier("create-actor-on-node1", NrOfNodes).await() - -// barrier("start-node2", NrOfNodes) { -// Cluster.node.start() -// } - -// Thread.sleep(5000) // wait for fail-over from node1 to node2 - -// barrier("check-fail-over-to-node2", NrOfNodes - 1) { -// // both remaining nodes should now have the replica -// node.isInUseOnNode("hello-world-write-behind-nosnapshot") must be(true) -// val actorRef = Actor.registry.local.actorFor("hello-world-write-behind-nosnapshot").getOrElse(fail("Actor should have been in the local actor registry")) -// actorRef.address must be("hello-world-write-behind-nosnapshot") -// (actorRef ? GetLog).as[Log].get must be(Log("0123456789")) -// } - -// node.shutdown() -// } -// } - -// override def onReady() { -// LocalBookKeeperEnsemble.start() -// } - -// override def onShutdown() { -// TransactionLog.shutdown() -// LocalBookKeeperEnsemble.shutdown() -// } -// } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmNode1.conf deleted file mode 100644 index a3ec6ec2c3..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmNode1.conf +++ /dev/null @@ -1,7 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.hello-world.router = "direct" -akka.actor.deployment.hello-world-write-behind-snapshot.nr-of-instances = 1 -akka.actor.deployment.hello-world-write-behind-snapshot.cluster.replication.storage = "transaction-log" -akka.actor.deployment.hello-world-write-behind-snapshot.cluster.replication.strategy = "write-behind" -akka.cluster.replication.snapshot-frequency = 7 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmNode1.opts deleted file mode 100644 index dc86c1c9c0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmNode2.conf deleted file mode 100644 index a3ec6ec2c3..0000000000 --- 
a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmNode2.conf +++ /dev/null @@ -1,7 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.hello-world.router = "direct" -akka.actor.deployment.hello-world-write-behind-snapshot.nr-of-instances = 1 -akka.actor.deployment.hello-world-write-behind-snapshot.cluster.replication.storage = "transaction-log" -akka.actor.deployment.hello-world-write-behind-snapshot.cluster.replication.strategy = "write-behind" -akka.cluster.replication.snapshot-frequency = 7 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmNode2.opts deleted file mode 100644 index bb140941a5..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmNode2.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node2 -Dakka.remote.port=9992 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmSpec.scala deleted file mode 100644 index fde113080e..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmSpec.scala +++ /dev/null @@ -1,118 +0,0 @@ -/* - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -// package akka.cluster.replication.transactionlog.writebehind.snapshot - -// import akka.actor._ -// import akka.cluster._ -// import Cluster._ -// import akka.config.Config -// import akka.cluster.LocalCluster._ - -// object ReplicationTransactionLogWriteBehindSnapshotMultiJvmSpec { -// var NrOfNodes = 2 - -// sealed trait TransactionLogMessage extends Serializable -// case class Count(nr: Int) extends TransactionLogMessage -// case class Log(full: String) extends TransactionLogMessage -// case object GetLog extends TransactionLogMessage - -// class HelloWorld extends Actor with Serializable { -// var log = "" -// //println("Creating HelloWorld log =======> " + log) -// def receive = { -// case Count(nr) ⇒ -// log += nr.toString -// //println("Message to HelloWorld log =======> " + log) -// reply("World from node [" + Config.nodename + "]") -// case GetLog ⇒ -// reply(Log(log)) -// } -// } -// } - -// class ReplicationTransactionLogWriteBehindSnapshotMultiJvmNode1 extends ClusterTestNode { -// import ReplicationTransactionLogWriteBehindSnapshotMultiJvmSpec._ - -// "A cluster" must { - -// "be able to replicate an actor with a transaction log and replay transaction log after actor migration" ignore { - -// barrier("start-node1", NrOfNodes) { -// Cluster.node.start() -// } - -// barrier("create-actor-on-node1", NrOfNodes) { -// val actorRef = Actor.actorOf(Props[HelloWorld]("hello-world-write-behind-snapshot") -// node.isInUseOnNode("hello-world-write-behind-snapshot") must be(true) -// actorRef.address must be("hello-world-write-behind-snapshot") -// var counter = 0 -// (actorRef ? 
Count(counter)).as[String].get must be("World from node [node1]") -// counter += 1 -// (actorRef ? Count(counter)).as[String].get must be("World from node [node1]") -// counter += 1 -// (actorRef ? Count(counter)).as[String].get must be("World from node [node1]") -// counter += 1 -// (actorRef ? Count(counter)).as[String].get must be("World from node [node1]") -// counter += 1 -// (actorRef ? Count(counter)).as[String].get must be("World from node [node1]") -// counter += 1 -// (actorRef ? Count(counter)).as[String].get must be("World from node [node1]") -// counter += 1 -// (actorRef ? Count(counter)).as[String].get must be("World from node [node1]") -// counter += 1 -// (actorRef ? Count(counter)).as[String].get must be("World from node [node1]") -// counter += 1 -// (actorRef ? Count(counter)).as[String].get must be("World from node [node1]") -// counter += 1 -// (actorRef ? Count(counter)).as[String].get must be("World from node [node1]") -// } - -// barrier("start-node2", NrOfNodes).await() - -// node.shutdown() -// } -// } -// } - -// class ReplicationTransactionLogWriteBehindSnapshotMultiJvmNode2 extends MasterClusterTestNode { -// import ReplicationTransactionLogWriteBehindSnapshotMultiJvmSpec._ - -// val testNodes = NrOfNodes - -// "A cluster" must { - -// "be able to replicate an actor with a transaction log and replay transaction log after actor migration" ignore { - -// barrier("start-node1", NrOfNodes).await() - -// barrier("create-actor-on-node1", NrOfNodes).await() - -// barrier("start-node2", NrOfNodes) { -// Cluster.node.start() -// } - -// Thread.sleep(5000) // wait for fail-over from node1 to node2 - -// barrier("check-fail-over-to-node2", NrOfNodes - 1) { -// // both remaining nodes should now have the replica -// node.isInUseOnNode("hello-world-write-behind-snapshot") must be(true) -// val actorRef = Actor.registry.local.actorFor("hello-world-write-behind-snapshot").getOrElse(fail("Actor should have been in the local actor registry")) -// actorRef.address must be("hello-world-write-behind-snapshot") -// (actorRef ? 
GetLog).as[Log].get must be(Log("0123456789")) -// } - -// node.shutdown() -// } -// } - -// override def onReady() { -// LocalBookKeeperEnsemble.start() -// } - -// override def onShutdown() { -// TransactionLog.shutdown() -// LocalBookKeeperEnsemble.shutdown() -// } -// } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmNode1.conf deleted file mode 100644 index 8de04a2eb1..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmNode1.conf +++ /dev/null @@ -1,7 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "DEBUG" -akka.actor.deployment.hello-world-write-through-nosnapshot.router = "direct" -akka.actor.deployment.hello-world-write-through-nosnapshot.cluster.replication.storage = "transaction-log" -akka.actor.deployment.hello-world-write-through-nosnapshot.cluster.replication.strategy = "write-through" -akka.actor.deployment.service-hello.cluster.preferred-nodes = ["node:node1"] -akka.cluster.replication.snapshot-frequency = 1000 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmNode1.opts deleted file mode 100644 index dc86c1c9c0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmNode2.conf deleted file mode 100644 index 8de04a2eb1..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmNode2.conf +++ /dev/null @@ -1,7 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "DEBUG" -akka.actor.deployment.hello-world-write-through-nosnapshot.router = "direct" -akka.actor.deployment.hello-world-write-through-nosnapshot.cluster.replication.storage = "transaction-log" -akka.actor.deployment.hello-world-write-through-nosnapshot.cluster.replication.strategy = "write-through" -akka.actor.deployment.service-hello.cluster.preferred-nodes = ["node:node1"] -akka.cluster.replication.snapshot-frequency = 1000 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmNode2.opts deleted file mode 100644 index bb140941a5..0000000000 --- 
a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmNode2.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node2 -Dakka.remote.port=9992 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmSpec.scala deleted file mode 100644 index c2e6ed678b..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmSpec.scala +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -// package akka.cluster.replication.transactionlog.writethrough.nosnapshot - -// import akka.actor._ -// import akka.cluster._ -// import Cluster._ -// import akka.config.Config -// import akka.cluster.LocalCluster._ - -// object ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmSpec { -// var NrOfNodes = 2 - -// sealed trait TransactionLogMessage extends Serializable -// case class Count(nr: Int) extends TransactionLogMessage -// case class Log(full: String) extends TransactionLogMessage -// case object GetLog extends TransactionLogMessage - -// class HelloWorld extends Actor with Serializable { -// var log = "" -// def receive = { -// case Count(nr) ⇒ -// println("Received number: " + nr + " on " + self.address) -// log += nr.toString -// reply("World from node [" + Config.nodename + "]") -// case GetLog ⇒ -// println("Received getLog on " + uuid) -// reply(Log(log)) -// } -// } -// } - -// class ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmNode1 extends ClusterTestNode { -// import ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmSpec._ - -// "A cluster" must { - -// "be able to replicate an actor with a transaction log and replay transaction log after actor migration" ignore { - -// barrier("start-node1", NrOfNodes) { -// Cluster.node.start() -// } - -// barrier("create-actor-on-node1", NrOfNodes) { -// val actorRef = Actor.actorOf(Props[HelloWorld]("hello-world-write-through-nosnapshot") -// actorRef.address must be("hello-world-write-through-nosnapshot") -// for (i ← 0 until 10) -// (actorRef ? 
Count(i)).as[String] must be(Some("World from node [node1]")) -// } - -// barrier("start-node2", NrOfNodes).await() - -// node.shutdown() -// } -// } -// } - -// class ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmNode2 extends MasterClusterTestNode { -// import ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmSpec._ - -// val testNodes = NrOfNodes - -// "A cluster" must { - -// "be able to replicate an actor with a transaction log and replay transaction log after actor migration" ignore { - -// barrier("start-node1", NrOfNodes).await() - -// barrier("create-actor-on-node1", NrOfNodes).await() - -// barrier("start-node2", NrOfNodes) { -// Cluster.node.start() -// } - -// Thread.sleep(5000) // wait for fail-over from node1 to node2 - -// barrier("check-fail-over-to-node2", NrOfNodes - 1) { -// // both remaining nodes should now have the replica -// node.isInUseOnNode("hello-world-write-through-nosnapshot") must be(true) -// val actorRef = Actor.registry.local.actorFor("hello-world-write-through-nosnapshot").getOrElse(fail("Actor should have been in the local actor registry")) -// actorRef.address must be("hello-world-write-through-nosnapshot") -// (actorRef ? GetLog).as[Log].get must be(Log("0123456789")) -// } - -// node.shutdown() -// } -// } - -// override def onReady() { -// LocalBookKeeperEnsemble.start() -// } - -// override def onShutdown() { -// TransactionLog.shutdown() -// LocalBookKeeperEnsemble.shutdown() -// } -// } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmNode1.conf deleted file mode 100644 index 82d6dc18ce..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmNode1.conf +++ /dev/null @@ -1,7 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.hello-world-write-through-snapshot.router = "direct" -akka.actor.deployment.hello-world-write-through-snapshot.nr-of-instances = 1 -akka.actor.deployment.hello-world-write-through-snapshot.cluster.replication.storage = "transaction-log" -akka.actor.deployment.hello-world-write-through-snapshot.cluster.replication.strategy = "write-through" -akka.cluster.replication.snapshot-frequency = 7 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmNode1.opts deleted file mode 100644 index dc86c1c9c0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmNode2.conf deleted file mode 100644 index 
82d6dc18ce..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmNode2.conf +++ /dev/null @@ -1,7 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.hello-world-write-through-snapshot.router = "direct" -akka.actor.deployment.hello-world-write-through-snapshot.nr-of-instances = 1 -akka.actor.deployment.hello-world-write-through-snapshot.cluster.replication.storage = "transaction-log" -akka.actor.deployment.hello-world-write-through-snapshot.cluster.replication.strategy = "write-through" -akka.cluster.replication.snapshot-frequency = 7 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmNode2.opts deleted file mode 100644 index bb140941a5..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmNode2.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node2 -Dakka.remote.port=9992 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmSpec.scala deleted file mode 100644 index 3df29dd510..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmSpec.scala +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -// package akka.cluster.replication.transactionlog.writethrough.snapshot - -// import akka.actor._ -// import akka.cluster._ -// import Cluster._ -// import akka.config.Config -// import akka.cluster.LocalCluster._ - -// object ReplicationTransactionLogWriteThroughSnapshotMultiJvmSpec { -// var NrOfNodes = 2 - -// sealed trait TransactionLogMessage extends Serializable -// case class Count(nr: Int) extends TransactionLogMessage -// case class Log(full: String) extends TransactionLogMessage -// case object GetLog extends TransactionLogMessage - -// class HelloWorld extends Actor with Serializable { -// var log = "" -// def receive = { -// case Count(nr) ⇒ -// log += nr.toString -// reply("World from node [" + Config.nodename + "]") -// case GetLog ⇒ -// reply(Log(log)) -// } -// } -// } - -// class ReplicationTransactionLogWriteThroughSnapshotMultiJvmNode1 extends ClusterTestNode { -// import ReplicationTransactionLogWriteThroughSnapshotMultiJvmSpec._ - -// "A cluster" must { - -// "be able to replicate an actor with a transaction log and replay transaction log after actor migration" ignore { - -// barrier("start-node1", NrOfNodes) { -// Cluster.node.start() -// } - -// barrier("create-actor-on-node1", NrOfNodes) { -// val actorRef = Actor.actorOf(Props[HelloWorld]("hello-world-write-through-snapshot") -// node.isInUseOnNode("hello-world-write-through-snapshot") must be(true) -// actorRef.address must be("hello-world-write-through-snapshot") -// var counter = 0 -// (actorRef ? 
Count(counter)).as[String].get must be("World from node [node1]") -// counter += 1 -// (actorRef ? Count(counter)).as[String].get must be("World from node [node1]") -// counter += 1 -// (actorRef ? Count(counter)).as[String].get must be("World from node [node1]") -// counter += 1 -// (actorRef ? Count(counter)).as[String].get must be("World from node [node1]") -// counter += 1 -// (actorRef ? Count(counter)).as[String].get must be("World from node [node1]") -// counter += 1 -// (actorRef ? Count(counter)).as[String].get must be("World from node [node1]") -// counter += 1 -// (actorRef ? Count(counter)).as[String].get must be("World from node [node1]") -// counter += 1 -// (actorRef ? Count(counter)).as[String].get must be("World from node [node1]") -// counter += 1 -// (actorRef ? Count(counter)).as[String].get must be("World from node [node1]") -// counter += 1 -// (actorRef ? Count(counter)).as[String].get must be("World from node [node1]") -// } - -// barrier("start-node2", NrOfNodes).await() - -// node.shutdown() -// } -// } -// } - -// class ReplicationTransactionLogWriteThroughSnapshotMultiJvmNode2 extends MasterClusterTestNode { -// import ReplicationTransactionLogWriteThroughSnapshotMultiJvmSpec._ - -// val testNodes = NrOfNodes - -// "A cluster" must { - -// "be able to replicate an actor with a transaction log and replay transaction log after actor migration" ignore { - -// barrier("start-node1", NrOfNodes).await() - -// barrier("create-actor-on-node1", NrOfNodes).await() - -// barrier("start-node2", NrOfNodes) { -// Cluster.node.start() -// } - -// Thread.sleep(5000) // wait for fail-over from node1 to node2 - -// barrier("check-fail-over-to-node2", NrOfNodes - 1) { -// // both remaining nodes should now have the replica -// node.isInUseOnNode("hello-world-write-through-snapshot") must be(true) -// val actorRef = Actor.registry.local.actorFor("hello-world-write-through-snapshot").getOrElse(fail("Actor should have been in the local actor registry")) -// actorRef.address must be("hello-world-write-through-snapshot") -// (actorRef ? 
GetLog).as[Log].get must be(Log("0123456789")) -// } - -// node.shutdown() -// } -// } - -// override def onReady() { -// LocalBookKeeperEnsemble.start() -// } - -// override def onShutdown() { -// TransactionLog.shutdown() -// LocalBookKeeperEnsemble.shutdown() -// } -// } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmNode1.conf deleted file mode 100644 index 7332be6934..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmNode1.conf +++ /dev/null @@ -1,5 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handlers = ["akka.testkit.TestEventListener"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "direct" -akka.actor.deployment.service-hello.cluster.preferred-nodes = ["node:node2"] diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmNode1.opts deleted file mode 100644 index dc86c1c9c0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmNode2.conf deleted file mode 100644 index 7332be6934..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmNode2.conf +++ /dev/null @@ -1,5 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handlers = ["akka.testkit.TestEventListener"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "direct" -akka.actor.deployment.service-hello.cluster.preferred-nodes = ["node:node2"] diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmNode2.opts deleted file mode 100644 index bb140941a5..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmNode2.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node2 -Dakka.remote.port=9992 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmSpec.scala deleted file mode 100644 index 6bc1653836..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmSpec.scala +++ /dev/null @@ -1,90 +0,0 @@ -package akka.cluster.routing.direct.failover - -import akka.config.Config -import scala.Predef._ -import akka.cluster.{ ClusterActorRef, Cluster, MasterClusterTestNode, ClusterTestNode } -import akka.actor.{ ActorInitializationException, Actor, ActorRef } -import akka.util.duration._ -import akka.util.{ Duration, Timer } -import akka.event.EventHandler -import akka.testkit.{ EventFilter, TestEvent } -import java.net.ConnectException -import 
java.nio.channels.NotYetConnectedException -import akka.cluster.LocalCluster -import akka.dispatch.Await - -object DirectRoutingFailoverMultiJvmSpec { - - val NrOfNodes = 2 - - class SomeActor extends Actor with Serializable { - - def receive = { - case "identify" ⇒ - reply(Config.nodename) - } - } -} - -class DirectRoutingFailoverMultiJvmNode1 extends MasterClusterTestNode { - - import DirectRoutingFailoverMultiJvmSpec._ - - val testNodes = NrOfNodes - - "Direct Router" must { - "throw exception [ActorInitializationException] upon fail-over" ignore { - - val ignoreExceptions = Seq(EventFilter[NotYetConnectedException], EventFilter[ConnectException]) - EventHandler.notify(TestEvent.Mute(ignoreExceptions)) - - var actor: ActorRef = null - - LocalCluster.barrier("node-start", NrOfNodes) { - Cluster.node.start() - } - - LocalCluster.barrier("actor-creation", NrOfNodes) { - actor = Actor.actorOf(Props[SomeActor]("service-hello")) - } - - LocalCluster.barrier("verify-actor", NrOfNodes) { - Await.result(actor ? "identify", timeout.duration) must equal("node2") - } - - val timer = Timer(30.seconds, true) - while (timer.isTicking && !Cluster.node.isInUseOnNode("service-hello")) {} - - LocalCluster.barrier("verify-fail-over", NrOfNodes - 1) { - actor ! "identify" // trigger failure and removal of connection to node2 - intercept[Exception] { - actor ! "identify" // trigger exception since no more connections - } - } - - Cluster.node.shutdown() - } - } -} - -class DirectRoutingFailoverMultiJvmNode2 extends ClusterTestNode { - - import DirectRoutingFailoverMultiJvmSpec._ - - "___" must { - "___" ignore { - LocalCluster.barrier("node-start", NrOfNodes) { - Cluster.node.start() - } - - LocalCluster.barrier("actor-creation", NrOfNodes).await() - - LocalCluster.barrier("verify-actor", NrOfNodes) { - Cluster.node.isInUseOnNode("service-hello") must be(true) - } - - Cluster.node.shutdown() - } - } -} - diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNode1MultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNode1MultiJvmSpec.scala deleted file mode 100644 index 6ce2219978..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNode1MultiJvmSpec.scala +++ /dev/null @@ -1,60 +0,0 @@ -package akka.cluster.routing.direct.homenode - -import akka.config.Config -import akka.actor.Actor -import akka.cluster.{ ClusterTestNode, MasterClusterTestNode, Cluster } -import Cluster._ -import akka.cluster.LocalCluster._ - -object HomeNodeMultiJvmSpec { - - val NrOfNodes = 2 - - class SomeActor extends Actor with Serializable { - def receive = { - case "identify" ⇒ { - reply(Config.nodename) - } - } - } - -} - -class HomeNodeMultiJvmNode1 extends MasterClusterTestNode { - - import HomeNodeMultiJvmSpec._ - - val testNodes = NrOfNodes - - "___" must { - "___" in { - Cluster.node.start() - barrier("waiting-for-begin", NrOfNodes).await() - barrier("waiting-for-end", NrOfNodes).await() - node.shutdown() - } - } -} - -class HomeNodeMultiJvmNode2 extends ClusterTestNode { - - import HomeNodeMultiJvmSpec._ - - "Direct Router: A Direct Router" must { - "obey 'home-node' config option when instantiated actor in cluster" in { - Cluster.node.start() - barrier("waiting-for-begin", NrOfNodes).await() - - val actorNode1 = Actor.actorOf(Props[SomeActor]("service-node1")) - val name1 = (actorNode1 ? 
"identify").get.asInstanceOf[String] - name1 must equal("node1") - - val actorNode2 = Actor.actorOf(Props[SomeActor]("service-node2") - val name2 = (actorNode2 ? "identify").get.asInstanceOf[String] - name2 must equal("node2") - - barrier("waiting-for-end", NrOfNodes).await() - node.shutdown() - } - } -} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNodeMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNodeMultiJvmNode1.conf deleted file mode 100644 index 893f798e1d..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNodeMultiJvmNode1.conf +++ /dev/null @@ -1,6 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-node1.router = "direct" -akka.actor.deployment.service-node1.cluster.preferred-nodes = ["node:node1"] -akka.actor.deployment.service-node2.router = "direct" -akka.actor.deployment.service-node2.cluster.preferred-nodes = ["node:node2"] diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNodeMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNodeMultiJvmNode1.opts deleted file mode 100644 index dc86c1c9c0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNodeMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNodeMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNodeMultiJvmNode2.conf deleted file mode 100644 index 893f798e1d..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNodeMultiJvmNode2.conf +++ /dev/null @@ -1,6 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-node1.router = "direct" -akka.actor.deployment.service-node1.cluster.preferred-nodes = ["node:node1"] -akka.actor.deployment.service-node2.router = "direct" -akka.actor.deployment.service-node2.cluster.preferred-nodes = ["node:node2"] diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNodeMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNodeMultiJvmNode2.opts deleted file mode 100644 index bb140941a5..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNodeMultiJvmNode2.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node2 -Dakka.remote.port=9992 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmNode1.conf deleted file mode 100644 index aa0d7771c8..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmNode1.conf +++ /dev/null @@ -1,4 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "direct" -akka.actor.deployment.service-hello.cluster.preferred-nodes = ["node:node1"] diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmNode1.opts 
b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmNode1.opts deleted file mode 100644 index dc86c1c9c0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmNode2.conf deleted file mode 100644 index aa0d7771c8..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmNode2.conf +++ /dev/null @@ -1,4 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "direct" -akka.actor.deployment.service-hello.cluster.preferred-nodes = ["node:node1"] diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmNode2.opts deleted file mode 100644 index bb140941a5..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmNode2.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node2 -Dakka.remote.port=9992 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmSpec.scala deleted file mode 100644 index a7b61af3e7..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmSpec.scala +++ /dev/null @@ -1,62 +0,0 @@ -package akka.cluster.routing.direct.normalusage - -import akka.actor.Actor -import akka.config.Config -import akka.cluster.{ ClusterActorRef, ClusterTestNode, MasterClusterTestNode, Cluster } -import akka.cluster.LocalCluster - -object SingleReplicaDirectRoutingMultiJvmSpec { - val NrOfNodes = 2 - - class SomeActor extends Actor with Serializable { - //println("---------------------------------------------------------------------------") - //println("SomeActor has been created on node [" + Config.nodename + "]") - //println("---------------------------------------------------------------------------") - - def receive = { - case "identify" ⇒ { - //println("The node received the 'identify' command: " + Config.nodename) - reply(Config.nodename) - } - } - } -} - -class SingleReplicaDirectRoutingMultiJvmNode1 extends MasterClusterTestNode { - - import SingleReplicaDirectRoutingMultiJvmSpec._ - - val testNodes = NrOfNodes - - "___" must { - "___" in { - Cluster.node.start() - LocalCluster.barrier("waiting-for-begin", NrOfNodes).await() - - LocalCluster.barrier("waiting-to-end", NrOfNodes).await() - Cluster.node.shutdown() - } - } -} - -class SingleReplicaDirectRoutingMultiJvmNode2 extends ClusterTestNode { - - import SingleReplicaDirectRoutingMultiJvmSpec._ - - "Direct Router: when node send message to existing node it" must { - "communicate with that node" in { - Cluster.node.start() - LocalCluster.barrier("waiting-for-begin", NrOfNodes).await() - - val actor = 
Actor.actorOf(Props[SomeActor]("service-hello")).asInstanceOf[ClusterActorRef] - actor.isRunning must be(true) - - val result = (actor ? "identify").get - result must equal("node1") - - LocalCluster.barrier("waiting-to-end", NrOfNodes).await() - Cluster.node.shutdown() - } - } -} - diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode1.conf deleted file mode 100644 index 1772693874..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode1.conf +++ /dev/null @@ -1,8 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handlers = ["akka.testkit.TestEventListener"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "random" -akka.actor.deployment.service-hello.cluster.preferred-nodes = ["node:node1", "node:node3"] -akka.actor.deployment.service-hello.nr-of-instances = 2 -akka.actor.timeout = 30 -akka.cluster.session-timeout = 10 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode1.opts deleted file mode 100644 index f1306829d9..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 -Dakka.event.force-sync=true diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode2.conf deleted file mode 100644 index 1772693874..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode2.conf +++ /dev/null @@ -1,8 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handlers = ["akka.testkit.TestEventListener"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "random" -akka.actor.deployment.service-hello.cluster.preferred-nodes = ["node:node1", "node:node3"] -akka.actor.deployment.service-hello.nr-of-instances = 2 -akka.actor.timeout = 30 -akka.cluster.session-timeout = 10 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode2.opts deleted file mode 100644 index 897e69f626..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode2.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node2 -Dakka.remote.port=9992 -Dakka.event.force-sync=true diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode3.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode3.conf deleted file mode 100644 index 1772693874..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode3.conf +++ /dev/null @@ -1,8 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handlers = ["akka.testkit.TestEventListener"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "random" 
-akka.actor.deployment.service-hello.cluster.preferred-nodes = ["node:node1", "node:node3"] -akka.actor.deployment.service-hello.nr-of-instances = 2 -akka.actor.timeout = 30 -akka.cluster.session-timeout = 10 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode3.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode3.opts deleted file mode 100644 index 4127fb94fc..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode3.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node3 -Dakka.remote.port=9993 -Dakka.event.force-sync=true diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmSpec.scala deleted file mode 100644 index cbdc42dbe9..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmSpec.scala +++ /dev/null @@ -1,145 +0,0 @@ -package akka.cluster.routing.random.failover - -import akka.config.Config -import akka.cluster._ -import akka.actor.{ ActorRef, Actor } -import akka.event.EventHandler -import akka.util.duration._ -import akka.util.{ Duration, Timer } -import akka.testkit.{ EventFilter, TestEvent } -import java.util.{ Collections, Set ⇒ JSet } -import java.net.ConnectException -import java.nio.channels.NotYetConnectedException -import akka.cluster.LocalCluster._ -import akka.dispatch.Await - -object RandomFailoverMultiJvmSpec { - - val NrOfNodes = 3 - - class SomeActor extends Actor with Serializable { - - def receive = { - case "identify" ⇒ - reply(Config.nodename) - } - } - -} - -class RandomFailoverMultiJvmNode1 extends MasterClusterTestNode { - - import RandomFailoverMultiJvmSpec._ - - def testNodes = NrOfNodes - - "Random: when random router fails" must { - "jump to another replica" ignore { - val ignoreExceptions = Seq( - EventFilter[NotYetConnectedException], - EventFilter[ConnectException], - EventFilter[ClusterException], - EventFilter[java.nio.channels.ClosedChannelException]) - - var oldFoundConnections: JSet[String] = null - var actor: ActorRef = null - - barrier("node-start", NrOfNodes) { - EventHandler.notify(TestEvent.Mute(ignoreExceptions)) - Cluster.node.start() - } - - barrier("actor-creation", NrOfNodes) { - actor = Actor.actorOf(Props[SomeActor]("service-hello")) - actor.isInstanceOf[ClusterActorRef] must be(true) - } - - val timer = Timer(30.seconds, true) - while (timer.isTicking && - !Cluster.node.isInUseOnNode("service-hello", "node1") && - !Cluster.node.isInUseOnNode("service-hello", "node3")) {} - - barrier("actor-usage", NrOfNodes) { - Cluster.node.isInUseOnNode("service-hello") must be(true) - oldFoundConnections = identifyConnections(actor) - - //since we have replication factor 2 - oldFoundConnections.size() must be(2) - } - - barrier("verify-fail-over", NrOfNodes - 1) { - val timer = Timer(30.seconds, true) - while (timer.isTicking && - !Cluster.node.isInUseOnNode("service-hello", "node1") && - !Cluster.node.isInUseOnNode("service-hello", "node2")) {} - - val newFoundConnections = identifyConnections(actor) - - //it still must be 2 since a different node should have been used to failover to - newFoundConnections.size() must be(2) - - //they are not disjoint, since there must be a single element that is in both - Collections.disjoint(newFoundConnections, oldFoundConnections) 
must be(false) - - //but they should not be equal since the shutdown-node has been replaced by another one. - newFoundConnections.equals(oldFoundConnections) must be(false) - } - - Cluster.node.shutdown() - } - } - - def identifyConnections(actor: ActorRef): JSet[String] = { - val set = new java.util.HashSet[String] - for (i ← 0 until 100) { // we should get hits from both nodes in 100 attempts, if not then not very random - val value = Await.result(actor ? "identify", timeout.duration).asInstanceOf[String] - set.add(value) - } - set - } -} - -class RandomFailoverMultiJvmNode2 extends ClusterTestNode { - - import RandomFailoverMultiJvmSpec._ - - "___" must { - "___" ignore { - barrier("node-start", NrOfNodes) { - Cluster.node.start() - } - - barrier("actor-creation", NrOfNodes).await() - barrier("actor-usage", NrOfNodes).await() - - Cluster.node.isInUseOnNode("service-hello") must be(false) - - Thread.sleep(5000) // wait for fail-over from node3 - - barrier("verify-fail-over", NrOfNodes - 1).await() - - Cluster.node.shutdown() - } - } -} - -class RandomFailoverMultiJvmNode3 extends ClusterTestNode { - - import RandomFailoverMultiJvmSpec._ - - "___" must { - "___" ignore { - barrier("node-start", NrOfNodes) { - Cluster.node.start() - } - - barrier("actor-creation", NrOfNodes).await() - barrier("actor-usage", NrOfNodes).await() - - Cluster.node.isInUseOnNode("service-hello") must be(true) - - Cluster.node.shutdown() - } - } -} - diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmNode1.conf deleted file mode 100644 index 012685917c..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmNode1.conf +++ /dev/null @@ -1,8 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-node1.router = "random" -akka.actor.deployment.service-node1.cluster.preferred-nodes = ["node:node1"] -akka.actor.deployment.service-node1.nr-of-instances = 1 -akka.actor.deployment.service-node2.router = "random" -akka.actor.deployment.service-node2.cluster.preferred-nodes = ["node:node2"] -akka.actor.deployment.service-node2.nr-of-instances = 1 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmNode1.opts deleted file mode 100644 index dc86c1c9c0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmNode2.conf deleted file mode 100644 index 012685917c..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmNode2.conf +++ /dev/null @@ -1,8 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-node1.router = "random" -akka.actor.deployment.service-node1.cluster.preferred-nodes = ["node:node1"] -akka.actor.deployment.service-node1.nr-of-instances = 1 -akka.actor.deployment.service-node2.router = "random" -akka.actor.deployment.service-node2.cluster.preferred-nodes = ["node:node2"] 
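The fail-over check in the random-routing spec above reduces to a small set comparison: after the dead node is replaced, the router should still see exactly two connections, overlapping with the original pair (the surviving node) but not equal to it (the replacement). A self-contained sketch of that invariant, with hypothetical node names standing in for the connections the test discovers at runtime:

object FailoverInvariantSketch extends App {
  // Hypothetical values mirroring the spec's assertions: connections observed
  // before node3 is shut down, and after fail-over has replaced it with node2.
  val oldConnections = Set("node1", "node3")
  val newConnections = Set("node1", "node2")

  assert(newConnections.size == 2)                           // replication factor of 2 still honoured
  assert((newConnections intersect oldConnections).nonEmpty) // not disjoint: the surviving node remains
  assert(newConnections != oldConnections)                   // but not equal: the dead node was replaced
}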
-akka.actor.deployment.service-node2.nr-of-instances = 1 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmNode2.opts deleted file mode 100644 index bb140941a5..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmNode2.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node2 -Dakka.remote.port=9992 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmSpec.scala deleted file mode 100644 index a8f4887464..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmSpec.scala +++ /dev/null @@ -1,60 +0,0 @@ -package akka.cluster.routing.random.homenode - -import akka.config.Config -import akka.actor.Actor -import akka.cluster.{ ClusterTestNode, MasterClusterTestNode, Cluster } -import Cluster._ -import akka.cluster.LocalCluster._ - -object HomeNodeMultiJvmSpec { - - val NrOfNodes = 2 - - class SomeActor extends Actor with Serializable { - def receive = { - case "identify" ⇒ { - reply(Config.nodename) - } - } - } - -} - -class HomeNodeMultiJvmNode1 extends MasterClusterTestNode { - - import HomeNodeMultiJvmSpec._ - - val testNodes = NrOfNodes - - "___" must { - "___" in { - Cluster.node.start() - barrier("waiting-for-begin", NrOfNodes).await() - barrier("waiting-for-end", NrOfNodes).await() - node.shutdown() - } - } -} - -class HomeNodeMultiJvmNode2 extends ClusterTestNode { - - import HomeNodeMultiJvmSpec._ - - "Random Router: A Random Router" must { - "obey 'home-node' config option when instantiated actor in cluster" in { - Cluster.node.start() - barrier("waiting-for-begin", NrOfNodes).await() - - val actorNode1 = Actor.actorOf(Props[SomeActor]("service-node1")) - val nameNode1 = (actorNode1 ? "identify").get.asInstanceOf[String] - nameNode1 must equal("node1") - - val actorNode2 = Actor.actorOf(Props[SomeActor]("service-node2")) - val nameNode2 = (actorNode2 ? 
"identify").get.asInstanceOf[String] - nameNode2 must equal("node2") - - barrier("waiting-for-end", NrOfNodes).await() - node.shutdown() - } - } -} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_1/Random1ReplicaMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_1/Random1ReplicaMultiJvmNode1.conf deleted file mode 100644 index 729dc64fd6..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_1/Random1ReplicaMultiJvmNode1.conf +++ /dev/null @@ -1,4 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "random" -akka.actor.deployment.service-hello.nr-of-instances = 1 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_1/Random1ReplicaMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_1/Random1ReplicaMultiJvmNode1.opts deleted file mode 100644 index dc86c1c9c0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_1/Random1ReplicaMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_1/Random1ReplicaMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_1/Random1ReplicaMultiJvmSpec.scala deleted file mode 100644 index 525a09467a..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_1/Random1ReplicaMultiJvmSpec.scala +++ /dev/null @@ -1,51 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -package akka.cluster.routing.random.replicationfactor_1 - -import akka.cluster._ -import akka.cluster.Cluster._ -import akka.actor._ -import akka.config.Config -import akka.cluster.LocalCluster._ - -/** - * Test that if a single node is used with a random router with replication factor then the actor is instantiated - * on the single node. - */ -object Random1ReplicaMultiJvmSpec { - - class HelloWorld extends Actor with Serializable { - def receive = { - case "Hello" ⇒ - reply("World from node [" + Config.nodename + "]") - } - } - -} - -class Random1ReplicaMultiJvmNode1 extends MasterClusterTestNode { - - import Random1ReplicaMultiJvmSpec._ - - val testNodes = 1 - - "Random Router: A cluster" must { - - "create clustered actor, get a 'local' actor on 'home' node and a 'ref' to actor on remote node" in { - Cluster.node.start() - - var hello = Actor.actorOf(Props[HelloWorld]("service-hello") - hello must not equal (null) - hello.address must equal("service-hello") - hello.isInstanceOf[ClusterActorRef] must be(true) - - hello must not equal (null) - val reply = (hello ? 
"Hello").as[String].getOrElse(fail("Should have recieved reply from node1")) - reply must equal("World from node [node1]") - - node.shutdown() - } - } -} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode1.conf deleted file mode 100644 index ae344f2100..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode1.conf +++ /dev/null @@ -1,4 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "random" -akka.actor.deployment.service-hello.nr-of-instances = 3 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode1.opts deleted file mode 100644 index dc86c1c9c0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode2.conf deleted file mode 100644 index 09a37715d0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode2.conf +++ /dev/null @@ -1,4 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "random" -akka.actor.deployment.service-hello.cluster.repliction-factor = 3 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode2.opts deleted file mode 100644 index bb140941a5..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode2.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node2 -Dakka.remote.port=9992 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode3.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode3.conf deleted file mode 100644 index ae344f2100..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode3.conf +++ /dev/null @@ -1,4 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "random" -akka.actor.deployment.service-hello.nr-of-instances = 3 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode3.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode3.opts deleted file mode 100644 index 089e3b7776..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode3.opts +++ /dev/null @@ -1 +0,0 @@ 
--Dakka.cluster.nodename=node3 -Dakka.remote.port=9993
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmSpec.scala
deleted file mode 100644
index c1a4175a09..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmSpec.scala
+++ /dev/null
@@ -1,119 +0,0 @@
-/**
- * Copyright (C) 2009-2012 Typesafe Inc.
- */
-
-package akka.cluster.routing.random.replicationfactor_3
-
-import akka.cluster._
-import akka.actor._
-import akka.config.Config
-import Cluster._
-import akka.cluster.LocalCluster._
-import akka.util.duration._
-import akka.dispatch.Await
-
-/**
- * When a MultiJvmNode is started, will it automatically be part of the cluster (i.e. automatically be eligible
- * for running actors), or will it be just a 'client' talking to the cluster?
- */
-object Random3ReplicasMultiJvmSpec {
-  val NrOfNodes = 3
-
-  class HelloWorld extends Actor with Serializable {
-    def receive = {
-      case "Hello" ⇒
-        reply("World from node [" + Config.nodename + "]")
-    }
-  }
-}
-
-/**
- * What is the purpose of this node? Is this just a node for the cluster to make use of?
- */
-class Random3ReplicasMultiJvmNode1 extends MasterClusterTestNode {
-
-  import Random3ReplicasMultiJvmSpec._
-
-  def testNodes: Int = NrOfNodes
-
-  "___" must {
-    "___" in {
-      Cluster.node.start()
-
-      barrier("start-nodes", NrOfNodes).await()
-
-      barrier("create-actor", NrOfNodes).await()
-
-      barrier("end-test", NrOfNodes).await()
-
-      node.shutdown()
-    }
-  }
-}
-
-class Random3ReplicasMultiJvmNode2 extends ClusterTestNode {
-
-  import Random3ReplicasMultiJvmSpec._
-  import Cluster._
-
-  "Random: A cluster" must {
-
-    "distribute requests randomly" in {
-      Cluster.node.start()
-
-      //wait till node 1 has started.
-      barrier("start-nodes", NrOfNodes).await()
-
-      //check if the actorRef is the expected remoteActorRef.
-      val hello = Actor.actorOf(Props[HelloWorld], "service-hello")
-      hello must not equal (null)
-      hello.address must equal("service-hello")
-      hello.isInstanceOf[ClusterActorRef] must be(true)
-
-      barrier("create-actor", NrOfNodes).await()
-
-      val replies = collection.mutable.Map.empty[String, Int]
-      def count(reply: String) = {
-        if (replies.get(reply).isEmpty) replies.put(reply, 1)
-        else replies.put(reply, replies(reply) + 1)
-      }
-
-      for (i ← 0 until 1000) {
-        count(Await.result((hello ?
"Hello").mapTo[String], 10 seconds)) - } - - val repliesNode1 = replies("World from node [node1]") - val repliesNode2 = replies("World from node [node2]") - val repliesNode3 = replies("World from node [node3]") - - assert(repliesNode1 > 100) - assert(repliesNode2 > 100) - assert(repliesNode3 > 100) - assert(repliesNode1 + repliesNode2 + repliesNode3 === 1000) - - barrier("end-test", NrOfNodes).await() - - node.shutdown() - } - } -} - -class Random3ReplicasMultiJvmNode3 extends ClusterTestNode { - - import Random3ReplicasMultiJvmSpec._ - import Cluster._ - - "___" must { - "___" in { - Cluster.node.start() - - barrier("start-nodes", NrOfNodes).await() - - barrier("create-actor", NrOfNodes).await() - - barrier("end-test", NrOfNodes).await() - - node.shutdown() - } - } -} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode1.conf deleted file mode 100644 index 0a858fb8fd..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode1.conf +++ /dev/null @@ -1,8 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handlers = ["akka.testkit.TestEventListener"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "round-robin" -akka.actor.deployment.service-hello.nr-of-instances = 2 -akka.actor.deployment.service-hello.cluster.preferred-nodes = ["node:node1","node:node3"] -akka.cluster.include-ref-node-in-replica-set = on -akka.actor.timeout = 30 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode1.opts deleted file mode 100644 index f1306829d9..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 -Dakka.event.force-sync=true diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode2.conf deleted file mode 100644 index 0a858fb8fd..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode2.conf +++ /dev/null @@ -1,8 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handlers = ["akka.testkit.TestEventListener"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "round-robin" -akka.actor.deployment.service-hello.nr-of-instances = 2 -akka.actor.deployment.service-hello.cluster.preferred-nodes = ["node:node1","node:node3"] -akka.cluster.include-ref-node-in-replica-set = on -akka.actor.timeout = 30 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode2.opts deleted file mode 100644 index 897e69f626..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode2.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node2 -Dakka.remote.port=9992 -Dakka.event.force-sync=true diff --git 
a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode3.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode3.conf
deleted file mode 100644
index 0a858fb8fd..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode3.conf
+++ /dev/null
@@ -1,8 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handlers = ["akka.testkit.TestEventListener"]
-akka.event-handler-level = "WARNING"
-akka.actor.deployment.service-hello.router = "round-robin"
-akka.actor.deployment.service-hello.nr-of-instances = 2
-akka.actor.deployment.service-hello.cluster.preferred-nodes = ["node:node1","node:node3"]
-akka.cluster.include-ref-node-in-replica-set = on
-akka.actor.timeout = 30
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode3.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode3.opts
deleted file mode 100644
index 4127fb94fc..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode3.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node3 -Dakka.remote.port=9993 -Dakka.event.force-sync=true
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmSpec.scala
deleted file mode 100644
index 1b97ef1075..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmSpec.scala
+++ /dev/null
@@ -1,146 +0,0 @@
-package akka.cluster.routing.roundrobin.failover
-
-import akka.config.Config
-import akka.cluster._
-import akka.actor.{ ActorRef, Actor, Props }
-import akka.event.EventHandler
-import akka.testkit.{ EventFilter, TestEvent }
-import akka.util.duration._
-import akka.util.{ Duration, Timer }
-import java.util.{ Collections, Set ⇒ JSet }
-import java.net.ConnectException
-import java.nio.channels.NotYetConnectedException
-import java.lang.Thread
-import akka.cluster.LocalCluster._
-import akka.dispatch.Await
-
-object RoundRobinFailoverMultiJvmSpec {
-
-  val NrOfNodes = 3
-
-  class SomeActor extends Actor with Serializable {
-
-    def receive = {
-      case "identify" ⇒
-        reply(Config.nodename)
-    }
-  }
-
-}
-
-class RoundRobinFailoverMultiJvmNode1 extends MasterClusterTestNode {
-
-  import RoundRobinFailoverMultiJvmSpec._
-
-  def testNodes = NrOfNodes
-
-  "Round Robin: when the round-robin router fails" must {
-    "jump to another replica" ignore {
-      val ignoreExceptions = Seq(
-        EventFilter[NotYetConnectedException],
-        EventFilter[ConnectException],
-        EventFilter[ClusterException])
-
-      var oldFoundConnections: JSet[String] = null
-      var actor: ActorRef = null
-
-      barrier("node-start", NrOfNodes) {
-        EventHandler.notify(TestEvent.Mute(ignoreExceptions))
-        Cluster.node.start()
-      }
-
-      barrier("actor-creation", NrOfNodes) {
-        actor = Actor.actorOf(Props[SomeActor], "service-hello")
-        actor.isInstanceOf[ClusterActorRef] must be(true)
-      }
-
-      val timer = Timer(30.seconds, true)
-      while (timer.isTicking &&
-        !Cluster.node.isInUseOnNode("service-hello", "node1") &&
-        !Cluster.node.isInUseOnNode("service-hello", "node3")) {}
-      //Thread.sleep(5000) // wait for all actors to start up on other nodes
-
-      barrier("actor-usage", NrOfNodes) {
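// Editorial sketch, not part of the original patch: the multi-JVM barrier
// idiom these specs rely on. All NrOfNodes JVMs must enter a barrier of the
// same name before any of them may proceed. Assuming only the barrier(name,
// count) API already used throughout this file:
//   barrier("step", NrOfNodes) { Cluster.node.start() } // do work, others wait
//   barrier("step", NrOfNodes).await()                  // just rendezvous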
-        Cluster.node.isInUseOnNode("service-hello") must be(true)
-        oldFoundConnections = identifyConnections(actor)
-
-        //since the replication factor is 2
-        oldFoundConnections.size() must be(2)
-      }
-
-      Thread.sleep(5000) // wait for fail-over from node3
-
-      barrier("verify-fail-over", NrOfNodes - 1) {
-        val timer = Timer(30.seconds, true)
-        while (timer.isTicking &&
-          !Cluster.node.isInUseOnNode("service-hello", "node1") &&
-          !Cluster.node.isInUseOnNode("service-hello", "node2")) {}
-
-        val newFoundConnections = identifyConnections(actor)
-
-        //it must still be 2, since a different node should have been used to fail over to
-        newFoundConnections.size() must be(2)
-
-        //they are not disjoint, since at least one element must be in both
-        Collections.disjoint(newFoundConnections, oldFoundConnections) must be(false)
-
-        //but they should not be equal, since the shut-down node has been replaced by another one.
-        newFoundConnections.equals(oldFoundConnections) must be(false)
-      }
-
-      Cluster.node.shutdown()
-    }
-  }
-
-  def identifyConnections(actor: ActorRef): JSet[String] = {
-    val set = new java.util.HashSet[String]
-    for (i ← 0 until 100) {
-      val value = Await.result(actor ? "identify", timeout.duration).asInstanceOf[String]
-      set.add(value)
-    }
-    set
-  }
-}
-
-class RoundRobinFailoverMultiJvmNode2 extends ClusterTestNode {
-
-  import RoundRobinFailoverMultiJvmSpec._
-
-  "___" must {
-    "___" ignore {
-      barrier("node-start", NrOfNodes) {
-        Cluster.node.start()
-      }
-
-      barrier("actor-creation", NrOfNodes).await()
-      barrier("actor-usage", NrOfNodes).await()
-
-      Cluster.node.isInUseOnNode("service-hello") must be(false)
-
-      Thread.sleep(5000) // wait for fail-over from node3
-
-      barrier("verify-fail-over", NrOfNodes - 1).await()
-    }
-  }
-}
-
-class RoundRobinFailoverMultiJvmNode3 extends ClusterTestNode {
-
-  import RoundRobinFailoverMultiJvmSpec._
-
-  "___" must {
-    "___" ignore {
-      barrier("node-start", NrOfNodes) {
-        Cluster.node.start()
-      }
-
-      barrier("actor-creation", NrOfNodes).await()
-      barrier("actor-usage", NrOfNodes).await()
-
-      Cluster.node.isInUseOnNode("service-hello") must be(true)
-
-      Cluster.node.shutdown()
-    }
-  }
-}
-
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmNode1.conf
deleted file mode 100644
index 85536cd656..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmNode1.conf
+++ /dev/null
@@ -1,8 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handler-level = "WARNING"
-akka.actor.deployment.service-node1.router = "round-robin"
-akka.actor.deployment.service-node1.cluster.preferred-nodes = ["node:node1"]
-akka.actor.deployment.service-node1.nr-of-instances = 1
-akka.actor.deployment.service-node2.router = "round-robin"
-akka.actor.deployment.service-node2.cluster.preferred-nodes = ["node:node2"]
-akka.actor.deployment.service-node2.nr-of-instances = 1
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmNode1.opts
deleted file mode 100644
index dc86c1c9c0..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmNode1.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node1 -Dakka.remote.port=9991
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmNode2.conf
deleted file mode 100644
index 99c85fd1a8..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmNode2.conf
+++ /dev/null
@@ -1,5 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handler-level = "WARNING"
-akka.actor.deployment.service-hello.router = "round-robin"
-akka.actor.deployment.service-hello.cluster.preferred-nodes = ["node:node1"]
-akka.actor.deployment.service-hello.nr-of-instances = 1
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmNode2.opts
deleted file mode 100644
index bb140941a5..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmNode2.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node2 -Dakka.remote.port=9992
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmSpec.scala
deleted file mode 100644
index 4dc9e96429..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmSpec.scala
+++ /dev/null
@@ -1,63 +0,0 @@
-package akka.cluster.routing.roundrobin.homenode
-
-import akka.config.Config
-import akka.actor.{ Actor, Props }
-import akka.cluster.{ ClusterTestNode, MasterClusterTestNode, Cluster }
-import Cluster._
-import akka.cluster.LocalCluster._
-
-object HomeNodeMultiJvmSpec {
-
-  val NrOfNodes = 2
-
-  class SomeActor extends Actor with Serializable {
-    def receive = {
-      case "identify" ⇒ {
-        reply(Config.nodename)
-      }
-    }
-  }
-
-}
-
-class HomeNodeMultiJvmNode1 extends MasterClusterTestNode {
-
-  import HomeNodeMultiJvmSpec._
-
-  val testNodes = NrOfNodes
-
-  "___" must {
-    "___" in {
-
-      Cluster.node.start()
-      barrier("waiting-for-begin", NrOfNodes).await()
-      barrier("waiting-for-end", NrOfNodes).await()
-
-      node.shutdown()
-    }
-  }
-}
-
-class HomeNodeMultiJvmNode2 extends ClusterTestNode {
-
-  import HomeNodeMultiJvmSpec._
-
-  "Round Robin: A Router" must {
-    "obey the 'home-node' config option when instantiating an actor in the cluster" in {
-
-      Cluster.node.start()
-      barrier("waiting-for-begin", NrOfNodes).await()
-
-      val actorNode1 = Actor.actorOf(Props[SomeActor], "service-node1")
-      val name1 = (actorNode1 ? "identify").get.asInstanceOf[String]
-      name1 must equal("node1")
-
-      val actorNode2 = Actor.actorOf(Props[SomeActor], "service-node2")
-      val name2 = (actorNode2 ? "identify").get.asInstanceOf[String]
"identify").get.asInstanceOf[String] - name2 must equal("node2") - - barrier("waiting-for-end", NrOfNodes).await() - node.shutdown() - } - } -} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_1/RoundRobin1ReplicaMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_1/RoundRobin1ReplicaMultiJvmNode1.conf deleted file mode 100644 index 88df1a6421..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_1/RoundRobin1ReplicaMultiJvmNode1.conf +++ /dev/null @@ -1,4 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "round-robin" -akka.actor.deployment.service-hello.nr-of-instances = 1 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_1/RoundRobin1ReplicaMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_1/RoundRobin1ReplicaMultiJvmNode1.opts deleted file mode 100644 index dc86c1c9c0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_1/RoundRobin1ReplicaMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_1/RoundRobin1ReplicaMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_1/RoundRobin1ReplicaMultiJvmSpec.scala deleted file mode 100644 index f8fd41b0cf..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_1/RoundRobin1ReplicaMultiJvmSpec.scala +++ /dev/null @@ -1,49 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -package akka.cluster.routing.roundrobin.replicationfactor_1 - -import akka.cluster._ -import Cluster._ -import akka.actor._ -import akka.config.Config -import akka.cluster.LocalCluster._ - -/** - * Test that if a single node is used with a round robin router with replication factor then the actor is instantiated on the single node. - */ -object RoundRobin1ReplicaMultiJvmSpec { - - class HelloWorld extends Actor with Serializable { - def receive = { - case "Hello" ⇒ reply("World from node [" + Config.nodename + "]") - } - } - -} - -class RoundRobin1ReplicaMultiJvmNode1 extends MasterClusterTestNode { - - import RoundRobin1ReplicaMultiJvmSpec._ - - val testNodes = 1 - - "Round Robin: A cluster" must { - - "create clustered actor, get a 'local' actor on 'home' node and a 'ref' to actor on remote node" in { - Cluster.node.start() - - var hello = Actor.actorOf(Props[HelloWorld]("service-hello") - hello must not equal (null) - hello.address must equal("service-hello") - hello.isInstanceOf[ClusterActorRef] must be(true) - - hello must not equal (null) - val reply = (hello ? 
"Hello").as[String].getOrElse(fail("Should have recieved reply from node1")) - reply must equal("World from node [node1]") - - node.shutdown() - } - } -} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmNode1.conf deleted file mode 100644 index a763b66792..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmNode1.conf +++ /dev/null @@ -1,4 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "round-robin" -akka.actor.deployment.service-hello.nr-of-instances = 2 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmNode1.opts deleted file mode 100644 index dc86c1c9c0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmNode2.conf deleted file mode 100644 index a763b66792..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmNode2.conf +++ /dev/null @@ -1,4 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "round-robin" -akka.actor.deployment.service-hello.nr-of-instances = 2 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmNode2.opts deleted file mode 100644 index bb140941a5..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmNode2.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node2 -Dakka.remote.port=9992 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmSpec.scala deleted file mode 100644 index b101a06f81..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmSpec.scala +++ /dev/null @@ -1,121 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. 
- */
-
-package akka.cluster.routing.roundrobin.replicationfactor_2
-
-import org.scalatest.WordSpec
-import org.scalatest.matchers.MustMatchers
-import org.scalatest.BeforeAndAfterAll
-
-import akka.cluster._
-import Cluster._
-import akka.cluster.LocalCluster._
-import akka.actor._
-import akka.actor.Actor._
-import akka.config.Config
-import akka.util.Timeout
-import akka.util.duration._
-import akka.util.{ Duration, Timer }
-
-import java.util.concurrent.atomic.AtomicInteger
-import java.util.concurrent.ConcurrentHashMap
-import akka.dispatch.Await
-
-/**
- * When a MultiJvmNode is started, will it automatically be part of the cluster (i.e. automatically be eligible
- * for running actors), or will it be just a 'client' talking to the cluster?
- */
-object RoundRobin2ReplicasMultiJvmSpec {
-  val NrOfNodes = 2
-
-  class HelloWorld extends Actor with Serializable {
-    def receive = {
-      case "Hello" ⇒
-        reply("World from node [" + Config.nodename + "]")
-    }
-  }
-}
-
-class RoundRobin2ReplicasMultiJvmNode1 extends MasterClusterTestNode {
-  import RoundRobin2ReplicasMultiJvmSpec._
-
-  val testNodes = NrOfNodes
-
-  "Round Robin: A cluster" must {
-
-    "create clustered actor, get a 'local' actor on 'home' node and a 'ref' to actor on remote node" in {
-      System.getProperty("akka.cluster.nodename", "") must be("node1")
-      System.getProperty("akka.remote.port", "") must be("9991")
-
-      //wait till node 1 has started.
-      barrier("start-node1", NrOfNodes) {
-        Cluster.node.start()
-      }
-
-      //wait till node 2 has started.
-      barrier("start-node2", NrOfNodes).await()
-
-      //wait till an actor reference on node 2 has become available.
-      barrier("get-ref-to-actor-on-node2", NrOfNodes) {
-        val timer = Timer(30.seconds, true)
-        while (timer.isTicking && !node.isInUseOnNode("service-hello")) {}
-      }
-
-      //wait till node 2 has sent a message to the replicas.
-      barrier("send-message-from-node2-to-replicas", NrOfNodes).await()
-
-      node.shutdown()
-    }
-  }
-}
-
-class RoundRobin2ReplicasMultiJvmNode2 extends ClusterTestNode {
-  import RoundRobin2ReplicasMultiJvmSpec._
-
-  "Round Robin: A cluster" must {
-
-    "create clustered actor, get a 'local' actor on 'home' node and a 'ref' to actor on remote node" in {
-      System.getProperty("akka.cluster.nodename", "") must be("node2")
-      System.getProperty("akka.remote.port", "") must be("9992")
-
-      //wait till node 1 has started.
-      barrier("start-node1", NrOfNodes).await()
-
-      //wait till node 2 has started.
-      barrier("start-node2", NrOfNodes) {
-        Cluster.node.start()
-      }
-
-      //check if the actorRef is the expected remoteActorRef.
-      var hello: ActorRef = null
-      barrier("get-ref-to-actor-on-node2", NrOfNodes) {
-        hello = Actor.actorOf(Props[HelloWorld], "service-hello")
-        hello must not equal (null)
-        hello.address must equal("service-hello")
-        hello.isInstanceOf[ClusterActorRef] must be(true)
-      }
-
-      barrier("send-message-from-node2-to-replicas", NrOfNodes) {
-        // TODO: is there a reason to check for null again, given the check in the previous block?
-        hello must not equal (null)
-
-        val replies = new ConcurrentHashMap[String, AtomicInteger]()
-        def count(reply: String) = {
-          val counter = new AtomicInteger(0)
-          Option(replies.putIfAbsent(reply, counter)).getOrElse(counter).incrementAndGet()
-        }
-
-        implicit val timeout = Timeout(Duration(20, "seconds"))
-
-        for (i ← 1 to 8)
-          count(Await.result((hello ? "Hello").mapTo[String], timeout.duration))
"Hello").mapTo[String], timeout.duration)) - - replies.get("World from node [node1]").get must equal(4) - replies.get("World from node [node2]").get must equal(4) - } - - node.shutdown() - } - } -} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode1.conf deleted file mode 100644 index 8592b46c85..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode1.conf +++ /dev/null @@ -1,4 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "round-robin" -akka.actor.deployment.service-hello.nr-of-instances = 3 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode1.opts deleted file mode 100644 index dc86c1c9c0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode2.conf deleted file mode 100644 index 92bafcfe8b..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode2.conf +++ /dev/null @@ -1,4 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "round-robin" -akka.actor.deployment.service-hello.cluster.repliction-factor = 3 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode2.opts deleted file mode 100644 index bb140941a5..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode2.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node2 -Dakka.remote.port=9992 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode3.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode3.conf deleted file mode 100644 index 8592b46c85..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode3.conf +++ /dev/null @@ -1,4 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "round-robin" -akka.actor.deployment.service-hello.nr-of-instances = 3 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode3.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode3.opts deleted file mode 100644 index 
089e3b7776..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode3.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node3 -Dakka.remote.port=9993 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmSpec.scala deleted file mode 100644 index f62b7d3e74..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmSpec.scala +++ /dev/null @@ -1,158 +0,0 @@ -// /** -// * Copyright (C) 2009-2012 Typesafe Inc. -// */ - -// package akka.cluster.routing.roundrobin.replicationfactor_3 - -// import org.scalatest.WordSpec -// import org.scalatest.matchers.MustMatchers -// import org.scalatest.BeforeAndAfterAll - -// import akka.cluster._ -// import akka.actor._ -// import akka.actor.Actor._ -// import akka.util.duration._ -// import akka.util.{ Duration, Timer } -// import akka.config.Config -// import akka.cluster.LocalCluster._ -// import Cluster._ - -// /** -// * When a MultiJvmNode is started, will it automatically be part of the cluster (so will it automatically be eligible -// * for running actors, or will it be just a 'client' talking to the cluster. -// */ -// object RoundRobin3ReplicasMultiJvmSpec { -// val NrOfNodes = 3 - -// class HelloWorld extends Actor with Serializable { -// def receive = { -// case "Hello" ⇒ -// reply("World from node [" + Config.nodename + "]") -// } -// } -// } - -// /** -// * What is the purpose of this node? Is this just a node for the cluster to make use of? -// */ -// class RoundRobin3ReplicasMultiJvmNode1 extends MasterClusterTestNode { -// import RoundRobin3ReplicasMultiJvmSpec._ - -// val testNodes = NrOfNodes - -// "Round Robin: A cluster" must { - -// "create clustered actor, get a 'local' actor on 'home' node and a 'ref' to actor on remote node" in { - -// //wait till node 1 has started. -// barrier("start-node1", NrOfNodes) { -// Cluster.node.boot() -// } - -// //wait till ndoe 2 has started. -// barrier("start-node2", NrOfNodes).await() - -// //wait till node 3 has started. -// barrier("start-node3", NrOfNodes).await() - -// //wait till an actor reference on node 2 has become available. -// barrier("get-ref-to-actor-on-node2", NrOfNodes) { -// val timer = Timer(30.seconds, true) -// while (timer.isTicking && !node.isInUseOnNode("service-hello")) {} -// } - -// //wait till the node 2 has send a message to the replica's. -// barrier("send-message-from-node2-to-replicas", NrOfNodes).await() - -// node.shutdown() -// } -// } -// } - -// class RoundRobin3ReplicasMultiJvmNode2 extends ClusterTestNode { -// import RoundRobin3ReplicasMultiJvmSpec._ -// import Cluster._ - -// "Round Robin: A cluster" must { - -// "create clustered actor, get a 'local' actor on 'home' node and a 'ref' to actor on remote node" in { - -// //wait till node 1 has started. -// barrier("start-node1", NrOfNodes).await() - -// //wait till node 2 has started. -// barrier("start-node2", NrOfNodes) { -// Cluster.node.start() -// } - -// //wait till node 3 has started. -// barrier("start-node3", NrOfNodes).await() - -// //check if the actorRef is the expected remoteActorRef. 
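// Editorial note, not part of the original patch: the arithmetic behind the
// assertions further down in this commented-out spec. A round-robin router
// over three replicas cycles node1, node2, node3, node1, ... so the twelve
// sequential asks below should land exactly 12 / 3 = 4 times on each node,
// which is what the replies("World from node [nodeX]") must equal(4) checks encode.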
-//       var hello: ActorRef = null
-//       barrier("get-ref-to-actor-on-node2", NrOfNodes) {
-//         hello = Actor.actorOf(Props[HelloWorld], "service-hello")
-//         hello must not equal (null)
-//         hello.address must equal("service-hello")
-//         hello.isInstanceOf[ClusterActorRef] must be(true)
-//       }
-
-//       barrier("send-message-from-node2-to-replicas", NrOfNodes) {
-//         // TODO: is there a reason to check for null again, given the check in the previous block?
-//         hello must not equal (null)
-
-//         val replies = collection.mutable.Map.empty[String, Int]
-//         def count(reply: String) = {
-//           if (replies.get(reply).isEmpty) replies.put(reply, 1)
-//           else replies.put(reply, replies(reply) + 1)
-//         }
-
-//         count((hello ? "Hello").as[String].getOrElse(fail("Should have received reply from node1")))
-//         count((hello ? "Hello").as[String].getOrElse(fail("Should have received reply from node2")))
-//         count((hello ? "Hello").as[String].getOrElse(fail("Should have received reply from node3")))
-//         count((hello ? "Hello").as[String].getOrElse(fail("Should have received reply from node1")))
-//         count((hello ? "Hello").as[String].getOrElse(fail("Should have received reply from node2")))
-//         count((hello ? "Hello").as[String].getOrElse(fail("Should have received reply from node3")))
-//         count((hello ? "Hello").as[String].getOrElse(fail("Should have received reply from node1")))
-//         count((hello ? "Hello").as[String].getOrElse(fail("Should have received reply from node2")))
-//         count((hello ? "Hello").as[String].getOrElse(fail("Should have received reply from node3")))
-//         count((hello ? "Hello").as[String].getOrElse(fail("Should have received reply from node1")))
-//         count((hello ? "Hello").as[String].getOrElse(fail("Should have received reply from node2")))
-//         count((hello ? "Hello").as[String].getOrElse(fail("Should have received reply from node3")))
-
-//         replies("World from node [node1]") must equal(4)
-//         replies("World from node [node2]") must equal(4)
-//         replies("World from node [node3]") must equal(4)
-//       }
-
-//       node.shutdown()
-//     }
-//   }
-// }
-
-// class RoundRobin3ReplicasMultiJvmNode3 extends ClusterTestNode {
-//   import RoundRobin3ReplicasMultiJvmSpec._
-//   import Cluster._
-
-//   "Round Robin: A cluster" must {
-
-//     "create clustered actor, get a 'local' actor on 'home' node and a 'ref' to actor on remote node" in {
-//       barrier("start-node1", NrOfNodes).await()
-
-//       barrier("start-node2", NrOfNodes).await()
-
-//       barrier("start-node3", NrOfNodes) {
-//         Cluster.node.start()
-//       }
-
-//       barrier("get-ref-to-actor-on-node2", NrOfNodes) {
-//         val timer = Timer(30.seconds, true)
-//         while (timer.isTicking && !node.isInUseOnNode("service-hello")) {}
-//       }
-
-//       barrier("send-message-from-node2-to-replicas", NrOfNodes).await()
-
-//       node.shutdown()
-//     }
-//   }
-// }
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmNode1.conf
deleted file mode 100644
index fd2babf3a9..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmNode1.conf
+++ /dev/null
@@ -1,6 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handlers = ["akka.testkit.TestEventListener"]
-akka.event-handler-level = "WARNING"
-akka.actor.deployment.service-hello.router = "akka.routing.ScatterGatherFirstCompletedRouter"
-akka.actor.deployment.service-hello.nr-of-instances = 2
-akka.actor.timeout = 30
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmNode1.opts
deleted file mode 100644
index dc86c1c9c0..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmNode1.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node1 -Dakka.remote.port=9991
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmNode2.conf
deleted file mode 100644
index fd2babf3a9..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmNode2.conf
+++ /dev/null
@@ -1,6 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handlers = ["akka.testkit.TestEventListener"]
-akka.event-handler-level = "WARNING"
-akka.actor.deployment.service-hello.router = "akka.routing.ScatterGatherFirstCompletedRouter"
-akka.actor.deployment.service-hello.nr-of-instances = 2
-akka.actor.timeout = 30
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmNode2.opts
deleted file mode 100644
index bb140941a5..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmNode2.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node2 -Dakka.remote.port=9992
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmSpec.scala
deleted file mode 100644
index e8cc4f7d68..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmSpec.scala
+++ /dev/null
@@ -1,114 +0,0 @@
-package akka.cluster.routing.scattergather.failover
-
-import akka.config.Config
-import akka.cluster._
-import akka.actor.{ ActorRef, Actor }
-import akka.event.EventHandler
-import akka.testkit.{ EventFilter, TestEvent }
-import java.util.{ Collections, Set ⇒ JSet }
-import java.net.ConnectException
-import java.nio.channels.NotYetConnectedException
-import java.lang.Thread
-import akka.routing.Routing.Broadcast
-import akka.cluster.LocalCluster._
-import akka.dispatch.Await
-
-object ScatterGatherFailoverMultiJvmSpec {
-
-  val NrOfNodes = 2
-
-  case class Shutdown(node: Option[String] = None)
-  case class Sleep(node: String)
-
-  class TestActor extends Actor with Serializable {
-
-    def shutdownNode = new Thread() {
-      override def run() {
-        Thread.sleep(2000)
-        Cluster.node.shutdown()
-      }
-    }.start()
-
-    def receive = {
-      case Shutdown(None) ⇒ shutdownNode
-      case Sleep(node) if node.equals(Config.nodename) ⇒
-        Thread sleep 100
-        reply(Config.nodename)
-      case Shutdown(Some(node)) if node.equals(Config.nodename) ⇒ shutdownNode
-      case _ ⇒
-        Thread sleep 100
-        reply(Config.nodename)
-    }
-  }
-
-}
-
-class ScatterGatherFailoverMultiJvmNode1 extends MasterClusterTestNode {
-
-  import ScatterGatherFailoverMultiJvmSpec._
-
-  def testNodes = NrOfNodes
-
-  "When the message is sent with ?, and all connections are up, router" must {
-    "return the first response that arrives" ignore {
-      val ignoreExceptions = Seq(
-        EventFilter[NotYetConnectedException],
-        EventFilter[ConnectException],
-        EventFilter[ClusterException])
-
-      EventHandler.notify(TestEvent.Mute(ignoreExceptions))
-
-      Cluster.node.start()
-      LocalCluster.barrier("waiting-for-begin", NrOfNodes).await()
-
-      /*
-        FIXME: Uncomment, when custom routers will be fully supported (ticket #1109)
-
-        val actor = Actor.actorOf(Props[TestActor], "service-hello").asInstanceOf[ClusterActorRef]
-
-        identifyConnections(actor).size() must be(2)
-
-        // since node1 falls asleep, the response from node2 is gathered
-        (actor ? Broadcast(Sleep("node1"))).get.asInstanceOf[String] must be("node2")
-
-        Thread sleep 100
-
-        // since node2 shuts down while processing the message, the response from node1 is gathered
-        (actor ? Broadcast(Shutdown(Some("node2")))).get.asInstanceOf[String] must be("node1")
-
-      */
-      LocalCluster.barrier("waiting-for-end", NrOfNodes).await()
-      Cluster.node.shutdown()
-    }
-  }
-
-  def identifyConnections(actor: ActorRef): JSet[String] = {
-    val set = new java.util.HashSet[String]
-    for (i ← 0 until NrOfNodes * 2) {
-      val value = Await.result(actor ?
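      // Editorial note, not part of the original patch: a scatter-gather
      // first-completed router broadcasts a message to all replicas and
      // completes the ask with whichever reply arrives first. That is why the
      // (currently disabled) assertions above expect the node that is asleep
      // or shutting down to lose the race to the healthy one.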
"foo", timeout.duration).asInstanceOf[String] - set.add(value) - } - set - } -} - -class ScatterGatherFailoverMultiJvmNode2 extends ClusterTestNode { - - import ScatterGatherFailoverMultiJvmSpec._ - - "___" must { - "___" ignore { - - Cluster.node.start() - LocalCluster.barrier("waiting-for-begin", NrOfNodes).await() - - /* - FIXME: Uncomment, when custom routers will be fully supported (ticket #1109) - Thread.sleep(30 *1000) - */ - - LocalCluster.barrier("waiting-for-end", NrOfNodes).await() - Cluster.node.shutdown() - } - } -} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/sample/PingPongMultiJvmExample.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/sample/PingPongMultiJvmExample.scala deleted file mode 100644 index c7e9aceaf1..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/sample/PingPongMultiJvmExample.scala +++ /dev/null @@ -1,227 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -package akka.cluster.sample - -import akka.cluster._ - -import akka.actor._ -import akka.util.duration._ - -object PingPongMultiJvmExample { - val PING_ADDRESS = "ping" - val PONG_ADDRESS = "pong" - - val ClusterName = "ping-pong-cluster" - val NrOfNodes = 5 - val Pause = true - val PauseTimeout = 5 minutes - - // ----------------------------------------------- - // Messages - // ----------------------------------------------- - - sealed trait PingPong extends Serializable - case object Ping extends PingPong - case object Pong extends PingPong - case object Stop extends PingPong - - case class Serve(player: ActorRef) - - // ----------------------------------------------- - // Actors - // ----------------------------------------------- - - class PingActor extends Actor with Serializable { - var pong: ActorRef = _ - var play = true - - def receive = { - case Pong ⇒ - if (play) { - println("---->> PING") - pong ! Ping - } else { - println("---->> GAME OVER") - } - case Serve(player) ⇒ - pong = player - println("---->> SERVE") - pong ! Ping - case Stop ⇒ - play = false - } - } - - class PongActor extends Actor with Serializable { - def receive = { - case Ping ⇒ - println("---->> PONG") - reply(Pong) - } - } -} - -/* -object PingPongMultiJvmNode1 { - import PingPong._ - import BinaryFormats._ - - val PingService = classOf[PingActor].getName - val PongService = classOf[PongActor].getName - - def main(args: Array[String]) { run } - - def run = { - // ----------------------------------------------- - // Start monitoring - // ----------------------------------------------- - - //MonitoringServer.start - //Monitoring.startLocalDaemons - - // ----------------------------------------------- - // Start cluster - // ----------------------------------------------- - - Cluster.startLocalCluster() - - // create node - val node = Cluster.newNode(NodeAddress(ClusterName, "node1", port = 9991)) - - def pause(name: String, message: String) = { - node.barrier("user-prompt-" + name, NrOfNodes, PauseTimeout) { - println(message) - if (Pause) { - println("Press enter to continue (timeout of %s) ..." 
format PauseTimeout) - System.in.read - } - } - } - - pause("start", "Ready to start all nodes") - println("Starting nodes ...") - - Cluster.node.start() - - node.barrier("start", NrOfNodes) { - // wait for others to start - } - - // ----------------------------------------------- - // Store pong actors in the cluster - // ----------------------------------------------- - - pause("create", "Ready to create all actors") - println("Creating actors ...") - - // store the ping actor in the cluster, but do not deploy it anywhere - node.store(classOf[PingActor], PING_ADDRESS) - - // store the pong actor in the cluster and replicate it on all nodes - node.store(classOf[PongActor], PONG_ADDRESS, NrOfNodes) - - // give some time for the deployment - Thread.sleep(3000) - - // ----------------------------------------------- - // Get actor references - // ----------------------------------------------- - - // check out a local ping actor - val ping = node.use[PingActor](PING_ADDRESS).head - - // get a reference to all the pong actors through a round-robin router actor ref - val pong = node.ref(PONG_ADDRESS, router = Router.RoundRobin) - - // ----------------------------------------------- - // Play the game - // ----------------------------------------------- - - pause("play", "Ready to play ping pong") - - ping ! Serve(pong) - - // let them play for 3 seconds - Thread.sleep(3000) - - ping ! Stop - - // give some time for the game to finish - Thread.sleep(3000) - - // ----------------------------------------------- - // Stop actors - // ----------------------------------------------- - - pause("stop", "Ready to stop actors") - println("Stopping actors ...") - - ping.stop - pong.stop - - // give remote actors time to stop - Thread.sleep(5000) - - // ----------------------------------------------- - // Stop everything - // ----------------------------------------------- - - pause("shutdown", "Ready to shutdown") - println("Stopping everything ...") - - //Monitoring.stopLocalDaemons - //MonitoringServer.stop - - Actor.remote.shutdown - Actor.registry.local.shutdownAll - - node.stop - - Cluster.shutdownLocalCluster - } -} - -object PingPongMultiJvmNode2 extends PongNode(2) -object PingPongMultiJvmNode3 extends PongNode(3) -object PingPongMultiJvmNode4 extends PongNode(4) -object PingPongMultiJvmNode5 extends PongNode(5) - -class PongNode(number: Int) { - import PingPong._ - - def main(args: Array[String]) { run } - - def run = { - val node = Cluster.newNode(NodeAddress(ClusterName, "node" + number, port = 9990 + number)) - - def pause(name: String) = { - node.barrier("user-prompt-" + name, NrOfNodes, PauseTimeout) { - // wait for user prompt - } - } - - pause("start") - - node.barrier("start", NrOfNodes) { - Cluster.node.start() - } - - pause("create") - - pause("play") - - pause("stop") - - pause("shutdown") - - // clean up and stop - - Actor.remote.shutdown - Actor.registry.local.shutdownAll - - node.stop - } -} -*/ diff --git a/akka-remote/src/test/scala/akka/remote/AccrualFailureDetectorSpec.scala b/akka-cluster/src/test/scala/akka/cluster/AccrualFailureDetectorSpec.scala similarity index 99% rename from akka-remote/src/test/scala/akka/remote/AccrualFailureDetectorSpec.scala rename to akka-cluster/src/test/scala/akka/cluster/AccrualFailureDetectorSpec.scala index cffc424408..d02199f703 100644 --- a/akka-remote/src/test/scala/akka/remote/AccrualFailureDetectorSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/AccrualFailureDetectorSpec.scala @@ -1,4 +1,4 @@ -package akka.remote +package 
akka.cluster
 import java.net.InetSocketAddress
 import akka.testkit.AkkaSpec
diff --git a/akka-cluster/src/test/scala/akka/cluster/AsynchronousTransactionLogSpec.scala b/akka-cluster/src/test/scala/akka/cluster/AsynchronousTransactionLogSpec.scala
deleted file mode 100644
index 0d26befc4e..0000000000
--- a/akka-cluster/src/test/scala/akka/cluster/AsynchronousTransactionLogSpec.scala
+++ /dev/null
@@ -1,230 +0,0 @@
-/**
- * Copyright (C) 2009-2012 Typesafe Inc.
- */
-package akka.cluster
-
-import org.apache.bookkeeper.client.BookKeeper
-import org.scalatest.WordSpec
-import org.scalatest.matchers.MustMatchers
-import org.scalatest.BeforeAndAfterAll
-
-import akka.actor._
-import akka.event.EventHandler
-import akka.testkit.{ EventFilter, TestEvent }
-
-import com.eaio.uuid.UUID
-
-class AsynchronousTransactionLogSpec extends WordSpec with MustMatchers with BeforeAndAfterAll {
-  private var bookKeeper: BookKeeper = _
-  private var localBookKeeper: LocalBookKeeper = _
-
-  "An asynchronous Transaction Log" should {
-    "be able to record entries - asynchronous" in {
-      val uuid = (new UUID).toString
-      val txlog = TransactionLog.newLogFor(uuid, true, null)
-      val entry = "hello".getBytes("UTF-8")
-      txlog.recordEntry(entry)
-      Thread.sleep(200)
-      txlog.close
-    }
-
-    "be able to be deleted - asynchronous" in {
-      val uuid = (new UUID).toString
-      val txlog = TransactionLog.newLogFor(uuid, true, null)
-      val entry = "hello".getBytes("UTF-8")
-      txlog.recordEntry(entry)
-
-      txlog.delete()
-      txlog.close()
-
-      val zkClient = TransactionLog.zkClient
-      assert(zkClient.readData(txlog.snapshotPath, true) == null)
-      assert(zkClient.readData(txlog.txLogPath, true) == null)
-    }
-
-    "be able to be checked for existence - asynchronous" in {
-      val uuid = (new UUID).toString
-      TransactionLog.exists(uuid) must be(false)
-
-      TransactionLog.newLogFor(uuid, true, null)
-      TransactionLog.exists(uuid) must be(true)
-    }
-
-    "fail to be opened if non existing - asynchronous" in {
-      EventHandler.notify(TestEvent.Mute(EventFilter[ReplicationException]))
-      val uuid = (new UUID).toString
-      intercept[ReplicationException](TransactionLog.logFor(uuid, true, null))
-      EventHandler.notify(TestEvent.UnMuteAll)
-    }
-
-    "be able to overwrite an existing txlog if one already exists - asynchronous" in {
-      val uuid = (new UUID).toString
-      val txlog1 = TransactionLog.newLogFor(uuid, true, null)
-      val entry = "hello".getBytes("UTF-8")
-      txlog1.recordEntry(entry)
-      txlog1.recordEntry(entry)
-      txlog1.close
-
-      val txLog2 = TransactionLog.newLogFor(uuid, true, null)
-      txLog2.latestSnapshotId.isDefined must be(false)
-      txLog2.latestEntryId must be(-1)
-    }
-
-    "be able to record and delete entries - asynchronous" in {
-      EventHandler.notify(TestEvent.Mute(EventFilter[ReplicationException]))
-      val uuid = (new UUID).toString
-      val txlog1 = TransactionLog.newLogFor(uuid, true, null)
-      Thread.sleep(200)
-      val entry = "hello".getBytes("UTF-8")
-      txlog1.recordEntry(entry)
-      Thread.sleep(200)
-      txlog1.recordEntry(entry)
-      Thread.sleep(200)
-      txlog1.delete
-      Thread.sleep(200)
-      intercept[ReplicationException](TransactionLog.logFor(uuid, true, null))
-      EventHandler.notify(TestEvent.UnMuteAll)
-    }
-
-    "be able to record entries and read entries with 'entriesInRange' - asynchronous" in {
-      val uuid = (new UUID).toString
-      val txlog1 = TransactionLog.newLogFor(uuid, true, null)
-      Thread.sleep(200)
-      val entry = "hello".getBytes("UTF-8")
-      txlog1.recordEntry(entry)
-      Thread.sleep(200)
-      txlog1.recordEntry(entry)
-      Thread.sleep(200)
-      txlog1.close
-
-      val txlog2 =
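      // Editorial sketch, not part of the original patch: the reopen-and-replay
      // idiom this spec exercises, using only the TransactionLog API shown here:
      //   val log = TransactionLog.logFor(uuid, true, null)
      //   val (snapshot, entries) = log.latestSnapshotAndSubsequentEntries
      //   // restore state from `snapshot`, then re-apply `entries` in order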
TransactionLog.logFor(uuid, true, null) - Thread.sleep(200) - val entries = txlog2.entriesInRange(0, 1).map(bytes ⇒ new String(bytes, "UTF-8")) - Thread.sleep(200) - entries.size must equal(2) - entries(0) must equal("hello") - entries(1) must equal("hello") - Thread.sleep(200) - txlog2.close - } - - "be able to record entries and read entries with 'entries' - asynchronous" in { - val uuid = (new UUID).toString - val txlog1 = TransactionLog.newLogFor(uuid, true, null) - Thread.sleep(200) - val entry = "hello".getBytes("UTF-8") - txlog1.recordEntry(entry) - Thread.sleep(200) - txlog1.recordEntry(entry) - Thread.sleep(200) - txlog1.recordEntry(entry) - Thread.sleep(200) - txlog1.recordEntry(entry) - Thread.sleep(200) - txlog1.close - - val txlog2 = TransactionLog.logFor(uuid, true, null) - val entries = txlog2.entries.map(bytes ⇒ new String(bytes, "UTF-8")) - Thread.sleep(200) - entries.size must equal(4) - entries(0) must equal("hello") - entries(1) must equal("hello") - entries(2) must equal("hello") - entries(3) must equal("hello") - Thread.sleep(200) - txlog2.close - } - - "be able to record a snapshot - asynchronous" in { - val uuid = (new UUID).toString - val txlog1 = TransactionLog.newLogFor(uuid, true, null) - Thread.sleep(200) - val snapshot = "snapshot".getBytes("UTF-8") - txlog1.recordSnapshot(snapshot) - Thread.sleep(200) - txlog1.close - } - - "be able to record and read a snapshot and following entries - asynchronous" in { - val uuid = (new UUID).toString - val txlog1 = TransactionLog.newLogFor(uuid, true, null) - Thread.sleep(200) - val snapshot = "snapshot".getBytes("UTF-8") - txlog1.recordSnapshot(snapshot) - Thread.sleep(200) - - val entry = "hello".getBytes("UTF-8") - txlog1.recordEntry(entry) - Thread.sleep(200) - txlog1.recordEntry(entry) - Thread.sleep(200) - txlog1.recordEntry(entry) - Thread.sleep(200) - txlog1.recordEntry(entry) - Thread.sleep(200) - txlog1.close - - val txlog2 = TransactionLog.logFor(uuid, true, null) - Thread.sleep(200) - val (snapshotAsBytes, entriesAsBytes) = txlog2.latestSnapshotAndSubsequentEntries - Thread.sleep(200) - new String(snapshotAsBytes.getOrElse(fail("No snapshot")), "UTF-8") must equal("snapshot") - - val entries = entriesAsBytes.map(bytes ⇒ new String(bytes, "UTF-8")) - Thread.sleep(200) - entries.size must equal(4) - entries(0) must equal("hello") - entries(1) must equal("hello") - entries(2) must equal("hello") - entries(3) must equal("hello") - Thread.sleep(200) - txlog2.close - } - - "be able to record entries then a snapshot then more entries - and then read from the snapshot and the following entries - asynchronous" in { - val uuid = (new UUID).toString - val txlog1 = TransactionLog.newLogFor(uuid, true, null) - Thread.sleep(200) - - val entry = "hello".getBytes("UTF-8") - txlog1.recordEntry(entry) - Thread.sleep(200) - txlog1.recordEntry(entry) - Thread.sleep(200) - txlog1.recordEntry(entry) - Thread.sleep(200) - - val snapshot = "snapshot".getBytes("UTF-8") - txlog1.recordSnapshot(snapshot) - Thread.sleep(200) - txlog1.recordEntry(entry) - Thread.sleep(200) - txlog1.recordEntry(entry) - Thread.sleep(200) - txlog1.close - - val txlog2 = TransactionLog.logFor(uuid, true, null) - Thread.sleep(200) - val (snapshotAsBytes, entriesAsBytes) = txlog2.latestSnapshotAndSubsequentEntries - Thread.sleep(200) - new String(snapshotAsBytes.getOrElse(fail("No snapshot")), "UTF-8") must equal("snapshot") - val entries = entriesAsBytes.map(bytes ⇒ new String(bytes, "UTF-8")) - Thread.sleep(200) - entries.size must equal(2) - entries(0) must 
equal("hello") - entries(1) must equal("hello") - Thread.sleep(200) - txlog2.close - } - } - - override def beforeAll() = { - LocalBookKeeperEnsemble.start() - TransactionLog.start() - } - - override def afterAll() = { - TransactionLog.shutdown() - LocalBookKeeperEnsemble.shutdown() - } -} diff --git a/akka-remote/src/test/scala/akka/remote/GossipingAccrualFailureDetectorSpec.scala b/akka-cluster/src/test/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala similarity index 99% rename from akka-remote/src/test/scala/akka/remote/GossipingAccrualFailureDetectorSpec.scala rename to akka-cluster/src/test/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala index 1e954b34fb..6366a9f65e 100644 --- a/akka-remote/src/test/scala/akka/remote/GossipingAccrualFailureDetectorSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala @@ -1,7 +1,7 @@ // /** // * Copyright (C) 2009-2011 Typesafe Inc. // */ -// package akka.remote +// package akka.cluster // import java.net.InetSocketAddress diff --git a/akka-cluster/src/test/scala/akka/cluster/SynchronousTransactionLogSpec.scala b/akka-cluster/src/test/scala/akka/cluster/SynchronousTransactionLogSpec.scala deleted file mode 100644 index 3dc58d6c9a..0000000000 --- a/akka-cluster/src/test/scala/akka/cluster/SynchronousTransactionLogSpec.scala +++ /dev/null @@ -1,190 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ -package akka.cluster - -import org.apache.bookkeeper.client.BookKeeper -import org.scalatest.WordSpec -import org.scalatest.matchers.MustMatchers -import org.scalatest.BeforeAndAfterAll - -import akka.actor._ -import akka.event.EventHandler -import akka.testkit.{ EventFilter, TestEvent } - -import com.eaio.uuid.UUID - -class SynchronousTransactionLogSpec extends WordSpec with MustMatchers with BeforeAndAfterAll { - private var bookKeeper: BookKeeper = _ - private var localBookKeeper: LocalBookKeeper = _ - - "A synchronous used Transaction Log" should { - - "be able to be deleted - synchronous" in { - val uuid = (new UUID).toString - val txlog = TransactionLog.newLogFor(uuid, false, null) - val entry = "hello".getBytes("UTF-8") - txlog.recordEntry(entry) - - txlog.delete() - txlog.close() - - val zkClient = TransactionLog.zkClient - assert(zkClient.readData(txlog.snapshotPath, true) == null) - assert(zkClient.readData(txlog.txLogPath, true) == null) - } - - "fail to be opened if non existing - synchronous" in { - EventHandler.notify(TestEvent.Mute(EventFilter[ReplicationException])) - val uuid = (new UUID).toString - intercept[ReplicationException](TransactionLog.logFor(uuid, false, null)) - EventHandler.notify(TestEvent.UnMuteAll) - } - - "be able to be checked for existence - synchronous" in { - val uuid = (new UUID).toString - TransactionLog.exists(uuid) must be(false) - - TransactionLog.newLogFor(uuid, false, null) - TransactionLog.exists(uuid) must be(true) - } - - "be able to record entries - synchronous" in { - val uuid = (new UUID).toString - val txlog = TransactionLog.newLogFor(uuid, false, null) - val entry = "hello".getBytes("UTF-8") - txlog.recordEntry(entry) - } - - "be able to overweite an existing txlog if one already exists - synchronous" in { - val uuid = (new UUID).toString - val txlog1 = TransactionLog.newLogFor(uuid, false, null) - val entry = "hello".getBytes("UTF-8") - txlog1.recordEntry(entry) - txlog1.recordEntry(entry) - txlog1.close - - val txLog2 = TransactionLog.newLogFor(uuid, false, null) - txLog2.latestSnapshotId.isDefined must be(false) - 
txLog2.latestEntryId must be(-1) - } - - "be able to record and delete entries - synchronous" in { - val uuid = (new UUID).toString - val txlog1 = TransactionLog.newLogFor(uuid, false, null) - val entry = "hello".getBytes("UTF-8") - txlog1.recordEntry(entry) - txlog1.recordEntry(entry) - txlog1.delete - txlog1.close - // intercept[ReplicationException](TransactionLog.logFor(uuid, false, null)) - } - - "be able to record entries and read entries with 'entriesInRange' - synchronous" in { - val uuid = (new UUID).toString - val txlog1 = TransactionLog.newLogFor(uuid, false, null) - val entry = "hello".getBytes("UTF-8") - txlog1.recordEntry(entry) - txlog1.recordEntry(entry) - txlog1.close - - val txlog2 = TransactionLog.logFor(uuid, false, null) - val entries = txlog2.entriesInRange(0, 1).map(bytes ⇒ new String(bytes, "UTF-8")) - entries.size must equal(2) - entries(0) must equal("hello") - entries(1) must equal("hello") - txlog2.close - } - - "be able to record entries and read entries with 'entries' - synchronous" in { - val uuid = (new UUID).toString - val txlog1 = TransactionLog.newLogFor(uuid, false, null) - val entry = "hello".getBytes("UTF-8") - txlog1.recordEntry(entry) - txlog1.recordEntry(entry) - txlog1.recordEntry(entry) - txlog1.recordEntry(entry) - txlog1.close // should work without txlog.close - - val txlog2 = TransactionLog.logFor(uuid, false, null) - val entries = txlog2.entries.map(bytes ⇒ new String(bytes, "UTF-8")) - entries.size must equal(4) - entries(0) must equal("hello") - entries(1) must equal("hello") - entries(2) must equal("hello") - entries(3) must equal("hello") - txlog2.close - } - - "be able to record a snapshot - synchronous" in { - val uuid = (new UUID).toString - val txlog1 = TransactionLog.newLogFor(uuid, false, null) - val snapshot = "snapshot".getBytes("UTF-8") - txlog1.recordSnapshot(snapshot) - txlog1.close - } - - "be able to record and read a snapshot and following entries - synchronous" in { - val uuid = (new UUID).toString - val txlog1 = TransactionLog.newLogFor(uuid, false, null) - val snapshot = "snapshot".getBytes("UTF-8") - txlog1.recordSnapshot(snapshot) - - val entry = "hello".getBytes("UTF-8") - txlog1.recordEntry(entry) - txlog1.recordEntry(entry) - txlog1.recordEntry(entry) - txlog1.recordEntry(entry) - txlog1.close - - val txlog2 = TransactionLog.logFor(uuid, false, null) - val (snapshotAsBytes, entriesAsBytes) = txlog2.latestSnapshotAndSubsequentEntries - new String(snapshotAsBytes.getOrElse(fail("No snapshot")), "UTF-8") must equal("snapshot") - - val entries = entriesAsBytes.map(bytes ⇒ new String(bytes, "UTF-8")) - entries.size must equal(4) - entries(0) must equal("hello") - entries(1) must equal("hello") - entries(2) must equal("hello") - entries(3) must equal("hello") - txlog2.close - } - - "be able to record entries then a snapshot then more entries - and then read from the snapshot and the following entries - synchronous" in { - val uuid = (new UUID).toString - val txlog1 = TransactionLog.newLogFor(uuid, false, null) - - val entry = "hello".getBytes("UTF-8") - txlog1.recordEntry(entry) - txlog1.recordEntry(entry) - txlog1.recordEntry(entry) - - val snapshot = "snapshot".getBytes("UTF-8") - txlog1.recordSnapshot(snapshot) - - txlog1.recordEntry(entry) - txlog1.recordEntry(entry) - txlog1.close - - val txlog2 = TransactionLog.logFor(uuid, false, null) - val (snapshotAsBytes, entriesAsBytes) = txlog2.latestSnapshotAndSubsequentEntries - new String(snapshotAsBytes.getOrElse(fail("No snapshot")), "UTF-8") must equal("snapshot") - - 
val entries = entriesAsBytes.map(bytes ⇒ new String(bytes, "UTF-8")) - entries.size must equal(2) - entries(0) must equal("hello") - entries(1) must equal("hello") - txlog2.close - } - } - - override def beforeAll() = { - LocalBookKeeperEnsemble.start() - TransactionLog.start() - } - - override def afterAll() = { - TransactionLog.shutdown() - LocalBookKeeperEnsemble.shutdown() - } -} diff --git a/akka-remote/src/test/scala/akka/remote/VectorClockSpec.scala b/akka-cluster/src/test/scala/akka/cluster/VectorClockSpec.scala similarity index 99% rename from akka-remote/src/test/scala/akka/remote/VectorClockSpec.scala rename to akka-cluster/src/test/scala/akka/cluster/VectorClockSpec.scala index 03e4109423..df9cead7f8 100644 --- a/akka-remote/src/test/scala/akka/remote/VectorClockSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/VectorClockSpec.scala @@ -1,4 +1,4 @@ -package akka.remote +package akka.cluster import java.net.InetSocketAddress import akka.testkit.AkkaSpec diff --git a/akka-cluster/src/test/scala/akka/cluster/sample/ClusteredPingPongSample.scala b/akka-cluster/src/test/scala/akka/cluster/sample/ClusteredPingPongSample.scala deleted file mode 100644 index c242185450..0000000000 --- a/akka-cluster/src/test/scala/akka/cluster/sample/ClusteredPingPongSample.scala +++ /dev/null @@ -1,134 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -package akka.cluster.sample - -import akka.cluster._ - -import akka.actor._ -import akka.actor.Actor._ - -import java.util.concurrent.CountDownLatch - -object PingPong { - val PING_ADDRESS = "ping" - val PONG_ADDRESS = "pong" - - val NrOfPings = 5 - - // ------------------------ - // Messages - // ------------------------ - - sealed trait PingPong extends Serializable - case object Ball extends PingPong - case object Stop extends PingPong - case class Latch(latch: CountDownLatch) extends PingPong - - // ------------------------ - // Actors - // ------------------------ - - class PingActor extends Actor with Serializable { - var count = 0 - var gameOverLatch: CountDownLatch = _ - - def receive = { - case Ball ⇒ - if (count < NrOfPings) { - println("---->> PING (%s)" format count) - count += 1 - reply(Ball) - } else { - sender.foreach(s ⇒ (s ? 
Stop).await) - gameOverLatch.countDown - self.stop - } - case Latch(latch) ⇒ - gameOverLatch = latch - } - } - - class PongActor extends Actor with Serializable { - def receive = { - case Ball ⇒ - reply(Ball) - case Stop ⇒ - reply(Stop) - self.stop - } - } -} - -/* -object ClusteredPingPongSample { - import PingPong._ - import BinaryFormats._ - - val CLUSTER_NAME = "test-cluster" - - def main(args: Array[String]) = run - - def run = { - - // ------------------------ - // Start cluster of 5 nodes - // ------------------------ - - Cluster.startLocalCluster() - val localNode = Cluster.newNode(NodeAddress(CLUSTER_NAME, "node0", port = 9991)).start - val remoteNodes = Cluster.newNode(NodeAddress(CLUSTER_NAME, "node1", port = 9992)).start :: - Cluster.newNode(NodeAddress(CLUSTER_NAME, "node2", port = 9993)).start :: - Cluster.newNode(NodeAddress(CLUSTER_NAME, "node3", port = 9994)).start :: - Cluster.newNode(NodeAddress(CLUSTER_NAME, "node4", port = 9995)).start :: Nil - - // ------------------------ - // Store the actors in the cluster - // ------------------------ - - // Store the PingActor in the cluster, but do not deploy it anywhere - localNode.store(classOf[PingActor], PING_ADDRESS) - - // Store the PongActor in the cluster and deploy it - // to 5 (replication factor) nodes in the cluster - localNode.store(classOf[PongActor], PONG_ADDRESS, 5) - - Thread.sleep(1000) // let the deployment finish - - // ------------------------ - // Get the actors from the cluster - // ------------------------ - - // Check out a local PingActor instance (not reference) - val ping = localNode.use[PingActor](PING_ADDRESS).head - - // Get a reference to all the pong actors through a round-robin router ActorRef - val pong = localNode.ref(PONG_ADDRESS, router = Router.RoundRobin) - - // ------------------------ - // Play the game - // ------------------------ - - val latch = new CountDownLatch(1) - ping ! Latch(latch) // register latch for actor to know when to stop - - println("---->> SERVE") - - implicit val replyTo = Some(pong) // set the reply address to the PongActor - ping ! Ball // serve - - latch.await // wait for game to finish - - println("---->> GAME OVER") - - // ------------------------ - // Clean up - // ------------------------ - - localNode.stop - remoteNodes.foreach(_.stop) - Cluster.shutdownLocalCluster() - } -} -*/ diff --git a/akka-cluster/src/test/scala/akka/cluster/sample/ComputeGridSample.scala b/akka-cluster/src/test/scala/akka/cluster/sample/ComputeGridSample.scala deleted file mode 100644 index daf817872e..0000000000 --- a/akka-cluster/src/test/scala/akka/cluster/sample/ComputeGridSample.scala +++ /dev/null @@ -1,91 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. 
- */ - -package akka.cluster.sample - -import akka.cluster._ -import akka.dispatch.Futures - -object ComputeGridSample { - //sample.cluster.ComputeGridSample.fun2 - - // FIXME rewrite as multi-jvm test - - /* - // run all - def run { - fun1 - fun2 - fun3 - fun4 - } - - // Send Function0[Unit] - def fun1 = { - Cluster.startLocalCluster() - val node = Cluster newNode (NodeAddress("test", "local", port = 9991)) start - val remote1 = Cluster newNode (NodeAddress("test", "remote1", port = 9992)) start - - Thread.sleep(100) - val fun = () ⇒ println("=============>>> AKKA ROCKS <<<=============") - node send (fun, 2) // send and invoke function on to two cluster nodes - - node.stop - remote1.stop - Cluster.shutdownLocalCluster() - } - - // Send Function0[Any] - def fun2 = { - Cluster.startLocalCluster() - val local = Cluster newNode (NodeAddress("test", "local", port = 9991)) start - val remote1 = Cluster newNode (NodeAddress("test", "remote1", port = 9992)) start - - Thread.sleep(100) - val fun = () ⇒ "AKKA ROCKS" - val futures = local send (fun, 2) // send and invoke function on to two cluster nodes and get result - - val result = Await.sync(Futures.fold("")(futures)(_ + " - " + _), timeout) - println("===================>>> Cluster says [" + result + "]") - - local.stop - remote1.stop - Cluster.shutdownLocalCluster() - } - - // Send Function1[Any, Unit] - def fun3 = { - Cluster.startLocalCluster() - val local = Cluster newNode (NodeAddress("test", "local", port = 9991)) start - val remote1 = Cluster newNode (NodeAddress("test", "remote1", port = 9992)) start - - val fun = ((s: String) ⇒ println("=============>>> " + s + " <<<=============")).asInstanceOf[Function1[Any, Unit]] - local send (fun, "AKKA ROCKS", 2) // send and invoke function on to two cluster nodes - - local.stop - remote1.stop - Cluster.shutdownLocalCluster() - } - - // Send Function1[Any, Any] - def fun4 = { - Cluster.startLocalCluster() - val local = Cluster newNode (NodeAddress("test", "local", port = 9991)) start - val remote1 = Cluster newNode (NodeAddress("test", "remote1", port = 9992)) start - - val fun = ((i: Int) ⇒ i * i).asInstanceOf[Function1[Any, Any]] - - val future1 = local send (fun, 2, 1) head // send and invoke function on one cluster node and get result - val future2 = local send (fun, 2, 1) head // send and invoke function on one cluster node and get result - - // grab the result from the first one that returns - val result = Await.sync(Futures.firstCompletedOf(List(future1, future2)), timeout) - println("===================>>> Cluster says [" + result + "]") - - local.stop - remote1.stop - Cluster.shutdownLocalCluster() - } - */ -} diff --git a/akka-cluster/src/test/scala/akka/cluster/storage/InMemoryStorageSpec.scala b/akka-cluster/src/test/scala/akka/cluster/storage/InMemoryStorageSpec.scala deleted file mode 100644 index 762b189bd2..0000000000 --- a/akka-cluster/src/test/scala/akka/cluster/storage/InMemoryStorageSpec.scala +++ /dev/null @@ -1,241 +0,0 @@ -package akka.cluster.storage - -import org.scalatest.matchers.MustMatchers -import org.scalatest.WordSpec -import akka.cluster.storage.StorageTestUtils._ - -class InMemoryStorageSpec extends WordSpec with MustMatchers { - - "unversioned load" must { - "throw MissingDataException if non existing key" in { - val store = new InMemoryStorage() - - try { - store.load("foo") - fail() - } catch { - case e: MissingDataException ⇒ - } - } - - "return VersionedData if key existing" in { - val storage = new InMemoryStorage() - val key = "somekey" - val value = 
"somevalue".getBytes - storage.insert(key, value) - - val result = storage.load(key) - //todo: strange that the implicit store is not found - assertContent(key, value, result.version)(storage) - } - } - - "exist" must { - "return true if value exists" in { - val store = new InMemoryStorage() - val key = "somekey" - store.insert(key, "somevalue".getBytes) - store.exists(key) must be(true) - } - - "return false if value not exists" in { - val store = new InMemoryStorage() - store.exists("somekey") must be(false) - } - } - - "versioned load" must { - "throw MissingDataException if non existing key" in { - val store = new InMemoryStorage() - - try { - store.load("foo", 1) - fail() - } catch { - case e: MissingDataException ⇒ - } - } - - "return VersionedData if key existing and exact version match" in { - val storage = new InMemoryStorage() - val key = "somekey" - val value = "somevalue".getBytes - val storedVersion = storage.insert(key, value) - - val loaded = storage.load(key, storedVersion) - assert(loaded.version == storedVersion) - org.junit.Assert.assertArrayEquals(value, loaded.data) - } - - "throw BadVersionException is version too new" in { - val storage = new InMemoryStorage() - val key = "somekey" - val value = "somevalue".getBytes - val version = storage.insert(key, value) - - try { - storage.load(key, version + 1) - fail() - } catch { - case e: BadVersionException ⇒ - } - } - - "throw BadVersionException is version too old" in { - val storage = new InMemoryStorage() - val key = "somekey" - val value = "somevalue".getBytes - val version = storage.insert(key, value) - - try { - storage.load(key, version - 1) - fail() - } catch { - case e: BadVersionException ⇒ - } - } - } - - "insert" must { - - "place a new value when non previously existed" in { - val storage = new InMemoryStorage() - val key = "somekey" - val oldValue = "oldvalue".getBytes - storage.insert(key, oldValue) - - val result = storage.load(key) - assertContent(key, oldValue)(storage) - assert(InMemoryStorage.InitialVersion == result.version) - } - - "throw MissingDataException when there already exists an entry with the same key" in { - val storage = new InMemoryStorage() - val key = "somekey" - val initialValue = "oldvalue".getBytes - val initialVersion = storage.insert(key, initialValue) - - val newValue = "newValue".getBytes - - try { - storage.insert(key, newValue) - fail() - } catch { - case e: DataExistsException ⇒ - } - - assertContent(key, initialValue, initialVersion)(storage) - } - } - - "update" must { - - "throw MissingDataException when no node exists" in { - val storage = new InMemoryStorage() - - val key = "somekey" - - try { - storage.update(key, "somevalue".getBytes, 1) - fail() - } catch { - case e: MissingDataException ⇒ - } - } - - "replace if previous value exists and no other updates have been done" in { - val storage = new InMemoryStorage() - - //do the initial insert - val key = "foo" - val oldValue = "insert".getBytes - val initialVersion = storage.insert(key, oldValue) - - //do the update the will be the cause of the conflict. - val newValue: Array[Byte] = "update".getBytes - val newVersion = storage.update(key, newValue, initialVersion) - - assertContent(key, newValue, newVersion)(storage) - } - - "throw BadVersionException when already overwritten" in { - val storage = new InMemoryStorage() - - //do the initial insert - val key = "foo" - val oldValue = "insert".getBytes - val initialVersion = storage.insert(key, oldValue) - - //do the update the will be the cause of the conflict. 
- val newValue = "otherupdate".getBytes - val newVersion = storage.update(key, newValue, initialVersion) - - try { - storage.update(key, "update".getBytes, initialVersion) - fail() - } catch { - case e: BadVersionException ⇒ - } - - assertContent(key, newValue, newVersion)(storage) - } - } - - "overwrite" must { - - "throw MissingDataException when no node exists" in { - val storage = new InMemoryStorage() - val key = "somekey" - - try { - storage.overwrite(key, "somevalue".getBytes) - fail() - } catch { - case e: MissingDataException ⇒ - } - - storage.exists(key) must be(false) - } - - "succeed if previous value exist" in { - val storage = new InMemoryStorage() - val key = "somekey" - val oldValue = "oldvalue".getBytes - val newValue = "somevalue".getBytes - - val initialVersion = storage.insert(key, oldValue) - val overwriteVersion = storage.overwrite(key, newValue) - - assert(overwriteVersion == initialVersion + 1) - assertContent(key, newValue, overwriteVersion)(storage) - } - } - - "insertOrOverwrite" must { - "insert if nothing was inserted before" in { - val storage = new InMemoryStorage() - val key = "somekey" - val value = "somevalue".getBytes - - val version = storage.insertOrOverwrite(key, value) - - assert(version == InMemoryStorage.InitialVersion) - assertContent(key, value, version)(storage) - } - - "overwrite of something existed before" in { - val storage = new InMemoryStorage() - val key = "somekey" - val oldValue = "oldvalue".getBytes - val newValue = "somevalue".getBytes - - val initialVersion = storage.insert(key, oldValue) - - val overwriteVersion = storage.insertOrOverwrite(key, newValue) - - assert(overwriteVersion == initialVersion + 1) - assertContent(key, newValue, overwriteVersion)(storage) - } - } - -} diff --git a/akka-cluster/src/test/scala/akka/cluster/storage/StorageTestUtils.scala b/akka-cluster/src/test/scala/akka/cluster/storage/StorageTestUtils.scala deleted file mode 100644 index 71ad994356..0000000000 --- a/akka-cluster/src/test/scala/akka/cluster/storage/StorageTestUtils.scala +++ /dev/null @@ -1,15 +0,0 @@ -package akka.cluster.storage - -object StorageTestUtils { - - def assertContent(key: String, expectedData: Array[Byte], expectedVersion: Long)(implicit storage: Storage) { - val found = storage.load(key) - assert(found.version == expectedVersion, "versions should match, found[" + found.version + "], expected[" + expectedVersion + "]") - org.junit.Assert.assertArrayEquals(expectedData, found.data) - } - - def assertContent(key: String, expectedData: Array[Byte])(implicit storage: Storage) { - val found = storage.load(key) - org.junit.Assert.assertArrayEquals(expectedData, found.data) - } -} diff --git a/akka-cluster/src/test/scala/akka/cluster/storage/ZooKeeperStorageSpec.scala b/akka-cluster/src/test/scala/akka/cluster/storage/ZooKeeperStorageSpec.scala deleted file mode 100644 index 8767ccf88e..0000000000 --- a/akka-cluster/src/test/scala/akka/cluster/storage/ZooKeeperStorageSpec.scala +++ /dev/null @@ -1,132 +0,0 @@ -// package akka.cluster.storage - -// import org.scalatest.matchers.MustMatchers -// import akka.actor.Actor -// import org.scalatest.{ BeforeAndAfterEach, BeforeAndAfterAll, WordSpec } -// import org.I0Itec.zkclient.ZkServer -// //import zookeeper.AkkaZkClient -// import akka.cluster.storage.StorageTestUtils._ -// import java.io.File -// import java.util.concurrent.atomic.AtomicLong - -// class ZooKeeperStorageSpec extends WordSpec with MustMatchers with BeforeAndAfterAll with BeforeAndAfterEach { -// val dataPath = 
"_akka_cluster/data" -// val logPath = "_akka_cluster/log" -// var zkServer: ZkServer = _ -// //var zkClient: AkkaZkClient = _ -// val idGenerator = new AtomicLong - -// def generateKey: String = { -// "foo" + idGenerator.incrementAndGet() -// } - -// override def beforeAll() { -// /*new File(dataPath).delete() -// new File(logPath).delete() - -// try { -// zkServer = Cluster.startLocalCluster(dataPath, logPath) -// Thread.sleep(5000) -// Actor.cluster.start() -// zkClient = Cluster.newZkClient() -// } catch { -// case e ⇒ e.printStackTrace() -// }*/ -// } - -// override def afterAll() { -// /*zkClient.close() -// Actor.cluster.shutdown() -// ClusterDeployer.shutdown() -// Cluster.shutdownLocalCluster() -// Actor.registry.local.shutdownAll() */ -// } - -// /* -// "unversioned load" must { -// "throw MissingDataException if non existing key" in { -// val storage = new ZooKeeperStorage(zkClient) - -// try { -// storage.load(generateKey) -// fail() -// } catch { -// case e: MissingDataException ⇒ -// } -// } - -// "return VersionedData if key existing" in { -// val storage = new ZooKeeperStorage(zkClient) -// val key = generateKey -// val value = "somevalue".getBytes -// storage.insert(key, value) - -// val result = storage.load(key) -// //todo: strange that the implicit store is not found -// assertContent(key, value, result.version)(storage) -// } -// } */ - -// /*"overwrite" must { - -// "throw MissingDataException when there doesn't exist an entry to overwrite" in { -// val storage = new ZooKeeperStorage(zkClient) -// val key = generateKey -// val value = "value".getBytes - -// try { -// storage.overwrite(key, value) -// fail() -// } catch { -// case e: MissingDataException ⇒ -// } - -// assert(!storage.exists(key)) -// } - -// "overwrite if there is an existing value" in { -// val storage = new ZooKeeperStorage(zkClient) -// val key = generateKey -// val oldValue = "oldvalue".getBytes - -// storage.insert(key, oldValue) -// val newValue = "newValue".getBytes - -// val result = storage.overwrite(key, newValue) -// //assertContent(key, newValue, result.version)(storage) -// } -// } - -// "insert" must { - -// "place a new value when non previously existed" in { -// val storage = new ZooKeeperStorage(zkClient) -// val key = generateKey -// val oldValue = "oldvalue".getBytes -// storage.insert(key, oldValue) - -// val result = storage.load(key) -// assertContent(key, oldValue)(storage) -// assert(InMemoryStorage.InitialVersion == result.version) -// } - -// "throw DataExistsException when there already exists an entry with the same key" in { -// val storage = new ZooKeeperStorage(zkClient) -// val key = generateKey -// val oldValue = "oldvalue".getBytes - -// val initialVersion = storage.insert(key, oldValue) -// val newValue = "newValue".getBytes - -// try { -// storage.insert(key, newValue) -// fail() -// } catch { -// case e: DataExistsException ⇒ -// } - -// assertContent(key, oldValue, initialVersion)(storage) -// } -// } */ - -// } diff --git a/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala b/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala index a169f9e9b5..4ef079457a 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala @@ -57,10 +57,6 @@ class RemoteActorRefProvider( def tempPath() = local.tempPath() def tempContainer = local.tempContainer - @volatile - private var _failureDetector: AccrualFailureDetector = _ - def failureDetector: 
AccrualFailureDetector = _failureDetector - @volatile private var _transport: RemoteTransport = _ def transport: RemoteTransport = _transport @@ -80,8 +76,6 @@ class RemoteActorRefProvider( def init(system: ActorSystemImpl) { local.init(system) - _failureDetector = new AccrualFailureDetector(remoteSettings.FailureDetectorThreshold, remoteSettings.FailureDetectorMaxSampleSize, system) - _remoteDaemon = new RemoteSystemDaemon(system, rootPath / "remote", rootGuardian, log) local.registerExtraNames(Map(("remote", remoteDaemon))) diff --git a/akka-remote/src/test/resources/log4j.properties b/akka-remote/src/test/resources/log4j.properties deleted file mode 100644 index 2d07c8e051..0000000000 --- a/akka-remote/src/test/resources/log4j.properties +++ /dev/null @@ -1,58 +0,0 @@ -# Define some default values that can be overridden by system properties -zookeeper.root.logger=INFO, CONSOLE -zookeeper.console.threshold=OFF -zookeeper.log.dir=. -zookeeper.log.file=zookeeper.log -zookeeper.log.threshold=DEBUG -zookeeper.tracelog.dir=. -zookeeper.tracelog.file=zookeeper_trace.log - -# -# ZooKeeper Logging Configuration -# - -# Format is " (, )+ - -# DEFAULT: console appender only -log4j.rootLogger=${zookeeper.root.logger} - -# Example with rolling log file -#log4j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE - -# Example with rolling log file and tracing -#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE - -# -# Log INFO level and above messages to the console -# -log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender -log4j.appender.CONSOLE.Threshold=${zookeeper.console.threshold} -log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout -log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} [myid:%X{myid}] - %-5p [%t:%C{1}@%L] - %m%n - -# -# Add ROLLINGFILE to rootLogger to get log file output -# Log DEBUG level and above messages to a log file -log4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender -log4j.appender.ROLLINGFILE.Threshold=${zookeeper.log.threshold} -log4j.appender.ROLLINGFILE.File=${zookeeper.log.dir}/${zookeeper.log.file} - -# Max log file size of 10MB -log4j.appender.ROLLINGFILE.MaxFileSize=10MB -# uncomment the next line to limit number of backup files -#log4j.appender.ROLLINGFILE.MaxBackupIndex=10 - -log4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout -log4j.appender.ROLLINGFILE.layout.ConversionPattern=%d{ISO8601} [myid:%X{myid}] - %-5p [%t:%C{1}@%L] - %m%n - - -# -# Add TRACEFILE to rootLogger to get log file output -# Log DEBUG level and above messages to a log file -log4j.appender.TRACEFILE=org.apache.log4j.FileAppender -log4j.appender.TRACEFILE.Threshold=TRACE -log4j.appender.TRACEFILE.File=${zookeeper.tracelog.dir}/${zookeeper.tracelog.file} - -log4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout -### Notice we are including log4j's NDC here (%x) -log4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} [myid:%X{myid}] - %-5p [%t:%C{1}@%L][%x] - %m%n diff --git a/akka-remote/src/test/resources/logback-test.xml b/akka-remote/src/test/resources/logback-test.xml deleted file mode 100644 index 240a412687..0000000000 --- a/akka-remote/src/test/resources/logback-test.xml +++ /dev/null @@ -1,26 +0,0 @@ - - - - - - - - - - - - - - - [%4p] [%d{ISO8601}] [%t] %c{1}: %m%n - - - - - - - - - - - diff --git a/akka-remote/src/test/resources/zoo.cfg b/akka-remote/src/test/resources/zoo.cfg deleted file mode 100644 index b71eadcc33..0000000000 --- a/akka-remote/src/test/resources/zoo.cfg +++ /dev/null @@ -1,12 +0,0 @@ -# The number of 
milliseconds of each tick -tickTime=2000 -# The number of ticks that the initial -# synchronization phase can take -initLimit=10 -# The number of ticks that can pass between -# sending a request and getting an acknowledgement -syncLimit=5 -# the directory where the snapshot is stored. -dataDir=/export/crawlspace/mahadev/zookeeper/server1/data -# the port at which the clients will connect -clientPort=2181 diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala index 9dada98416..a5c257ca84 100644 --- a/project/AkkaBuild.scala +++ b/project/AkkaBuild.scala @@ -31,7 +31,7 @@ object AkkaBuild extends Build { Unidoc.unidocExclude := Seq(samples.id, tutorials.id), Dist.distExclude := Seq(actorTests.id, akkaSbtPlugin.id, docs.id) ), - aggregate = Seq(actor, testkit, actorTests, remote, slf4j, agent, transactor, mailboxes, zeroMQ, kernel, akkaSbtPlugin, actorMigration, samples, tutorials, docs) + aggregate = Seq(actor, testkit, actorTests, remote, cluster, slf4j, agent, transactor, mailboxes, zeroMQ, kernel, akkaSbtPlugin, actorMigration, samples, tutorials, docs) ) lazy val actor = Project( @@ -86,6 +86,25 @@ object AkkaBuild extends Build { ) ) configs (MultiJvm) + lazy val cluster = Project( + id = "akka-cluster", + base = file("akka-cluster"), + dependencies = Seq(remote, remote % "test->test", testkit % "test->test"), + settings = defaultSettings ++ multiJvmSettings ++ schoirSettings ++ Seq( + libraryDependencies ++= Dependencies.cluster, + // disable parallel tests + parallelExecution in Test := false, + extraOptions in MultiJvm <<= (sourceDirectory in MultiJvm) { src => + (name: String) => (src ** (name + ".conf")).get.headOption.map("-Dakka.config=" + _.absolutePath).toSeq + }, + scalatestOptions in MultiJvm := Seq("-r", "org.scalatest.akka.QuietReporter"), + jvmOptions in MultiJvm := { + if (getBoolean("sbt.log.noformat")) Seq("-Dakka.test.nocolor=true") else Nil + }, + test in Test <<= (test in Test) dependsOn (test in MultiJvm) + ) + ) configs (MultiJvm) + lazy val slf4j = Project( id = "akka-slf4j", base = file("akka-slf4j"), @@ -301,7 +320,7 @@ object AkkaBuild extends Build { lazy val docs = Project( id = "akka-docs", base = file("akka-docs"), - dependencies = Seq(actor, testkit % "test->test", remote, slf4j, agent, transactor, fileMailbox, mongoMailbox, redisMailbox, beanstalkMailbox, zookeeperMailbox), + dependencies = Seq(actor, testkit % "test->test", remote, cluster, slf4j, agent, transactor, fileMailbox, mongoMailbox, redisMailbox, beanstalkMailbox, zookeeperMailbox), settings = defaultSettings ++ Seq( unmanagedSourceDirectories in Test <<= baseDirectory { _ ** "code" get }, libraryDependencies ++= Dependencies.docs, @@ -410,10 +429,7 @@ object Dependencies { Test.zookeeper, Test.log4j // needed for ZkBarrier in multi-jvm tests ) -// val cluster = Seq( -// bookkeeper, commonsCodec, commonsIo, guice, h2Lzf, jacksonCore, jacksonMapper, log4j, netty, -// protobuf, sjson, zkClient, zookeeper, zookeeperLock, Test.junit, Test.scalatest -// ) + val cluster = Seq(Test.junit, Test.scalatest) val slf4j = Seq(slf4jApi) From 9a7b234ec2c402a56591012423cbb09e3091bf91 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Tue, 24 Jan 2012 11:59:57 +0100 Subject: [PATCH 46/94] Changed copyright header to Typesafe. 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- .../src/main/scala/akka/actor/Scheduler.scala | 14 +++----------- 1 file changed, 3 insertions(+), 11 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/Scheduler.scala b/akka-actor/src/main/scala/akka/actor/Scheduler.scala index 72d429b450..eed0060e52 100644 --- a/akka-actor/src/main/scala/akka/actor/Scheduler.scala +++ b/akka-actor/src/main/scala/akka/actor/Scheduler.scala @@ -1,15 +1,7 @@ -/* - * Copyright 2007 WorldWide Conferencing, LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. +/** + * Copyright (C) 2009-2011 Typesafe Inc. */ + package akka.actor import akka.util.Duration From 3088201bb88aff5ec5113992a8b57f70a5f520a1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Tue, 24 Jan 2012 12:00:53 +0100 Subject: [PATCH 47/94] Changed akka.util.Timer to use nanos and added a 'timeLeft' method. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- .../src/main/scala/akka/util/Duration.scala | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/akka-actor/src/main/scala/akka/util/Duration.scala b/akka-actor/src/main/scala/akka/util/Duration.scala index 65d6e6148c..312d733904 100644 --- a/akka-actor/src/main/scala/akka/util/Duration.scala +++ b/akka-actor/src/main/scala/akka/util/Duration.scala @@ -17,21 +17,26 @@ class TimerException(message: String) extends RuntimeException(message) * import akka.util.duration._ * import akka.util.Timer * - * val timer = Timer(30.seconds) + * val timer = Timer(30 seconds) * while (timer.isTicking) { ... } * */ -case class Timer(duration: Duration, throwExceptionOnTimeout: Boolean = false) { - val startTimeInMillis = System.currentTimeMillis - val timeoutInMillis = duration.toMillis +case class Timer(timeout: Duration, throwExceptionOnTimeout: Boolean = false) { + val startTime = Duration(System.nanoTime, NANOSECONDS) + + def timeLeft: Duration = { + val time = timeout.toNanos - (System.nanoTime - startTime.toNanos) + if (time <= 0) Duration(0, NANOSECONDS) + else Duration(time, NANOSECONDS) + } /** * Returns true while the timer is ticking. After that it either throws and exception or * returns false. Depending on if the 'throwExceptionOnTimeout' argument is true or false. */ def isTicking: Boolean = { - if (!(timeoutInMillis > (System.currentTimeMillis - startTimeInMillis))) { - if (throwExceptionOnTimeout) throw new TimerException("Time out after " + duration) + if (!(timeout.toNanos > (System.nanoTime - startTime.toNanos))) { + if (throwExceptionOnTimeout) throw new TimerException("Time out after " + timeout) else false } else true } From 6db989ffa37258bdac0d7fbaa3a2fc56744f6e98 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Tue, 24 Jan 2012 12:01:42 +0100 Subject: [PATCH 48/94] Fixed ugly logging in NettyRemoteSupport (plus misc minor formatting). 
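The nanosecond-based Timer reworked in [PATCH 47/94] above is what the later Gossiper changes lean on for bounded retry loops ([PATCH 50/94] below recurses while timer.timeLeft is positive). A minimal sketch of the intended usage, assuming the work being retried and the 100 ms back-off are application-supplied; retryWithin and attempt are illustrative names, not part of the patch:

import akka.util.Timer
import akka.util.duration._

object TimerUsageSketch {
  // Retry `attempt` until it succeeds or the 30 seconds run out. With
  // throwExceptionOnTimeout = true, isTicking throws a TimerException on
  // expiry instead of returning false.
  def retryWithin(attempt: () ⇒ Boolean) {
    val timer = Timer(30 seconds, throwExceptionOnTimeout = true)
    while (timer.isTicking) {
      if (attempt()) return
      println("Retrying for another " + timer.timeLeft.toSeconds + " seconds")
      Thread.sleep(100) // illustrative back-off between attempts
    }
  }
}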
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- .../main/scala/akka/remote/netty/NettyRemoteSupport.scala | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala index e9fe83dd7e..a225dd7aa8 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala @@ -157,7 +157,10 @@ class NettyRemoteTransport(val remoteSettings: RemoteSettings, val system: Actor def unbindClient(remoteAddress: Address): Unit = { clientsLock.writeLock().lock() try { - remoteClients.foreach { case (k, v) ⇒ if (v.isBoundTo(remoteAddress)) { v.shutdown(); remoteClients.remove(k) } } + remoteClients foreach { + case (k, v) ⇒ + if (v.isBoundTo(remoteAddress)) { v.shutdown(); remoteClients.remove(k) } + } } finally { clientsLock.writeLock().unlock() } @@ -227,7 +230,8 @@ class DefaultDisposableChannelGroup(name: String) extends DefaultChannelGroup(na override def close(): ChannelGroupFuture = { guard.writeLock().lock() try { - if (open.getAndSet(false)) super.close() else throw new IllegalStateException("ChannelGroup already closed, cannot add new channel") + if (open.getAndSet(false)) super.close() + else throw new IllegalStateException("ChannelGroup already closed, cannot add new channel") } finally { guard.writeLock().unlock() } From e24238377f07478e96097558f7c8837970d985b9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Tue, 24 Jan 2012 12:03:38 +0100 Subject: [PATCH 49/94] Minor code and ScalaDoc formatting changes. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- .../src/main/scala/akka/actor/ActorSystem.scala | 1 + .../src/main/scala/akka/remote/VectorClock.scala | 12 +++++++----- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala index e3235a5cec..c7a868ffd9 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala @@ -1,6 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ + package akka.actor import akka.config.ConfigurationException diff --git a/akka-remote/src/main/scala/akka/remote/VectorClock.scala b/akka-remote/src/main/scala/akka/remote/VectorClock.scala index 9da70111e9..fde9bb84e7 100644 --- a/akka-remote/src/main/scala/akka/remote/VectorClock.scala +++ b/akka-remote/src/main/scala/akka/remote/VectorClock.scala @@ -12,8 +12,8 @@ class VectorClockException(message: String) extends AkkaException(message) * Representation of a Vector-based clock (counting clock), inspired by Lamport logical clocks. * * Reference: - * Leslie Lamport (1978). "Time, clocks, and the ordering of events in a distributed system". Communications of the ACM 21 (7): 558-565. - * Friedemann Mattern (1988). "Virtual Time and Global States of Distributed Systems". Workshop on Parallel and Distributed Algorithms: pp. 215-226 + * 1) Leslie Lamport (1978). "Time, clocks, and the ordering of events in a distributed system". Communications of the ACM 21 (7): 558-565. + * 2) Friedemann Mattern (1988). "Virtual Time and Global States of Distributed Systems". Workshop on Parallel and Distributed Algorithms: pp. 
215-226
 */
case class VectorClock(
   versions: Vector[VectorClock.Entry] = Vector.empty[VectorClock.Entry],
@@ -55,9 +55,11 @@ object VectorClock {
   /**
    * The result of comparing two vector clocks.
    * Either:
-   * 1) v1 is BEFORE v2
-   * 2) v1 is AFTER t2
-   * 3) v1 happens CONCURRENTLY to v2
+   * {{
+   * 1) v1 is BEFORE v2
+   * 2) v1 is AFTER v2
+   * 3) v1 happens CONCURRENTLY to v2
+   * }}
    */
   sealed trait Ordering
   case object Before extends Ordering

From 04cf3ea7db6e31be55184277f009c107f1a859fc Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jonas=20Bone=CC=81r?=
Date: Tue, 24 Jan 2012 12:09:32 +0100
Subject: [PATCH 50/94] Added initial join cluster through seed nodes phase to Gossiper plus misc other fixes and additions.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

- Added JoinCluster phase (connect and get initial data from seed nodes) to Gossiper.
- Added '/system/cluster' daemon actor to Gossiper responsible for gossip communication.
- Added various config options to Gossiper.
- Fixed misc bugs in Gossiper.

Signed-off-by: Jonas Bonér
---
 .../src/main/scala/akka/remote/Gossiper.scala | 249 ++++++++++++++----
 .../scala/akka/remote/RemoteSettings.scala    |  17 +-
 2 files changed, 206 insertions(+), 60 deletions(-)

diff --git a/akka-remote/src/main/scala/akka/remote/Gossiper.scala b/akka-remote/src/main/scala/akka/remote/Gossiper.scala
index d99414f9c9..20e803c7eb 100644
--- a/akka-remote/src/main/scala/akka/remote/Gossiper.scala
+++ b/akka-remote/src/main/scala/akka/remote/Gossiper.scala
@@ -7,21 +7,24 @@ package akka.remote
 import akka.actor._
 import akka.actor.Status._
 import akka.event.Logging
-import akka.util.Duration
+import akka.util._
+import akka.dispatch.Await
 import akka.config.ConfigurationException
 import java.util.concurrent.atomic.AtomicReference
-import java.util.concurrent.TimeUnit.SECONDS
+import java.util.concurrent.TimeUnit._
+import java.util.concurrent.TimeoutException
 import java.security.SecureRandom
 import System.{ currentTimeMillis ⇒ newTimestamp }
 import scala.collection.immutable.Map
 import scala.annotation.tailrec
-import java.util.concurrent.TimeoutException
 import akka.dispatch.Await
 import akka.pattern.ask
+import com.google.protobuf.ByteString
+
 /**
  * Interface for node membership change listener.
  */
case class Gossip(
 */
// ====== END - NEW GOSSIP IMPLEMENTATION ======
+/**
+ * Interface for node membership change listener.
+ */
+trait NodeMembershipChangeListener {
+  def nodeConnected(node: ParsedTransportAddress)
+  def nodeDisconnected(node: ParsedTransportAddress)
+}
+
+sealed trait ClusterMessage extends Serializable
+
+case object JoinCluster extends ClusterMessage
+
+/**
+ * Represents the node state to gossip, versioned by a vector clock.
+ */
+case class Gossip(
+  version: VectorClock,
+  node: ParsedTransportAddress,
+  availableNodes: Set[ParsedTransportAddress] = Set.empty[ParsedTransportAddress],
+  unavailableNodes: Set[ParsedTransportAddress] = Set.empty[ParsedTransportAddress]) extends ClusterMessage
+
+class ClusterDaemon(system: ActorSystem, gossiper: Gossiper) extends Actor {
+  val log = Logging(system, "ClusterDaemon")
+
+  def receive = {
+    case JoinCluster ⇒ sender ! gossiper.latestGossip
+    case gossip: Gossip ⇒ gossiper.tell(gossip)
+    case unknown ⇒ log.error("Unknown message sent to cluster daemon [" + unknown + "]")
+  }
+}
+
 /**
  * This module is responsible for Gossiping cluster information. The abstraction maintains the list of live
 * and dead nodes. Periodically i.e.
every 1 second this module chooses a random node and initiates a round @@ -93,7 +127,7 @@ case class Gossip( * gossip to random seed with certain probability depending on number of unreachable, seed and live nodes. * */ -class Gossiper(remote: RemoteActorRefProvider, system: ActorSystemImpl) { +case class Gossiper(remote: Remote, system: ActorSystemImpl) { /** * Represents the state for this Gossiper. Implemented using optimistic lockless concurrency, @@ -103,35 +137,63 @@ class Gossiper(remote: RemoteActorRefProvider, system: ActorSystemImpl) { currentGossip: Gossip, nodeMembershipChangeListeners: Set[NodeMembershipChangeListener] = Set.empty[NodeMembershipChangeListener]) + // configuration private val remoteSettings = remote.remoteSettings private val serialization = remote.serialization - private val log = Logging(system, "Gossiper") private val failureDetector = remote.failureDetector - private val connectionManager = new RemoteConnectionManager(system, remote, Map.empty[Address, ActorRef]) - private val seeds = { + private val initalDelayForGossip = remoteSettings.InitalDelayForGossip + private val gossipFrequency = remoteSettings.GossipFrequency + + implicit val seedNodeConnectionTimeout = remoteSettings.SeedNodeConnectionTimeout + implicit val defaultTimeout = Timeout(remoteSettings.RemoteSystemDaemonAckTimeout) + + // seed nodes + private val seeds: Set[ParsedTransportAddress] = { + val seeds = remoteSettings.SeedNodes flatMap { + case uta: UnparsedTransportAddress ⇒ + uta.parse(remote.transports) match { + case pta: ParsedTransportAddress ⇒ Some(pta) + case _ ⇒ None + } + case _ ⇒ None + } if (remoteSettings.SeedNodes.isEmpty) throw new ConfigurationException( "At least one seed node must be defined in the configuration [akka.cluster.seed-nodes]") else remoteSettings.SeedNodes } - private val address = remote.transport.address - private val nodeFingerprint = address.## - + private val log = Logging(system, "Gossiper") private val random = SecureRandom.getInstance("SHA1PRNG") - private val initalDelayForGossip = remoteSettings.InitialDelayForGossip - private val gossipFrequency = remoteSettings.GossipFrequency - + private val connectionManager = new RemoteConnectionManager(system, remote, Map.empty[Address, ActorRef]) + private val clusterDaemon = system.systemActorOf(Props(new ClusterDaemon(system, this)), "cluster") private val state = new AtomicReference[State](State(currentGossip = newGossip())) - { - // start periodic gossip and cluster scrutinization - default is run them every second with 1/2 second in between - system.scheduler.schedule(Duration(initalDelayForGossip.toSeconds, SECONDS), Duration(gossipFrequency.toSeconds, SECONDS))(initateGossip()) - system.scheduler.schedule(Duration(initalDelayForGossip.toSeconds, SECONDS), Duration(gossipFrequency.toSeconds, SECONDS))(scrutinize()) - } + log.info("Starting cluster Gossiper...") + + // join the cluster by connecting to one of the seed nodes and retrieve current cluster state (Gossip) + joinCluster(Timer(remoteSettings.MaxTimeToRetryJoiningCluster)) + + // start periodic gossip and cluster scrutinization - default is run them every second with 1/2 second in between + val initateGossipCanceller = system.scheduler.schedule( + Duration(initalDelayForGossip.toSeconds, SECONDS), Duration(gossipFrequency.toSeconds, SECONDS))(initateGossip()) + val scrutinizeCanceller = system.scheduler.schedule( + Duration(initalDelayForGossip.toSeconds, SECONDS), Duration(gossipFrequency.toSeconds, SECONDS))(scrutinize()) /** - * Tell the 
gossiper some gossip news. + * Shuts down all connections to other nodes, the cluster daemon and the periodic gossip and cleanup tasks. + */ + def shutdown() { + connectionManager.shutdown() + system.stop(clusterDaemon) + initateGossipCanceller.cancel() + scrutinizeCanceller.cancel() + } + + def latestGossip: Gossip = state.get.currentGossip + + /** + * Tell the gossiper some gossip. */ @tailrec final def tell(newGossip: Gossip) { @@ -141,33 +203,29 @@ class Gossiper(remote: RemoteActorRefProvider, system: ActorSystemImpl) { val oldState = state.get val latestGossip = latestVersionOf(newGossip, oldState.currentGossip) - val oldAvailableNodes = latestGossip.availableNodes - val oldUnavailableNodes = latestGossip.unavailableNodes + val latestAvailableNodes = latestGossip.availableNodes + val latestUnavailableNodes = latestGossip.unavailableNodes - if (!(oldAvailableNodes contains gossipingNode) && !(oldUnavailableNodes contains gossipingNode)) { + if (!(latestAvailableNodes contains gossipingNode) && !(latestUnavailableNodes contains gossipingNode)) { // we have a new node - val newGossip = latestGossip copy (availableNodes = oldAvailableNodes + gossipingNode) + val newGossip = latestGossip copy (availableNodes = latestAvailableNodes + gossipingNode) val newState = oldState copy (currentGossip = incrementVersionForGossip(newGossip)) // if we won the race then update else try again if (!state.compareAndSet(oldState, newState)) tell(newGossip) // recur else { // create connections for all new nodes in the latest gossip - for { - node ← oldAvailableNodes - if connectionManager.connectionFor(node).isEmpty - } { - val connectionFactory = () ⇒ system.actorFor(RootActorPath(gossipingNode) / "remote") - connectionManager.putIfAbsent(node, connectionFactory) // create a new remote connection to the new node + (latestAvailableNodes + gossipingNode) foreach { node ⇒ + setUpConnectionToNode(node) oldState.nodeMembershipChangeListeners foreach (_ nodeConnected node) // notify listeners about the new nodes } } - } else if (oldUnavailableNodes contains gossipingNode) { + } else if (latestUnavailableNodes contains gossipingNode) { // gossip from an old former dead node - val newUnavailableNodes = oldUnavailableNodes - gossipingNode - val newAvailableNodes = oldAvailableNodes + gossipingNode + val newUnavailableNodes = latestUnavailableNodes - gossipingNode + val newAvailableNodes = latestAvailableNodes + gossipingNode val newGossip = latestGossip copy (availableNodes = newAvailableNodes, unavailableNodes = newUnavailableNodes) val newState = oldState copy (currentGossip = incrementVersionForGossip(newGossip)) @@ -178,6 +236,9 @@ class Gossiper(remote: RemoteActorRefProvider, system: ActorSystemImpl) { } } + /** + * Registers a listener to subscribe to cluster membership changes. + */ @tailrec final def registerListener(listener: NodeMembershipChangeListener) { val oldState = state.get @@ -186,6 +247,9 @@ class Gossiper(remote: RemoteActorRefProvider, system: ActorSystemImpl) { if (!state.compareAndSet(oldState, newState)) registerListener(listener) // recur } + /** + * Unsubscribes to cluster membership changes. + */ @tailrec final def unregisterListener(listener: NodeMembershipChangeListener) { val oldState = state.get @@ -194,6 +258,67 @@ class Gossiper(remote: RemoteActorRefProvider, system: ActorSystemImpl) { if (!state.compareAndSet(oldState, newState)) unregisterListener(listener) // recur } + /** + * Sets up remote connections to all the nodes in the argument list. 
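The tell method above, together with registerListener and unregisterListener, follows the same optimistic lockless-concurrency recipe throughout: read the current immutable State once, derive a new State from it, publish it with compareAndSet, and recurse if another thread won the race. A distilled, self-contained sketch of that pattern (ListenerRegistry is a made-up carrier type for illustration, not code from the patch):

import java.util.concurrent.atomic.AtomicReference
import scala.annotation.tailrec

class ListenerRegistry[L] {
  private case class State(listeners: Set[L] = Set.empty[L])
  private val state = new AtomicReference(State())

  @tailrec
  final def register(listener: L) {
    val oldState = state.get
    val newState = oldState copy (listeners = oldState.listeners + listener)
    // if we won the race we are done, else retry against the fresh state
    if (!state.compareAndSet(oldState, newState)) register(listener) // recur
  }

  def listeners: Set[L] = state.get.listeners
}

Losers of the race simply recompute against the freshly published state, so no locks are held and readers never block.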
+   */
+  private def connectToNodes(nodes: Seq[ParsedTransportAddress]) {
+    nodes foreach { node ⇒
+      setUpConnectionToNode(node)
+      state.get.nodeMembershipChangeListeners foreach (_ nodeConnected node) // notify listeners about the new nodes
+    }
+  }
+
+  // FIXME should shuffle list randomly before start traversing to avoid connecting to some node on every node
+  @tailrec
+  final private def connectToRandomNodeOf(nodes: Seq[ParsedTransportAddress]): ActorRef = {
+    nodes match {
+      case node :: rest ⇒
+        setUpConnectionToNode(node) match {
+          case Some(connection) ⇒ connection
+          case None             ⇒ connectToRandomNodeOf(rest) // recur and try the next node in the list
+        }
+      case Nil ⇒
+        throw new RemoteConnectionException(
+          "Could not establish connection to any of the nodes in the argument list")
+    }
+  }
+
+  /**
+   * Joins the cluster by connecting to one of the seed nodes and retrieving the current cluster state (Gossip).
+   */
+  private def joinCluster(timer: Timer) {
+    val seedNodes = seedNodesWithoutMyself // filter out myself
+
+    if (!seedNodes.isEmpty) { // if we have seed nodes to contact
+      connectToNodes(seedNodes)
+
+      try {
+        log.info("Trying to join cluster through one of the seed nodes [{}]", seedNodes.mkString(", "))
+
+        Await.result(connectToRandomNodeOf(seedNodes) ? JoinCluster, seedNodeConnectionTimeout) match {
+          case initialGossip: Gossip ⇒
+            // just sets/overwrites the state/gossip regardless of what it was before
+            // since it should be treated as the initial state
+            state.set(state.get copy (currentGossip = initialGossip))
+            log.debug("Received initial gossip [{}] from seed node", initialGossip)
+
+          case unknown ⇒
+            throw new IllegalStateException("Expected initial gossip from seed, received [" + unknown + "]")
+        }
+      } catch {
+        case e: Exception ⇒
+          log.error(
+            "Could not join cluster through any of the seed nodes - retrying for another {} seconds",
+            timer.timeLeft.toSeconds)
+
+          if (timer.timeLeft.toMillis > 0) joinCluster(timer) // recur - retry joining the cluster
+          else throw new RemoteConnectionException(
+            "Could not join cluster (any of the seed nodes) - giving up after trying for " +
+            timer.timeout.toSeconds + " seconds")
+      }
+    }
+  }
+
 /**
  * Initates a new round of gossip.
  */
@@ -209,47 +334,49 @@ class Gossiper(remote: RemoteActorRefProvider, system: ActorSystemImpl) {
     // 1. gossip to alive nodes
     val gossipedToSeed =
-      if (oldAvailableNodesSize > 0) gossipTo(oldAvailableNodes)
+      if (oldAvailableNodesSize > 0) gossipToRandomNodeOf(oldAvailableNodes)
       else false

     // 2. gossip to dead nodes
     if (oldUnavailableNodesSize > 0) {
       val probability: Double = oldUnavailableNodesSize / (oldAvailableNodesSize + 1)
-      if (random.nextDouble() < probability) gossipTo(oldUnavailableNodes)
+      if (random.nextDouble() < probability) gossipToRandomNodeOf(oldUnavailableNodes)
     }

     // 3. gossip to a seed for facilitating partition healing
-    if ((!gossipedToSeed || oldAvailableNodesSize < 1) && (seeds.head != address)) {
+    if ((!gossipedToSeed || oldAvailableNodesSize < 1) && (seeds.head != remoteAddress)) {
-      if (oldAvailableNodesSize == 0) gossipTo(seeds)
+      if (oldAvailableNodesSize == 0) gossipToRandomNodeOf(seeds)
       else {
         val probability = 1.0 / oldAvailableNodesSize + oldUnavailableNodesSize
-        if (random.nextDouble() <= probability) gossipTo(seeds)
+        if (random.nextDouble() <= probability) gossipToRandomNodeOf(seeds)
       }
     }
   }

   /**
-   * Gossips set of nodes passed in as argument. Returns 'true' if it gossiped to a "seed" node.
+   * Gossips to a random node in the set of nodes passed in as argument.
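One subtlety in the initateGossip() hunk above is worth flagging: if the two node-count values are plain Int sizes, then oldUnavailableNodesSize / (oldAvailableNodesSize + 1) is integer division and truncates to 0 whenever the unavailable nodes are outnumbered by the available ones, and 1.0 / oldAvailableNodesSize + oldUnavailableNodesSize parses as (1.0 / available) + unavailable rather than 1.0 / (available + unavailable). A small sketch of what the selection math presumably intends (the object and method names are ours, not the patch's):

object GossipTargetProbabilities {
  // chance of also gossiping to an unavailable node this round
  def unavailableNodeProbability(available: Int, unavailable: Int): Double =
    unavailable.toDouble / (available + 1)

  // chance of gossiping to a seed node, to facilitate partition healing
  def seedNodeProbability(available: Int, unavailable: Int): Double =
    1.0 / (available + unavailable)

  def main(args: Array[String]) {
    println(unavailableNodeProbability(9, 5)) // 0.5   - dead nodes retried about half the rounds
    println(seedNodeProbability(9, 5))        // ~0.07 - seeds contacted rarely as the cluster grows
  }
}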
+ * + * @returns 'true' if it gossiped to a "seed" node. */ - private def gossipTo(nodes: Set[Address]): Boolean = { - val peers = nodes filter (_ != address) // filter out myself + private def gossipToRandomNodeOf(nodes: Set[ParsedTransportAddress]): Boolean = { + val peers = nodes filter (_ != remoteAddress) // filter out myself val peer = selectRandomNode(peers) val oldState = state.get val oldGossip = oldState.currentGossip - val connection = connectionManager.connectionFor(peer).getOrElse( - throw new IllegalStateException("Connection for [" + peer + "] is not set up")) - - try { - val t = remoteSettings.RemoteSystemDaemonAckTimeout - Await.result(connection.?(newGossip)(t), t) match { - case Success(receiver) ⇒ log.debug("Gossip sent to [{}] was successfully received", receiver) - case Failure(cause) ⇒ log.error(cause, cause.toString) - } - } catch { - case e: TimeoutException ⇒ log.error(e, "Gossip to [%s] timed out".format(connection.path)) - case e: Exception ⇒ - log.error(e, "Could not gossip to [{}] due to: {}", connection.path, e.toString) + setUpConnectionToNode(peer) match { + case Some(connection) ⇒ + try { + Await.result(connection ? newGossip, seedNodeConnectionTimeout) match { + case Success(receiver) ⇒ log.debug("Gossip sent to [{}] was successfully received", receiver) + case Failure(cause) ⇒ log.error(cause, cause.toString) + } + } catch { + case e: TimeoutException ⇒ log.error(e, "Gossip to [%s] timed out".format(connection.path)) + case e: Exception ⇒ log.error(e, "Could not gossip to [{}] due to: {}", connection.path, e.toString) + } + case None ⇒ + // FIXME what to do if the node can't be reached for gossiping - mark as unavailable in failure detector? } seeds exists (peer == _) @@ -287,6 +414,20 @@ class Gossiper(remote: RemoteActorRefProvider, system: ActorSystemImpl) { } } + private def setUpConnectionToNode(node: ParsedTransportAddress): Option[ActorRef] = { + //connectionManager.newConnection(node, RootActorPath(RemoteSystemAddress(system.name, node)) / "system" / "cluster") + try { + Some( + connectionManager.putIfAbsent( + node, + () ⇒ system.actorFor(RootActorPath(RemoteSystemAddress(system.name, node)) / "system" / "cluster"))) + // connectionManager.connectionFor(node).getOrElse( + // throw new RemoteConnectionException("Could not set up connection to node [" + node + "]")) + } catch { + case e: Exception ⇒ None + } + } + private def newGossip(): Gossip = Gossip( version = VectorClock(), node = address, @@ -305,6 +446,8 @@ class Gossiper(remote: RemoteActorRefProvider, system: ActorSystemImpl) { } } + private def seedNodesWithoutMyself: List[Address] = seeds.filter(_ != remoteAddress.transport).toList + private def selectRandomNode(nodes: Set[Address]): Address = { nodes.toList(random.nextInt(nodes.size)) } diff --git a/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala b/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala index 6509d19383..a2ca0435b9 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala @@ -24,14 +24,17 @@ class RemoteSettings(val config: Config, val systemName: String) { val FailureDetectorThreshold = getInt("akka.remote.failure-detector.threshold") val FailureDetectorMaxSampleSize = getInt("akka.remote.failure-detector.max-sample-size") - // Gossiper - val RemoteSystemDaemonAckTimeout = Duration(getMilliseconds("akka.remote.remote-daemon-ack-timeout"), MILLISECONDS) - val InitialDelayForGossip = 
Duration(getMilliseconds("akka.remote.gossip.initialDelay"), MILLISECONDS) - val GossipFrequency = Duration(getMilliseconds("akka.remote.gossip.frequency"), MILLISECONDS) // TODO cluster config will go into akka-cluster/reference.conf when we enable that module - val SeedNodes = Set.empty[Address] ++ getStringList("akka.cluster.seed-nodes").asScala.collect { - case AddressExtractor(addr) ⇒ addr + // cluster config section + val UseCluster = getBoolean("akka.cluster.use-cluster") + val SeedNodeConnectionTimeout = Duration(config.getMilliseconds("akka.cluster.seed-node-connection-timeout"), MILLISECONDS) + val MaxTimeToRetryJoiningCluster = Duration(config.getMilliseconds("akka.cluster.max-time-to-retry-joining-cluster"), MILLISECONDS) + val InitalDelayForGossip = Duration(getMilliseconds("akka.cluster.gossip.initialDelay"), MILLISECONDS) + val GossipFrequency = Duration(getMilliseconds("akka.cluster.gossip.frequency"), MILLISECONDS) + val SeedNodes = Set.empty[RemoteNettyAddress] ++ getStringList("akka.cluster.seed-nodes").asScala.collect { + case RemoteAddressExtractor(addr) ⇒ addr.transport } + val RemoteSystemDaemonAckTimeout = Duration(getMilliseconds("akka.remote.remote-daemon-ack-timeout"), MILLISECONDS) val UntrustedMode = getBoolean("akka.remote.untrusted-mode") -} \ No newline at end of file +} From 642be72a6b906f97549332f87e412deea3ca1e67 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Fri, 27 Jan 2012 14:50:33 +0100 Subject: [PATCH 51/94] Added 'Versioned' abstraction which is versioned through a VectorClock (including tests). MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- .../main/scala/akka/remote/VectorClock.scala | 14 ++++ .../scala/akka/remote/VectorClockSpec.scala | 84 ++++++++++++++++++- 2 files changed, 95 insertions(+), 3 deletions(-) diff --git a/akka-remote/src/main/scala/akka/remote/VectorClock.scala b/akka-remote/src/main/scala/akka/remote/VectorClock.scala index fde9bb84e7..42ea917669 100644 --- a/akka-remote/src/main/scala/akka/remote/VectorClock.scala +++ b/akka-remote/src/main/scala/akka/remote/VectorClock.scala @@ -8,6 +8,20 @@ import akka.AkkaException class VectorClockException(message: String) extends AkkaException(message) +trait Versioned { + def version: VectorClock +} + +object Versioned { + def latestVersionOf[T <: Versioned](versioned1: T, versioned2: T): T = { + (versioned1.version compare versioned2.version) match { + case VectorClock.Before ⇒ versioned2 // version 1 is BEFORE (older), use version 2 + case VectorClock.After ⇒ versioned1 // version 1 is AFTER (newer), use version 1 + case VectorClock.Concurrent ⇒ versioned1 // can't establish a causal relationship between versions => conflict - keeping version 1 + } + } +} + /** * Representation of a Vector-based clock (counting clock), inspired by Lamport logical clocks. 
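 *
 * A minimal usage sketch (based on the increment/compare API exercised in the tests below;
 * the node ids and timestamps are illustrative only):
 * {{{
 * val a = VectorClock().increment(1, System.currentTimeMillis)
 * val b = a.increment(2, System.currentTimeMillis)
 * a.compare(b) // Before  - every entry of a is dominated by b
 * b.compare(a) // After
 * }}}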
* diff --git a/akka-remote/src/test/scala/akka/remote/VectorClockSpec.scala b/akka-remote/src/test/scala/akka/remote/VectorClockSpec.scala index 5bfda16666..03e4109423 100644 --- a/akka-remote/src/test/scala/akka/remote/VectorClockSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/VectorClockSpec.scala @@ -6,7 +6,7 @@ import akka.testkit.AkkaSpec class VectorClockSpec extends AkkaSpec { import VectorClock._ - "An VectorClock" must { + "A VectorClock" must { "have zero versions when created" in { val clock = VectorClock() @@ -40,7 +40,7 @@ class VectorClockSpec extends AkkaSpec { clock1.compare(clock2) must not be (Concurrent) } - "A clock should not happen before an identical clock" in { + "not happen before an identical clock" in { val clock1_1 = VectorClock() val clock2_1 = clock1_1.increment(1, System.currentTimeMillis) val clock3_1 = clock2_1.increment(2, System.currentTimeMillis) @@ -54,7 +54,7 @@ class VectorClockSpec extends AkkaSpec { clock4_1.compare(clock4_2) must not be (Concurrent) } - "A clock should happen before an identical clock with a single additional event" in { + "happen before an identical clock with a single additional event" in { val clock1_1 = VectorClock() val clock2_1 = clock1_1.increment(1, System.currentTimeMillis) val clock3_1 = clock2_1.increment(2, System.currentTimeMillis) @@ -121,4 +121,82 @@ class VectorClockSpec extends AkkaSpec { clock5_1.compare(clock3_2) must be(After) } } + + "A Versioned" must { + class TestVersioned(val version: VectorClock = VectorClock()) extends Versioned { + def increment(v: Int, time: Long) = new TestVersioned(version.increment(v, time)) + } + + "have zero versions when created" in { + val versioned = new TestVersioned() + versioned.version.versions must be(Vector()) + } + + "happen before an identical versioned with a single additional event" in { + val versioned1_1 = new TestVersioned() + val versioned2_1 = versioned1_1.increment(1, System.currentTimeMillis) + val versioned3_1 = versioned2_1.increment(2, System.currentTimeMillis) + val versioned4_1 = versioned3_1.increment(1, System.currentTimeMillis) + + val versioned1_2 = new TestVersioned() + val versioned2_2 = versioned1_2.increment(1, System.currentTimeMillis) + val versioned3_2 = versioned2_2.increment(2, System.currentTimeMillis) + val versioned4_2 = versioned3_2.increment(1, System.currentTimeMillis) + val versioned5_2 = versioned4_2.increment(3, System.currentTimeMillis) + + Versioned.latestVersionOf[TestVersioned](versioned4_1, versioned5_2) must be(versioned5_2) + } + + "Two versioneds with different events should be concurrent: 1" in { + var versioned1_1 = new TestVersioned() + val versioned2_1 = versioned1_1.increment(1, System.currentTimeMillis) + + val versioned1_2 = new TestVersioned() + val versioned2_2 = versioned1_2.increment(2, System.currentTimeMillis) + + Versioned.latestVersionOf[TestVersioned](versioned2_1, versioned2_2) must be(versioned2_1) + } + + "Two versioneds with different events should be concurrent: 2" in { + val versioned1_3 = new TestVersioned() + val versioned2_3 = versioned1_3.increment(1, System.currentTimeMillis) + val versioned3_3 = versioned2_3.increment(2, System.currentTimeMillis) + val versioned4_3 = versioned3_3.increment(1, System.currentTimeMillis) + + val versioned1_4 = new TestVersioned() + val versioned2_4 = versioned1_4.increment(1, System.currentTimeMillis) + val versioned3_4 = versioned2_4.increment(1, System.currentTimeMillis) + val versioned4_4 = versioned3_4.increment(3, System.currentTimeMillis) + + 
Versioned.latestVersionOf[TestVersioned](versioned4_3, versioned4_4) must be(versioned4_3) + } + + "be earlier than another versioned if it has an older version" in { + val versioned1_1 = new TestVersioned() + val versioned2_1 = versioned1_1.increment(2, System.currentTimeMillis) + val versioned3_1 = versioned2_1.increment(2, System.currentTimeMillis) + + val versioned1_2 = new TestVersioned() + val versioned2_2 = versioned1_2.increment(1, System.currentTimeMillis) + val versioned3_2 = versioned2_2.increment(2, System.currentTimeMillis) + val versioned4_2 = versioned3_2.increment(2, System.currentTimeMillis) + val versioned5_2 = versioned4_2.increment(3, System.currentTimeMillis) + + Versioned.latestVersionOf[TestVersioned](versioned3_1, versioned5_2) must be(versioned5_2) + } + + "be later than another versioned if it has an newer version" in { + val versioned1_1 = new TestVersioned() + val versioned2_1 = versioned1_1.increment(1, System.currentTimeMillis) + val versioned3_1 = versioned2_1.increment(2, System.currentTimeMillis) + val versioned4_1 = versioned3_1.increment(2, System.currentTimeMillis) + val versioned5_1 = versioned4_1.increment(3, System.currentTimeMillis) + + val versioned1_2 = new TestVersioned() + val versioned2_2 = versioned1_2.increment(2, System.currentTimeMillis) + val versioned3_2 = versioned2_2.increment(2, System.currentTimeMillis) + + Versioned.latestVersionOf[TestVersioned](versioned5_1, versioned3_2) must be(versioned5_1) + } + } } From 135f1e3002f89ceb4092cfacaf8828b5b13363f9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Sat, 28 Jan 2012 15:33:24 +0100 Subject: [PATCH 52/94] Added logging to AccrualFailureDetector MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- .../akka/remote/AccrualFailureDetector.scala | 23 +++++++++++++------ .../remote/AccrualFailureDetectorSpec.scala | 12 ++++++---- 2 files changed, 23 insertions(+), 12 deletions(-) diff --git a/akka-remote/src/main/scala/akka/remote/AccrualFailureDetector.scala b/akka-remote/src/main/scala/akka/remote/AccrualFailureDetector.scala index 2d7a831b9d..1c9cb45c08 100644 --- a/akka-remote/src/main/scala/akka/remote/AccrualFailureDetector.scala +++ b/akka-remote/src/main/scala/akka/remote/AccrualFailureDetector.scala @@ -10,6 +10,9 @@ import scala.annotation.tailrec import System.{ currentTimeMillis ⇒ newTimestamp } import akka.actor.{ ActorSystem, Address } +import akka.actor.ActorSystem +import akka.event.Logging + /** * Implementation of 'The Phi Accrual Failure Detector' by Hayashibara et al. as defined in their paper: * [http://ddg.jaist.ac.jp/pub/HDY+04.pdf] @@ -20,12 +23,14 @@ import akka.actor.{ ActorSystem, Address } *
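 * The suspicion level is expressed as a value called phi, computed here in a simplified
 * form of the paper's formula from the time since the last heartbeat and the mean
 * heartbeat interval observed so far:
 *
 *   phi = (1 / ln 10) * timeSinceLastHeartbeat / meanHeartbeatInterval
 *
 * This approximates -log10(probability that the node is still alive given the silence);
 * under that reading a threshold of 8 corresponds to roughly a 10^-8 chance of a false
 * suspicion.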

* Default threshold is 8, but can be configured in the Akka config. */ -class AccrualFailureDetector(val threshold: Int = 8, val maxSampleSize: Int = 1000) { +class AccrualFailureDetector(val threshold: Int = 8, val maxSampleSize: Int = 1000, system: ActorSystem) { private final val PhiFactor = 1.0 / math.log(10.0) private case class FailureStats(mean: Double = 0.0D, variance: Double = 0.0D, deviation: Double = 0.0D) + private val log = Logging(system, "FailureDetector") + /** * Implement using optimistic lockless concurrency, all state is represented * by this immutable case class and managed by an AtomicReference. @@ -49,6 +54,7 @@ class AccrualFailureDetector(val threshold: Int = 8, val maxSampleSize: Int = 10 */ @tailrec final def heartbeat(connection: Address) { + log.info("Heartbeat from connection [{}] ", connection) val oldState = state.get val latestTimestamp = oldState.timestamps.get(connection) @@ -132,12 +138,15 @@ class AccrualFailureDetector(val threshold: Int = 8, val maxSampleSize: Int = 10 def phi(connection: Address): Double = { val oldState = state.get val oldTimestamp = oldState.timestamps.get(connection) - if (oldTimestamp.isEmpty) 0.0D // treat unmanaged connections, e.g. with zero heartbeats, as healthy connections - else { - val timestampDiff = newTimestamp - oldTimestamp.get - val mean = oldState.failureStats.get(connection).getOrElse(FailureStats()).mean - PhiFactor * timestampDiff / mean - } + val phi = + if (oldTimestamp.isEmpty) 0.0D // treat unmanaged connections, e.g. with zero heartbeats, as healthy connections + else { + val timestampDiff = newTimestamp - oldTimestamp.get + val mean = oldState.failureStats.get(connection).getOrElse(FailureStats()).mean + PhiFactor * timestampDiff / mean + } + log.debug("Phi value [{}] and threshold [{}] for connection [{}] ", phi, threshold, connection) + phi } /** diff --git a/akka-remote/src/test/scala/akka/remote/AccrualFailureDetectorSpec.scala b/akka-remote/src/test/scala/akka/remote/AccrualFailureDetectorSpec.scala index 17a848b8d3..cffc424408 100644 --- a/akka-remote/src/test/scala/akka/remote/AccrualFailureDetectorSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/AccrualFailureDetectorSpec.scala @@ -4,13 +4,15 @@ import java.net.InetSocketAddress import akka.testkit.AkkaSpec import akka.actor.Address -class AccrualFailureDetectorSpec extends AkkaSpec { +class AccrualFailureDetectorSpec extends AkkaSpec(""" + akka.loglevel = "DEBUG" +""") { "An AccrualFailureDetector" must { val conn = Address("akka", "", Some("localhost"), Some(2552)) "mark node as available after a series of successful heartbeats" in { - val fd = new AccrualFailureDetector() + val fd = new AccrualFailureDetector(system = system) fd.heartbeat(conn) @@ -25,7 +27,7 @@ class AccrualFailureDetectorSpec extends AkkaSpec { // FIXME how should we deal with explicit removal of connection? 
- if triggered as failure then we have a problem in bootstrap - see line 142 in AccrualFailureDetector
     "mark node as dead after explicit removal of connection" ignore {
-      val fd = new AccrualFailureDetector
+      val fd = new AccrualFailureDetector(system = system)

       fd.heartbeat(conn)

@@ -43,7 +45,7 @@ class AccrualFailureDetectorSpec extends AkkaSpec {
     }

     "mark node as dead if heartbeats are missed" in {
-      val fd = new AccrualFailureDetector(threshold = 3)
+      val fd = new AccrualFailureDetector(threshold = 3, system = system)

       fd.heartbeat(conn)

@@ -61,7 +63,7 @@ class AccrualFailureDetectorSpec extends AkkaSpec {
     }

     "mark node as available if it starts heartbeat again after being marked dead due to detection of failure" in {
-      val fd = new AccrualFailureDetector(threshold = 3)
+      val fd = new AccrualFailureDetector(threshold = 3, system = system)

       fd.heartbeat(conn)

From 4b0b985f98b2e815d1b504e84b31a7f3be7212ff Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jonas=20Bone=CC=81r?=
Date: Sat, 28 Jan 2012 15:34:46 +0100
Subject: [PATCH 53/94] Added test for testing the Failure Detector when used
 together with Gossiper and a set of remote cluster nodes.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Signed-off-by: Jonas Bonér
---
 .../test/scala/akka/remote/GossiperSpec.scala | 13 ---
 .../GossipingAccrualFailureDetectorSpec.scala | 95 +++++++++++++++++++
 2 files changed, 95 insertions(+), 13 deletions(-)
 delete mode 100644 akka-remote/src/test/scala/akka/remote/GossiperSpec.scala
 create mode 100644 akka-remote/src/test/scala/akka/remote/GossipingAccrualFailureDetectorSpec.scala

diff --git a/akka-remote/src/test/scala/akka/remote/GossiperSpec.scala b/akka-remote/src/test/scala/akka/remote/GossiperSpec.scala
deleted file mode 100644
index 12e2925b26..0000000000
--- a/akka-remote/src/test/scala/akka/remote/GossiperSpec.scala
+++ /dev/null
@@ -1,13 +0,0 @@
-package akka.remote
-
-import java.net.InetSocketAddress
-import akka.testkit.AkkaSpec
-
-class GossiperSpec extends AkkaSpec {
-
-  "An Gossiper" must {
-
-    "..." in {
-    }
-  }
-}
diff --git a/akka-remote/src/test/scala/akka/remote/GossipingAccrualFailureDetectorSpec.scala b/akka-remote/src/test/scala/akka/remote/GossipingAccrualFailureDetectorSpec.scala
new file mode 100644
index 0000000000..85f1c5a084
--- /dev/null
+++ b/akka-remote/src/test/scala/akka/remote/GossipingAccrualFailureDetectorSpec.scala
@@ -0,0 +1,95 @@
+/**
+ * Copyright (C) 2009-2011 Typesafe Inc.
+ */ +package akka.remote + +import java.net.InetSocketAddress + +import akka.testkit._ +import akka.dispatch._ +import akka.actor._ +import com.typesafe.config._ + +class GossipingAccrualFailureDetectorSpec extends AkkaSpec(""" + akka { + loglevel = "INFO" + actor.provider = "akka.remote.RemoteActorRefProvider" + + remote.server.hostname = localhost + remote.server.port = 5550 + remote.failure-detector.threshold = 3 + cluster.seed-nodes = ["akka://localhost:5551"] + } + """) with ImplicitSender { + + val conn1 = RemoteNettyAddress("localhost", 5551) + val node1 = ActorSystem("GossiperSpec", ConfigFactory + .parseString("akka { remote.server.port=5551, cluster.use-cluster = on }") + .withFallback(system.settings.config)) + val remote1 = + node1.asInstanceOf[ActorSystemImpl] + .provider.asInstanceOf[RemoteActorRefProvider] + .remote + val gossiper1 = remote1.gossiper + val fd1 = remote1.failureDetector + gossiper1 must be('defined) + + val conn2 = RemoteNettyAddress("localhost", 5552) + val node2 = ActorSystem("GossiperSpec", ConfigFactory + .parseString("akka { remote.server.port=5552, cluster.use-cluster = on }") + .withFallback(system.settings.config)) + val remote2 = + node2.asInstanceOf[ActorSystemImpl] + .provider.asInstanceOf[RemoteActorRefProvider] + .remote + val gossiper2 = remote2.gossiper + val fd2 = remote2.failureDetector + gossiper2 must be('defined) + + val conn3 = RemoteNettyAddress("localhost", 5553) + val node3 = ActorSystem("GossiperSpec", ConfigFactory + .parseString("akka { remote.server.port=5553, cluster.use-cluster = on }") + .withFallback(system.settings.config)) + val remote3 = + node3.asInstanceOf[ActorSystemImpl] + .provider.asInstanceOf[RemoteActorRefProvider] + .remote + val gossiper3 = remote3.gossiper + val fd3 = remote3.failureDetector + gossiper3 must be('defined) + + "A Gossip-driven Failure Detector" must { + + "receive gossip heartbeats so that all healthy nodes in the cluster are marked 'available'" ignore { + Thread.sleep(5000) // let them gossip for 10 seconds + fd1.isAvailable(conn2) must be(true) + fd1.isAvailable(conn3) must be(true) + fd2.isAvailable(conn1) must be(true) + fd2.isAvailable(conn3) must be(true) + fd3.isAvailable(conn1) must be(true) + fd3.isAvailable(conn2) must be(true) + } + + "mark node as 'unavailable' if a node in the cluster is shut down and its heartbeats stops" ignore { + // kill node 3 + gossiper3.get.shutdown() + node3.shutdown() + Thread.sleep(5000) // let them gossip for 10 seconds + + fd1.isAvailable(conn2) must be(true) + fd1.isAvailable(conn3) must be(false) + fd2.isAvailable(conn1) must be(true) + fd2.isAvailable(conn3) must be(false) + } + } + + override def atTermination() { + gossiper1.get.shutdown() + gossiper2.get.shutdown() + gossiper3.get.shutdown() + node1.shutdown() + node2.shutdown() + node3.shutdown() + // FIXME Ordering problem - If we shut down the ActorSystem before the Gossiper then we get an IllegalStateException + } +} From 56b94227d316eba64e406ba0368ee3a123c3e317 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Mon, 30 Jan 2012 11:37:02 +0100 Subject: [PATCH 54/94] Added some options to the cluster config section. Moved gossip config from remote to cluster section. 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- akka-remote/src/main/resources/reference.conf | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/akka-remote/src/main/resources/reference.conf b/akka-remote/src/main/resources/reference.conf index 86de93527c..f9c6430f6f 100644 --- a/akka-remote/src/main/resources/reference.conf +++ b/akka-remote/src/main/resources/reference.conf @@ -131,11 +131,6 @@ akka { max-sample-size = 1000 } - gossip { - initialDelay = 5s - frequency = 1s - } - # The dispatcher used for remote system messages compute-grid-dispatcher { # defaults to same settings as default-dispatcher @@ -150,6 +145,13 @@ akka { } cluster { + use-cluster = off seed-nodes = [] + max-time-to-retry-joining-cluster = 30s + seed-node-connection-timeout = 30s + gossip { + initialDelay = 5s + frequency = 1s + } } } From c6fb6def13c2647758db8822c6f47ae8077f80bc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Mon, 30 Jan 2012 11:41:41 +0100 Subject: [PATCH 55/94] Enhanced the Gossip state with member status, ring convergence flags etc. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Added member status, ring convergence flags etc to Gossip state. * Updated Gossiper to use Member throughout instead of ParsedTransportAddress. * Commented out cluster membership updating to be replaced by the one in the cluster specification. Signed-off-by: Jonas Bonér --- .../src/main/scala/akka/remote/Gossiper.scala | 396 +++++++++--------- 1 file changed, 191 insertions(+), 205 deletions(-) diff --git a/akka-remote/src/main/scala/akka/remote/Gossiper.scala b/akka-remote/src/main/scala/akka/remote/Gossiper.scala index 20e803c7eb..e5b6e938bc 100644 --- a/akka-remote/src/main/scala/akka/remote/Gossiper.scala +++ b/akka-remote/src/main/scala/akka/remote/Gossiper.scala @@ -11,13 +11,13 @@ import akka.util._ import akka.dispatch.Await import akka.config.ConfigurationException -import java.util.concurrent.atomic.AtomicReference +import java.util.concurrent.atomic.{ AtomicReference, AtomicBoolean } import java.util.concurrent.TimeUnit._ import java.util.concurrent.TimeoutException import java.security.SecureRandom import System.{ currentTimeMillis ⇒ newTimestamp } -import scala.collection.immutable.Map +import scala.collection.immutable.{ Map, SortedSet } import scala.annotation.tailrec import akka.dispatch.Await @@ -26,105 +26,94 @@ import akka.pattern.ask import com.google.protobuf.ByteString /** - * Interface for node membership change listener. + * Interface for member membership change listener. */ trait NodeMembershipChangeListener { - def nodeConnected(node: Address) - def nodeDisconnected(node: Address) + def memberConnected(member: Member) + def memberDisconnected(member: Member) } /** - * Represents the node state of to gossip, versioned by a vector clock. + * Base trait for all cluster messages. All ClusterMessage's are serializable. 
*/ -case class Gossip( - version: VectorClock, - node: Address, - availableNodes: Set[Address] = Set.empty[Address], - unavailableNodes: Set[Address] = Set.empty[Address]) - -// ====== START - NEW GOSSIP IMPLEMENTATION ====== -/* - case class Gossip( - version: VectorClock, - node: ParsedTransportAddress, - leader: ParsedTransportAddress, // FIXME leader is always head of 'members', so we probably don't need this field - members: SortedSet[Member] = SortetSet.empty[Member](Ordering.fromLessThan[String](_ > _)), // sorted set of members with their status, sorted by name - seen: Map[Member, VectorClock] = Map.empty[Member, VectorClock], // for ring convergence - pendingChanges: Option[Vector[PendingPartitioningChange]] = None, // for handoff - meta: Option[Map[String, Array[Byte]]] = None) // misc meta-data - - case class Member(address: ParsedTransportAddress, status: MemberStatus) - - sealed trait MemberStatus - object MemberStatus { - case class Joining(version: VectorClock) extends MemberStatus - case class Up(version: VectorClock) extends MemberStatus - case class Leaving(version: VectorClock) extends MemberStatus - case class Exiting(version: VectorClock) extends MemberStatus - case class Down(version: VectorClock) extends MemberStatus - } - - sealed trait PendingPartitioningStatus - object PendingPartitioningStatus { - case object Complete extends PendingPartitioningStatus - case object Awaiting extends PendingPartitioningStatus - } - - // FIXME what is this? - type VNodeMod = AnyRef - - case class PendingPartitioningChange( - owner: ParsedTransportAddress, - nextOwner: ParsedTransportAddress, - changes: Vector[VNodeMod], - status: PendingPartitioningStatus) -*/ -// ====== END - NEW GOSSIP IMPLEMENTATION ====== - /** - * Interface for node membership change listener. + * Command to join the cluster. */ -trait NodeMembershipChangeListener { - def nodeConnected(node: ParsedTransportAddress) - def nodeDisconnected(node: ParsedTransportAddress) -} - -sealed trait ClusterMessage extends Serializable - case object JoinCluster extends ClusterMessage /** - * Represents the node state of to gossip, versioned by a vector clock. + * Represents the state of the cluster; cluster ring membership, ring convergence, meta data - all versioned by a vector clock. */ case class Gossip( - version: VectorClock, - node: ParsedTransportAddress, - availableNodes: Set[ParsedTransportAddress] = Set.empty[ParsedTransportAddress], - unavailableNodes: Set[ParsedTransportAddress] = Set.empty[ParsedTransportAddress]) extends ClusterMessage + version: VectorClock = VectorClock(), + member: Address, + // sorted set of members with their status, sorted by name + members: SortedSet[Member] = SortedSet.empty[Member](Ordering.fromLessThan[Member](_.address.toString > _.address.toString)), + unavailableMembers: Set[Member] = Set.empty[Member], + // for ring convergence + seen: Map[Member, VectorClock] = Map.empty[Member, VectorClock], + // for handoff + //pendingChanges: Option[Vector[PendingPartitioningChange]] = None, + meta: Option[Map[String, Array[Byte]]] = None) + extends ClusterMessage // is a serializable cluster message + with Versioned // has a vector clock as version -class ClusterDaemon(system: ActorSystem, gossiper: Gossiper) extends Actor { +/** + * Represents the address and the current status of a cluster member node. 
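+ *
+ * A minimal construction sketch (the address value is illustrative):
+ * {{{
+ * Member(address, MemberStatus.Joining())
+ * }}}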
+ */ +case class Member(address: Address, status: MemberStatus) extends ClusterMessage + +/** + * Defines the current status of a cluster member node + * + * Can be one of: Joining, Up, Leaving, Exiting and Down. + */ +sealed trait MemberStatus extends ClusterMessage with Versioned +object MemberStatus { + case class Joining(version: VectorClock = VectorClock()) extends MemberStatus + case class Up(version: VectorClock = VectorClock()) extends MemberStatus + case class Leaving(version: VectorClock = VectorClock()) extends MemberStatus + case class Exiting(version: VectorClock = VectorClock()) extends MemberStatus + case class Down(version: VectorClock = VectorClock()) extends MemberStatus +} + +// sealed trait PendingPartitioningStatus +// object PendingPartitioningStatus { +// case object Complete extends PendingPartitioningStatus +// case object Awaiting extends PendingPartitioningStatus +// } + +// case class PendingPartitioningChange( +// owner: Address, +// nextOwner: Address, +// changes: Vector[VNodeMod], +// status: PendingPartitioningStatus) + +final class ClusterDaemon(system: ActorSystem, gossiper: Gossiper) extends Actor { val log = Logging(system, "ClusterDaemon") def receive = { - case JoinCluster ⇒ sender ! gossiper.latestGossip - case gossip: Gossip ⇒ gossiper.tell(gossip) - case unknown ⇒ log.error("Unknown message sent to cluster daemon [" + unknown + "]") + case JoinCluster ⇒ sender ! gossiper.latestGossip + case gossip: Gossip ⇒ + gossiper.tell(gossip) + + case unknown ⇒ log.error("Unknown message sent to cluster daemon [" + unknown + "]") } } /** * This module is responsible for Gossiping cluster information. The abstraction maintains the list of live - * and dead nodes. Periodically i.e. every 1 second this module chooses a random node and initiates a round + * and dead members. Periodically i.e. every 1 second this module chooses a random member and initiates a round * of Gossip with it. Whenever it gets gossip updates it updates the Failure Detector with the liveness * information. *

- * During each of these runs the node initiates gossip exchange according to following rules (as defined in the + * During each of these runs the member initiates gossip exchange according to following rules (as defined in the * Cassandra documentation [http://wiki.apache.org/cassandra/ArchitectureGossip]: *

- *   1) Gossip to random live node (if any)
- *   2) Gossip to random unreachable node with certain probability depending on number of unreachable and live nodes
- *   3) If the node gossiped to at (1) was not seed, or the number of live nodes is less than number of seeds,
- *       gossip to random seed with certain probability depending on number of unreachable, seed and live nodes.
+ *   1) Gossip to random live member (if any)
+ *   2) Gossip to random unreachable member with certain probability depending on number of unreachable and live members
+ *   3) If the member gossiped to at (1) was not seed, or the number of live members is less than number of seeds,
+ *       gossip to random seed with certain probability depending on number of unreachable, seed and live members.
  * 
*/ case class Gossiper(remote: Remote, system: ActorSystemImpl) { @@ -135,7 +124,7 @@ case class Gossiper(remote: Remote, system: ActorSystemImpl) { */ private case class State( currentGossip: Gossip, - nodeMembershipChangeListeners: Set[NodeMembershipChangeListener] = Set.empty[NodeMembershipChangeListener]) + memberMembershipChangeListeners: Set[NodeMembershipChangeListener] = Set.empty[NodeMembershipChangeListener]) // configuration private val remoteSettings = remote.remoteSettings @@ -148,46 +137,53 @@ case class Gossiper(remote: Remote, system: ActorSystemImpl) { implicit val seedNodeConnectionTimeout = remoteSettings.SeedNodeConnectionTimeout implicit val defaultTimeout = Timeout(remoteSettings.RemoteSystemDaemonAckTimeout) - // seed nodes - private val seeds: Set[ParsedTransportAddress] = { + // seed members + private val seeds: Set[Member] = { val seeds = remoteSettings.SeedNodes flatMap { case uta: UnparsedTransportAddress ⇒ uta.parse(remote.transports) match { - case pta: ParsedTransportAddress ⇒ Some(pta) + case pta: Address ⇒ Some(Member(pta, MemberStatus.Up())) case _ ⇒ None } case _ ⇒ None } if (remoteSettings.SeedNodes.isEmpty) throw new ConfigurationException( - "At least one seed node must be defined in the configuration [akka.cluster.seed-nodes]") + "At least one seed member must be defined in the configuration [akka.cluster.seed-members]") else remoteSettings.SeedNodes } + private val isRunning = new AtomicBoolean(true) private val log = Logging(system, "Gossiper") private val random = SecureRandom.getInstance("SHA1PRNG") private val connectionManager = new RemoteConnectionManager(system, remote, Map.empty[Address, ActorRef]) + + // Is it right to put this guy under the /system path or should we have a top-level /cluster or something else...? private val clusterDaemon = system.systemActorOf(Props(new ClusterDaemon(system, this)), "cluster") private val state = new AtomicReference[State](State(currentGossip = newGossip())) log.info("Starting cluster Gossiper...") - // join the cluster by connecting to one of the seed nodes and retrieve current cluster state (Gossip) + // join the cluster by connecting to one of the seed members and retrieve current cluster state (Gossip) joinCluster(Timer(remoteSettings.MaxTimeToRetryJoiningCluster)) - // start periodic gossip and cluster scrutinization - default is run them every second with 1/2 second in between + // start periodic gossip and cluster scrutinization val initateGossipCanceller = system.scheduler.schedule( Duration(initalDelayForGossip.toSeconds, SECONDS), Duration(gossipFrequency.toSeconds, SECONDS))(initateGossip()) + val scrutinizeCanceller = system.scheduler.schedule( Duration(initalDelayForGossip.toSeconds, SECONDS), Duration(gossipFrequency.toSeconds, SECONDS))(scrutinize()) /** - * Shuts down all connections to other nodes, the cluster daemon and the periodic gossip and cleanup tasks. + * Shuts down all connections to other members, the cluster daemon and the periodic gossip and cleanup tasks. 
*/ def shutdown() { - connectionManager.shutdown() - system.stop(clusterDaemon) - initateGossipCanceller.cancel() - scrutinizeCanceller.cancel() + if (isRunning.compareAndSet(true, false)) { + log.info("Shutting down Gossiper for [{}]", remoteAddress) + connectionManager.shutdown() + system.stop(clusterDaemon) + initateGossipCanceller.cancel() + scrutinizeCanceller.cancel() + } } def latestGossip: Gossip = state.get.currentGossip @@ -195,45 +191,56 @@ case class Gossiper(remote: Remote, system: ActorSystemImpl) { /** * Tell the gossiper some gossip. */ - @tailrec + //@tailrec final def tell(newGossip: Gossip) { - val gossipingNode = newGossip.node + val gossipingNode = newGossip.member failureDetector heartbeat gossipingNode // update heartbeat in failure detector - val oldState = state.get - val latestGossip = latestVersionOf(newGossip, oldState.currentGossip) - val latestAvailableNodes = latestGossip.availableNodes - val latestUnavailableNodes = latestGossip.unavailableNodes + // FIXME all below here is WRONG - redesign with cluster convergence in mind - if (!(latestAvailableNodes contains gossipingNode) && !(latestUnavailableNodes contains gossipingNode)) { - // we have a new node - val newGossip = latestGossip copy (availableNodes = latestAvailableNodes + gossipingNode) - val newState = oldState copy (currentGossip = incrementVersionForGossip(newGossip)) + // val oldState = state.get + // println("-------- NEW VERSION " + newGossip) + // println("-------- OLD VERSION " + oldState.currentGossip) + // val latestGossip = VectorClock.latestVersionOf(newGossip, oldState.currentGossip) + // println("-------- WINNING VERSION " + latestGossip) - // if we won the race then update else try again - if (!state.compareAndSet(oldState, newState)) tell(newGossip) // recur - else { - // create connections for all new nodes in the latest gossip - (latestAvailableNodes + gossipingNode) foreach { node ⇒ - setUpConnectionToNode(node) - oldState.nodeMembershipChangeListeners foreach (_ nodeConnected node) // notify listeners about the new nodes - } - } + // val latestAvailableNodes = latestGossip.members + // val latestUnavailableNodes = latestGossip.unavailableMembers + // println("=======>>> gossipingNode: " + gossipingNode) + // println("=======>>> latestAvailableNodes: " + latestAvailableNodes) + // if (!(latestAvailableNodes contains gossipingNode) && !(latestUnavailableNodes contains gossipingNode)) { + // println("-------- NEW NODE") + // // we have a new member + // val newGossip = latestGossip copy (availableNodes = latestAvailableNodes + gossipingNode) + // val newState = oldState copy (currentGossip = incrementVersionForGossip(newGossip)) - } else if (latestUnavailableNodes contains gossipingNode) { - // gossip from an old former dead node + // println("--------- new GOSSIP " + newGossip.members) + // println("--------- new STATE " + newState) + // // if we won the race then update else try again + // if (!state.compareAndSet(oldState, newState)) tell(newGossip) // recur + // else { + // println("---------- WON RACE - setting state") + // // create connections for all new members in the latest gossip + // (latestAvailableNodes + gossipingNode) foreach { member ⇒ + // setUpConnectionToNode(member) + // oldState.memberMembershipChangeListeners foreach (_ memberConnected member) // notify listeners about the new members + // } + // } - val newUnavailableNodes = latestUnavailableNodes - gossipingNode - val newAvailableNodes = latestAvailableNodes + gossipingNode + // } else if 
(latestUnavailableNodes contains gossipingNode) { + // // gossip from an old former dead member - val newGossip = latestGossip copy (availableNodes = newAvailableNodes, unavailableNodes = newUnavailableNodes) - val newState = oldState copy (currentGossip = incrementVersionForGossip(newGossip)) + // val newUnavailableMembers = latestUnavailableNodes - gossipingNode + // val newMembers = latestAvailableNodes + gossipingNode - // if we won the race then update else try again - if (!state.compareAndSet(oldState, newState)) tell(newGossip) // recur - else oldState.nodeMembershipChangeListeners foreach (_ nodeConnected gossipingNode) // notify listeners on successful update of state - } + // val newGossip = latestGossip copy (availableNodes = newMembers, unavailableNodes = newUnavailableMembers) + // val newState = oldState copy (currentGossip = incrementVersionForGossip(newGossip)) + + // // if we won the race then update else try again + // if (!state.compareAndSet(oldState, newState)) tell(newGossip) // recur + // else oldState.memberMembershipChangeListeners foreach (_ memberConnected gossipingNode) // notify listeners on successful update of state + // } } /** @@ -242,8 +249,8 @@ case class Gossiper(remote: Remote, system: ActorSystemImpl) { @tailrec final def registerListener(listener: NodeMembershipChangeListener) { val oldState = state.get - val newListeners = oldState.nodeMembershipChangeListeners + listener - val newState = oldState copy (nodeMembershipChangeListeners = newListeners) + val newListeners = oldState.memberMembershipChangeListeners + listener + val newState = oldState copy (memberMembershipChangeListeners = newListeners) if (!state.compareAndSet(oldState, newState)) registerListener(listener) // recur } @@ -253,54 +260,54 @@ case class Gossiper(remote: Remote, system: ActorSystemImpl) { @tailrec final def unregisterListener(listener: NodeMembershipChangeListener) { val oldState = state.get - val newListeners = oldState.nodeMembershipChangeListeners - listener - val newState = oldState copy (nodeMembershipChangeListeners = newListeners) + val newListeners = oldState.memberMembershipChangeListeners - listener + val newState = oldState copy (memberMembershipChangeListeners = newListeners) if (!state.compareAndSet(oldState, newState)) unregisterListener(listener) // recur } /** - * Sets up remote connections to all the nodes in the argument list. + * Sets up remote connections to all the members in the argument list. 
*/ - private def connectToNodes(nodes: Seq[ParsedTransportAddress]) { - nodes foreach { node ⇒ - setUpConnectionToNode(node) - state.get.nodeMembershipChangeListeners foreach (_ nodeConnected node) // notify listeners about the new nodes + private def connectToNodes(members: Seq[Member]) { + members foreach { member ⇒ + setUpConnectionToNode(member) + state.get.memberMembershipChangeListeners foreach (_ memberConnected member) // notify listeners about the new members } } - // FIXME should shuffle list randomly before start traversing to avoid connecting to some node on every node + // FIXME should shuffle list randomly before start traversing to avoid connecting to some member on every member @tailrec - final private def connectToRandomNodeOf(nodes: Seq[ParsedTransportAddress]): ActorRef = { - nodes match { - case node :: rest ⇒ - setUpConnectionToNode(node) match { + final private def connectToRandomNodeOf(members: Seq[Member]): ActorRef = { + members match { + case member :: rest ⇒ + setUpConnectionToNode(member) match { case Some(connection) ⇒ connection case None ⇒ connectToRandomNodeOf(rest) // recur if } case Nil ⇒ throw new RemoteConnectionException( - "Could not establish connection to any of the nodes in the argument list") + "Could not establish connection to any of the members in the argument list") } } /** - * Joins the cluster by connecting to one of the seed nodes and retrieve current cluster state (Gossip). + * Joins the cluster by connecting to one of the seed members and retrieve current cluster state (Gossip). */ private def joinCluster(timer: Timer) { val seedNodes = seedNodesWithoutMyself // filter out myself - if (!seedNodes.isEmpty) { // if we have seed nodes to contact + if (!seedNodes.isEmpty) { // if we have seed members to contact connectToNodes(seedNodes) try { - log.info("Trying to join cluster through one of the seed nodes [{}]", seedNodes.mkString(", ")) + log.info("Trying to join cluster through one of the seed members [{}]", seedNodes.mkString(", ")) Await.result(connectToRandomNodeOf(seedNodes) ? JoinCluster, seedNodeConnectionTimeout) match { case initialGossip: Gossip ⇒ // just sets/overwrites the state/gossip regardless of what it was before // since it should be treated as the initial state state.set(state.get copy (currentGossip = initialGossip)) - log.debug("Received initial gossip [{}] from seed node", initialGossip) + log.debug("Received initial gossip [{}] from seed member", initialGossip) case unknown ⇒ throw new IllegalStateException("Expected initial gossip from seed, received [" + unknown + "]") @@ -308,13 +315,19 @@ case class Gossiper(remote: Remote, system: ActorSystemImpl) { } catch { case e: Exception ⇒ log.error( - "Could not join cluster through any of the seed nodes - retrying for another {} seconds", + "Could not join cluster through any of the seed members - retrying for another {} seconds", timer.timeLeft.toSeconds) - if (timer.timeLeft.toMillis > 0) joinCluster(timer) // recur - retry joining the cluster - else throw new RemoteConnectionException( - "Could not join cluster (any of the seed nodes) - giving up after trying for " + - timer.timeout.toSeconds + " seconds") + // retry joining the cluster unless + // 1. Gossiper is shut down + // 2. 
The connection time window has expired + if (isRunning.get) { + println("=======>>> isRun: " + isRunning.get + " " + remoteAddress) + if (timer.timeLeft.toMillis > 0) joinCluster(timer) // recur + else throw new RemoteConnectionException( + "Could not join cluster (any of the seed members) - giving up after trying for " + + timer.timeout.toSeconds + " seconds") + } } } } @@ -326,64 +339,50 @@ case class Gossiper(remote: Remote, system: ActorSystemImpl) { val oldState = state.get val oldGossip = oldState.currentGossip - val oldAvailableNodes = oldGossip.availableNodes - val oldUnavailableNodes = oldGossip.unavailableNodes + val oldMembers = oldGossip.members + val oldMembersSize = oldMembers.size - val oldAvailableNodesSize = oldAvailableNodes.size - val oldUnavailableNodesSize = oldUnavailableNodes.size + val oldUnavailableMembers = oldGossip.unavailableMembers + val oldUnavailableMembersSize = oldUnavailableMembers.size - // 1. gossip to alive nodes + // 1. gossip to alive members val gossipedToSeed = - if (oldAvailableNodesSize > 0) gossipToRandomNodeOf(oldAvailableNodes) + if (oldUnavailableMembersSize > 0) gossipToRandomNodeOf(oldMembers) else false - // 2. gossip to dead nodes - if (oldUnavailableNodesSize > 0) { - val probability: Double = oldUnavailableNodesSize / (oldAvailableNodesSize + 1) - if (random.nextDouble() < probability) gossipToRandomNodeOf(oldUnavailableNodes) + // 2. gossip to dead members + if (oldUnavailableMembersSize > 0) { + val probability: Double = oldUnavailableMembersSize / (oldMembersSize + 1) + if (random.nextDouble() < probability) gossipToRandomNodeOf(oldUnavailableMembers) } // 3. gossip to a seed for facilitating partition healing - if ((!gossipedToSeed || oldAvailableNodesSize < 1) && (seeds.head != remoteAddress)) { - if (oldAvailableNodesSize == 0) gossipToRandomNodeOf(seeds) + if ((!gossipedToSeed || oldMembersSize < 1) && (seeds.head != remoteAddress)) { + if (oldMembersSize == 0) gossipToRandomNodeOf(seeds) else { - val probability = 1.0 / oldAvailableNodesSize + oldUnavailableNodesSize + val probability = 1.0 / oldMembersSize + oldUnavailableMembersSize if (random.nextDouble() <= probability) gossipToRandomNodeOf(seeds) } } } /** - * Gossips to a random node in the set of nodes passed in as argument. + * Gossips to a random member in the set of members passed in as argument. * - * @returns 'true' if it gossiped to a "seed" node. + * @returns 'true' if it gossiped to a "seed" member. */ - private def gossipToRandomNodeOf(nodes: Set[ParsedTransportAddress]): Boolean = { - val peers = nodes filter (_ != remoteAddress) // filter out myself + private def gossipToRandomNodeOf(members: Set[Member]): Boolean = { + val peers = members filter (_.address != remoteAddress) // filter out myself val peer = selectRandomNode(peers) val oldState = state.get val oldGossip = oldState.currentGossip - - setUpConnectionToNode(peer) match { - case Some(connection) ⇒ - try { - Await.result(connection ? newGossip, seedNodeConnectionTimeout) match { - case Success(receiver) ⇒ log.debug("Gossip sent to [{}] was successfully received", receiver) - case Failure(cause) ⇒ log.error(cause, cause.toString) - } - } catch { - case e: TimeoutException ⇒ log.error(e, "Gossip to [%s] timed out".format(connection.path)) - case e: Exception ⇒ log.error(e, "Could not gossip to [{}] due to: {}", connection.path, e.toString) - } - case None ⇒ - // FIXME what to do if the node can't be reached for gossiping - mark as unavailable in failure detector? 
- } - + // if connection can't be established/found => ignore it since the failure detector will take care of the potential problem + setUpConnectionToNode(peer) foreach { _ ! newGossip } seeds exists (peer == _) } /** - * Scrutinizes the cluster; marks nodes detected by the failure detector as unavailable, and notifies all listeners + * Scrutinizes the cluster; marks members detected by the failure detector as unavailable, and notifies all listeners * of the change in the cluster membership. */ @tailrec @@ -391,15 +390,15 @@ case class Gossiper(remote: Remote, system: ActorSystemImpl) { val oldState = state.get val oldGossip = oldState.currentGossip - val oldAvailableNodes = oldGossip.availableNodes - val oldUnavailableNodes = oldGossip.unavailableNodes - val newlyDetectedUnavailableNodes = oldAvailableNodes filterNot failureDetector.isAvailable + val oldMembers = oldGossip.members + val oldUnavailableMembers = oldGossip.unavailableMembers + val newlyDetectedUnavailableMembers = oldMembers filterNot (member ⇒ failureDetector.isAvailable(member.address)) - if (!newlyDetectedUnavailableNodes.isEmpty) { // we have newly detected nodes marked as unavailable - val newAvailableNodes = oldAvailableNodes diff newlyDetectedUnavailableNodes - val newUnavailableNodes = oldUnavailableNodes ++ newlyDetectedUnavailableNodes + if (!newlyDetectedUnavailableMembers.isEmpty) { // we have newly detected members marked as unavailable + val newMembers = oldMembers diff newlyDetectedUnavailableMembers + val newUnavailableMembers = oldUnavailableMembers ++ newlyDetectedUnavailableMembers - val newGossip = oldGossip copy (availableNodes = newAvailableNodes, unavailableNodes = newUnavailableNodes) + val newGossip = oldGossip copy (members = newMembers, unavailableMembers = newUnavailableMembers) val newState = oldState copy (currentGossip = incrementVersionForGossip(newGossip)) // if we won the race then update else try again @@ -407,48 +406,35 @@ case class Gossiper(remote: Remote, system: ActorSystemImpl) { else { // notify listeners on successful update of state for { - deadNode ← newUnavailableNodes - listener ← oldState.nodeMembershipChangeListeners - } listener nodeDisconnected deadNode + deadNode ← newUnavailableMembers + listener ← oldState.memberMembershipChangeListeners + } listener memberDisconnected deadNode } } } - private def setUpConnectionToNode(node: ParsedTransportAddress): Option[ActorRef] = { - //connectionManager.newConnection(node, RootActorPath(RemoteSystemAddress(system.name, node)) / "system" / "cluster") + private def setUpConnectionToNode(member: Member): Option[ActorRef] = { + val address = member.address try { Some( connectionManager.putIfAbsent( - node, - () ⇒ system.actorFor(RootActorPath(RemoteSystemAddress(system.name, node)) / "system" / "cluster"))) - // connectionManager.connectionFor(node).getOrElse( - // throw new RemoteConnectionException("Could not set up connection to node [" + node + "]")) + address, + () ⇒ system.actorFor(RootActorPath(RemoteSystemAddress(system.name, address)) / "system" / "cluster"))) } catch { case e: Exception ⇒ None } } - private def newGossip(): Gossip = Gossip( - version = VectorClock(), - node = address, - availableNodes = Set(address)) + private def newGossip(): Gossip = Gossip(member = address) private def incrementVersionForGossip(from: Gossip): Gossip = { - val newVersion = from.version.increment(nodeFingerprint, newTimestamp) + val newVersion = from.version.increment(memberFingerprint, newTimestamp) from copy (version = newVersion) } - 
private def latestVersionOf(newGossip: Gossip, oldGossip: Gossip): Gossip = { - (newGossip.version compare oldGossip.version) match { - case VectorClock.After ⇒ newGossip // gossiped version is newer, use new version - case VectorClock.Before ⇒ oldGossip // gossiped version is older, use old version - case VectorClock.Concurrent ⇒ oldGossip // can't establish a causal relationship between two versions => conflict - } - } + private def seedNodesWithoutMyself: List[Member] = seeds.filter(_ != remoteAddress.transport).toList - private def seedNodesWithoutMyself: List[Address] = seeds.filter(_ != remoteAddress.transport).toList - - private def selectRandomNode(nodes: Set[Address]): Address = { - nodes.toList(random.nextInt(nodes.size)) + private def selectRandomNode(members: Set[Member]): Member = { + members.toList(random.nextInt(members.size)) } } From f914dfe83bfb44f8748d8e31020a04b8508de2a3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Mon, 30 Jan 2012 19:40:28 +0100 Subject: [PATCH 56/94] Merged with master. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- .../src/main/scala/akka/remote/Gossiper.scala | 40 ++--- .../akka/remote/RemoteActorRefProvider.scala | 13 +- .../scala/akka/remote/RemoteSettings.scala | 6 +- .../GossipingAccrualFailureDetectorSpec.scala | 166 +++++++++--------- 4 files changed, 115 insertions(+), 110 deletions(-) diff --git a/akka-remote/src/main/scala/akka/remote/Gossiper.scala b/akka-remote/src/main/scala/akka/remote/Gossiper.scala index e5b6e938bc..55165f0891 100644 --- a/akka-remote/src/main/scala/akka/remote/Gossiper.scala +++ b/akka-remote/src/main/scala/akka/remote/Gossiper.scala @@ -36,6 +36,8 @@ trait NodeMembershipChangeListener { /** * Base trait for all cluster messages. All ClusterMessage's are serializable. */ +sealed trait ClusterMessage extends Serializable + /** * Command to join the cluster. */ @@ -116,7 +118,7 @@ final class ClusterDaemon(system: ActorSystem, gossiper: Gossiper) extends Actor * gossip to random seed with certain probability depending on number of unreachable, seed and live members. * */ -case class Gossiper(remote: Remote, system: ActorSystemImpl) { +case class Gossiper(remote: RemoteActorRefProvider, system: ActorSystemImpl) { /** * Represents the state for this Gossiper. Implemented using optimistic lockless concurrency, @@ -128,10 +130,15 @@ case class Gossiper(remote: Remote, system: ActorSystemImpl) { // configuration private val remoteSettings = remote.remoteSettings + + private val protocol = "akka" // TODO should this be hardcoded? 
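+  // the address this node is reachable at, as bound by the remote transport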
+ private val address = remote.transport.address + private val memberFingerprint = address.## + private val serialization = remote.serialization private val failureDetector = remote.failureDetector - private val initalDelayForGossip = remoteSettings.InitalDelayForGossip + private val initialDelayForGossip = remoteSettings.InitialDelayForGossip private val gossipFrequency = remoteSettings.GossipFrequency implicit val seedNodeConnectionTimeout = remoteSettings.SeedNodeConnectionTimeout @@ -139,17 +146,9 @@ case class Gossiper(remote: Remote, system: ActorSystemImpl) { // seed members private val seeds: Set[Member] = { - val seeds = remoteSettings.SeedNodes flatMap { - case uta: UnparsedTransportAddress ⇒ - uta.parse(remote.transports) match { - case pta: Address ⇒ Some(Member(pta, MemberStatus.Up())) - case _ ⇒ None - } - case _ ⇒ None - } if (remoteSettings.SeedNodes.isEmpty) throw new ConfigurationException( "At least one seed member must be defined in the configuration [akka.cluster.seed-members]") - else remoteSettings.SeedNodes + else remoteSettings.SeedNodes map (address ⇒ Member(address, MemberStatus.Up())) } private val isRunning = new AtomicBoolean(true) @@ -168,17 +167,17 @@ case class Gossiper(remote: Remote, system: ActorSystemImpl) { // start periodic gossip and cluster scrutinization val initateGossipCanceller = system.scheduler.schedule( - Duration(initalDelayForGossip.toSeconds, SECONDS), Duration(gossipFrequency.toSeconds, SECONDS))(initateGossip()) + Duration(initialDelayForGossip.toSeconds, SECONDS), Duration(gossipFrequency.toSeconds, SECONDS))(initateGossip()) val scrutinizeCanceller = system.scheduler.schedule( - Duration(initalDelayForGossip.toSeconds, SECONDS), Duration(gossipFrequency.toSeconds, SECONDS))(scrutinize()) + Duration(initialDelayForGossip.toSeconds, SECONDS), Duration(gossipFrequency.toSeconds, SECONDS))(scrutinize()) /** * Shuts down all connections to other members, the cluster daemon and the periodic gossip and cleanup tasks. */ def shutdown() { if (isRunning.compareAndSet(true, false)) { - log.info("Shutting down Gossiper for [{}]", remoteAddress) + log.info("Shutting down Gossiper for [{}]", address) connectionManager.shutdown() system.stop(clusterDaemon) initateGossipCanceller.cancel() @@ -322,7 +321,6 @@ case class Gossiper(remote: Remote, system: ActorSystemImpl) { // 1. Gossiper is shut down // 2. The connection time window has expired if (isRunning.get) { - println("=======>>> isRun: " + isRunning.get + " " + remoteAddress) if (timer.timeLeft.toMillis > 0) joinCluster(timer) // recur else throw new RemoteConnectionException( "Could not join cluster (any of the seed members) - giving up after trying for " + @@ -357,7 +355,7 @@ case class Gossiper(remote: Remote, system: ActorSystemImpl) { } // 3. gossip to a seed for facilitating partition healing - if ((!gossipedToSeed || oldMembersSize < 1) && (seeds.head != remoteAddress)) { + if ((!gossipedToSeed || oldMembersSize < 1) && (seeds.head != address)) { if (oldMembersSize == 0) gossipToRandomNodeOf(seeds) else { val probability = 1.0 / oldMembersSize + oldUnavailableMembersSize @@ -372,7 +370,7 @@ case class Gossiper(remote: Remote, system: ActorSystemImpl) { * @returns 'true' if it gossiped to a "seed" member. 
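 *
 * If no connection to the chosen peer can be established the round is skipped silently;
 * a genuinely unreachable member is expected to be caught by the failure detector instead.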
*/ private def gossipToRandomNodeOf(members: Set[Member]): Boolean = { - val peers = members filter (_.address != remoteAddress) // filter out myself + val peers = members filter (_.address != address) // filter out myself val peer = selectRandomNode(peers) val oldState = state.get val oldGossip = oldState.currentGossip @@ -419,7 +417,7 @@ case class Gossiper(remote: Remote, system: ActorSystemImpl) { Some( connectionManager.putIfAbsent( address, - () ⇒ system.actorFor(RootActorPath(RemoteSystemAddress(system.name, address)) / "system" / "cluster"))) + () ⇒ system.actorFor(RootActorPath(Address(protocol, system.name)) / "system" / "cluster"))) } catch { case e: Exception ⇒ None } @@ -432,9 +430,7 @@ case class Gossiper(remote: Remote, system: ActorSystemImpl) { from copy (version = newVersion) } - private def seedNodesWithoutMyself: List[Member] = seeds.filter(_ != remoteAddress.transport).toList + private def seedNodesWithoutMyself: List[Member] = seeds.filter(_.address != address).toList - private def selectRandomNode(members: Set[Member]): Member = { - members.toList(random.nextInt(members.size)) - } + private def selectRandomNode(members: Set[Member]): Member = members.toList(random.nextInt(members.size)) } diff --git a/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala b/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala index 6081372e6b..a169f9e9b5 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala @@ -4,6 +4,7 @@ package akka.remote +import akka.AkkaException import akka.actor._ import akka.dispatch._ import akka.event.{ DeathWatch, Logging, LoggingAdapter } @@ -15,6 +16,10 @@ import akka.util.ReflectiveAccess import akka.serialization.Serialization import akka.serialization.SerializationExtension +class RemoteException(msg: String) extends AkkaException(msg) +class RemoteCommunicationException(msg: String) extends RemoteException(msg) +class RemoteConnectionException(msg: String) extends RemoteException(msg) + /** * Remote ActorRefProvider. Starts up actor on remote node and creates a RemoteActorRef representing it. 
*/ @@ -41,8 +46,6 @@ class RemoteActorRefProvider( val deathWatch = new RemoteDeathWatch(local.deathWatch, this) - val failureDetector = new AccrualFailureDetector(remoteSettings.FailureDetectorThreshold, remoteSettings.FailureDetectorMaxSampleSize) - // these are only available after init() def rootGuardian = local.rootGuardian def guardian = local.guardian @@ -54,6 +57,10 @@ class RemoteActorRefProvider( def tempPath() = local.tempPath() def tempContainer = local.tempContainer + @volatile + private var _failureDetector: AccrualFailureDetector = _ + def failureDetector: AccrualFailureDetector = _failureDetector + @volatile private var _transport: RemoteTransport = _ def transport: RemoteTransport = _transport @@ -73,6 +80,8 @@ class RemoteActorRefProvider( def init(system: ActorSystemImpl) { local.init(system) + _failureDetector = new AccrualFailureDetector(remoteSettings.FailureDetectorThreshold, remoteSettings.FailureDetectorMaxSampleSize, system) + _remoteDaemon = new RemoteSystemDaemon(system, rootPath / "remote", rootGuardian, log) local.registerExtraNames(Map(("remote", remoteDaemon))) diff --git a/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala b/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala index a2ca0435b9..84428d739b 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala @@ -29,10 +29,10 @@ class RemoteSettings(val config: Config, val systemName: String) { val UseCluster = getBoolean("akka.cluster.use-cluster") val SeedNodeConnectionTimeout = Duration(config.getMilliseconds("akka.cluster.seed-node-connection-timeout"), MILLISECONDS) val MaxTimeToRetryJoiningCluster = Duration(config.getMilliseconds("akka.cluster.max-time-to-retry-joining-cluster"), MILLISECONDS) - val InitalDelayForGossip = Duration(getMilliseconds("akka.cluster.gossip.initialDelay"), MILLISECONDS) + val InitialDelayForGossip = Duration(getMilliseconds("akka.cluster.gossip.initialDelay"), MILLISECONDS) val GossipFrequency = Duration(getMilliseconds("akka.cluster.gossip.frequency"), MILLISECONDS) - val SeedNodes = Set.empty[RemoteNettyAddress] ++ getStringList("akka.cluster.seed-nodes").asScala.collect { - case RemoteAddressExtractor(addr) ⇒ addr.transport + val SeedNodes = Set.empty[Address] ++ getStringList("akka.cluster.seed-nodes").asScala.collect { + case AddressExtractor(addr) ⇒ addr } val RemoteSystemDaemonAckTimeout = Duration(getMilliseconds("akka.remote.remote-daemon-ack-timeout"), MILLISECONDS) diff --git a/akka-remote/src/test/scala/akka/remote/GossipingAccrualFailureDetectorSpec.scala b/akka-remote/src/test/scala/akka/remote/GossipingAccrualFailureDetectorSpec.scala index 85f1c5a084..1e954b34fb 100644 --- a/akka-remote/src/test/scala/akka/remote/GossipingAccrualFailureDetectorSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/GossipingAccrualFailureDetectorSpec.scala @@ -1,95 +1,95 @@ -/** - * Copyright (C) 2009-2011 Typesafe Inc. - */ -package akka.remote +// /** +// * Copyright (C) 2009-2011 Typesafe Inc. 
+// */ +// package akka.remote -import java.net.InetSocketAddress +// import java.net.InetSocketAddress -import akka.testkit._ -import akka.dispatch._ -import akka.actor._ -import com.typesafe.config._ +// import akka.testkit._ +// import akka.dispatch._ +// import akka.actor._ +// import com.typesafe.config._ -class GossipingAccrualFailureDetectorSpec extends AkkaSpec(""" - akka { - loglevel = "INFO" - actor.provider = "akka.remote.RemoteActorRefProvider" +// class GossipingAccrualFailureDetectorSpec extends AkkaSpec(""" +// akka { +// loglevel = "INFO" +// actor.provider = "akka.remote.RemoteActorRefProvider" - remote.server.hostname = localhost - remote.server.port = 5550 - remote.failure-detector.threshold = 3 - cluster.seed-nodes = ["akka://localhost:5551"] - } - """) with ImplicitSender { +// remote.server.hostname = localhost +// remote.server.port = 5550 +// remote.failure-detector.threshold = 3 +// cluster.seed-nodes = ["akka://localhost:5551"] +// } +// """) with ImplicitSender { - val conn1 = RemoteNettyAddress("localhost", 5551) - val node1 = ActorSystem("GossiperSpec", ConfigFactory - .parseString("akka { remote.server.port=5551, cluster.use-cluster = on }") - .withFallback(system.settings.config)) - val remote1 = - node1.asInstanceOf[ActorSystemImpl] - .provider.asInstanceOf[RemoteActorRefProvider] - .remote - val gossiper1 = remote1.gossiper - val fd1 = remote1.failureDetector - gossiper1 must be('defined) +// val conn1 = Address("akka", system.systemName, Some("localhost"), Some(5551)) +// val node1 = ActorSystem("GossiperSpec", ConfigFactory +// .parseString("akka { remote.server.port=5551, cluster.use-cluster = on }") +// .withFallback(system.settings.config)) +// val remote1 = +// node1.asInstanceOf[ActorSystemImpl] +// .provider.asInstanceOf[RemoteActorRefProvider] +// .remote +// val gossiper1 = remote1.gossiper +// val fd1 = remote1.failureDetector +// gossiper1 must be('defined) - val conn2 = RemoteNettyAddress("localhost", 5552) - val node2 = ActorSystem("GossiperSpec", ConfigFactory - .parseString("akka { remote.server.port=5552, cluster.use-cluster = on }") - .withFallback(system.settings.config)) - val remote2 = - node2.asInstanceOf[ActorSystemImpl] - .provider.asInstanceOf[RemoteActorRefProvider] - .remote - val gossiper2 = remote2.gossiper - val fd2 = remote2.failureDetector - gossiper2 must be('defined) +// val conn2 = RemoteNettyAddress("localhost", 5552) +// val node2 = ActorSystem("GossiperSpec", ConfigFactory +// .parseString("akka { remote.server.port=5552, cluster.use-cluster = on }") +// .withFallback(system.settings.config)) +// val remote2 = +// node2.asInstanceOf[ActorSystemImpl] +// .provider.asInstanceOf[RemoteActorRefProvider] +// .remote +// val gossiper2 = remote2.gossiper +// val fd2 = remote2.failureDetector +// gossiper2 must be('defined) - val conn3 = RemoteNettyAddress("localhost", 5553) - val node3 = ActorSystem("GossiperSpec", ConfigFactory - .parseString("akka { remote.server.port=5553, cluster.use-cluster = on }") - .withFallback(system.settings.config)) - val remote3 = - node3.asInstanceOf[ActorSystemImpl] - .provider.asInstanceOf[RemoteActorRefProvider] - .remote - val gossiper3 = remote3.gossiper - val fd3 = remote3.failureDetector - gossiper3 must be('defined) +// val conn3 = RemoteNettyAddress("localhost", 5553) +// val node3 = ActorSystem("GossiperSpec", ConfigFactory +// .parseString("akka { remote.server.port=5553, cluster.use-cluster = on }") +// .withFallback(system.settings.config)) +// val remote3 = +// 
node3.asInstanceOf[ActorSystemImpl] +// .provider.asInstanceOf[RemoteActorRefProvider] +// .remote +// val gossiper3 = remote3.gossiper +// val fd3 = remote3.failureDetector +// gossiper3 must be('defined) - "A Gossip-driven Failure Detector" must { +// "A Gossip-driven Failure Detector" must { - "receive gossip heartbeats so that all healthy nodes in the cluster are marked 'available'" ignore { - Thread.sleep(5000) // let them gossip for 10 seconds - fd1.isAvailable(conn2) must be(true) - fd1.isAvailable(conn3) must be(true) - fd2.isAvailable(conn1) must be(true) - fd2.isAvailable(conn3) must be(true) - fd3.isAvailable(conn1) must be(true) - fd3.isAvailable(conn2) must be(true) - } +// "receive gossip heartbeats so that all healthy nodes in the cluster are marked 'available'" ignore { +// Thread.sleep(5000) // let them gossip for 10 seconds +// fd1.isAvailable(conn2) must be(true) +// fd1.isAvailable(conn3) must be(true) +// fd2.isAvailable(conn1) must be(true) +// fd2.isAvailable(conn3) must be(true) +// fd3.isAvailable(conn1) must be(true) +// fd3.isAvailable(conn2) must be(true) +// } - "mark node as 'unavailable' if a node in the cluster is shut down and its heartbeats stops" ignore { - // kill node 3 - gossiper3.get.shutdown() - node3.shutdown() - Thread.sleep(5000) // let them gossip for 10 seconds +// "mark node as 'unavailable' if a node in the cluster is shut down and its heartbeats stops" ignore { +// // kill node 3 +// gossiper3.get.shutdown() +// node3.shutdown() +// Thread.sleep(5000) // let them gossip for 10 seconds - fd1.isAvailable(conn2) must be(true) - fd1.isAvailable(conn3) must be(false) - fd2.isAvailable(conn1) must be(true) - fd2.isAvailable(conn3) must be(false) - } - } +// fd1.isAvailable(conn2) must be(true) +// fd1.isAvailable(conn3) must be(false) +// fd2.isAvailable(conn1) must be(true) +// fd2.isAvailable(conn3) must be(false) +// } +// } - override def atTermination() { - gossiper1.get.shutdown() - gossiper2.get.shutdown() - gossiper3.get.shutdown() - node1.shutdown() - node2.shutdown() - node3.shutdown() - // FIXME Ordering problem - If we shut down the ActorSystem before the Gossiper then we get an IllegalStateException - } -} +// override def atTermination() { +// gossiper1.get.shutdown() +// gossiper2.get.shutdown() +// gossiper3.get.shutdown() +// node1.shutdown() +// node2.shutdown() +// node3.shutdown() +// // FIXME Ordering problem - If we shut down the ActorSystem before the Gossiper then we get an IllegalStateException +// } +// } From 64301f5d7762daf6c56070453834c4d3c82b519d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Mon, 30 Jan 2012 19:57:05 +0100 Subject: [PATCH 57/94] Moved failure detector config from 'remote' to 'cluster' --- akka-remote/src/main/resources/reference.conf | 38 ++++++++++--------- .../scala/akka/remote/RemoteSettings.scala | 6 +-- 2 files changed, 22 insertions(+), 22 deletions(-) diff --git a/akka-remote/src/main/resources/reference.conf b/akka-remote/src/main/resources/reference.conf index f9c6430f6f..76f1980615 100644 --- a/akka-remote/src/main/resources/reference.conf +++ b/akka-remote/src/main/resources/reference.conf @@ -61,23 +61,23 @@ akka { # it reuses inbound connections for replies, which is called a passive client connection (i.e. from server # to client). netty { - + # (O) In case of increased latency / overflow how long # should we wait (blocking the sender) until we deem the send to be cancelled? # 0 means "never backoff", any positive number will indicate time to block at most. 
 backoff-timeout = 0ms
-
+
 # (I&O) Generate your own with '$AKKA_HOME/scripts/generate_config_with_secure_cookie.sh'
 # or using 'akka.util.Crypt.generateSecureCookie'
 secure-cookie = ""
-
+
 # (I) Should the remote server require that its peers share the same secure-cookie
 # (defined in the 'remote' section)?
 require-cookie = off
 # (I) Reuse inbound connections for outbound messages
 use-passive-connections = on
-
+
 # (I) The hostname or ip to bind the remoting to,
 # InetAddress.getLocalHost.getHostAddress is used if empty
 hostname = ""
@@ -118,19 +118,6 @@ akka {
 reconnection-time-window = 600s
 }
- # accrual failure detection config
- failure-detector {
-
- # defines the failure detector threshold
- # A low threshold is prone to generate many wrong suspicions but ensures
- # a quick detection in the event of a real crash. Conversely, a high
- # threshold generates fewer mistakes but needs more time to detect
- # actual crashes
- threshold = 8
-
- max-sample-size = 1000
- }
-
 # The dispatcher used for remote system messages
 compute-grid-dispatcher {
 # defaults to same settings as default-dispatcher
@@ -146,9 +133,24 @@ akka {
 cluster {
 use-cluster = off
+
 seed-nodes = []
- max-time-to-retry-joining-cluster = 30s
 seed-node-connection-timeout = 30s
+ max-time-to-retry-joining-cluster = 30s
+
+ # accrual failure detection config
+ failure-detector {
+
+ # defines the failure detector threshold
+ # A low threshold is prone to generate many wrong suspicions but ensures
+ # a quick detection in the event of a real crash. Conversely, a high
+ # threshold generates fewer mistakes but needs more time to detect
+ # actual crashes
+ threshold = 8
+
+ max-sample-size = 1000
+ }
+
 gossip {
 initialDelay = 5s
 frequency = 1s
diff --git a/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala b/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala
index 84428d739b..0060233246 100644
--- a/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala
+++ b/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala
@@ -20,13 +20,11 @@ class RemoteSettings(val config: Config, val systemName: String) {
 val LogReceive = getBoolean("akka.remote.log-received-messages")
 val LogSend = getBoolean("akka.remote.log-sent-messages")
- // AccrualFailureDetector
- val FailureDetectorThreshold = getInt("akka.remote.failure-detector.threshold")
- val FailureDetectorMaxSampleSize = getInt("akka.remote.failure-detector.max-sample-size")
-
 // TODO cluster config will go into akka-cluster/reference.conf when we enable that module
 // cluster config section
 val UseCluster = getBoolean("akka.cluster.use-cluster")
+ val FailureDetectorThreshold = getInt("akka.cluster.failure-detector.threshold")
+ val FailureDetectorMaxSampleSize = getInt("akka.cluster.failure-detector.max-sample-size")
 val SeedNodeConnectionTimeout = Duration(config.getMilliseconds("akka.cluster.seed-node-connection-timeout"), MILLISECONDS)
 val MaxTimeToRetryJoiningCluster = Duration(config.getMilliseconds("akka.cluster.max-time-to-retry-joining-cluster"), MILLISECONDS)
 val InitialDelayForGossip = Duration(getMilliseconds("akka.cluster.gossip.initialDelay"), MILLISECONDS)
From 0fa184560c183cb135918234768eb199d6c3fc4a Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jonas=20Bone=CC=81r?=
Date: Tue, 31 Jan 2012 13:33:04 +0100
Subject: [PATCH 58/94] Moved Gossiper, FailureDetector and VectorClock (with tests) to the akka-cluster module. Deleted all old unused cluster code (ZooKeeper-based stuff).
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- .../java/akka/cluster/LocalBookKeeper.java | 187 -- .../cluster/zookeeper/DistributedQueue.java | 312 --- .../cluster/zookeeper/ZooKeeperQueue.java | 173 -- .../cluster}/AccrualFailureDetector.scala | 12 +- .../scala/akka/cluster/BookKeeperServer.scala | 35 - .../src/main/scala/akka/cluster/Cluster.scala | 1876 ----------------- .../scala/akka/cluster/ClusterActorRef.scala | 129 -- .../scala/akka/cluster/ClusterDeployer.scala | 205 -- .../main/scala/akka/cluster}/Gossiper.scala | 15 +- .../scala/akka/cluster/LocalCluster.scala | 105 - .../cluster}/RemoteConnectionManager.scala | 8 +- .../scala/akka/cluster/TransactionLog.scala | 604 ------ .../scala/akka/cluster}/VectorClock.scala | 2 +- .../metrics/LocalNodeMetricsManager.scala | 226 -- .../cluster/metrics/MetricsProvider.scala | 154 -- .../scala/akka/cluster/storage/Storage.scala | 366 ---- .../akka/cluster/zookeeper/AkkaZkClient.scala | 34 - .../cluster/zookeeper/AkkaZooKeeper.scala | 32 - .../cluster/zookeeper/ZooKeeperBarrier.scala | 104 - .../GossipMembershipMultiJvmSpec.scala | 2 +- .../NewLeaderChangeListenerMultiJvmNode1.conf | 2 - .../NewLeaderChangeListenerMultiJvmNode1.opts | 1 - .../NewLeaderChangeListenerMultiJvmNode2.conf | 2 - .../NewLeaderChangeListenerMultiJvmNode2.opts | 1 - .../NewLeaderChangeListenerMultiJvmSpec.scala | 63 - ...eConnectedChangeListenerMultiJvmNode1.conf | 2 - ...eConnectedChangeListenerMultiJvmNode1.opts | 1 - ...eConnectedChangeListenerMultiJvmNode2.conf | 2 - ...eConnectedChangeListenerMultiJvmNode2.opts | 1 - ...eConnectedChangeListenerMultiJvmSpec.scala | 65 - ...sconnectedChangeListenerMultiJvmNode1.conf | 2 - ...sconnectedChangeListenerMultiJvmNode1.opts | 1 - ...sconnectedChangeListenerMultiJvmNode2.conf | 2 - ...sconnectedChangeListenerMultiJvmNode2.opts | 1 - ...sconnectedChangeListenerMultiJvmSpec.scala | 65 - .../ConfigurationStorageMultiJvmNode1.conf | 2 - .../ConfigurationStorageMultiJvmNode1.opts | 1 - .../ConfigurationStorageMultiJvmNode2.conf | 2 - .../ConfigurationStorageMultiJvmNode2.opts | 1 - .../ConfigurationStorageMultiJvmSpec.scala | 89 - .../election/LeaderElectionMultiJvmNode1.conf | 2 - .../election/LeaderElectionMultiJvmNode1.opts | 1 - .../election/LeaderElectionMultiJvmNode2.conf | 2 - .../election/LeaderElectionMultiJvmNode2.opts | 1 - .../election/LeaderElectionMultiJvmSpec.scala | 71 - .../registry/RegistryStoreMultiJvmNode1.conf | 2 - .../registry/RegistryStoreMultiJvmNode1.opts | 1 - .../registry/RegistryStoreMultiJvmNode2.conf | 2 - .../registry/RegistryStoreMultiJvmNode2.opts | 1 - .../registry/RegistryStoreMultiJvmSpec.scala | 116 - .../deployment/DeploymentMultiJvmNode1.conf | 4 - .../deployment/DeploymentMultiJvmNode1.opts | 1 - .../deployment/DeploymentMultiJvmNode2.conf | 4 - .../deployment/DeploymentMultiJvmNode2.opts | 1 - .../deployment/DeploymentMultiJvmSpec.scala | 75 - .../local/LocalMetricsMultiJvmNode1.conf | 4 - .../local/LocalMetricsMultiJvmNode1.opts | 1 - .../local/LocalMetricsMultiJvmSpec.scala | 134 -- .../remote/RemoteMetricsMultiJvmNode1.conf | 3 - .../remote/RemoteMetricsMultiJvmNode1.opts | 1 - .../remote/RemoteMetricsMultiJvmNode2.conf | 3 - .../remote/RemoteMetricsMultiJvmNode2.opts | 1 - .../remote/RemoteMetricsMultiJvmSpec.scala | 133 -- .../MigrationExplicitMultiJvmNode1.conf | 2 - .../MigrationExplicitMultiJvmNode1.opts | 1 - .../MigrationExplicitMultiJvmNode2.conf | 2 - .../MigrationExplicitMultiJvmNode2.opts | 1 - 
.../MigrationExplicitMultiJvmSpec.scala | 112 - .../ClusterActorRefCleanupMultiJvmNode1.conf | 6 - .../ClusterActorRefCleanupMultiJvmNode1.opts | 1 - .../ClusterActorRefCleanupMultiJvmNode2.conf | 5 - .../ClusterActorRefCleanupMultiJvmNode2.opts | 1 - .../ClusterActorRefCleanupMultiJvmNode3.conf | 5 - .../ClusterActorRefCleanupMultiJvmNode3.opts | 1 - .../ClusterActorRefCleanupMultiJvmSpec.scala | 154 -- ...LogWriteBehindNoSnapshotMultiJvmNode1.conf | 7 - ...LogWriteBehindNoSnapshotMultiJvmNode1.opts | 1 - ...LogWriteBehindNoSnapshotMultiJvmNode2.conf | 7 - ...LogWriteBehindNoSnapshotMultiJvmNode2.opts | 1 - ...LogWriteBehindNoSnapshotMultiJvmSpec.scala | 99 - ...onLogWriteBehindSnapshotMultiJvmNode1.conf | 7 - ...onLogWriteBehindSnapshotMultiJvmNode1.opts | 1 - ...onLogWriteBehindSnapshotMultiJvmNode2.conf | 7 - ...onLogWriteBehindSnapshotMultiJvmNode2.opts | 1 - ...onLogWriteBehindSnapshotMultiJvmSpec.scala | 118 -- ...ogWriteThroughNoSnapshotMultiJvmNode1.conf | 7 - ...ogWriteThroughNoSnapshotMultiJvmNode1.opts | 1 - ...ogWriteThroughNoSnapshotMultiJvmNode2.conf | 7 - ...ogWriteThroughNoSnapshotMultiJvmNode2.opts | 1 - ...ogWriteThroughNoSnapshotMultiJvmSpec.scala | 99 - ...nLogWriteThroughSnapshotMultiJvmNode1.conf | 7 - ...nLogWriteThroughSnapshotMultiJvmNode1.opts | 1 - ...nLogWriteThroughSnapshotMultiJvmNode2.conf | 7 - ...nLogWriteThroughSnapshotMultiJvmNode2.opts | 1 - ...nLogWriteThroughSnapshotMultiJvmSpec.scala | 116 - .../DirectRoutingFailoverMultiJvmNode1.conf | 5 - .../DirectRoutingFailoverMultiJvmNode1.opts | 1 - .../DirectRoutingFailoverMultiJvmNode2.conf | 5 - .../DirectRoutingFailoverMultiJvmNode2.opts | 1 - .../DirectRoutingFailoverMultiJvmSpec.scala | 90 - .../homenode/HomeNode1MultiJvmSpec.scala | 60 - .../homenode/HomeNodeMultiJvmNode1.conf | 6 - .../homenode/HomeNodeMultiJvmNode1.opts | 1 - .../homenode/HomeNodeMultiJvmNode2.conf | 6 - .../homenode/HomeNodeMultiJvmNode2.opts | 1 - ...ngleReplicaDirectRoutingMultiJvmNode1.conf | 4 - ...ngleReplicaDirectRoutingMultiJvmNode1.opts | 1 - ...ngleReplicaDirectRoutingMultiJvmNode2.conf | 4 - ...ngleReplicaDirectRoutingMultiJvmNode2.opts | 1 - ...ngleReplicaDirectRoutingMultiJvmSpec.scala | 62 - .../failover/RandomFailoverMultiJvmNode1.conf | 8 - .../failover/RandomFailoverMultiJvmNode1.opts | 1 - .../failover/RandomFailoverMultiJvmNode2.conf | 8 - .../failover/RandomFailoverMultiJvmNode2.opts | 1 - .../failover/RandomFailoverMultiJvmNode3.conf | 8 - .../failover/RandomFailoverMultiJvmNode3.opts | 1 - .../failover/RandomFailoverMultiJvmSpec.scala | 145 -- .../homenode/HomeNodeMultiJvmNode1.conf | 8 - .../homenode/HomeNodeMultiJvmNode1.opts | 1 - .../homenode/HomeNodeMultiJvmNode2.conf | 8 - .../homenode/HomeNodeMultiJvmNode2.opts | 1 - .../homenode/HomeNodeMultiJvmSpec.scala | 60 - .../Random1ReplicaMultiJvmNode1.conf | 4 - .../Random1ReplicaMultiJvmNode1.opts | 1 - .../Random1ReplicaMultiJvmSpec.scala | 51 - .../Random3ReplicasMultiJvmNode1.conf | 4 - .../Random3ReplicasMultiJvmNode1.opts | 1 - .../Random3ReplicasMultiJvmNode2.conf | 4 - .../Random3ReplicasMultiJvmNode2.opts | 1 - .../Random3ReplicasMultiJvmNode3.conf | 4 - .../Random3ReplicasMultiJvmNode3.opts | 1 - .../Random3ReplicasMultiJvmSpec.scala | 119 -- .../RoundRobinFailoverMultiJvmNode1.conf | 8 - .../RoundRobinFailoverMultiJvmNode1.opts | 1 - .../RoundRobinFailoverMultiJvmNode2.conf | 8 - .../RoundRobinFailoverMultiJvmNode2.opts | 1 - .../RoundRobinFailoverMultiJvmNode3.conf | 8 - .../RoundRobinFailoverMultiJvmNode3.opts | 1 - 
.../RoundRobinFailoverMultiJvmSpec.scala | 146 -- .../homenode/HomeNodeMultiJvmNode1.conf | 8 - .../homenode/HomeNodeMultiJvmNode1.opts | 1 - .../homenode/HomeNodeMultiJvmNode2.conf | 5 - .../homenode/HomeNodeMultiJvmNode2.opts | 1 - .../homenode/HomeNodeMultiJvmSpec.scala | 63 - .../RoundRobin1ReplicaMultiJvmNode1.conf | 4 - .../RoundRobin1ReplicaMultiJvmNode1.opts | 1 - .../RoundRobin1ReplicaMultiJvmSpec.scala | 49 - .../RoundRobin2ReplicasMultiJvmNode1.conf | 4 - .../RoundRobin2ReplicasMultiJvmNode1.opts | 1 - .../RoundRobin2ReplicasMultiJvmNode2.conf | 4 - .../RoundRobin2ReplicasMultiJvmNode2.opts | 1 - .../RoundRobin2ReplicasMultiJvmSpec.scala | 121 -- .../RoundRobin3ReplicasMultiJvmNode1.conf | 4 - .../RoundRobin3ReplicasMultiJvmNode1.opts | 1 - .../RoundRobin3ReplicasMultiJvmNode2.conf | 4 - .../RoundRobin3ReplicasMultiJvmNode2.opts | 1 - .../RoundRobin3ReplicasMultiJvmNode3.conf | 4 - .../RoundRobin3ReplicasMultiJvmNode3.opts | 1 - .../RoundRobin3ReplicasMultiJvmSpec.scala | 158 -- .../ScatterGatherFailoverMultiJvmNode1.conf | 6 - .../ScatterGatherFailoverMultiJvmNode1.opts | 1 - .../ScatterGatherFailoverMultiJvmNode2.conf | 6 - .../ScatterGatherFailoverMultiJvmNode2.opts | 1 - .../ScatterGatherFailoverMultiJvmSpec.scala | 114 - .../sample/PingPongMultiJvmExample.scala | 227 -- .../cluster}/AccrualFailureDetectorSpec.scala | 2 +- .../AsynchronousTransactionLogSpec.scala | 230 -- .../GossipingAccrualFailureDetectorSpec.scala | 2 +- .../SynchronousTransactionLogSpec.scala | 190 -- .../scala/akka/cluster}/VectorClockSpec.scala | 2 +- .../sample/ClusteredPingPongSample.scala | 134 -- .../cluster/sample/ComputeGridSample.scala | 91 - .../cluster/storage/InMemoryStorageSpec.scala | 241 --- .../cluster/storage/StorageTestUtils.scala | 15 - .../storage/ZooKeeperStorageSpec.scala | 132 -- .../akka/remote/RemoteActorRefProvider.scala | 6 - .../src/test/resources/log4j.properties | 58 - .../src/test/resources/logback-test.xml | 26 - akka-remote/src/test/resources/zoo.cfg | 12 - project/AkkaBuild.scala | 28 +- 180 files changed, 45 insertions(+), 9014 deletions(-) delete mode 100644 akka-cluster/src/main/java/akka/cluster/LocalBookKeeper.java delete mode 100644 akka-cluster/src/main/java/akka/cluster/zookeeper/DistributedQueue.java delete mode 100644 akka-cluster/src/main/java/akka/cluster/zookeeper/ZooKeeperQueue.java rename {akka-remote/src/main/scala/akka/remote => akka-cluster/src/main/scala/akka/cluster}/AccrualFailureDetector.scala (99%) delete mode 100644 akka-cluster/src/main/scala/akka/cluster/BookKeeperServer.scala delete mode 100644 akka-cluster/src/main/scala/akka/cluster/Cluster.scala delete mode 100644 akka-cluster/src/main/scala/akka/cluster/ClusterActorRef.scala delete mode 100644 akka-cluster/src/main/scala/akka/cluster/ClusterDeployer.scala rename {akka-remote/src/main/scala/akka/remote => akka-cluster/src/main/scala/akka/cluster}/Gossiper.scala (97%) delete mode 100644 akka-cluster/src/main/scala/akka/cluster/LocalCluster.scala rename {akka-remote/src/main/scala/akka/remote => akka-cluster/src/main/scala/akka/cluster}/RemoteConnectionManager.scala (96%) delete mode 100644 akka-cluster/src/main/scala/akka/cluster/TransactionLog.scala rename {akka-remote/src/main/scala/akka/remote => akka-cluster/src/main/scala/akka/cluster}/VectorClock.scala (99%) delete mode 100644 akka-cluster/src/main/scala/akka/cluster/metrics/LocalNodeMetricsManager.scala delete mode 100644 akka-cluster/src/main/scala/akka/cluster/metrics/MetricsProvider.scala delete mode 100644 
akka-cluster/src/main/scala/akka/cluster/storage/Storage.scala delete mode 100644 akka-cluster/src/main/scala/akka/cluster/zookeeper/AkkaZkClient.scala delete mode 100644 akka-cluster/src/main/scala/akka/cluster/zookeeper/AkkaZooKeeper.scala delete mode 100644 akka-cluster/src/main/scala/akka/cluster/zookeeper/ZooKeeperBarrier.scala rename {akka-remote/src/multi-jvm/scala/akka/remote => akka-cluster/src/multi-jvm/scala/akka/cluster}/GossipMembershipMultiJvmSpec.scala (99%) delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmNode1.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmNode1.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmNode2.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmNode2.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmSpec.scala delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmNode1.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmNode1.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmNode2.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmNode2.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmSpec.scala delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmNode1.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmNode1.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmNode2.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmNode2.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmSpec.scala delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmNode1.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmNode1.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmNode2.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmNode2.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmSpec.scala delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmNode1.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmNode1.opts delete mode 100644 
akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmNode2.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmNode2.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmSpec.scala delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmNode1.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmNode1.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmNode2.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmNode2.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmSpec.scala delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmNode1.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmNode1.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmNode2.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmNode2.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmSpec.scala delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/local/LocalMetricsMultiJvmNode1.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/local/LocalMetricsMultiJvmNode1.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/local/LocalMetricsMultiJvmSpec.scala delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmNode1.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmNode1.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmNode2.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmNode2.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmSpec.scala delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmNode1.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmNode1.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmNode2.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmNode2.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmSpec.scala delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode1.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode1.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode2.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode2.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode3.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode3.opts delete 
mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmSpec.scala delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmNode1.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmNode1.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmNode2.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmNode2.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmSpec.scala delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmNode1.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmNode1.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmNode2.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmNode2.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmSpec.scala delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmNode1.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmNode1.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmNode2.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmNode2.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmSpec.scala delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmNode1.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmNode1.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmNode2.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmNode2.opts delete mode 100644 
akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmSpec.scala delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmNode1.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmNode1.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmNode2.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmNode2.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmSpec.scala delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNode1MultiJvmSpec.scala delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNodeMultiJvmNode1.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNodeMultiJvmNode1.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNodeMultiJvmNode2.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNodeMultiJvmNode2.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmNode1.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmNode1.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmNode2.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmNode2.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmSpec.scala delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode1.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode1.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode2.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode2.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode3.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode3.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmSpec.scala delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmNode1.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmNode1.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmNode2.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmNode2.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmSpec.scala delete mode 100644 
akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_1/Random1ReplicaMultiJvmNode1.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_1/Random1ReplicaMultiJvmNode1.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_1/Random1ReplicaMultiJvmSpec.scala delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode1.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode1.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode2.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode2.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode3.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode3.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmSpec.scala delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode1.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode1.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode2.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode2.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode3.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode3.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmSpec.scala delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmNode1.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmNode1.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmNode2.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmNode2.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmSpec.scala delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_1/RoundRobin1ReplicaMultiJvmNode1.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_1/RoundRobin1ReplicaMultiJvmNode1.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_1/RoundRobin1ReplicaMultiJvmSpec.scala delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmNode1.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmNode1.opts delete mode 100644 
akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmNode2.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmNode2.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmSpec.scala delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode1.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode1.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode2.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode2.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode3.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode3.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmSpec.scala delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmNode1.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmNode1.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmNode2.conf delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmNode2.opts delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmSpec.scala delete mode 100644 akka-cluster/src/multi-jvm/scala/akka/cluster/sample/PingPongMultiJvmExample.scala rename {akka-remote/src/test/scala/akka/remote => akka-cluster/src/test/scala/akka/cluster}/AccrualFailureDetectorSpec.scala (99%) delete mode 100644 akka-cluster/src/test/scala/akka/cluster/AsynchronousTransactionLogSpec.scala rename {akka-remote/src/test/scala/akka/remote => akka-cluster/src/test/scala/akka/cluster}/GossipingAccrualFailureDetectorSpec.scala (99%) delete mode 100644 akka-cluster/src/test/scala/akka/cluster/SynchronousTransactionLogSpec.scala rename {akka-remote/src/test/scala/akka/remote => akka-cluster/src/test/scala/akka/cluster}/VectorClockSpec.scala (99%) delete mode 100644 akka-cluster/src/test/scala/akka/cluster/sample/ClusteredPingPongSample.scala delete mode 100644 akka-cluster/src/test/scala/akka/cluster/sample/ComputeGridSample.scala delete mode 100644 akka-cluster/src/test/scala/akka/cluster/storage/InMemoryStorageSpec.scala delete mode 100644 akka-cluster/src/test/scala/akka/cluster/storage/StorageTestUtils.scala delete mode 100644 akka-cluster/src/test/scala/akka/cluster/storage/ZooKeeperStorageSpec.scala delete mode 100644 akka-remote/src/test/resources/log4j.properties delete mode 100644 akka-remote/src/test/resources/logback-test.xml delete mode 100644 akka-remote/src/test/resources/zoo.cfg diff --git a/akka-cluster/src/main/java/akka/cluster/LocalBookKeeper.java 
b/akka-cluster/src/main/java/akka/cluster/LocalBookKeeper.java deleted file mode 100644 index 413b9a3154..0000000000 --- a/akka-cluster/src/main/java/akka/cluster/LocalBookKeeper.java +++ /dev/null @@ -1,187 +0,0 @@ -package akka.cluster; - -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -import java.io.BufferedReader; -import java.io.File; -import java.io.IOException; -import java.io.InputStreamReader; -import java.io.OutputStream; -import java.net.InetAddress; -import java.net.InetSocketAddress; -import java.net.Socket; - -import org.apache.bookkeeper.proto.BookieServer; -import org.apache.zookeeper.CreateMode; -import org.apache.zookeeper.KeeperException; -import org.apache.zookeeper.WatchedEvent; -import org.apache.zookeeper.Watcher; -import org.apache.zookeeper.ZooKeeper; -import org.apache.zookeeper.ZooDefs.Ids; -import org.apache.zookeeper.server.NIOServerCnxnFactory; -import org.apache.zookeeper.server.ZooKeeperServer; - -public class LocalBookKeeper { - public static final int CONNECTION_TIMEOUT = 30000; - - int numberOfBookies; - - public LocalBookKeeper() { - numberOfBookies = 3; - } - - public LocalBookKeeper(int numberOfBookies) { - this(); - this.numberOfBookies = numberOfBookies; - } - - private final String HOSTPORT = "127.0.0.1:2181"; - NIOServerCnxnFactory serverFactory; - ZooKeeperServer zks; - ZooKeeper zkc; - int ZooKeeperDefaultPort = 2181; - File ZkTmpDir; - - //BookKeeper variables - File tmpDirs[]; - BookieServer bs[]; - Integer initialPort = 5000; - - /** - * @param args - */ - - public void runZookeeper(int maxCC) throws IOException{ - // create a ZooKeeper server(dataDir, dataLogDir, port) - //ServerStats.registerAsConcrete(); - //ClientBase.setupTestEnv(); - ZkTmpDir = File.createTempFile("zookeeper", "test"); - ZkTmpDir.delete(); - ZkTmpDir.mkdir(); - - try { - zks = new ZooKeeperServer(ZkTmpDir, ZkTmpDir, ZooKeeperDefaultPort); - serverFactory = new NIOServerCnxnFactory(); - serverFactory.configure(new InetSocketAddress(ZooKeeperDefaultPort), maxCC); - serverFactory.startup(zks); - } catch (Exception e) { - // TODO Auto-generated catch block - } - - boolean b = waitForServerUp(HOSTPORT, CONNECTION_TIMEOUT); - } - - public void initializeZookeper() { - //initialize the zk client with values - try { - zkc = new ZooKeeper("127.0.0.1", ZooKeeperDefaultPort, new emptyWatcher()); - zkc.create("/ledgers", new byte[0], Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); - zkc.create("/ledgers/available", new byte[0], Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT); - // No need to create an entry for each requested bookie anymore as the - // BookieServers will register themselves with ZooKeeper on startup. 
- } catch (KeeperException e) { - } catch (InterruptedException e) { - } catch (IOException e) { - } - } - - public void runBookies() throws IOException{ - // Create Bookie Servers (B1, B2, B3) - - tmpDirs = new File[numberOfBookies]; - bs = new BookieServer[numberOfBookies]; - - for(int i = 0; i < numberOfBookies; i++) { - tmpDirs[i] = File.createTempFile("bookie" + Integer.toString(i), "test"); - tmpDirs[i].delete(); - tmpDirs[i].mkdir(); - - bs[i] = new BookieServer(initialPort + i, InetAddress.getLocalHost().getHostAddress() + ":" - + ZooKeeperDefaultPort, tmpDirs[i], new File[]{tmpDirs[i]}); - bs[i].start(); - } - } - - public static void main(String[] args) throws IOException, InterruptedException { - if(args.length < 1) { - usage(); - System.exit(-1); - } - LocalBookKeeper lb = new LocalBookKeeper(Integer.parseInt(args[0])); - lb.runZookeeper(1000); - lb.initializeZookeper(); - lb.runBookies(); - while (true) { - Thread.sleep(5000); - } - } - - private static void usage() { - System.err.println("Usage: LocalBookKeeper number-of-bookies"); - } - - /* User for testing purposes, void */ - class emptyWatcher implements Watcher{ - public void process(WatchedEvent event) {} - } - - public static boolean waitForServerUp(String hp, long timeout) { - long start = System.currentTimeMillis(); - String split[] = hp.split(":"); - String host = split[0]; - int port = Integer.parseInt(split[1]); - while (true) { - try { - Socket sock = new Socket(host, port); - BufferedReader reader = null; - try { - OutputStream outstream = sock.getOutputStream(); - outstream.write("stat".getBytes()); - outstream.flush(); - - reader = - new BufferedReader( - new InputStreamReader(sock.getInputStream())); - String line = reader.readLine(); - if (line != null && line.startsWith("Zookeeper version:")) { - return true; - } - } finally { - sock.close(); - if (reader != null) { - reader.close(); - } - } - } catch (IOException e) { - // ignore as this is expected - } - - if (System.currentTimeMillis() > start + timeout) { - break; - } - try { - Thread.sleep(250); - } catch (InterruptedException e) { - // ignore - } - } - return false; - } - -} diff --git a/akka-cluster/src/main/java/akka/cluster/zookeeper/DistributedQueue.java b/akka-cluster/src/main/java/akka/cluster/zookeeper/DistributedQueue.java deleted file mode 100644 index 7bb87bc414..0000000000 --- a/akka-cluster/src/main/java/akka/cluster/zookeeper/DistributedQueue.java +++ /dev/null @@ -1,312 +0,0 @@ -/** - * - * Licensed to the Apache Software Foundation (ASF) under one or more - * contributor license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright ownership. - * The ASF licenses this file to You under the Apache License, Version 2.0 - * (the "License"); you may not use this file except in compliance with - * the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package akka.cluster.zookeeper; - -import java.util.List; -import java.util.NoSuchElementException; -import java.util.TreeMap; -import java.util.concurrent.CountDownLatch; - -import org.apache.log4j.Logger; -import org.apache.zookeeper.CreateMode; -import org.apache.zookeeper.KeeperException; -import org.apache.zookeeper.WatchedEvent; -import org.apache.zookeeper.Watcher; -import org.apache.zookeeper.ZooDefs; -import org.apache.zookeeper.ZooKeeper; -import org.apache.zookeeper.data.ACL; -import org.apache.zookeeper.data.Stat; - -/** - * - * A protocol to implement a distributed queue. - * - */ - -public class DistributedQueue { - private static final Logger LOG = Logger.getLogger(DistributedQueue.class); - - private final String dir; - - private ZooKeeper zookeeper; - private List acl = ZooDefs.Ids.OPEN_ACL_UNSAFE; - - private final String prefix = "qn-"; - - - public DistributedQueue(ZooKeeper zookeeper, String dir, List acl) { - this.dir = dir; - - if(acl != null) { - this.acl = acl; - } - this.zookeeper = zookeeper; - - } - - - - /** - * Returns a Map of the children, ordered by id. - * @param watcher optional watcher on getChildren() operation. - * @return map from id to child name for all children - */ - private TreeMap orderedChildren(Watcher watcher) throws KeeperException, InterruptedException { - TreeMap orderedChildren = new TreeMap(); - - List childNames = null; - try{ - childNames = zookeeper.getChildren(dir, watcher); - }catch (KeeperException.NoNodeException e) { - throw e; - } - - for(String childName : childNames) { - try{ - //Check format - if(!childName.regionMatches(0, prefix, 0, prefix.length())) { - LOG.warn("Found child node with improper name: " + childName); - continue; - } - String suffix = childName.substring(prefix.length()); - Long childId = new Long(suffix); - orderedChildren.put(childId,childName); - }catch(NumberFormatException e) { - LOG.warn("Found child node with improper format : " + childName + " " + e,e); - } - } - - return orderedChildren; - } - - /** - * Find the smallest child node. - * @return The name of the smallest child node. - */ - private String smallestChildName() throws KeeperException, InterruptedException { - long minId = Long.MAX_VALUE; - String minName = ""; - - List childNames = null; - - try{ - childNames = zookeeper.getChildren(dir, false); - }catch(KeeperException.NoNodeException e) { - LOG.warn("Caught: " +e,e); - return null; - } - - for(String childName : childNames) { - try{ - //Check format - if(!childName.regionMatches(0, prefix, 0, prefix.length())) { - LOG.warn("Found child node with improper name: " + childName); - continue; - } - String suffix = childName.substring(prefix.length()); - long childId = Long.parseLong(suffix); - if(childId < minId) { - minId = childId; - minName = childName; - } - }catch(NumberFormatException e) { - LOG.warn("Found child node with improper format : " + childName + " " + e,e); - } - } - - - if(minId < Long.MAX_VALUE) { - return minName; - }else{ - return null; - } - } - - /** - * Return the head of the queue without modifying the queue. - * @return the data at the head of the queue. - * @throws NoSuchElementException - * @throws KeeperException - * @throws InterruptedException - */ - public byte[] element() throws NoSuchElementException, KeeperException, InterruptedException { - TreeMap orderedChildren; - - // element, take, and remove follow the same pattern. - // We want to return the child node with the smallest sequence number. 
- // Since other clients are remove()ing and take()ing nodes concurrently, - // the child with the smallest sequence number in orderedChildren might be gone by the time we check. - // We don't call getChildren again until we have tried the rest of the nodes in sequence order. - while(true) { - try{ - orderedChildren = orderedChildren(null); - }catch(KeeperException.NoNodeException e) { - throw new NoSuchElementException(); - } - if(orderedChildren.size() == 0 ) throw new NoSuchElementException(); - - for(String headNode : orderedChildren.values()) { - if(headNode != null) { - try{ - return zookeeper.getData(dir+"/"+headNode, false, null); - }catch(KeeperException.NoNodeException e) { - //Another client removed the node first, try next - } - } - } - - } - } - - - /** - * Attempts to remove the head of the queue and return it. - * @return The former head of the queue - * @throws NoSuchElementException - * @throws KeeperException - * @throws InterruptedException - */ - public byte[] remove() throws NoSuchElementException, KeeperException, InterruptedException { - TreeMap orderedChildren; - // Same as for element. Should refactor this. - while(true) { - try{ - orderedChildren = orderedChildren(null); - }catch(KeeperException.NoNodeException e) { - throw new NoSuchElementException(); - } - if(orderedChildren.size() == 0) throw new NoSuchElementException(); - - for(String headNode : orderedChildren.values()) { - String path = dir +"/"+headNode; - try{ - byte[] data = zookeeper.getData(path, false, null); - zookeeper.delete(path, -1); - return data; - }catch(KeeperException.NoNodeException e) { - // Another client deleted the node first. - } - } - - } - } - - private class LatchChildWatcher implements Watcher { - - CountDownLatch latch; - - public LatchChildWatcher() { - latch = new CountDownLatch(1); - } - - public void process(WatchedEvent event) { - LOG.debug("Watcher fired on path: " + event.getPath() + " state: " + - event.getState() + " type " + event.getType()); - latch.countDown(); - } - public void await() throws InterruptedException { - latch.await(); - } - } - - /** - * Removes the head of the queue and returns it, blocks until it succeeds. - * @return The former head of the queue - * @throws NoSuchElementException - * @throws KeeperException - * @throws InterruptedException - */ - public byte[] take() throws KeeperException, InterruptedException { - TreeMap orderedChildren; - // Same as for element. Should refactor this. - while(true) { - LatchChildWatcher childWatcher = new LatchChildWatcher(); - try{ - orderedChildren = orderedChildren(childWatcher); - }catch(KeeperException.NoNodeException e) { - zookeeper.create(dir, new byte[0], acl, CreateMode.PERSISTENT); - continue; - } - if(orderedChildren.size() == 0) { - childWatcher.await(); - continue; - } - - for(String headNode : orderedChildren.values()) { - String path = dir +"/"+headNode; - try{ - byte[] data = zookeeper.getData(path, false, null); - zookeeper.delete(path, -1); - return data; - }catch(KeeperException.NoNodeException e) { - // Another client deleted the node first. - } - } - } - } - - /** - * Inserts data into queue. 
- * @param data - * @return true if data was successfully added - */ - public boolean offer(byte[] data) throws KeeperException, InterruptedException{ - for(;;) { - try{ - zookeeper.create(dir+"/"+prefix, data, acl, CreateMode.PERSISTENT_SEQUENTIAL); - return true; - }catch(KeeperException.NoNodeException e) { - zookeeper.create(dir, new byte[0], acl, CreateMode.PERSISTENT); - } - } - - } - - /** - * Returns the data at the first element of the queue, or null if the queue is empty. - * @return data at the first element of the queue, or null. - * @throws KeeperException - * @throws InterruptedException - */ - public byte[] peek() throws KeeperException, InterruptedException{ - try{ - return element(); - }catch(NoSuchElementException e) { - return null; - } - } - - - /** - * Attempts to remove the head of the queue and return it. Returns null if the queue is empty. - * @return Head of the queue or null. - * @throws KeeperException - * @throws InterruptedException - */ - public byte[] poll() throws KeeperException, InterruptedException { - try{ - return remove(); - }catch(NoSuchElementException e) { - return null; - } - } - - - -} diff --git a/akka-cluster/src/main/java/akka/cluster/zookeeper/ZooKeeperQueue.java b/akka-cluster/src/main/java/akka/cluster/zookeeper/ZooKeeperQueue.java deleted file mode 100644 index 8867d97e00..0000000000 --- a/akka-cluster/src/main/java/akka/cluster/zookeeper/ZooKeeperQueue.java +++ /dev/null @@ -1,173 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -package akka.cluster.zookeeper; - -import java.io.Serializable; -import java.util.List; -import java.util.ArrayList; - -import org.I0Itec.zkclient.ExceptionUtil; -import org.I0Itec.zkclient.IZkChildListener; -import org.I0Itec.zkclient.ZkClient; -import org.I0Itec.zkclient.exception.ZkNoNodeException; - -public class ZooKeeperQueue { - - protected static class Element { - private String _name; - private T _data; - - public Element(String name, T data) { - _name = name; - _data = data; - } - - public String getName() { - return _name; - } - - public T getData() { - return _data; - } - } - - protected final ZkClient _zkClient; - private final String _elementsPath; - private final String _rootPath; - private final boolean _isBlocking; - - public ZooKeeperQueue(ZkClient zkClient, String rootPath, boolean isBlocking) { - _zkClient = zkClient; - _rootPath = rootPath; - _isBlocking = isBlocking; - _elementsPath = rootPath + "/queue"; - if (!_zkClient.exists(rootPath)) { - _zkClient.createPersistent(rootPath, true); - _zkClient.createPersistent(_elementsPath, true); - } - } - - public String enqueue(T element) { - try { - String sequential = _zkClient.createPersistentSequential(getElementRoughPath(), element); - String elementId = sequential.substring(sequential.lastIndexOf('/') + 1); - return elementId; - } catch (Exception e) { - throw ExceptionUtil.convertToRuntimeException(e); - } - } - - public T dequeue() throws InterruptedException { - if (_isBlocking) { - Element element = getFirstElement(); - _zkClient.delete(getElementPath(element.getName())); - return element.getData(); - } else { - throw new UnsupportedOperationException("Non-blocking ZooKeeperQueue is not yet supported"); - /* FIXME DOES NOT WORK - try { - String headName = getSmallestElement(_zkClient.getChildren(_elementsPath)); - String headPath = getElementPath(headName); - return (T) _zkClient.readData(headPath); - } catch (ZkNoNodeException e) { - return null; - } - */ - } - } - - public boolean containsElement(String elementId) { - String 
zkPath = getElementPath(elementId); - return _zkClient.exists(zkPath); - } - - public T peek() throws InterruptedException { - Element element = getFirstElement(); - if (element == null) { - return null; - } - return element.getData(); - } - - @SuppressWarnings("unchecked") - public List getElements() { - List paths =_zkClient.getChildren(_elementsPath); - List elements = new ArrayList(); - for (String path: paths) { - elements.add((T)_zkClient.readData(path)); - } - return elements; - } - - public int size() { - return _zkClient.getChildren(_elementsPath).size(); - } - - public void clear() { - _zkClient.deleteRecursive(_rootPath); - } - - public boolean isEmpty() { - return size() == 0; - } - - private String getElementRoughPath() { - return getElementPath("item" + "-"); - } - - private String getElementPath(String elementId) { - return _elementsPath + "/" + elementId; - } - - private String getSmallestElement(List list) { - String smallestElement = list.get(0); - for (String element : list) { - if (element.compareTo(smallestElement) < 0) { - smallestElement = element; - } - } - return smallestElement; - } - - @SuppressWarnings("unchecked") - protected Element getFirstElement() throws InterruptedException { - final Object mutex = new Object(); - IZkChildListener notifyListener = new IZkChildListener() { - @Override - public void handleChildChange(String parentPath, List currentChilds) throws Exception { - synchronized (mutex) { - mutex.notify(); - } - } - }; - try { - while (true) { - List elementNames; - synchronized (mutex) { - elementNames = _zkClient.subscribeChildChanges(_elementsPath, notifyListener); - while (elementNames == null || elementNames.isEmpty()) { - mutex.wait(); - elementNames = _zkClient.getChildren(_elementsPath); - } - } - String elementName = getSmallestElement(elementNames); - try { - String elementPath = getElementPath(elementName); - return new Element(elementName, (T) _zkClient.readData(elementPath)); - } catch (ZkNoNodeException e) { - // somebody else picked up the element first, so we have to - // retry with the new first element - } - } - } catch (InterruptedException e) { - throw e; - } catch (Exception e) { - throw ExceptionUtil.convertToRuntimeException(e); - } finally { - _zkClient.unsubscribeChildChanges(_elementsPath, notifyListener); - } - } - -} diff --git a/akka-remote/src/main/scala/akka/remote/AccrualFailureDetector.scala b/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala similarity index 99% rename from akka-remote/src/main/scala/akka/remote/AccrualFailureDetector.scala rename to akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala index 1c9cb45c08..892f7a026d 100644 --- a/akka-remote/src/main/scala/akka/remote/AccrualFailureDetector.scala +++ b/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala @@ -2,16 +2,16 @@ * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.remote +package akka.cluster + +import akka.actor.{ ActorSystem, Address } +import akka.event.Logging -import java.util.concurrent.atomic.AtomicReference import scala.collection.immutable.Map import scala.annotation.tailrec -import System.{ currentTimeMillis ⇒ newTimestamp } -import akka.actor.{ ActorSystem, Address } -import akka.actor.ActorSystem -import akka.event.Logging +import java.util.concurrent.atomic.AtomicReference +import System.{ currentTimeMillis ⇒ newTimestamp } /** * Implementation of 'The Phi Accrual Failure Detector' by Hayashibara et al. 
as defined in their paper: diff --git a/akka-cluster/src/main/scala/akka/cluster/BookKeeperServer.scala b/akka-cluster/src/main/scala/akka/cluster/BookKeeperServer.scala deleted file mode 100644 index 679af24d03..0000000000 --- a/akka-cluster/src/main/scala/akka/cluster/BookKeeperServer.scala +++ /dev/null @@ -1,35 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ -package akka.cluster - -import org.apache.bookkeeper.proto.BookieServer - -import java.io.File - -/* -A simple use of BookKeeper is to implement a write-ahead transaction log. A server maintains an in-memory data structure -(with periodic snapshots for example) and logs changes to that structure before it applies the change. The system -server creates a ledger at startup and stores the ledger id and password in a well-known place (ZooKeeper maybe). When -it needs to make a change, the server adds an entry with the change information to a ledger and applies the change when -BookKeeper adds the entry successfully. The server can even use asyncAddEntry to queue up many changes for high change -throughput. BookKeeper meticulously logs the changes in order and calls the completion functions in order. - -When the system server dies, a backup server will come online, get the last snapshot, and then open the -ledger of the old server and read all the entries from the time the snapshot was taken. (Since it doesn't know the last -entry number it will use MAX_INTEGER.) Once all the entries have been processed, it will close the ledger and start a -new one for its own use. -*/ - -object BookKeeperServer { - val port = 3181 - val zkServers = "localhost:2181" - val journal = new File("./bk/journal") - val ledgers = Array(new File("./bk/ledger")) - val bookie = new BookieServer(port, zkServers, journal, ledgers) - - def start() { - bookie.start() - bookie.join() - } -} diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala deleted file mode 100644 index 130149b491..0000000000 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ /dev/null @@ -1,1876 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc.
- */ - -package akka.cluster - -import org.apache.zookeeper._ -import org.apache.zookeeper.Watcher.Event._ -import org.apache.zookeeper.data.Stat -import org.apache.zookeeper.recipes.lock.{ WriteLock, LockListener } - -import org.I0Itec.zkclient._ -import org.I0Itec.zkclient.serialize._ -import org.I0Itec.zkclient.exception._ - -import java.util.{ List ⇒ JList } -import java.util.concurrent.atomic.{ AtomicBoolean, AtomicReference } -import java.util.concurrent.{ CopyOnWriteArrayList, Callable, ConcurrentHashMap } -import javax.management.StandardMBean -import java.net.InetSocketAddress - -import scala.collection.mutable.ConcurrentMap -import scala.collection.JavaConversions._ -import scala.annotation.tailrec - -import akka.util._ -import duration._ -import Helpers._ - -import akka.actor._ -import Actor._ -import Status._ -import DeploymentConfig._ - -import akka.event.EventHandler -import akka.config.Config -import akka.config.Config._ - -import akka.serialization.{ Serialization, Serializer, ActorSerialization, Compression } -import ActorSerialization._ -import Compression.LZF - -import akka.routing._ -import akka.cluster._ -import akka.cluster.metrics._ -import akka.cluster.zookeeper._ -import ChangeListener._ -import RemoteProtocol._ -import RemoteSystemDaemonMessageType._ - -import com.eaio.uuid.UUID - -import com.google.protobuf.ByteString -import akka.dispatch.{Await, Dispatchers, Future, PinnedDispatcher} - -// FIXME add watch for each node that when the entry for the node is removed then the node shuts itself down - -/** - * JMX MBean for the cluster service. - */ -trait ClusterNodeMBean { - - def stop() - - def disconnect() - - def reconnect() - - def resign() - - def getRemoteServerHostname: String - - def getRemoteServerPort: Int - - def getNodeName: String - - def getClusterName: String - - def getZooKeeperServerAddresses: String - - def getMemberNodes: Array[String] - - def getNodeAddress(): NodeAddress - - def getLeaderLockName: String - - def isLeader: Boolean - - def getUuidsForClusteredActors: Array[String] - - def getAddressesForClusteredActors: Array[String] - - def getUuidsForActorsInUse: Array[String] - - def getAddressesForActorsInUse: Array[String] - - def getNodesForActorInUseWithAddress(address: String): Array[String] - - def getUuidsForActorsInUseOnNode(nodeName: String): Array[String] - - def getAddressesForActorsInUseOnNode(nodeName: String): Array[String] - - def setConfigElement(key: String, value: String) - - def getConfigElement(key: String): AnyRef - - def removeConfigElement(key: String) - - def getConfigElementKeys: Array[String] - - def getMembershipPathFor(node: String): String - - def getConfigurationPathFor(key: String): String - - def getActorAddresstoNodesPathFor(actorAddress: String): String - - def getActorAddressToNodesPathForWithNodeName(actorAddress: String, nodeName: String): String - - def getNodeToUuidsPathFor(node: String): String - - // FIXME All MBean methods that take a UUID are useless, change to String - def getNodeToUuidsPathFor(node: String, uuid: UUID): String - - def getActorAddressRegistryPathFor(actorAddress: String): String - - def getActorAddressRegistrySerializerPathFor(actorAddress: String): String - - def getActorAddressRegistryUuidPathFor(actorAddress: String): String - - def getActorUuidRegistryNodePathFor(uuid: UUID): String - - def getActorUuidRegistryRemoteAddressPathFor(uuid: UUID): String - - def getActorAddressToUuidsPathFor(actorAddress: String): String - - def getActorAddressToUuidsPathForWithNodeName(actorAddress: 
String, uuid: UUID): String -} - -/** - * Module for the Cluster. Also holds global state such as configuration data etc. - */ -object Cluster { - val EMPTY_STRING = "".intern - - // config options - val name = Config.clusterName - val zooKeeperServers = config.getString("akka.cluster.zookeeper-server-addresses", "localhost:2181") - val remoteServerPort = config.getInt("akka.remote.server.port", 2552) - val sessionTimeout = Duration(config.getInt("akka.cluster.session-timeout", 60), TIME_UNIT).toMillis.toInt - val metricsRefreshInterval = Duration(config.getInt("akka.cluster.metrics-refresh-timeout", 2), TIME_UNIT) - val connectionTimeout = Duration(config.getInt("akka.cluster.connection-timeout", 60), TIME_UNIT).toMillis.toInt - val maxTimeToWaitUntilConnected = Duration(config.getInt("akka.cluster.max-time-to-wait-until-connected", 30), TIME_UNIT).toMillis.toInt - val shouldCompressData = config.getBool("akka.remote.use-compression", false) - val enableJMX = config.getBool("akka.enable-jmx", true) - val remoteDaemonAckTimeout = Duration(config.getInt("akka.remote.remote-daemon-ack-timeout", 30), TIME_UNIT).toMillis.toInt - val includeRefNodeInReplicaSet = config.getBool("akka.cluster.include-ref-node-in-replica-set", true) - - @volatile - private var properties = Map.empty[String, String] - - /** - * Use to override JVM options such as -Dakka.cluster.nodename=node1 etc. - * Currently supported options are: - *
-   *   Cluster setProperty ("akka.cluster.nodename", "node1")
-   *   Cluster setProperty ("akka.remote.hostname", "darkstar.lan")
-   *   Cluster setProperty ("akka.remote.port", "1234")
-   * 
- */ - def setProperty(property: (String, String)) { - properties = properties + property - } - - private def nodename: String = properties.get("akka.cluster.nodename") match { - case Some(uberride) ⇒ uberride - case None ⇒ Config.nodename - } - - private def hostname: String = properties.get("akka.remote.hostname") match { - case Some(uberride) ⇒ uberride - case None ⇒ Config.hostname - } - - private def port: Int = properties.get("akka.remote.port") match { - case Some(uberride) ⇒ uberride.toInt - case None ⇒ Config.remoteServerPort - } - - val defaultZooKeeperSerializer = new SerializableSerializer - - /** - * The node address. - */ - val nodeAddress = NodeAddress(name, nodename) - - /** - * The reference to the running ClusterNode. - */ - val node = { - if (nodeAddress eq null) throw new IllegalArgumentException("NodeAddress can't be null") - new DefaultClusterNode(nodeAddress, hostname, port, zooKeeperServers, defaultZooKeeperSerializer) - } - - /** - * Creates a new AkkaZkClient. - */ - def newZkClient(): AkkaZkClient = new AkkaZkClient(zooKeeperServers, sessionTimeout, connectionTimeout, defaultZooKeeperSerializer) - - def uuidToString(uuid: UUID): String = uuid.toString - - def stringToUuid(uuid: String): UUID = { - if (uuid eq null) throw new ClusterException("UUID is null") - if (uuid == "") throw new ClusterException("UUID is an empty string") - try { - new UUID(uuid) - } catch { - case e: StringIndexOutOfBoundsException ⇒ - val error = new ClusterException("UUID not valid [" + uuid + "]") - EventHandler.error(error, this, "") - throw error - } - } - - def uuidProtocolToUuid(uuid: UuidProtocol): UUID = new UUID(uuid.getHigh, uuid.getLow) - - def uuidToUuidProtocol(uuid: UUID): UuidProtocol = - UuidProtocol.newBuilder - .setHigh(uuid.getTime) - .setLow(uuid.getClockSeqAndNode) - .build -} - -/** - * A Cluster is made up of a number of JVMs, each running a ClusterNode. - * - * This is the path tree holding the cluster meta-data in ZooKeeper. - * - * Syntax: foo means a variable string, 'foo' means a symbol that does not change, and "data" in foo[data] means the value (in bytes) for the node "foo" - * - * <pre>
- *   /clusterName/'members'/nodeName
- *   /clusterName/'config'/key[bytes]
- *
- *   /clusterName/'actor-address-to-nodes'/actorAddress/nodeName
- *   /clusterName/'actors-node-to-uuids'/nodeName/actorUuid
- *
- *   /clusterName/'actor-address-registry'/actorAddress/'serializer'[serializerName]
- *   /clusterName/'actor-address-registry'/actorAddress/'uuid'[actorUuid]
- *
- *   /clusterName/'actor-uuid-registry'/actorUuid/'node'[nodeName]
- *   /clusterName/'actor-uuid-registry'/actorUuid/'node'/ip:port
- *   /clusterName/'actor-uuid-registry'/actorUuid/'address'[actorAddress]
- *
- *   /clusterName/'actor-address-to-uuids'/actorAddress/actorUuid
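- *
- *   For example, with illustrative values (cluster "test", node "node1", actor
- *   address "hello"), the first few entries above become:
- *
- *   /test/'members'/node1
- *   /test/'actor-address-to-nodes'/hello/node1
- *   /test/'actor-address-registry'/hello/'serializer'[serializerName]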
- * </pre>
- */ -class DefaultClusterNode private[akka] ( - val nodeAddress: NodeAddress, - val hostname: String = Config.hostname, - val port: Int = Config.remoteServerPort, - val zkServerAddresses: String, - val serializer: ZkSerializer) extends ErrorHandler with ClusterNode { - self ⇒ - - if ((hostname eq null) || hostname == "") throw new NullPointerException("Host name must not be null or empty string") - if (port < 1) throw new NullPointerException("Port can not be negative") - if (nodeAddress eq null) throw new IllegalArgumentException("'nodeAddress' can not be 'null'") - - val clusterJmxObjectName = JMX.nameFor(hostname, "monitoring", "cluster") - - import Cluster._ - - // private val connectToAllNewlyArrivedMembershipNodesInClusterLock = new AtomicBoolean(false) - - private[cluster] lazy val remoteClientLifeCycleHandler = actorOf(Props(new Actor { - def receive = { - case RemoteClientError(cause, client, address) ⇒ client.shutdownClientModule() - case RemoteClientDisconnected(client, address) ⇒ client.shutdownClientModule() - case _ ⇒ //ignore other - } - }), "akka.cluster.RemoteClientLifeCycleListener") - - private[cluster] lazy val remoteDaemon = new LocalActorRef(Props(new RemoteClusterDaemon(this)).copy(dispatcher = new PinnedDispatcher()), RemoteClusterDaemon.Address, systemService = true) - - private[cluster] lazy val remoteDaemonSupervisor = Supervisor( - SupervisorConfig( - OneForOneStrategy(List(classOf[Exception]), Int.MaxValue, Int.MaxValue), // is infinite restart what we want? - Supervise( - remoteDaemon, - Permanent) - :: Nil)).start() - - lazy val remoteService: RemoteSupport = { - val remote = new akka.remote.netty.NettyRemoteSupport - remote.start(hostname, port) - remote.register(RemoteClusterDaemon.Address, remoteDaemon) - remote.addListener(RemoteFailureDetector.sender) - remote.addListener(remoteClientLifeCycleHandler) - remote - } - - lazy val remoteServerAddress: InetSocketAddress = remoteService.address - - lazy val metricsManager: NodeMetricsManager = new LocalNodeMetricsManager(zkClient, Cluster.metricsRefreshInterval).start() - - // static nodes - val CLUSTER_PATH = "/" + nodeAddress.clusterName - val MEMBERSHIP_PATH = CLUSTER_PATH + "/members" - val CONFIGURATION_PATH = CLUSTER_PATH + "/config" - val PROVISIONING_PATH = CLUSTER_PATH + "/provisioning" - val ACTOR_ADDRESS_NODES_TO_PATH = CLUSTER_PATH + "/actor-address-to-nodes" - val ACTOR_ADDRESS_REGISTRY_PATH = CLUSTER_PATH + "/actor-address-registry" - val ACTOR_UUID_REGISTRY_PATH = CLUSTER_PATH + "/actor-uuid-registry" - val ACTOR_ADDRESS_TO_UUIDS_PATH = CLUSTER_PATH + "/actor-address-to-uuids" - val NODE_TO_ACTOR_UUIDS_PATH = CLUSTER_PATH + "/node-to-actors-uuids" - val NODE_METRICS = CLUSTER_PATH + "/metrics" - - val basePaths = List( - CLUSTER_PATH, - MEMBERSHIP_PATH, - ACTOR_ADDRESS_REGISTRY_PATH, - ACTOR_UUID_REGISTRY_PATH, - ACTOR_ADDRESS_NODES_TO_PATH, - NODE_TO_ACTOR_UUIDS_PATH, - ACTOR_ADDRESS_TO_UUIDS_PATH, - CONFIGURATION_PATH, - PROVISIONING_PATH, - NODE_METRICS) - - val LEADER_ELECTION_PATH = CLUSTER_PATH + "/leader" // should NOT be part of 'basePaths' only used by 'leaderLock' - - private val membershipNodePath = membershipPathFor(nodeAddress.nodeName) - - def membershipNodes: Array[String] = locallyCachedMembershipNodes.toList.toArray.asInstanceOf[Array[String]] - - // zookeeper listeners - private val stateListener = new StateListener(this) - private val membershipListener = new MembershipChildListener(this) - - // cluster node listeners - private val changeListeners = new 
CopyOnWriteArrayList[ChangeListener]() - - // Address -> ClusterActorRef - private[akka] val clusterActorRefs = new Index[InetSocketAddress, ClusterActorRef] - - case class VersionedConnectionState(version: Long, connections: Map[String, Tuple2[InetSocketAddress, ActorRef]]) - - // all the connections to other nodes - private[akka] val nodeConnections = { - var conns = Map.empty[String, Tuple2[InetSocketAddress, ActorRef]] - // add the remote connection to 'this' node as well, but as a 'local' actor - if (includeRefNodeInReplicaSet) conns += (nodeAddress.nodeName -> (remoteServerAddress, remoteDaemon)) - new AtomicReference[VersionedConnectionState](VersionedConnectionState(0, conns)) - } - - private val isShutdownFlag = new AtomicBoolean(false) - - // ZooKeeper client - private[cluster] val zkClient = new AkkaZkClient(zkServerAddresses, sessionTimeout, connectionTimeout, serializer) - - // leader election listener, registered to the 'leaderLock' below - private[cluster] val leaderElectionCallback = new LockListener { - override def lockAcquired() { - EventHandler.info(this, "Node [%s] is the new leader".format(self.nodeAddress.nodeName)) - self.publish(NewLeader(self.nodeAddress.nodeName)) - } - - override def lockReleased() { - EventHandler.info(this, "Node [%s] is *NOT* the leader anymore".format(self.nodeAddress.nodeName)) - } - } - - // leader election lock in ZooKeeper - private[cluster] val leaderLock = new WriteLock( - zkClient.connection.getZookeeper, - LEADER_ELECTION_PATH, null, - leaderElectionCallback) - - if (enableJMX) createMBean - - boot() - - // ======================================= - // Node - // ======================================= - - private[cluster] def boot() { - EventHandler.info(this, - ("\nCreating cluster node with" + - "\n\tcluster name = [%s]" + - "\n\tnode name = [%s]" + - "\n\tport = [%s]" + - "\n\tzookeeper server addresses = [%s]" + - "\n\tserializer = [%s]") - .format(nodeAddress.clusterName, nodeAddress.nodeName, port, zkServerAddresses, serializer)) - EventHandler.info(this, "Starting up remote server [%s]".format(remoteServerAddress.toString)) - createZooKeeperPathStructureIfNeeded() - registerListeners() - joinCluster() - joinLeaderElection() - fetchMembershipNodes() - EventHandler.info(this, "Cluster node [%s] started successfully".format(nodeAddress)) - } - - def isShutdown = isShutdownFlag.get - - def start() {} - - def shutdown() { - isShutdownFlag.set(true) - - def shutdownNode() { - ignore[ZkNoNodeException](zkClient.deleteRecursive(membershipNodePath)) - - locallyCachedMembershipNodes.clear() - - nodeConnections.get.connections.toList.foreach({ - case (_, (address, _)) ⇒ - Actor.remote.shutdownClientConnection(address) // shut down client connections - }) - - remoteService.shutdown() // shutdown server - - RemoteFailureDetector.sender.stop() - remoteClientLifeCycleHandler.stop() - remoteDaemon.stop() - - // for monitoring remote listener - registry.local.actors.filter(remoteService.hasListener).foreach(_.stop()) - - nodeConnections.set(VersionedConnectionState(0, Map.empty[String, Tuple2[InetSocketAddress, ActorRef]])) - - disconnect() - EventHandler.info(this, "Cluster node shut down [%s]".format(nodeAddress)) - } - - shutdownNode() - } - - def disconnect(): ClusterNode = { - zkClient.unsubscribeAll() - zkClient.close() - this - } - - def reconnect(): ClusterNode = { - zkClient.reconnect() - this - } - - // ======================================= - // Change notification - // ======================================= - - /** - * Registers a 
cluster change listener. - */ - def register(listener: ChangeListener): ClusterNode = { - changeListeners.add(listener) - this - } - - private[cluster] def publish(change: ChangeNotification) { - changeListeners.iterator.foreach(_.notify(change, this)) - } - - // ======================================= - // Leader - // ======================================= - - /** - * Returns the name of the current leader lock. - */ - def leader: String = leaderLock.getId - - /** - * Returns true if 'this' node is the current leader. - */ - def isLeader: Boolean = leaderLock.isOwner - - /** - * Explicitly resign from being a leader. If this node is not a leader then this operation is a no-op. - */ - def resign() { - if (isLeader) leaderLock.unlock() - } - - // ======================================= - // Actor - // ======================================= - - /** - * Clusters an actor of a specific type. If the actor is already clustered then the clustered version will be updated - * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly - * available durable store. - */ - def store[T <: Actor](actorAddress: String, actorClass: Class[T], serializer: Serializer): ClusterNode = - store(actorAddress, () ⇒ Actor.actorOf(actorClass, actorAddress), 0, Transient, false, serializer) - - /** - * Clusters an actor of a specific type. If the actor is already clustered then the clustered version will be updated - * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly - * available durable store. - */ - def store[T <: Actor](actorAddress: String, actorClass: Class[T], replicationScheme: ReplicationScheme, serializer: Serializer): ClusterNode = - store(actorAddress, () ⇒ Actor.actorOf(actorClass, actorAddress), 0, replicationScheme, false, serializer) - - /** - * Clusters an actor of a specific type. If the actor is already clustered then the clustered version will be updated - * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly - * available durable store. - */ - def store[T <: Actor](actorAddress: String, actorClass: Class[T], nrOfInstances: Int, serializer: Serializer): ClusterNode = - store(actorAddress, () ⇒ Actor.actorOf(actorClass, actorAddress), nrOfInstances, Transient, false, serializer) - - /** - * Clusters an actor of a specific type. If the actor is already clustered then the clustered version will be updated - * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly - * available durable store. - */ - def store[T <: Actor](actorAddress: String, actorClass: Class[T], nrOfInstances: Int, replicationScheme: ReplicationScheme, serializer: Serializer): ClusterNode = - store(actorAddress, () ⇒ Actor.actorOf(actorClass, actorAddress), nrOfInstances, replicationScheme, false, serializer) - - /** - * Clusters an actor of a specific type. If the actor is already clustered then the clustered version will be updated - * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly - * available durable store. - */ - def store[T <: Actor](actorAddress: String, actorClass: Class[T], serializeMailbox: Boolean, serializer: Serializer): ClusterNode = - store(actorAddress, () ⇒ Actor.actorOf(actorClass, actorAddress), 0, Transient, serializeMailbox, serializer) - - /** - * Clusters an actor of a specific type. 
If the actor is already clustered then the clustered version will be updated - * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly - * available durable store. - */ - def store[T <: Actor](actorAddress: String, actorClass: Class[T], replicationScheme: ReplicationScheme, serializeMailbox: Boolean, serializer: Serializer): ClusterNode = - store(actorAddress, () ⇒ Actor.actorOf(actorClass, actorAddress), 0, replicationScheme, serializeMailbox, serializer) - - /** - * Clusters an actor of a specific type. If the actor is already clustered then the clustered version will be updated - * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly - * available durable store. - */ - def store[T <: Actor](actorAddress: String, actorClass: Class[T], nrOfInstances: Int, serializeMailbox: Boolean, serializer: Serializer): ClusterNode = - store(actorAddress, () ⇒ Actor.actorOf(actorClass, actorAddress), nrOfInstances, Transient, serializeMailbox, serializer) - - /** - * Clusters an actor of a specific type. If the actor is already clustered then the clustered version will be updated - * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly - * available durable store. - */ - def store[T <: Actor](actorAddress: String, actorClass: Class[T], nrOfInstances: Int, replicationScheme: ReplicationScheme, serializeMailbox: Boolean, serializer: Serializer): ClusterNode = - store(actorAddress, () ⇒ Actor.actorOf(actorClass, actorAddress), nrOfInstances, replicationScheme, serializeMailbox, serializer) - - /** - * Clusters an actor with UUID. If the actor is already clustered then the clustered version will be updated - * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly - * available durable store. - */ - def store(actorAddress: String, actorFactory: () ⇒ ActorRef, serializer: Serializer): ClusterNode = - store(actorAddress, actorFactory, 0, Transient, false, serializer) - - /** - * Clusters an actor with UUID. If the actor is already clustered then the clustered version will be updated - * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly - * available durable store. - */ - def store(actorAddress: String, actorFactory: () ⇒ ActorRef, serializeMailbox: Boolean, serializer: Serializer): ClusterNode = - store(actorAddress, actorFactory, 0, Transient, serializeMailbox, serializer) - - /** - * Clusters an actor with UUID. If the actor is already clustered then the clustered version will be updated - * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly - * available durable store. - */ - def store(actorAddress: String, actorFactory: () ⇒ ActorRef, replicationScheme: ReplicationScheme, serializer: Serializer): ClusterNode = - store(actorAddress, actorFactory, 0, replicationScheme, false, serializer) - - /** - * Clusters an actor with UUID. If the actor is already clustered then the clustered version will be updated - * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly - * available durable store. - */ - def store(actorAddress: String, actorFactory: () ⇒ ActorRef, nrOfInstances: Int, serializer: Serializer): ClusterNode = - store(actorAddress, actorFactory, nrOfInstances, Transient, false, serializer) - - /** - * Clusters an actor with UUID. 
If the actor is already clustered then the clustered version will be updated - * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly - * available durable store. - */ - def store(actorAddress: String, actorFactory: () ⇒ ActorRef, nrOfInstances: Int, replicationScheme: ReplicationScheme, serializer: Serializer): ClusterNode = - store(actorAddress, actorFactory, nrOfInstances, replicationScheme, false, serializer) - - /** - * Clusters an actor with UUID. If the actor is already clustered then the clustered version will be updated - * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly - * available durable store. - */ - def store(actorAddress: String, actorFactory: () ⇒ ActorRef, nrOfInstances: Int, serializeMailbox: Boolean, serializer: Serializer): ClusterNode = - store(actorAddress, actorFactory, nrOfInstances, Transient, serializeMailbox, serializer) - - /** - * Clusters an actor with UUID. If the actor is already clustered then the clustered version will be updated - * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly - * available durable store. - */ - def store(actorAddress: String, actorFactory: () ⇒ ActorRef, replicationScheme: ReplicationScheme, serializeMailbox: Boolean, serializer: Serializer): ClusterNode = - store(actorAddress, actorFactory, 0, replicationScheme, serializeMailbox, serializer) - - /** - * Needed to have reflection through structural typing work. - */ - def store(actorAddress: String, actorFactory: () ⇒ ActorRef, nrOfInstances: Int, replicationScheme: ReplicationScheme, serializeMailbox: Boolean, serializer: AnyRef): ClusterNode = - store(actorAddress, actorFactory, nrOfInstances, replicationScheme, serializeMailbox, serializer.asInstanceOf[Serializer]) - - /** - * Needed to have reflection through structural typing work. - */ - def store(actorAddress: String, actorFactory: () ⇒ ActorRef, nrOfInstances: Int, serializeMailbox: Boolean, serializer: AnyRef): ClusterNode = - store(actorAddress, actorFactory, nrOfInstances, Transient, serializeMailbox, serializer) - - /** - * Clusters an actor. If the actor is already clustered then the clustered version will be updated - * with the actor passed in as argument. You can use this to save off snapshots of the actor to a highly - * available durable store. 
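- *
- * For illustration, using one of the overloads above (HelloActor and
- * helloSerializer are assumed names, not defined in this patch):
- * <pre>
- *   Cluster.node.store("hello-service", classOf[HelloActor], 3, helloSerializer)  // illustrative names
- * </pre>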
- */ - def store( - actorAddress: String, - actorFactory: () ⇒ ActorRef, - nrOfInstances: Int, - replicationScheme: ReplicationScheme, - serializeMailbox: Boolean, - serializer: Serializer): ClusterNode = { - - EventHandler.debug(this, - "Storing actor with address [%s] in cluster".format(actorAddress)) - - val actorFactoryBytes = - Serialization.serialize(actorFactory) match { - case Left(error) ⇒ throw error - case Right(bytes) ⇒ - if (shouldCompressData) LZF.compress(bytes) - else bytes - } - - val actorAddressRegistryPath = actorAddressRegistryPathFor(actorAddress) - - // create ADDRESS -> Array[Byte] for actor registry - try { - zkClient.writeData(actorAddressRegistryPath, actorFactoryBytes) - } catch { - case e: ZkNoNodeException ⇒ // if not stored yet, store the actor - zkClient.retryUntilConnected(new Callable[Either[String, Exception]]() { - def call: Either[String, Exception] = { - try { - Left(zkClient.connection.create(actorAddressRegistryPath, actorFactoryBytes, CreateMode.PERSISTENT)) - } catch { - case e: KeeperException.NodeExistsException ⇒ Right(e) - } - } - }) match { - case Left(path) ⇒ path - case Right(exception) ⇒ actorAddressRegistryPath - } - } - - // create ADDRESS -> SERIALIZER CLASS NAME mapping - try { - zkClient.createPersistent(actorAddressRegistrySerializerPathFor(actorAddress), serializer.identifier.toString) - } catch { - case e: ZkNodeExistsException ⇒ zkClient.writeData(actorAddressRegistrySerializerPathFor(actorAddress), serializer.identifier.toString) - } - - // create ADDRESS -> NODE mapping - ignore[ZkNodeExistsException](zkClient.createPersistent(actorAddressToNodesPathFor(actorAddress))) - - // create ADDRESS -> UUIDs mapping - ignore[ZkNodeExistsException](zkClient.createPersistent(actorAddressToUuidsPathFor(actorAddress))) - - useActorOnNodes(nodesForNrOfInstances(nrOfInstances, Some(actorAddress)).toArray, actorAddress) - - this - } - - /** - * Removes actor from the cluster. - */ - // def remove(actorRef: ActorRef) { - // remove(actorRef.address) - // } - - /** - * Removes actor with uuid from the cluster. - */ - // def remove(actorAddress: String) { - // releaseActorOnAllNodes(actorAddress) - // // warning: ordering matters here - // // FIXME remove ADDRESS to UUID mapping? - // ignore[ZkNoNodeException](zkClient.deleteRecursive(actorAddressToUuidsPathFor(actorAddress))) - // ignore[ZkNoNodeException](zkClient.deleteRecursive(actorAddressRegistryPathFor(actorAddress))) - // ignore[ZkNoNodeException](zkClient.deleteRecursive(actorAddressToNodesPathFor(actorAddress))) - // } - - /** - * Is the actor with uuid clustered or not? - */ - def isClustered(actorAddress: String): Boolean = zkClient.exists(actorAddressRegistryPathFor(actorAddress)) - - /** - * Is the actor with uuid in use on 'this' node or not? - */ - def isInUseOnNode(actorAddress: String): Boolean = isInUseOnNode(actorAddress, nodeAddress) - - /** - * Is the actor with uuid in use or not? - */ - def isInUseOnNode(actorAddress: String, node: NodeAddress): Boolean = zkClient.exists(actorAddressToNodesPathFor(actorAddress, node.nodeName)) - - /** - * Is the actor with uuid in use or not? - */ - def isInUseOnNode(actorAddress: String, nodeName: String): Boolean = zkClient.exists(actorAddressToNodesPathFor(actorAddress, nodeName)) - - /** - * Checks out an actor for use on this node, e.g. checked out as a 'LocalActorRef' but it makes it available - * for remote access through lookup by its UUID. 
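- *
- * For illustration (the actor address is assumed):
- * <pre>
- *   val refOption: Option[LocalActorRef] = Cluster.node.use("hello-service")
- * </pre>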
- */ - def use[T <: Actor](actorAddress: String): Option[LocalActorRef] = { - val nodeName = nodeAddress.nodeName - - val actorFactoryPath = actorAddressRegistryPathFor(actorAddress) - zkClient.retryUntilConnected(new Callable[Either[Exception, () ⇒ LocalActorRef]]() { - def call: Either[Exception, () ⇒ LocalActorRef] = { - try { - - val actorFactoryBytes = - if (shouldCompressData) LZF.uncompress(zkClient.connection.readData(actorFactoryPath, new Stat, false)) - else zkClient.connection.readData(actorFactoryPath, new Stat, false) - - val actorFactory = - Serialization.deserialize(actorFactoryBytes, classOf[() ⇒ LocalActorRef], None) match { - case Left(error) ⇒ throw error - case Right(instance) ⇒ instance.asInstanceOf[() ⇒ LocalActorRef] - } - - Right(actorFactory) - } catch { - case e: KeeperException.NoNodeException ⇒ Left(e) - } - } - }) match { - case Left(exception) ⇒ throw exception - case Right(actorFactory) ⇒ - val actorRef = actorFactory() - - EventHandler.debug(this, - "Checking out actor [%s] to be used on node [%s] as local actor" - .format(actorAddress, nodeName)) - - val uuid = actorRef.uuid - - // create UUID registry - ignore[ZkNodeExistsException](zkClient.createPersistent(actorUuidRegistryPathFor(uuid))) - - // create UUID -> NODE mapping - try { - zkClient.createPersistent(actorUuidRegistryNodePathFor(uuid), nodeName) - } catch { - case e: ZkNodeExistsException ⇒ zkClient.writeData(actorUuidRegistryNodePathFor(uuid), nodeName) - } - - // create UUID -> ADDRESS - try { - zkClient.createPersistent(actorUuidRegistryAddressPathFor(uuid), actorAddress) - } catch { - case e: ZkNodeExistsException ⇒ zkClient.writeData(actorUuidRegistryAddressPathFor(uuid), actorAddress) - } - - // create UUID -> REMOTE ADDRESS (InetSocketAddress) mapping - try { - zkClient.createPersistent(actorUuidRegistryRemoteAddressPathFor(uuid), remoteServerAddress) - } catch { - case e: ZkNodeExistsException ⇒ zkClient.writeData(actorUuidRegistryRemoteAddressPathFor(uuid), remoteServerAddress) - } - - // create ADDRESS -> UUID mapping - try { - zkClient.createPersistent(actorAddressRegistryUuidPathFor(actorAddress), uuid) - } catch { - case e: ZkNodeExistsException ⇒ zkClient.writeData(actorAddressRegistryUuidPathFor(actorAddress), uuid) - } - - // create NODE -> UUID mapping - ignore[ZkNodeExistsException](zkClient.createPersistent(nodeToUuidsPathFor(nodeName, uuid), true)) - - // create ADDRESS -> UUIDs mapping - ignore[ZkNodeExistsException](zkClient.createPersistent(actorAddressToUuidsPathFor(actorAddress, uuid))) - - // create ADDRESS -> NODE mapping - ignore[ZkNodeExistsException](zkClient.createPersistent(actorAddressToNodesPathFor(actorAddress, nodeName))) - - actorRef - } - } - - /** - * Using (checking out) actor on a specific set of nodes. 
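- *
- * For illustration (given a ClusterNode instance 'node'; node names and actor
- * address are assumed):
- * <pre>
- *   node.useActorOnNodes(Array("node2", "node3"), "hello-service")
- * </pre>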
- */ - def useActorOnNodes(nodes: Array[String], actorAddress: String, replicateFromUuid: Option[UUID] = None) { - EventHandler.debug(this, - "Sending command to nodes [%s] for checking out actor [%s]".format(nodes.mkString(", "), actorAddress)) - - val builder = RemoteSystemDaemonMessageProtocol.newBuilder - .setMessageType(USE) - .setActorAddress(actorAddress) - - // set the UUID to replicated from - if available - replicateFromUuid foreach (uuid ⇒ builder.setReplicateActorFromUuid(uuidToUuidProtocol(uuid))) - - val command = builder.build - - nodes foreach { node ⇒ - nodeConnections.get.connections(node) foreach { - case (address, connection) ⇒ - sendCommandToNode(connection, command, async = false) - } - } - } - - /** - * Using (checking out) actor on all nodes in the cluster. - */ - def useActorOnAllNodes(actorAddress: String, replicateFromUuid: Option[UUID] = None) { - useActorOnNodes(membershipNodes, actorAddress, replicateFromUuid) - } - - /** - * Using (checking out) actor on a specific node. - */ - def useActorOnNode(node: String, actorAddress: String, replicateFromUuid: Option[UUID] = None) { - useActorOnNodes(Array(node), actorAddress, replicateFromUuid) - } - - /** - * Checks in an actor after done using it on this node. - */ - def release(actorRef: ActorRef) { - release(actorRef.address) - } - - /** - * Checks in an actor after done using it on this node. - */ - def release(actorAddress: String) { - - // FIXME 'Cluster.release' needs to notify all existing ClusterActorRef's that are using the instance that it is no - // longer available. Then what to do? Should we even remove this method? - - ignore[ZkNoNodeException](zkClient.delete(actorAddressToNodesPathFor(actorAddress, nodeAddress.nodeName))) - - uuidsForActorAddress(actorAddress) foreach { uuid ⇒ - EventHandler.debug(this, - "Releasing actor [%s] with UUID [%s] after usage".format(actorAddress, uuid)) - - ignore[ZkNoNodeException](zkClient.deleteRecursive(nodeToUuidsPathFor(nodeAddress.nodeName, uuid))) - ignore[ZkNoNodeException](zkClient.delete(actorUuidRegistryRemoteAddressPathFor(uuid))) - } - } - - /** - * Releases (checking in) all actors with a specific address on all nodes in the cluster where the actor is in 'use'. - */ - private[akka] def releaseActorOnAllNodes(actorAddress: String) { - EventHandler.debug(this, - "Releasing (checking in) all actors with address [%s] on all nodes in cluster".format(actorAddress)) - - val command = RemoteSystemDaemonMessageProtocol.newBuilder - .setMessageType(RELEASE) - .setActorAddress(actorAddress) - .build - - nodesForActorsInUseWithAddress(actorAddress) foreach { node ⇒ - nodeConnections.get.connections(node) foreach { - case (_, connection) ⇒ sendCommandToNode(connection, command, async = true) - } - } - } - - /** - * Creates an ActorRef with a Router to a set of clustered actors. - */ - def ref(actorAddress: String, router: RouterType, failureDetector: FailureDetectorType): ActorRef = - ClusterActorRef.newRef(actorAddress, router, failureDetector, Actor.TIMEOUT) - - /** - * Returns the UUIDs of all actors checked out on this node. - */ - private[akka] def uuidsForActorsInUse: Array[UUID] = uuidsForActorsInUseOnNode(nodeAddress.nodeName) - - /** - * Returns the addresses of all actors checked out on this node. - */ - def addressesForActorsInUse: Array[String] = actorAddressForUuids(uuidsForActorsInUse) - - /** - * Returns the UUIDs of all actors registered in this cluster. 
- */ - private[akka] def uuidsForClusteredActors: Array[UUID] = - zkClient.getChildren(ACTOR_UUID_REGISTRY_PATH).toList.map(new UUID(_)).toArray.asInstanceOf[Array[UUID]] - - /** - * Returns the addresses of all actors registered in this cluster. - */ - def addressesForClusteredActors: Array[String] = actorAddressForUuids(uuidsForClusteredActors) - - /** - * Returns the actor id for the actor with a specific UUID. - */ - private[akka] def actorAddressForUuid(uuid: UUID): Option[String] = { - try { - Some(zkClient.readData(actorUuidRegistryAddressPathFor(uuid)).asInstanceOf[String]) - } catch { - case e: ZkNoNodeException ⇒ None - } - } - - /** - * Returns the actor ids for all the actors with a specific UUID. - */ - private[akka] def actorAddressForUuids(uuids: Array[UUID]): Array[String] = - uuids map (actorAddressForUuid(_)) filter (_.isDefined) map (_.get) - - /** - * Returns the actor UUIDs for actor ID. - */ - private[akka] def uuidsForActorAddress(actorAddress: String): Array[UUID] = { - try { - zkClient.getChildren(actorAddressToUuidsPathFor(actorAddress)).toList.toArray map { - case c: CharSequence ⇒ new UUID(c) - } filter (_ ne null) - } catch { - case e: ZkNoNodeException ⇒ Array[UUID]() - } - } - - /** - * Returns the node names of all actors in use with UUID. - */ - private[akka] def nodesForActorsInUseWithAddress(actorAddress: String): Array[String] = { - try { - zkClient.getChildren(actorAddressToNodesPathFor(actorAddress)).toList.toArray.asInstanceOf[Array[String]] - } catch { - case e: ZkNoNodeException ⇒ Array[String]() - } - } - - /** - * Returns the UUIDs of all actors in use registered on a specific node. - */ - private[akka] def uuidsForActorsInUseOnNode(nodeName: String): Array[UUID] = { - try { - zkClient.getChildren(nodeToUuidsPathFor(nodeName)).toList.toArray map { - case c: CharSequence ⇒ new UUID(c) - } filter (_ ne null) - } catch { - case e: ZkNoNodeException ⇒ Array[UUID]() - } - } - - /** - * Returns the addresses of all actors in use registered on a specific node. - */ - def addressesForActorsInUseOnNode(nodeName: String): Array[String] = { - val uuids = - try { - zkClient.getChildren(nodeToUuidsPathFor(nodeName)).toList.toArray map { - case c: CharSequence ⇒ new UUID(c) - } filter (_ ne null) - } catch { - case e: ZkNoNodeException ⇒ Array[UUID]() - } - actorAddressForUuids(uuids) - } - - /** - * Returns Serializer for actor with specific address. - */ - def serializerForActor(actorAddress: String): Serializer = try { - Serialization.serializerByIdentity(zkClient.readData(actorAddressRegistrySerializerPathFor(actorAddress), new Stat).asInstanceOf[String].toByte) - } catch { - case e: ZkNoNodeException ⇒ throw new IllegalStateException("No serializer found for actor with address [%s]".format(actorAddress)) - } - - /** - * Returns addresses for nodes that the clustered actor is in use on. 
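- *
- * For illustration (given a ClusterNode instance 'node'; the actor address is assumed):
- * <pre>
- *   val endpoints: Array[(UUID, InetSocketAddress)] = node.inetSocketAddressesForActor("hello-service")
- * </pre>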
- */ - def inetSocketAddressesForActor(actorAddress: String): Array[(UUID, InetSocketAddress)] = { - try { - for { - uuid ← uuidsForActorAddress(actorAddress) - } yield { - val remoteAddress = zkClient.readData(actorUuidRegistryRemoteAddressPathFor(uuid)).asInstanceOf[InetSocketAddress] - (uuid, remoteAddress) - } - } catch { - case e: ZkNoNodeException ⇒ - EventHandler.warning(this, - "Could not retrieve remote socket address for node hosting actor [%s] due to: %s" - .format(actorAddress, e.toString)) - Array[(UUID, InetSocketAddress)]() - } - } - - // ======================================= - // Compute Grid - // ======================================= - - /** - * Send a function 'Function0[Unit]' to be invoked on a random number of nodes (defined by 'nrOfInstances' argument). - */ - def send(f: Function0[Unit], nrOfInstances: Int) { - Serialization.serialize(f) match { - case Left(error) ⇒ throw error - case Right(bytes) ⇒ - val message = RemoteSystemDaemonMessageProtocol.newBuilder - .setMessageType(FUNCTION_FUN0_UNIT) - .setPayload(ByteString.copyFrom(bytes)) - .build - nodeConnectionsForNrOfInstances(nrOfInstances) foreach (_ ! message) - } - } - - /** - * Send a function 'Function0[Any]' to be invoked on a random number of nodes (defined by 'nrOfInstances' argument). - * Returns a 'List' with all the 'Future's from the computation. - */ - def send(f: Function0[Any], nrOfInstances: Int): List[Future[Any]] = { - Serialization.serialize(f) match { - case Left(error) ⇒ throw error - case Right(bytes) ⇒ - val message = RemoteSystemDaemonMessageProtocol.newBuilder - .setMessageType(FUNCTION_FUN0_ANY) - .setPayload(ByteString.copyFrom(bytes)) - .build - val results = nodeConnectionsForNrOfInstances(nrOfInstances) map (_ ? message) - results.toList.asInstanceOf[List[Future[Any]]] - } - } - - /** - * Send a function 'Function1[Any, Unit]' to be invoked on a random number of nodes (defined by 'nrOfInstances' argument) - * with the argument specified. - */ - def send(f: Function1[Any, Unit], arg: Any, nrOfInstances: Int) { - Serialization.serialize((f, arg)) match { - case Left(error) ⇒ throw error - case Right(bytes) ⇒ - val message = RemoteSystemDaemonMessageProtocol.newBuilder - .setMessageType(FUNCTION_FUN1_ARG_UNIT) - .setPayload(ByteString.copyFrom(bytes)) - .build - nodeConnectionsForNrOfInstances(nrOfInstances) foreach (_ ! message) - } - } - - /** - * Send a function 'Function1[Any, Any]' to be invoked on a random number of nodes (defined by 'nrOfInstances' argument) - * with the argument specified. - * Returns a 'List' with all the 'Future's from the computation. - */ - def send(f: Function1[Any, Any], arg: Any, nrOfInstances: Int): List[Future[Any]] = { - Serialization.serialize((f, arg)) match { - case Left(error) ⇒ throw error - case Right(bytes) ⇒ - val message = RemoteSystemDaemonMessageProtocol.newBuilder - .setMessageType(FUNCTION_FUN1_ARG_ANY) - .setPayload(ByteString.copyFrom(bytes)) - .build - val results = nodeConnectionsForNrOfInstances(nrOfInstances) map (_ ? message) - results.toList.asInstanceOf[List[Future[Any]]] - } - } - - // ======================================= - // Config - // ======================================= - - /** - * Stores a configuration element under a specific key. - * If the key already exists then it will be overwritten.
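- *
- * For illustration (given a ClusterNode instance 'node'; the key and value are assumed):
- * <pre>
- *   node.setConfigElement("datacenter", "us-east-1".getBytes("UTF-8"))
- *   val value = node.getConfigElement("datacenter") map (new String(_, "UTF-8"))
- * </pre>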
- */ - def setConfigElement(key: String, bytes: Array[Byte]) { - val compressedBytes = if (shouldCompressData) LZF.compress(bytes) else bytes - EventHandler.debug(this, - "Adding config value [%s] under key [%s] in cluster registry".format(compressedBytes, key)) - zkClient.retryUntilConnected(new Callable[Either[Unit, Exception]]() { - def call: Either[Unit, Exception] = { - try { - Left(zkClient.connection.create(configurationPathFor(key), compressedBytes, CreateMode.PERSISTENT)) - } catch { - case e: KeeperException.NodeExistsException ⇒ - try { - Left(zkClient.connection.writeData(configurationPathFor(key), compressedBytes)) - } catch { - case e: Exception ⇒ Right(e) - } - } - } - }) match { - case Left(_) ⇒ /* do nothing */ - case Right(exception) ⇒ throw exception - } - } - - /** - * Returns the config element for the key. - * Returns Some(element) if it exists, else None. - */ - def getConfigElement(key: String): Option[Array[Byte]] = try { - Some(zkClient.connection.readData(configurationPathFor(key), new Stat, true)) - } catch { - case e: KeeperException.NoNodeException ⇒ None - } - - /** - * Removes configuration element for a specific key. - * Does nothing if the key does not exist. - */ - def removeConfigElement(key: String) { - ignore[ZkNoNodeException] { - EventHandler.debug(this, - "Removing config element with key [%s] from cluster registry".format(key)) - zkClient.deleteRecursive(configurationPathFor(key)) - } - } - - /** - * Returns an array with all config element keys. - */ - def getConfigElementKeys: Array[String] = zkClient.getChildren(CONFIGURATION_PATH).toList.toArray.asInstanceOf[Array[String]] - - // ======================================= - // Private - // ======================================= - - private def sendCommandToNode(connection: ActorRef, command: RemoteSystemDaemonMessageProtocol, async: Boolean = true) { - if (async) { - connection ! command - } else { - try { - Await.result(connection ?
(command, remoteDaemonAckTimeout), 10 seconds).asInstanceOf[Status] match { - case Success(status) ⇒ - EventHandler.debug(this, "Remote command sent to [%s] successfully received".format(status)) - case Failure(cause) ⇒ - EventHandler.error(cause, this, cause.toString) - throw cause - } - } catch { - case e: TimeoutException => - EventHandler.error(e, this, "Remote command to [%s] timed out".format(connection.address)) - throw e - case e: Exception ⇒ - EventHandler.error(e, this, "Could not send remote command to [%s] due to: %s".format(connection.address, e.toString)) - throw e - } - } - } - - private[cluster] def membershipPathFor(node: String): String = "%s/%s".format(MEMBERSHIP_PATH, node) - - private[cluster] def configurationPathFor(key: String): String = "%s/%s".format(CONFIGURATION_PATH, key) - - private[cluster] def actorAddressToNodesPathFor(actorAddress: String): String = "%s/%s".format(ACTOR_ADDRESS_NODES_TO_PATH, actorAddress) - - private[cluster] def actorAddressToNodesPathFor(actorAddress: String, nodeName: String): String = "%s/%s".format(actorAddressToNodesPathFor(actorAddress), nodeName) - - private[cluster] def nodeToUuidsPathFor(node: String): String = "%s/%s".format(NODE_TO_ACTOR_UUIDS_PATH, node) - - private[cluster] def nodeToUuidsPathFor(node: String, uuid: UUID): String = "%s/%s/%s".format(NODE_TO_ACTOR_UUIDS_PATH, node, uuid) - - private[cluster] def actorAddressRegistryPathFor(actorAddress: String): String = "%s/%s".format(ACTOR_ADDRESS_REGISTRY_PATH, actorAddress) - - private[cluster] def actorAddressRegistrySerializerPathFor(actorAddress: String): String = "%s/%s".format(actorAddressRegistryPathFor(actorAddress), "serializer") - - private[cluster] def actorAddressRegistryUuidPathFor(actorAddress: String): String = "%s/%s".format(actorAddressRegistryPathFor(actorAddress), "uuid") - - private[cluster] def actorUuidRegistryPathFor(uuid: UUID): String = "%s/%s".format(ACTOR_UUID_REGISTRY_PATH, uuid) - - private[cluster] def actorUuidRegistryNodePathFor(uuid: UUID): String = "%s/%s".format(actorUuidRegistryPathFor(uuid), "node") - - private[cluster] def actorUuidRegistryAddressPathFor(uuid: UUID): String = "%s/%s".format(actorUuidRegistryPathFor(uuid), "address") - - private[cluster] def actorUuidRegistryRemoteAddressPathFor(uuid: UUID): String = "%s/%s".format(actorUuidRegistryPathFor(uuid), "remote-address") - - private[cluster] def actorAddressToUuidsPathFor(actorAddress: String): String = "%s/%s".format(ACTOR_ADDRESS_TO_UUIDS_PATH, actorAddress.replace('.', '_')) - - private[cluster] def actorAddressToUuidsPathFor(actorAddress: String, uuid: UUID): String = "%s/%s".format(actorAddressToUuidsPathFor(actorAddress), uuid) - - /** - * Returns a random set with node names of size 'nrOfInstances'. - * Default nrOfInstances is 0, which returns the empty Set. 
- */ - private def nodesForNrOfInstances(nrOfInstances: Int = 0, actorAddress: Option[String] = None): Set[String] = { - var replicaNames = Set.empty[String] - val nrOfClusterNodes = nodeConnections.get.connections.size - - if (nrOfInstances < 1) return replicaNames - if (nrOfClusterNodes < nrOfInstances) throw new IllegalArgumentException( - "Replication factor [" + nrOfInstances + - "] is greater than the number of available nodeNames [" + nrOfClusterNodes + "]") - - val preferredNodes = - if (actorAddress.isDefined) { - // use 'preferred-nodes' in deployment config for the actor - Deployer.deploymentFor(actorAddress.get) match { - case Deploy(_, _, _, _, Cluster(nodes, _, _)) ⇒ - nodes map (node ⇒ DeploymentConfig.nodeNameFor(node)) take nrOfInstances - case _ ⇒ - throw new ClusterException("Actor [" + actorAddress.get + "] is not configured as clustered") - } - } else Vector.empty[String] - - for { - nodeName ← preferredNodes - key ← nodeConnections.get.connections.keys - if key == nodeName - } replicaNames = replicaNames + nodeName - - val nrOfCurrentReplicaNames = replicaNames.size - - val replicaSet = - if (nrOfCurrentReplicaNames > nrOfInstances) throw new IllegalStateException("Replica set is larger than replication factor") - else if (nrOfCurrentReplicaNames == nrOfInstances) replicaNames - else { - val random = new java.util.Random(System.currentTimeMillis) - while (replicaNames.size < nrOfInstances) { - replicaNames = replicaNames + membershipNodes(random.nextInt(nrOfClusterNodes)) - } - replicaNames - } - - EventHandler.debug(this, - "Picked out replica set [%s] for actor [%s]".format(replicaSet.mkString(", "), actorAddress)) - - replicaSet - } - - /** - * Returns a random set with replica connections of size 'nrOfInstances'. - * Default nrOfInstances is 0, which returns the empty Set. - */ - private def nodeConnectionsForNrOfInstances(nrOfInstances: Int = 0, actorAddress: Option[String] = None): Set[ActorRef] = { - for { - node ← nodesForNrOfInstances(nrOfInstances, actorAddress) - connectionOption ← nodeConnections.get.connections(node) - connection ← connectionOption - actorRef ← connection._2 - } yield actorRef - } - - /** - * Update the list of connections to other nodes in the cluster. - * Tail recursive, using lockless optimistic concurrency.
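- *
- * The retry idiom used below, in miniature (an illustrative sketch, not part of
- * this class):
- * <pre>
- *   @tailrec def transform(ref: AtomicReference[VersionedConnectionState],
- *                          f: VersionedConnectionState ⇒ VersionedConnectionState) {
- *     val oldState = ref.get
- *     if (!ref.compareAndSet(oldState, f(oldState))) transform(ref, f)  // lost the race: retry
- *   }
- * </pre>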
- * - * @return a Map with the remote socket addresses of the disconnected node connections - */ - @tailrec - final private[cluster] def connectToAllNewlyArrivedMembershipNodesInCluster( - newlyConnectedMembershipNodes: Traversable[String], - newlyDisconnectedMembershipNodes: Traversable[String]): Map[String, InetSocketAddress] = { - - var change = false - val oldState = nodeConnections.get - - var newConnections = oldState.connections //Map.empty[String, Tuple2[InetSocketAddress, ActorRef]] - - // cache the disconnected connections in a map, needed for fail-over of these connections later - var disconnectedConnections = Map.empty[String, InetSocketAddress] - newlyDisconnectedMembershipNodes foreach { node ⇒ - disconnectedConnections = disconnectedConnections + (node -> (oldState.connections(node) match { - case (address, _) ⇒ address - })) - } - - // remove connections to failed nodes - newlyDisconnectedMembershipNodes foreach { node ⇒ - newConnections = newConnections - node - change = true - } - - // add connections to newly arrived nodes - newlyConnectedMembershipNodes foreach { node ⇒ - if (!newConnections.contains(node)) { - - // only connect to each replica once - remoteSocketAddressForNode(node) foreach { address ⇒ - EventHandler.debug(this, "Setting up connection to node with nodename [%s] and address [%s]".format(node, address)) - - val clusterDaemon = remoteService.actorFor( - RemoteClusterDaemon.Address, address.getHostName, address.getPort) - newConnections = newConnections + (node -> (address, clusterDaemon)) - change = true - } - } - } - - // add the remote connection to 'this' node as well, but as a 'local' actor - if (includeRefNodeInReplicaSet) - newConnections = newConnections + (nodeAddress.nodeName -> (remoteServerAddress, remoteDaemon)) - - // there was a state change, so we are now going to update the state. - val newState = new VersionedConnectionState(oldState.version + 1, newConnections) - - if (!nodeConnections.compareAndSet(oldState, newState)) { - // we failed to set the state, try again - connectToAllNewlyArrivedMembershipNodesInCluster( - newlyConnectedMembershipNodes, newlyDisconnectedMembershipNodes) - } else { - // we succeeded to set the state, return - EventHandler.info(this, "Connected to nodes [\n\t%s]".format(newConnections.mkString("\n\t"))) - disconnectedConnections - } - } - - private[cluster] def joinCluster() { - try { - EventHandler.info(this, - "Joining cluster as membership node [%s] on [%s]".format(nodeAddress, membershipNodePath)) - zkClient.createEphemeral(membershipNodePath, remoteServerAddress) - } catch { - case e: ZkNodeExistsException ⇒ - e.printStackTrace - val error = new ClusterException( - "Can't join the cluster.
The node name [" + nodeAddress.nodeName + "] is already in use by another node.") - EventHandler.error(error, this, error.toString) - throw error - } - ignore[ZkNodeExistsException](zkClient.createPersistent(nodeToUuidsPathFor(nodeAddress.nodeName))) - } - - private[cluster] def joinLeaderElection(): Boolean = { - EventHandler.info(this, "Node [%s] is joining leader election".format(nodeAddress.nodeName)) - try { - leaderLock.lock - } catch { - case e: KeeperException.NodeExistsException ⇒ false - } - } - - private[cluster] def remoteSocketAddressForNode(node: String): Option[InetSocketAddress] = { - try { - Some(zkClient.readData(membershipPathFor(node), new Stat).asInstanceOf[InetSocketAddress]) - } catch { - case e: ZkNoNodeException ⇒ None - } - } - - private[cluster] def failOverClusterActorRefConnections(from: InetSocketAddress, to: InetSocketAddress) { - EventHandler.info(this, "Failing over ClusterActorRef from %s to %s".format(from, to)) - clusterActorRefs.valueIterator(from) foreach (_.failOver(from, to)) - } - - private[cluster] def migrateActorsOnFailedNodes( - failedNodes: List[String], - currentClusterNodes: List[String], - oldClusterNodes: List[String], - disconnectedConnections: Map[String, InetSocketAddress]) { - - failedNodes.foreach { failedNodeName ⇒ - - val failedNodeAddress = NodeAddress(nodeAddress.clusterName, failedNodeName) - - val myIndex = oldClusterNodes.indexWhere(_.endsWith(nodeAddress.nodeName)) - val failedNodeIndex = oldClusterNodes.indexWhere(_ == failedNodeName) - - // Migrate to the successor of the failed node (using a sorted circular list of the node names) - if ((failedNodeIndex == 0 && myIndex == oldClusterNodes.size - 1) || // No leftmost successor exists, check the tail - (failedNodeIndex == myIndex + 1)) { - // Am I the leftmost successor? - - // Takes the lead of migrating the actors. Not all to this node. - // All to this node except if the actor already resides here, then pick another node it is not already on. - - // Yes I am the node to migrate the actor to (can only be one in the cluster) - val actorUuidsForFailedNode = zkClient.getChildren(nodeToUuidsPathFor(failedNodeName)).toList - - actorUuidsForFailedNode.foreach { uuidAsString ⇒ - EventHandler.debug(this, - "Cluster node [%s] has failed, migrating actor with UUID [%s] to [%s]" - .format(failedNodeName, uuidAsString, nodeAddress.nodeName)) - - val uuid = uuidFrom(uuidAsString) - val actorAddress = actorAddressForUuid(uuid).getOrElse( - throw new IllegalStateException("No actor address found for UUID [" + uuidAsString + "]")) - - val migrateToNodeAddress = - if (!isShutdown && isInUseOnNode(actorAddress)) { - // already in use on this node, pick another node to instantiate the actor on - val replicaNodesForActor = nodesForActorsInUseWithAddress(actorAddress) - val nodesAvailableForMigration = (currentClusterNodes.toSet diff failedNodes.toSet) diff replicaNodesForActor.toSet - - if (nodesAvailableForMigration.isEmpty) throw new ClusterException( - "Can not migrate actor to new node since there are not any available nodes left. " + - "(However, the actor already has >1 replica in cluster, so we are ok)") - - NodeAddress(nodeAddress.clusterName, nodesAvailableForMigration.head) - } else { - // actor is not in use on this node, migrate it here - nodeAddress - } - - // if actor is replicated => pass along the UUID for the actor to replicate from (replay transaction log etc.) 
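The successor test above is easier to read as modular arithmetic over the sorted, circular node list. A hedged sketch equivalent to the two-branch condition (the helper name is hypothetical, and it assumes exact node names in a sorted, duplicate-free ring):

// True when `failed` sits immediately after `me` in the circular node
// ring, i.e. this node is the single one that must take over the migration.
def isResponsibleForMigration(me: String, failed: String, ring: IndexedSeq[String]): Boolean = {
  val myIndex = ring.indexOf(me)
  val failedIndex = ring.indexOf(failed)
  myIndex >= 0 && failedIndex >= 0 && failedIndex == (myIndex + 1) % ring.size
}

The wrap-around branch (failed node at index 0, this node last) and the adjacent branch (failedNodeIndex == myIndex + 1) both collapse into the single modulo test.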
- val replicateFromUuid = - if (isReplicated(actorAddress)) Some(uuid) - else None - - migrateWithoutCheckingThatActorResidesOnItsHomeNode( - failedNodeAddress, - migrateToNodeAddress, - actorAddress, - replicateFromUuid) - } - - // notify all available nodes that they should fail-over all connections from 'from' to 'to' - val from = disconnectedConnections(failedNodeName) - val to = remoteServerAddress - - Serialization.serialize((from, to)) match { - case Left(error) ⇒ throw error - case Right(bytes) ⇒ - - val command = RemoteSystemDaemonMessageProtocol.newBuilder - .setMessageType(FAIL_OVER_CONNECTIONS) - .setPayload(ByteString.copyFrom(bytes)) - .build - - // FIXME now we are broadcasting to ALL nodes in the cluster even though a fraction might have a reference to the actors - should that be fixed? - nodeConnections.get.connections.values foreach { - case (_, connection) ⇒ sendCommandToNode(connection, command, async = true) - } - } - } - } - } - - /** - * Used when the ephemeral "home" node is already gone, so we can't check if it is available. - */ - private def migrateWithoutCheckingThatActorResidesOnItsHomeNode( - from: NodeAddress, to: NodeAddress, actorAddress: String, replicateFromUuid: Option[UUID]) { - - EventHandler.debug(this, "Migrating actor [%s] from node [%s] to node [%s]".format(actorAddress, from, to)) - if (!isInUseOnNode(actorAddress, to) && !isShutdown) { - release(actorAddress) - - val remoteAddress = remoteSocketAddressForNode(to.nodeName).getOrElse(throw new ClusterException("No remote address registered for [" + to.nodeName + "]")) - - ignore[ZkNoNodeException](zkClient.delete(actorAddressToNodesPathFor(actorAddress, from.nodeName))) - - // FIXME who takes care of this line? - //ignore[ZkNoNodeException](zkClient.delete(nodeToUuidsPathFor(from.nodeName, uuid))) - - // 'use' (check out) actor on the remote 'to' node - useActorOnNode(to.nodeName, actorAddress, replicateFromUuid) - } - } - - private def createZooKeeperPathStructureIfNeeded() { - ignore[ZkNodeExistsException] { - zkClient.create(CLUSTER_PATH, null, CreateMode.PERSISTENT) - EventHandler.info(this, "Created node [%s]".format(CLUSTER_PATH)) - } - - basePaths.foreach { path ⇒ - try { - ignore[ZkNodeExistsException](zkClient.create(path, null, CreateMode.PERSISTENT)) - EventHandler.debug(this, "Created node [%s]".format(path)) - } catch { - case e ⇒ - val error = new ClusterException(e.toString) - EventHandler.error(error, this) - throw error - } - } - } - - private def registerListeners() = { - zkClient.subscribeStateChanges(stateListener) - zkClient.subscribeChildChanges(MEMBERSHIP_PATH, membershipListener) - } - - private def unregisterListeners() = { - zkClient.unsubscribeStateChanges(stateListener) - zkClient.unsubscribeChildChanges(MEMBERSHIP_PATH, membershipListener) - } - - private def fetchMembershipNodes() { - val membershipChildren = zkClient.getChildren(MEMBERSHIP_PATH) - locallyCachedMembershipNodes.clear() - membershipChildren.iterator.foreach(locallyCachedMembershipNodes.add) - connectToAllNewlyArrivedMembershipNodesInCluster(membershipNodes, Nil) - } - - private def isReplicated(actorAddress: String): Boolean = DeploymentConfig.isReplicated(Deployer.deploymentFor(actorAddress)) - - private def createMBean = { - val clusterMBean = new StandardMBean(classOf[ClusterNodeMBean]) with ClusterNodeMBean { - - override def stop() = self.shutdown() - - override def disconnect() = self.disconnect() - - override def reconnect() = self.reconnect() - - override def resign() = self.resign() - - override 
def getNodeAddress = self.nodeAddress - - override def getRemoteServerHostname = self.hostname - - override def getRemoteServerPort = self.port - - override def getNodeName = self.nodeAddress.nodeName - - override def getClusterName = self.nodeAddress.clusterName - - override def getZooKeeperServerAddresses = self.zkServerAddresses - - override def getMemberNodes = self.locallyCachedMembershipNodes.iterator.map(_.toString).toArray - - override def getLeaderLockName = self.leader.toString - - override def isLeader = self.isLeader - - override def getUuidsForActorsInUse = self.uuidsForActorsInUse.map(_.toString).toArray - - override def getAddressesForActorsInUse = self.addressesForActorsInUse.map(_.toString).toArray - - override def getUuidsForClusteredActors = self.uuidsForClusteredActors.map(_.toString).toArray - - override def getAddressesForClusteredActors = self.addressesForClusteredActors.map(_.toString).toArray - - override def getNodesForActorInUseWithAddress(address: String) = self.nodesForActorsInUseWithAddress(address) - - override def getUuidsForActorsInUseOnNode(nodeName: String) = self.uuidsForActorsInUseOnNode(nodeName).map(_.toString).toArray - - override def getAddressesForActorsInUseOnNode(nodeName: String) = self.addressesForActorsInUseOnNode(nodeName).map(_.toString).toArray - - override def setConfigElement(key: String, value: String): Unit = self.setConfigElement(key, value.getBytes("UTF-8")) - - override def getConfigElement(key: String) = new String(self.getConfigElement(key).getOrElse(Array[Byte]()), "UTF-8") - - override def removeConfigElement(key: String): Unit = self.removeConfigElement(key) - - override def getConfigElementKeys = self.getConfigElementKeys.toArray - - override def getMembershipPathFor(node: String) = self.membershipPathFor(node) - - override def getConfigurationPathFor(key: String) = self.configurationPathFor(key) - - override def getActorAddresstoNodesPathFor(actorAddress: String) = self.actorAddressToNodesPathFor(actorAddress) - - override def getActorAddressToNodesPathForWithNodeName(actorAddress: String, nodeName: String) = self.actorAddressToNodesPathFor(actorAddress, nodeName) - - override def getNodeToUuidsPathFor(node: String) = self.nodeToUuidsPathFor(node) - - override def getNodeToUuidsPathFor(node: String, uuid: UUID) = self.nodeToUuidsPathFor(node, uuid) - - override def getActorAddressRegistryPathFor(actorAddress: String) = self.actorAddressRegistryPathFor(actorAddress) - - override def getActorAddressRegistrySerializerPathFor(actorAddress: String) = self.actorAddressRegistrySerializerPathFor(actorAddress) - - override def getActorAddressRegistryUuidPathFor(actorAddress: String) = self.actorAddressRegistryUuidPathFor(actorAddress) - - override def getActorUuidRegistryNodePathFor(uuid: UUID) = self.actorUuidRegistryNodePathFor(uuid) - - override def getActorUuidRegistryRemoteAddressPathFor(uuid: UUID) = self.actorUuidRegistryNodePathFor(uuid) - - override def getActorAddressToUuidsPathFor(actorAddress: String) = self.actorAddressToUuidsPathFor(actorAddress) - - override def getActorAddressToUuidsPathForWithNodeName(actorAddress: String, uuid: UUID) = self.actorAddressToUuidsPathFor(actorAddress, uuid) - } - - JMX.register(clusterJmxObjectName, clusterMBean) - - // FIXME need monitoring to lookup the cluster MBean dynamically - // Monitoring.registerLocalMBean(clusterJmxObjectName, clusterMBean) - } -} - -class MembershipChildListener(self: ClusterNode) extends IZkChildListener with ErrorHandler { - def handleChildChange(parentPath: 
String, currentChilds: JList[String]) { - withErrorHandler { - if (!self.isShutdown) { - if (currentChilds ne null) { - val currentClusterNodes = currentChilds.toList - if (!currentClusterNodes.isEmpty) EventHandler.debug(this, - "MembershipChildListener at [%s] has children [%s]" - .format(self.nodeAddress.nodeName, currentClusterNodes.mkString(" "))) - - // take a snapshot of the old cluster nodes and then update the list with the current connected nodes in the cluster - val oldClusterNodes = self.locallyCachedMembershipNodes.toArray.toSet.asInstanceOf[Set[String]] - self.locallyCachedMembershipNodes.clear() - currentClusterNodes foreach (self.locallyCachedMembershipNodes.add) - - val newlyConnectedMembershipNodes = (Set(currentClusterNodes: _*) diff oldClusterNodes).toList - val newlyDisconnectedMembershipNodes = (oldClusterNodes diff Set(currentClusterNodes: _*)).toList - - // update the connections with the new set of cluster nodes - val disconnectedConnections = self.connectToAllNewlyArrivedMembershipNodesInCluster(newlyConnectedMembershipNodes, newlyDisconnectedMembershipNodes) - - // if node(s) left cluster then migrate actors residing on the failed node - if (!newlyDisconnectedMembershipNodes.isEmpty) { - self.migrateActorsOnFailedNodes(newlyDisconnectedMembershipNodes, currentClusterNodes, oldClusterNodes.toList, disconnectedConnections) - } - - // publish NodeConnected and NodeDisconnect events to the listeners - newlyConnectedMembershipNodes foreach (node ⇒ self.publish(NodeConnected(node))) - newlyDisconnectedMembershipNodes foreach { node ⇒ - self.publish(NodeDisconnected(node)) - // remove metrics of a disconnected node from ZK and local cache - self.metricsManager.removeNodeMetrics(node) - } - } - } - } - } -} - -class StateListener(self: ClusterNode) extends IZkStateListener { - def handleStateChanged(state: KeeperState) { - state match { - case KeeperState.SyncConnected ⇒ - EventHandler.debug(this, "Cluster node [%s] - Connected".format(self.nodeAddress)) - self.publish(ThisNode.Connected) - case KeeperState.Disconnected ⇒ - EventHandler.debug(this, "Cluster node [%s] - Disconnected".format(self.nodeAddress)) - self.publish(ThisNode.Disconnected) - case KeeperState.Expired ⇒ - EventHandler.debug(this, "Cluster node [%s] - Expired".format(self.nodeAddress)) - self.publish(ThisNode.Expired) - } - } - - /** - * Re-initialize after the zookeeper session has expired and a new session has been created. - */ - def handleNewSession() { - EventHandler.debug(this, "Session expired re-initializing node [%s]".format(self.nodeAddress)) - self.boot() - self.publish(NewSession) - } -} - -trait ErrorHandler { - def withErrorHandler[T](body: ⇒ T) = { - try { - ignore[ZkInterruptedException](body) // FIXME Is it good to ignore ZkInterruptedException? If not, how should we handle it? - } catch { - case e: Throwable ⇒ - EventHandler.error(e, this, e.toString) - throw e - } - } -} - -object RemoteClusterDaemon { - val Address = "akka-cluster-daemon".intern - - // FIXME configure computeGridDispatcher to what? - val computeGridDispatcher = Dispatchers.newDispatcher("akka:compute-grid").build -} - -/** - * Internal "daemon" actor for cluster internal communication. - * - * It acts as the brain of the cluster that responds to cluster events (messages) and undertakes action. 
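The membership bookkeeping in handleChildChange above boils down to two set differences between the previous and the current ZooKeeper children; NodeConnected and NodeDisconnected events are published from exactly these two sets. As a standalone sketch:

// Which nodes joined, and which nodes left, between two membership snapshots.
def membershipDelta(old: Set[String], current: Set[String]): (Set[String], Set[String]) =
  (current diff old, old diff current)

For example, membershipDelta(Set("a", "b"), Set("b", "c")) yields (Set("c"), Set("a")).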
- */ -class RemoteClusterDaemon(cluster: ClusterNode) extends Actor { - - import RemoteClusterDaemon._ - import Cluster._ - - override def preRestart(reason: Throwable, msg: Option[Any]) { - EventHandler.debug(this, "RemoteClusterDaemon failed due to [%s] restarting...".format(reason)) - } - - def receive: Receive = { - case message: RemoteSystemDaemonMessageProtocol ⇒ - EventHandler.debug(this, - "Received command [\n%s] to RemoteClusterDaemon on node [%s]".format(message, cluster.nodeAddress.nodeName)) - - message.getMessageType match { - case USE ⇒ handleUse(message) - case RELEASE ⇒ handleRelease(message) - case STOP ⇒ cluster.shutdown() - case DISCONNECT ⇒ cluster.disconnect() - case RECONNECT ⇒ cluster.reconnect() - case RESIGN ⇒ cluster.resign() - case FAIL_OVER_CONNECTIONS ⇒ handleFailover(message) - case FUNCTION_FUN0_UNIT ⇒ handle_fun0_unit(message) - case FUNCTION_FUN0_ANY ⇒ handle_fun0_any(message) - case FUNCTION_FUN1_ARG_UNIT ⇒ handle_fun1_arg_unit(message) - case FUNCTION_FUN1_ARG_ANY ⇒ handle_fun1_arg_any(message) - //TODO: should we not deal with unrecognized message types? - } - - case unknown ⇒ EventHandler.warning(this, "Unknown message [%s]".format(unknown)) - } - - def handleRelease(message: RemoteProtocol.RemoteSystemDaemonMessageProtocol) { - if (message.hasActorUuid) { - cluster.actorAddressForUuid(uuidProtocolToUuid(message.getActorUuid)) foreach { address ⇒ - cluster.release(address) - } - } else if (message.hasActorAddress) { - cluster release message.getActorAddress - } else { - EventHandler.warning(this, - "None of 'uuid' or 'actorAddress'' is specified, ignoring remote cluster daemon command [%s]".format(message)) - } - } - - def handleUse(message: RemoteProtocol.RemoteSystemDaemonMessageProtocol) { - def deserializeMessages(entriesAsBytes: Vector[Array[Byte]]): Vector[AnyRef] = { - import akka.cluster.RemoteProtocol._ - import akka.cluster.MessageSerializer - - entriesAsBytes map { bytes ⇒ - val messageBytes = - if (Cluster.shouldCompressData) LZF.uncompress(bytes) - else bytes - MessageSerializer.deserialize(MessageProtocol.parseFrom(messageBytes), None) - } - } - - def actorOfRefToUseForReplay(snapshotAsBytes: Option[Array[Byte]], actorAddress: String, newActorRef: LocalActorRef): ActorRef = { - snapshotAsBytes match { - - // we have a new actor ref - the snapshot - case Some(bytes) ⇒ - // stop the new actor ref and use the snapshot instead - //TODO: What if that actor already has been retrieved and is being used?? - //So do we have a race here? 
- cluster.remoteService.unregister(actorAddress) - - // deserialize the snapshot actor ref and register it as remote actor - val uncompressedBytes = - if (Cluster.shouldCompressData) LZF.uncompress(bytes) - else bytes - - val snapshotActorRef = fromBinary(uncompressedBytes, newActorRef.uuid) - cluster.remoteService.register(actorAddress, snapshotActorRef) - - // FIXME we should call 'stop()' here (to GC the actor), but can't since that will currently - //shut down the TransactionLog for this UUID - since both this actor and the new snapshotActorRef - //have the same UUID (which they should) - //newActorRef.stop() - - snapshotActorRef - - // we have no snapshot - use the new actor ref - case None ⇒ - newActorRef - } - } - - try { - if (message.hasActorAddress) { - val actorAddress = message.getActorAddress - cluster.serializerForActor(actorAddress) foreach { serializer ⇒ - cluster.use(actorAddress, serializer) foreach { newActorRef ⇒ - cluster.remoteService.register(actorAddress, newActorRef) - - if (message.hasReplicateActorFromUuid) { - // replication is used - fetch the messages and replay them - val replicateFromUuid = uuidProtocolToUuid(message.getReplicateActorFromUuid) - val deployment = Deployer.deploymentFor(actorAddress) - val replicationScheme = DeploymentConfig.replicationSchemeFor(deployment).getOrElse( - throw new IllegalStateException( - "Actor [" + actorAddress + "] should have been configured as a replicated actor but could not find its ReplicationScheme")) - val isWriteBehind = DeploymentConfig.isWriteBehindReplication(replicationScheme) - - try { - // get the transaction log for the actor UUID - val readonlyTxLog = TransactionLog.logFor(replicateFromUuid.toString, isWriteBehind, replicationScheme) - - // get the latest snapshot (Option[Array[Byte]]) and all the subsequent messages (Array[Byte]) - val (snapshotAsBytes, entriesAsBytes) = readonlyTxLog.latestSnapshotAndSubsequentEntries - - // deserialize and restore actor snapshot. This call will automatically recreate a transaction log. - val actorRef = actorOfRefToUseForReplay(snapshotAsBytes, actorAddress, newActorRef) - - // deserialize the messages - val messages: Vector[AnyRef] = deserializeMessages(entriesAsBytes) - - EventHandler.info(this, "Replaying [%s] messages to actor [%s]".format(messages.size, actorAddress)) - - // replay all messages - messages foreach { message ⇒ - EventHandler.debug(this, "Replaying message [%s] to actor [%s]".format(message, actorAddress)) - - // FIXME how to handle '?' messages? - // We can *not* replay them with the correct semantics. Should we: - // 1. Ignore/drop them and log warning? - // 2. Throw exception when about to log them? - // 3. Other? - actorRef ! message - } - - } catch { - case e: Throwable ⇒ - EventHandler.error(e, this, e.toString) - throw e - } - } - } - } - } else { - EventHandler.error(this, "Actor 'address' is not defined, ignoring remote cluster daemon command [%s]".format(message)) - } - - self.reply(Success(cluster.remoteServerAddress.toString)) - } catch { - case error: Throwable ⇒ - self.reply(Failure(error)) - throw error - } - } - - def handle_fun0_unit(message: RemoteProtocol.RemoteSystemDaemonMessageProtocol) { - new LocalActorRef( - Props( - self ⇒ { - case f: Function0[_] ⇒ try { f() } finally { self.stop() } - }).copy(dispatcher = computeGridDispatcher), Props.randomName, systemService = true) ! 
payloadFor(message, classOf[Function0[Unit]]) - } - - def handle_fun0_any(message: RemoteProtocol.RemoteSystemDaemonMessageProtocol) { - new LocalActorRef( - Props( - self ⇒ { - case f: Function0[_] ⇒ try { self.reply(f()) } finally { self.stop() } - }).copy(dispatcher = computeGridDispatcher), Props.randomName, systemService = true) forward payloadFor(message, classOf[Function0[Any]]) - } - - def handle_fun1_arg_unit(message: RemoteProtocol.RemoteSystemDaemonMessageProtocol) { - new LocalActorRef( - Props( - self ⇒ { - case (fun: Function[_, _], param: Any) ⇒ try { fun.asInstanceOf[Any ⇒ Unit].apply(param) } finally { self.stop() } - }).copy(dispatcher = computeGridDispatcher), Props.randomName, systemService = true) ! payloadFor(message, classOf[Tuple2[Function1[Any, Unit], Any]]) - } - - def handle_fun1_arg_any(message: RemoteProtocol.RemoteSystemDaemonMessageProtocol) { - new LocalActorRef( - Props( - self ⇒ { - case (fun: Function[_, _], param: Any) ⇒ try { self.reply(fun.asInstanceOf[Any ⇒ Any](param)) } finally { self.stop() } - }).copy(dispatcher = computeGridDispatcher), Props.randomName, systemService = true) forward payloadFor(message, classOf[Tuple2[Function1[Any, Any], Any]]) - } - - def handleFailover(message: RemoteProtocol.RemoteSystemDaemonMessageProtocol) { - val (from, to) = payloadFor(message, classOf[(InetSocketAddress, InetSocketAddress)]) - cluster.failOverClusterActorRefConnections(from, to) - } - - private def payloadFor[T](message: RemoteSystemDaemonMessageProtocol, clazz: Class[T]): T = { - Serialization.deserialize(message.getPayload.toByteArray, clazz, None) match { - case Left(error) ⇒ throw error - case Right(instance) ⇒ instance.asInstanceOf[T] - } - } -} diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterActorRef.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterActorRef.scala deleted file mode 100644 index 29f56a5966..0000000000 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterActorRef.scala +++ /dev/null @@ -1,129 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -package akka.cluster - -import akka.actor._ -import akka.util._ -import ReflectiveAccess._ -import akka.routing._ -import akka.cluster._ -import FailureDetector._ -import akka.event.EventHandler -import akka.config.ConfigurationException - -import java.net.InetSocketAddress -import java.util.concurrent.atomic.AtomicReference - -import collection.immutable.Map -import annotation.tailrec - -/** - * ClusterActorRef factory and locator. 
- */ -object ClusterActorRef { - import FailureDetectorType._ - import RouterType._ - - def newRef( - actorAddress: String, - routerType: RouterType, - failureDetectorType: FailureDetectorType, - timeout: Long): ClusterActorRef = { - - val routerFactory: () ⇒ Router = routerType match { - case Direct ⇒ () ⇒ new DirectRouter - case Random ⇒ () ⇒ new RandomRouter - case RoundRobin ⇒ () ⇒ new RoundRobinRouter - case LeastCPU ⇒ sys.error("Router LeastCPU not supported yet") - case LeastRAM ⇒ sys.error("Router LeastRAM not supported yet") - case LeastMessages ⇒ sys.error("Router LeastMessages not supported yet") - case Custom ⇒ sys.error("Router Custom not supported yet") - } - - val failureDetectorFactory: (Map[InetSocketAddress, ActorRef]) ⇒ FailureDetector = failureDetectorType match { - case RemoveConnectionOnFirstFailureLocalFailureDetector ⇒ - (connections: Map[InetSocketAddress, ActorRef]) ⇒ new RemoveConnectionOnFirstFailureLocalFailureDetector(connections.values) - - case RemoveConnectionOnFirstFailureRemoteFailureDetector ⇒ - (connections: Map[InetSocketAddress, ActorRef]) ⇒ new RemoveConnectionOnFirstFailureRemoteFailureDetector(connections) - - case CustomFailureDetector(implClass) ⇒ - (connections: Map[InetSocketAddress, ActorRef]) ⇒ FailureDetector.createCustomFailureDetector(implClass, connections) - } - - new ClusterActorRef( - RoutedProps() - .withTimeout(timeout) - .withRouter(routerFactory) - .withFailureDetector(failureDetectorFactory), - actorAddress) - } - - /** - * Finds the cluster actor reference that has a specific address. - */ - def actorFor(address: String): Option[ActorRef] = - Actor.registry.local.actorFor(Address.clusterActorRefPrefix + address) - - private[cluster] def createRemoteActorRef(actorAddress: String, inetSocketAddress: InetSocketAddress) = { - RemoteActorRef(inetSocketAddress, actorAddress, Actor.TIMEOUT, None) - } -} - -/** - * ActorRef representing a one or many instances of a clustered, load-balanced and sometimes replicated actor - * where the instances can reside on other nodes in the cluster. 
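The constructor further down builds its remote connection map with the pre-Scala-2.10 fold syntax (z /: xs)(f), which is just xs.foldLeft(z)(f). A small illustrative equivalent, with plain strings standing in for the UUID, InetSocketAddress and ActorRef types of the real code:

// (Map.empty /: addresses) { ... } written as an explicit foldLeft:
val addresses = Vector(("uuid-1", "host-a:2552"), ("uuid-2", "host-b:2552"))
val remoteConnections = addresses.foldLeft(Map.empty[String, String]) {
  case (map, (_, socketAddress)) ⇒ map + (socketAddress -> ("remote-ref-for-" + socketAddress))
}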
- */ -private[akka] class ClusterActorRef(props: RoutedProps, val address: String) extends AbstractRoutedActorRef(props) { - - import ClusterActorRef._ - - ClusterModule.ensureEnabled() - - val addresses = Cluster.node.inetSocketAddressesForActor(address) - - EventHandler.debug(this, - "Checking out cluster actor ref with address [%s] and router [%s] on [%s] connected to [\n\t%s]" - .format(address, router, Cluster.node.remoteServerAddress, addresses.map(_._2).mkString("\n\t"))) - - addresses foreach { - case (_, address) ⇒ Cluster.node.clusterActorRefs.put(address, this) - } - - val connections: FailureDetector = { - val remoteConnections = (Map[InetSocketAddress, ActorRef]() /: addresses) { - case (map, (uuid, inetSocketAddress)) ⇒ - map + (inetSocketAddress -> createRemoteActorRef(address, inetSocketAddress)) - } - props.failureDetectorFactory(remoteConnections) - } - - router.init(connections) - - def nrOfConnections: Int = connections.size - - private[akka] def failOver(from: InetSocketAddress, to: InetSocketAddress) { - connections.failOver(from, to) - } - - def stop() { - synchronized { - if (_status == ActorRefInternals.RUNNING) { - Actor.registry.local.unregisterClusterActorRef(this) - _status = ActorRefInternals.SHUTDOWN - postMessageToMailbox(Terminate, None) - - // FIXME here we need to fire off Actor.cluster.remove(address) (which needs to be properly implemented first, see ticket) - connections.stopAll() - } - } - } - - /* If you start me up */ - if (_status == ActorRefInternals.UNSTARTED) { - _status = ActorRefInternals.RUNNING - Actor.registry.local.registerClusterActorRef(this) - } -} diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterDeployer.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterDeployer.scala deleted file mode 100644 index 61a393360c..0000000000 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterDeployer.scala +++ /dev/null @@ -1,205 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -package akka.cluster - -import akka.actor.DeploymentConfig._ -import akka.actor._ -import akka.event.EventHandler -import akka.config.Config -import akka.util.Switch -import akka.util.Helpers._ -import akka.cluster.zookeeper.AkkaZkClient - -import org.apache.zookeeper.CreateMode -import org.apache.zookeeper.recipes.lock.{ WriteLock, LockListener } - -import org.I0Itec.zkclient.exception.{ ZkNoNodeException, ZkNodeExistsException } - -import scala.collection.immutable.Seq -import scala.collection.JavaConversions.collectionAsScalaIterable - -import java.util.concurrent.{ CountDownLatch, TimeUnit } - -/** - * A ClusterDeployer is responsible for deploying a Deploy. 
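ClusterDeployer's init (below) makes exactly one node push the deployment plan into ZooKeeper while every other node blocks on a latch that is released when the write lock is given up. The coordination skeleton, with plain functions standing in for the ZooKeeper WriteLock and its LockListener (a sketch, not the actual API):

import java.util.concurrent.{ CountDownLatch, TimeUnit }

// One node wins the lock and deploys; the rest wait for the lock-released
// callback to count the latch down (or give up after the timeout).
def coordinateDeployment(tryLock: () ⇒ Boolean, unlock: () ⇒ Unit, completed: CountDownLatch)(deployAll: () ⇒ Unit): Unit =
  if (tryLock()) {
    try deployAll()
    finally unlock() // triggers the listener that counts `completed` down
  } else {
    completed.await(30, TimeUnit.SECONDS)
    ()
  }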
- */ -object ClusterDeployer extends ActorDeployer { - val clusterName = Cluster.name - val nodeName = Config.nodename - val clusterPath = "/%s" format clusterName - - val deploymentPath = clusterPath + "/deployment" - val deploymentAddressPath = deploymentPath + "/%s" - - val deploymentCoordinationPath = clusterPath + "/deployment-coordination" - val deploymentInProgressLockPath = deploymentCoordinationPath + "/in-progress" - val isDeploymentCompletedInClusterLockPath = deploymentCoordinationPath + "/completed" // should not be part of basePaths - - val basePaths = List(clusterPath, deploymentPath, deploymentCoordinationPath, deploymentInProgressLockPath) - - private val isConnected = new Switch(false) - private val deploymentCompleted = new CountDownLatch(1) - - private val zkClient = new AkkaZkClient( - Cluster.zooKeeperServers, - Cluster.sessionTimeout, - Cluster.connectionTimeout, - Cluster.defaultZooKeeperSerializer) - - private val deploymentInProgressLockListener = new LockListener { - def lockAcquired() { - EventHandler.info(this, "Clustered deployment started") - } - - def lockReleased() { - EventHandler.info(this, "Clustered deployment completed") - deploymentCompleted.countDown() - } - } - - private val deploymentInProgressLock = new WriteLock( - zkClient.connection.getZookeeper, - deploymentInProgressLockPath, - null, - deploymentInProgressLockListener) - - private val systemDeployments: List[Deploy] = Nil - - def shutdown() { - isConnected switchOff { - // undeploy all - try { - for { - child ← collectionAsScalaIterable(zkClient.getChildren(deploymentPath)) - deployment ← zkClient.readData(deploymentAddressPath.format(child)).asInstanceOf[Deploy] - } zkClient.delete(deploymentAddressPath.format(deployment.address)) - - invalidateDeploymentInCluster() - } catch { - case e: Exception ⇒ - handleError(new DeploymentException("Could not undeploy all deployment data in ZooKeeper due to: " + e)) - } - - // shut down ZooKeeper client - zkClient.close() - EventHandler.info(this, "ClusterDeployer shut down successfully") - } - } - - def lookupDeploymentFor(address: String): Option[Deploy] = ensureRunning { - LocalDeployer.lookupDeploymentFor(address) match { // try local cache - case Some(deployment) ⇒ // in local cache - deployment - case None ⇒ // not in cache, check cluster - val deployment = - try { - Some(zkClient.readData(deploymentAddressPath.format(address)).asInstanceOf[Deploy]) - } catch { - case e: ZkNoNodeException ⇒ None - case e: Exception ⇒ - EventHandler.warning(this, e.toString) - None - } - deployment foreach (LocalDeployer.deploy(_)) // cache it in local cache - deployment - } - } - - def fetchDeploymentsFromCluster: List[Deploy] = ensureRunning { - val addresses = - try { - zkClient.getChildren(deploymentPath).toList - } catch { - case e: ZkNoNodeException ⇒ List[String]() - } - val deployments = addresses map { address ⇒ - zkClient.readData(deploymentAddressPath.format(address)).asInstanceOf[Deploy] - } - EventHandler.info(this, "Fetched deployment plans from cluster [\n\t%s\n]" format deployments.mkString("\n\t")) - deployments - } - - private[akka] def init(deployments: Seq[Deploy]) { - isConnected switchOn { - EventHandler.info(this, "Initializing ClusterDeployer") - - basePaths foreach { path ⇒ - try { - ignore[ZkNodeExistsException](zkClient.create(path, null, CreateMode.PERSISTENT)) - EventHandler.debug(this, "Created ZooKeeper path for deployment [%s]".format(path)) - } catch { - case e ⇒ - val error = new DeploymentException(e.toString) - 
EventHandler.error(error, this) - throw error - } - } - - val allDeployments = deployments ++ systemDeployments - - if (!isDeploymentCompletedInCluster) { - if (deploymentInProgressLock.lock()) { - // try to be the one doing the clustered deployment - EventHandler.info(this, "Pushing clustered deployment plans [\n\t" + allDeployments.mkString("\n\t") + "\n]") - allDeployments foreach (deploy(_)) // deploy - markDeploymentCompletedInCluster() - deploymentInProgressLock.unlock() // signal deployment complete - - } else { - deploymentCompleted.await(30, TimeUnit.SECONDS) // wait until deployment is completed by other "master" node - } - } - - // fetch clustered deployments and deploy them locally - fetchDeploymentsFromCluster foreach (LocalDeployer.deploy(_)) - } - } - - private[akka] def deploy(deployment: Deploy) { - ensureRunning { - LocalDeployer.deploy(deployment) - deployment match { - case Deploy(_, _, _, _, Local) | Deploy(_, _, _, _, _: Local) ⇒ //TODO LocalDeployer.deploy(deployment)?? - case Deploy(address, recipe, routing, _, _) ⇒ // cluster deployment - /*TODO recipe foreach { r ⇒ - Deployer.newClusterActorRef(() ⇒ Actor.actorOf(r.implementationClass), address, deployment) - }*/ - val path = deploymentAddressPath.format(address) - try { - ignore[ZkNodeExistsException](zkClient.create(path, null, CreateMode.PERSISTENT)) - zkClient.writeData(path, deployment) - } catch { - case e: NullPointerException ⇒ - handleError(new DeploymentException( - "Could not store deployment data [" + deployment + "] in ZooKeeper since client session is closed")) - case e: Exception ⇒ - handleError(new DeploymentException( - "Could not store deployment data [" + deployment + "] in ZooKeeper due to: " + e)) - } - } - } - } - - private def markDeploymentCompletedInCluster() { - ignore[ZkNodeExistsException](zkClient.create(isDeploymentCompletedInClusterLockPath, null, CreateMode.PERSISTENT)) - } - - private def isDeploymentCompletedInCluster = zkClient.exists(isDeploymentCompletedInClusterLockPath) - - // FIXME in future - add watch to this path to be able to trigger redeployment, and use this method to trigger redeployment - private def invalidateDeploymentInCluster() { - ignore[ZkNoNodeException](zkClient.delete(isDeploymentCompletedInClusterLockPath)) - } - - private def ensureRunning[T](body: ⇒ T): T = { - if (isConnected.isOn) body - else throw new IllegalStateException("ClusterDeployer is not running") - } - - private[akka] def handleError(e: Throwable): Nothing = { - EventHandler.error(e, this, e.toString) - throw e - } -} diff --git a/akka-remote/src/main/scala/akka/remote/Gossiper.scala b/akka-cluster/src/main/scala/akka/cluster/Gossiper.scala similarity index 97% rename from akka-remote/src/main/scala/akka/remote/Gossiper.scala rename to akka-cluster/src/main/scala/akka/cluster/Gossiper.scala index 55165f0891..e234d6e158 100644 --- a/akka-remote/src/main/scala/akka/remote/Gossiper.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Gossiper.scala @@ -2,13 +2,15 @@ * Copyright (C) 2009-2012 Typesafe Inc. 
*/ -package akka.remote +package akka.cluster import akka.actor._ import akka.actor.Status._ +import akka.remote._ import akka.event.Logging -import akka.util._ import akka.dispatch.Await +import akka.pattern.ask +import akka.util._ import akka.config.ConfigurationException import java.util.concurrent.atomic.{ AtomicReference, AtomicBoolean } @@ -20,9 +22,6 @@ import System.{ currentTimeMillis ⇒ newTimestamp } import scala.collection.immutable.{ Map, SortedSet } import scala.annotation.tailrec -import akka.dispatch.Await -import akka.pattern.ask - import com.google.protobuf.ByteString /** @@ -136,7 +135,7 @@ case class Gossiper(remote: RemoteActorRefProvider, system: ActorSystemImpl) { private val memberFingerprint = address.## private val serialization = remote.serialization - private val failureDetector = remote.failureDetector + private val failureDetector = new AccrualFailureDetector(remoteSettings.FailureDetectorThreshold, remoteSettings.FailureDetectorMaxSampleSize, system) private val initialDelayForGossip = remoteSettings.InitialDelayForGossip private val gossipFrequency = remoteSettings.GossipFrequency @@ -154,12 +153,14 @@ case class Gossiper(remote: RemoteActorRefProvider, system: ActorSystemImpl) { private val isRunning = new AtomicBoolean(true) private val log = Logging(system, "Gossiper") private val random = SecureRandom.getInstance("SHA1PRNG") - private val connectionManager = new RemoteConnectionManager(system, remote, Map.empty[Address, ActorRef]) // Is it right to put this guy under the /system path or should we have a top-level /cluster or something else...? private val clusterDaemon = system.systemActorOf(Props(new ClusterDaemon(system, this)), "cluster") private val state = new AtomicReference[State](State(currentGossip = newGossip())) + // FIXME manage connections in some other way so we can delete the RemoteConnectionManager (SINCE IT SUCKS!!!) + private val connectionManager = new RemoteConnectionManager(system, remote, failureDetector, Map.empty[Address, ActorRef]) + log.info("Starting cluster Gossiper...") // join the cluster by connecting to one of the seed members and retrieve current cluster state (Gossip) diff --git a/akka-cluster/src/main/scala/akka/cluster/LocalCluster.scala b/akka-cluster/src/main/scala/akka/cluster/LocalCluster.scala deleted file mode 100644 index d8a0ac6027..0000000000 --- a/akka-cluster/src/main/scala/akka/cluster/LocalCluster.scala +++ /dev/null @@ -1,105 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. 
- */ -package akka.cluster - -import akka.config.Config -import Config._ -import akka.util._ -import Helpers._ -import akka.actor._ -import Actor._ -import akka.event.EventHandler -import akka.cluster.zookeeper._ - -import org.apache.zookeeper._ -import org.apache.zookeeper.Watcher.Event._ -import org.apache.zookeeper.data.Stat -import org.apache.zookeeper.recipes.lock.{ WriteLock, LockListener } - -import org.I0Itec.zkclient._ -import org.I0Itec.zkclient.serialize._ -import org.I0Itec.zkclient.exception._ - -import java.util.concurrent.atomic.{ AtomicBoolean, AtomicReference } - -object LocalCluster { - val clusterDirectory = config.getString("akka.cluster.log-directory", "_akka_cluster") - val clusterDataDirectory = clusterDirectory + "/data" - val clusterLogDirectory = clusterDirectory + "/log" - - val clusterName = Config.clusterName - val nodename = Config.nodename - val zooKeeperServers = config.getString("akka.cluster.zookeeper-server-addresses", "localhost:2181") - val sessionTimeout = Duration(config.getInt("akka.cluster.session-timeout", 60), TIME_UNIT).toMillis.toInt - val connectionTimeout = Duration(config.getInt("akka.cluster.connection-timeout", 60), TIME_UNIT).toMillis.toInt - val defaultZooKeeperSerializer = new SerializableSerializer - - val zkServer = new AtomicReference[Option[ZkServer]](None) - - lazy val zkClient = new AkkaZkClient(zooKeeperServers, sessionTimeout, connectionTimeout, defaultZooKeeperSerializer) - - /** - * Looks up the local hostname. - */ - def lookupLocalhostName = NetworkUtil.getLocalhostName - - /** - * Starts up a local ZooKeeper server. Should only be used for testing purposes. - */ - def startLocalCluster(): ZkServer = - startLocalCluster(clusterDataDirectory, clusterLogDirectory, 2181, 5000) - - /** - * Starts up a local ZooKeeper server. Should only be used for testing purposes. - */ - def startLocalCluster(port: Int, tickTime: Int): ZkServer = - startLocalCluster(clusterDataDirectory, clusterLogDirectory, port, tickTime) - - /** - * Starts up a local ZooKeeper server. Should only be used for testing purposes. - */ - def startLocalCluster(tickTime: Int): ZkServer = - startLocalCluster(clusterDataDirectory, clusterLogDirectory, 2181, tickTime) - - /** - * Starts up a local ZooKeeper server. Should only be used for testing purposes. - */ - def startLocalCluster(dataPath: String, logPath: String): ZkServer = - startLocalCluster(dataPath, logPath, 2181, 500) - - /** - * Starts up a local ZooKeeper server. Should only be used for testing purposes. - */ - def startLocalCluster(dataPath: String, logPath: String, port: Int, tickTime: Int): ZkServer = { - try { - val zk = AkkaZooKeeper.startLocalServer(dataPath, logPath, port, tickTime) - zkServer.set(Some(zk)) - zk - } catch { - case e: Throwable ⇒ - EventHandler.error(e, this, "Could not start local ZooKeeper cluster") - throw e - } - } - - /** - * Shut down the local ZooKeeper server. 
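Taken together with shutdownLocalCluster just below, the helpers above give the usual embedded-ZooKeeper test lifecycle. A hedged usage sketch, relying only on the defaults defined above (port 2181, tick time 5000):

// Test-only: boot an embedded ZooKeeper server, run the tests, tear it down.
val zk = LocalCluster.startLocalCluster()
try {
  // ... exercise cluster code against localhost:2181 ...
} finally LocalCluster.shutdownLocalCluster()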
- */ - def shutdownLocalCluster() { - withPrintStackTraceOnError { - EventHandler.debug(this, "Shuts down local cluster") - zkServer.getAndSet(None).foreach(_.shutdown()) - } - } - - def createQueue(rootPath: String, blocking: Boolean = true) = - new ZooKeeperQueue(zkClient, rootPath, blocking) - - def barrier(name: String, count: Int): ZooKeeperBarrier = - ZooKeeperBarrier(zkClient, clusterName, name, nodename, count) - - def barrier(name: String, count: Int, timeout: Duration): ZooKeeperBarrier = - ZooKeeperBarrier(zkClient, clusterName, name, nodename, count, timeout) -} - diff --git a/akka-remote/src/main/scala/akka/remote/RemoteConnectionManager.scala b/akka-cluster/src/main/scala/akka/cluster/RemoteConnectionManager.scala similarity index 96% rename from akka-remote/src/main/scala/akka/remote/RemoteConnectionManager.scala rename to akka-cluster/src/main/scala/akka/cluster/RemoteConnectionManager.scala index fd2a9135d7..63020367a5 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteConnectionManager.scala +++ b/akka-cluster/src/main/scala/akka/cluster/RemoteConnectionManager.scala @@ -2,9 +2,10 @@ * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.remote +package akka.cluster import akka.actor._ +import akka.remote._ import akka.routing._ import akka.event.Logging @@ -19,6 +20,7 @@ import java.util.concurrent.atomic.AtomicReference class RemoteConnectionManager( system: ActorSystemImpl, remote: RemoteActorRefProvider, + failureDetector: AccrualFailureDetector, initialConnections: Map[Address, ActorRef] = Map.empty[Address, ActorRef]) extends ConnectionManager { @@ -30,8 +32,6 @@ class RemoteConnectionManager( def iterable: Iterable[ActorRef] = connections.values } - def failureDetector = remote.failureDetector - private val state: AtomicReference[State] = new AtomicReference[State](newState()) /** @@ -145,6 +145,6 @@ class RemoteConnectionManager( } } - private[remote] def newConnection(remoteAddress: Address, actorPath: ActorPath) = + private[cluster] def newConnection(remoteAddress: Address, actorPath: ActorPath) = new RemoteActorRef(remote, remote.transport, actorPath, Nobody) } diff --git a/akka-cluster/src/main/scala/akka/cluster/TransactionLog.scala b/akka-cluster/src/main/scala/akka/cluster/TransactionLog.scala deleted file mode 100644 index ce9eb300f5..0000000000 --- a/akka-cluster/src/main/scala/akka/cluster/TransactionLog.scala +++ /dev/null @@ -1,604 +0,0 @@ -package akka.cluster - -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -import org.apache.bookkeeper.client.{ BookKeeper, LedgerHandle, LedgerEntry, BKException, AsyncCallback } -import org.apache.zookeeper.CreateMode - -import org.I0Itec.zkclient.exception._ - -import akka.AkkaException -import akka.config._ -import Config._ -import akka.util._ -import akka.actor._ -import DeploymentConfig.ReplicationScheme -import akka.event.EventHandler -import akka.dispatch.{ DefaultPromise, Promise, MessageInvocation } -import akka.cluster.zookeeper._ -import akka.serialization.ActorSerialization._ -import akka.serialization.Compression.LZF - -import java.util.Enumeration - -// FIXME allow user to choose dynamically between 'async' and 'sync' tx logging (asyncAddEntry(byte[] data, AddCallback cb, Object ctx)) -// FIXME clean up old entries in log after doing a snapshot - -class ReplicationException(message: String, cause: Throwable = null) extends AkkaException(message) { - def this(msg: String) = this(msg, null) -} - -/** - * A TransactionLog makes chunks of data durable. 
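Durability in the TransactionLog is a write-ahead log plus periodic snapshots: every snapshotFrequency-th entry is recorded as a full actor snapshot instead of a message delta, which bounds how much a recovery has to replay. The decision itself, as used by recordEntry below, is one line:

// A snapshot replaces the delta whenever the entry id lands on the
// configured frequency (entry 0 is never a snapshot).
def needsSnapshot(entryId: Long, snapshotFrequency: Long): Boolean =
  entryId != 0 && entryId % snapshotFrequency == 0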
- */ -class TransactionLog private ( - ledger: LedgerHandle, - val id: String, - val isAsync: Boolean, - replicationScheme: ReplicationScheme) { - - import TransactionLog._ - - val logId = ledger.getId - val txLogPath = transactionLogPath(id) - val snapshotPath = txLogPath + "/snapshot" - - private val isOpen = new Switch(true) - - /** - * Record an Actor message invocation. - * - * @param invocation the MessageInvocation to record - * @param actorRef the LocalActorRef that received the message. - * @throws ReplicationException if the TransactionLog already is closed. - */ - def recordEntry(invocation: MessageInvocation, actorRef: LocalActorRef) { - val entryId = ledger.getLastAddPushed + 1 - val needsSnapshot = entryId != 0 && (entryId % snapshotFrequency) == 0 - - if (needsSnapshot) { - //todo: could it be that the message is never persisted when a snapshot is added? - val bytes = toBinary(actorRef, false, replicationScheme) - recordSnapshot(bytes) - } else { - val bytes = MessageSerializer.serialize(invocation.message.asInstanceOf[AnyRef]).toByteArray - recordEntry(bytes) - } - } - - /** - * Record an entry. - * - * @param entry the entry in byte form to record. - * @throws ReplicationException if the TransactionLog already is closed. - */ - def recordEntry(entry: Array[Byte]) { - if (isOpen.isOn) { - val entryBytes = - if (shouldCompressData) LZF.compress(entry) - else entry - - try { - if (isAsync) { - ledger.asyncAddEntry( - entryBytes, - new AsyncCallback.AddCallback { - def addComplete(returnCode: Int, ledgerHandle: LedgerHandle, entryId: Long, ctx: AnyRef) { - handleReturnCode(returnCode) - EventHandler.debug(this, "Writing entry [%s] to log [%s]".format(entryId, logId)) - } - }, - null) - } else { - handleReturnCode(ledger.addEntry(entryBytes)) - val entryId = ledger.getLastAddPushed - EventHandler.debug(this, "Writing entry [%s] to log [%s]".format(entryId, logId)) - } - } catch { - case e: Throwable ⇒ handleError(e) - } - } else transactionClosedError - } - - /** - * Record a snapshot. - * - * @param snapshot the snapshot in byte form to record. - * @throws ReplicationException if the TransactionLog already is closed. - */ - def recordSnapshot(snapshot: Array[Byte]) { - if (isOpen.isOn) { - val snapshotBytes = - if (shouldCompressData) LZF.compress(snapshot) - else snapshot - - try { - if (isAsync) { - ledger.asyncAddEntry( - snapshotBytes, - new AsyncCallback.AddCallback { - def addComplete(returnCode: Int, ledgerHandle: LedgerHandle, snapshotId: Long, ctx: AnyRef) { - handleReturnCode(returnCode) - EventHandler.debug(this, "Writing snapshot to log [%s]".format(snapshotId)) - storeSnapshotMetaDataInZooKeeper(snapshotId) - } - }, - null) - } else { - //todo: could this be racy, since writing the snapshot itself and storing the snapshot id is not - //an atomic operation? - - //first store the snapshot. - handleReturnCode(ledger.addEntry(snapshotBytes)) - val snapshotId = ledger.getLastAddPushed - - //this is the location where all previous entries can be removed. - //TODO: how to remove data? - - EventHandler.debug(this, "Writing snapshot to log [%s]".format(snapshotId)) - //and now store the snapshot metadata. - storeSnapshotMetaDataInZooKeeper(snapshotId) - } - } catch { - case e: Throwable ⇒ handleError(e) - } - } else transactionClosedError - } - - /** - * Get all the entries for this transaction log. - * - * @throws ReplicationException if the TransactionLog already is closed. 
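Recovery (latestSnapshotAndSubsequentEntries below, consumed by handleUse earlier) is restore-then-replay: deserialize the newest snapshot if one exists, then apply every entry logged after it, in order. Generically, as a hedged sketch with illustrative type parameters:

// Fold the post-snapshot entries over the restored (or empty) state.
def recover[S](empty: S, restore: Array[Byte] ⇒ S, replay: (S, Array[Byte]) ⇒ S)(snapshot: Option[Array[Byte]], entries: Vector[Array[Byte]]): S =
  entries.foldLeft(snapshot.map(restore).getOrElse(empty))(replay)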
- */ - def entries: Vector[Array[Byte]] = entriesInRange(0, ledger.getLastAddConfirmed) - - /** - * Get the latest snapshot and all subsequent entries from this snapshot. - */ - def latestSnapshotAndSubsequentEntries: (Option[Array[Byte]], Vector[Array[Byte]]) = { - latestSnapshotId match { - case Some(snapshotId) ⇒ - EventHandler.debug(this, "Reading entries from snapshot id [%s] for log [%s]".format(snapshotId, logId)) - - val cursor = snapshotId + 1 - val lastIndex = ledger.getLastAddConfirmed - - val snapshot = Some(entriesInRange(snapshotId, snapshotId).head) - - val entries = - if (cursor > lastIndex) Vector.empty[Array[Byte]] - else entriesInRange(cursor, lastIndex) - - (snapshot, entries) - - case None ⇒ - (None, entries) - } - } - - /** - * Get a range of entries from 'from' to 'to' for this transaction log. - * - * @param from the first index of the range - * @param to the last index of the range (inclusive). - * @return a Vector containing Byte Arrays. Each element in the vector is a record. - * @throws IllegalArgumentException if from or to is negative, or if 'from' is bigger than 'to'. - * @throws ReplicationException if the TransactionLog already is closed. - */ - def entriesInRange(from: Long, to: Long): Vector[Array[Byte]] = if (isOpen.isOn) { - try { - if (from < 0) throw new IllegalArgumentException("'from' index can't be negative [" + from + "]") - if (to < 0) throw new IllegalArgumentException("'to' index can't be negative [" + to + "]") - if (to < from) throw new IllegalArgumentException("'to' index can't be smaller than 'from' index [" + from + "," + to + "]") - EventHandler.debug(this, "Reading entries [%s -> %s] for log [%s]".format(from, to, logId)) - - if (isAsync) { - val future = Promise[Vector[Array[Byte]]]() - ledger.asyncReadEntries( - from, to, - new AsyncCallback.ReadCallback { - def readComplete(returnCode: Int, ledgerHandle: LedgerHandle, enumeration: Enumeration[LedgerEntry], ctx: AnyRef) { - val future = ctx.asInstanceOf[Promise[Vector[Array[Byte]]]] - val entries = toByteArrays(enumeration) - - if (returnCode == BKException.Code.OK) future.success(entries) - else future.failure(BKException.create(returnCode)) - } - }, - future) - await(future) - } else { - toByteArrays(ledger.readEntries(from, to)) - } - } catch { - case e: Throwable ⇒ handleError(e) - } - } else transactionClosedError - - /** - * Get the last entry written to this transaction log. - * - * Returns -1 if there has never been an entry. - */ - def latestEntryId: Long = ledger.getLastAddConfirmed - - /** - * Get the id for the last snapshot written to this transaction log. - */ - def latestSnapshotId: Option[Long] = { - try { - val snapshotId = zkClient.readData(snapshotPath).asInstanceOf[Long] - EventHandler.debug(this, "Retrieved latest snapshot id [%s] from transaction log [%s]".format(snapshotId, logId)) - Some(snapshotId) - } catch { - case e: ZkNoNodeException ⇒ None - case e: Throwable ⇒ handleError(e) - } - } - - /** - * Delete this transaction log, removing all entries as well as all metadata. - * - * TODO: Behavior unclear what happens when already deleted (what happens to the ledger). - * TODO: Behavior unclear what happens when already closed. 
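Both async read paths above use the same bridge from BookKeeper's (returnCode, result, ctx) callback style to a blocking call: complete a Promise from the callback, then await it. A generic sketch of the bridge using the standard library's scala.concurrent (the deleted code uses the pre-2.10 akka.dispatch equivalents; the 30-second timeout is an assumption):

import scala.concurrent.{ Await, Promise }
import scala.concurrent.duration._

// Adapt a callback-style async API: `register` installs a callback that
// receives (returnCode, result); `okCode` marks success.
def blockOnCallback[T](okCode: Int, toError: Int ⇒ Throwable)(register: ((Int, T) ⇒ Unit) ⇒ Unit): T = {
  val promise = Promise[T]()
  register { (code, result) ⇒
    if (code == okCode) promise.success(result)
    else promise.failure(toError(code))
  }
  Await.result(promise.future, 30.seconds)
}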
- */ - def delete() { - if (isOpen.isOn) { - EventHandler.debug(this, "Deleting transaction log [%s]".format(logId)) - try { - if (isAsync) { - bookieClient.asyncDeleteLedger( - logId, - new AsyncCallback.DeleteCallback { - def deleteComplete(returnCode: Int, ctx: AnyRef) { - handleReturnCode(returnCode) - } - }, - null) - } else { - bookieClient.deleteLedger(logId) - } - - // also remove everything else that belongs to this TransactionLog. - zkClient.delete(snapshotPath) - zkClient.delete(txLogPath) - } catch { - case e: Throwable ⇒ handleError(e) - } - } - } - - /** - * Close this transaction log. - * - * If already closed, the call is ignored. - */ - def close() { - isOpen switchOff { - EventHandler.debug(this, "Closing transaction log [%s]".format(logId)) - try { - if (isAsync) { - ledger.asyncClose( - new AsyncCallback.CloseCallback { - def closeComplete( - returnCode: Int, - ledgerHandle: LedgerHandle, - ctx: AnyRef) { - handleReturnCode(returnCode) - } - }, - null) - } else { - ledger.close() - } - } catch { - case e: Throwable ⇒ handleError(e) - } - } - } - - private def toByteArrays(enumeration: Enumeration[LedgerEntry]): Vector[Array[Byte]] = { - var entries = Vector[Array[Byte]]() - while (enumeration.hasMoreElements) { - val bytes = enumeration.nextElement.getEntry - val entry = - if (shouldCompressData) LZF.uncompress(bytes) - else bytes - entries = entries :+ entry - } - entries - } - - private def storeSnapshotMetaDataInZooKeeper(snapshotId: Long) { - if (isOpen.isOn) { - try { - zkClient.create(snapshotPath, null, CreateMode.PERSISTENT) - } catch { - case e: ZkNodeExistsException ⇒ {} // do nothing - case e: Throwable ⇒ handleError(e) - } - - try { - zkClient.writeData(snapshotPath, snapshotId) - } catch { - case e: Throwable ⇒ - handleError(new ReplicationException( - "Could not store transaction log snapshot meta-data in ZooKeeper for UUID [" + id + "]")) - } - EventHandler.debug(this, "Writing snapshot [%s] to log [%s]".format(snapshotId, logId)) - } else transactionClosedError - } - - private def handleReturnCode(block: ⇒ Long) { - val code = block.toInt - if (code == BKException.Code.OK) {} // all fine - else handleError(BKException.create(code)) - } - - private def transactionClosedError: Nothing = { - handleError(new ReplicationException( - "Transaction log [" + logId + - "] is closed. You need to open up a new one with 'TransactionLog.logFor(id)'")) - } -} - -/** - * TODO: Documentation. 
- */ -object TransactionLog { - - val zooKeeperServers = config.getString("akka.cluster.zookeeper-server-addresses", "localhost:2181") - val sessionTimeout = Duration(config.getInt("akka.cluster.session-timeout", 60), TIME_UNIT).toMillis.toInt - val connectionTimeout = Duration(config.getInt("akka.cluster.connection-timeout", 60), TIME_UNIT).toMillis.toInt - - val digestType = config.getString("akka.cluster.replication.digest-type", "CRC32") match { - case "CRC32" ⇒ BookKeeper.DigestType.CRC32 - case "MAC" ⇒ BookKeeper.DigestType.MAC - case unknown ⇒ throw new ConfigurationException( - "akka.cluster.replication.digest-type is invalid [" + unknown + "], must be either 'CRC32' or 'MAC'") - } - val password = config.getString("akka.cluster.replication.password", "secret").getBytes("UTF-8") - val ensembleSize = config.getInt("akka.cluster.replication.ensemble-size", 3) - val quorumSize = config.getInt("akka.cluster.replication.quorum-size", 2) - val snapshotFrequency = config.getInt("akka.cluster.replication.snapshot-frequency", 1000) - val timeout = Duration(config.getInt("akka.cluster.replication.timeout", 30), TIME_UNIT).toMillis - val shouldCompressData = config.getBool("akka.remote.use-compression", false) - - private[akka] val transactionLogNode = "/transaction-log-ids" - - private val isConnected = new Switch(false) - - @volatile - private[akka] var bookieClient: BookKeeper = _ - - @volatile - private[akka] var zkClient: AkkaZkClient = _ - - private[akka] def apply( - ledger: LedgerHandle, - id: String, - isAsync: Boolean, - replicationScheme: ReplicationScheme) = - new TransactionLog(ledger, id, isAsync, replicationScheme) - - /** - * Starts up the transaction log. - */ - def start() { - isConnected switchOn { - bookieClient = new BookKeeper(zooKeeperServers) - zkClient = new AkkaZkClient(zooKeeperServers, sessionTimeout, connectionTimeout) - - try { - zkClient.create(transactionLogNode, null, CreateMode.PERSISTENT) - } catch { - case e: ZkNodeExistsException ⇒ {} // do nothing - case e: Throwable ⇒ handleError(e) - } - - EventHandler.info(this, - ("Transaction log service started with" + - "\n\tdigest type [%s]" + - "\n\tensemble size [%s]" + - "\n\tquorum size [%s]" + - "\n\tlogging time out [%s]").format( - digestType, - ensembleSize, - quorumSize, - timeout)) - } - } - - /** - * Shuts down the transaction log. - */ - def shutdown() { - isConnected switchOff { - try { - EventHandler.info(this, "Shutting down transaction log...") - zkClient.close() - bookieClient.halt() - EventHandler.info(this, "Transaction log shut down successfully") - } catch { - case e: Throwable ⇒ handleError(e) - } - } - } - - def transactionLogPath(id: String): String = transactionLogNode + "/" + id - - /** - * Checks if a TransactionLog for the given id already exists. - */ - def exists(id: String): Boolean = { - val txLogPath = transactionLogPath(id) - zkClient.exists(txLogPath) - } - - /** - * Creates a new transaction log for the 'id' specified. If a TransactionLog already exists for the id, - * it will be overwritten. - */ - def newLogFor(id: String, isAsync: Boolean, replicationScheme: ReplicationScheme): TransactionLog = { - val txLogPath = transactionLogPath(id) - - val ledger = try { - if (exists(id)) { - //if it exists, we need to delete it first. This gives it the overwrite semantics we are looking for. 
- try { - val ledger = bookieClient.createLedger(ensembleSize, quorumSize, digestType, password) - val txLog = TransactionLog(ledger, id, false, null) - txLog.delete() - txLog.close() - } catch { - case e: Throwable ⇒ handleError(e) - } - } - - val future = Promise[LedgerHandle]() - if (isAsync) { - bookieClient.asyncCreateLedger( - ensembleSize, quorumSize, digestType, password, - new AsyncCallback.CreateCallback { - def createComplete( - returnCode: Int, - ledgerHandle: LedgerHandle, - ctx: AnyRef) { - val future = ctx.asInstanceOf[Promise[LedgerHandle]] - if (returnCode == BKException.Code.OK) future.success(ledgerHandle) - else future.failure(BKException.create(returnCode)) - } - }, - future) - await(future) - } else { - bookieClient.createLedger(ensembleSize, quorumSize, digestType, password) - } - } catch { - case e: Throwable ⇒ handleError(e) - } - - val logId = ledger.getId - try { - zkClient.create(txLogPath, null, CreateMode.PERSISTENT) - zkClient.writeData(txLogPath, logId) - logId //TODO: does this have any effect? - } catch { - case e: Throwable ⇒ - bookieClient.deleteLedger(logId) // clean up - handleError(new ReplicationException( - "Could not store transaction log [" + logId + - "] meta-data in ZooKeeper for UUID [" + id + "]", e)) - } - - EventHandler.info(this, "Created new transaction log [%s] for UUID [%s]".format(logId, id)) - TransactionLog(ledger, id, isAsync, replicationScheme) - } - - /** - * Fetches an existing transaction log for the 'id' specified. - * - * @throws ReplicationException if the log with the given id doesn't exist. - */ - def logFor(id: String, isAsync: Boolean, replicationScheme: ReplicationScheme): TransactionLog = { - val txLogPath = transactionLogPath(id) - - val logId = try { - val logId = zkClient.readData(txLogPath).asInstanceOf[Long] - EventHandler.debug(this, - "Retrieved transaction log [%s] for UUID [%s]".format(logId, id)) - logId - } catch { - case e: ZkNoNodeException ⇒ - handleError(new ReplicationException( - "Transaction log for UUID [" + id + "] does not exist in ZooKeeper")) - case e: Throwable ⇒ handleError(e) - } - - val ledger = try { - if (isAsync) { - val future = Promise[LedgerHandle]() - bookieClient.asyncOpenLedger( - logId, digestType, password, - new AsyncCallback.OpenCallback { - def openComplete(returnCode: Int, ledgerHandle: LedgerHandle, ctx: AnyRef) { - val future = ctx.asInstanceOf[Promise[LedgerHandle]] - if (returnCode == BKException.Code.OK) future.success(ledgerHandle) - else future.failure(BKException.create(returnCode)) - } - }, - future) - await(future) - } else { - bookieClient.openLedger(logId, digestType, password) - } - } catch { - case e: Throwable ⇒ handleError(e) - } - - TransactionLog(ledger, id, isAsync, replicationScheme) - } - - private[akka] def await[T](future: Promise[T]): T = { - future.await.value.get match { - case Right(result) => result - case Left(throwable) => handleError(throwable) - } - } - - private[akka] def handleError(e: Throwable): Nothing = { - EventHandler.error(e, this, e.toString) - throw e - } -} - -/** - * TODO: Documentation. - */ -object LocalBookKeeperEnsemble { - private val isRunning = new Switch(false) - - //TODO: should probably come from the config file. - private val port = 5555 - - @volatile - private var localBookKeeper: LocalBookKeeper = _ - - /** - * Starts the LocalBookKeeperEnsemble. - * - * Call can safely be made when already started. - * - * This call will block until it is started. 
- */
-  def start() {
-    isRunning switchOn {
-      EventHandler.info(this, "Starting up LocalBookKeeperEnsemble...")
-      localBookKeeper = new LocalBookKeeper(TransactionLog.ensembleSize)
-      localBookKeeper.runZookeeper(port)
-      localBookKeeper.initializeZookeper()
-      localBookKeeper.runBookies()
-      EventHandler.info(this, "LocalBookKeeperEnsemble started up successfully")
-    }
-  }
-
-  /**
-   * Shuts down the LocalBookKeeperEnsemble.
-   *
-   * Call can safely be made when already shut down.
-   *
-   * This call will block until the shutdown completes.
-   */
-  def shutdown() {
-    isRunning switchOff {
-      EventHandler.info(this, "Shutting down LocalBookKeeperEnsemble...")
-      localBookKeeper.bs.foreach(_.shutdown()) // stop bookies
-      localBookKeeper.zkc.close() // stop zk client
-      localBookKeeper.zks.shutdown() // stop zk server
-      localBookKeeper.serverFactory.shutdown() // stop zk NIOServer
-      EventHandler.info(this, "LocalBookKeeperEnsemble shut down successfully")
-    }
-  }
-}
diff --git a/akka-remote/src/main/scala/akka/remote/VectorClock.scala b/akka-cluster/src/main/scala/akka/cluster/VectorClock.scala
similarity index 99%
rename from akka-remote/src/main/scala/akka/remote/VectorClock.scala
rename to akka-cluster/src/main/scala/akka/cluster/VectorClock.scala
index 42ea917669..a6a54de1d9 100644
--- a/akka-remote/src/main/scala/akka/remote/VectorClock.scala
+++ b/akka-cluster/src/main/scala/akka/cluster/VectorClock.scala
@@ -2,7 +2,7 @@
  * Copyright (C) 2009-2012 Typesafe Inc.
  */
 
-package akka.remote
+package akka.cluster
 
 import akka.AkkaException
 
diff --git a/akka-cluster/src/main/scala/akka/cluster/metrics/LocalNodeMetricsManager.scala b/akka-cluster/src/main/scala/akka/cluster/metrics/LocalNodeMetricsManager.scala
deleted file mode 100644
index c366ed598c..0000000000
--- a/akka-cluster/src/main/scala/akka/cluster/metrics/LocalNodeMetricsManager.scala
+++ /dev/null
@@ -1,226 +0,0 @@
-/**
- * Copyright (C) 2009-2012 Typesafe Inc.
- */
-
-package akka.cluster.metrics
-
-import akka.cluster._
-import Cluster._
-import akka.cluster.zookeeper._
-import akka.actor._
-import Actor._
-import scala.collection.JavaConversions._
-import scala.collection.JavaConverters._
-import java.util.concurrent.{ ConcurrentHashMap, ConcurrentSkipListSet }
-import java.util.concurrent.atomic.AtomicReference
-import akka.util.{ Duration, Switch }
-import akka.util.Helpers._
-import akka.util.duration._
-import org.I0Itec.zkclient.exception.ZkNoNodeException
-import akka.event.EventHandler
-
-/*
- * Instance of the metrics manager running on the node. To keep performance high, the metrics of all the
- * nodes in the cluster are cached internally, and refreshed from monitoring MBeans / Sigar (when it's the
- * local node) or ZooKeeper (for the metrics of all the nodes in the cluster) after a specified timeout -
- * metricsRefreshTimeout.
- * metricsRefreshTimeout defaults to 2 seconds, and can be declaratively defined through
- * akka.conf:
- *
- * @example {{{
- * akka.cluster.metrics-refresh-timeout = 2
- * }}}
- */
-class LocalNodeMetricsManager(zkClient: AkkaZkClient, private val metricsRefreshTimeout: Duration)
-  extends NodeMetricsManager {
-
-  /*
-   * Provides metrics of the system that the node is running on, through monitoring MBeans, Hyperic Sigar
-   * and other systems
-   */
-  lazy private val metricsProvider = SigarMetricsProvider(refreshTimeout.toMillis.toInt) fold ((thrw) ⇒ {
-    EventHandler.warning(this, """Hyperic Sigar library failed to load due to %s: %s.
-All the metrics will be retrieved from monitoring MBeans, and may be incorrect on some platforms.
-In order to get better metrics, please put "sigar.jar" on the classpath, and add the platform-specific native library to "java.library.path"."""
-      .format(thrw.getClass.getName, thrw.getMessage))
-    new JMXMetricsProvider
-  },
-    sigar ⇒ sigar)
-
-  /*
-   * Metrics of all nodes in the cluster
-   */
-  private val localNodeMetricsCache = new ConcurrentHashMap[String, NodeMetrics]
-
-  @volatile
-  private var _refreshTimeout = metricsRefreshTimeout
-
-  /*
-   * Plugged monitors (both local and cluster-wide)
-   */
-  private val alterationMonitors = new ConcurrentSkipListSet[MetricsAlterationMonitor]
-
-  private val _isRunning = new Switch(false)
-
-  /*
-   * If the value is true, the metrics manager is started and running; stopped, otherwise
-   */
-  def isRunning = _isRunning.isOn
-
-  /*
-   * Starts the metrics manager. When the metrics manager is started, it refreshes the cache from ZooKeeper
-   * after refreshTimeout, and invokes plugged monitors
-   */
-  def start() = {
-    _isRunning.switchOn { refresh() }
-    this
-  }
-
-  private[cluster] def metricsForNode(nodeName: String): String = "%s/%s".format(node.NODE_METRICS, nodeName)
-
-  /*
-   * Adds a monitor that reacts when specific conditions are satisfied
-   */
-  def addMonitor(monitor: MetricsAlterationMonitor) = alterationMonitors add monitor
-
-  def removeMonitor(monitor: MetricsAlterationMonitor) = alterationMonitors remove monitor
-
-  def refreshTimeout_=(newValue: Duration) = _refreshTimeout = newValue
-
-  /*
-   * Timeout after which metrics, cached in the metrics manager, will be refreshed from ZooKeeper
-   */
-  def refreshTimeout = _refreshTimeout
-
-  /*
-   * Stores metrics of the node in ZooKeeper
-   */
-  private[akka] def storeMetricsInZK(metrics: NodeMetrics) = {
-    val metricsPath = metricsForNode(metrics.nodeName)
-    if (zkClient.exists(metricsPath)) {
-      zkClient.writeData(metricsPath, metrics)
-    } else {
-      ignore[ZkNoNodeException](zkClient.createEphemeral(metricsPath, metrics))
-    }
-  }
-
-  /*
-   * Gets metrics of the node from ZooKeeper
-   */
-  private[akka] def getMetricsFromZK(nodeName: String) = {
-    zkClient.readData[NodeMetrics](metricsForNode(nodeName))
-  }
-
-  /*
-   * Removes metrics of the node from the local cache and ZooKeeper
-   */
-  def removeNodeMetrics(nodeName: String) = {
-    val metricsPath = metricsForNode(nodeName)
-    if (zkClient.exists(metricsPath)) {
-      ignore[ZkNoNodeException](zkClient.delete(metricsPath))
-    }
-
-    localNodeMetricsCache.remove(nodeName)
-  }
-
-  /*
-   * Gets metrics of the local node directly from JMX monitoring beans/Hyperic Sigar
-   */
-  def getLocalMetrics = metricsProvider.getLocalMetrics
-
-  /*
-   * Gets metrics of the node specified by the name. If useCached is true (the default value),
-   * the metrics snapshot is taken from the local cache; otherwise, it's retrieved from ZooKeeper
-   */
-  def getMetrics(nodeName: String, useCached: Boolean = true): Option[NodeMetrics] =
-    if (useCached)
-      Option(localNodeMetricsCache.get(nodeName))
-    else
-      try {
-        Some(getMetricsFromZK(nodeName))
-      } catch {
-        case ex: ZkNoNodeException ⇒ None
-      }
-
-  /*
-   * Returns metrics of all nodes in the cluster from ZooKeeper
-   */
-  private[akka] def getAllMetricsFromZK: Map[String, NodeMetrics] = {
-    val metricsPaths = zkClient.getChildren(node.NODE_METRICS).toList.toArray.asInstanceOf[Array[String]]
-    metricsPaths.flatMap { nodeName ⇒ getMetrics(nodeName, false).map((nodeName, _)) } toMap
-  }
-
-  /*
-   * Gets cached metrics of all nodes in the cluster
-   */
-  def getAllMetrics: Array[NodeMetrics] = localNodeMetricsCache.values.asScala.toArray
-
-  /*
-   * Refreshes locally cached metrics from ZooKeeper, and invokes plugged monitors
-   */
-  private[akka] def refresh() {
-
-    storeMetricsInZK(getLocalMetrics)
-    refreshMetricsCacheFromZK()
-
-    if (isRunning) {
-      Scheduler.schedule({ () ⇒ refresh() }, refreshTimeout.length, refreshTimeout.length, refreshTimeout.unit)
-      invokeMonitors()
-    }
-  }
-
-  /*
-   * Refreshes the metrics manager cache from ZooKeeper
-   */
-  private def refreshMetricsCacheFromZK() {
-    val allMetricsFromZK = getAllMetricsFromZK
-
-    localNodeMetricsCache.keySet.foreach { key ⇒
-      if (!allMetricsFromZK.contains(key))
-        localNodeMetricsCache.remove(key)
-    }
-
-    // RACY: metrics for the node might have been removed both from ZK and the local cache by that moment,
-    // but will be re-cached, since they're still present in the allMetricsFromZK snapshot. Not important, because
-    // the cache will be fixed soon, at the next iteration of refresh
-    allMetricsFromZK map {
-      case (node, metrics) ⇒
-        localNodeMetricsCache.put(node, metrics)
-    }
-  }
-
-  /*
-   * Invokes monitors with the cached metrics
-   */
-  private def invokeMonitors(): Unit = if (!alterationMonitors.isEmpty) {
-    // RACY: metrics for some nodes might have been removed/added by that moment. Not important,
-    // because monitors will be fed with up-to-date metrics shortly, at the next iteration of refresh
-    val clusterNodesMetrics = getAllMetrics
-    val localNodeMetrics = clusterNodesMetrics.find(_.nodeName == nodeAddress.nodeName)
-    val iterator = alterationMonitors.iterator
-
-    // RACY: there might be new monitors added after the iterator has been obtained. Not important,
-    // because the refresh interval is meant to be very short, and all the new monitors will be called at the
-    // next refresh iteration
-    while (iterator.hasNext) {
-
-      val monitor = iterator.next
-
-      monitor match {
-        case localMonitor: LocalMetricsAlterationMonitor ⇒
-          localNodeMetrics.map { metrics ⇒
-            if (localMonitor reactsOn metrics)
-              localMonitor react metrics
-          }
-
-        case clusterMonitor: ClusterMetricsAlterationMonitor ⇒
-          if (clusterMonitor reactsOn clusterNodesMetrics)
-            clusterMonitor react clusterNodesMetrics
-      }
-
-    }
-  }
-
-  def stop() = _isRunning.switchOff
-
-}
diff --git a/akka-cluster/src/main/scala/akka/cluster/metrics/MetricsProvider.scala b/akka-cluster/src/main/scala/akka/cluster/metrics/MetricsProvider.scala
deleted file mode 100644
index 0b366ef9c8..0000000000
--- a/akka-cluster/src/main/scala/akka/cluster/metrics/MetricsProvider.scala
+++ /dev/null
@@ -1,154 +0,0 @@
-/**
- * Copyright (C) 2009-2012 Typesafe Inc.
- */
-
-package akka.cluster.metrics
-
-import akka.cluster._
-import akka.event.EventHandler
-import java.lang.management.ManagementFactory
-import akka.util.ReflectiveAccess._
-import akka.util.Switch
-
-/*
- * Snapshot of the JVM / system that the node is running on
- *
- * @param nodeName name of the node where metrics are gathered
- * @param usedHeapMemory amount of heap memory currently used
- * @param committedHeapMemory amount of heap memory guaranteed to be available
- * @param maxHeapMemory maximum amount of heap memory that can be used
- * @param avaiableProcessors number of the processors available to the JVM
- * @param systemLoadAverage system load average. If the OS-specific Sigar native library is plugged in,
- * it's used to calculate the average load on the CPUs in the system. Otherwise, the value is retrieved from monitoring
- * MBeans. Hyperic Sigar provides more precise values, and, thus, if the library is provided, it's used by default.
- *
- */
-case class DefaultNodeMetrics(nodeName: String,
-                              usedHeapMemory: Long,
-                              committedHeapMemory: Long,
-                              maxHeapMemory: Long,
-                              avaiableProcessors: Int,
-                              systemLoadAverage: Double) extends NodeMetrics
-
-object MetricsProvider {
-
-  /*
-   * Maximum value of the system load average
-   */
-  val MAX_SYS_LOAD_AVG = 1
-
-  /*
-   * Minimum value of the system load average
-   */
-  val MIN_SYS_LOAD_AVG = 0
-
-  /*
-   * Default value of the system load average
-   */
-  val DEF_SYS_LOAD_AVG = 0.5
-
-}
-
-/*
- * Abstract metrics provider that returns metrics of the system the node is running on
- */
-trait MetricsProvider {
-
-  /*
-   * Gets metrics of the local system
-   */
-  def getLocalMetrics: NodeMetrics
-
-}
-
-/*
- * Loads JVM metrics through JMX monitoring beans
- */
-class JMXMetricsProvider extends MetricsProvider {
-
-  import MetricsProvider._
-
-  private val memoryMXBean = ManagementFactory.getMemoryMXBean
-
-  private val osMXBean = ManagementFactory.getOperatingSystemMXBean
-
-  /*
-   * Validates and calculates the system load average
-   *
-   * @param avg system load average obtained from a specific monitoring provider (may be incorrect)
-   * @return system load average, or the default value (0.5) if the passed value was out of the permitted
-   * bounds (0.0 to 1.0)
-   */
-  @inline
-  protected final def calcSystemLoadAverage(avg: Double) =
-    if (avg >= MIN_SYS_LOAD_AVG && avg <= MAX_SYS_LOAD_AVG) avg else DEF_SYS_LOAD_AVG
-
-  protected def systemLoadAverage = calcSystemLoadAverage(osMXBean.getSystemLoadAverage)
-
-  def getLocalMetrics =
-    DefaultNodeMetrics(Cluster.nodeAddress.nodeName,
-      memoryMXBean.getHeapMemoryUsage.getUsed,
-      memoryMXBean.getHeapMemoryUsage.getCommitted,
-      memoryMXBean.getHeapMemoryUsage.getMax,
-      osMXBean.getAvailableProcessors,
-      systemLoadAverage)
-
-}
-
-/*
- * Loads a wider range of better-quality metrics with Hyperic Sigar (native library)
- *
- * @param refreshTimeout Sigar gathers metrics during this interval
- */
-class SigarMetricsProvider private (private val sigarInstance: AnyRef) extends JMXMetricsProvider {
-
-  private val reportErrors = new Switch(true)
-
-  private val getCpuPercMethod = sigarInstance.getClass.getMethod("getCpuPerc")
-  private val sigarCpuCombinedMethod = getCpuPercMethod.getReturnType.getMethod("getCombined")
-
-  /*
-   * Wraps reflective calls to Hyperic Sigar
-   *
-   * @param callSigar reflective call to Hyperic Sigar
-   * @param fallback function which is invoked if the call to Sigar failed with an exception
-   */
-  private def callSigarMethodOrElse[T](callSigar: ⇒ T, fallback: ⇒ T): T =
-    try callSigar catch {
-      case thrw ⇒
-        reportErrors.switchOff {
-          EventHandler.warning(this, "Failed to get metrics from Hyperic Sigar. %s: %s"
-            .format(thrw.getClass.getName, thrw.getMessage))
-        }
-        fallback
-    }
-
-  /*
-   * Obtains the system load average from Sigar.
-   * If the value cannot be obtained, falls back to the system load average taken from JMX
-   */
-  override def systemLoadAverage = callSigarMethodOrElse(
-    calcSystemLoadAverage(sigarCpuCombinedMethod
-      .invoke(getCpuPercMethod.invoke(sigarInstance)).asInstanceOf[Double]),
-    super.systemLoadAverage)
-
-}
-
-object SigarMetricsProvider {
-
-  /*
-   * Instantiates the Sigar metrics provider through reflection, in order to avoid a compile-time dependency
-   * on the Hyperic Sigar library
-   */
-  def apply(refreshTimeout: Int): Either[Throwable, MetricsProvider] = try {
-    for {
-      sigarInstance ← createInstance[AnyRef]("org.hyperic.sigar.Sigar", noParams, noArgs).right
-      sigarProxyCacheClass: Class[_] ← getClassFor("org.hyperic.sigar.SigarProxyCache").right
-    } yield new SigarMetricsProvider(sigarProxyCacheClass
-      .getMethod("newInstance", Array(sigarInstance.getClass, classOf[Int]): _*)
-      .invoke(null, sigarInstance, new java.lang.Integer(refreshTimeout)))
-  } catch {
-    case thrw ⇒ Left(thrw)
-  }
-
-}
diff --git a/akka-cluster/src/main/scala/akka/cluster/storage/Storage.scala b/akka-cluster/src/main/scala/akka/cluster/storage/Storage.scala
deleted file mode 100644
index a402f2def1..0000000000
--- a/akka-cluster/src/main/scala/akka/cluster/storage/Storage.scala
+++ /dev/null
@@ -1,366 +0,0 @@
-package akka.cluster.storage
-
-/**
- * Copyright (C) 2009-2012 Typesafe Inc.
- */
-import akka.cluster.zookeeper.AkkaZkClient
-import akka.AkkaException
-import org.apache.zookeeper.{ KeeperException, CreateMode }
-import org.apache.zookeeper.data.Stat
-import java.util.concurrent.ConcurrentHashMap
-import annotation.tailrec
-import java.lang.{ RuntimeException, UnsupportedOperationException }
-
-/**
- * Simple abstraction to store an Array of bytes based on some String key.
- *
- * Nothing is being said about ACID, transactions etc. What is and isn't done on the lowest level
- * depends on the implementation of this Storage interface.
- *
- * The amount of data that is allowed to be inserted/updated is implementation specific. The InMemoryStorage
- * has no limits, but the ZooKeeperStorage has a maximum size of 1 MB.
- *
- * TODO: Class is up for better names.
- * TODO: Instead of a String as key, perhaps also a byte-array.
- */
-trait Storage {
-
-  /**
-   * Loads the VersionedData for the given key.
-   *
-   * This call doesn't care about the actual version of the data.
-   *
-   * @param key the key of the VersionedData to load.
-   * @return the VersionedData for the given entry.
-   * @throws MissingDataException if the entry with the given key doesn't exist.
-   * @throws StorageException if anything goes wrong while accessing the storage
-   */
-  def load(key: String): VersionedData
-
-  /**
-   * Loads the VersionedData for the given key and expectedVersion.
-   *
-   * This call can be used for optimistic locking since the version is included.
-   *
-   * @param key the key of the VersionedData to load
-   * @param expectedVersion the version the data to load should have.
-   * @throws MissingDataException if the data with the given key doesn't exist.
-   * @throws BadVersionException if the version is not the expected version.
-   * @throws StorageException if anything goes wrong while accessing the storage
-   */
-  def load(key: String, expectedVersion: Long): VersionedData
-
-  /**
-   * Checks if a VersionedData with the given key exists.
-   *
-   * @param key the key to check the existence for.
-   * @return true if it exists, false if not.
-   * @throws StorageException if anything goes wrong while accessing the storage
-   */
-  def exists(key: String): Boolean
-
-  /**
-   * Inserts a byte-array based on some key.
-   *
-   * @param key the key of the Data to insert.
-   * @param bytes the data to insert.
-   * @return the version of the written data (can be used for optimistic locking).
-   * @throws DataExistsException when VersionedData with the given Key already exists.
-   * @throws StorageException if anything goes wrong while accessing the storage
-   */
-  def insert(key: String, bytes: Array[Byte]): Long
-
-  /**
-   * Inserts the data if there is no data for that key, or overwrites it if it is there.
-   *
-   * This is the method you want to call if you just want to save something and don't
-   * care about any lost-update issues.
-   *
-   * @param key the key of the data
-   * @param bytes the data to insert
-   * @return the version of the written data (can be used for optimistic locking).
-   * @throws StorageException if anything goes wrong while accessing the storage
-   */
-  def insertOrOverwrite(key: String, bytes: Array[Byte]): Long
-
-  /**
-   * Overwrites the current data for the given key. This call doesn't care about the version of the existing data.
-   *
-   * @param key the key of the data to overwrite
-   * @param bytes the data to insert.
-   * @return the version of the written data (can be used for optimistic locking).
-   * @throws MissingDataException when the entry with the given key doesn't exist.
-   * @throws StorageException if anything goes wrong while accessing the storage
-   */
-  def overwrite(key: String, bytes: Array[Byte]): Long
-
-  /**
-   * Updates an existing value using an optimistic lock. So it expects the current data to have the expectedVersion,
-   * and only then will it do the update.
-   *
-   * @param key the key of the data to update
-   * @param bytes the content to write for the given key
-   * @param expectedVersion the version of the content that is expected to be there.
-   * @return the version of the written data (can be used for optimistic locking).
-   * @throws MissingDataException if no data for the given key exists
-   * @throws BadVersionException if the version of the found data doesn't match the expected version. So essentially
-   * if another update was already done.
-   * @throws StorageException if anything goes wrong while accessing the storage
-   */
-  def update(key: String, bytes: Array[Byte], expectedVersion: Long): Long
-}
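-
-/*
- * A minimal retry sketch (an illustration, not part of this trait) of the intended
- * optimistic-locking protocol: read the current version, write against it, and
- * retry on BadVersionException. The storage instance and the transform f are
- * assumed to be supplied by the caller.
- * {{{
- * def compareAndTransform(storage: Storage, key: String,
- *                         f: Array[Byte] ⇒ Array[Byte]): Long = {
- *   val current = storage.load(key) // data plus version
- *   try storage.update(key, f(current.data), current.version)
- *   catch {
- *     case e: BadVersionException ⇒ // lost the race against a concurrent writer, retry
- *       compareAndTransform(storage, key, f)
- *   }
- * }
- * }}}
- */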
-
-/**
- * The VersionedData is a container of data (some bytes) and a version (a Long).
- */
-class VersionedData(val data: Array[Byte], val version: Long) {}
-
-/**
- * An AkkaException thrown by the Storage module.
- */
-class StorageException(msg: String = null, cause: java.lang.Throwable = null) extends AkkaException(msg, cause) {
-  def this(msg: String) = this(msg, null);
-}
-
-/**
- * A StorageException thrown when an operation is done on a non-existing node.
- */
-class MissingDataException(msg: String = null, cause: java.lang.Throwable = null) extends StorageException(msg, cause) {
-  def this(msg: String) = this(msg, null);
-}
-
-/**
- * A StorageException thrown when an operation is done on an existing node, but no node was expected.
- */
-class DataExistsException(msg: String = null, cause: java.lang.Throwable = null) extends StorageException(msg, cause) {
-  def this(msg: String) = this(msg, null);
-}
-
-/**
- * A StorageException thrown when an operation causes an optimistic locking failure.
- */
-class BadVersionException(msg: String = null, cause: java.lang.Throwable = null) extends StorageException(msg, cause) {
-  def this(msg: String) = this(msg, null);
-}
-
-/**
- * A Storage implementation based on ZooKeeper.
- *
- * Each store method is atomic:
- * - everything is written or nothing is written,
- * - it is isolated, and thus thread-safe,
- * but it will not participate in any transactions.
- *
- */
-class ZooKeeperStorage(zkClient: AkkaZkClient, root: String = "/peter/storage") extends Storage {
-
-  var path = ""
-
-  //makes sure that the complete root exists on zookeeper.
-  root.split("/").foreach(
-    item ⇒ if (item.size > 0) {
-
-      path = path + "/" + item
-
-      if (!zkClient.exists(path)) {
-        //it could be that another thread is going to create this root node as well, so ignore it when it happens.
-        try {
-          zkClient.create(path, "".getBytes, CreateMode.PERSISTENT)
-        } catch {
-          case ignore: KeeperException.NodeExistsException ⇒
-        }
-      }
-    })
-
-  def toZkPath(key: String): String = {
-    root + "/" + key
-  }
-
-  def load(key: String) = try {
-    val stat = new Stat
-    val arrayOfBytes = zkClient.connection.readData(root + "/" + key, stat, false)
-    new VersionedData(arrayOfBytes, stat.getVersion)
-  } catch {
-    case e: KeeperException.NoNodeException ⇒ throw new MissingDataException(
-      String.format("Failed to load key [%s]: no data was found", key), e)
-    case e: KeeperException ⇒ throw new StorageException(
-      String.format("Failed to load key [%s]", key), e)
-  }
-
-  def load(key: String, expectedVersion: Long) = try {
-    val stat = new Stat
-    val arrayOfBytes = zkClient.connection.readData(root + "/" + key, stat, false)
-
-    if (stat.getVersion != expectedVersion) throw new BadVersionException(
-      "Failed to load key [" + key + "]: version mismatch, expected [" + expectedVersion + "]" +
-        " but found [" + stat.getVersion + "]")
-
-    new VersionedData(arrayOfBytes, stat.getVersion)
-  } catch {
-    case e: KeeperException.NoNodeException ⇒ throw new MissingDataException(
-      String.format("Failed to load key [%s]: no data was found", key), e)
-    case e: KeeperException ⇒ throw new StorageException(
-      String.format("Failed to load key [%s]", key), e)
-  }
-
-  def insertOrOverwrite(key: String, bytes: Array[Byte]) = {
-    try {
-      throw new UnsupportedOperationException()
-    } catch {
-      case e: KeeperException.NodeExistsException ⇒ throw new DataExistsException(
-        String.format("Failed to insert key [%s]: an entry already exists with the same key", key), e)
-      case e: KeeperException ⇒ throw new StorageException(
-        String.format("Failed to insert key [%s]", key), e)
-    }
-  }
-
-  def insert(key: String, bytes: Array[Byte]): Long = {
-    try {
-      zkClient.connection.create(root + "/" + key, bytes, CreateMode.PERSISTENT)
-      //todo: how to get hold of the version.
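-      // Note on the todo above: a znode freshly created by this create call starts
-      // at data version 0 in ZooKeeper, so returning 0 matches the actual version
-      // here. A sketch of reading the authoritative version back afterwards, using
-      // the same readData API as load above:
-      //   val stat = new Stat
-      //   zkClient.connection.readData(root + "/" + key, stat, false)
-      //   stat.getVersion.toLong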
-      val version: Long = 0
-      version
-    } catch {
-      case e: KeeperException.NodeExistsException ⇒ throw new DataExistsException(
-        String.format("Failed to insert key [%s]: an entry already exists with the same key", key), e)
-      case e: KeeperException ⇒ throw new StorageException(
-        String.format("Failed to insert key [%s]", key), e)
-    }
-  }
-
-  def exists(key: String) = try {
-    zkClient.connection.exists(toZkPath(key), false)
-  } catch {
-    case e: KeeperException ⇒ throw new StorageException(
-      String.format("Failed to check existence for key [%s]", key), e)
-  }
-
-  def update(key: String, bytes: Array[Byte], expectedVersion: Long): Long = {
-    try {
-      zkClient.connection.writeData(root + "/" + key, bytes, expectedVersion.asInstanceOf[Int])
-      throw new RuntimeException()
-    } catch {
-      case e: KeeperException.BadVersionException ⇒ throw new BadVersionException(
-        String.format("Failed to update key [%s]: version mismatch", key), e)
-      case e: KeeperException ⇒ throw new StorageException(
-        String.format("Failed to update key [%s]", key), e)
-    }
-  }
-
-  def overwrite(key: String, bytes: Array[Byte]): Long = {
-    try {
-      zkClient.connection.writeData(root + "/" + key, bytes)
-      -1L
-    } catch {
-      case e: KeeperException.NoNodeException ⇒ throw new MissingDataException(
-        String.format("Failed to overwrite key [%s]: a previous entry already exists", key), e)
-      case e: KeeperException ⇒ throw new StorageException(
-        String.format("Failed to overwrite key [%s]", key), e)
-    }
-  }
-}
-
-object InMemoryStorage {
-  val InitialVersion = 0;
-}
-
-/**
- * An in-memory Storage implementation. Useful for testing purposes.
- */
-final class InMemoryStorage extends Storage {
-
-  private val map = new ConcurrentHashMap[String, VersionedData]()
-
-  def load(key: String) = {
-    val result = map.get(key)
-
-    if (result == null) throw new MissingDataException(
-      String.format("Failed to load key [%s]: no data was found", key))
-
-    result
-  }
-
-  def load(key: String, expectedVersion: Long) = {
-    val result = load(key)
-
-    if (result.version != expectedVersion) throw new BadVersionException(
-      "Failed to load key [" + key + "]: version mismatch, expected [" + expectedVersion + "] " +
-        "but found [" + result.version + "]")
-
-    result
-  }
-
-  def exists(key: String) = map.containsKey(key)
-
-  def insert(key: String, bytes: Array[Byte]): Long = {
-    val version: Long = InMemoryStorage.InitialVersion
-    val result = new VersionedData(bytes, version)
-
-    val previous = map.putIfAbsent(key, result)
-    if (previous != null) throw new DataExistsException(
-      String.format("Failed to insert key [%s]: the key already has been inserted previously", key))
-
-    version
-  }
-
-  @tailrec
-  def update(key: String, bytes: Array[Byte], expectedVersion: Long): Long = {
-    val found = map.get(key)
-
-    if (found == null) throw new MissingDataException(
-      String.format("Failed to update key [%s], no previous entry exists", key))
-
-    if (expectedVersion != found.version) throw new BadVersionException(
-      "Failed to update key [" + key + "]: version mismatch, expected [" + expectedVersion + "]" +
-        " but found [" + found.version + "]")
-
-    val newVersion: Long = expectedVersion + 1
-
-    if (map.replace(key, found, new VersionedData(bytes, newVersion))) newVersion
-    else update(key, bytes, expectedVersion)
-  }
-
-  @tailrec
-  def overwrite(key: String, bytes: Array[Byte]): Long = {
-    val current = map.get(key)
-
-    if (current == null) throw new MissingDataException(
-      String.format("Failed to overwrite key [%s], no previous entry exists", key))
-
-    val update = new VersionedData(bytes, current.version + 1)
-
-    if (map.replace(key, current, update)) update.version
-    else overwrite(key, bytes)
-  }
-
-  def insertOrOverwrite(key: String, bytes: Array[Byte]): Long = {
-    val version = InMemoryStorage.InitialVersion
-    val result = new VersionedData(bytes, version)
-
-    val previous = map.putIfAbsent(key, result)
-
-    if (previous == null) result.version
-    else overwrite(key, bytes)
-  }
-}
-
-//TODO: To minimize the number of dependencies, should the Storage not be placed in a separate module?
-//class VoldemortRawStorage(storeClient: StoreClient) extends Storage {
-//
-//  def load(Key: String) = {
-//    try {
-//
-//    } catch {
-//      case
-//    }
-//  }
-//
-//  override def insert(key: String, bytes: Array[Byte]) {
-//    throw new UnsupportedOperationException()
-//  }
-//
-//  def update(key: String, bytes: Array[Byte]) {
-//    throw new UnsupportedOperationException()
-//  }
-//}
diff --git a/akka-cluster/src/main/scala/akka/cluster/zookeeper/AkkaZkClient.scala b/akka-cluster/src/main/scala/akka/cluster/zookeeper/AkkaZkClient.scala
deleted file mode 100644
index 9137959877..0000000000
--- a/akka-cluster/src/main/scala/akka/cluster/zookeeper/AkkaZkClient.scala
+++ /dev/null
@@ -1,34 +0,0 @@
-/**
- * Copyright (C) 2009-2012 Typesafe Inc.
- */
-package akka.cluster.zookeeper
-
-import org.I0Itec.zkclient._
-import org.I0Itec.zkclient.serialize._
-import org.I0Itec.zkclient.exception._
-
-/**
- * ZooKeeper client. Holds the ZooKeeper connection and manages its session.
- */
-class AkkaZkClient(zkServers: String,
-                   sessionTimeout: Int,
-                   connectionTimeout: Int,
-                   zkSerializer: ZkSerializer = new SerializableSerializer)
-  extends ZkClient(zkServers, sessionTimeout, connectionTimeout, zkSerializer) {
-
-  def connection: ZkConnection = _connection.asInstanceOf[ZkConnection]
-
-  def reconnect() {
-    val zkLock = getEventLock
-
-    zkLock.lock()
-    try {
-      _connection.close()
-      _connection.connect(this)
-    } catch {
-      case e: InterruptedException ⇒ throw new ZkInterruptedException(e)
-    } finally {
-      zkLock.unlock()
-    }
-  }
-}
diff --git a/akka-cluster/src/main/scala/akka/cluster/zookeeper/AkkaZooKeeper.scala b/akka-cluster/src/main/scala/akka/cluster/zookeeper/AkkaZooKeeper.scala
deleted file mode 100644
index b5165ffb72..0000000000
--- a/akka-cluster/src/main/scala/akka/cluster/zookeeper/AkkaZooKeeper.scala
+++ /dev/null
@@ -1,32 +0,0 @@
-/**
- * Copyright (C) 2009-2012 Typesafe Inc.
- */
-package akka.cluster.zookeeper
-
-import org.I0Itec.zkclient._
-import org.apache.commons.io.FileUtils
-import java.io.File
-
-object AkkaZooKeeper {
-  /**
-   * Starts up a local ZooKeeper server. Should only be used for testing purposes.
-   */
-  def startLocalServer(dataPath: String, logPath: String): ZkServer =
-    startLocalServer(dataPath, logPath, 2181, 500)
-
-  /**
-   * Starts up a local ZooKeeper server. Should only be used for testing purposes.
-   */
-  def startLocalServer(dataPath: String, logPath: String, port: Int, tickTime: Int): ZkServer = {
-    FileUtils.deleteDirectory(new File(dataPath))
-    FileUtils.deleteDirectory(new File(logPath))
-    val zkServer = new ZkServer(
-      dataPath, logPath,
-      new IDefaultNameSpace() {
-        def createDefaultNameSpace(zkClient: ZkClient) {}
-      },
-      port, tickTime)
-    zkServer.start()
-    zkServer
-  }
-}
diff --git a/akka-cluster/src/main/scala/akka/cluster/zookeeper/ZooKeeperBarrier.scala b/akka-cluster/src/main/scala/akka/cluster/zookeeper/ZooKeeperBarrier.scala
deleted file mode 100644
index c1f51ceb96..0000000000
--- a/akka-cluster/src/main/scala/akka/cluster/zookeeper/ZooKeeperBarrier.scala
+++ /dev/null
@@ -1,104 +0,0 @@
-/**
- * Copyright (C) 2009-2012 Typesafe Inc.
- */
-package akka.cluster.zookeeper
-
-import akka.util.Duration
-import akka.util.duration._
-
-import org.I0Itec.zkclient._
-import org.I0Itec.zkclient.exception._
-
-import java.util.{ List ⇒ JList }
-import java.util.concurrent.CountDownLatch
-
-class BarrierTimeoutException(message: String) extends RuntimeException(message)
-
-/**
- * Barrier based on the ZooKeeper barrier tutorial.
- */
-object ZooKeeperBarrier {
-  val BarriersNode = "/barriers"
-  val DefaultTimeout = 60 seconds
-
-  def apply(zkClient: ZkClient, name: String, node: String, count: Int) =
-    new ZooKeeperBarrier(zkClient, name, node, count, DefaultTimeout)
-
-  def apply(zkClient: ZkClient, name: String, node: String, count: Int, timeout: Duration) =
-    new ZooKeeperBarrier(zkClient, name, node, count, timeout)
-
-  def apply(zkClient: ZkClient, cluster: String, name: String, node: String, count: Int) =
-    new ZooKeeperBarrier(zkClient, cluster + "-" + name, node, count, DefaultTimeout)
-
-  def apply(zkClient: ZkClient, cluster: String, name: String, node: String, count: Int, timeout: Duration) =
-    new ZooKeeperBarrier(zkClient, cluster + "-" + name, node, count, timeout)
-
-  def ignore[E: Manifest](body: ⇒ Unit) {
-    try {
-      body
-    } catch {
-      case e if manifest[E].erasure.isAssignableFrom(e.getClass) ⇒ ()
-    }
-  }
-}
-
-/**
- * Barrier based on the ZooKeeper barrier tutorial.
- */
-class ZooKeeperBarrier(zkClient: ZkClient, name: String, node: String, count: Int, timeout: Duration)
-  extends IZkChildListener {
-
-  import ZooKeeperBarrier.{ BarriersNode, ignore }
-
-  val barrier = BarriersNode + "/" + name
-  val entry = barrier + "/" + node
-  val ready = barrier + "/ready"
-
-  val exitBarrier = new CountDownLatch(1)
-
-  ignore[ZkNodeExistsException](zkClient.createPersistent(BarriersNode))
-  ignore[ZkNodeExistsException](zkClient.createPersistent(barrier))
-
-  def apply(body: ⇒ Unit) {
-    enter()
-    body
-    leave()
-  }
-
-  /**
-   * An await does an enter/leave, making this barrier a 'single' barrier instead of a double barrier.
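- *
- * A hedged usage sketch (zkClient is assumed to be connected; two participants,
- * node1 and node2, each create the same barrier and run their step inside apply,
- * which wraps the body in enter()/leave()):
- * {{{
- * val barrier = ZooKeeperBarrier(zkClient, "test-start", "node1", 2)
- * barrier {
- *   // runs once both participants have entered; leave() is called afterwards
- * }
- * }}}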
- */ - def await() { - enter() - leave() - } - - def enter() = { - zkClient.createEphemeral(entry) - if (zkClient.countChildren(barrier) >= count) - ignore[ZkNodeExistsException](zkClient.createPersistent(ready)) - else - zkClient.waitUntilExists(ready, timeout.unit, timeout.length) - if (!zkClient.exists(ready)) { - throw new BarrierTimeoutException("Timeout (%s) while waiting for entry barrier" format timeout) - } - zkClient.subscribeChildChanges(barrier, this) - } - - def leave() { - zkClient.delete(entry) - exitBarrier.await(timeout.length, timeout.unit) - if (zkClient.countChildren(barrier) > 0) { - zkClient.unsubscribeChildChanges(barrier, this) - throw new BarrierTimeoutException("Timeout (%s) while waiting for exit barrier" format timeout) - } - zkClient.unsubscribeChildChanges(barrier, this) - } - - def handleChildChange(path: String, children: JList[String]) { - if (children.size <= 1) { - ignore[ZkNoNodeException](zkClient.delete(ready)) - exitBarrier.countDown() - } - } -} diff --git a/akka-remote/src/multi-jvm/scala/akka/remote/GossipMembershipMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipMembershipMultiJvmSpec.scala similarity index 99% rename from akka-remote/src/multi-jvm/scala/akka/remote/GossipMembershipMultiJvmSpec.scala rename to akka-cluster/src/multi-jvm/scala/akka/cluster/GossipMembershipMultiJvmSpec.scala index 418f6f385b..c380d3e5eb 100644 --- a/akka-remote/src/multi-jvm/scala/akka/remote/GossipMembershipMultiJvmSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/GossipMembershipMultiJvmSpec.scala @@ -1,4 +1,4 @@ -// package akka.remote +// package akka.cluster // import akka.actor.Actor // import akka.remote._ diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmNode1.conf deleted file mode 100644 index 2f642a20f0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmNode1.conf +++ /dev/null @@ -1,2 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmNode1.opts deleted file mode 100644 index dc86c1c9c0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmNode2.conf deleted file mode 100644 index 2f642a20f0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmNode2.conf +++ /dev/null @@ -1,2 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmNode2.opts deleted file mode 
100644 index bb140941a5..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmNode2.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node2 -Dakka.remote.port=9992 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmSpec.scala deleted file mode 100644 index f1b9f5a7ae..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/newleader/NewLeaderChangeListenerMultiJvmSpec.scala +++ /dev/null @@ -1,63 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -package akka.cluster.api.changelisteners.newleader - -import org.scalatest.WordSpec -import org.scalatest.matchers.MustMatchers -import org.scalatest.BeforeAndAfterAll - -import akka.cluster._ -import ChangeListener._ -import Cluster._ -import akka.cluster.LocalCluster._ - -import java.util.concurrent._ - -object NewLeaderChangeListenerMultiJvmSpec { - var NrOfNodes = 2 -} - -class NewLeaderChangeListenerMultiJvmNode1 extends MasterClusterTestNode { - import NewLeaderChangeListenerMultiJvmSpec._ - - val testNodes = NrOfNodes - - "A NewLeader change listener" must { - - "be invoked after leader election is completed" ignore { - barrier("start-node1", NrOfNodes) { - Cluster.node.start() - } - - barrier("start-node2", NrOfNodes).await() - - System.exit(0) - } - } -} - -class NewLeaderChangeListenerMultiJvmNode2 extends ClusterTestNode { - import NewLeaderChangeListenerMultiJvmSpec._ - - "A NewLeader change listener" must { - - "be invoked after leader election is completed" ignore { - val latch = new CountDownLatch(1) - - barrier("start-node1", NrOfNodes).await() - - barrier("start-node2", NrOfNodes) { - node.register(new ChangeListener { - override def newLeader(node: String, client: ClusterNode) { - latch.countDown - } - }) - } - latch.await(10, TimeUnit.SECONDS) must be === true - - node.shutdown() - } - } -} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmNode1.conf deleted file mode 100644 index 2f642a20f0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmNode1.conf +++ /dev/null @@ -1,2 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmNode1.opts deleted file mode 100644 index dc86c1c9c0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmNode2.conf deleted file mode 100644 index 2f642a20f0..0000000000 --- 
a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmNode2.conf +++ /dev/null @@ -1,2 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmNode2.opts deleted file mode 100644 index bb140941a5..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmNode2.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node2 -Dakka.remote.port=9992 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmSpec.scala deleted file mode 100644 index deec5c19e6..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodeconnected/NodeConnectedChangeListenerMultiJvmSpec.scala +++ /dev/null @@ -1,65 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -package akka.cluster.api.changelisteners.nodeconnected - -import org.scalatest.WordSpec -import org.scalatest.matchers.MustMatchers -import org.scalatest.BeforeAndAfterAll - -import akka.cluster._ -import ChangeListener._ -import Cluster._ -import akka.cluster.LocalCluster._ - -import java.util.concurrent._ - -object NodeConnectedChangeListenerMultiJvmSpec { - var NrOfNodes = 2 -} - -class NodeConnectedChangeListenerMultiJvmNode1 extends MasterClusterTestNode { - import NodeConnectedChangeListenerMultiJvmSpec._ - - val testNodes = NrOfNodes - - "A NodeConnected change listener" must { - - "be invoked when a new node joins the cluster" in { - val latch = new CountDownLatch(1) - node.register(new ChangeListener { - override def nodeConnected(node: String, client: ClusterNode) { - latch.countDown - } - }) - - barrier("start-node1", NrOfNodes) { - Cluster.node.start() - } - - barrier("start-node2", NrOfNodes) { - latch.await(5, TimeUnit.SECONDS) must be === true - } - - node.shutdown() - } - } -} - -class NodeConnectedChangeListenerMultiJvmNode2 extends ClusterTestNode { - import NodeConnectedChangeListenerMultiJvmSpec._ - - "A NodeConnected change listener" must { - - "be invoked when a new node joins the cluster" in { - barrier("start-node1", NrOfNodes).await() - - barrier("start-node2", NrOfNodes) { - Cluster.node.start() - } - - node.shutdown() - } - } -} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmNode1.conf deleted file mode 100644 index 2f642a20f0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmNode1.conf +++ /dev/null @@ -1,2 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmNode1.opts deleted file 
mode 100644 index dc86c1c9c0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmNode2.conf deleted file mode 100644 index 2f642a20f0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmNode2.conf +++ /dev/null @@ -1,2 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmNode2.opts deleted file mode 100644 index bb140941a5..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmNode2.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node2 -Dakka.remote.port=9992 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmSpec.scala deleted file mode 100644 index 54a327126e..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/changelisteners/nodedisconnected/NodeDisconnectedChangeListenerMultiJvmSpec.scala +++ /dev/null @@ -1,65 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. 
- */ - -package akka.cluster.api.changelisteners.nodedisconnected - -import org.scalatest.WordSpec -import org.scalatest.matchers.MustMatchers -import org.scalatest.BeforeAndAfterAll - -import akka.cluster._ -import ChangeListener._ -import Cluster._ -import akka.cluster.LocalCluster._ - -import java.util.concurrent._ - -object NodeDisconnectedChangeListenerMultiJvmSpec { - var NrOfNodes = 2 -} - -class NodeDisconnectedChangeListenerMultiJvmNode1 extends MasterClusterTestNode { - import NodeDisconnectedChangeListenerMultiJvmSpec._ - - val testNodes = NrOfNodes - - "A NodeDisconnected change listener" must { - - "be invoked when a new node leaves the cluster" in { - val latch = new CountDownLatch(1) - node.register(new ChangeListener { - override def nodeDisconnected(node: String, client: ClusterNode) { - latch.countDown - } - }) - - barrier("start-node1", NrOfNodes) { - Cluster.node.start() - } - - barrier("start-node2", NrOfNodes).await() - - latch.await(10, TimeUnit.SECONDS) must be === true - - node.shutdown() - } - } -} - -class NodeDisconnectedChangeListenerMultiJvmNode2 extends ClusterTestNode { - import NodeDisconnectedChangeListenerMultiJvmSpec._ - - "A NodeDisconnected change listener" must { - - "be invoked when a new node leaves the cluster" in { - barrier("start-node1", NrOfNodes).await() - - barrier("start-node2", NrOfNodes) { - Cluster.node.start() - } - - node.shutdown() - } - } -} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmNode1.conf deleted file mode 100644 index 2f642a20f0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmNode1.conf +++ /dev/null @@ -1,2 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmNode1.opts deleted file mode 100644 index dc86c1c9c0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmNode2.conf deleted file mode 100644 index 2f642a20f0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmNode2.conf +++ /dev/null @@ -1,2 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmNode2.opts deleted file mode 100644 index bb140941a5..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmNode2.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node2 -Dakka.remote.port=9992 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmSpec.scala 
deleted file mode 100644 index f9aabbb004..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/configuration/ConfigurationStorageMultiJvmSpec.scala +++ /dev/null @@ -1,89 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -package akka.cluster.api.configuration - -import org.scalatest.WordSpec -import org.scalatest.matchers.MustMatchers -import org.scalatest.BeforeAndAfterAll - -import akka.cluster._ -import Cluster._ -import akka.cluster.LocalCluster._ - -object ConfigurationStorageMultiJvmSpec { - var NrOfNodes = 2 -} - -class ConfigurationStorageMultiJvmNode1 extends MasterClusterTestNode { - import ConfigurationStorageMultiJvmSpec._ - - val testNodes = NrOfNodes - - "A cluster" must { - - "be able to store, read and remove custom configuration data" in { - - barrier("start-node-1", NrOfNodes) { - Cluster.node.start() - } - - barrier("start-node-2", NrOfNodes).await() - - barrier("store-config-data-node-1", NrOfNodes) { - node.setConfigElement("key1", "value1".getBytes) - } - - barrier("read-config-data-node-2", NrOfNodes).await() - - barrier("remove-config-data-node-2", NrOfNodes).await() - - barrier("try-read-config-data-node-1", NrOfNodes) { - val option = node.getConfigElement("key1") - option.isDefined must be(false) - - val elements = node.getConfigElementKeys - elements.size must be(0) - } - - node.shutdown() - } - } -} - -class ConfigurationStorageMultiJvmNode2 extends ClusterTestNode { - import ConfigurationStorageMultiJvmSpec._ - - "A cluster" must { - - "be able to store, read and remove custom configuration data" in { - - barrier("start-node-1", NrOfNodes).await() - - barrier("start-node-2", NrOfNodes) { - Cluster.node.start() - } - - barrier("store-config-data-node-1", NrOfNodes).await() - - barrier("read-config-data-node-2", NrOfNodes) { - val option = node.getConfigElement("key1") - option.isDefined must be(true) - option.get must be("value1".getBytes) - - val elements = node.getConfigElementKeys - elements.size must be(1) - elements.head must be("key1") - } - - barrier("remove-config-data-node-2", NrOfNodes) { - node.removeConfigElement("key1") - } - - barrier("try-read-config-data-node-1", NrOfNodes).await() - - node.shutdown() - } - } -} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmNode1.conf deleted file mode 100644 index 2f642a20f0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmNode1.conf +++ /dev/null @@ -1,2 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmNode1.opts deleted file mode 100644 index dc86c1c9c0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmNode2.conf deleted file mode 100644 index 2f642a20f0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmNode2.conf +++ /dev/null @@ -1,2 +0,0 
@@
-akka.enabled-modules = ["cluster"]
-akka.event-handler-level = "WARNING"
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmNode2.opts
deleted file mode 100644
index bb140941a5..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmNode2.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node2 -Dakka.remote.port=9992
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmSpec.scala
deleted file mode 100644
index 479f77e0d3..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/leader/election/LeaderElectionMultiJvmSpec.scala
+++ /dev/null
@@ -1,71 +0,0 @@
-/**
- * Copyright (C) 2009-2012 Typesafe Inc.
- */
-
-package akka.cluster.api.leader.election
-
-import org.scalatest.WordSpec
-import org.scalatest.matchers.MustMatchers
-import org.scalatest.BeforeAndAfterAll
-
-import akka.cluster._
-import ChangeListener._
-import Cluster._
-import akka.cluster.LocalCluster._
-
-import java.util.concurrent._
-
-object LeaderElectionMultiJvmSpec {
-  var NrOfNodes = 2
-}
-/*
-class LeaderElectionMultiJvmNode1 extends MasterClusterTestNode {
-  import LeaderElectionMultiJvmSpec._
-
-  val testNodes = NrOfNodes
-
-  "A cluster" must {
-
-    "be able to elect a single leader in the cluster and perform re-election if leader resigns" in {
-
-      barrier("start-node1", NrOfNodes) {
-        Cluster.node.start()
-      }
-      node.isLeader must be === true
-
-      barrier("start-node2", NrOfNodes) {
-      }
-      node.isLeader must be === true
-
-      barrier("stop-node1", NrOfNodes) {
-        node.resign()
-      }
-    }
-  }
-}
-
-class LeaderElectionMultiJvmNode2 extends ClusterTestNode {
-  import LeaderElectionMultiJvmSpec._
-
-  "A cluster" must {
-
-    "be able to elect a single leader in the cluster and perform re-election if leader resigns" in {
-
-      barrier("start-node1", NrOfNodes) {
-      }
-      node.isLeader must be === false
-
-      barrier("start-node2", NrOfNodes) {
-        Cluster.node.start()
-      }
-      node.isLeader must be === false
-
-      barrier("stop-node1", NrOfNodes) {
-      }
-      Thread.sleep(1000) // wait for re-election
-
-      node.isLeader must be === true
-    }
-  }
-}
-*/
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmNode1.conf
deleted file mode 100644
index 2f642a20f0..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmNode1.conf
+++ /dev/null
@@ -1,2 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handler-level = "WARNING"
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmNode1.opts
deleted file mode 100644
index dc86c1c9c0..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmNode1.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node1 -Dakka.remote.port=9991
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmNode2.conf
deleted file mode 100644
index 2f642a20f0..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmNode2.conf
+++ /dev/null
@@ -1,2 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handler-level = "WARNING"
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmNode2.opts
deleted file mode 100644
index bb140941a5..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmNode2.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node2 -Dakka.remote.port=9992
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmSpec.scala
deleted file mode 100644
index c20bf9269c..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/api/registry/RegistryStoreMultiJvmSpec.scala
+++ /dev/null
@@ -1,116 +0,0 @@
-/**
- * Copyright (C) 2009-2012 Typesafe Inc.
- */
-
-package akka.cluster.api.registry
-
-import org.scalatest.WordSpec
-import org.scalatest.matchers.MustMatchers
-import org.scalatest.BeforeAndAfterAll
-
-import akka.actor._
-import Actor._
-import akka.cluster._
-import ChangeListener._
-import Cluster._
-import akka.config.Config
-import akka.serialization.Serialization
-import akka.cluster.LocalCluster._
-
-import java.util.concurrent._
-
-object RegistryStoreMultiJvmSpec {
-  var NrOfNodes = 2
-
-  class HelloWorld1 extends Actor with Serializable {
-    def receive = {
-      case "Hello" ⇒
-        reply("World from node [" + Config.nodename + "]")
-    }
-  }
-
-  class HelloWorld2 extends Actor with Serializable {
-    var counter = 0
-    def receive = {
-      case "Hello" ⇒
-        Thread.sleep(1000)
-        counter += 1
-      case "Count" ⇒
-        reply(counter)
-    }
-  }
-}
-
-class RegistryStoreMultiJvmNode1 extends MasterClusterTestNode {
-  import RegistryStoreMultiJvmSpec._
-
-  val testNodes = NrOfNodes
-
-  "A cluster" must {
-
-    "be able to store an ActorRef in the cluster without a replication strategy and retrieve it with 'use'" in {
-
-      barrier("start-node-1", NrOfNodes) {
-        Cluster.node.start()
-      }
-
-      barrier("start-node-2", NrOfNodes).await()
-
-      barrier("store-1-in-node-1", NrOfNodes) {
-        node.store("hello-world-1", classOf[HelloWorld1], Serialization.serializerFor(classOf[HelloWorld1]))
-      }
-
-      barrier("use-1-in-node-2", NrOfNodes).await()
-
-      barrier("store-2-in-node-1", NrOfNodes) {
-        node.store("hello-world-2", classOf[HelloWorld1], false, Serialization.serializerFor(classOf[HelloWorld1]))
-      }
-
-      barrier("use-2-in-node-2", NrOfNodes).await()
-
-      node.shutdown()
-    }
-  }
-}
-
-class RegistryStoreMultiJvmNode2 extends ClusterTestNode {
-  import RegistryStoreMultiJvmSpec._
-
-  "A cluster" must {
-
-    "be able to store an actor in the cluster with 'store' and retrieve it with 'use'" in {
-
-      barrier("start-node-1", NrOfNodes).await()
-
-      barrier("start-node-2", NrOfNodes) {
-        Cluster.node.start()
-      }
-
-      barrier("store-1-in-node-1", NrOfNodes).await()
-
-      barrier("use-1-in-node-2", NrOfNodes) {
-        val actorOrOption = node.use("hello-world-1")
-        if (actorOrOption.isEmpty) fail("Actor could not be retrieved")
-
-        val actorRef = actorOrOption.get
-        actorRef.address must be("hello-world-1")
-
-        (actorRef ? "Hello").as[String].get must be("World from node [node2]")
-      }
-
-      barrier("store-2-in-node-1", NrOfNodes).await()
-
-      barrier("use-2-in-node-2", NrOfNodes) {
-        val actorOrOption = node.use("hello-world-2")
-        if (actorOrOption.isEmpty) fail("Actor could not be retrieved")
-
-        val actorRef = actorOrOption.get
-        actorRef.address must be("hello-world-2")
-
-        (actorRef ? "Hello").as[String].get must be("World from node [node2]")
-      }
-
-      node.shutdown()
-    }
-  }
-}
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmNode1.conf
deleted file mode 100644
index 88df1a6421..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmNode1.conf
+++ /dev/null
@@ -1,4 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handler-level = "WARNING"
-akka.actor.deployment.service-hello.router = "round-robin"
-akka.actor.deployment.service-hello.nr-of-instances = 1
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmNode1.opts
deleted file mode 100644
index dc86c1c9c0..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmNode1.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node1 -Dakka.remote.port=9991
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmNode2.conf
deleted file mode 100644
index 88df1a6421..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmNode2.conf
+++ /dev/null
@@ -1,4 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handler-level = "WARNING"
-akka.actor.deployment.service-hello.router = "round-robin"
-akka.actor.deployment.service-hello.nr-of-instances = 1
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmNode2.opts
deleted file mode 100644
index bb140941a5..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmNode2.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node2 -Dakka.remote.port=9992
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmSpec.scala
deleted file mode 100644
index ef0b79b4a7..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/deployment/DeploymentMultiJvmSpec.scala
+++ /dev/null
@@ -1,75 +0,0 @@
-/**
- * Copyright (C) 2009-2012 Typesafe Inc.
- */
-
-package akka.cluster.deployment
-
-import org.scalatest.WordSpec
-import org.scalatest.matchers.MustMatchers
-import org.scalatest.BeforeAndAfterAll
-
-import akka.actor._
-import Actor._
-import akka.cluster._
-import Cluster._
-import akka.cluster.LocalCluster._
-
-object DeploymentMultiJvmSpec {
-  var NrOfNodes = 2
-}
-
-class DeploymentMultiJvmNode1 extends MasterClusterTestNode {
-  import DeploymentMultiJvmSpec._
-
-  val testNodes = NrOfNodes
-
-  "A ClusterDeployer" must {
-
-    "be able to deploy deployments in akka.conf and lookup the deployments by 'address'" in {
-
-      barrier("start-node-1", NrOfNodes) {
-        Cluster.node.start()
-      }
-
-      barrier("start-node-2", NrOfNodes).await()
-
-      barrier("perform-deployment-on-node-1", NrOfNodes) {
-        Deployer.start()
-      }
-
-      barrier("lookup-deployment-node-2", NrOfNodes).await()
-
-      node.shutdown()
-    }
-  }
-}
-
-class DeploymentMultiJvmNode2 extends ClusterTestNode {
-  import DeploymentMultiJvmSpec._
-
-  "A cluster" must {
-
-    "be able to store, read and remove custom configuration data" in {
-
-      barrier("start-node-1", NrOfNodes).await()
-
-      barrier("start-node-2", NrOfNodes) {
-        Cluster.node.start()
-      }
-
-      barrier("perform-deployment-on-node-1", NrOfNodes).await()
-
-      barrier("lookup-deployment-node-2", NrOfNodes) {
-        Deployer.start()
-        val deployments = Deployer.deploymentsInConfig
-        deployments map { oldDeployment ⇒
-          val newDeployment = ClusterDeployer.lookupDeploymentFor(oldDeployment.address)
-          newDeployment must be('defined)
-          oldDeployment must equal(newDeployment.get)
-        }
-      }
-
-      node.shutdown()
-    }
-  }
-}
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/local/LocalMetricsMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/local/LocalMetricsMultiJvmNode1.conf
deleted file mode 100644
index 8d5284be46..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/local/LocalMetricsMultiJvmNode1.conf
+++ /dev/null
@@ -1,4 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handlers = ["akka.testkit.TestEventListener"]
-akka.event-handler-level = "WARNING"
-akka.cluster.metrics-refresh-timeout = 1
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/local/LocalMetricsMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/local/LocalMetricsMultiJvmNode1.opts
deleted file mode 100644
index dc86c1c9c0..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/local/LocalMetricsMultiJvmNode1.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node1 -Dakka.remote.port=9991
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/local/LocalMetricsMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/local/LocalMetricsMultiJvmSpec.scala
deleted file mode 100644
index 380d68d8ef..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/local/LocalMetricsMultiJvmSpec.scala
+++ /dev/null
@@ -1,134 +0,0 @@
-/**
- * Copyright (C) 2009-2012 Typesafe Inc.
- */
-
-package akka.cluster.metrics.local
-
-import akka.cluster._
-import akka.actor._
-import Actor._
-import Cluster._
-import akka.dispatch._
-import akka.util.Duration
-import akka.util.duration._
-import akka.cluster.metrics._
-import java.util.concurrent.atomic.AtomicInteger
-
-object LocalMetricsMultiJvmSpec {
-  val NrOfNodes = 1
-}
-
-class LocalMetricsMultiJvmNode1 extends MasterClusterTestNode {
-
-  import LocalMetricsMultiJvmSpec._
-
-  val testNodes = NrOfNodes
-
-  override def beforeAll = {
-    super.beforeAll()
-    node
-  }
-
-  override def afterAll = {
-    node.shutdown()
-    super.afterAll()
-  }
-
-  "Metrics manager" must {
-
-    def timeout = node.metricsManager.refreshTimeout
-
-    "be initialized with refresh timeout value, specified in akka.conf" in {
-      timeout must be(1.second)
-    }
-
-    "return up-to-date local node metrics straight from MBeans/Sigar" in {
-      node.metricsManager.getLocalMetrics must not be (null)
-
-      node.metricsManager.getLocalMetrics.systemLoadAverage must be(0.5 plusOrMinus 0.5)
-    }
-
-    "return metrics cached in the MetricsManagerLocalMetrics" in {
-      node.metricsManager.getMetrics(nodeAddress.nodeName) must not be (null)
-    }
-
-    "return local node metrics from ZNode" in {
-      node.metricsManager.getMetrics(nodeAddress.nodeName, false) must not be (null)
-    }
-
-    "return cached metrics of all nodes in the cluster" in {
-      node.metricsManager.getAllMetrics.size must be(1)
-      node.metricsManager.getAllMetrics.find(_.nodeName == "node1") must not be (null)
-    }
-
-    "throw no exceptions, when user attempts to get metrics of a non-existing node" in {
-      node.metricsManager.getMetrics("nonexisting") must be(None)
-      node.metricsManager.getMetrics("nonexisting", false) must be(None)
-    }
-
-    "regularly update cached metrics" in {
-      val oldMetrics = node.metricsManager.getLocalMetrics
-      Thread sleep timeout.toMillis
-      node.metricsManager.getLocalMetrics must not be (oldMetrics)
-    }
-
-    "allow to track JVM state and bind handles through MetricsAlterationMonitors" in {
-      val monitorReponse = Promise[String]()
-
-      node.metricsManager.addMonitor(new LocalMetricsAlterationMonitor {
-
-        val id = "heapMemoryThresholdMonitor"
-
-        def reactsOn(metrics: NodeMetrics) = metrics.usedHeapMemory > 1
-
-        def react(metrics: NodeMetrics) = monitorReponse.success("Too much memory is used!")
-
-      })
-
-      Await.result(monitorReponse, 5 seconds) must be("Too much memory is used!")
-
-    }
-
-    class FooMonitor(monitorWorked: AtomicInteger) extends LocalMetricsAlterationMonitor {
-      val id = "fooMonitor"
-      def reactsOn(metrics: NodeMetrics) = true
-      def react(metrics: NodeMetrics) = monitorWorked.set(monitorWorked.get + 1)
-    }
-
-    "allow to unregister the monitor" in {
-
-      val monitorWorked = new AtomicInteger(0)
-      val fooMonitor = new FooMonitor(monitorWorked)
-
-      node.metricsManager.addMonitor(fooMonitor)
-      node.metricsManager.removeMonitor(fooMonitor)
-
-      val oldValue = monitorWorked.get
-      Thread sleep timeout.toMillis
-      monitorWorked.get must be(oldValue)
-
-    }
-
-    "stop notifying monitors, when stopped" in {
-
-      node.metricsManager.stop()
-
-      val monitorWorked = new AtomicInteger(0)
-
-      node.metricsManager.addMonitor(new LocalMetricsAlterationMonitor {
-        val id = "fooMonitor"
-        def reactsOn(metrics: NodeMetrics) = true
-        def react(metrics: NodeMetrics) = monitorWorked.set(monitorWorked.get + 1)
-      })
-
-      monitorWorked.get must be(0)
-
-      node.metricsManager.start()
-      Thread sleep (timeout.toMillis * 2)
-      monitorWorked.get must be > (1)
-
-    }
-
-  }
-
-}
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmNode1.conf
deleted file mode 100644
index 172e980612..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmNode1.conf
+++ /dev/null
@@ -1,3 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handlers = ["akka.testkit.TestEventListener"]
-akka.event-handler-level = "WARNING"
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmNode1.opts
deleted file mode 100644
index dc86c1c9c0..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmNode1.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node1 -Dakka.remote.port=9991
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmNode2.conf
deleted file mode 100644
index 172e980612..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmNode2.conf
+++ /dev/null
@@ -1,3 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handlers = ["akka.testkit.TestEventListener"]
-akka.event-handler-level = "WARNING"
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmNode2.opts
deleted file mode 100644
index bb140941a5..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmNode2.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node2 -Dakka.remote.port=9992
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmSpec.scala
deleted file mode 100644
index 8c4730dc90..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/metrics/remote/RemoteMetricsMultiJvmSpec.scala
+++ /dev/null
@@ -1,133 +0,0 @@
-/**
- * Copyright (C) 2009-2012 Typesafe Inc.
- */
-
-package akka.cluster.metrics.remote
-
-import akka.cluster._
-import akka.actor._
-import Actor._
-import Cluster._
-import akka.dispatch._
-import akka.util.Duration
-import akka.util.duration._
-import akka.cluster.metrics._
-import java.util.concurrent._
-import atomic.AtomicInteger
-
-object RemoteMetricsMultiJvmSpec {
-  val NrOfNodes = 2
-
-  val MetricsRefreshTimeout = 100.millis
-}
-
-class AllMetricsAvailableMonitor(_id: String, completionLatch: CountDownLatch, clusterSize: Int) extends ClusterMetricsAlterationMonitor {
-
-  val id = _id
-
-  def reactsOn(allMetrics: Array[NodeMetrics]) = allMetrics.size == clusterSize
-
-  def react(allMetrics: Array[NodeMetrics]) = completionLatch.countDown
-
-}
-
-class RemoteMetricsMultiJvmNode1 extends MasterClusterTestNode {
-
-  import RemoteMetricsMultiJvmSpec._
-
-  val testNodes = NrOfNodes
-
-  "Metrics manager" must {
-    "provide metrics of all nodes in the cluster" in {
-
-      val allMetricsAvaiable = new CountDownLatch(1)
-
-      node.metricsManager.refreshTimeout = MetricsRefreshTimeout
-      node.metricsManager.addMonitor(new AllMetricsAvailableMonitor("all-metrics-available", allMetricsAvaiable, NrOfNodes))
-
-      LocalCluster.barrier("node-start", NrOfNodes).await()
-
-      allMetricsAvaiable.await()
-
-      LocalCluster.barrier("check-all-remote-metrics", NrOfNodes) {
-        node.metricsManager.getAllMetrics.size must be(2)
-      }
-
-      val cachedMetrics = node.metricsManager.getMetrics("node2")
-      val metricsFromZnode = node.metricsManager.getMetrics("node2", false)
-
-      LocalCluster.barrier("check-single-remote-metrics", NrOfNodes) {
-        cachedMetrics must not be (null)
-        metricsFromZnode must not be (null)
-      }
-
-      Thread sleep MetricsRefreshTimeout.toMillis
-
-      LocalCluster.barrier("remote-metrics-is-updated", NrOfNodes) {
-        node.metricsManager.getMetrics("node2") must not be (cachedMetrics)
-        node.metricsManager.getMetrics("node2", false) must not be (metricsFromZnode)
-      }
-
-      val someMetricsGone = new CountDownLatch(1)
-      node.metricsManager.addMonitor(new AllMetricsAvailableMonitor("some-metrics-gone", someMetricsGone, 1))
-
-      LocalCluster.barrier("some-nodes-leave", NrOfNodes).await()
-
-      someMetricsGone.await(10, TimeUnit.SECONDS) must be(true)
-
-      node.metricsManager.getMetrics("node2") must be(None)
-      node.metricsManager.getMetrics("node2", false) must be(None)
-      node.metricsManager.getAllMetrics.size must be(1)
-
-      node.shutdown()
-
-    }
-  }
-
-}
-
-class RemoteMetricsMultiJvmNode2 extends ClusterTestNode {
-
-  import RemoteMetricsMultiJvmSpec._
-
-  val testNodes = NrOfNodes
-
-  "Metrics manager" must {
-    "provide metrics of all nodes in the cluster" in {
-
-      val allMetricsAvaiable = new CountDownLatch(1)
-
-      node.metricsManager.refreshTimeout = MetricsRefreshTimeout
-      node.metricsManager.addMonitor(new AllMetricsAvailableMonitor("all-metrics-available", allMetricsAvaiable, NrOfNodes))
-
-      LocalCluster.barrier("node-start", NrOfNodes).await()
-
-      allMetricsAvaiable.await()
-
-      LocalCluster.barrier("check-all-remote-metrics", NrOfNodes) {
-        node.metricsManager.getAllMetrics.size must be(2)
-      }
-
-      val cachedMetrics = node.metricsManager.getMetrics("node1")
-      val metricsFromZnode = node.metricsManager.getMetrics("node1", false)
-
-      LocalCluster.barrier("check-single-remote-metrics", NrOfNodes) {
-        cachedMetrics must not be (null)
-        metricsFromZnode must not be (null)
-      }
-
-      Thread sleep MetricsRefreshTimeout.toMillis
-
-      LocalCluster.barrier("remote-metrics-is-updated", NrOfNodes) {
-        node.metricsManager.getMetrics("node1") must not be (cachedMetrics)
-        node.metricsManager.getMetrics("node1", false) must not be (metricsFromZnode)
-      }
-
-      LocalCluster.barrier("some-nodes-leave", NrOfNodes) {
-        node.shutdown()
-      }
-    }
-  }
-
-}
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmNode1.conf
deleted file mode 100644
index 2f642a20f0..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmNode1.conf
+++ /dev/null
@@ -1,2 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handler-level = "WARNING"
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmNode1.opts
deleted file mode 100644
index dc86c1c9c0..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmNode1.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node1 -Dakka.remote.port=9991
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmNode2.conf
deleted file mode 100644
index 2f642a20f0..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmNode2.conf
+++ /dev/null
@@ -1,2 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handler-level = "WARNING"
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmNode2.opts
deleted file mode 100644
index bb140941a5..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmNode2.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node2 -Dakka.remote.port=9992
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmSpec.scala
deleted file mode 100644
index 7dfdec2f7c..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/migration/MigrationExplicitMultiJvmSpec.scala
+++ /dev/null
@@ -1,112 +0,0 @@
-/**
- * Copyright (C) 2009-2012 Typesafe Inc.
- *
- *
- * package akka.cluster.migration
- *
- * import org.scalatest.WordSpec
- * import org.scalatest.matchers.MustMatchers
- * import org.scalatest.BeforeAndAfterAll
- *
- * import akka.actor._
- * import Actor._
- * import akka.cluster._
- * import ChangeListener._
- * import Cluster._
- * import akka.config.Config
- * import akka.serialization.Serialization
- * import akka.cluster.LocalCluster._
- *
- * import java.util.concurrent._
- *
- * object MigrationExplicitMultiJvmSpec {
- *   var NrOfNodes = 2
- *
- *   class HelloWorld extends Actor with Serializable {
- *     def receive = {
- *       case "Hello" ⇒
- *         reply("World from node [" + Config.nodename + "]")
- *     }
- *   }
- * }
- *
- * class MigrationExplicitMultiJvmNode1 extends MasterClusterTestNode {
- *   import MigrationExplicitMultiJvmSpec._
- *
- *   val testNodes = NrOfNodes
- *
- *   "A cluster" must {
- *
- *     "be able to migrate an actor from one node to another" in {
- *
- *       barrier("start-node-1", NrOfNodes) {
- *         Cluster.node.start()
- *       }
- *
- *       barrier("start-node-2", NrOfNodes) {
- *       }
- *
- *       barrier("store-1-in-node-1", NrOfNodes) {
- *         val serializer = Serialization.serializerFor(classOf[HelloWorld]).fold(x ⇒ fail("No serializer found"), s ⇒ s)
- *         node.store("hello-world", classOf[HelloWorld], serializer)
- *       }
- *
- *       barrier("use-1-in-node-2", NrOfNodes) {
- *       }
- *
- *       barrier("migrate-from-node2-to-node1", NrOfNodes) {
- *       }
- *
- *       barrier("check-actor-is-moved-to-node1", NrOfNodes) {
- *         node.isInUseOnNode("hello-world") must be(true)
- *
- *         val actorRef = Actor.registry.local.actorFor("hello-world").getOrElse(fail("Actor should have been in the local actor registry"))
- *         actorRef.address must be("hello-world")
- *         (actorRef ? "Hello").as[String].get must be("World from node [node1]")
- *       }
- *
- *       node.shutdown()
- *     }
- *   }
- * }
- *
- * class MigrationExplicitMultiJvmNode2 extends ClusterTestNode {
- *   import MigrationExplicitMultiJvmSpec._
- *
- *   "A cluster" must {
- *
- *     "be able to migrate an actor from one node to another" in {
- *
- *       barrier("start-node-1", NrOfNodes) {
- *       }
- *
- *       barrier("start-node-2", NrOfNodes) {
- *         Cluster.node.start()
- *       }
- *
- *       barrier("store-1-in-node-1", NrOfNodes) {
- *       }
- *
- *       barrier("use-1-in-node-2", NrOfNodes) {
- *         val actorOrOption = node.use("hello-world")
- *         if (actorOrOption.isEmpty) fail("Actor could not be retrieved")
- *
- *         val actorRef = actorOrOption.get
- *         actorRef.address must be("hello-world")
- *
- *         (actorRef ? "Hello").as[String].get must be("World from node [node2]")
- *       }
- *
- *       barrier("migrate-from-node2-to-node1", NrOfNodes) {
- *         node.migrate(NodeAddress(node.nodeAddress.clusterName, "node1"), "hello-world")
- *         Thread.sleep(2000)
- *       }
- *
- *       barrier("check-actor-is-moved-to-node1", NrOfNodes) {
- *       }
- *
- *       node.shutdown()
- *     }
- *   }
- * }
- */
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode1.conf
deleted file mode 100644
index f510c5253c..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode1.conf
+++ /dev/null
@@ -1,6 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handlers = ["akka.testkit.TestEventListener"]
-akka.event-handler-level = "WARNING"
-akka.actor.deployment.service-test.router = "round-robin"
-akka.actor.deployment.service-test.cluster.preferred-nodes = ["node:node2","node:node3"]
-akka.actor.deployment.service-test.nr-of-instances = 2
\ No newline at end of file
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode1.opts
deleted file mode 100644
index dc86c1c9c0..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode1.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node1 -Dakka.remote.port=9991
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode2.conf
deleted file mode 100644
index b7c3e53e6f..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode2.conf
+++ /dev/null
@@ -1,5 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handler-level = "WARNING"
-akka.actor.deployment.service-test.router = "round-robin"
-akka.actor.deployment.service-test.cluster.preferred-nodes = ["node:node2","node:node3"]
-akka.actor.deployment.service-test.nr-of-instances = 2
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode2.opts
deleted file mode 100644
index bb140941a5..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode2.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node2 -Dakka.remote.port=9992
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode3.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode3.conf
deleted file mode 100644
index b7c3e53e6f..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode3.conf
+++ /dev/null
@@ -1,5 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handler-level = "WARNING"
-akka.actor.deployment.service-test.router = "round-robin"
-akka.actor.deployment.service-test.cluster.preferred-nodes = ["node:node2","node:node3"]
-akka.actor.deployment.service-test.nr-of-instances = 2
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode3.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode3.opts
deleted file mode 100644
index 089e3b7776..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmNode3.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node3 -Dakka.remote.port=9993
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmSpec.scala
deleted file mode 100644
index 98d2aaf394..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/reflogic/ClusterActorRefCleanupMultiJvmSpec.scala
+++ /dev/null
@@ -1,154 +0,0 @@
-/*
- * Copyright (C) 2009-2012 Typesafe Inc.
- */
-
-package akka.cluster.reflogic
-
-import akka.cluster._
-import akka.cluster.Cluster._
-import akka.actor.Actor
-import akka.event.EventHandler
-import akka.testkit.{ EventFilter, TestEvent }
-import akka.routing.RoutingException
-import java.net.ConnectException
-import java.nio.channels.{ ClosedChannelException, NotYetConnectedException }
-import akka.cluster.LocalCluster._
-
-object ClusterActorRefCleanupMultiJvmSpec {
-
-  val NrOfNodes = 3
-
-  class TestActor extends Actor with Serializable {
-    def receive = {
-      case _ ⇒ {}
-    }
-  }
-
-}
-
-class ClusterActorRefCleanupMultiJvmNode1 extends MasterClusterTestNode {
-
-  import ClusterActorRefCleanupMultiJvmSpec._
-
-  val testNodes = NrOfNodes
-
-  "ClusterActorRef" must {
-    "cleanup itself" ignore {
-      Cluster.node.start()
-      barrier("awaitStarted", NrOfNodes).await()
-
-      val ref = Actor.actorOf(Props[ClusterActorRefCleanupMultiJvmSpec.TestActor]("service-test")
-
-      ref.isInstanceOf[ClusterActorRef] must be(true)
-
-      val clusteredRef = ref.asInstanceOf[ClusterActorRef]
-
-      barrier("awaitActorCreated", NrOfNodes).await()
-
-      //verify that all remote actors are there.
-      clusteredRef.nrOfConnections must be(2)
-
-      // ignore exceptions from killing nodes
-      val ignoreExceptions = Seq(
-        EventFilter[ClosedChannelException],
-        EventFilter[NotYetConnectedException],
-        EventFilter[RoutingException],
-        EventFilter[ConnectException])
-
-      EventHandler.notify(TestEvent.Mute(ignoreExceptions))
-
-      //just some waiting to make sure that the node has died.
-      Thread.sleep(5000)
-
-      //send some request, this should trigger the cleanup
-      try {
-        clusteredRef ! "hello"
-        clusteredRef ! "hello"
-      } catch {
-        case e: ClosedChannelException ⇒
-        case e: NotYetConnectedException ⇒
-        case e: RoutingException ⇒
-      }
-
-      barrier("node-3-dead", NrOfNodes - 1).await()
-
-      //since the call to the node failed, the node must have been removed from the list.
-      clusteredRef.nrOfConnections must be(1)
-
-      //just some waiting to make sure that the node has died.
-      Thread.sleep(5000)
-
-      //trigger the cleanup.
-      try {
-        clusteredRef ! "hello"
-        clusteredRef ! "hello"
-      } catch {
-        case e: ClosedChannelException ⇒
-        case e: NotYetConnectedException ⇒
-        case e: RoutingException ⇒
-      }
-
-      //now there must not be any remaining connections after the dead of the last actor.
-      clusteredRef.nrOfConnections must be(0)
-
-      //and lets make sure we now get the correct exception if we try to use the ref.
-      intercept[RoutingException] {
-        clusteredRef ! "Hello"
-      }
-
-      node.shutdown()
-    }
-  }
-}
-
-class ClusterActorRefCleanupMultiJvmNode2 extends ClusterTestNode {
-
-  import ClusterActorRefCleanupMultiJvmSpec._
-
-  val testNodes = NrOfNodes
-
-  //we are only using the nodes for their capacity, not for testing on this node itself.
-  "___" must {
-    "___" ignore {
-      Runtime.getRuntime.addShutdownHook(new Thread() {
-        override def run() {
-          ClusterTestNode.exit(classOf[ClusterActorRefCleanupMultiJvmNode2].getName)
-        }
-      })
-
-      Cluster.node.start()
-      barrier("awaitStarted", NrOfNodes).await()
-
-      barrier("awaitActorCreated", NrOfNodes).await()
-
-      barrier("node-3-dead", NrOfNodes - 1).await()
-
-      System.exit(0)
-    }
-  }
-}
-
-class ClusterActorRefCleanupMultiJvmNode3 extends ClusterTestNode {
-
-  import ClusterActorRefCleanupMultiJvmSpec._
-
-  val testNodes = NrOfNodes
-
-  //we are only using the nodes for their capacity, not for testing on this node itself.
-  "___" must {
-    "___" ignore {
-      Runtime.getRuntime.addShutdownHook(new Thread() {
-        override def run() {
-          ClusterTestNode.exit(classOf[ClusterActorRefCleanupMultiJvmNode3].getName)
-        }
-      })
-
-      Cluster.node.start()
-      barrier("awaitStarted", NrOfNodes).await()
-
-      barrier("awaitActorCreated", NrOfNodes).await()
-
-      System.exit(0)
-    }
-  }
-}
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmNode1.conf
deleted file mode 100644
index dca432f404..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmNode1.conf
+++ /dev/null
@@ -1,7 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handler-level = "WARNING"
-akka.actor.deployment.hello-world-write-behind-nosnapshot.router = "direct"
-akka.actor.deployment.hello-world-write-behind-nosnapshot.nr-of-instances = 1
-akka.actor.deployment.hello-world-write-behind-nosnapshot.cluster.replication.storage = "transaction-log"
-akka.actor.deployment.hello-world-write-behind-nosnapshot.cluster.replication.strategy = "write-behind"
-akka.cluster.replication.snapshot-frequency = 1000
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmNode1.opts
deleted file mode 100644
index dc86c1c9c0..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmNode1.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node1 -Dakka.remote.port=9991
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmNode2.conf
deleted file mode 100644
index dca432f404..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmNode2.conf
+++ /dev/null
@@ -1,7 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handler-level = "WARNING"
-akka.actor.deployment.hello-world-write-behind-nosnapshot.router = "direct"
-akka.actor.deployment.hello-world-write-behind-nosnapshot.nr-of-instances = 1
-akka.actor.deployment.hello-world-write-behind-nosnapshot.cluster.replication.storage = "transaction-log"
-akka.actor.deployment.hello-world-write-behind-nosnapshot.cluster.replication.strategy = "write-behind"
-akka.cluster.replication.snapshot-frequency = 1000
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmNode2.opts
deleted file mode 100644
index bb140941a5..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmNode2.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node2 -Dakka.remote.port=9992
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmSpec.scala
deleted file mode 100644
index a90d26ad8d..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/nosnapshot/ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmSpec.scala
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Copyright (C) 2009-2012 Typesafe Inc.
- */
-
-// package akka.cluster.replication.transactionlog.writebehind.nosnapshot
-
-// import akka.actor._
-// import akka.cluster._
-// import Cluster._
-// import akka.config.Config
-// import akka.cluster.LocalCluster._
-
-// object ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmSpec {
-//   var NrOfNodes = 2
-
-//   sealed trait TransactionLogMessage extends Serializable
-//   case class Count(nr: Int) extends TransactionLogMessage
-//   case class Log(full: String) extends TransactionLogMessage
-//   case object GetLog extends TransactionLogMessage
-
-//   class HelloWorld extends Actor with Serializable {
-//     var log = ""
-//     def receive = {
-//       case Count(nr) ⇒
-//         log += nr.toString
-//         reply("World from node [" + Config.nodename + "]")
-//       case GetLog ⇒
-//         reply(Log(log))
-//     }
-//   }
-// }
-
-// class ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmNode1 extends ClusterTestNode {
-//   import ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmSpec._
-
-//   "A cluster" must {
-
-//     "be able to replicate an actor with a transaction log and replay transaction log after actor migration" ignore {
-
-//       barrier("start-node1", NrOfNodes) {
-//         Cluster.node.start()
-//       }
-
-//       barrier("create-actor-on-node1", NrOfNodes) {
-//         val actorRef = Actor.actorOf(Props[HelloWorld]("hello-world-write-behind-nosnapshot")
-//         // node.isInUseOnNode("hello-world") must be(true)
-//         actorRef.address must be("hello-world-write-behind-nosnapshot")
-//         for (i ← 0 until 10) {
-//           (actorRef ? Count(i)).as[String] must be(Some("World from node [node1]"))
-//         }
-//       }
-
-//       barrier("start-node2", NrOfNodes).await()
-
-//       node.shutdown()
-//     }
-//   }
-// }
-
-// class ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmNode2 extends MasterClusterTestNode {
-//   import ReplicationTransactionLogWriteBehindNoSnapshotMultiJvmSpec._
-
-//   val testNodes = NrOfNodes
-
-//   "A cluster" must {
-
-//     "be able to replicate an actor with a transaction log and replay transaction log after actor migration" ignore {
-
-//       barrier("start-node1", NrOfNodes).await()
-
-//       barrier("create-actor-on-node1", NrOfNodes).await()
-
-//       barrier("start-node2", NrOfNodes) {
-//         Cluster.node.start()
-//       }
-
-//       Thread.sleep(5000) // wait for fail-over from node1 to node2
-
-//       barrier("check-fail-over-to-node2", NrOfNodes - 1) {
-//         // both remaining nodes should now have the replica
-//         node.isInUseOnNode("hello-world-write-behind-nosnapshot") must be(true)
-//         val actorRef = Actor.registry.local.actorFor("hello-world-write-behind-nosnapshot").getOrElse(fail("Actor should have been in the local actor registry"))
-//         actorRef.address must be("hello-world-write-behind-nosnapshot")
-//         (actorRef ? GetLog).as[Log].get must be(Log("0123456789"))
-//       }
-
-//       node.shutdown()
-//     }
-//   }
-
-//   override def onReady() {
-//     LocalBookKeeperEnsemble.start()
-//   }
-
-//   override def onShutdown() {
-//     TransactionLog.shutdown()
-//     LocalBookKeeperEnsemble.shutdown()
-//   }
-// }
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmNode1.conf
deleted file mode 100644
index a3ec6ec2c3..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmNode1.conf
+++ /dev/null
@@ -1,7 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handler-level = "WARNING"
-akka.actor.deployment.hello-world.router = "direct"
-akka.actor.deployment.hello-world-write-behind-snapshot.nr-of-instances = 1
-akka.actor.deployment.hello-world-write-behind-snapshot.cluster.replication.storage = "transaction-log"
-akka.actor.deployment.hello-world-write-behind-snapshot.cluster.replication.strategy = "write-behind"
-akka.cluster.replication.snapshot-frequency = 7
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmNode1.opts
deleted file mode 100644
index dc86c1c9c0..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmNode1.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node1 -Dakka.remote.port=9991
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmNode2.conf
deleted file mode 100644
index a3ec6ec2c3..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmNode2.conf
+++ /dev/null
@@ -1,7 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handler-level = "WARNING"
-akka.actor.deployment.hello-world.router = "direct"
-akka.actor.deployment.hello-world-write-behind-snapshot.nr-of-instances = 1
-akka.actor.deployment.hello-world-write-behind-snapshot.cluster.replication.storage = "transaction-log"
-akka.actor.deployment.hello-world-write-behind-snapshot.cluster.replication.strategy = "write-behind"
-akka.cluster.replication.snapshot-frequency = 7
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmNode2.opts
deleted file mode 100644
index bb140941a5..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmNode2.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node2 -Dakka.remote.port=9992
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmSpec.scala
deleted file mode 100644
index fde113080e..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writebehind/snapshot/ReplicationTransactionLogWriteBehindSnapshotMultiJvmSpec.scala
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- * Copyright (C) 2009-2012 Typesafe Inc.
- */
-
-// package akka.cluster.replication.transactionlog.writebehind.snapshot
-
-// import akka.actor._
-// import akka.cluster._
-// import Cluster._
-// import akka.config.Config
-// import akka.cluster.LocalCluster._
-
-// object ReplicationTransactionLogWriteBehindSnapshotMultiJvmSpec {
-//   var NrOfNodes = 2
-
-//   sealed trait TransactionLogMessage extends Serializable
-//   case class Count(nr: Int) extends TransactionLogMessage
-//   case class Log(full: String) extends TransactionLogMessage
-//   case object GetLog extends TransactionLogMessage
-
-//   class HelloWorld extends Actor with Serializable {
-//     var log = ""
-//     //println("Creating HelloWorld log =======> " + log)
-//     def receive = {
-//       case Count(nr) ⇒
-//         log += nr.toString
-//         //println("Message to HelloWorld log =======> " + log)
-//         reply("World from node [" + Config.nodename + "]")
-//       case GetLog ⇒
-//         reply(Log(log))
-//     }
-//   }
-// }
-
-// class ReplicationTransactionLogWriteBehindSnapshotMultiJvmNode1 extends ClusterTestNode {
-//   import ReplicationTransactionLogWriteBehindSnapshotMultiJvmSpec._
-
-//   "A cluster" must {
-
-//     "be able to replicate an actor with a transaction log and replay transaction log after actor migration" ignore {
-
-//       barrier("start-node1", NrOfNodes) {
-//         Cluster.node.start()
-//       }
-
-//       barrier("create-actor-on-node1", NrOfNodes) {
-//         val actorRef = Actor.actorOf(Props[HelloWorld]("hello-world-write-behind-snapshot")
-//         node.isInUseOnNode("hello-world-write-behind-snapshot") must be(true)
-//         actorRef.address must be("hello-world-write-behind-snapshot")
-//         var counter = 0
-//         (actorRef ? Count(counter)).as[String].get must be("World from node [node1]")
-//         counter += 1
-//         (actorRef ? Count(counter)).as[String].get must be("World from node [node1]")
-//         counter += 1
-//         (actorRef ? Count(counter)).as[String].get must be("World from node [node1]")
-//         counter += 1
-//         (actorRef ? Count(counter)).as[String].get must be("World from node [node1]")
-//         counter += 1
-//         (actorRef ? Count(counter)).as[String].get must be("World from node [node1]")
-//         counter += 1
-//         (actorRef ? Count(counter)).as[String].get must be("World from node [node1]")
-//         counter += 1
-//         (actorRef ? Count(counter)).as[String].get must be("World from node [node1]")
-//         counter += 1
-//         (actorRef ? Count(counter)).as[String].get must be("World from node [node1]")
-//         counter += 1
-//         (actorRef ? Count(counter)).as[String].get must be("World from node [node1]")
-//         counter += 1
-//         (actorRef ? Count(counter)).as[String].get must be("World from node [node1]")
-//       }
-
-//       barrier("start-node2", NrOfNodes).await()
-
-//       node.shutdown()
-//     }
-//   }
-// }
-
-// class ReplicationTransactionLogWriteBehindSnapshotMultiJvmNode2 extends MasterClusterTestNode {
-//   import ReplicationTransactionLogWriteBehindSnapshotMultiJvmSpec._
-
-//   val testNodes = NrOfNodes
-
-//   "A cluster" must {
-
-//     "be able to replicate an actor with a transaction log and replay transaction log after actor migration" ignore {
-
-//       barrier("start-node1", NrOfNodes).await()
-
-//       barrier("create-actor-on-node1", NrOfNodes).await()
-
-//       barrier("start-node2", NrOfNodes) {
-//         Cluster.node.start()
-//       }
-
-//       Thread.sleep(5000) // wait for fail-over from node1 to node2
-
-//       barrier("check-fail-over-to-node2", NrOfNodes - 1) {
-//         // both remaining nodes should now have the replica
-//         node.isInUseOnNode("hello-world-write-behind-snapshot") must be(true)
-//         val actorRef = Actor.registry.local.actorFor("hello-world-write-behind-snapshot").getOrElse(fail("Actor should have been in the local actor registry"))
-//         actorRef.address must be("hello-world-write-behind-snapshot")
-//         (actorRef ? GetLog).as[Log].get must be(Log("0123456789"))
-//       }
-
-//       node.shutdown()
-//     }
-//   }
-
-//   override def onReady() {
-//     LocalBookKeeperEnsemble.start()
-//   }
-
-//   override def onShutdown() {
-//     TransactionLog.shutdown()
-//     LocalBookKeeperEnsemble.shutdown()
-//   }
-// }
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmNode1.conf
deleted file mode 100644
index 8de04a2eb1..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmNode1.conf
+++ /dev/null
@@ -1,7 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handler-level = "DEBUG"
-akka.actor.deployment.hello-world-write-through-nosnapshot.router = "direct"
-akka.actor.deployment.hello-world-write-through-nosnapshot.cluster.replication.storage = "transaction-log"
-akka.actor.deployment.hello-world-write-through-nosnapshot.cluster.replication.strategy = "write-through"
-akka.actor.deployment.service-hello.cluster.preferred-nodes = ["node:node1"]
-akka.cluster.replication.snapshot-frequency = 1000
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmNode1.opts
deleted file mode 100644
index dc86c1c9c0..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmNode1.opts
+++ /dev/null
@@ -1 +0,0 @@
--Dakka.cluster.nodename=node1 -Dakka.remote.port=9991
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmNode2.conf
deleted file mode 100644
index 8de04a2eb1..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmNode2.conf
+++ /dev/null
@@ -1,7 +0,0 @@
-akka.enabled-modules = ["cluster"]
-akka.event-handler-level = "DEBUG"
-akka.actor.deployment.hello-world-write-through-nosnapshot.router = "direct"
-akka.actor.deployment.hello-world-write-through-nosnapshot.cluster.replication.storage = "transaction-log"
-akka.actor.deployment.hello-world-write-through-nosnapshot.cluster.replication.strategy = "write-through"
-akka.actor.deployment.service-hello.cluster.preferred-nodes = ["node:node1"]
-akka.cluster.replication.snapshot-frequency = 1000
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmNode2.opts
deleted file mode 100644
index bb140941a5..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmNode2.opts
a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmNode2.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node2 -Dakka.remote.port=9992 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmSpec.scala deleted file mode 100644 index c2e6ed678b..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/nosnapshot/ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmSpec.scala +++ /dev/null @@ -1,99 +0,0 @@ -/* - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -// package akka.cluster.replication.transactionlog.writethrough.nosnapshot - -// import akka.actor._ -// import akka.cluster._ -// import Cluster._ -// import akka.config.Config -// import akka.cluster.LocalCluster._ - -// object ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmSpec { -// var NrOfNodes = 2 - -// sealed trait TransactionLogMessage extends Serializable -// case class Count(nr: Int) extends TransactionLogMessage -// case class Log(full: String) extends TransactionLogMessage -// case object GetLog extends TransactionLogMessage - -// class HelloWorld extends Actor with Serializable { -// var log = "" -// def receive = { -// case Count(nr) ⇒ -// println("Received number: " + nr + " on " + self.address) -// log += nr.toString -// reply("World from node [" + Config.nodename + "]") -// case GetLog ⇒ -// println("Received getLog on " + uuid) -// reply(Log(log)) -// } -// } -// } - -// class ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmNode1 extends ClusterTestNode { -// import ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmSpec._ - -// "A cluster" must { - -// "be able to replicate an actor with a transaction log and replay transaction log after actor migration" ignore { - -// barrier("start-node1", NrOfNodes) { -// Cluster.node.start() -// } - -// barrier("create-actor-on-node1", NrOfNodes) { -// val actorRef = Actor.actorOf(Props[HelloWorld]("hello-world-write-through-nosnapshot") -// actorRef.address must be("hello-world-write-through-nosnapshot") -// for (i ← 0 until 10) -// (actorRef ? 
Count(i)).as[String] must be(Some("World from node [node1]")) -// } - -// barrier("start-node2", NrOfNodes).await() - -// node.shutdown() -// } -// } -// } - -// class ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmNode2 extends MasterClusterTestNode { -// import ReplicationTransactionLogWriteThroughNoSnapshotMultiJvmSpec._ - -// val testNodes = NrOfNodes - -// "A cluster" must { - -// "be able to replicate an actor with a transaction log and replay transaction log after actor migration" ignore { - -// barrier("start-node1", NrOfNodes).await() - -// barrier("create-actor-on-node1", NrOfNodes).await() - -// barrier("start-node2", NrOfNodes) { -// Cluster.node.start() -// } - -// Thread.sleep(5000) // wait for fail-over from node1 to node2 - -// barrier("check-fail-over-to-node2", NrOfNodes - 1) { -// // both remaining nodes should now have the replica -// node.isInUseOnNode("hello-world-write-through-nosnapshot") must be(true) -// val actorRef = Actor.registry.local.actorFor("hello-world-write-through-nosnapshot").getOrElse(fail("Actor should have been in the local actor registry")) -// actorRef.address must be("hello-world-write-through-nosnapshot") -// (actorRef ? GetLog).as[Log].get must be(Log("0123456789")) -// } - -// node.shutdown() -// } -// } - -// override def onReady() { -// LocalBookKeeperEnsemble.start() -// } - -// override def onShutdown() { -// TransactionLog.shutdown() -// LocalBookKeeperEnsemble.shutdown() -// } -// } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmNode1.conf deleted file mode 100644 index 82d6dc18ce..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmNode1.conf +++ /dev/null @@ -1,7 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.hello-world-write-through-snapshot.router = "direct" -akka.actor.deployment.hello-world-write-through-snapshot.nr-of-instances = 1 -akka.actor.deployment.hello-world-write-through-snapshot.cluster.replication.storage = "transaction-log" -akka.actor.deployment.hello-world-write-through-snapshot.cluster.replication.strategy = "write-through" -akka.cluster.replication.snapshot-frequency = 7 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmNode1.opts deleted file mode 100644 index dc86c1c9c0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmNode2.conf deleted file mode 100644 index 
82d6dc18ce..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmNode2.conf +++ /dev/null @@ -1,7 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.hello-world-write-through-snapshot.router = "direct" -akka.actor.deployment.hello-world-write-through-snapshot.nr-of-instances = 1 -akka.actor.deployment.hello-world-write-through-snapshot.cluster.replication.storage = "transaction-log" -akka.actor.deployment.hello-world-write-through-snapshot.cluster.replication.strategy = "write-through" -akka.cluster.replication.snapshot-frequency = 7 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmNode2.opts deleted file mode 100644 index bb140941a5..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmNode2.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node2 -Dakka.remote.port=9992 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmSpec.scala deleted file mode 100644 index 3df29dd510..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/replication/transactionlog/writethrough/snapshot/ReplicationTransactionLogWriteThroughSnapshotMultiJvmSpec.scala +++ /dev/null @@ -1,116 +0,0 @@ -/* - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -// package akka.cluster.replication.transactionlog.writethrough.snapshot - -// import akka.actor._ -// import akka.cluster._ -// import Cluster._ -// import akka.config.Config -// import akka.cluster.LocalCluster._ - -// object ReplicationTransactionLogWriteThroughSnapshotMultiJvmSpec { -// var NrOfNodes = 2 - -// sealed trait TransactionLogMessage extends Serializable -// case class Count(nr: Int) extends TransactionLogMessage -// case class Log(full: String) extends TransactionLogMessage -// case object GetLog extends TransactionLogMessage - -// class HelloWorld extends Actor with Serializable { -// var log = "" -// def receive = { -// case Count(nr) ⇒ -// log += nr.toString -// reply("World from node [" + Config.nodename + "]") -// case GetLog ⇒ -// reply(Log(log)) -// } -// } -// } - -// class ReplicationTransactionLogWriteThroughSnapshotMultiJvmNode1 extends ClusterTestNode { -// import ReplicationTransactionLogWriteThroughSnapshotMultiJvmSpec._ - -// "A cluster" must { - -// "be able to replicate an actor with a transaction log and replay transaction log after actor migration" ignore { - -// barrier("start-node1", NrOfNodes) { -// Cluster.node.start() -// } - -// barrier("create-actor-on-node1", NrOfNodes) { -// val actorRef = Actor.actorOf(Props[HelloWorld]("hello-world-write-through-snapshot") -// node.isInUseOnNode("hello-world-write-through-snapshot") must be(true) -// actorRef.address must be("hello-world-write-through-snapshot") -// var counter = 0 -// (actorRef ? 
Count(counter)).as[String].get must be("World from node [node1]") -// counter += 1 -// (actorRef ? Count(counter)).as[String].get must be("World from node [node1]") -// counter += 1 -// (actorRef ? Count(counter)).as[String].get must be("World from node [node1]") -// counter += 1 -// (actorRef ? Count(counter)).as[String].get must be("World from node [node1]") -// counter += 1 -// (actorRef ? Count(counter)).as[String].get must be("World from node [node1]") -// counter += 1 -// (actorRef ? Count(counter)).as[String].get must be("World from node [node1]") -// counter += 1 -// (actorRef ? Count(counter)).as[String].get must be("World from node [node1]") -// counter += 1 -// (actorRef ? Count(counter)).as[String].get must be("World from node [node1]") -// counter += 1 -// (actorRef ? Count(counter)).as[String].get must be("World from node [node1]") -// counter += 1 -// (actorRef ? Count(counter)).as[String].get must be("World from node [node1]") -// } - -// barrier("start-node2", NrOfNodes).await() - -// node.shutdown() -// } -// } -// } - -// class ReplicationTransactionLogWriteThroughSnapshotMultiJvmNode2 extends MasterClusterTestNode { -// import ReplicationTransactionLogWriteThroughSnapshotMultiJvmSpec._ - -// val testNodes = NrOfNodes - -// "A cluster" must { - -// "be able to replicate an actor with a transaction log and replay transaction log after actor migration" ignore { - -// barrier("start-node1", NrOfNodes).await() - -// barrier("create-actor-on-node1", NrOfNodes).await() - -// barrier("start-node2", NrOfNodes) { -// Cluster.node.start() -// } - -// Thread.sleep(5000) // wait for fail-over from node1 to node2 - -// barrier("check-fail-over-to-node2", NrOfNodes - 1) { -// // both remaining nodes should now have the replica -// node.isInUseOnNode("hello-world-write-through-snapshot") must be(true) -// val actorRef = Actor.registry.local.actorFor("hello-world-write-through-snapshot").getOrElse(fail("Actor should have been in the local actor registry")) -// actorRef.address must be("hello-world-write-through-snapshot") -// (actorRef ? 
GetLog).as[Log].get must be(Log("0123456789")) -// } - -// node.shutdown() -// } -// } - -// override def onReady() { -// LocalBookKeeperEnsemble.start() -// } - -// override def onShutdown() { -// TransactionLog.shutdown() -// LocalBookKeeperEnsemble.shutdown() -// } -// } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmNode1.conf deleted file mode 100644 index 7332be6934..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmNode1.conf +++ /dev/null @@ -1,5 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handlers = ["akka.testkit.TestEventListener"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "direct" -akka.actor.deployment.service-hello.cluster.preferred-nodes = ["node:node2"] diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmNode1.opts deleted file mode 100644 index dc86c1c9c0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmNode2.conf deleted file mode 100644 index 7332be6934..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmNode2.conf +++ /dev/null @@ -1,5 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handlers = ["akka.testkit.TestEventListener"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "direct" -akka.actor.deployment.service-hello.cluster.preferred-nodes = ["node:node2"] diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmNode2.opts deleted file mode 100644 index bb140941a5..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmNode2.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node2 -Dakka.remote.port=9992 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmSpec.scala deleted file mode 100644 index 6bc1653836..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/failover/DirectRoutingFailoverMultiJvmSpec.scala +++ /dev/null @@ -1,90 +0,0 @@ -package akka.cluster.routing.direct.failover - -import akka.config.Config -import scala.Predef._ -import akka.cluster.{ ClusterActorRef, Cluster, MasterClusterTestNode, ClusterTestNode } -import akka.actor.{ ActorInitializationException, Actor, ActorRef } -import akka.util.duration._ -import akka.util.{ Duration, Timer } -import akka.event.EventHandler -import akka.testkit.{ EventFilter, TestEvent } -import java.net.ConnectException -import 
java.nio.channels.NotYetConnectedException
-import akka.cluster.LocalCluster
-import akka.dispatch.Await
-
-object DirectRoutingFailoverMultiJvmSpec {
-
-  val NrOfNodes = 2
-
-  class SomeActor extends Actor with Serializable {
-
-    def receive = {
-      case "identify" ⇒
-        reply(Config.nodename)
-    }
-  }
-}
-
-class DirectRoutingFailoverMultiJvmNode1 extends MasterClusterTestNode {
-
-  import DirectRoutingFailoverMultiJvmSpec._
-
-  val testNodes = NrOfNodes
-
-  "Direct Router" must {
-    "throw exception [ActorInitializationException] upon fail-over" ignore {
-
-      val ignoreExceptions = Seq(EventFilter[NotYetConnectedException], EventFilter[ConnectException])
-      EventHandler.notify(TestEvent.Mute(ignoreExceptions))
-
-      var actor: ActorRef = null
-
-      LocalCluster.barrier("node-start", NrOfNodes) {
-        Cluster.node.start()
-      }
-
-      LocalCluster.barrier("actor-creation", NrOfNodes) {
-        actor = Actor.actorOf(Props[SomeActor]("service-hello"))
-      }
-
-      LocalCluster.barrier("verify-actor", NrOfNodes) {
-        Await.result(actor ? "identify", timeout.duration) must equal("node2")
-      }
-
-      val timer = Timer(30.seconds, true)
-      while (timer.isTicking && !Cluster.node.isInUseOnNode("service-hello")) {}
-
-      LocalCluster.barrier("verify-fail-over", NrOfNodes - 1) {
-        actor ! "identify" // trigger failure and removal of connection to node2
-        intercept[Exception] {
-          actor ! "identify" // trigger exception since no more connections
-        }
-      }
-
-      Cluster.node.shutdown()
-    }
-  }
-}
-
-class DirectRoutingFailoverMultiJvmNode2 extends ClusterTestNode {
-
-  import DirectRoutingFailoverMultiJvmSpec._
-
-  "___" must {
-    "___" ignore {
-      LocalCluster.barrier("node-start", NrOfNodes) {
-        Cluster.node.start()
-      }
-
-      LocalCluster.barrier("actor-creation", NrOfNodes).await()
-
-      LocalCluster.barrier("verify-actor", NrOfNodes) {
-        Cluster.node.isInUseOnNode("service-hello") must be(true)
-      }
-
-      Cluster.node.shutdown()
-    }
-  }
-}
-
diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNode1MultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNode1MultiJvmSpec.scala
deleted file mode 100644
index 6ce2219978..0000000000
--- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNode1MultiJvmSpec.scala
+++ /dev/null
@@ -1,60 +0,0 @@
-package akka.cluster.routing.direct.homenode
-
-import akka.config.Config
-import akka.actor.Actor
-import akka.cluster.{ ClusterTestNode, MasterClusterTestNode, Cluster }
-import Cluster._
-import akka.cluster.LocalCluster._
-
-object HomeNodeMultiJvmSpec {
-
-  val NrOfNodes = 2
-
-  class SomeActor extends Actor with Serializable {
-    def receive = {
-      case "identify" ⇒ {
-        reply(Config.nodename)
-      }
-    }
-  }
-
-}
-
-class HomeNodeMultiJvmNode1 extends MasterClusterTestNode {
-
-  import HomeNodeMultiJvmSpec._
-
-  val testNodes = NrOfNodes
-
-  "___" must {
-    "___" in {
-      Cluster.node.start()
-      barrier("waiting-for-begin", NrOfNodes).await()
-      barrier("waiting-for-end", NrOfNodes).await()
-      node.shutdown()
-    }
-  }
-}
-
-class HomeNodeMultiJvmNode2 extends ClusterTestNode {
-
-  import HomeNodeMultiJvmSpec._
-
-  "Direct Router: A Direct Router" must {
-    "obey 'home-node' config option when instantiated actor in cluster" in {
-      Cluster.node.start()
-      barrier("waiting-for-begin", NrOfNodes).await()
-
-      val actorNode1 = Actor.actorOf(Props[SomeActor]("service-node1"))
-      val name1 = (actorNode1 ?
"identify").get.asInstanceOf[String] - name1 must equal("node1") - - val actorNode2 = Actor.actorOf(Props[SomeActor]("service-node2") - val name2 = (actorNode2 ? "identify").get.asInstanceOf[String] - name2 must equal("node2") - - barrier("waiting-for-end", NrOfNodes).await() - node.shutdown() - } - } -} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNodeMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNodeMultiJvmNode1.conf deleted file mode 100644 index 893f798e1d..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNodeMultiJvmNode1.conf +++ /dev/null @@ -1,6 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-node1.router = "direct" -akka.actor.deployment.service-node1.cluster.preferred-nodes = ["node:node1"] -akka.actor.deployment.service-node2.router = "direct" -akka.actor.deployment.service-node2.cluster.preferred-nodes = ["node:node2"] diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNodeMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNodeMultiJvmNode1.opts deleted file mode 100644 index dc86c1c9c0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNodeMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNodeMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNodeMultiJvmNode2.conf deleted file mode 100644 index 893f798e1d..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNodeMultiJvmNode2.conf +++ /dev/null @@ -1,6 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-node1.router = "direct" -akka.actor.deployment.service-node1.cluster.preferred-nodes = ["node:node1"] -akka.actor.deployment.service-node2.router = "direct" -akka.actor.deployment.service-node2.cluster.preferred-nodes = ["node:node2"] diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNodeMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNodeMultiJvmNode2.opts deleted file mode 100644 index bb140941a5..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/homenode/HomeNodeMultiJvmNode2.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node2 -Dakka.remote.port=9992 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmNode1.conf deleted file mode 100644 index aa0d7771c8..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmNode1.conf +++ /dev/null @@ -1,4 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "direct" -akka.actor.deployment.service-hello.cluster.preferred-nodes = ["node:node1"] diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmNode1.opts 
b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmNode1.opts deleted file mode 100644 index dc86c1c9c0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmNode2.conf deleted file mode 100644 index aa0d7771c8..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmNode2.conf +++ /dev/null @@ -1,4 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "direct" -akka.actor.deployment.service-hello.cluster.preferred-nodes = ["node:node1"] diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmNode2.opts deleted file mode 100644 index bb140941a5..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmNode2.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node2 -Dakka.remote.port=9992 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmSpec.scala deleted file mode 100644 index a7b61af3e7..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/direct/normalusage/SingleReplicaDirectRoutingMultiJvmSpec.scala +++ /dev/null @@ -1,62 +0,0 @@ -package akka.cluster.routing.direct.normalusage - -import akka.actor.Actor -import akka.config.Config -import akka.cluster.{ ClusterActorRef, ClusterTestNode, MasterClusterTestNode, Cluster } -import akka.cluster.LocalCluster - -object SingleReplicaDirectRoutingMultiJvmSpec { - val NrOfNodes = 2 - - class SomeActor extends Actor with Serializable { - //println("---------------------------------------------------------------------------") - //println("SomeActor has been created on node [" + Config.nodename + "]") - //println("---------------------------------------------------------------------------") - - def receive = { - case "identify" ⇒ { - //println("The node received the 'identify' command: " + Config.nodename) - reply(Config.nodename) - } - } - } -} - -class SingleReplicaDirectRoutingMultiJvmNode1 extends MasterClusterTestNode { - - import SingleReplicaDirectRoutingMultiJvmSpec._ - - val testNodes = NrOfNodes - - "___" must { - "___" in { - Cluster.node.start() - LocalCluster.barrier("waiting-for-begin", NrOfNodes).await() - - LocalCluster.barrier("waiting-to-end", NrOfNodes).await() - Cluster.node.shutdown() - } - } -} - -class SingleReplicaDirectRoutingMultiJvmNode2 extends ClusterTestNode { - - import SingleReplicaDirectRoutingMultiJvmSpec._ - - "Direct Router: when node send message to existing node it" must { - "communicate with that node" in { - Cluster.node.start() - LocalCluster.barrier("waiting-for-begin", NrOfNodes).await() - - val actor = 
Actor.actorOf(Props[SomeActor]("service-hello").asInstanceOf[ClusterActorRef] - actor.isRunning must be(true) - - val result = (actor ? "identify").get - result must equal("node1") - - LocalCluster.barrier("waiting-to-end", NrOfNodes).await() - Cluster.node.shutdown() - } - } -} - diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode1.conf deleted file mode 100644 index 1772693874..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode1.conf +++ /dev/null @@ -1,8 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handlers = ["akka.testkit.TestEventListener"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "random" -akka.actor.deployment.service-hello.cluster.preferred-nodes = ["node:node1", "node:node3"] -akka.actor.deployment.service-hello.nr-of-instances = 2 -akka.actor.timeout = 30 -akka.cluster.session-timeout = 10 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode1.opts deleted file mode 100644 index f1306829d9..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 -Dakka.event.force-sync=true diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode2.conf deleted file mode 100644 index 1772693874..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode2.conf +++ /dev/null @@ -1,8 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handlers = ["akka.testkit.TestEventListener"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "random" -akka.actor.deployment.service-hello.cluster.preferred-nodes = ["node:node1", "node:node3"] -akka.actor.deployment.service-hello.nr-of-instances = 2 -akka.actor.timeout = 30 -akka.cluster.session-timeout = 10 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode2.opts deleted file mode 100644 index 897e69f626..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode2.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node2 -Dakka.remote.port=9992 -Dakka.event.force-sync=true diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode3.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode3.conf deleted file mode 100644 index 1772693874..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode3.conf +++ /dev/null @@ -1,8 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handlers = ["akka.testkit.TestEventListener"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "random" 
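All of these multi-jvm specs coordinate their nodes through named barriers: the node that owns a step runs it inside LocalCluster.barrier(name, parties) { ... }, while the other nodes block on barrier(name, parties).await() under the same name. A minimal, framework-free sketch of that rendezvous follows, with plain in-process threads standing in for the separate JVMs; TestBarrier and BarrierDemo are hypothetical names, and running the body before the rendezvous is an assumption about the real LocalCluster, not its actual implementation.

import java.util.concurrent.{ ConcurrentHashMap, CyclicBarrier }

object TestBarrier {
  private val barriers = new ConcurrentHashMap[String, CyclicBarrier]()

  private def get(name: String, parties: Int): CyclicBarrier =
    barriers.computeIfAbsent(name, _ => new CyclicBarrier(parties))

  // The node that owns this step runs the body, then meets the others.
  def barrier(name: String, parties: Int)(body: => Unit): Unit = {
    body
    get(name, parties).await()
  }

  // Nodes with nothing to do in this step just wait at the same barrier.
  def await(name: String, parties: Int): Unit = get(name, parties).await()
}

object BarrierDemo extends App {
  // Two in-process "nodes": the worker starts the service, the other waits.
  val worker = new Thread(() => TestBarrier.barrier("node-start", 2) { println("service started") })
  worker.start()
  TestBarrier.await("node-start", 2) // returns only after the worker's body ran
  worker.join()
}

Because every participant resolves the barrier by name, the step sequence ("node-start", "actor-creation", "verify-actor", ...) reads the same in every node class, which is exactly how the specs in this diff are laid out.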
-akka.actor.deployment.service-hello.cluster.preferred-nodes = ["node:node1", "node:node3"] -akka.actor.deployment.service-hello.nr-of-instances = 2 -akka.actor.timeout = 30 -akka.cluster.session-timeout = 10 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode3.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode3.opts deleted file mode 100644 index 4127fb94fc..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmNode3.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node3 -Dakka.remote.port=9993 -Dakka.event.force-sync=true diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmSpec.scala deleted file mode 100644 index cbdc42dbe9..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/failover/RandomFailoverMultiJvmSpec.scala +++ /dev/null @@ -1,145 +0,0 @@ -package akka.cluster.routing.random.failover - -import akka.config.Config -import akka.cluster._ -import akka.actor.{ ActorRef, Actor } -import akka.event.EventHandler -import akka.util.duration._ -import akka.util.{ Duration, Timer } -import akka.testkit.{ EventFilter, TestEvent } -import java.util.{ Collections, Set ⇒ JSet } -import java.net.ConnectException -import java.nio.channels.NotYetConnectedException -import akka.cluster.LocalCluster._ -import akka.dispatch.Await - -object RandomFailoverMultiJvmSpec { - - val NrOfNodes = 3 - - class SomeActor extends Actor with Serializable { - - def receive = { - case "identify" ⇒ - reply(Config.nodename) - } - } - -} - -class RandomFailoverMultiJvmNode1 extends MasterClusterTestNode { - - import RandomFailoverMultiJvmSpec._ - - def testNodes = NrOfNodes - - "Random: when random router fails" must { - "jump to another replica" ignore { - val ignoreExceptions = Seq( - EventFilter[NotYetConnectedException], - EventFilter[ConnectException], - EventFilter[ClusterException], - EventFilter[java.nio.channels.ClosedChannelException]) - - var oldFoundConnections: JSet[String] = null - var actor: ActorRef = null - - barrier("node-start", NrOfNodes) { - EventHandler.notify(TestEvent.Mute(ignoreExceptions)) - Cluster.node.start() - } - - barrier("actor-creation", NrOfNodes) { - actor = Actor.actorOf(Props[SomeActor]("service-hello") - actor.isInstanceOf[ClusterActorRef] must be(true) - } - - val timer = Timer(30.seconds, true) - while (timer.isTicking && - !Cluster.node.isInUseOnNode("service-hello", "node1") && - !Cluster.node.isInUseOnNode("service-hello", "node3")) {} - - barrier("actor-usage", NrOfNodes) { - Cluster.node.isInUseOnNode("service-hello") must be(true) - oldFoundConnections = identifyConnections(actor) - - //since we have replication factor 2 - oldFoundConnections.size() must be(2) - } - - barrier("verify-fail-over", NrOfNodes - 1) { - val timer = Timer(30.seconds, true) - while (timer.isTicking && - !Cluster.node.isInUseOnNode("service-hello", "node1") && - !Cluster.node.isInUseOnNode("service-hello", "node2")) {} - - val newFoundConnections = identifyConnections(actor) - - //it still must be 2 since a different node should have been used to failover to - newFoundConnections.size() must be(2) - - //they are not disjoint since, there must be a single element that is in both - Collections.disjoint(newFoundConnections, oldFoundConnections) 
must be(false) - - //but they should not be equal since the shutdown-node has been replaced by another one. - newFoundConnections.equals(oldFoundConnections) must be(false) - } - - Cluster.node.shutdown() - } - } - - def identifyConnections(actor: ActorRef): JSet[String] = { - val set = new java.util.HashSet[String] - for (i ← 0 until 100) { // we should get hits from both nodes in 100 attempts, if not then not very random - val value = Await.result(actor ? "identify", timeout.duration).asInstanceOf[String] - set.add(value) - } - set - } -} - -class RandomFailoverMultiJvmNode2 extends ClusterTestNode { - - import RandomFailoverMultiJvmSpec._ - - "___" must { - "___" ignore { - barrier("node-start", NrOfNodes) { - Cluster.node.start() - } - - barrier("actor-creation", NrOfNodes).await() - barrier("actor-usage", NrOfNodes).await() - - Cluster.node.isInUseOnNode("service-hello") must be(false) - - Thread.sleep(5000) // wait for fail-over from node3 - - barrier("verify-fail-over", NrOfNodes - 1).await() - - Cluster.node.shutdown() - } - } -} - -class RandomFailoverMultiJvmNode3 extends ClusterTestNode { - - import RandomFailoverMultiJvmSpec._ - - "___" must { - "___" ignore { - barrier("node-start", NrOfNodes) { - Cluster.node.start() - } - - barrier("actor-creation", NrOfNodes).await() - barrier("actor-usage", NrOfNodes).await() - - Cluster.node.isInUseOnNode("service-hello") must be(true) - - Cluster.node.shutdown() - } - } -} - diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmNode1.conf deleted file mode 100644 index 012685917c..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmNode1.conf +++ /dev/null @@ -1,8 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-node1.router = "random" -akka.actor.deployment.service-node1.cluster.preferred-nodes = ["node:node1"] -akka.actor.deployment.service-node1.nr-of-instances = 1 -akka.actor.deployment.service-node2.router = "random" -akka.actor.deployment.service-node2.cluster.preferred-nodes = ["node:node2"] -akka.actor.deployment.service-node2.nr-of-instances = 1 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmNode1.opts deleted file mode 100644 index dc86c1c9c0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmNode2.conf deleted file mode 100644 index 012685917c..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmNode2.conf +++ /dev/null @@ -1,8 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-node1.router = "random" -akka.actor.deployment.service-node1.cluster.preferred-nodes = ["node:node1"] -akka.actor.deployment.service-node1.nr-of-instances = 1 -akka.actor.deployment.service-node2.router = "random" -akka.actor.deployment.service-node2.cluster.preferred-nodes = ["node:node2"] 
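The random fail-over spec above rests on a sampling argument: ask the router 100 times to discover which replicas answer (its own comment notes that 100 attempts should hit every replica if routing is random), then repeat the sampling after one node is shut down and compare the two sets, which must have the same size, overlap, and yet differ. A self-contained sketch of that argument, with scala.util.Random standing in for the clustered router and all names hypothetical:

import scala.util.Random

object FailoverSampling extends App {
  // Ask the router repeatedly and record which node answered each time.
  def identifyConnections(ask: () => String, samples: Int = 100): Set[String] =
    (0 until samples).map(_ => ask()).toSet

  // Replication factor 2: the router initially spreads over node1 and node3.
  val before = identifyConnections(() => if (Random.nextBoolean()) "node1" else "node3")
  assert(before == Set("node1", "node3")) // 100 samples all but guarantee both show up

  // After node3 is shut down the router is expected to fail over to node2.
  val after = identifyConnections(() => if (Random.nextBoolean()) "node1" else "node2")
  assert(after.size == 2)                   // still two connections
  assert((after intersect before).nonEmpty) // not disjoint: node1 survives
  assert(after != before)                   // not equal: node3 was replaced
}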
-akka.actor.deployment.service-node2.nr-of-instances = 1 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmNode2.opts deleted file mode 100644 index bb140941a5..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmNode2.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node2 -Dakka.remote.port=9992 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmSpec.scala deleted file mode 100644 index a8f4887464..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/homenode/HomeNodeMultiJvmSpec.scala +++ /dev/null @@ -1,60 +0,0 @@ -package akka.cluster.routing.random.homenode - -import akka.config.Config -import akka.actor.Actor -import akka.cluster.{ ClusterTestNode, MasterClusterTestNode, Cluster } -import Cluster._ -import akka.cluster.LocalCluster._ - -object HomeNodeMultiJvmSpec { - - val NrOfNodes = 2 - - class SomeActor extends Actor with Serializable { - def receive = { - case "identify" ⇒ { - reply(Config.nodename) - } - } - } - -} - -class HomeNodeMultiJvmNode1 extends MasterClusterTestNode { - - import HomeNodeMultiJvmSpec._ - - val testNodes = NrOfNodes - - "___" must { - "___" in { - Cluster.node.start() - barrier("waiting-for-begin", NrOfNodes).await() - barrier("waiting-for-end", NrOfNodes).await() - node.shutdown() - } - } -} - -class HomeNodeMultiJvmNode2 extends ClusterTestNode { - - import HomeNodeMultiJvmSpec._ - - "Random Router: A Random Router" must { - "obey 'home-node' config option when instantiated actor in cluster" in { - Cluster.node.start() - barrier("waiting-for-begin", NrOfNodes).await() - - val actorNode1 = Actor.actorOf(Props[SomeActor]("service-node1") - val nameNode1 = (actorNode1 ? "identify").get.asInstanceOf[String] - nameNode1 must equal("node1") - - val actorNode2 = Actor.actorOf(Props[SomeActor]("service-node2") - val nameNode2 = (actorNode2 ? 
"identify").get.asInstanceOf[String] - nameNode2 must equal("node2") - - barrier("waiting-for-end", NrOfNodes).await() - node.shutdown() - } - } -} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_1/Random1ReplicaMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_1/Random1ReplicaMultiJvmNode1.conf deleted file mode 100644 index 729dc64fd6..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_1/Random1ReplicaMultiJvmNode1.conf +++ /dev/null @@ -1,4 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "random" -akka.actor.deployment.service-hello.nr-of-instances = 1 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_1/Random1ReplicaMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_1/Random1ReplicaMultiJvmNode1.opts deleted file mode 100644 index dc86c1c9c0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_1/Random1ReplicaMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_1/Random1ReplicaMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_1/Random1ReplicaMultiJvmSpec.scala deleted file mode 100644 index 525a09467a..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_1/Random1ReplicaMultiJvmSpec.scala +++ /dev/null @@ -1,51 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -package akka.cluster.routing.random.replicationfactor_1 - -import akka.cluster._ -import akka.cluster.Cluster._ -import akka.actor._ -import akka.config.Config -import akka.cluster.LocalCluster._ - -/** - * Test that if a single node is used with a random router with replication factor then the actor is instantiated - * on the single node. - */ -object Random1ReplicaMultiJvmSpec { - - class HelloWorld extends Actor with Serializable { - def receive = { - case "Hello" ⇒ - reply("World from node [" + Config.nodename + "]") - } - } - -} - -class Random1ReplicaMultiJvmNode1 extends MasterClusterTestNode { - - import Random1ReplicaMultiJvmSpec._ - - val testNodes = 1 - - "Random Router: A cluster" must { - - "create clustered actor, get a 'local' actor on 'home' node and a 'ref' to actor on remote node" in { - Cluster.node.start() - - var hello = Actor.actorOf(Props[HelloWorld]("service-hello") - hello must not equal (null) - hello.address must equal("service-hello") - hello.isInstanceOf[ClusterActorRef] must be(true) - - hello must not equal (null) - val reply = (hello ? 
"Hello").as[String].getOrElse(fail("Should have recieved reply from node1")) - reply must equal("World from node [node1]") - - node.shutdown() - } - } -} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode1.conf deleted file mode 100644 index ae344f2100..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode1.conf +++ /dev/null @@ -1,4 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "random" -akka.actor.deployment.service-hello.nr-of-instances = 3 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode1.opts deleted file mode 100644 index dc86c1c9c0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode2.conf deleted file mode 100644 index 09a37715d0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode2.conf +++ /dev/null @@ -1,4 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "random" -akka.actor.deployment.service-hello.cluster.repliction-factor = 3 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode2.opts deleted file mode 100644 index bb140941a5..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode2.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node2 -Dakka.remote.port=9992 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode3.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode3.conf deleted file mode 100644 index ae344f2100..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode3.conf +++ /dev/null @@ -1,4 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "random" -akka.actor.deployment.service-hello.nr-of-instances = 3 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode3.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode3.opts deleted file mode 100644 index 089e3b7776..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmNode3.opts +++ /dev/null @@ -1 +0,0 @@ 
--Dakka.cluster.nodename=node3 -Dakka.remote.port=9993 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmSpec.scala deleted file mode 100644 index c1a4175a09..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/random/replicationfactor_3/Random3ReplicasMultiJvmSpec.scala +++ /dev/null @@ -1,119 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -package akka.cluster.routing.random.replicationfactor_3 - -import akka.cluster._ -import akka.actor._ -import akka.config.Config -import Cluster._ -import akka.cluster.LocalCluster._ -import akka.dispatch.Await - -/** - * When a MultiJvmNode is started, will it automatically be part of the cluster (so will it automatically be eligible - * for running actors, or will it be just a 'client' talking to the cluster. - */ -object Random3ReplicasMultiJvmSpec { - val NrOfNodes = 3 - - class HelloWorld extends Actor with Serializable { - def receive = { - case "Hello" ⇒ - reply("World from node [" + Config.nodename + "]") - } - } -} - -/** - * What is the purpose of this node? Is this just a node for the cluster to make use of? - */ -class Random3ReplicasMultiJvmNode1 extends MasterClusterTestNode { - - import Random3ReplicasMultiJvmSpec._ - - def testNodes: Int = NrOfNodes - - "___" must { - "___" in { - Cluster.node.start() - - barrier("start-nodes", NrOfNodes).await() - - barrier("create-actor", NrOfNodes).await() - - barrier("end-test", NrOfNodes).await() - - node.shutdown() - } - } -} - -class Random3ReplicasMultiJvmNode2 extends ClusterTestNode { - - import Random3ReplicasMultiJvmSpec._ - import Cluster._ - - "Random: A cluster" must { - - "distribute requests randomly" in { - Cluster.node.start() - - //wait till node 1 has started. - barrier("start-nodes", NrOfNodes).await() - - //check if the actorRef is the expected remoteActorRef. - var hello: ActorRef = null - hello = Actor.actorOf(Props[HelloWorld]("service-hello") - hello must not equal (null) - hello.address must equal("service-hello") - hello.isInstanceOf[ClusterActorRef] must be(true) - - barrier("create-actor", NrOfNodes).await() - - val replies = collection.mutable.Map.empty[String, Int] - def count(reply: String) = { - if (replies.get(reply).isEmpty) replies.put(reply, 1) - else replies.put(reply, replies(reply) + 1) - } - - for (i ← 0 until 1000) { - count(Await.result((hello ? 
"Hello").mapTo[String], 10 seconds)) - } - - val repliesNode1 = replies("World from node [node1]") - val repliesNode2 = replies("World from node [node2]") - val repliesNode3 = replies("World from node [node3]") - - assert(repliesNode1 > 100) - assert(repliesNode2 > 100) - assert(repliesNode3 > 100) - assert(repliesNode1 + repliesNode2 + repliesNode3 === 1000) - - barrier("end-test", NrOfNodes).await() - - node.shutdown() - } - } -} - -class Random3ReplicasMultiJvmNode3 extends ClusterTestNode { - - import Random3ReplicasMultiJvmSpec._ - import Cluster._ - - "___" must { - "___" in { - Cluster.node.start() - - barrier("start-nodes", NrOfNodes).await() - - barrier("create-actor", NrOfNodes).await() - - barrier("end-test", NrOfNodes).await() - - node.shutdown() - } - } -} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode1.conf deleted file mode 100644 index 0a858fb8fd..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode1.conf +++ /dev/null @@ -1,8 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handlers = ["akka.testkit.TestEventListener"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "round-robin" -akka.actor.deployment.service-hello.nr-of-instances = 2 -akka.actor.deployment.service-hello.cluster.preferred-nodes = ["node:node1","node:node3"] -akka.cluster.include-ref-node-in-replica-set = on -akka.actor.timeout = 30 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode1.opts deleted file mode 100644 index f1306829d9..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 -Dakka.event.force-sync=true diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode2.conf deleted file mode 100644 index 0a858fb8fd..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode2.conf +++ /dev/null @@ -1,8 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handlers = ["akka.testkit.TestEventListener"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "round-robin" -akka.actor.deployment.service-hello.nr-of-instances = 2 -akka.actor.deployment.service-hello.cluster.preferred-nodes = ["node:node1","node:node3"] -akka.cluster.include-ref-node-in-replica-set = on -akka.actor.timeout = 30 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode2.opts deleted file mode 100644 index 897e69f626..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode2.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node2 -Dakka.remote.port=9992 -Dakka.event.force-sync=true diff --git 
a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode3.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode3.conf deleted file mode 100644 index 0a858fb8fd..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode3.conf +++ /dev/null @@ -1,8 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handlers = ["akka.testkit.TestEventListener"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "round-robin" -akka.actor.deployment.service-hello.nr-of-instances = 2 -akka.actor.deployment.service-hello.cluster.preferred-nodes = ["node:node1","node:node3"] -akka.cluster.include-ref-node-in-replica-set = on -akka.actor.timeout = 30 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode3.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode3.opts deleted file mode 100644 index 4127fb94fc..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmNode3.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node3 -Dakka.remote.port=9993 -Dakka.event.force-sync=true diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmSpec.scala deleted file mode 100644 index 1b97ef1075..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/failover/RoundRobinFailoverMultiJvmSpec.scala +++ /dev/null @@ -1,146 +0,0 @@ -package akka.cluster.routing.roundrobin.failover - -import akka.config.Config -import akka.cluster._ -import akka.actor.{ ActorRef, Actor } -import akka.event.EventHandler -import akka.testkit.{ EventFilter, TestEvent } -import akka.util.duration._ -import akka.util.{ Duration, Timer } -import java.util.{ Collections, Set ⇒ JSet } -import java.net.ConnectException -import java.nio.channels.NotYetConnectedException -import java.lang.Thread -import akka.cluster.LocalCluster._ -import akka.dispatch.Await - -object RoundRobinFailoverMultiJvmSpec { - - val NrOfNodes = 3 - - class SomeActor extends Actor with Serializable { - - def receive = { - case "identify" ⇒ - reply(Config.nodename) - } - } - -} - -class RoundRobinFailoverMultiJvmNode1 extends MasterClusterTestNode { - - import RoundRobinFailoverMultiJvmSpec._ - - def testNodes = NrOfNodes - - "Round Robin: when round robin router fails" must { - "jump to another replica" ignore { - val ignoreExceptions = Seq( - EventFilter[NotYetConnectedException], - EventFilter[ConnectException], - EventFilter[ClusterException]) - - var oldFoundConnections: JSet[String] = null - var actor: ActorRef = null - - barrier("node-start", NrOfNodes) { - EventHandler.notify(TestEvent.Mute(ignoreExceptions)) - Cluster.node.start() - } - - barrier("actor-creation", NrOfNodes) { - actor = Actor.actorOf(Props[SomeActor]("service-hello") - actor.isInstanceOf[ClusterActorRef] must be(true) - } - - val timer = Timer(30.seconds, true) - while (timer.isTicking && - !Cluster.node.isInUseOnNode("service-hello", "node1") && - !Cluster.node.isInUseOnNode("service-hello", "node3")) {} - //Thread.sleep(5000) // wait for all actors to start up on other nodes - - barrier("actor-usage", NrOfNodes) { - 
Cluster.node.isInUseOnNode("service-hello") must be(true) - oldFoundConnections = identifyConnections(actor) - - //since we have replication factor 2 - oldFoundConnections.size() must be(2) - } - - Thread.sleep(5000) // wait for fail-over from node3 - - barrier("verify-fail-over", NrOfNodes - 1) { - val timer = Timer(30.seconds, true) - while (timer.isTicking && - !Cluster.node.isInUseOnNode("service-hello", "node1") && - !Cluster.node.isInUseOnNode("service-hello", "node2")) {} - - val newFoundConnections = identifyConnections(actor) - - //it still must be 2 since a different node should have been used to failover to - newFoundConnections.size() must be(2) - - //they are not disjoint since, there must be a single element that is in both - Collections.disjoint(newFoundConnections, oldFoundConnections) must be(false) - - //but they should not be equal since the shutdown-node has been replaced by another one. - newFoundConnections.equals(oldFoundConnections) must be(false) - } - - Cluster.node.shutdown() - } - } - - def identifyConnections(actor: ActorRef): JSet[String] = { - val set = new java.util.HashSet[String] - for (i ← 0 until 100) { - val value = Await.result(actor ? "identify", timeout.duration).asInstanceOf[String] - set.add(value) - } - set - } -} - -class RoundRobinFailoverMultiJvmNode2 extends ClusterTestNode { - - import RoundRobinFailoverMultiJvmSpec._ - - "___" must { - "___" ignore { - barrier("node-start", NrOfNodes) { - Cluster.node.start() - } - - barrier("actor-creation", NrOfNodes).await() - barrier("actor-usage", NrOfNodes).await() - - Cluster.node.isInUseOnNode("service-hello") must be(false) - - Thread.sleep(5000) // wait for fail-over from node3 - - barrier("verify-fail-over", NrOfNodes - 1).await() - } - } -} - -class RoundRobinFailoverMultiJvmNode3 extends ClusterTestNode { - - import RoundRobinFailoverMultiJvmSpec._ - - "___" must { - "___" ignore { - barrier("node-start", NrOfNodes) { - Cluster.node.start() - } - - barrier("actor-creation", NrOfNodes).await() - barrier("actor-usage", NrOfNodes).await() - - Cluster.node.isInUseOnNode("service-hello") must be(true) - - Cluster.node.shutdown() - } - } -} - diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmNode1.conf deleted file mode 100644 index 85536cd656..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmNode1.conf +++ /dev/null @@ -1,8 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-node1.router = "round-robin" -akka.actor.deployment.service-node1.cluster.preferred-nodes = ["node:node1"] -akka.actor.deployment.service-node1.nr-of-instances = 1 -akka.actor.deployment.service-node2.router = "round-robin" -akka.actor.deployment.service-node2.cluster.preferred-nodes = ["node:node2"] -akka.actor.deployment.service-node2.nr-of-instances = 1 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmNode1.opts deleted file mode 100644 index dc86c1c9c0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 diff --git 
a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmNode2.conf deleted file mode 100644 index 99c85fd1a8..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmNode2.conf +++ /dev/null @@ -1,5 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "round-robin" -akka.actor.deployment.service-hello.cluster.preferred-nodes = ["node:node1"] -akka.actor.deployment.service-hello.nr-of-instances = 1 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmNode2.opts deleted file mode 100644 index bb140941a5..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmNode2.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node2 -Dakka.remote.port=9992 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmSpec.scala deleted file mode 100644 index 4dc9e96429..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/homenode/HomeNodeMultiJvmSpec.scala +++ /dev/null @@ -1,63 +0,0 @@ -package akka.cluster.routing.roundrobin.homenode - -import akka.config.Config -import akka.actor.Actor -import akka.cluster.{ ClusterTestNode, MasterClusterTestNode, Cluster } -import Cluster._ -import akka.cluster.LocalCluster._ - -object HomeNodeMultiJvmSpec { - - val NrOfNodes = 2 - - class SomeActor extends Actor with Serializable { - def receive = { - case "identify" ⇒ { - reply(Config.nodename) - } - } - } - -} - -class HomeNodeMultiJvmNode1 extends MasterClusterTestNode { - - import HomeNodeMultiJvmSpec._ - - val testNodes = NrOfNodes - - "___" must { - "___" in { - - Cluster.node.start() - barrier("waiting-for-begin", NrOfNodes).await() - barrier("waiting-for-end", NrOfNodes).await() - - node.shutdown() - } - } -} - -class HomeNodeMultiJvmNode2 extends ClusterTestNode { - - import HomeNodeMultiJvmSpec._ - - "Round Robin: A Router" must { - "obey 'home-node' config option when instantiated actor in cluster" in { - - Cluster.node.start() - barrier("waiting-for-begin", NrOfNodes).await() - - val actorNode1 = Actor.actorOf(Props[SomeActor]("service-node1") - val name1 = (actorNode1 ? "identify").get.asInstanceOf[String] - name1 must equal("node1") - - val actorNode2 = Actor.actorOf(Props[SomeActor]("service-node2") - val name2 = (actorNode2 ? 
"identify").get.asInstanceOf[String] - name2 must equal("node2") - - barrier("waiting-for-end", NrOfNodes).await() - node.shutdown() - } - } -} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_1/RoundRobin1ReplicaMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_1/RoundRobin1ReplicaMultiJvmNode1.conf deleted file mode 100644 index 88df1a6421..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_1/RoundRobin1ReplicaMultiJvmNode1.conf +++ /dev/null @@ -1,4 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "round-robin" -akka.actor.deployment.service-hello.nr-of-instances = 1 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_1/RoundRobin1ReplicaMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_1/RoundRobin1ReplicaMultiJvmNode1.opts deleted file mode 100644 index dc86c1c9c0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_1/RoundRobin1ReplicaMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_1/RoundRobin1ReplicaMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_1/RoundRobin1ReplicaMultiJvmSpec.scala deleted file mode 100644 index f8fd41b0cf..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_1/RoundRobin1ReplicaMultiJvmSpec.scala +++ /dev/null @@ -1,49 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -package akka.cluster.routing.roundrobin.replicationfactor_1 - -import akka.cluster._ -import Cluster._ -import akka.actor._ -import akka.config.Config -import akka.cluster.LocalCluster._ - -/** - * Test that if a single node is used with a round robin router with replication factor then the actor is instantiated on the single node. - */ -object RoundRobin1ReplicaMultiJvmSpec { - - class HelloWorld extends Actor with Serializable { - def receive = { - case "Hello" ⇒ reply("World from node [" + Config.nodename + "]") - } - } - -} - -class RoundRobin1ReplicaMultiJvmNode1 extends MasterClusterTestNode { - - import RoundRobin1ReplicaMultiJvmSpec._ - - val testNodes = 1 - - "Round Robin: A cluster" must { - - "create clustered actor, get a 'local' actor on 'home' node and a 'ref' to actor on remote node" in { - Cluster.node.start() - - var hello = Actor.actorOf(Props[HelloWorld]("service-hello") - hello must not equal (null) - hello.address must equal("service-hello") - hello.isInstanceOf[ClusterActorRef] must be(true) - - hello must not equal (null) - val reply = (hello ? 
"Hello").as[String].getOrElse(fail("Should have recieved reply from node1")) - reply must equal("World from node [node1]") - - node.shutdown() - } - } -} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmNode1.conf deleted file mode 100644 index a763b66792..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmNode1.conf +++ /dev/null @@ -1,4 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "round-robin" -akka.actor.deployment.service-hello.nr-of-instances = 2 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmNode1.opts deleted file mode 100644 index dc86c1c9c0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmNode2.conf deleted file mode 100644 index a763b66792..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmNode2.conf +++ /dev/null @@ -1,4 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "round-robin" -akka.actor.deployment.service-hello.nr-of-instances = 2 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmNode2.opts deleted file mode 100644 index bb140941a5..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmNode2.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node2 -Dakka.remote.port=9992 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmSpec.scala deleted file mode 100644 index b101a06f81..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_2/RoundRobin2ReplicasMultiJvmSpec.scala +++ /dev/null @@ -1,121 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. 
-
-package akka.cluster.routing.roundrobin.replicationfactor_2
-
-import org.scalatest.WordSpec
-import org.scalatest.matchers.MustMatchers
-import org.scalatest.BeforeAndAfterAll
-
-import akka.cluster._
-import Cluster._
-import akka.cluster.LocalCluster._
-import akka.actor._
-import akka.actor.Actor._
-import akka.config.Config
-import akka.util.duration._
-import akka.util.{ Duration, Timer }
-import akka.cluster.LocalCluster._
-
-import java.util.concurrent.atomic.AtomicInteger
-import java.util.concurrent.ConcurrentHashMap
-import akka.dispatch.Await
-
-/**
- * When a MultiJvmNode is started, will it automatically be part of the cluster (so will it automatically be
- * eligible for running actors), or will it just be a 'client' talking to the cluster?
- */
-object RoundRobin2ReplicasMultiJvmSpec {
-  val NrOfNodes = 2
-
-  class HelloWorld extends Actor with Serializable {
-    def receive = {
-      case "Hello" ⇒
-        reply("World from node [" + Config.nodename + "]")
-    }
-  }
-}
-
-class RoundRobin2ReplicasMultiJvmNode1 extends MasterClusterTestNode {
-  import RoundRobin2ReplicasMultiJvmSpec._
-
-  val testNodes = NrOfNodes
-
-  "Round Robin: A cluster" must {
-
-    "create clustered actor, get a 'local' actor on 'home' node and a 'ref' to actor on remote node" in {
-      System.getProperty("akka.cluster.nodename", "") must be("node1")
-      System.getProperty("akka.remote.port", "") must be("9991")
-
-      //wait till node 1 has started.
-      barrier("start-node1", NrOfNodes) {
-        Cluster.node.start()
-      }
-
-      //wait till node 2 has started.
-      barrier("start-node2", NrOfNodes).await()
-
-      //wait till an actor reference on node 2 has become available.
-      barrier("get-ref-to-actor-on-node2", NrOfNodes) {
-        val timer = Timer(30.seconds, true)
-        while (timer.isTicking && !node.isInUseOnNode("service-hello")) {}
-      }
-
-      //wait till node 2 has sent a message to the replicas.
-      barrier("send-message-from-node2-to-replicas", NrOfNodes).await()
-
-      node.shutdown()
-    }
-  }
-}
-
-class RoundRobin2ReplicasMultiJvmNode2 extends ClusterTestNode {
-  import RoundRobin2ReplicasMultiJvmSpec._
-
-  "Round Robin: A cluster" must {
-
-    "create clustered actor, get a 'local' actor on 'home' node and a 'ref' to actor on remote node" in {
-      System.getProperty("akka.cluster.nodename", "") must be("node2")
-      System.getProperty("akka.remote.port", "") must be("9992")
-
-      //wait till node 1 has started.
-      barrier("start-node1", NrOfNodes).await()
-
-      //wait till node 2 has started.
-      barrier("start-node2", NrOfNodes) {
-        Cluster.node.start()
-      }
-
-      //check if the actorRef is the expected remoteActorRef.
-      var hello: ActorRef = null
-      barrier("get-ref-to-actor-on-node2", NrOfNodes) {
-        hello = Actor.actorOf(Props[HelloWorld]("service-hello"))
-        hello must not equal (null)
-        hello.address must equal("service-hello")
-        hello.isInstanceOf[ClusterActorRef] must be(true)
-      }
-
-      barrier("send-message-from-node2-to-replicas", NrOfNodes) {
-        //todo: is there a reason to check for null again, since it was already done in the previous block?
-        hello must not equal (null)
-
-        val replies = new ConcurrentHashMap[String, AtomicInteger]()
-        def count(reply: String) = {
-          val counter = new AtomicInteger(0)
-          Option(replies.putIfAbsent(reply, counter)).getOrElse(counter).incrementAndGet()
-        }
-
-        implicit val timeout = Timeout(Duration(20, "seconds"))
-
-        for (i ← 1 to 8)
-          count(Await.result((hello ?
"Hello").mapTo[String], timeout.duration)) - - replies.get("World from node [node1]").get must equal(4) - replies.get("World from node [node2]").get must equal(4) - } - - node.shutdown() - } - } -} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode1.conf deleted file mode 100644 index 8592b46c85..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode1.conf +++ /dev/null @@ -1,4 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "round-robin" -akka.actor.deployment.service-hello.nr-of-instances = 3 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode1.opts deleted file mode 100644 index dc86c1c9c0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode2.conf deleted file mode 100644 index 92bafcfe8b..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode2.conf +++ /dev/null @@ -1,4 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "round-robin" -akka.actor.deployment.service-hello.cluster.repliction-factor = 3 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode2.opts deleted file mode 100644 index bb140941a5..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode2.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node2 -Dakka.remote.port=9992 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode3.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode3.conf deleted file mode 100644 index 8592b46c85..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode3.conf +++ /dev/null @@ -1,4 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "round-robin" -akka.actor.deployment.service-hello.nr-of-instances = 3 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode3.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode3.opts deleted file mode 100644 index 
089e3b7776..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmNode3.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node3 -Dakka.remote.port=9993 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmSpec.scala deleted file mode 100644 index f62b7d3e74..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/roundrobin/replicationfactor_3/RoundRobin3ReplicasMultiJvmSpec.scala +++ /dev/null @@ -1,158 +0,0 @@ -// /** -// * Copyright (C) 2009-2012 Typesafe Inc. -// */ - -// package akka.cluster.routing.roundrobin.replicationfactor_3 - -// import org.scalatest.WordSpec -// import org.scalatest.matchers.MustMatchers -// import org.scalatest.BeforeAndAfterAll - -// import akka.cluster._ -// import akka.actor._ -// import akka.actor.Actor._ -// import akka.util.duration._ -// import akka.util.{ Duration, Timer } -// import akka.config.Config -// import akka.cluster.LocalCluster._ -// import Cluster._ - -// /** -// * When a MultiJvmNode is started, will it automatically be part of the cluster (so will it automatically be eligible -// * for running actors, or will it be just a 'client' talking to the cluster. -// */ -// object RoundRobin3ReplicasMultiJvmSpec { -// val NrOfNodes = 3 - -// class HelloWorld extends Actor with Serializable { -// def receive = { -// case "Hello" ⇒ -// reply("World from node [" + Config.nodename + "]") -// } -// } -// } - -// /** -// * What is the purpose of this node? Is this just a node for the cluster to make use of? -// */ -// class RoundRobin3ReplicasMultiJvmNode1 extends MasterClusterTestNode { -// import RoundRobin3ReplicasMultiJvmSpec._ - -// val testNodes = NrOfNodes - -// "Round Robin: A cluster" must { - -// "create clustered actor, get a 'local' actor on 'home' node and a 'ref' to actor on remote node" in { - -// //wait till node 1 has started. -// barrier("start-node1", NrOfNodes) { -// Cluster.node.boot() -// } - -// //wait till node 2 has started. -// barrier("start-node2", NrOfNodes).await() - -// //wait till node 3 has started. -// barrier("start-node3", NrOfNodes).await() - -// //wait till an actor reference on node 2 has become available. -// barrier("get-ref-to-actor-on-node2", NrOfNodes) { -// val timer = Timer(30.seconds, true) -// while (timer.isTicking && !node.isInUseOnNode("service-hello")) {} -// } - -// //wait till node 2 has sent a message to the replicas. -// barrier("send-message-from-node2-to-replicas", NrOfNodes).await() - -// node.shutdown() -// } -// } -// } - -// class RoundRobin3ReplicasMultiJvmNode2 extends ClusterTestNode { -// import RoundRobin3ReplicasMultiJvmSpec._ -// import Cluster._ - -// "Round Robin: A cluster" must { - -// "create clustered actor, get a 'local' actor on 'home' node and a 'ref' to actor on remote node" in { - -// //wait till node 1 has started. -// barrier("start-node1", NrOfNodes).await() - -// //wait till node 2 has started. -// barrier("start-node2", NrOfNodes) { -// Cluster.node.start() -// } - -// //wait till node 3 has started. -// barrier("start-node3", NrOfNodes).await() - -// //check if the actorRef is the expected remoteActorRef. 
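-// A brief aside before the assertions below: the even 4/4/4 spread asserted
-// further down follows from the router cycling over its replica connections.
-// A minimal sketch of such a round-robin pick (illustrative names only, not
-// the actual ClusterActorRef internals) could look like:
-//   val next = new java.util.concurrent.atomic.AtomicLong(0)
-//   def select(connections: IndexedSeq[ActorRef]): ActorRef =
-//     connections((next.getAndIncrement % connections.size).toInt)
-// With 12 "Hello" messages and 3 replicas, each node then answers 4 times.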
-// var hello: ActorRef = null -// barrier("get-ref-to-actor-on-node2", NrOfNodes) { -// hello = Actor.actorOf(Props[HelloWorld]("service-hello")) -// hello must not equal (null) -// hello.address must equal("service-hello") -// hello.isInstanceOf[ClusterActorRef] must be(true) -// } - -// barrier("send-message-from-node2-to-replicas", NrOfNodes) { -// //todo: is there a reason to check for null again, since it has already been done in the previous block? -// hello must not equal (null) - -// val replies = collection.mutable.Map.empty[String, Int] -// def count(reply: String) = { -// if (replies.get(reply).isEmpty) replies.put(reply, 1) -// else replies.put(reply, replies(reply) + 1) -// } - -// count((hello ? "Hello").as[String].getOrElse(fail("Should have received reply from node1"))) -// count((hello ? "Hello").as[String].getOrElse(fail("Should have received reply from node2"))) -// count((hello ? "Hello").as[String].getOrElse(fail("Should have received reply from node3"))) -// count((hello ? "Hello").as[String].getOrElse(fail("Should have received reply from node1"))) -// count((hello ? "Hello").as[String].getOrElse(fail("Should have received reply from node2"))) -// count((hello ? "Hello").as[String].getOrElse(fail("Should have received reply from node3"))) -// count((hello ? "Hello").as[String].getOrElse(fail("Should have received reply from node1"))) -// count((hello ? "Hello").as[String].getOrElse(fail("Should have received reply from node2"))) -// count((hello ? "Hello").as[String].getOrElse(fail("Should have received reply from node3"))) -// count((hello ? "Hello").as[String].getOrElse(fail("Should have received reply from node1"))) -// count((hello ? "Hello").as[String].getOrElse(fail("Should have received reply from node2"))) -// count((hello ? 
"Hello").as[String].getOrElse(fail("Should have recieved reply from node3"))) - -// replies("World from node [node1]") must equal(4) -// replies("World from node [node2]") must equal(4) -// replies("World from node [node3]") must equal(4) -// } - -// node.shutdown() -// } -// } -// } - -// class RoundRobin3ReplicasMultiJvmNode3 extends ClusterTestNode { -// import RoundRobin3ReplicasMultiJvmSpec._ -// import Cluster._ - -// "Round Robin: A cluster" must { - -// "create clustered actor, get a 'local' actor on 'home' node and a 'ref' to actor on remote node" in { -// barrier("start-node1", NrOfNodes).await() - -// barrier("start-node2", NrOfNodes).await() - -// barrier("start-node3", NrOfNodes) { -// Cluster.node.start() -// } - -// barrier("get-ref-to-actor-on-node2", NrOfNodes) { -// val timer = Timer(30.seconds, true) -// while (timer.isTicking && !node.isInUseOnNode("service-hello")) {} -// } - -// barrier("send-message-from-node2-to-replicas", NrOfNodes).await() - -// node.shutdown() -// } -// } -// } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmNode1.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmNode1.conf deleted file mode 100644 index fd2babf3a9..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmNode1.conf +++ /dev/null @@ -1,6 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handlers = ["akka.testkit.TestEventListener"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "akka.routing.ScatterGatherFirstCompletedRouter" -akka.actor.deployment.service-hello.nr-of-instances = 2 -akka.actor.timeout = 30 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmNode1.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmNode1.opts deleted file mode 100644 index dc86c1c9c0..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmNode1.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node1 -Dakka.remote.port=9991 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmNode2.conf b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmNode2.conf deleted file mode 100644 index fd2babf3a9..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmNode2.conf +++ /dev/null @@ -1,6 +0,0 @@ -akka.enabled-modules = ["cluster"] -akka.event-handlers = ["akka.testkit.TestEventListener"] -akka.event-handler-level = "WARNING" -akka.actor.deployment.service-hello.router = "akka.routing.ScatterGatherFirstCompletedRouter" -akka.actor.deployment.service-hello.nr-of-instances = 2 -akka.actor.timeout = 30 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmNode2.opts b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmNode2.opts deleted file mode 100644 index bb140941a5..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmNode2.opts +++ /dev/null @@ -1 +0,0 @@ --Dakka.cluster.nodename=node2 
-Dakka.remote.port=9992 diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmSpec.scala deleted file mode 100644 index e8cc4f7d68..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/scattergather/failover/ScatterGatherFailoverMultiJvmSpec.scala +++ /dev/null @@ -1,114 +0,0 @@ -package akka.cluster.routing.scattergather.failover - -import akka.config.Config -import akka.cluster._ -import akka.actor.{ ActorRef, Actor } -import akka.event.EventHandler -import akka.testkit.{ EventFilter, TestEvent } -import java.util.{ Collections, Set ⇒ JSet } -import java.net.ConnectException -import java.nio.channels.NotYetConnectedException -import java.lang.Thread -import akka.routing.Routing.Broadcast -import akka.cluster.LocalCluster._ -import akka.dispatch.Await - -object ScatterGatherFailoverMultiJvmSpec { - - val NrOfNodes = 2 - - case class Shutdown(node: Option[String] = None) - case class Sleep(node: String) - - class TestActor extends Actor with Serializable { - - def shutdownNode = new Thread() { - override def run() { - Thread.sleep(2000) - Cluster.node.shutdown() - } - } - - def receive = { - case Shutdown(None) ⇒ shutdownNode - case Sleep(node) if node.equals(Config.nodename) ⇒ - Thread sleep 100 - reply(Config.nodename) - case Shutdown(Some(node)) if node.equals(Config.nodename) ⇒ shutdownNode - case _ ⇒ - Thread sleep 100 - reply(Config.nodename) - } - } - -} - -class ScatterGatherFailoverMultiJvmNode1 extends MasterClusterTestNode { - - import ScatterGatherFailoverMultiJvmSpec._ - - def testNodes = NrOfNodes - - "When the message is sent with ?, and all connections are up, router" must { - "return the first received response" ignore { - val ignoreExceptions = Seq( - EventFilter[NotYetConnectedException], - EventFilter[ConnectException], - EventFilter[ClusterException]) - - EventHandler.notify(TestEvent.Mute(ignoreExceptions)) - - Cluster.node.start() - LocalCluster.barrier("waiting-for-begin", NrOfNodes).await() - - /* - FIXME: Uncomment, when custom routers will be fully supported (ticket #1109) - - val actor = Actor.actorOf(Props[TestActor]("service-hello")).asInstanceOf[ClusterActorRef] - - identifyConnections(actor).size() must be(2) - - // since node1 is falling asleep, response from node2 is gathered - (actor ? Broadcast(Sleep("node1"))).get.asInstanceOf[String] must be("node2") - - Thread sleep 100 - - // since node2 shuts down during processing the message, response from node1 is gathered - (actor ? Broadcast(Shutdown(Some("node2")))).get.asInstanceOf[String] must be("node1") - - */ - LocalCluster.barrier("waiting-for-end", NrOfNodes).await() - Cluster.node.shutdown() - } - } - - def identifyConnections(actor: ActorRef): JSet[String] = { - val set = new java.util.HashSet[String] - for (i ← 0 until NrOfNodes * 2) { - val value = Await.result(actor ? 
"foo", timeout.duration).asInstanceOf[String] - set.add(value) - } - set - } -} - -class ScatterGatherFailoverMultiJvmNode2 extends ClusterTestNode { - - import ScatterGatherFailoverMultiJvmSpec._ - - "___" must { - "___" ignore { - - Cluster.node.start() - LocalCluster.barrier("waiting-for-begin", NrOfNodes).await() - - /* - FIXME: Uncomment, when custom routers will be fully supported (ticket #1109) - Thread.sleep(30 *1000) - */ - - LocalCluster.barrier("waiting-for-end", NrOfNodes).await() - Cluster.node.shutdown() - } - } -} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/sample/PingPongMultiJvmExample.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/sample/PingPongMultiJvmExample.scala deleted file mode 100644 index c7e9aceaf1..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/sample/PingPongMultiJvmExample.scala +++ /dev/null @@ -1,227 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -package akka.cluster.sample - -import akka.cluster._ - -import akka.actor._ -import akka.util.duration._ - -object PingPongMultiJvmExample { - val PING_ADDRESS = "ping" - val PONG_ADDRESS = "pong" - - val ClusterName = "ping-pong-cluster" - val NrOfNodes = 5 - val Pause = true - val PauseTimeout = 5 minutes - - // ----------------------------------------------- - // Messages - // ----------------------------------------------- - - sealed trait PingPong extends Serializable - case object Ping extends PingPong - case object Pong extends PingPong - case object Stop extends PingPong - - case class Serve(player: ActorRef) - - // ----------------------------------------------- - // Actors - // ----------------------------------------------- - - class PingActor extends Actor with Serializable { - var pong: ActorRef = _ - var play = true - - def receive = { - case Pong ⇒ - if (play) { - println("---->> PING") - pong ! Ping - } else { - println("---->> GAME OVER") - } - case Serve(player) ⇒ - pong = player - println("---->> SERVE") - pong ! Ping - case Stop ⇒ - play = false - } - } - - class PongActor extends Actor with Serializable { - def receive = { - case Ping ⇒ - println("---->> PONG") - reply(Pong) - } - } -} - -/* -object PingPongMultiJvmNode1 { - import PingPong._ - import BinaryFormats._ - - val PingService = classOf[PingActor].getName - val PongService = classOf[PongActor].getName - - def main(args: Array[String]) { run } - - def run = { - // ----------------------------------------------- - // Start monitoring - // ----------------------------------------------- - - //MonitoringServer.start - //Monitoring.startLocalDaemons - - // ----------------------------------------------- - // Start cluster - // ----------------------------------------------- - - Cluster.startLocalCluster() - - // create node - val node = Cluster.newNode(NodeAddress(ClusterName, "node1", port = 9991)) - - def pause(name: String, message: String) = { - node.barrier("user-prompt-" + name, NrOfNodes, PauseTimeout) { - println(message) - if (Pause) { - println("Press enter to continue (timeout of %s) ..." 
format PauseTimeout) - System.in.read - } - } - } - - pause("start", "Ready to start all nodes") - println("Starting nodes ...") - - Cluster.node.start() - - node.barrier("start", NrOfNodes) { - // wait for others to start - } - - // ----------------------------------------------- - // Store pong actors in the cluster - // ----------------------------------------------- - - pause("create", "Ready to create all actors") - println("Creating actors ...") - - // store the ping actor in the cluster, but do not deploy it anywhere - node.store(classOf[PingActor], PING_ADDRESS) - - // store the pong actor in the cluster and replicate it on all nodes - node.store(classOf[PongActor], PONG_ADDRESS, NrOfNodes) - - // give some time for the deployment - Thread.sleep(3000) - - // ----------------------------------------------- - // Get actor references - // ----------------------------------------------- - - // check out a local ping actor - val ping = node.use[PingActor](PING_ADDRESS).head - - // get a reference to all the pong actors through a round-robin router actor ref - val pong = node.ref(PONG_ADDRESS, router = Router.RoundRobin) - - // ----------------------------------------------- - // Play the game - // ----------------------------------------------- - - pause("play", "Ready to play ping pong") - - ping ! Serve(pong) - - // let them play for 3 seconds - Thread.sleep(3000) - - ping ! Stop - - // give some time for the game to finish - Thread.sleep(3000) - - // ----------------------------------------------- - // Stop actors - // ----------------------------------------------- - - pause("stop", "Ready to stop actors") - println("Stopping actors ...") - - ping.stop - pong.stop - - // give remote actors time to stop - Thread.sleep(5000) - - // ----------------------------------------------- - // Stop everything - // ----------------------------------------------- - - pause("shutdown", "Ready to shutdown") - println("Stopping everything ...") - - //Monitoring.stopLocalDaemons - //MonitoringServer.stop - - Actor.remote.shutdown - Actor.registry.local.shutdownAll - - node.stop - - Cluster.shutdownLocalCluster - } -} - -object PingPongMultiJvmNode2 extends PongNode(2) -object PingPongMultiJvmNode3 extends PongNode(3) -object PingPongMultiJvmNode4 extends PongNode(4) -object PingPongMultiJvmNode5 extends PongNode(5) - -class PongNode(number: Int) { - import PingPong._ - - def main(args: Array[String]) { run } - - def run = { - val node = Cluster.newNode(NodeAddress(ClusterName, "node" + number, port = 9990 + number)) - - def pause(name: String) = { - node.barrier("user-prompt-" + name, NrOfNodes, PauseTimeout) { - // wait for user prompt - } - } - - pause("start") - - node.barrier("start", NrOfNodes) { - Cluster.node.start() - } - - pause("create") - - pause("play") - - pause("stop") - - pause("shutdown") - - // clean up and stop - - Actor.remote.shutdown - Actor.registry.local.shutdownAll - - node.stop - } -} -*/ diff --git a/akka-remote/src/test/scala/akka/remote/AccrualFailureDetectorSpec.scala b/akka-cluster/src/test/scala/akka/cluster/AccrualFailureDetectorSpec.scala similarity index 99% rename from akka-remote/src/test/scala/akka/remote/AccrualFailureDetectorSpec.scala rename to akka-cluster/src/test/scala/akka/cluster/AccrualFailureDetectorSpec.scala index cffc424408..d02199f703 100644 --- a/akka-remote/src/test/scala/akka/remote/AccrualFailureDetectorSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/AccrualFailureDetectorSpec.scala @@ -1,4 +1,4 @@ -package akka.remote +package 
akka.cluster import java.net.InetSocketAddress import akka.testkit.AkkaSpec diff --git a/akka-cluster/src/test/scala/akka/cluster/AsynchronousTransactionLogSpec.scala b/akka-cluster/src/test/scala/akka/cluster/AsynchronousTransactionLogSpec.scala deleted file mode 100644 index 0d26befc4e..0000000000 --- a/akka-cluster/src/test/scala/akka/cluster/AsynchronousTransactionLogSpec.scala +++ /dev/null @@ -1,230 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ -package akka.cluster - -import org.apache.bookkeeper.client.BookKeeper -import org.scalatest.WordSpec -import org.scalatest.matchers.MustMatchers -import org.scalatest.BeforeAndAfterAll - -import akka.actor._ -import akka.event.EventHandler -import akka.testkit.{ EventFilter, TestEvent } - -import com.eaio.uuid.UUID - -class AsynchronousTransactionLogSpec extends WordSpec with MustMatchers with BeforeAndAfterAll { - private var bookKeeper: BookKeeper = _ - private var localBookKeeper: LocalBookKeeper = _ - - "An asynchronous Transaction Log" should { - "be able to record entries - asynchronous" in { - val uuid = (new UUID).toString - val txlog = TransactionLog.newLogFor(uuid, true, null) - val entry = "hello".getBytes("UTF-8") - txlog.recordEntry(entry) - Thread.sleep(200) - txlog.close - } - - "be able to be deleted - asynchronous" in { - val uuid = (new UUID).toString - val txlog = TransactionLog.newLogFor(uuid, true, null) - val entry = "hello".getBytes("UTF-8") - txlog.recordEntry(entry) - - txlog.delete() - txlog.close() - - val zkClient = TransactionLog.zkClient - assert(zkClient.readData(txlog.snapshotPath, true) == null) - assert(zkClient.readData(txlog.txLogPath, true) == null) - } - - "be able to be checked for existence - asynchronous" in { - val uuid = (new UUID).toString - TransactionLog.exists(uuid) must be(false) - - TransactionLog.newLogFor(uuid, true, null) - TransactionLog.exists(uuid) must be(true) - } - - "fail to be opened if non existing - asynchronous" in { - EventHandler.notify(TestEvent.Mute(EventFilter[ReplicationException])) - val uuid = (new UUID).toString - intercept[ReplicationException](TransactionLog.logFor(uuid, true, null)) - EventHandler.notify(TestEvent.UnMuteAll) - } - - "be able to overwrite an existing txlog if one already exists - asynchronous" in { - val uuid = (new UUID).toString - val txlog1 = TransactionLog.newLogFor(uuid, true, null) - val entry = "hello".getBytes("UTF-8") - txlog1.recordEntry(entry) - txlog1.recordEntry(entry) - txlog1.close - - val txLog2 = TransactionLog.newLogFor(uuid, true, null) - txLog2.latestSnapshotId.isDefined must be(false) - txLog2.latestEntryId must be(-1) - } - - "be able to record and delete entries - asynchronous" in { - EventHandler.notify(TestEvent.Mute(EventFilter[ReplicationException])) - val uuid = (new UUID).toString - val txlog1 = TransactionLog.newLogFor(uuid, true, null) - Thread.sleep(200) - val entry = "hello".getBytes("UTF-8") - txlog1.recordEntry(entry) - Thread.sleep(200) - txlog1.recordEntry(entry) - Thread.sleep(200) - txlog1.delete - Thread.sleep(200) - intercept[ReplicationException](TransactionLog.logFor(uuid, true, null)) - EventHandler.notify(TestEvent.UnMuteAll) - } - - "be able to record entries and read entries with 'entriesInRange' - asynchronous" in { - val uuid = (new UUID).toString - val txlog1 = TransactionLog.newLogFor(uuid, true, null) - Thread.sleep(200) - val entry = "hello".getBytes("UTF-8") - txlog1.recordEntry(entry) - Thread.sleep(200) - txlog1.recordEntry(entry) - Thread.sleep(200) - txlog1.close - - val txlog2 = 
TransactionLog.logFor(uuid, true, null) - Thread.sleep(200) - val entries = txlog2.entriesInRange(0, 1).map(bytes ⇒ new String(bytes, "UTF-8")) - Thread.sleep(200) - entries.size must equal(2) - entries(0) must equal("hello") - entries(1) must equal("hello") - Thread.sleep(200) - txlog2.close - } - - "be able to record entries and read entries with 'entries' - asynchronous" in { - val uuid = (new UUID).toString - val txlog1 = TransactionLog.newLogFor(uuid, true, null) - Thread.sleep(200) - val entry = "hello".getBytes("UTF-8") - txlog1.recordEntry(entry) - Thread.sleep(200) - txlog1.recordEntry(entry) - Thread.sleep(200) - txlog1.recordEntry(entry) - Thread.sleep(200) - txlog1.recordEntry(entry) - Thread.sleep(200) - txlog1.close - - val txlog2 = TransactionLog.logFor(uuid, true, null) - val entries = txlog2.entries.map(bytes ⇒ new String(bytes, "UTF-8")) - Thread.sleep(200) - entries.size must equal(4) - entries(0) must equal("hello") - entries(1) must equal("hello") - entries(2) must equal("hello") - entries(3) must equal("hello") - Thread.sleep(200) - txlog2.close - } - - "be able to record a snapshot - asynchronous" in { - val uuid = (new UUID).toString - val txlog1 = TransactionLog.newLogFor(uuid, true, null) - Thread.sleep(200) - val snapshot = "snapshot".getBytes("UTF-8") - txlog1.recordSnapshot(snapshot) - Thread.sleep(200) - txlog1.close - } - - "be able to record and read a snapshot and following entries - asynchronous" in { - val uuid = (new UUID).toString - val txlog1 = TransactionLog.newLogFor(uuid, true, null) - Thread.sleep(200) - val snapshot = "snapshot".getBytes("UTF-8") - txlog1.recordSnapshot(snapshot) - Thread.sleep(200) - - val entry = "hello".getBytes("UTF-8") - txlog1.recordEntry(entry) - Thread.sleep(200) - txlog1.recordEntry(entry) - Thread.sleep(200) - txlog1.recordEntry(entry) - Thread.sleep(200) - txlog1.recordEntry(entry) - Thread.sleep(200) - txlog1.close - - val txlog2 = TransactionLog.logFor(uuid, true, null) - Thread.sleep(200) - val (snapshotAsBytes, entriesAsBytes) = txlog2.latestSnapshotAndSubsequentEntries - Thread.sleep(200) - new String(snapshotAsBytes.getOrElse(fail("No snapshot")), "UTF-8") must equal("snapshot") - - val entries = entriesAsBytes.map(bytes ⇒ new String(bytes, "UTF-8")) - Thread.sleep(200) - entries.size must equal(4) - entries(0) must equal("hello") - entries(1) must equal("hello") - entries(2) must equal("hello") - entries(3) must equal("hello") - Thread.sleep(200) - txlog2.close - } - - "be able to record entries then a snapshot then more entries - and then read from the snapshot and the following entries - asynchronous" in { - val uuid = (new UUID).toString - val txlog1 = TransactionLog.newLogFor(uuid, true, null) - Thread.sleep(200) - - val entry = "hello".getBytes("UTF-8") - txlog1.recordEntry(entry) - Thread.sleep(200) - txlog1.recordEntry(entry) - Thread.sleep(200) - txlog1.recordEntry(entry) - Thread.sleep(200) - - val snapshot = "snapshot".getBytes("UTF-8") - txlog1.recordSnapshot(snapshot) - Thread.sleep(200) - txlog1.recordEntry(entry) - Thread.sleep(200) - txlog1.recordEntry(entry) - Thread.sleep(200) - txlog1.close - - val txlog2 = TransactionLog.logFor(uuid, true, null) - Thread.sleep(200) - val (snapshotAsBytes, entriesAsBytes) = txlog2.latestSnapshotAndSubsequentEntries - Thread.sleep(200) - new String(snapshotAsBytes.getOrElse(fail("No snapshot")), "UTF-8") must equal("snapshot") - val entries = entriesAsBytes.map(bytes ⇒ new String(bytes, "UTF-8")) - Thread.sleep(200) - entries.size must equal(2) - entries(0) must 
equal("hello") - entries(1) must equal("hello") - Thread.sleep(200) - txlog2.close - } - } - - override def beforeAll() = { - LocalBookKeeperEnsemble.start() - TransactionLog.start() - } - - override def afterAll() = { - TransactionLog.shutdown() - LocalBookKeeperEnsemble.shutdown() - } -} diff --git a/akka-remote/src/test/scala/akka/remote/GossipingAccrualFailureDetectorSpec.scala b/akka-cluster/src/test/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala similarity index 99% rename from akka-remote/src/test/scala/akka/remote/GossipingAccrualFailureDetectorSpec.scala rename to akka-cluster/src/test/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala index 1e954b34fb..6366a9f65e 100644 --- a/akka-remote/src/test/scala/akka/remote/GossipingAccrualFailureDetectorSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/GossipingAccrualFailureDetectorSpec.scala @@ -1,7 +1,7 @@ // /** // * Copyright (C) 2009-2011 Typesafe Inc. // */ -// package akka.remote +// package akka.cluster // import java.net.InetSocketAddress diff --git a/akka-cluster/src/test/scala/akka/cluster/SynchronousTransactionLogSpec.scala b/akka-cluster/src/test/scala/akka/cluster/SynchronousTransactionLogSpec.scala deleted file mode 100644 index 3dc58d6c9a..0000000000 --- a/akka-cluster/src/test/scala/akka/cluster/SynchronousTransactionLogSpec.scala +++ /dev/null @@ -1,190 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ -package akka.cluster - -import org.apache.bookkeeper.client.BookKeeper -import org.scalatest.WordSpec -import org.scalatest.matchers.MustMatchers -import org.scalatest.BeforeAndAfterAll - -import akka.actor._ -import akka.event.EventHandler -import akka.testkit.{ EventFilter, TestEvent } - -import com.eaio.uuid.UUID - -class SynchronousTransactionLogSpec extends WordSpec with MustMatchers with BeforeAndAfterAll { - private var bookKeeper: BookKeeper = _ - private var localBookKeeper: LocalBookKeeper = _ - - "A synchronous used Transaction Log" should { - - "be able to be deleted - synchronous" in { - val uuid = (new UUID).toString - val txlog = TransactionLog.newLogFor(uuid, false, null) - val entry = "hello".getBytes("UTF-8") - txlog.recordEntry(entry) - - txlog.delete() - txlog.close() - - val zkClient = TransactionLog.zkClient - assert(zkClient.readData(txlog.snapshotPath, true) == null) - assert(zkClient.readData(txlog.txLogPath, true) == null) - } - - "fail to be opened if non existing - synchronous" in { - EventHandler.notify(TestEvent.Mute(EventFilter[ReplicationException])) - val uuid = (new UUID).toString - intercept[ReplicationException](TransactionLog.logFor(uuid, false, null)) - EventHandler.notify(TestEvent.UnMuteAll) - } - - "be able to be checked for existence - synchronous" in { - val uuid = (new UUID).toString - TransactionLog.exists(uuid) must be(false) - - TransactionLog.newLogFor(uuid, false, null) - TransactionLog.exists(uuid) must be(true) - } - - "be able to record entries - synchronous" in { - val uuid = (new UUID).toString - val txlog = TransactionLog.newLogFor(uuid, false, null) - val entry = "hello".getBytes("UTF-8") - txlog.recordEntry(entry) - } - - "be able to overweite an existing txlog if one already exists - synchronous" in { - val uuid = (new UUID).toString - val txlog1 = TransactionLog.newLogFor(uuid, false, null) - val entry = "hello".getBytes("UTF-8") - txlog1.recordEntry(entry) - txlog1.recordEntry(entry) - txlog1.close - - val txLog2 = TransactionLog.newLogFor(uuid, false, null) - txLog2.latestSnapshotId.isDefined must be(false) - 
txLog2.latestEntryId must be(-1) - } - - "be able to record and delete entries - synchronous" in { - val uuid = (new UUID).toString - val txlog1 = TransactionLog.newLogFor(uuid, false, null) - val entry = "hello".getBytes("UTF-8") - txlog1.recordEntry(entry) - txlog1.recordEntry(entry) - txlog1.delete - txlog1.close - // intercept[ReplicationException](TransactionLog.logFor(uuid, false, null)) - } - - "be able to record entries and read entries with 'entriesInRange' - synchronous" in { - val uuid = (new UUID).toString - val txlog1 = TransactionLog.newLogFor(uuid, false, null) - val entry = "hello".getBytes("UTF-8") - txlog1.recordEntry(entry) - txlog1.recordEntry(entry) - txlog1.close - - val txlog2 = TransactionLog.logFor(uuid, false, null) - val entries = txlog2.entriesInRange(0, 1).map(bytes ⇒ new String(bytes, "UTF-8")) - entries.size must equal(2) - entries(0) must equal("hello") - entries(1) must equal("hello") - txlog2.close - } - - "be able to record entries and read entries with 'entries' - synchronous" in { - val uuid = (new UUID).toString - val txlog1 = TransactionLog.newLogFor(uuid, false, null) - val entry = "hello".getBytes("UTF-8") - txlog1.recordEntry(entry) - txlog1.recordEntry(entry) - txlog1.recordEntry(entry) - txlog1.recordEntry(entry) - txlog1.close // should work without txlog.close - - val txlog2 = TransactionLog.logFor(uuid, false, null) - val entries = txlog2.entries.map(bytes ⇒ new String(bytes, "UTF-8")) - entries.size must equal(4) - entries(0) must equal("hello") - entries(1) must equal("hello") - entries(2) must equal("hello") - entries(3) must equal("hello") - txlog2.close - } - - "be able to record a snapshot - synchronous" in { - val uuid = (new UUID).toString - val txlog1 = TransactionLog.newLogFor(uuid, false, null) - val snapshot = "snapshot".getBytes("UTF-8") - txlog1.recordSnapshot(snapshot) - txlog1.close - } - - "be able to record and read a snapshot and following entries - synchronous" in { - val uuid = (new UUID).toString - val txlog1 = TransactionLog.newLogFor(uuid, false, null) - val snapshot = "snapshot".getBytes("UTF-8") - txlog1.recordSnapshot(snapshot) - - val entry = "hello".getBytes("UTF-8") - txlog1.recordEntry(entry) - txlog1.recordEntry(entry) - txlog1.recordEntry(entry) - txlog1.recordEntry(entry) - txlog1.close - - val txlog2 = TransactionLog.logFor(uuid, false, null) - val (snapshotAsBytes, entriesAsBytes) = txlog2.latestSnapshotAndSubsequentEntries - new String(snapshotAsBytes.getOrElse(fail("No snapshot")), "UTF-8") must equal("snapshot") - - val entries = entriesAsBytes.map(bytes ⇒ new String(bytes, "UTF-8")) - entries.size must equal(4) - entries(0) must equal("hello") - entries(1) must equal("hello") - entries(2) must equal("hello") - entries(3) must equal("hello") - txlog2.close - } - - "be able to record entries then a snapshot then more entries - and then read from the snapshot and the following entries - synchronous" in { - val uuid = (new UUID).toString - val txlog1 = TransactionLog.newLogFor(uuid, false, null) - - val entry = "hello".getBytes("UTF-8") - txlog1.recordEntry(entry) - txlog1.recordEntry(entry) - txlog1.recordEntry(entry) - - val snapshot = "snapshot".getBytes("UTF-8") - txlog1.recordSnapshot(snapshot) - - txlog1.recordEntry(entry) - txlog1.recordEntry(entry) - txlog1.close - - val txlog2 = TransactionLog.logFor(uuid, false, null) - val (snapshotAsBytes, entriesAsBytes) = txlog2.latestSnapshotAndSubsequentEntries - new String(snapshotAsBytes.getOrElse(fail("No snapshot")), "UTF-8") must equal("snapshot") - - 
val entries = entriesAsBytes.map(bytes ⇒ new String(bytes, "UTF-8")) - entries.size must equal(2) - entries(0) must equal("hello") - entries(1) must equal("hello") - txlog2.close - } - } - - override def beforeAll() = { - LocalBookKeeperEnsemble.start() - TransactionLog.start() - } - - override def afterAll() = { - TransactionLog.shutdown() - LocalBookKeeperEnsemble.shutdown() - } -} diff --git a/akka-remote/src/test/scala/akka/remote/VectorClockSpec.scala b/akka-cluster/src/test/scala/akka/cluster/VectorClockSpec.scala similarity index 99% rename from akka-remote/src/test/scala/akka/remote/VectorClockSpec.scala rename to akka-cluster/src/test/scala/akka/cluster/VectorClockSpec.scala index 03e4109423..df9cead7f8 100644 --- a/akka-remote/src/test/scala/akka/remote/VectorClockSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/VectorClockSpec.scala @@ -1,4 +1,4 @@ -package akka.remote +package akka.cluster import java.net.InetSocketAddress import akka.testkit.AkkaSpec diff --git a/akka-cluster/src/test/scala/akka/cluster/sample/ClusteredPingPongSample.scala b/akka-cluster/src/test/scala/akka/cluster/sample/ClusteredPingPongSample.scala deleted file mode 100644 index c242185450..0000000000 --- a/akka-cluster/src/test/scala/akka/cluster/sample/ClusteredPingPongSample.scala +++ /dev/null @@ -1,134 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -package akka.cluster.sample - -import akka.cluster._ - -import akka.actor._ -import akka.actor.Actor._ - -import java.util.concurrent.CountDownLatch - -object PingPong { - val PING_ADDRESS = "ping" - val PONG_ADDRESS = "pong" - - val NrOfPings = 5 - - // ------------------------ - // Messages - // ------------------------ - - sealed trait PingPong extends Serializable - case object Ball extends PingPong - case object Stop extends PingPong - case class Latch(latch: CountDownLatch) extends PingPong - - // ------------------------ - // Actors - // ------------------------ - - class PingActor extends Actor with Serializable { - var count = 0 - var gameOverLatch: CountDownLatch = _ - - def receive = { - case Ball ⇒ - if (count < NrOfPings) { - println("---->> PING (%s)" format count) - count += 1 - reply(Ball) - } else { - sender.foreach(s ⇒ (s ? 
Stop).await) - gameOverLatch.countDown - self.stop - } - case Latch(latch) ⇒ - gameOverLatch = latch - } - } - - class PongActor extends Actor with Serializable { - def receive = { - case Ball ⇒ - reply(Ball) - case Stop ⇒ - reply(Stop) - self.stop - } - } -} - -/* -object ClusteredPingPongSample { - import PingPong._ - import BinaryFormats._ - - val CLUSTER_NAME = "test-cluster" - - def main(args: Array[String]) = run - - def run = { - - // ------------------------ - // Start cluster of 5 nodes - // ------------------------ - - Cluster.startLocalCluster() - val localNode = Cluster.newNode(NodeAddress(CLUSTER_NAME, "node0", port = 9991)).start - val remoteNodes = Cluster.newNode(NodeAddress(CLUSTER_NAME, "node1", port = 9992)).start :: - Cluster.newNode(NodeAddress(CLUSTER_NAME, "node2", port = 9993)).start :: - Cluster.newNode(NodeAddress(CLUSTER_NAME, "node3", port = 9994)).start :: - Cluster.newNode(NodeAddress(CLUSTER_NAME, "node4", port = 9995)).start :: Nil - - // ------------------------ - // Store the actors in the cluster - // ------------------------ - - // Store the PingActor in the cluster, but do not deploy it anywhere - localNode.store(classOf[PingActor], PING_ADDRESS) - - // Store the PongActor in the cluster and deploy it - // to 5 (replication factor) nodes in the cluster - localNode.store(classOf[PongActor], PONG_ADDRESS, 5) - - Thread.sleep(1000) // let the deployment finish - - // ------------------------ - // Get the actors from the cluster - // ------------------------ - - // Check out a local PingActor instance (not reference) - val ping = localNode.use[PingActor](PING_ADDRESS).head - - // Get a reference to all the pong actors through a round-robin router ActorRef - val pong = localNode.ref(PONG_ADDRESS, router = Router.RoundRobin) - - // ------------------------ - // Play the game - // ------------------------ - - val latch = new CountDownLatch(1) - ping ! Latch(latch) // register latch for actor to know when to stop - - println("---->> SERVE") - - implicit val replyTo = Some(pong) // set the reply address to the PongActor - ping ! Ball // serve - - latch.await // wait for game to finish - - println("---->> GAME OVER") - - // ------------------------ - // Clean up - // ------------------------ - - localNode.stop - remoteNodes.foreach(_.stop) - Cluster.shutdownLocalCluster() - } -} -*/ diff --git a/akka-cluster/src/test/scala/akka/cluster/sample/ComputeGridSample.scala b/akka-cluster/src/test/scala/akka/cluster/sample/ComputeGridSample.scala deleted file mode 100644 index daf817872e..0000000000 --- a/akka-cluster/src/test/scala/akka/cluster/sample/ComputeGridSample.scala +++ /dev/null @@ -1,91 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. 
- */ - -package akka.cluster.sample - -import akka.cluster._ -import akka.dispatch.Futures - -object ComputeGridSample { - //sample.cluster.ComputeGridSample.fun2 - - // FIXME rewrite as multi-jvm test - - /* - // run all - def run { - fun1 - fun2 - fun3 - fun4 - } - - // Send Function0[Unit] - def fun1 = { - Cluster.startLocalCluster() - val node = Cluster newNode (NodeAddress("test", "local", port = 9991)) start - val remote1 = Cluster newNode (NodeAddress("test", "remote1", port = 9992)) start - - Thread.sleep(100) - val fun = () ⇒ println("=============>>> AKKA ROCKS <<<=============") - node send (fun, 2) // send and invoke function on to two cluster nodes - - node.stop - remote1.stop - Cluster.shutdownLocalCluster() - } - - // Send Function0[Any] - def fun2 = { - Cluster.startLocalCluster() - val local = Cluster newNode (NodeAddress("test", "local", port = 9991)) start - val remote1 = Cluster newNode (NodeAddress("test", "remote1", port = 9992)) start - - Thread.sleep(100) - val fun = () ⇒ "AKKA ROCKS" - val futures = local send (fun, 2) // send and invoke function on to two cluster nodes and get result - - val result = Await.sync(Futures.fold("")(futures)(_ + " - " + _), timeout) - println("===================>>> Cluster says [" + result + "]") - - local.stop - remote1.stop - Cluster.shutdownLocalCluster() - } - - // Send Function1[Any, Unit] - def fun3 = { - Cluster.startLocalCluster() - val local = Cluster newNode (NodeAddress("test", "local", port = 9991)) start - val remote1 = Cluster newNode (NodeAddress("test", "remote1", port = 9992)) start - - val fun = ((s: String) ⇒ println("=============>>> " + s + " <<<=============")).asInstanceOf[Function1[Any, Unit]] - local send (fun, "AKKA ROCKS", 2) // send and invoke function on to two cluster nodes - - local.stop - remote1.stop - Cluster.shutdownLocalCluster() - } - - // Send Function1[Any, Any] - def fun4 = { - Cluster.startLocalCluster() - val local = Cluster newNode (NodeAddress("test", "local", port = 9991)) start - val remote1 = Cluster newNode (NodeAddress("test", "remote1", port = 9992)) start - - val fun = ((i: Int) ⇒ i * i).asInstanceOf[Function1[Any, Any]] - - val future1 = local send (fun, 2, 1) head // send and invoke function on one cluster node and get result - val future2 = local send (fun, 2, 1) head // send and invoke function on one cluster node and get result - - // grab the result from the first one that returns - val result = Await.sync(Futures.firstCompletedOf(List(future1, future2)), timeout) - println("===================>>> Cluster says [" + result + "]") - - local.stop - remote1.stop - Cluster.shutdownLocalCluster() - } - */ -} diff --git a/akka-cluster/src/test/scala/akka/cluster/storage/InMemoryStorageSpec.scala b/akka-cluster/src/test/scala/akka/cluster/storage/InMemoryStorageSpec.scala deleted file mode 100644 index 762b189bd2..0000000000 --- a/akka-cluster/src/test/scala/akka/cluster/storage/InMemoryStorageSpec.scala +++ /dev/null @@ -1,241 +0,0 @@ -package akka.cluster.storage - -import org.scalatest.matchers.MustMatchers -import org.scalatest.WordSpec -import akka.cluster.storage.StorageTestUtils._ - -class InMemoryStorageSpec extends WordSpec with MustMatchers { - - "unversioned load" must { - "throw MissingDataException if non existing key" in { - val store = new InMemoryStorage() - - try { - store.load("foo") - fail() - } catch { - case e: MissingDataException ⇒ - } - } - - "return VersionedData if key existing" in { - val storage = new InMemoryStorage() - val key = "somekey" - val value = 
"somevalue".getBytes - storage.insert(key, value) - - val result = storage.load(key) - //todo: strange that the implicit store is not found - assertContent(key, value, result.version)(storage) - } - } - - "exist" must { - "return true if value exists" in { - val store = new InMemoryStorage() - val key = "somekey" - store.insert(key, "somevalue".getBytes) - store.exists(key) must be(true) - } - - "return false if value not exists" in { - val store = new InMemoryStorage() - store.exists("somekey") must be(false) - } - } - - "versioned load" must { - "throw MissingDataException if non existing key" in { - val store = new InMemoryStorage() - - try { - store.load("foo", 1) - fail() - } catch { - case e: MissingDataException ⇒ - } - } - - "return VersionedData if key existing and exact version match" in { - val storage = new InMemoryStorage() - val key = "somekey" - val value = "somevalue".getBytes - val storedVersion = storage.insert(key, value) - - val loaded = storage.load(key, storedVersion) - assert(loaded.version == storedVersion) - org.junit.Assert.assertArrayEquals(value, loaded.data) - } - - "throw BadVersionException is version too new" in { - val storage = new InMemoryStorage() - val key = "somekey" - val value = "somevalue".getBytes - val version = storage.insert(key, value) - - try { - storage.load(key, version + 1) - fail() - } catch { - case e: BadVersionException ⇒ - } - } - - "throw BadVersionException is version too old" in { - val storage = new InMemoryStorage() - val key = "somekey" - val value = "somevalue".getBytes - val version = storage.insert(key, value) - - try { - storage.load(key, version - 1) - fail() - } catch { - case e: BadVersionException ⇒ - } - } - } - - "insert" must { - - "place a new value when non previously existed" in { - val storage = new InMemoryStorage() - val key = "somekey" - val oldValue = "oldvalue".getBytes - storage.insert(key, oldValue) - - val result = storage.load(key) - assertContent(key, oldValue)(storage) - assert(InMemoryStorage.InitialVersion == result.version) - } - - "throw MissingDataException when there already exists an entry with the same key" in { - val storage = new InMemoryStorage() - val key = "somekey" - val initialValue = "oldvalue".getBytes - val initialVersion = storage.insert(key, initialValue) - - val newValue = "newValue".getBytes - - try { - storage.insert(key, newValue) - fail() - } catch { - case e: DataExistsException ⇒ - } - - assertContent(key, initialValue, initialVersion)(storage) - } - } - - "update" must { - - "throw MissingDataException when no node exists" in { - val storage = new InMemoryStorage() - - val key = "somekey" - - try { - storage.update(key, "somevalue".getBytes, 1) - fail() - } catch { - case e: MissingDataException ⇒ - } - } - - "replace if previous value exists and no other updates have been done" in { - val storage = new InMemoryStorage() - - //do the initial insert - val key = "foo" - val oldValue = "insert".getBytes - val initialVersion = storage.insert(key, oldValue) - - //do the update the will be the cause of the conflict. - val newValue: Array[Byte] = "update".getBytes - val newVersion = storage.update(key, newValue, initialVersion) - - assertContent(key, newValue, newVersion)(storage) - } - - "throw BadVersionException when already overwritten" in { - val storage = new InMemoryStorage() - - //do the initial insert - val key = "foo" - val oldValue = "insert".getBytes - val initialVersion = storage.insert(key, oldValue) - - //do the update the will be the cause of the conflict. 
- val newValue = "otherupdate".getBytes - val newVersion = storage.update(key, newValue, initialVersion) - - try { - storage.update(key, "update".getBytes, initialVersion) - fail() - } catch { - case e: BadVersionException ⇒ - } - - assertContent(key, newValue, newVersion)(storage) - } - } - - "overwrite" must { - - "throw MissingDataException when no node exists" in { - val storage = new InMemoryStorage() - val key = "somekey" - - try { - storage.overwrite(key, "somevalue".getBytes) - fail() - } catch { - case e: MissingDataException ⇒ - } - - storage.exists(key) must be(false) - } - - "succeed if previous value exists" in { - val storage = new InMemoryStorage() - val key = "somekey" - val oldValue = "oldvalue".getBytes - val newValue = "somevalue".getBytes - - val initialVersion = storage.insert(key, oldValue) - val overwriteVersion = storage.overwrite(key, newValue) - - assert(overwriteVersion == initialVersion + 1) - assertContent(key, newValue, overwriteVersion)(storage) - } - } - - "insertOrOverwrite" must { - "insert if nothing was inserted before" in { - val storage = new InMemoryStorage() - val key = "somekey" - val value = "somevalue".getBytes - - val version = storage.insertOrOverwrite(key, value) - - assert(version == InMemoryStorage.InitialVersion) - assertContent(key, value, version)(storage) - } - - "overwrite if something existed before" in { - val storage = new InMemoryStorage() - val key = "somekey" - val oldValue = "oldvalue".getBytes - val newValue = "somevalue".getBytes - - val initialVersion = storage.insert(key, oldValue) - - val overwriteVersion = storage.insertOrOverwrite(key, newValue) - - assert(overwriteVersion == initialVersion + 1) - assertContent(key, newValue, overwriteVersion)(storage) - } - } - -} diff --git a/akka-cluster/src/test/scala/akka/cluster/storage/StorageTestUtils.scala b/akka-cluster/src/test/scala/akka/cluster/storage/StorageTestUtils.scala deleted file mode 100644 index 71ad994356..0000000000 --- a/akka-cluster/src/test/scala/akka/cluster/storage/StorageTestUtils.scala +++ /dev/null @@ -1,15 +0,0 @@ -package akka.cluster.storage - -object StorageTestUtils { - - def assertContent(key: String, expectedData: Array[Byte], expectedVersion: Long)(implicit storage: Storage) { - val found = storage.load(key) - assert(found.version == expectedVersion, "versions should match, found[" + found.version + "], expected[" + expectedVersion + "]") - org.junit.Assert.assertArrayEquals(expectedData, found.data) - } - - def assertContent(key: String, expectedData: Array[Byte])(implicit storage: Storage) { - val found = storage.load(key) - org.junit.Assert.assertArrayEquals(expectedData, found.data) - } -} diff --git a/akka-cluster/src/test/scala/akka/cluster/storage/ZooKeeperStorageSpec.scala b/akka-cluster/src/test/scala/akka/cluster/storage/ZooKeeperStorageSpec.scala deleted file mode 100644 index 8767ccf88e..0000000000 --- a/akka-cluster/src/test/scala/akka/cluster/storage/ZooKeeperStorageSpec.scala +++ /dev/null @@ -1,132 +0,0 @@ -// package akka.cluster.storage - -// import org.scalatest.matchers.MustMatchers -// import akka.actor.Actor -// import org.scalatest.{ BeforeAndAfterEach, BeforeAndAfterAll, WordSpec } -// import org.I0Itec.zkclient.ZkServer -// //import zookeeper.AkkaZkClient -// import akka.cluster.storage.StorageTestUtils._ -// import java.io.File -// import java.util.concurrent.atomic.AtomicLong - -// class ZooKeeperStorageSpec extends WordSpec with MustMatchers with BeforeAndAfterAll with BeforeAndAfterEach { -// val dataPath = 
"_akka_cluster/data" -// val logPath = "_akka_cluster/log" -// var zkServer: ZkServer = _ -// //var zkClient: AkkaZkClient = _ -// val idGenerator = new AtomicLong - -// def generateKey: String = { -// "foo" + idGenerator.incrementAndGet() -// } - -// override def beforeAll() { -// /*new File(dataPath).delete() -// new File(logPath).delete() - -// try { -// zkServer = Cluster.startLocalCluster(dataPath, logPath) -// Thread.sleep(5000) -// Actor.cluster.start() -// zkClient = Cluster.newZkClient() -// } catch { -// case e ⇒ e.printStackTrace() -// }*/ -// } - -// override def afterAll() { -// /*zkClient.close() -// Actor.cluster.shutdown() -// ClusterDeployer.shutdown() -// Cluster.shutdownLocalCluster() -// Actor.registry.local.shutdownAll() */ -// } - -// /* -// "unversioned load" must { -// "throw MissingDataException if non existing key" in { -// val storage = new ZooKeeperStorage(zkClient) - -// try { -// storage.load(generateKey) -// fail() -// } catch { -// case e: MissingDataException ⇒ -// } -// } - -// "return VersionedData if key existing" in { -// val storage = new ZooKeeperStorage(zkClient) -// val key = generateKey -// val value = "somevalue".getBytes -// storage.insert(key, value) - -// val result = storage.load(key) -// //todo: strange that the implicit store is not found -// assertContent(key, value, result.version)(storage) -// } -// } */ - -// /*"overwrite" must { - -// "throw MissingDataException when there doesn't exist an entry to overwrite" in { -// val storage = new ZooKeeperStorage(zkClient) -// val key = generateKey -// val value = "value".getBytes - -// try { -// storage.overwrite(key, value) -// fail() -// } catch { -// case e: MissingDataException ⇒ -// } - -// assert(!storage.exists(key)) -// } - -// "overwrite if there is an existing value" in { -// val storage = new ZooKeeperStorage(zkClient) -// val key = generateKey -// val oldValue = "oldvalue".getBytes - -// storage.insert(key, oldValue) -// val newValue = "newValue".getBytes - -// val result = storage.overwrite(key, newValue) -// //assertContent(key, newValue, result.version)(storage) -// } -// } - -// "insert" must { - -// "place a new value when non previously existed" in { -// val storage = new ZooKeeperStorage(zkClient) -// val key = generateKey -// val oldValue = "oldvalue".getBytes -// storage.insert(key, oldValue) - -// val result = storage.load(key) -// assertContent(key, oldValue)(storage) -// assert(InMemoryStorage.InitialVersion == result.version) -// } - -// "throw DataExistsException when there already exists an entry with the same key" in { -// val storage = new ZooKeeperStorage(zkClient) -// val key = generateKey -// val oldValue = "oldvalue".getBytes - -// val initialVersion = storage.insert(key, oldValue) -// val newValue = "newValue".getBytes - -// try { -// storage.insert(key, newValue) -// fail() -// } catch { -// case e: DataExistsException ⇒ -// } - -// assertContent(key, oldValue, initialVersion)(storage) -// } -// } */ - -// } diff --git a/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala b/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala index a169f9e9b5..4ef079457a 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala @@ -57,10 +57,6 @@ class RemoteActorRefProvider( def tempPath() = local.tempPath() def tempContainer = local.tempContainer - @volatile - private var _failureDetector: AccrualFailureDetector = _ - def failureDetector: 
AccrualFailureDetector = _failureDetector - @volatile private var _transport: RemoteTransport = _ def transport: RemoteTransport = _transport @@ -80,8 +76,6 @@ class RemoteActorRefProvider( def init(system: ActorSystemImpl) { local.init(system) - _failureDetector = new AccrualFailureDetector(remoteSettings.FailureDetectorThreshold, remoteSettings.FailureDetectorMaxSampleSize, system) - _remoteDaemon = new RemoteSystemDaemon(system, rootPath / "remote", rootGuardian, log) local.registerExtraNames(Map(("remote", remoteDaemon))) diff --git a/akka-remote/src/test/resources/log4j.properties b/akka-remote/src/test/resources/log4j.properties deleted file mode 100644 index 2d07c8e051..0000000000 --- a/akka-remote/src/test/resources/log4j.properties +++ /dev/null @@ -1,58 +0,0 @@ -# Define some default values that can be overridden by system properties -zookeeper.root.logger=INFO, CONSOLE -zookeeper.console.threshold=OFF -zookeeper.log.dir=. -zookeeper.log.file=zookeeper.log -zookeeper.log.threshold=DEBUG -zookeeper.tracelog.dir=. -zookeeper.tracelog.file=zookeeper_trace.log - -# -# ZooKeeper Logging Configuration -# - -# Format is "<default threshold> (, <appender>)+ - -# DEFAULT: console appender only -log4j.rootLogger=${zookeeper.root.logger} - -# Example with rolling log file -#log4j.rootLogger=DEBUG, CONSOLE, ROLLINGFILE - -# Example with rolling log file and tracing -#log4j.rootLogger=TRACE, CONSOLE, ROLLINGFILE, TRACEFILE - -# -# Log INFO level and above messages to the console -# -log4j.appender.CONSOLE=org.apache.log4j.ConsoleAppender -log4j.appender.CONSOLE.Threshold=${zookeeper.console.threshold} -log4j.appender.CONSOLE.layout=org.apache.log4j.PatternLayout -log4j.appender.CONSOLE.layout.ConversionPattern=%d{ISO8601} [myid:%X{myid}] - %-5p [%t:%C{1}@%L] - %m%n - -# -# Add ROLLINGFILE to rootLogger to get log file output -# Log DEBUG level and above messages to a log file -log4j.appender.ROLLINGFILE=org.apache.log4j.RollingFileAppender -log4j.appender.ROLLINGFILE.Threshold=${zookeeper.log.threshold} -log4j.appender.ROLLINGFILE.File=${zookeeper.log.dir}/${zookeeper.log.file} - -# Max log file size of 10MB -log4j.appender.ROLLINGFILE.MaxFileSize=10MB -# uncomment the next line to limit number of backup files -#log4j.appender.ROLLINGFILE.MaxBackupIndex=10 - -log4j.appender.ROLLINGFILE.layout=org.apache.log4j.PatternLayout -log4j.appender.ROLLINGFILE.layout.ConversionPattern=%d{ISO8601} [myid:%X{myid}] - %-5p [%t:%C{1}@%L] - %m%n - - -# -# Add TRACEFILE to rootLogger to get log file output -# Log DEBUG level and above messages to a log file -log4j.appender.TRACEFILE=org.apache.log4j.FileAppender -log4j.appender.TRACEFILE.Threshold=TRACE -log4j.appender.TRACEFILE.File=${zookeeper.tracelog.dir}/${zookeeper.tracelog.file} - -log4j.appender.TRACEFILE.layout=org.apache.log4j.PatternLayout -### Notice we are including log4j's NDC here (%x) -log4j.appender.TRACEFILE.layout.ConversionPattern=%d{ISO8601} [myid:%X{myid}] - %-5p [%t:%C{1}@%L][%x] - %m%n diff --git a/akka-remote/src/test/resources/logback-test.xml b/akka-remote/src/test/resources/logback-test.xml deleted file mode 100644 index 240a412687..0000000000 --- a/akka-remote/src/test/resources/logback-test.xml +++ /dev/null @@ -1,26 +0,0 @@ - - - - - - - - - - - - - - - [%4p] [%d{ISO8601}] [%t] %c{1}: %m%n - - - - - - - - - - - diff --git a/akka-remote/src/test/resources/zoo.cfg b/akka-remote/src/test/resources/zoo.cfg deleted file mode 100644 index b71eadcc33..0000000000 --- a/akka-remote/src/test/resources/zoo.cfg +++ /dev/null @@ -1,12 +0,0 @@ -# The number of 
milliseconds of each tick -tickTime=2000 -# The number of ticks that the initial -# synchronization phase can take -initLimit=10 -# The number of ticks that can pass between -# sending a request and getting an acknowledgement -syncLimit=5 -# the directory where the snapshot is stored. -dataDir=/export/crawlspace/mahadev/zookeeper/server1/data -# the port at which the clients will connect -clientPort=2181 diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala index 9dada98416..a5c257ca84 100644 --- a/project/AkkaBuild.scala +++ b/project/AkkaBuild.scala @@ -31,7 +31,7 @@ object AkkaBuild extends Build { Unidoc.unidocExclude := Seq(samples.id, tutorials.id), Dist.distExclude := Seq(actorTests.id, akkaSbtPlugin.id, docs.id) ), - aggregate = Seq(actor, testkit, actorTests, remote, slf4j, agent, transactor, mailboxes, zeroMQ, kernel, akkaSbtPlugin, actorMigration, samples, tutorials, docs) + aggregate = Seq(actor, testkit, actorTests, remote, cluster, slf4j, agent, transactor, mailboxes, zeroMQ, kernel, akkaSbtPlugin, actorMigration, samples, tutorials, docs) ) lazy val actor = Project( @@ -86,6 +86,25 @@ object AkkaBuild extends Build { ) ) configs (MultiJvm) + lazy val cluster = Project( + id = "akka-cluster", + base = file("akka-cluster"), + dependencies = Seq(remote, remote % "test->test", testkit % "test->test"), + settings = defaultSettings ++ multiJvmSettings ++ schoirSettings ++ Seq( + libraryDependencies ++= Dependencies.cluster, + // disable parallel tests + parallelExecution in Test := false, + extraOptions in MultiJvm <<= (sourceDirectory in MultiJvm) { src => + (name: String) => (src ** (name + ".conf")).get.headOption.map("-Dakka.config=" + _.absolutePath).toSeq + }, + scalatestOptions in MultiJvm := Seq("-r", "org.scalatest.akka.QuietReporter"), + jvmOptions in MultiJvm := { + if (getBoolean("sbt.log.noformat")) Seq("-Dakka.test.nocolor=true") else Nil + }, + test in Test <<= (test in Test) dependsOn (test in MultiJvm) + ) + ) configs (MultiJvm) + lazy val slf4j = Project( id = "akka-slf4j", base = file("akka-slf4j"), @@ -301,7 +320,7 @@ object AkkaBuild extends Build { lazy val docs = Project( id = "akka-docs", base = file("akka-docs"), - dependencies = Seq(actor, testkit % "test->test", remote, slf4j, agent, transactor, fileMailbox, mongoMailbox, redisMailbox, beanstalkMailbox, zookeeperMailbox), + dependencies = Seq(actor, testkit % "test->test", remote, cluster, slf4j, agent, transactor, fileMailbox, mongoMailbox, redisMailbox, beanstalkMailbox, zookeeperMailbox), settings = defaultSettings ++ Seq( unmanagedSourceDirectories in Test <<= baseDirectory { _ ** "code" get }, libraryDependencies ++= Dependencies.docs, @@ -410,10 +429,7 @@ object Dependencies { Test.zookeeper, Test.log4j // needed for ZkBarrier in multi-jvm tests ) -// val cluster = Seq( -// bookkeeper, commonsCodec, commonsIo, guice, h2Lzf, jacksonCore, jacksonMapper, log4j, netty, -// protobuf, sjson, zkClient, zookeeper, zookeeperLock, Test.junit, Test.scalatest -// ) + val cluster = Seq(Test.junit, Test.scalatest) val slf4j = Seq(slf4jApi) From 0b59640820ae1586fe80248b8826abe242585521 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Tue, 31 Jan 2012 15:00:46 +0100 Subject: [PATCH 59/94] Fixed bunch of stuff based on feedback on pull request. Moved all cluster config to akka-cluster (and added test). 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- .../src/main/scala/akka/util/Duration.scala | 8 ++-- .../src/main/resources/reference.conf | 33 ++++++++++++++ .../akka/cluster/AccrualFailureDetector.scala | 4 +- .../scala/akka/cluster/ClusterSettings.scala | 26 +++++++++++ .../main/scala/akka/cluster/Gossiper.scala | 44 +++++++++---------- .../main/scala/akka/cluster/VectorClock.scala | 17 +++++-- .../akka/cluster/ClusterConfigSpec.scala | 35 +++++++++++++++ akka-remote/src/main/resources/reference.conf | 33 -------------- .../scala/akka/remote/RemoteSettings.scala | 16 ------- .../scala/akka/remote/RemoteConfigSpec.scala | 7 --- 10 files changed, 133 insertions(+), 90 deletions(-) create mode 100644 akka-cluster/src/main/resources/reference.conf create mode 100644 akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala create mode 100644 akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala diff --git a/akka-actor/src/main/scala/akka/util/Duration.scala b/akka-actor/src/main/scala/akka/util/Duration.scala index 312d733904..b276e4873c 100644 --- a/akka-actor/src/main/scala/akka/util/Duration.scala +++ b/akka-actor/src/main/scala/akka/util/Duration.scala @@ -42,10 +42,10 @@ case class Timer(timeout: Duration, throwExceptionOnTimeout: Boolean = false) { } } -case class Deadline(d: Duration) { - def +(other: Duration): Deadline = copy(d = d + other) - def -(other: Duration): Deadline = copy(d = d - other) - def -(other: Deadline): Duration = d - other.d +case class Deadline(time: Duration) { + def +(other: Duration): Deadline = copy(time = time + other) + def -(other: Duration): Deadline = copy(time = time - other) + def -(other: Deadline): Duration = time - other.time def timeLeft: Duration = this - Deadline.now } object Deadline { diff --git a/akka-cluster/src/main/resources/reference.conf b/akka-cluster/src/main/resources/reference.conf new file mode 100644 index 0000000000..749c138a26 --- /dev/null +++ b/akka-cluster/src/main/resources/reference.conf @@ -0,0 +1,33 @@ +###################################### +# Akka Cluster Reference Config File # +###################################### + +# This the reference config file has all the default settings. +# Make your edits/overrides in your application.conf. + +akka { + + cluster { + seed-nodes = [] + seed-node-connection-timeout = 30s + max-time-to-retry-joining-cluster = 30s + + # accrual failure detection config + failure-detector { + + # defines the failure detector threshold + # A low threshold is prone to generate many wrong suspicions but ensures + # a quick detection in the event of a real crash. Conversely, a high + # threshold generates fewer mistakes but needs more time to detect + # actual crashes + threshold = 8 + + max-sample-size = 1000 + } + + gossip { + initialDelay = 5s + frequency = 1s + } + } +} diff --git a/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala b/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala index 892f7a026d..379bf98a6b 100644 --- a/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala +++ b/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala @@ -23,7 +23,7 @@ import System.{ currentTimeMillis ⇒ newTimestamp } *

* Default threshold is 8, but can be configured in the Akka config. */ -class AccrualFailureDetector(val threshold: Int = 8, val maxSampleSize: Int = 1000, system: ActorSystem) { +class AccrualFailureDetector(system: ActorSystem, val threshold: Int = 8, val maxSampleSize: Int = 1000) { private final val PhiFactor = 1.0 / math.log(10.0) @@ -54,7 +54,7 @@ class AccrualFailureDetector(val threshold: Int = 8, val maxSampleSize: Int = 10 */ @tailrec final def heartbeat(connection: Address) { - log.info("Heartbeat from connection [{}] ", connection) + log.debug("Heartbeat from connection [{}] ", connection) val oldState = state.get val latestTimestamp = oldState.timestamps.get(connection) diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala new file mode 100644 index 0000000000..820290ea14 --- /dev/null +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala @@ -0,0 +1,26 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ +package akka.cluster + +import com.typesafe.config.Config +import akka.util.Duration +import java.util.concurrent.TimeUnit.MILLISECONDS +import akka.config.ConfigurationException +import scala.collection.JavaConverters._ +import akka.actor.Address +import akka.actor.AddressExtractor + +class ClusterSettings(val config: Config, val systemName: String) { + import config._ + // cluster config section + val FailureDetectorThreshold = getInt("akka.cluster.failure-detector.threshold") + val FailureDetectorMaxSampleSize = getInt("akka.cluster.failure-detector.max-sample-size") + val SeedNodeConnectionTimeout = Duration(config.getMilliseconds("akka.cluster.seed-node-connection-timeout"), MILLISECONDS) + val MaxTimeToRetryJoiningCluster = Duration(config.getMilliseconds("akka.cluster.max-time-to-retry-joining-cluster"), MILLISECONDS) + val InitialDelayForGossip = Duration(getMilliseconds("akka.cluster.gossip.initialDelay"), MILLISECONDS) + val GossipFrequency = Duration(getMilliseconds("akka.cluster.gossip.frequency"), MILLISECONDS) + val SeedNodes = Set.empty[Address] ++ getStringList("akka.cluster.seed-nodes").asScala.collect { + case AddressExtractor(addr) ⇒ addr + } +} diff --git a/akka-cluster/src/main/scala/akka/cluster/Gossiper.scala b/akka-cluster/src/main/scala/akka/cluster/Gossiper.scala index e234d6e158..c7b4e21773 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Gossiper.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Gossiper.scala @@ -127,29 +127,28 @@ case class Gossiper(remote: RemoteActorRefProvider, system: ActorSystemImpl) { currentGossip: Gossip, memberMembershipChangeListeners: Set[NodeMembershipChangeListener] = Set.empty[NodeMembershipChangeListener]) - // configuration - private val remoteSettings = remote.remoteSettings + val remoteSettings = new RemoteSettings(system.settings.config, system.name) + val clusterSettings = new ClusterSettings(system.settings.config, system.name) - private val protocol = "akka" // TODO should this be hardcoded? - private val address = remote.transport.address - private val memberFingerprint = address.## + val protocol = "akka" // TODO should this be hardcoded? 
+ val address = remote.transport.address - private val serialization = remote.serialization - private val failureDetector = new AccrualFailureDetector(remoteSettings.FailureDetectorThreshold, remoteSettings.FailureDetectorMaxSampleSize, system) - - private val initialDelayForGossip = remoteSettings.InitialDelayForGossip - private val gossipFrequency = remoteSettings.GossipFrequency - - implicit val seedNodeConnectionTimeout = remoteSettings.SeedNodeConnectionTimeout + val memberFingerprint = address.## + val initialDelayForGossip = clusterSettings.InitialDelayForGossip + val gossipFrequency = clusterSettings.GossipFrequency + implicit val seedNodeConnectionTimeout = clusterSettings.SeedNodeConnectionTimeout implicit val defaultTimeout = Timeout(remoteSettings.RemoteSystemDaemonAckTimeout) // seed members private val seeds: Set[Member] = { - if (remoteSettings.SeedNodes.isEmpty) throw new ConfigurationException( + if (clusterSettings.SeedNodes.isEmpty) throw new ConfigurationException( "At least one seed member must be defined in the configuration [akka.cluster.seed-members]") - else remoteSettings.SeedNodes map (address ⇒ Member(address, MemberStatus.Up())) + else clusterSettings.SeedNodes map (address ⇒ Member(address, MemberStatus.Up())) } + private val serialization = remote.serialization + private val failureDetector = new AccrualFailureDetector(system, clusterSettings.FailureDetectorThreshold, clusterSettings.FailureDetectorMaxSampleSize) + private val isRunning = new AtomicBoolean(true) private val log = Logging(system, "Gossiper") private val random = SecureRandom.getInstance("SHA1PRNG") @@ -164,14 +163,11 @@ case class Gossiper(remote: RemoteActorRefProvider, system: ActorSystemImpl) { log.info("Starting cluster Gossiper...") // join the cluster by connecting to one of the seed members and retrieve current cluster state (Gossip) - joinCluster(Timer(remoteSettings.MaxTimeToRetryJoiningCluster)) + joinCluster(Deadline(clusterSettings.MaxTimeToRetryJoiningCluster)) // start periodic gossip and cluster scrutinization - val initateGossipCanceller = system.scheduler.schedule( - Duration(initialDelayForGossip.toSeconds, SECONDS), Duration(gossipFrequency.toSeconds, SECONDS))(initateGossip()) - - val scrutinizeCanceller = system.scheduler.schedule( - Duration(initialDelayForGossip.toSeconds, SECONDS), Duration(gossipFrequency.toSeconds, SECONDS))(scrutinize()) + val initateGossipCanceller = system.scheduler.schedule(initialDelayForGossip, gossipFrequency)(initateGossip()) + val scrutinizeCanceller = system.scheduler.schedule(initialDelayForGossip, gossipFrequency)(scrutinize()) /** * Shuts down all connections to other members, the cluster daemon and the periodic gossip and cleanup tasks. @@ -293,7 +289,7 @@ case class Gossiper(remote: RemoteActorRefProvider, system: ActorSystemImpl) { /** * Joins the cluster by connecting to one of the seed members and retrieve current cluster state (Gossip). */ - private def joinCluster(timer: Timer) { + private def joinCluster(deadline: Deadline) { val seedNodes = seedNodesWithoutMyself // filter out myself if (!seedNodes.isEmpty) { // if we have seed members to contact @@ -316,16 +312,16 @@ case class Gossiper(remote: RemoteActorRefProvider, system: ActorSystemImpl) { case e: Exception ⇒ log.error( "Could not join cluster through any of the seed members - retrying for another {} seconds", - timer.timeLeft.toSeconds) + deadline.timeLeft.toSeconds) // retry joining the cluster unless // 1. Gossiper is shut down // 2. 
The connection time window has expired if (isRunning.get) { - if (timer.timeLeft.toMillis > 0) joinCluster(timer) // recur + if (deadline.timeLeft.toMillis > 0) joinCluster(deadline) // recur else throw new RemoteConnectionException( "Could not join cluster (any of the seed members) - giving up after trying for " + - timer.timeout.toSeconds + " seconds") + deadline.time.toSeconds + " seconds") } } } diff --git a/akka-cluster/src/main/scala/akka/cluster/VectorClock.scala b/akka-cluster/src/main/scala/akka/cluster/VectorClock.scala index a6a54de1d9..ef1f1be490 100644 --- a/akka-cluster/src/main/scala/akka/cluster/VectorClock.scala +++ b/akka-cluster/src/main/scala/akka/cluster/VectorClock.scala @@ -8,10 +8,16 @@ import akka.AkkaException class VectorClockException(message: String) extends AkkaException(message) +/** + * Trait to be extended by classes that wants to be versioned using a VectorClock. + */ trait Versioned { def version: VectorClock } +/** + * Utility methods for comparing Versioned instances. + */ object Versioned { def latestVersionOf[T <: Versioned](versioned1: T, versioned2: T): T = { (versioned1.version compare versioned2.version) match { @@ -24,10 +30,11 @@ object Versioned { /** * Representation of a Vector-based clock (counting clock), inspired by Lamport logical clocks. - * + * {{ * Reference: * 1) Leslie Lamport (1978). "Time, clocks, and the ordering of events in a distributed system". Communications of the ACM 21 (7): 558-565. * 2) Friedemann Mattern (1988). "Virtual Time and Global States of Distributed Systems". Workshop on Parallel and Distributed Algorithms: pp. 215-226 + * }} */ case class VectorClock( versions: Vector[VectorClock.Entry] = Vector.empty[VectorClock.Entry], @@ -90,9 +97,11 @@ object VectorClock { /** * Compare two vector clocks. The outcomes will be one of the following: *

- * 1. Clock 1 is BEFORE clock 2 if there exists an i such that c1(i) <= c(2) and there does not exist a j such that c1(j) > c2(j). - * 2. Clock 1 is CONCURRENT to clock 2 if there exists an i, j such that c1(i) < c2(i) and c1(j) > c2(j). - * 3. Clock 1 is AFTER clock 2 otherwise. + * {{ + * 1. Clock 1 is BEFORE clock 2 if there exists an i such that c1(i) <= c(2) and there does not exist a j such that c1(j) > c2(j). + * 2. Clock 1 is CONCURRENT to clock 2 if there exists an i, j such that c1(i) < c2(i) and c1(j) > c2(j). + * 3. Clock 1 is AFTER clock 2 otherwise. + * }} * * @param v1 The first VectorClock * @param v2 The second VectorClock diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala new file mode 100644 index 0000000000..240d1ad3ff --- /dev/null +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala @@ -0,0 +1,35 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ + +package akka.cluster + +import akka.testkit.AkkaSpec +import akka.util.duration._ +import akka.util.Duration + +@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) +class ClusterConfigSpec extends AkkaSpec( + """ + akka { + actor { + provider = "akka.remote.RemoteActorRefProvider" + } + } + """) { + + "Clustering" must { + + "be able to parse generic cluster config elements" in { + val settings = new ClusterSettings(system.settings.config, system.name) + import settings._ + FailureDetectorThreshold must be(8) + FailureDetectorMaxSampleSize must be(1000) + SeedNodeConnectionTimeout must be(30 seconds) + MaxTimeToRetryJoiningCluster must be(30 seconds) + InitialDelayForGossip must be(5 seconds) + GossipFrequency must be(1 second) + SeedNodes must be(Set()) + } + } +} diff --git a/akka-remote/src/main/resources/reference.conf b/akka-remote/src/main/resources/reference.conf index 76f1980615..943b0d7122 100644 --- a/akka-remote/src/main/resources/reference.conf +++ b/akka-remote/src/main/resources/reference.conf @@ -118,42 +118,9 @@ akka { reconnection-time-window = 600s } - # The dispatcher used for remote system messages - compute-grid-dispatcher { - # defaults to same settings as default-dispatcher - name = ComputeGridDispatcher - } - # The dispatcher used for the system actor "network-event-sender" network-event-sender-dispatcher { type = PinnedDispatcher } - - } - - cluster { - use-cluster = off - - seed-nodes = [] - seed-node-connection-timeout = 30s - max-time-to-retry-joining-cluster = 30s - - # accrual failure detection config - failure-detector { - - # defines the failure detector threshold - # A low threshold is prone to generate many wrong suspicions but ensures - # a quick detection in the event of a real crash. 
Conversely, a high - # threshold generates fewer mistakes but needs more time to detect - # actual crashes - threshold = 8 - - max-sample-size = 1000 - } - - gossip { - initialDelay = 5s - frequency = 1s - } } } diff --git a/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala b/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala index 0060233246..5c29d22161 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala @@ -13,26 +13,10 @@ import akka.actor.Address import akka.actor.AddressExtractor class RemoteSettings(val config: Config, val systemName: String) { - import config._ - val RemoteTransport = getString("akka.remote.transport") val LogReceive = getBoolean("akka.remote.log-received-messages") val LogSend = getBoolean("akka.remote.log-sent-messages") - - // TODO cluster config will go into akka-cluster/reference.conf when we enable that module - // cluster config section - val UseCluster = getBoolean("akka.cluster.use-cluster") - val FailureDetectorThreshold = getInt("akka.cluster.failure-detector.threshold") - val FailureDetectorMaxSampleSize = getInt("akka.cluster.failure-detector.max-sample-size") - val SeedNodeConnectionTimeout = Duration(config.getMilliseconds("akka.cluster.seed-node-connection-timeout"), MILLISECONDS) - val MaxTimeToRetryJoiningCluster = Duration(config.getMilliseconds("akka.cluster.max-time-to-retry-joining-cluster"), MILLISECONDS) - val InitialDelayForGossip = Duration(getMilliseconds("akka.cluster.gossip.initialDelay"), MILLISECONDS) - val GossipFrequency = Duration(getMilliseconds("akka.cluster.gossip.frequency"), MILLISECONDS) - val SeedNodes = Set.empty[Address] ++ getStringList("akka.cluster.seed-nodes").asScala.collect { - case AddressExtractor(addr) ⇒ addr - } - val RemoteSystemDaemonAckTimeout = Duration(getMilliseconds("akka.remote.remote-daemon-ack-timeout"), MILLISECONDS) val UntrustedMode = getBoolean("akka.remote.untrusted-mode") } diff --git a/akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala b/akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala index b60b90b900..fbeaff5b6b 100644 --- a/akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala @@ -28,13 +28,6 @@ class RemoteConfigSpec extends AkkaSpec( RemoteTransport must be("akka.remote.netty.NettyRemoteTransport") UntrustedMode must be(false) RemoteSystemDaemonAckTimeout must be(30 seconds) - - FailureDetectorThreshold must be(8) - FailureDetectorMaxSampleSize must be(1000) - - InitialDelayForGossip must be(5 seconds) - GossipFrequency must be(1 second) - SeedNodes must be(Set()) } "be able to parse Netty config elements" in { From 7dbb5e9abb62ee49759b23eef2b56dd1fce35346 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Tue, 31 Jan 2012 15:07:15 +0100 Subject: [PATCH 60/94] Added try-finally blocks for each shutdown step in the Gossipper.shutdown method. 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Jonas Bonér --- .../src/main/scala/akka/cluster/Gossiper.scala | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/akka-cluster/src/main/scala/akka/cluster/Gossiper.scala b/akka-cluster/src/main/scala/akka/cluster/Gossiper.scala index c7b4e21773..699ec4c6c8 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Gossiper.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Gossiper.scala @@ -174,11 +174,16 @@ case class Gossiper(remote: RemoteActorRefProvider, system: ActorSystemImpl) { */ def shutdown() { if (isRunning.compareAndSet(true, false)) { - log.info("Shutting down Gossiper for [{}]", address) - connectionManager.shutdown() - system.stop(clusterDaemon) - initateGossipCanceller.cancel() - scrutinizeCanceller.cancel() + log.info("Shutting down Gossiper for [{}]...", address) + try connectionManager.shutdown() finally { + try system.stop(clusterDaemon) finally { + try initateGossipCanceller.cancel() finally { + try scrutinizeCanceller.cancel() finally { + log.info("Gossiper for [{}] is shut down", address) + } + } + } + } } } From e32adebfd95cdeb9a12c28849a5733823093a042 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 31 Jan 2012 15:23:00 +0100 Subject: [PATCH 61/94] Adding doc to andThen and tryRecover --- .../code/akka/docs/future/FutureDocSpec.scala | 29 +++++++++++++++++++ akka-docs/scala/futures.rst | 18 ++++++++++++ 2 files changed, 47 insertions(+) diff --git a/akka-docs/scala/code/akka/docs/future/FutureDocSpec.scala b/akka-docs/scala/code/akka/docs/future/FutureDocSpec.scala index 175fc08ff5..023bdd8df7 100644 --- a/akka-docs/scala/code/akka/docs/future/FutureDocSpec.scala +++ b/akka-docs/scala/code/akka/docs/future/FutureDocSpec.scala @@ -13,6 +13,7 @@ import akka.dispatch.Future import akka.dispatch.Await import akka.util.duration._ import akka.dispatch.Promise +import java.lang.IllegalStateException object FutureDocSpec { @@ -266,6 +267,19 @@ class FutureDocSpec extends AkkaSpec { Await.result(future, 1 second) must be(0) } + "demonstrate usage of tryRecover" in { + implicit val timeout = system.settings.ActorTimeout + val actor = system.actorOf(Props[MyActor]) + val msg1 = -1 + //#try-recover + val future = akka.pattern.ask(actor, msg1) tryRecover { + case e: ArithmeticException ⇒ Promise.successful(0) + case foo: IllegalArgumentException ⇒ Promise.failed[Int](new IllegalStateException("All br0ken!")) + } + //#try-recover + Await.result(future, 1 second) must be(0) + } + "demonstrate usage of zip" in { val future1 = Future { "foo" } val future2 = Future { "bar" } @@ -275,6 +289,21 @@ class FutureDocSpec extends AkkaSpec { Await.result(future3, 1 second) must be("foo bar") } + "demonstrate usage of andThen" in { + def loadPage(s: String) = s + val url = "foo bar" + def log(cause: Throwable) = () + def watchSomeTV = () + //#and-then + val result = Future { loadPage(url) } andThen { + case Left(exception) ⇒ log(exception) + } andThen { + case _ ⇒ watchSomeTV + } + //#and-then + Await.result(result, 1 second) must be("foo bar") + } + "demonstrate usage of or" in { val future1 = Future { "foo" } val future2 = Future { "bar" } diff --git a/akka-docs/scala/futures.rst b/akka-docs/scala/futures.rst index c46db30927..38edee51af 100644 --- a/akka-docs/scala/futures.rst +++ b/akka-docs/scala/futures.rst @@ -198,6 +198,18 @@ For this Akka supports ``onComplete``, ``onSuccess`` and ``onFailure``, of which .. 
includecode:: code/akka/docs/future/FutureDocSpec.scala
    :include: onComplete
 
+Ordering
+--------
+
+Since callbacks are executed in any order and potentially in parallel,
+it can be tricky when you need sequential ordering of operations.
+The solution is ``andThen``: it creates a new Future with the specified callback,
+a Future that will have the same result as the Future it's called on,
+which allows for ordering as in the following sample:
+
+.. includecode:: code/akka/docs/future/FutureDocSpec.scala
+   :include: and-then
+
 Auxiliary methods
 -----------------
 
@@ -232,3 +244,9 @@ our ``Future`` would have a result of 0. The ``recover`` method works very simil
 so multiple ``Exception``\s can be handled in this manner, and if an ``Exception`` is not handled this way
 it will behave as if we hadn't used the ``recover`` method.
 
+You can also use the ``tryRecover`` method, which has the same relationship to ``recover`` as ``flatMap`` has to ``map``,
+and is used like this:
+
+.. includecode:: code/akka/docs/future/FutureDocSpec.scala
+   :include: try-recover
+

From 4162372024e90e395d4cd92fd204fb24283053bf Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Jonas=20Bone=CC=81r?=
Date: Tue, 31 Jan 2012 15:48:01 +0100
Subject: [PATCH 62/94] Cleaned up AccrualFailureDetectorSpec after changing order of arguments in AccrualFailureDetector

---
 .../scala/akka/cluster/AccrualFailureDetectorSpec.scala | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/akka-cluster/src/test/scala/akka/cluster/AccrualFailureDetectorSpec.scala b/akka-cluster/src/test/scala/akka/cluster/AccrualFailureDetectorSpec.scala
index d02199f703..f611fc9812 100644
--- a/akka-cluster/src/test/scala/akka/cluster/AccrualFailureDetectorSpec.scala
+++ b/akka-cluster/src/test/scala/akka/cluster/AccrualFailureDetectorSpec.scala
@@ -12,7 +12,7 @@ class AccrualFailureDetectorSpec extends AkkaSpec("""
   val conn = Address("akka", "", Some("localhost"), Some(2552))
 
   "mark node as available after a series of successful heartbeats" in {
-    val fd = new AccrualFailureDetector(system = system)
+    val fd = new AccrualFailureDetector(system)
 
     fd.heartbeat(conn)
 
@@ -27,7 +27,7 @@ class AccrualFailureDetectorSpec extends AkkaSpec("""
 
   // FIXME how should we deal with explicit removal of connection?
- if triggered as failure then we have a problem in boostrap - see line 142 in AccrualFailureDetector "mark node as dead after explicit removal of connection" ignore { - val fd = new AccrualFailureDetector(system = system) + val fd = new AccrualFailureDetector(system) fd.heartbeat(conn) @@ -45,7 +45,7 @@ class AccrualFailureDetectorSpec extends AkkaSpec(""" } "mark node as dead if heartbeat are missed" in { - val fd = new AccrualFailureDetector(threshold = 3, system = system) + val fd = new AccrualFailureDetector(system, threshold = 3) fd.heartbeat(conn) @@ -63,7 +63,7 @@ class AccrualFailureDetectorSpec extends AkkaSpec(""" } "mark node as available if it starts heartbeat again after being marked dead due to detection of failure" in { - val fd = new AccrualFailureDetector(threshold = 3, system = system) + val fd = new AccrualFailureDetector(system, threshold = 3) fd.heartbeat(conn) From 575ae92fb915de2cb45bd524853ac5a58861bd4f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Tue, 31 Jan 2012 15:49:24 +0100 Subject: [PATCH 63/94] Changed line endings from DOS to UNIX --- .../AbstractRemoteActorMultiJvmSpec.scala | 56 +++---- .../multi-jvm/scala/akka/remote/Barrier.scala | 38 ++--- .../scala/akka/remote/ZKClient.scala | 156 +++++++++--------- 3 files changed, 125 insertions(+), 125 deletions(-) diff --git a/akka-remote/src/multi-jvm/scala/akka/remote/AbstractRemoteActorMultiJvmSpec.scala b/akka-remote/src/multi-jvm/scala/akka/remote/AbstractRemoteActorMultiJvmSpec.scala index a560bc29d6..72bbffaeb5 100755 --- a/akka-remote/src/multi-jvm/scala/akka/remote/AbstractRemoteActorMultiJvmSpec.scala +++ b/akka-remote/src/multi-jvm/scala/akka/remote/AbstractRemoteActorMultiJvmSpec.scala @@ -1,28 +1,28 @@ -package akka.remote - -import com.typesafe.config.{Config, ConfigFactory} - -trait AbstractRemoteActorMultiJvmSpec { - def NrOfNodes: Int - def commonConfig: Config - - private[this] val remotes: IndexedSeq[String] = { - val nodesOpt = Option(AkkaRemoteSpec.testNodes).map(_.split(",").toIndexedSeq) - nodesOpt getOrElse IndexedSeq.fill(NrOfNodes)("localhost") - } - - def akkaSpec(idx: Int) = "AkkaRemoteSpec@%s:%d".format(remotes(idx), 9991+idx) - - def akkaURIs(count: Int): String = { - 0 until count map {idx => "\"akka://" + akkaSpec(idx) + "\""} mkString "," - } - - val nodeConfigs = ((1 to NrOfNodes).toList zip remotes) map { - case (idx, host) => - ConfigFactory.parseString(""" - akka { - remote.netty.hostname="%s" - remote.netty.port = "%d" - }""".format(host, 9990+idx, idx)) withFallback commonConfig - } -} +package akka.remote + +import com.typesafe.config.{Config, ConfigFactory} + +trait AbstractRemoteActorMultiJvmSpec { + def NrOfNodes: Int + def commonConfig: Config + + private[this] val remotes: IndexedSeq[String] = { + val nodesOpt = Option(AkkaRemoteSpec.testNodes).map(_.split(",").toIndexedSeq) + nodesOpt getOrElse IndexedSeq.fill(NrOfNodes)("localhost") + } + + def akkaSpec(idx: Int) = "AkkaRemoteSpec@%s:%d".format(remotes(idx), 9991+idx) + + def akkaURIs(count: Int): String = { + 0 until count map {idx => "\"akka://" + akkaSpec(idx) + "\""} mkString "," + } + + val nodeConfigs = ((1 to NrOfNodes).toList zip remotes) map { + case (idx, host) => + ConfigFactory.parseString(""" + akka { + remote.netty.hostname="%s" + remote.netty.port = "%d" + }""".format(host, 9990+idx, idx)) withFallback commonConfig + } +} diff --git a/akka-remote/src/multi-jvm/scala/akka/remote/Barrier.scala b/akka-remote/src/multi-jvm/scala/akka/remote/Barrier.scala index b11ec837b7..e99fca2a45 
100755 --- a/akka-remote/src/multi-jvm/scala/akka/remote/Barrier.scala +++ b/akka-remote/src/multi-jvm/scala/akka/remote/Barrier.scala @@ -1,19 +1,19 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -package akka.remote - -trait Barrier { - def await() = { enter(); leave() } - - def apply(body: ⇒ Unit) { - enter() - body - leave() - } - - def enter(): Unit - - def leave(): Unit -} +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ + +package akka.remote + +trait Barrier { + def await() = { enter(); leave() } + + def apply(body: ⇒ Unit) { + enter() + body + leave() + } + + def enter(): Unit + + def leave(): Unit +} diff --git a/akka-remote/src/multi-jvm/scala/akka/remote/ZKClient.scala b/akka-remote/src/multi-jvm/scala/akka/remote/ZKClient.scala index 156c955566..733883228e 100755 --- a/akka-remote/src/multi-jvm/scala/akka/remote/ZKClient.scala +++ b/akka-remote/src/multi-jvm/scala/akka/remote/ZKClient.scala @@ -1,78 +1,78 @@ -/** - * Copyright (C) 2011-2012 Typesafe - */ -package akka.remote - -import org.apache.zookeeper._ -import ZooDefs.Ids - -object ZkClient extends Watcher { - // Don't forget to close! - lazy val zk: ZooKeeper = { - val remoteNodes = AkkaRemoteSpec.testNodes split ',' - - // ZkServers are configured to listen on a specific port. - val connectString = remoteNodes map (_+":2181") mkString "," - new ZooKeeper(connectString, 3000, this) - } - - def process(ev: WatchedEvent) { - synchronized { notify() } - } - - class ZkBarrier(name: String, count: Int, root: String) extends Barrier { - @annotation.tailrec - private def waitForServer() { - // SI-1672 - val r = try { - zk.exists("/", false); true - } catch { - case _: KeeperException.ConnectionLossException => - Thread.sleep(10000) - false - } - if (!r) waitForServer() - } - waitForServer() - - try { - zk.create(root, Array[Byte](), Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT) - } catch { - case _: KeeperException.NodeExistsException => - } - - val timeoutMs = 300*1000 - - private def block(num: Int) { - val start = System.currentTimeMillis - while (true) { - if (System.currentTimeMillis - start > timeoutMs) - throw new InterruptedException("Timed out blocking in zk") - - ZkClient.this.synchronized { - val children = zk.getChildren(root, true) - if (children.size < num) { - ZkClient.this.wait(timeoutMs) - } else - return - } - } - } - - def enter() { - zk.create(root + "/" + name, Array[Byte](), Ids.OPEN_ACL_UNSAFE, - CreateMode.EPHEMERAL) - - block(count) - } - - final def leave() { - zk.create(root + "/" + name + ".leave", Array[Byte](), Ids.OPEN_ACL_UNSAFE, - CreateMode.EPHEMERAL) - - block(2*count) - } - } - - def barrier(name: String, count: Int, root: String) = new ZkBarrier(name, count, root) -} +/** + * Copyright (C) 2011-2012 Typesafe + */ +package akka.remote + +import org.apache.zookeeper._ +import ZooDefs.Ids + +object ZkClient extends Watcher { + // Don't forget to close! + lazy val zk: ZooKeeper = { + val remoteNodes = AkkaRemoteSpec.testNodes split ',' + + // ZkServers are configured to listen on a specific port. 
+ val connectString = remoteNodes map (_+":2181") mkString "," + new ZooKeeper(connectString, 3000, this) + } + + def process(ev: WatchedEvent) { + synchronized { notify() } + } + + class ZkBarrier(name: String, count: Int, root: String) extends Barrier { + @annotation.tailrec + private def waitForServer() { + // SI-1672 + val r = try { + zk.exists("/", false); true + } catch { + case _: KeeperException.ConnectionLossException => + Thread.sleep(10000) + false + } + if (!r) waitForServer() + } + waitForServer() + + try { + zk.create(root, Array[Byte](), Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT) + } catch { + case _: KeeperException.NodeExistsException => + } + + val timeoutMs = 300*1000 + + private def block(num: Int) { + val start = System.currentTimeMillis + while (true) { + if (System.currentTimeMillis - start > timeoutMs) + throw new InterruptedException("Timed out blocking in zk") + + ZkClient.this.synchronized { + val children = zk.getChildren(root, true) + if (children.size < num) { + ZkClient.this.wait(timeoutMs) + } else + return + } + } + } + + def enter() { + zk.create(root + "/" + name, Array[Byte](), Ids.OPEN_ACL_UNSAFE, + CreateMode.EPHEMERAL) + + block(count) + } + + final def leave() { + zk.create(root + "/" + name + ".leave", Array[Byte](), Ids.OPEN_ACL_UNSAFE, + CreateMode.EPHEMERAL) + + block(2*count) + } + } + + def barrier(name: String, count: Int, root: String) = new ZkBarrier(name, count, root) +} From 5a1c0da8ae2ce9a9bcc555b108be525104ce3e52 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Tue, 31 Jan 2012 15:50:06 +0100 Subject: [PATCH 64/94] converted tabs to spaces --- .../scala/akka/actor/ActorRefProvider.scala | 0 .../code/akka/docs/agent/AgentDocTest.java | 2 +- .../AbstractRemoteActorMultiJvmSpec.scala | 2 +- .../multi-jvm/scala/akka/remote/Barrier.scala | 0 .../scala/akka/remote/ZKClient.scala | 0 .../sample/src/main/config/logback.xml | 2 +- .../scala/concurrent/stm/JavaAPITests.java | 34 +++++++++---------- 7 files changed, 20 insertions(+), 20 deletions(-) mode change 100755 => 100644 akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala mode change 100755 => 100644 akka-remote/src/multi-jvm/scala/akka/remote/AbstractRemoteActorMultiJvmSpec.scala mode change 100755 => 100644 akka-remote/src/multi-jvm/scala/akka/remote/Barrier.scala mode change 100755 => 100644 akka-remote/src/multi-jvm/scala/akka/remote/ZKClient.scala diff --git a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala old mode 100755 new mode 100644 diff --git a/akka-docs/java/code/akka/docs/agent/AgentDocTest.java b/akka-docs/java/code/akka/docs/agent/AgentDocTest.java index 94ddef2c9f..553d64eee5 100644 --- a/akka-docs/java/code/akka/docs/agent/AgentDocTest.java +++ b/akka-docs/java/code/akka/docs/agent/AgentDocTest.java @@ -44,7 +44,7 @@ public class AgentDocTest { @Test public void createAndClose() { - //#create + //#create ActorSystem system = ActorSystem.create("app"); Agent agent = new Agent(5, system); diff --git a/akka-remote/src/multi-jvm/scala/akka/remote/AbstractRemoteActorMultiJvmSpec.scala b/akka-remote/src/multi-jvm/scala/akka/remote/AbstractRemoteActorMultiJvmSpec.scala old mode 100755 new mode 100644 index 72bbffaeb5..dd15817374 --- a/akka-remote/src/multi-jvm/scala/akka/remote/AbstractRemoteActorMultiJvmSpec.scala +++ b/akka-remote/src/multi-jvm/scala/akka/remote/AbstractRemoteActorMultiJvmSpec.scala @@ -11,7 +11,7 @@ trait AbstractRemoteActorMultiJvmSpec { nodesOpt getOrElse 
IndexedSeq.fill(NrOfNodes)("localhost") } - def akkaSpec(idx: Int) = "AkkaRemoteSpec@%s:%d".format(remotes(idx), 9991+idx) + def akkaSpec(idx: Int) = "AkkaRemoteSpec@%s:%d".format(remotes(idx), 9991+idx) def akkaURIs(count: Int): String = { 0 until count map {idx => "\"akka://" + akkaSpec(idx) + "\""} mkString "," diff --git a/akka-remote/src/multi-jvm/scala/akka/remote/Barrier.scala b/akka-remote/src/multi-jvm/scala/akka/remote/Barrier.scala old mode 100755 new mode 100644 diff --git a/akka-remote/src/multi-jvm/scala/akka/remote/ZKClient.scala b/akka-remote/src/multi-jvm/scala/akka/remote/ZKClient.scala old mode 100755 new mode 100644 diff --git a/akka-sbt-plugin/sample/src/main/config/logback.xml b/akka-sbt-plugin/sample/src/main/config/logback.xml index bddac0313d..019d298192 100644 --- a/akka-sbt-plugin/sample/src/main/config/logback.xml +++ b/akka-sbt-plugin/sample/src/main/config/logback.xml @@ -9,7 +9,7 @@ - + diff --git a/akka-transactor/src/test/java/scala/concurrent/stm/JavaAPITests.java b/akka-transactor/src/test/java/scala/concurrent/stm/JavaAPITests.java index 63fb6abb74..09ed90af7a 100644 --- a/akka-transactor/src/test/java/scala/concurrent/stm/JavaAPITests.java +++ b/akka-transactor/src/test/java/scala/concurrent/stm/JavaAPITests.java @@ -27,9 +27,9 @@ public class JavaAPITests { public void atomicWithRunnable() { final Ref.View ref = newRef(0); atomic(new Runnable() { - public void run() { - ref.set(10); - } + public void run() { + ref.set(10); + } }); int value = ref.get(); assertEquals(10, value); @@ -39,9 +39,9 @@ public class JavaAPITests { public void atomicWithCallable() { final Ref.View ref = newRef(0); int oldValue = atomic(new Callable() { - public Integer call() { - return ref.swap(10); - } + public Integer call() { + return ref.swap(10); + } }); assertEquals(0, oldValue); int newValue = ref.get(); @@ -53,10 +53,10 @@ public class JavaAPITests { final Ref.View ref = newRef(0); try { atomic(new Runnable() { - public void run() { - ref.set(10); - throw new TestException(); - } + public void run() { + ref.set(10); + throw new TestException(); + } }); } catch (TestException e) { int value = ref.get(); @@ -69,9 +69,9 @@ public class JavaAPITests { public void transformInteger() { Ref.View ref = newRef(0); transform(ref, new AbstractFunction1() { - public Integer apply(Integer i) { - return i + 10; - } + public Integer apply(Integer i) { + return i + 10; + } }); int value = ref.get(); assertEquals(10, value); @@ -110,13 +110,13 @@ public class JavaAPITests { final Map map = newMap(); try { atomic(new Runnable() { - public void run() { - map.put(1, "one"); + public void run() { + map.put(1, "one"); map.put(2, "two"); assertTrue(map.containsKey(1)); assertTrue(map.containsKey(2)); - throw new TestException(); - } + throw new TestException(); + } }); } catch (TestException e) { assertFalse(map.containsKey(1)); From aa1c7ea9b9fa9a48fd525ef043e100ba80b0edbe Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 31 Jan 2012 16:00:46 +0100 Subject: [PATCH 65/94] Adding java documentation for andThen, recover and tryRecover --- .../akka/docs/future/FutureDocTestBase.java | 63 +++++++++++++++++++ akka-docs/java/futures.rst | 34 +++++++++- 2 files changed, 95 insertions(+), 2 deletions(-) diff --git a/akka-docs/java/code/akka/docs/future/FutureDocTestBase.java b/akka-docs/java/code/akka/docs/future/FutureDocTestBase.java index 8ecfccbeac..d9a5308050 100644 --- a/akka-docs/java/code/akka/docs/future/FutureDocTestBase.java +++ 
b/akka-docs/java/code/akka/docs/future/FutureDocTestBase.java
@@ -315,6 +315,69 @@ public class FutureDocTestBase {
     //#filter
   }
 
+  public void sendToTheInternetz(String s) {
+
+  }
+
+  public void sendToIssueTracker(Throwable t) {
+
+  }
+
+  @Test public void useAndThen() {
+    //#and-then
+    Future future1 = Futures.successful("value", system.dispatcher()).
+      andThen(new OnComplete() {
+        public void onComplete(Throwable failure, String result) {
+          if (failure != null) sendToIssueTracker(failure);
+        }
+      }).andThen(new OnComplete() {
+        public void onComplete(Throwable failure, String result) {
+          if (result != null) sendToTheInternetz(result);
+        }
+      });
+    //#and-then
+  }
+
+  @Test public void useRecover() {
+    //#recover
+    Future future = future(new Callable() {
+      public Integer call() {
+        return 1 / 0;
+      }
+    }, system.dispatcher()).recover(new Recover() {
+      public Integer recover(Throwable problem) throws Throwable {
+        if (problem instanceof ArithmeticException) return 0;
+        else throw problem;
+      }
+    });
+    int result = Await.result(future, Duration.create(1, SECONDS));
+    assertEquals(result, 0);
+    //#recover
+  }
+
+  @Test public void useTryRecover() {
+    //#try-recover
+    Future future = future(new Callable() {
+      public Integer call() {
+        return 1 / 0;
+      }
+    }, system.dispatcher()).tryRecover(new Recover() {
+      public Future recover(Throwable problem) throws Throwable {
+        if (problem instanceof ArithmeticException) {
+          return future(new Callable() {
+            public Integer call() {
+              return 0;
+            }
+          }, system.dispatcher());
+        }
+        else throw problem;
+      }
+    });
+    int result = Await.result(future, Duration.create(1, SECONDS));
+    assertEquals(result, 0);
+    //#try-recover
+  }
+
   @Test public void useOnSuccessOnFailureAndOnComplete() {
     {
       Future future = Futures.successful("foo", system.dispatcher());
diff --git a/akka-docs/java/futures.rst b/akka-docs/java/futures.rst
index e9b743535a..a75fb21ba5 100644
--- a/akka-docs/java/futures.rst
+++ b/akka-docs/java/futures.rst
@@ -67,7 +67,7 @@ These allow you to create 'pipelines' or 'streams' that the result will travel t
 Future is a Monad
 ^^^^^^^^^^^^^^^^^
 
-The first method for working with ``Future`` functionally is ``map``. This method takes a ``Function`` which performs
+The first method for working with ``Future`` functionally is ``map``. This method takes a ``Mapper`` which performs
 some operation on the result of the ``Future``, and returns a new result. The return value of the ``map`` method is another
 ``Future`` that will contain the new result:
 
@@ -176,6 +176,18 @@ For this Akka supports ``onComplete``, ``onSuccess`` and ``onFailure``, of which
 .. includecode:: code/akka/docs/future/FutureDocTestBase.java
    :include: onComplete
 
+Ordering
+--------
+
+Since callbacks are executed in any order and potentially in parallel,
+it can be tricky when you need sequential ordering of operations.
+The solution is ``andThen``: it creates a new Future with the specified callback,
+a Future that will have the same result as the Future it's called on,
+which allows for ordering as in the following sample:
+
+.. includecode:: code/akka/docs/future/FutureDocTestBase.java
+   :include: and-then
+
 Auxiliary methods
 -----------------
 
@@ -197,4 +209,22 @@ Exceptions
 Since the result of a ``Future`` is created concurrently to the rest of the program, exceptions
 must be handled differently.
It doesn't matter if an ``UntypedActor`` or the dispatcher is completing the ``Future``,
 if an ``Exception`` is caught the ``Future`` will contain it instead of a valid result.
 If a ``Future`` does contain an ``Exception``,
-calling ``Await.result`` will cause it to be thrown again so it can be handled properly.
\ No newline at end of file
+calling ``Await.result`` will cause it to be thrown again so it can be handled properly.
+
+It is also possible to handle an ``Exception`` by returning a different result.
+This is done with the ``recover`` method. For example:
+
+.. includecode:: code/akka/docs/future/FutureDocTestBase.java
+   :include: recover
+
+In this example, if the actor replied with an ``akka.actor.Status.Failure`` containing the ``ArithmeticException``,
+our ``Future`` would have a result of 0. The ``recover`` method works very similarly to the standard try/catch blocks,
+so multiple ``Exception``\s can be handled in this manner, and if an ``Exception`` is not handled this way
+it will behave as if we hadn't used the ``recover`` method.
+
+You can also use the ``tryRecover`` method, which has the same relationship to ``recover`` as ``flatMap`` has to ``map``,
+and is used like this:
+
+.. includecode:: code/akka/docs/future/FutureDocTestBase.java
+   :include: try-recover
+

From 2a4418799add8bff455fd39248d47895f993747c Mon Sep 17 00:00:00 2001
From: Patrik Nordwall
Date: Tue, 31 Jan 2012 16:10:07 +0100
Subject: [PATCH 66/94] Replaced Action with Directive in SupervisorStrategy. See #1716

---
 .../main/scala/akka/actor/FaultHandling.scala | 40 +++++++++----------
 .../main/scala/akka/actor/UntypedActor.scala  |  4 +-
 .../docs/actor/FaultHandlingTestBase.java     |  8 ++--
 .../actor/japi/FaultHandlingDocSample.java    |  8 ++--
 akka-docs/java/fault-tolerance.rst            | 12 +++---
 akka-docs/scala/fault-tolerance.rst           | 16 ++++----
 6 files changed, 44 insertions(+), 44 deletions(-)

diff --git a/akka-actor/src/main/scala/akka/actor/FaultHandling.scala b/akka-actor/src/main/scala/akka/actor/FaultHandling.scala
index 895268fb44..17243691b0 100644
--- a/akka-actor/src/main/scala/akka/actor/FaultHandling.scala
+++ b/akka-actor/src/main/scala/akka/actor/FaultHandling.scala
@@ -47,36 +47,36 @@ case class ChildRestartStats(val child: ActorRef, var maxNrOfRetriesCount: Int =
 
 trait SupervisorStrategyLowPriorityImplicits { this: SupervisorStrategy.type ⇒
   /**
-   * Implicit conversion from `Seq` of Cause-Action pairs to a `Decider`. See makeDecider(causeAction).
+   * Implicit conversion from `Seq` of Cause-Directive pairs to a `Decider`. See makeDecider(causeDirective).
    */
-  implicit def seqCauseAction2Decider(trapExit: Iterable[CauseAction]): Decider = makeDecider(trapExit)
+  implicit def seqCauseDirective2Decider(trapExit: Iterable[CauseDirective]): Decider = makeDecider(trapExit)
   // the above would clash with seqThrowable2Decider for empty lists
 }
 
 object SupervisorStrategy extends SupervisorStrategyLowPriorityImplicits {
 
-  sealed trait Action
+  sealed trait Directive
 
   /**
    * Resumes message processing for the failed Actor
    */
-  case object Resume extends Action
+  case object Resume extends Directive
 
   /**
    * Discards the old Actor instance and replaces it with a new,
    * then resumes message processing.
    */
-  case object Restart extends Action
+  case object Restart extends Directive
 
   /**
    * Stops the Actor
    */
-  case object Stop extends Action
+  case object Stop extends Directive
 
   /**
    * Escalates the failure to the supervisor of the supervisor,
    * by rethrowing the cause of the failure.
*/ - case object Escalate extends Action + case object Escalate extends Directive /** * Resumes message processing for the failed Actor @@ -127,9 +127,9 @@ object SupervisorStrategy extends SupervisorStrategyLowPriorityImplicits { */ implicit def seqThrowable2Decider(trapExit: Seq[Class[_ <: Throwable]]): Decider = makeDecider(trapExit) - type Decider = PartialFunction[Throwable, Action] - type JDecider = akka.japi.Function[Throwable, Action] - type CauseAction = (Class[_ <: Throwable], Action) + type Decider = PartialFunction[Throwable, Directive] + type JDecider = akka.japi.Function[Throwable, Directive] + type CauseDirective = (Class[_ <: Throwable], Directive) /** * Decider builder which just checks whether one of @@ -152,14 +152,14 @@ object SupervisorStrategy extends SupervisorStrategyLowPriorityImplicits { def makeDecider(trapExit: JIterable[Class[_ <: Throwable]]): Decider = makeDecider(trapExit.toSeq) /** - * Decider builder for Iterables of cause-action pairs, e.g. a map obtained + * Decider builder for Iterables of cause-directive pairs, e.g. a map obtained * from configuration; will sort the pairs so that the most specific type is * checked before all its subtypes, allowing carving out subtrees of the * Throwable hierarchy. */ - def makeDecider(flat: Iterable[CauseAction]): Decider = { - val actions = sort(flat) - return { case x ⇒ actions find (_._1 isInstance x) map (_._2) getOrElse Escalate } + def makeDecider(flat: Iterable[CauseDirective]): Decider = { + val directives = sort(flat) + return { case x ⇒ directives find (_._1 isInstance x) map (_._2) getOrElse Escalate } } def makeDecider(func: JDecider): Decider = { @@ -170,8 +170,8 @@ object SupervisorStrategy extends SupervisorStrategyLowPriorityImplicits { * Sort so that subtypes always precede their supertypes, but without * obeying any order between unrelated subtypes (insert sort). */ - def sort(in: Iterable[CauseAction]): Seq[CauseAction] = - (new ArrayBuffer[CauseAction](in.size) /: in) { (buf, ca) ⇒ + def sort(in: Iterable[CauseDirective]): Seq[CauseDirective] = + (new ArrayBuffer[CauseDirective](in.size) /: in) { (buf, ca) ⇒ buf.indexWhere(_._1 isAssignableFrom ca._1) match { case -1 ⇒ buf append ca case x ⇒ buf insert (x, ca) @@ -215,8 +215,8 @@ abstract class SupervisorStrategy { * Returns whether it processed the failure or not */ def handleFailure(context: ActorContext, child: ActorRef, cause: Throwable, stats: ChildRestartStats, children: Iterable[ChildRestartStats]): Boolean = { - val action = if (decider.isDefinedAt(cause)) decider(cause) else Escalate - action match { + val directive = if (decider.isDefinedAt(cause)) decider(cause) else Escalate + directive match { case Resume ⇒ child.asInstanceOf[InternalActorRef].resume(); true case Restart ⇒ processFailure(context, true, child, cause, stats, children); true case Stop ⇒ processFailure(context, false, child, cause, stats, children); true @@ -230,7 +230,7 @@ abstract class SupervisorStrategy { * Restart all child actors when one fails * @param maxNrOfRetries the number of times an actor is allowed to be restarted, negative value means no limit * @param withinTimeRange duration of the time window for maxNrOfRetries, Duration.Inf means no window - * @param decider = mapping from Throwable to [[akka.actor.SupervisorStrategy.Action]], you can also use a + * @param decider = mapping from Throwable to [[akka.actor.SupervisorStrategy.Directive]], you can also use a * `Seq` of Throwables which maps the given Throwables to restarts, otherwise escalates. 
 */
 case class AllForOneStrategy(maxNrOfRetries: Int = -1, withinTimeRange: Duration = Duration.Inf)(val decider: SupervisorStrategy.Decider)
@@ -273,7 +273,7 @@ case class AllForOneStrategy(maxNrOfRetries: Int = -1, withinTimeRange: Duration
  * Restart a child actor when it fails
  * @param maxNrOfRetries the number of times an actor is allowed to be restarted, negative value means no limit
  * @param withinTimeRange duration of the time window for maxNrOfRetries, Duration.Inf means no window
- * @param decider = mapping from Throwable to [[akka.actor.SupervisorStrategy.Action]], you can also use a
+ * @param decider = mapping from Throwable to [[akka.actor.SupervisorStrategy.Directive]], you can also use a
  *   `Seq` of Throwables which maps the given Throwables to restarts, otherwise escalates.
  */
 case class OneForOneStrategy(maxNrOfRetries: Int = -1, withinTimeRange: Duration = Duration.Inf)(val decider: SupervisorStrategy.Decider)
diff --git a/akka-actor/src/main/scala/akka/actor/UntypedActor.scala b/akka-actor/src/main/scala/akka/actor/UntypedActor.scala
index 6dd4d8c2c5..daa7467196 100644
--- a/akka-actor/src/main/scala/akka/actor/UntypedActor.scala
+++ b/akka-actor/src/main/scala/akka/actor/UntypedActor.scala
@@ -37,9 +37,9 @@ import akka.japi.{ Creator }
  * }
  *
  * private static SupervisorStrategy strategy = new OneForOneStrategy(10, Duration.parse("1 minute"),
- *   new Function<Throwable, Action>() {
+ *   new Function<Throwable, Directive>() {
  *     @Override
- *     public Action apply(Throwable t) {
+ *     public Directive apply(Throwable t) {
  *       if (t instanceof ArithmeticException) {
  *         return resume();
  *       } else if (t instanceof NullPointerException) {
diff --git a/akka-docs/java/code/akka/docs/actor/FaultHandlingTestBase.java b/akka-docs/java/code/akka/docs/actor/FaultHandlingTestBase.java
index abf2207a1d..bb8f11467c 100644
--- a/akka-docs/java/code/akka/docs/actor/FaultHandlingTestBase.java
+++ b/akka-docs/java/code/akka/docs/actor/FaultHandlingTestBase.java
@@ -40,9 +40,9 @@ public class FaultHandlingTestBase {
   //#strategy
   private static SupervisorStrategy strategy = new OneForOneStrategy(10, Duration.parse("1 minute"),
-    new Function<Throwable, Action>() {
+    new Function<Throwable, Directive>() {
       @Override
-      public Action apply(Throwable t) {
+      public Directive apply(Throwable t) {
         if (t instanceof ArithmeticException) {
           return resume();
         } else if (t instanceof NullPointerException) {
@@ -78,9 +78,9 @@ public class FaultHandlingTestBase {
   //#strategy2
   private static SupervisorStrategy strategy = new OneForOneStrategy(10, Duration.parse("1 minute"),
-    new Function<Throwable, Action>() {
+    new Function<Throwable, Directive>() {
       @Override
-      public Action apply(Throwable t) {
+      public Directive apply(Throwable t) {
         if (t instanceof ArithmeticException) {
           return resume();
         } else if (t instanceof NullPointerException) {
diff --git a/akka-docs/java/code/akka/docs/actor/japi/FaultHandlingDocSample.java b/akka-docs/java/code/akka/docs/actor/japi/FaultHandlingDocSample.java
index 265f005059..f10cf11051 100644
--- a/akka-docs/java/code/akka/docs/actor/japi/FaultHandlingDocSample.java
+++ b/akka-docs/java/code/akka/docs/actor/japi/FaultHandlingDocSample.java
@@ -115,9 +115,9 @@ public class FaultHandlingDocSample {
     // Stop the CounterService child if it throws ServiceUnavailable
     private static SupervisorStrategy strategy = new OneForOneStrategy(-1, Duration.Inf(),
-      new Function<Throwable, Action>() {
+      new Function<Throwable, Directive>() {
         @Override
-        public Action apply(Throwable t) {
+        public Directive apply(Throwable t) {
           if (t instanceof ServiceUnavailable) {
             return stop();
           } else {
@@ -224,9 +224,9 @@ public class FaultHandlingDocSample {
     // Restart the storage child when StorageException is thrown.
     // After 3 restarts within 5 seconds it will be stopped.
     private static SupervisorStrategy strategy = new OneForOneStrategy(3, Duration.parse("5 seconds"),
-      new Function<Throwable, Action>() {
+      new Function<Throwable, Directive>() {
        @Override
-        public Action apply(Throwable t) {
+        public Directive apply(Throwable t) {
         if (t instanceof StorageException) {
           return restart();
         } else {
diff --git a/akka-docs/java/fault-tolerance.rst b/akka-docs/java/fault-tolerance.rst
index 8e2dfe3cd3..17107b8a82 100644
--- a/akka-docs/java/fault-tolerance.rst
+++ b/akka-docs/java/fault-tolerance.rst
@@ -43,7 +43,7 @@ For the sake of demonstration let us consider the following strategy:
    :include: strategy
 
 I have chosen a few well-known exception types in order to demonstrate the
-application of the fault handling actions described in :ref:`supervision`.
+application of the fault handling directives described in :ref:`supervision`.
 First off, it is a one-for-one strategy, meaning that each child is treated
 separately (an all-for-one strategy works very similarly, the only difference
 is that any decision is applied to all children of the supervisor, not only the
@@ -71,7 +71,7 @@ in the same way as the default strategy defined above.
 Test Application
 ----------------
 
-The following section shows the effects of the different actions in practice,
+The following section shows the effects of the different directives in practice,
 wherefore a test setup is needed. First off, we need a suitable supervisor:
 
 .. includecode:: code/akka/docs/actor/FaultHandlingTestBase.java
@@ -93,13 +93,13 @@ Let us create actors:
 .. includecode:: code/akka/docs/actor/FaultHandlingTestBase.java
    :include: create
 
-The first test shall demonstrate the ``Resume`` action, so we try it out by
+The first test shall demonstrate the ``Resume`` directive, so we try it out by
 setting some non-initial state in the actor and have it fail:
 
 .. includecode:: code/akka/docs/actor/FaultHandlingTestBase.java
    :include: resume
 
-As you can see the value 42 survives the fault handling action. Now, if we
+As you can see the value 42 survives the fault handling directive. Now, if we
 change the failure to a more serious ``NullPointerException``, that will no
 longer be the case:
 
@@ -113,7 +113,7 @@ terminated by the supervisor:
    :include: stop
 
 Up to now the supervisor was completely unaffected by the child’s failure,
-because the actions set did handle it. In case of an ``Exception``, this is not
+because the directives set did handle it. In case of an ``Exception``, this is not
 true anymore and the supervisor escalates the failure.
 
 .. includecode:: code/akka/docs/actor/FaultHandlingTestBase.java
@@ -123,7 +123,7 @@ The supervisor itself is supervised by the top-level actor provided by the
 :class:`ActorSystem`, which has the default policy to restart in case of all
 ``Exception`` cases (with the notable exceptions of
 ``ActorInitializationException`` and ``ActorKilledException``). Since the
-default action in case of a restart is to kill all children, we expected our poor
+default directive in case of a restart is to kill all children, we expected our poor
 child not to survive this failure.
 In case this is not desired (which depends on the use case), we need to use a
diff --git a/akka-docs/scala/fault-tolerance.rst b/akka-docs/scala/fault-tolerance.rst
index 8eaf9398b4..f8b9fe0631 100644
--- a/akka-docs/scala/fault-tolerance.rst
+++ b/akka-docs/scala/fault-tolerance.rst
@@ -43,7 +43,7 @@ For the sake of demonstration let us consider the following strategy:
    :include: strategy
 
 I have chosen a few well-known exception types in order to demonstrate the
-application of the fault handling actions described in :ref:`supervision`.
+application of the fault handling directives described in :ref:`supervision`.
 First off, it is a one-for-one strategy, meaning that each child is treated
 separately (an all-for-one strategy works very similarly, the only difference
 is that any decision is applied to all children of the supervisor, not only the
@@ -53,8 +53,8 @@ that the respective limit does not apply, leaving the possibility to specify an
 absolute upper limit on the restarts or to make the restarts work infinitely.
 
 The match statement which forms the bulk of the body is of type ``Decider``,
-which is a ``PartialFunction[Throwable, Action]``. This
-is the piece which maps child failure types to their corresponding actions.
+which is a ``PartialFunction[Throwable, Directive]``. This
+is the piece which maps child failure types to their corresponding directives.
 
 Default Supervisor Strategy
 ---------------------------
@@ -76,7 +76,7 @@ in the same way as the default strategy defined above.
 Test Application
 ----------------
 
-The following section shows the effects of the different actions in practice,
+The following section shows the effects of the different directives in practice,
 wherefore a test setup is needed. First off, we need a suitable supervisor:
 
 .. includecode:: code/akka/docs/actor/FaultHandlingDocSpec.scala
@@ -99,13 +99,13 @@ Let us create actors:
 .. includecode:: code/akka/docs/actor/FaultHandlingDocSpec.scala
    :include: create
 
-The first test shall demonstrate the ``Resume`` action, so we try it out by
+The first test shall demonstrate the ``Resume`` directive, so we try it out by
 setting some non-initial state in the actor and have it fail:
 
 .. includecode:: code/akka/docs/actor/FaultHandlingDocSpec.scala
    :include: resume
 
-As you can see the value 42 survives the fault handling action. Now, if we
+As you can see the value 42 survives the fault handling directive. Now, if we
 change the failure to a more serious ``NullPointerException``, that will no
 longer be the case:
 
@@ -119,7 +119,7 @@ terminated by the supervisor:
    :include: stop
 
 Up to now the supervisor was completely unaffected by the child’s failure,
-because the actions set did handle it. In case of an ``Exception``, this is not
+because the directives set did handle it. In case of an ``Exception``, this is not
 true anymore and the supervisor escalates the failure.
 
 .. includecode:: code/akka/docs/actor/FaultHandlingDocSpec.scala
@@ -129,7 +129,7 @@ The supervisor itself is supervised by the top-level actor provided by the
 :class:`ActorSystem`, which has the default policy to restart in case of all
 ``Exception`` cases (with the notable exceptions of
 ``ActorInitializationException`` and ``ActorKilledException``). Since the
-default action in case of a restart is to kill all children, we expected our poor
+default directive in case of a restart is to kill all children, we expected our poor
 child not to survive this failure.
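The strategy these two guides exercise is pulled in via ``includecode`` and therefore not visible in the hunks above; as a rough Scala sketch it corresponds to the following (directive names as renamed by this commit, exception cases mirroring the Java sample earlier in the patch, retry limits illustrative):

    import akka.actor.OneForOneStrategy
    import akka.actor.SupervisorStrategy._
    import akka.util.duration._

    // A Decider is a PartialFunction[Throwable, Directive]; these cases are a
    // hedged sketch of the strategy referenced by the docs above.
    val strategy = OneForOneStrategy(maxNrOfRetries = 10, withinTimeRange = 1 minute) {
      case _: ArithmeticException      ⇒ Resume
      case _: NullPointerException     ⇒ Restart
      case _: IllegalArgumentException ⇒ Stop
      case _: Exception                ⇒ Escalate
    }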
 In case this is not desired (which depends on the use case), we need to use a

From 92426a82d953ae117fa3f74637ebe616adb0801e Mon Sep 17 00:00:00 2001
From: Viktor Klang
Date: Tue, 31 Jan 2012 17:19:38 +0100
Subject: [PATCH 67/94] Renaming Future.or to fallbackTo

---
 .../src/test/scala/akka/dispatch/FutureSpec.scala         | 8 ++++----
 akka-actor/src/main/scala/akka/dispatch/Future.scala      | 2 +-
 .../java/code/akka/docs/future/FutureDocTestBase.java     | 6 +++---
 akka-docs/java/futures.rst                                | 4 ++--
 akka-docs/scala/code/akka/docs/future/FutureDocSpec.scala | 8 ++++----
 akka-docs/scala/futures.rst                               | 4 ++--
 6 files changed, 16 insertions(+), 16 deletions(-)

diff --git a/akka-actor-tests/src/test/scala/akka/dispatch/FutureSpec.scala b/akka-actor-tests/src/test/scala/akka/dispatch/FutureSpec.scala
index 71db22cd9a..6e67fef831 100644
--- a/akka-actor-tests/src/test/scala/akka/dispatch/FutureSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/dispatch/FutureSpec.scala
@@ -55,11 +55,11 @@ class FutureSpec extends AkkaSpec with Checkers with BeforeAndAfterAll with Defa
       val empty = Promise[String]()
       val timedOut = Promise.successful[String]("Timedout")
 
-      Await.result(failure or timedOut, timeout.duration) must be("Timedout")
-      Await.result(timedOut or empty, timeout.duration) must be("Timedout")
-      Await.result(failure or failure or timedOut, timeout.duration) must be("Timedout")
+      Await.result(failure fallbackTo timedOut, timeout.duration) must be("Timedout")
+      Await.result(timedOut fallbackTo empty, timeout.duration) must be("Timedout")
+      Await.result(failure fallbackTo failure fallbackTo timedOut, timeout.duration) must be("Timedout")
       intercept[RuntimeException] {
-        Await.result(failure or otherFailure, timeout.duration)
+        Await.result(failure fallbackTo otherFailure, timeout.duration)
       }.getMessage must be("last")
     }
   }
diff --git a/akka-actor/src/main/scala/akka/dispatch/Future.scala b/akka-actor/src/main/scala/akka/dispatch/Future.scala
index 495faba5d6..b70b5f45fa 100644
--- a/akka-actor/src/main/scala/akka/dispatch/Future.scala
+++ b/akka-actor/src/main/scala/akka/dispatch/Future.scala
@@ -442,7 +442,7 @@ sealed trait Future[+T] extends japi.Future[T] with Await.Awaitable[T] {
    * Returns a new Future that will either hold the successful value of this Future,
    * or, if this Future fails, it will hold the result of "that" Future.
    */
-  def or[U >: T](that: Future[U]): Future[U] = {
+  def fallbackTo[U >: T](that: Future[U]): Future[U] = {
     val p = Promise[U]()
     onComplete {
       case r @ Right(_) ⇒ p complete r
diff --git a/akka-docs/java/code/akka/docs/future/FutureDocTestBase.java b/akka-docs/java/code/akka/docs/future/FutureDocTestBase.java
index e642047709..5eac2e891b 100644
--- a/akka-docs/java/code/akka/docs/future/FutureDocTestBase.java
+++ b/akka-docs/java/code/akka/docs/future/FutureDocTestBase.java
@@ -382,7 +382,7 @@ public class FutureDocTestBase {
     }
 
     {
-      //#or
+      //#fallback-to
       Future<String> future1 = Futures.failed(new IllegalStateException("OHNOES1"), system.dispatcher());
       Future<String> future2 = Futures.failed(new IllegalStateException("OHNOES2"), system.dispatcher());
       Future<String> future3 = Futures.successful("bar", system.dispatcher());
       Future<String> future4 =
-        future1.or(future2).or(future3); // Will have "bar" in this case
+        future1.fallbackTo(future2).fallbackTo(future3); // Will have "bar" in this case
       String result = Await.result(future4, Duration.create(1, SECONDS));
       assertEquals("bar", result);
-      //#or
+      //#fallback-to
     }
   }
 
diff --git a/akka-docs/java/futures.rst b/akka-docs/java/futures.rst
index e9b743535a..9d576dc002 100644
--- a/akka-docs/java/futures.rst
+++ b/akka-docs/java/futures.rst
@@ -179,11 +179,11 @@ For this Akka supports ``onComplete``, ``onSuccess`` and ``onFailure``, of which
 Auxiliary methods
 -----------------
 
-``Future`` ``or`` combines 2 Futures into a new ``Future``, and will hold the successful value of the second ``Future``
+``Future`` ``fallbackTo`` combines 2 Futures into a new ``Future``, and will hold the successful value of the second ``Future``
 if the first ``Future`` fails.
 
 .. includecode:: code/akka/docs/future/FutureDocTestBase.java
-   :include: or
+   :include: fallback-to
 
 You can also combine two Futures into a new ``Future`` that will hold a tuple of the two Futures successful results,
 using the ``zip`` operation.
diff --git a/akka-docs/scala/code/akka/docs/future/FutureDocSpec.scala b/akka-docs/scala/code/akka/docs/future/FutureDocSpec.scala
index 175fc08ff5..bf85ee9cde 100644
--- a/akka-docs/scala/code/akka/docs/future/FutureDocSpec.scala
+++ b/akka-docs/scala/code/akka/docs/future/FutureDocSpec.scala
@@ -275,13 +275,13 @@ class FutureDocSpec extends AkkaSpec {
     Await.result(future3, 1 second) must be("foo bar")
   }
 
-  "demonstrate usage of or" in {
+  "demonstrate usage of fallbackTo" in {
     val future1 = Future { "foo" }
     val future2 = Future { "bar" }
     val future3 = Future { "pigdog" }
-    //#or
-    val future4 = future1 or future2 or future3
-    //#or
+    //#fallback-to
+    val future4 = future1 fallbackTo future2 fallbackTo future3
+    //#fallback-to
     Await.result(future4, 1 second) must be("foo")
   }
 
diff --git a/akka-docs/scala/futures.rst b/akka-docs/scala/futures.rst
index c46db30927..152a68b227 100644
--- a/akka-docs/scala/futures.rst
+++ b/akka-docs/scala/futures.rst
@@ -201,11 +201,11 @@ For this Akka supports ``onComplete``, ``onSuccess`` and ``onFailure``, of which
 Auxiliary methods
 -----------------
 
-``Future`` ``or`` combines 2 Futures into a new ``Future``, and will hold the successful value of the second ``Future``
+``Future`` ``fallbackTo`` combines 2 Futures into a new ``Future``, and will hold the successful value of the second ``Future``
 if the first ``Future`` fails.
 
 .. includecode:: code/akka/docs/future/FutureDocSpec.scala
-   :include: or
+   :include: fallback-to
 
 You can also combine two Futures into a new ``Future`` that will hold a tuple of the two Futures successful results,
 using the ``zip`` operation.
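The Future.scala hunk above shows only the success branch of the renamed combinator; filling in the failure branch that the diff context elides, the method plausibly reads in full (a hedged reconstruction, not verbatim source):

    def fallbackTo[U >: T](that: Future[U]): Future[U] = {
      val p = Promise[U]()
      onComplete {
        case r @ Right(_) ⇒ p complete r // this Future succeeded: keep its value
        case Left(_) ⇒ // this Future failed: adopt the outcome of `that` (inferred branch)
          that onComplete { case r2 ⇒ p complete r2 }
      }
      p
    }

This reading matches the test expectations above: chaining stops at the first success, and when every Future fails the last failure ("last") wins.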
From 571af3d8d27b6f24b274b49ebbd1bb8dbe10ecc7 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 31 Jan 2012 18:56:25 +0100 Subject: [PATCH 68/94] Removing pipeTo from akka.actor, renamed pipeTo(f,a) to pipe(f,a) --- .../test/scala/akka/dataflow/Future2Actor.scala | 3 +-- akka-actor/src/main/scala/akka/actor/package.scala | 11 ----------- .../src/main/scala/akka/pattern/Patterns.scala | 4 ++-- .../main/scala/akka/pattern/PipeToSupport.scala | 2 +- .../src/main/scala/akka/pattern/package.scala | 8 +++----- .../src/main/scala/akka/routing/Routing.scala | 2 +- .../akka/docs/actor/UntypedActorDocTestBase.java | 14 +++++++------- .../docs/actor/japi/FaultHandlingDocSample.java | 4 ++-- akka-docs/java/untyped-actors.rst | 8 ++++---- .../scala/code/akka/docs/actor/ActorDocSpec.scala | 4 ++-- .../akka/docs/actor/FaultHandlingDocSample.scala | 2 +- 11 files changed, 24 insertions(+), 38 deletions(-) diff --git a/akka-actor-tests/src/test/scala/akka/dataflow/Future2Actor.scala b/akka-actor-tests/src/test/scala/akka/dataflow/Future2Actor.scala index 09fba90fc8..eabe00a3b2 100644 --- a/akka-actor-tests/src/test/scala/akka/dataflow/Future2Actor.scala +++ b/akka-actor-tests/src/test/scala/akka/dataflow/Future2Actor.scala @@ -5,11 +5,10 @@ package akka.dataflow import akka.actor.{ Actor, Props } import akka.dispatch.{ Future, Await } -import akka.actor.future2actor import akka.util.duration._ import akka.testkit.AkkaSpec import akka.testkit.DefaultTimeout -import akka.pattern.ask +import akka.pattern.{ ask, pipeTo } class Future2ActorSpec extends AkkaSpec with DefaultTimeout { diff --git a/akka-actor/src/main/scala/akka/actor/package.scala b/akka-actor/src/main/scala/akka/actor/package.scala index c03d7f8689..9ec5348fee 100644 --- a/akka-actor/src/main/scala/akka/actor/package.scala +++ b/akka-actor/src/main/scala/akka/actor/package.scala @@ -27,15 +27,4 @@ package object actor { val i = n.lastIndexOf('.') n.substring(i + 1) } - - implicit def future2actor[T](f: akka.dispatch.Future[T]) = new { - def pipeTo(actor: ActorRef): this.type = { - f onComplete { - case Right(r) ⇒ actor ! r - case Left(f) ⇒ actor ! Status.Failure(f) - } - this - } - } - } diff --git a/akka-actor/src/main/scala/akka/pattern/Patterns.scala b/akka-actor/src/main/scala/akka/pattern/Patterns.scala index c3510d9b68..d585d88e13 100644 --- a/akka-actor/src/main/scala/akka/pattern/Patterns.scala +++ b/akka-actor/src/main/scala/akka/pattern/Patterns.scala @@ -83,10 +83,10 @@ object Patterns { * // apply some transformation (i.e. enrich with request info) * final Future transformed = f.map(new akka.japi.Function() { ... 
}); * // send it on to the next stage - * Patterns.pipeTo(transformed, nextActor); + * Patterns.pipe(transformed, nextActor); * }}} */ - def pipeTo[T](future: Future[T], actorRef: ActorRef): Future[T] = akka.pattern.pipeTo(future, actorRef) + def pipe[T](future: Future[T], recipient: ActorRef): Future[T] = akka.pattern.pipe(future, recipient) /** * Returns a [[akka.dispatch.Future]] that will be completed with success (value `true`) when diff --git a/akka-actor/src/main/scala/akka/pattern/PipeToSupport.scala b/akka-actor/src/main/scala/akka/pattern/PipeToSupport.scala index 26f3b68e38..f386209458 100644 --- a/akka-actor/src/main/scala/akka/pattern/PipeToSupport.scala +++ b/akka-actor/src/main/scala/akka/pattern/PipeToSupport.scala @@ -9,7 +9,7 @@ import akka.dispatch.Future object PipeToSupport { class PipeableFuture[T](val future: Future[T]) { - def pipeTo(actorRef: ActorRef): Future[T] = akka.pattern.pipeTo(future, actorRef) + def pipeTo(actorRef: ActorRef): Future[T] = akka.pattern.pipe(future, actorRef) } } \ No newline at end of file diff --git a/akka-actor/src/main/scala/akka/pattern/package.scala b/akka-actor/src/main/scala/akka/pattern/package.scala index ac8fcf2df2..2a8c03229f 100644 --- a/akka-actor/src/main/scala/akka/pattern/package.scala +++ b/akka-actor/src/main/scala/akka/pattern/package.scala @@ -131,13 +131,11 @@ package object pattern { * * [see [[akka.dispatch.Future]] for a description of `flow`] */ - def pipeTo[T](future: Future[T], actorRef: ActorRef): Future[T] = { + def pipe[T](future: Future[T], recipient: ActorRef): Future[T] = future onComplete { - case Right(r) ⇒ actorRef ! r - case Left(f) ⇒ actorRef ! Status.Failure(f) + case Right(r) ⇒ recipient ! r + case Left(f) ⇒ recipient ! Status.Failure(f) } - future - } /** * Returns a [[akka.dispatch.Future]] that will be completed with success (value `true`) when diff --git a/akka-actor/src/main/scala/akka/routing/Routing.scala b/akka-actor/src/main/scala/akka/routing/Routing.scala index 4ef7cff330..e2e6f14db7 100644 --- a/akka-actor/src/main/scala/akka/routing/Routing.scala +++ b/akka-actor/src/main/scala/akka/routing/Routing.scala @@ -10,7 +10,7 @@ import akka.util.Duration import akka.util.duration._ import com.typesafe.config.Config import akka.config.ConfigurationException -import akka.pattern.AskSupport +import akka.pattern.{ AskSupport, pipeTo } import scala.collection.JavaConversions.iterableAsScalaIterable /** diff --git a/akka-docs/java/code/akka/docs/actor/UntypedActorDocTestBase.java b/akka-docs/java/code/akka/docs/actor/UntypedActorDocTestBase.java index b85599c3a0..a6d246fb2b 100644 --- a/akka-docs/java/code/akka/docs/actor/UntypedActorDocTestBase.java +++ b/akka-docs/java/code/akka/docs/actor/UntypedActorDocTestBase.java @@ -38,16 +38,16 @@ import akka.util.Duration; import akka.actor.ActorTimeoutException; //#import-gracefulStop -//#import-askPipeTo +//#import-askPipe import static akka.pattern.Patterns.ask; -import static akka.pattern.Patterns.pipeTo; +import static akka.pattern.Patterns.pipe; import akka.dispatch.Future; import akka.dispatch.Futures; import akka.util.Duration; import akka.util.Timeout; import java.util.concurrent.TimeUnit; import java.util.ArrayList; -//#import-askPipeTo +//#import-askPipe import akka.actor.Props; import akka.actor.UntypedActor; @@ -224,12 +224,12 @@ public class UntypedActorDocTestBase { } @Test - public void usePatternsAskPipeTo() { + public void usePatternsAskPipe() { ActorSystem system = ActorSystem.create("MySystem"); ActorRef actorA = system.actorOf(new 
 Props(MyUntypedActor.class));
     ActorRef actorB = system.actorOf(new Props(MyUntypedActor.class));
     ActorRef actorC = system.actorOf(new Props(MyUntypedActor.class));
-    //#ask-pipeTo
+    //#ask-pipe
     final Timeout t = new Timeout(Duration.create(5, TimeUnit.SECONDS));
 
     final ArrayList<Future<Object>> futures = new ArrayList<Future<Object>>();
@@ -247,8 +247,8 @@ public class UntypedActorDocTestBase {
       }
     });
 
-    pipeTo(transformed, actorC);
-    //#ask-pipeTo
+    pipe(transformed, actorC);
+    //#ask-pipe
     system.shutdown();
   }
 
diff --git a/akka-docs/java/code/akka/docs/actor/japi/FaultHandlingDocSample.java b/akka-docs/java/code/akka/docs/actor/japi/FaultHandlingDocSample.java
index 9da5c91248..2490e50794 100644
--- a/akka-docs/java/code/akka/docs/actor/japi/FaultHandlingDocSample.java
+++ b/akka-docs/java/code/akka/docs/actor/japi/FaultHandlingDocSample.java
@@ -24,7 +24,7 @@ import static akka.japi.Util.manifest;
 import static akka.actor.SupervisorStrategy.*;
 
 import static akka.pattern.Patterns.ask;
-import static akka.pattern.Patterns.pipeTo;
+import static akka.pattern.Patterns.pipe;
 
 import static akka.docs.actor.japi.FaultHandlingDocSample.WorkerApi.*;
 import static akka.docs.actor.japi.FaultHandlingDocSample.CounterServiceApi.*;
@@ -145,7 +145,7 @@ public class FaultHandlingDocSample {
       counterService.tell(new Increment(1), getSelf());
 
       // Send current progress to the initial sender
-      pipeTo(ask(counterService, GetCurrentCount, askTimeout)
+      pipe(ask(counterService, GetCurrentCount, askTimeout)
         .mapTo(manifest(CurrentCount.class))
         .map(new Mapper<CurrentCount, Progress>() {
           public Progress apply(CurrentCount c) {
diff --git a/akka-docs/java/untyped-actors.rst b/akka-docs/java/untyped-actors.rst
index d755359f60..5120bb908d 100644
--- a/akka-docs/java/untyped-actors.rst
+++ b/akka-docs/java/untyped-actors.rst
@@ -323,15 +323,15 @@ Ask: Send-And-Receive-Future
 The ``ask`` pattern involves actors as well as futures, hence it is offered as
 a use pattern rather than a method on :class:`ActorRef`:
 
-.. includecode:: code/akka/docs/actor/UntypedActorDocTestBase.java#import-askPipeTo
+.. includecode:: code/akka/docs/actor/UntypedActorDocTestBase.java#import-askPipe
 
-.. includecode:: code/akka/docs/actor/UntypedActorDocTestBase.java#ask-pipeTo
+.. includecode:: code/akka/docs/actor/UntypedActorDocTestBase.java#ask-pipe
 
-This example demonstrates ``ask`` together with the ``pipeTo`` pattern on
+This example demonstrates ``ask`` together with the ``pipe`` pattern on
 futures, because this is likely to be a common combination. Please note that
 all of the above is completely non-blocking and asynchronous: ``ask`` produces
 a :class:`Future`, two of which are composed into a new future using the
-:meth:`Futures.sequence` and :meth:`map` methods and then ``pipeTo`` installs
+:meth:`Futures.sequence` and :meth:`map` methods and then ``pipe`` installs
 an ``onComplete``-handler on the future to effect the submission of the
 aggregated :class:`Result` to another actor.
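Note that ``Future2Actor.scala`` and ``Routing.scala`` above still import a ``pipeTo`` from ``akka.pattern`` even though the free function is now called ``pipe``, so the package object presumably retains an implicit enrichment along these lines (inferred, not shown in any hunk of this patch):

    import akka.dispatch.Future
    import akka.pattern.PipeToSupport

    // Inferred sketch: the implicit that keeps `future pipeTo actorRef` compiling,
    // delegating to the PipeableFuture wrapper introduced by this commit.
    implicit def pipeTo[T](future: Future[T]): PipeToSupport.PipeableFuture[T] =
      new PipeToSupport.PipeableFuture(future)

The ActorDocSpec hunk just below exercises both spellings: ``f pipeTo actorD`` via this enrichment and ``pipe(f, actorD)`` as a plain call.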
diff --git a/akka-docs/scala/code/akka/docs/actor/ActorDocSpec.scala b/akka-docs/scala/code/akka/docs/actor/ActorDocSpec.scala index a4c903b564..4698c22315 100644 --- a/akka-docs/scala/code/akka/docs/actor/ActorDocSpec.scala +++ b/akka-docs/scala/code/akka/docs/actor/ActorDocSpec.scala @@ -314,7 +314,7 @@ class ActorDocSpec extends AkkaSpec(Map("akka.loglevel" -> "INFO")) { "using pattern ask / pipeTo" in { val actorA, actorB, actorC, actorD = system.actorOf(Props.empty) //#ask-pipeTo - import akka.pattern.{ ask, pipeTo } + import akka.pattern.{ ask, pipeTo, pipe } case class Result(x: Int, s: String, d: Double) case object Request @@ -329,7 +329,7 @@ class ActorDocSpec extends AkkaSpec(Map("akka.loglevel" -> "INFO")) { } yield Result(x, s, d) f pipeTo actorD // .. or .. - pipeTo(f, actorD) + pipe(f, actorD) //#ask-pipeTo } diff --git a/akka-docs/scala/code/akka/docs/actor/FaultHandlingDocSample.scala b/akka-docs/scala/code/akka/docs/actor/FaultHandlingDocSample.scala index fbdf3e25b9..09f32eee91 100644 --- a/akka-docs/scala/code/akka/docs/actor/FaultHandlingDocSample.scala +++ b/akka-docs/scala/code/akka/docs/actor/FaultHandlingDocSample.scala @@ -11,7 +11,7 @@ import akka.util.duration._ import akka.util.Duration import akka.util.Timeout import akka.event.LoggingReceive -import akka.pattern.ask +import akka.pattern.{ ask, pipeTo } import com.typesafe.config.ConfigFactory //#imports From e79c22d4ca82241d09af733903355bc1202c805f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Tue, 31 Jan 2012 19:25:12 +0100 Subject: [PATCH 69/94] Removed unused classes referencing CORBA from UUID --- .../src/main/java/com/eaio/uuid/UUID.java | 2 +- .../main/java/com/eaio/uuid/UUIDHelper.java | 150 +++++++++--------- .../main/java/com/eaio/uuid/UUIDHolder.java | 66 ++++---- 3 files changed, 109 insertions(+), 109 deletions(-) diff --git a/akka-actor/src/main/java/com/eaio/uuid/UUID.java b/akka-actor/src/main/java/com/eaio/uuid/UUID.java index 46bc867cc0..59a017b4ff 100644 --- a/akka-actor/src/main/java/com/eaio/uuid/UUID.java +++ b/akka-actor/src/main/java/com/eaio/uuid/UUID.java @@ -32,7 +32,7 @@ import java.io.ObjectInputStream; import java.io.ObjectOutputStream; import java.io.Serializable; -import org.omg.CORBA.portable.IDLEntity; +//import org.omg.CORBA.portable.IDLEntity; import com.eaio.util.lang.Hex; diff --git a/akka-actor/src/main/java/com/eaio/uuid/UUIDHelper.java b/akka-actor/src/main/java/com/eaio/uuid/UUIDHelper.java index 7abbe85895..69100102d9 100644 --- a/akka-actor/src/main/java/com/eaio/uuid/UUIDHelper.java +++ b/akka-actor/src/main/java/com/eaio/uuid/UUIDHelper.java @@ -1,86 +1,86 @@ -package com.eaio.uuid; +// package com.eaio.uuid; -/** -* com/eaio/uuid/UUIDHelper.java . -* Generated by the IDL-to-Java compiler (portable), version "3.1" -* from uuid.idl -* Sonntag, 7. März 2004 21.35 Uhr CET -*/ +// /** +// * com/eaio/uuid/UUIDHelper.java . +// * Generated by the IDL-to-Java compiler (portable), version "3.1" +// * from uuid.idl +// * Sonntag, 7. März 2004 21.35 Uhr CET +// */ -/** - * The UUID struct. - */ -abstract public class UUIDHelper -{ - private static String _id = "IDL:com/eaio/uuid/UUID:1.0"; +// /** +// * The UUID struct. 
+// */ +// abstract public class UUIDHelper +// { +// private static String _id = "IDL:com/eaio/uuid/UUID:1.0"; - public static void insert (org.omg.CORBA.Any a, com.eaio.uuid.UUID that) - { - org.omg.CORBA.portable.OutputStream out = a.create_output_stream (); - a.type (type ()); - write (out, that); - a.read_value (out.create_input_stream (), type ()); - } +// public static void insert (org.omg.CORBA.Any a, com.eaio.uuid.UUID that) +// { +// org.omg.CORBA.portable.OutputStream out = a.create_output_stream (); +// a.type (type ()); +// write (out, that); +// a.read_value (out.create_input_stream (), type ()); +// } - public static com.eaio.uuid.UUID extract (org.omg.CORBA.Any a) - { - return read (a.create_input_stream ()); - } +// public static com.eaio.uuid.UUID extract (org.omg.CORBA.Any a) +// { +// return read (a.create_input_stream ()); +// } - private static org.omg.CORBA.TypeCode __typeCode = null; - private static boolean __active = false; - synchronized public static org.omg.CORBA.TypeCode type () - { - if (__typeCode == null) - { - synchronized (org.omg.CORBA.TypeCode.class) - { - if (__typeCode == null) - { - if (__active) - { - return org.omg.CORBA.ORB.init().create_recursive_tc ( _id ); - } - __active = true; - org.omg.CORBA.StructMember[] _members0 = new org.omg.CORBA.StructMember [2]; - org.omg.CORBA.TypeCode _tcOf_members0 = null; - _tcOf_members0 = org.omg.CORBA.ORB.init ().get_primitive_tc (org.omg.CORBA.TCKind.tk_longlong); - _members0[0] = new org.omg.CORBA.StructMember ( - "time", - _tcOf_members0, - null); - _tcOf_members0 = org.omg.CORBA.ORB.init ().get_primitive_tc (org.omg.CORBA.TCKind.tk_longlong); - _members0[1] = new org.omg.CORBA.StructMember ( - "clockSeqAndNode", - _tcOf_members0, - null); - __typeCode = org.omg.CORBA.ORB.init ().create_struct_tc (com.eaio.uuid.UUIDHelper.id (), "UUID", _members0); - __active = false; - } - } - } - return __typeCode; - } +// private static org.omg.CORBA.TypeCode __typeCode = null; +// private static boolean __active = false; +// synchronized public static org.omg.CORBA.TypeCode type () +// { +// if (__typeCode == null) +// { +// synchronized (org.omg.CORBA.TypeCode.class) +// { +// if (__typeCode == null) +// { +// if (__active) +// { +// return org.omg.CORBA.ORB.init().create_recursive_tc ( _id ); +// } +// __active = true; +// org.omg.CORBA.StructMember[] _members0 = new org.omg.CORBA.StructMember [2]; +// org.omg.CORBA.TypeCode _tcOf_members0 = null; +// _tcOf_members0 = org.omg.CORBA.ORB.init ().get_primitive_tc (org.omg.CORBA.TCKind.tk_longlong); +// _members0[0] = new org.omg.CORBA.StructMember ( +// "time", +// _tcOf_members0, +// null); +// _tcOf_members0 = org.omg.CORBA.ORB.init ().get_primitive_tc (org.omg.CORBA.TCKind.tk_longlong); +// _members0[1] = new org.omg.CORBA.StructMember ( +// "clockSeqAndNode", +// _tcOf_members0, +// null); +// __typeCode = org.omg.CORBA.ORB.init ().create_struct_tc (com.eaio.uuid.UUIDHelper.id (), "UUID", _members0); +// __active = false; +// } +// } +// } +// return __typeCode; +// } - public static String id () - { - return _id; - } +// public static String id () +// { +// return _id; +// } - public static com.eaio.uuid.UUID read (org.omg.CORBA.portable.InputStream istream) - { - com.eaio.uuid.UUID value = new com.eaio.uuid.UUID (); - value.time = istream.read_longlong (); - value.clockSeqAndNode = istream.read_longlong (); - return value; - } +// public static com.eaio.uuid.UUID read (org.omg.CORBA.portable.InputStream istream) +// { +// com.eaio.uuid.UUID value = new 
com.eaio.uuid.UUID (); +// value.time = istream.read_longlong (); +// value.clockSeqAndNode = istream.read_longlong (); +// return value; +// } - public static void write (org.omg.CORBA.portable.OutputStream ostream, com.eaio.uuid.UUID value) - { - ostream.write_longlong (value.time); - ostream.write_longlong (value.clockSeqAndNode); - } +// public static void write (org.omg.CORBA.portable.OutputStream ostream, com.eaio.uuid.UUID value) +// { +// ostream.write_longlong (value.time); +// ostream.write_longlong (value.clockSeqAndNode); +// } -} +// } diff --git a/akka-actor/src/main/java/com/eaio/uuid/UUIDHolder.java b/akka-actor/src/main/java/com/eaio/uuid/UUIDHolder.java index d5531f5e00..bb529fac98 100644 --- a/akka-actor/src/main/java/com/eaio/uuid/UUIDHolder.java +++ b/akka-actor/src/main/java/com/eaio/uuid/UUIDHolder.java @@ -1,42 +1,42 @@ -package com.eaio.uuid; +// package com.eaio.uuid; -/** -* com/eaio/uuid/UUIDHolder.java . -* Generated by the IDL-to-Java compiler (portable), version "3.1" -* from uuid.idl -* Sonntag, 7. März 2004 21.35 Uhr CET -*/ +// /** +// * com/eaio/uuid/UUIDHolder.java . +// * Generated by the IDL-to-Java compiler (portable), version "3.1" +// * from uuid.idl +// * Sonntag, 7. März 2004 21.35 Uhr CET +// */ -/** - * The UUID struct. - */ -public final class UUIDHolder implements org.omg.CORBA.portable.Streamable -{ - public com.eaio.uuid.UUID value = null; +// /** +// * The UUID struct. +// */ +// public final class UUIDHolder implements org.omg.CORBA.portable.Streamable +// { +// public com.eaio.uuid.UUID value = null; - public UUIDHolder () - { - } +// public UUIDHolder () +// { +// } - public UUIDHolder (com.eaio.uuid.UUID initialValue) - { - value = initialValue; - } +// public UUIDHolder (com.eaio.uuid.UUID initialValue) +// { +// value = initialValue; +// } - public void _read (org.omg.CORBA.portable.InputStream i) - { - value = com.eaio.uuid.UUIDHelper.read (i); - } +// public void _read (org.omg.CORBA.portable.InputStream i) +// { +// value = com.eaio.uuid.UUIDHelper.read (i); +// } - public void _write (org.omg.CORBA.portable.OutputStream o) - { - com.eaio.uuid.UUIDHelper.write (o, value); - } +// public void _write (org.omg.CORBA.portable.OutputStream o) +// { +// com.eaio.uuid.UUIDHelper.write (o, value); +// } - public org.omg.CORBA.TypeCode _type () - { - return com.eaio.uuid.UUIDHelper.type (); - } +// public org.omg.CORBA.TypeCode _type () +// { +// return com.eaio.uuid.UUIDHelper.type (); +// } -} +// } From fd151d3c95f4b47608381bfc2a12182e2d825ceb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Tue, 31 Jan 2012 19:25:12 +0100 Subject: [PATCH 70/94] Removed unused classes referencing CORBA from UUID --- .../src/main/java/com/eaio/uuid/UUID.java | 2 +- .../main/java/com/eaio/uuid/UUIDHelper.java | 86 ------------------- .../main/java/com/eaio/uuid/UUIDHolder.java | 42 --------- 3 files changed, 1 insertion(+), 129 deletions(-) delete mode 100644 akka-actor/src/main/java/com/eaio/uuid/UUIDHelper.java delete mode 100644 akka-actor/src/main/java/com/eaio/uuid/UUIDHolder.java diff --git a/akka-actor/src/main/java/com/eaio/uuid/UUID.java b/akka-actor/src/main/java/com/eaio/uuid/UUID.java index 46bc867cc0..59a017b4ff 100644 --- a/akka-actor/src/main/java/com/eaio/uuid/UUID.java +++ b/akka-actor/src/main/java/com/eaio/uuid/UUID.java @@ -32,7 +32,7 @@ import java.io.ObjectInputStream; import java.io.ObjectOutputStream; import java.io.Serializable; -import org.omg.CORBA.portable.IDLEntity; +//import 
org.omg.CORBA.portable.IDLEntity; import com.eaio.util.lang.Hex; diff --git a/akka-actor/src/main/java/com/eaio/uuid/UUIDHelper.java b/akka-actor/src/main/java/com/eaio/uuid/UUIDHelper.java deleted file mode 100644 index 7abbe85895..0000000000 --- a/akka-actor/src/main/java/com/eaio/uuid/UUIDHelper.java +++ /dev/null @@ -1,86 +0,0 @@ -package com.eaio.uuid; - - -/** -* com/eaio/uuid/UUIDHelper.java . -* Generated by the IDL-to-Java compiler (portable), version "3.1" -* from uuid.idl -* Sonntag, 7. März 2004 21.35 Uhr CET -*/ - - -/** - * The UUID struct. - */ -abstract public class UUIDHelper -{ - private static String _id = "IDL:com/eaio/uuid/UUID:1.0"; - - public static void insert (org.omg.CORBA.Any a, com.eaio.uuid.UUID that) - { - org.omg.CORBA.portable.OutputStream out = a.create_output_stream (); - a.type (type ()); - write (out, that); - a.read_value (out.create_input_stream (), type ()); - } - - public static com.eaio.uuid.UUID extract (org.omg.CORBA.Any a) - { - return read (a.create_input_stream ()); - } - - private static org.omg.CORBA.TypeCode __typeCode = null; - private static boolean __active = false; - synchronized public static org.omg.CORBA.TypeCode type () - { - if (__typeCode == null) - { - synchronized (org.omg.CORBA.TypeCode.class) - { - if (__typeCode == null) - { - if (__active) - { - return org.omg.CORBA.ORB.init().create_recursive_tc ( _id ); - } - __active = true; - org.omg.CORBA.StructMember[] _members0 = new org.omg.CORBA.StructMember [2]; - org.omg.CORBA.TypeCode _tcOf_members0 = null; - _tcOf_members0 = org.omg.CORBA.ORB.init ().get_primitive_tc (org.omg.CORBA.TCKind.tk_longlong); - _members0[0] = new org.omg.CORBA.StructMember ( - "time", - _tcOf_members0, - null); - _tcOf_members0 = org.omg.CORBA.ORB.init ().get_primitive_tc (org.omg.CORBA.TCKind.tk_longlong); - _members0[1] = new org.omg.CORBA.StructMember ( - "clockSeqAndNode", - _tcOf_members0, - null); - __typeCode = org.omg.CORBA.ORB.init ().create_struct_tc (com.eaio.uuid.UUIDHelper.id (), "UUID", _members0); - __active = false; - } - } - } - return __typeCode; - } - - public static String id () - { - return _id; - } - - public static com.eaio.uuid.UUID read (org.omg.CORBA.portable.InputStream istream) - { - com.eaio.uuid.UUID value = new com.eaio.uuid.UUID (); - value.time = istream.read_longlong (); - value.clockSeqAndNode = istream.read_longlong (); - return value; - } - - public static void write (org.omg.CORBA.portable.OutputStream ostream, com.eaio.uuid.UUID value) - { - ostream.write_longlong (value.time); - ostream.write_longlong (value.clockSeqAndNode); - } - -} diff --git a/akka-actor/src/main/java/com/eaio/uuid/UUIDHolder.java b/akka-actor/src/main/java/com/eaio/uuid/UUIDHolder.java deleted file mode 100644 index d5531f5e00..0000000000 --- a/akka-actor/src/main/java/com/eaio/uuid/UUIDHolder.java +++ /dev/null @@ -1,42 +0,0 @@ -package com.eaio.uuid; - -/** -* com/eaio/uuid/UUIDHolder.java . -* Generated by the IDL-to-Java compiler (portable), version "3.1" -* from uuid.idl -* Sonntag, 7. März 2004 21.35 Uhr CET -*/ - - -/** - * The UUID struct. 
- */ -public final class UUIDHolder implements org.omg.CORBA.portable.Streamable -{ - public com.eaio.uuid.UUID value = null; - - public UUIDHolder () - { - } - - public UUIDHolder (com.eaio.uuid.UUID initialValue) - { - value = initialValue; - } - - public void _read (org.omg.CORBA.portable.InputStream i) - { - value = com.eaio.uuid.UUIDHelper.read (i); - } - - public void _write (org.omg.CORBA.portable.OutputStream o) - { - com.eaio.uuid.UUIDHelper.write (o, value); - } - - public org.omg.CORBA.TypeCode _type () - { - return com.eaio.uuid.UUIDHelper.type (); - } - -} From f915319e47417542d8e15b65d886a75a2075f068 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Jonas=20Bone=CC=81r?= Date: Tue, 31 Jan 2012 19:31:56 +0100 Subject: [PATCH 71/94] Removed unused import --- akka-actor/src/main/java/com/eaio/uuid/UUID.java | 2 -- 1 file changed, 2 deletions(-) diff --git a/akka-actor/src/main/java/com/eaio/uuid/UUID.java b/akka-actor/src/main/java/com/eaio/uuid/UUID.java index 59a017b4ff..a578a68c6d 100644 --- a/akka-actor/src/main/java/com/eaio/uuid/UUID.java +++ b/akka-actor/src/main/java/com/eaio/uuid/UUID.java @@ -32,8 +32,6 @@ import java.io.ObjectInputStream; import java.io.ObjectOutputStream; import java.io.Serializable; -//import org.omg.CORBA.portable.IDLEntity; - import com.eaio.util.lang.Hex; /** From dcebd1deb209d953912976f2183893ab88a8f593 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Tue, 31 Jan 2012 21:38:20 +0100 Subject: [PATCH 72/94] Moving the InternalGetActor message to the companion object and verifying that it's possible to call apply from Java --- .../scala/akka/testkit/TestActorRef.scala | 4 +++- .../akka/testkit/TestActorRefJavaSpec.java | 19 +++++++++++++++++++ 2 files changed, 22 insertions(+), 1 deletion(-) create mode 100644 akka-testkit/src/test/java/akka/testkit/TestActorRefJavaSpec.java diff --git a/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala b/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala index 3cfbf0ce1b..d42cfcf165 100644 --- a/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala +++ b/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala @@ -34,7 +34,7 @@ class TestActorRef[T <: Actor]( _supervisor.path / name, false) { - private case object InternalGetActor extends AutoReceivedMessage + import TestActorRef.InternalGetActor override def newActorCell( system: ActorSystemImpl, @@ -98,6 +98,8 @@ class TestActorRef[T <: Actor]( object TestActorRef { + private case object InternalGetActor extends AutoReceivedMessage + private val number = new AtomicLong private[testkit] def randomName: String = { val l = number.getAndIncrement() diff --git a/akka-testkit/src/test/java/akka/testkit/TestActorRefJavaSpec.java b/akka-testkit/src/test/java/akka/testkit/TestActorRefJavaSpec.java new file mode 100644 index 0000000000..73350f819a --- /dev/null +++ b/akka-testkit/src/test/java/akka/testkit/TestActorRefJavaSpec.java @@ -0,0 +1,19 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. 
+ */ + +package akka.testkit; + +import org.junit.Test; +import akka.actor.Props; + +import static org.junit.Assert.*; + +public class TestActorRefJavaSpec { + + @Test + public void shouldBeAbleToUseApply() { + //Just a dummy call to make sure it compiles + TestActorRef ref = TestActorRef.apply(new Props(), null); + } +} \ No newline at end of file From 983a6d3acef876b44e1510d7ba73ffe694ea2ce4 Mon Sep 17 00:00:00 2001 From: Roland Date: Tue, 31 Jan 2012 21:48:24 +0100 Subject: [PATCH 73/94] FSM: remove Ev extractor and move -> into companion object, see #1759 --- .../test/scala/akka/actor/FSMActorSpec.scala | 24 +++++++------ .../test/scala/akka/actor/FSMTimingSpec.scala | 34 +++++++++---------- .../scala/akka/actor/FSMTransitionSpec.scala | 17 +++++----- .../src/main/scala/akka/actor/FSM.scala | 23 +++++++------ .../akka/docs/testkit/TestkitDocSpec.scala | 4 +-- .../scala/akka/testkit/TestFSMRefSpec.scala | 8 ++--- 6 files changed, 56 insertions(+), 54 deletions(-) diff --git a/akka-actor-tests/src/test/scala/akka/actor/FSMActorSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/FSMActorSpec.scala index ca6d90e721..3a2c1bb627 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/FSMActorSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/FSMActorSpec.scala @@ -7,7 +7,6 @@ package akka.actor import org.scalatest.{ BeforeAndAfterAll, BeforeAndAfterEach } import akka.testkit._ import TestEvent.Mute -import FSM._ import akka.util.duration._ import akka.event._ import com.typesafe.config.ConfigFactory @@ -52,7 +51,7 @@ object FSMActorSpec { } } case Event("hello", _) ⇒ stay replying "world" - case Event("bye", _) ⇒ stop(Shutdown) + case Event("bye", _) ⇒ stop(FSM.Shutdown) } when(Open) { @@ -63,7 +62,7 @@ object FSMActorSpec { } whenUnhandled { - case Ev(msg) ⇒ { + case Event(msg, _) ⇒ { log.warning("unhandled event " + msg + " in state " + stateName + " with data " + stateData) unhandledLatch.open stay @@ -82,7 +81,7 @@ object FSMActorSpec { } onTermination { - case StopEvent(Shutdown, Locked, _) ⇒ + case StopEvent(FSM.Shutdown, Locked, _) ⇒ // stop is called from lockstate with shutdown as reason... terminatedLatch.open } @@ -110,6 +109,8 @@ class FSMActorSpec extends AkkaSpec(Map("akka.actor.debug.fsm" -> true)) with Im "unlock the lock" in { + import FSM.{ Transition, CurrentState, SubscribeTransitionCallBack } + val latches = new Latches import latches._ @@ -163,7 +164,7 @@ class FSMActorSpec extends AkkaSpec(Map("akka.actor.debug.fsm" -> true)) with Im val fsm = TestActorRef(new Actor with FSM[Int, Null] { startWith(1, null) when(1) { - case Ev("go") ⇒ goto(2) + case Event("go", _) ⇒ goto(2) } }) val name = fsm.path.toString @@ -182,7 +183,7 @@ class FSMActorSpec extends AkkaSpec(Map("akka.actor.debug.fsm" -> true)) with Im lazy val fsm = new Actor with FSM[Int, Null] { override def preStart = { started.countDown } startWith(1, null) - when(1) { NullFunction } + when(1) { FSM.NullFunction } onTermination { case x ⇒ testActor ! 
x } @@ -190,7 +191,7 @@ class FSMActorSpec extends AkkaSpec(Map("akka.actor.debug.fsm" -> true)) with Im val ref = system.actorOf(Props(fsm)) Await.ready(started, timeout.duration) system.stop(ref) - expectMsg(1 second, fsm.StopEvent(Shutdown, 1, null)) + expectMsg(1 second, fsm.StopEvent(FSM.Shutdown, 1, null)) } "log events and transitions if asked to do so" in { @@ -204,12 +205,12 @@ class FSMActorSpec extends AkkaSpec(Map("akka.actor.debug.fsm" -> true)) with Im val fsm = TestActorRef(new Actor with LoggingFSM[Int, Null] { startWith(1, null) when(1) { - case Ev("go") ⇒ - setTimer("t", Shutdown, 1.5 seconds, false) + case Event("go", _) ⇒ + setTimer("t", FSM.Shutdown, 1.5 seconds, false) goto(2) } when(2) { - case Ev("stop") ⇒ + case Event("stop", _) ⇒ cancelTimer("t") stop } @@ -230,7 +231,7 @@ class FSMActorSpec extends AkkaSpec(Map("akka.actor.debug.fsm" -> true)) with Im expectMsgPF(1 second, hint = "processing Event(stop,null)") { case Logging.Debug(`name`, `fsmClass`, s: String) if s.startsWith("processing Event(stop,null) from Actor[") ⇒ true } - expectMsgAllOf(1 second, Logging.Debug(name, fsmClass, "canceling timer 't'"), Normal) + expectMsgAllOf(1 second, Logging.Debug(name, fsmClass, "canceling timer 't'"), FSM.Normal) expectNoMsg(1 second) system.eventStream.unsubscribe(testActor) } @@ -251,6 +252,7 @@ class FSMActorSpec extends AkkaSpec(Map("akka.actor.debug.fsm" -> true)) with Im }) fsmref ! "log" val fsm = fsmref.underlyingActor + import FSM.LogEntry expectMsg(1 second, IndexedSeq(LogEntry(1, 0, "log"))) fsmref ! "count" fsmref ! "log" diff --git a/akka-actor-tests/src/test/scala/akka/actor/FSMTimingSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/FSMTimingSpec.scala index bf5a1974ee..59468125eb 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/FSMTimingSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/FSMTimingSpec.scala @@ -160,37 +160,37 @@ object FSMTimingSpec { startWith(Initial, 0) when(Initial) { - case Ev(TestSingleTimer) ⇒ + case Event(TestSingleTimer, _) ⇒ setTimer("tester", Tick, 500 millis, false) goto(TestSingleTimer) - case Ev(TestRepeatedTimer) ⇒ + case Event(TestRepeatedTimer, _) ⇒ setTimer("tester", Tick, 100 millis, true) goto(TestRepeatedTimer) using 4 - case Ev(TestStateTimeoutOverride) ⇒ + case Event(TestStateTimeoutOverride, _) ⇒ goto(TestStateTimeout) forMax (Duration.Inf) - case Ev(x: FSMTimingSpec.State) ⇒ goto(x) + case Event(x: FSMTimingSpec.State, _) ⇒ goto(x) } when(TestStateTimeout, stateTimeout = 500 millis) { - case Ev(StateTimeout) ⇒ goto(Initial) - case Ev(Cancel) ⇒ goto(Initial) replying (Cancel) + case Event(StateTimeout, _) ⇒ goto(Initial) + case Event(Cancel, _) ⇒ goto(Initial) replying (Cancel) } when(TestSingleTimer) { - case Ev(Tick) ⇒ + case Event(Tick, _) ⇒ tester ! Tick goto(Initial) } when(TestCancelTimer) { - case Ev(Tick) ⇒ + case Event(Tick, _) ⇒ setTimer("hallo", Tock, 1 milli, false) TestKit.awaitCond(context.asInstanceOf[ActorCell].mailbox.hasMessages, 1 second) cancelTimer("hallo") sender ! Tick setTimer("hallo", Tock, 500 millis, false) stay - case Ev(Tock) ⇒ + case Event(Tock, _) ⇒ tester ! 
Tock stay - case Ev(Cancel) ⇒ + case Event(Cancel, _) ⇒ cancelTimer("hallo") goto(Initial) } @@ -206,29 +206,29 @@ object FSMTimingSpec { } when(TestCancelStateTimerInNamedTimerMessage) { // FSM is suspended after processing this message and resumed 500ms later - case Ev(Tick) ⇒ + case Event(Tick, _) ⇒ suspend(self) setTimer("named", Tock, 1 millis, false) TestKit.awaitCond(context.asInstanceOf[ActorCell].mailbox.hasMessages, 1 second) stay forMax (1 millis) replying Tick - case Ev(Tock) ⇒ + case Event(Tock, _) ⇒ goto(TestCancelStateTimerInNamedTimerMessage2) } when(TestCancelStateTimerInNamedTimerMessage2) { - case Ev(StateTimeout) ⇒ + case Event(StateTimeout, _) ⇒ goto(Initial) - case Ev(Cancel) ⇒ + case Event(Cancel, _) ⇒ goto(Initial) replying Cancel } when(TestUnhandled) { - case Ev(SetHandler) ⇒ + case Event(SetHandler, _) ⇒ whenUnhandled { - case Ev(Tick) ⇒ + case Event(Tick, _) ⇒ tester ! Unhandled(Tick) stay } stay - case Ev(Cancel) ⇒ + case Event(Cancel, _) ⇒ whenUnhandled(NullFunction) goto(Initial) } diff --git a/akka-actor-tests/src/test/scala/akka/actor/FSMTransitionSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/FSMTransitionSpec.scala index 8d8fc5e725..691be63a0b 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/FSMTransitionSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/FSMTransitionSpec.scala @@ -5,7 +5,6 @@ package akka.actor import akka.testkit._ import akka.util.duration._ -import FSM._ import akka.util.Duration object FSMTransitionSpec { @@ -17,13 +16,13 @@ object FSMTransitionSpec { class MyFSM(target: ActorRef) extends Actor with FSM[Int, Unit] { startWith(0, Unit) when(0) { - case Ev("tick") ⇒ goto(1) + case Event("tick", _) ⇒ goto(1) } when(1) { - case Ev("tick") ⇒ goto(0) + case Event("tick", _) ⇒ goto(0) } whenUnhandled { - case Ev("reply") ⇒ stay replying "reply" + case Event("reply", _) ⇒ stay replying "reply" } initialize override def preRestart(reason: Throwable, msg: Option[Any]) { target ! "restarted" } @@ -32,10 +31,10 @@ object FSMTransitionSpec { class OtherFSM(target: ActorRef) extends Actor with FSM[Int, Int] { startWith(0, 0) when(0) { - case Ev("tick") ⇒ goto(1) using (1) + case Event("tick", _) ⇒ goto(1) using (1) } when(1) { - case Ev(_) ⇒ stay + case _ ⇒ stay } onTransition { case 0 -> 1 ⇒ target ! ((stateData, nextStateData)) @@ -56,6 +55,8 @@ class FSMTransitionSpec extends AkkaSpec with ImplicitSender { "A FSM transition notifier" must { "notify listeners" in { + import FSM.{ SubscribeTransitionCallBack, CurrentState, Transition } + val fsm = system.actorOf(Props(new MyFSM(testActor))) within(1 second) { fsm ! SubscribeTransitionCallBack(testActor) @@ -77,8 +78,8 @@ class FSMTransitionSpec extends AkkaSpec with ImplicitSender { })) within(300 millis) { - fsm ! SubscribeTransitionCallBack(forward) - expectMsg(CurrentState(fsm, 0)) + fsm ! FSM.SubscribeTransitionCallBack(forward) + expectMsg(FSM.CurrentState(fsm, 0)) system.stop(forward) fsm ! "tick" expectNoMsg diff --git a/akka-actor/src/main/scala/akka/actor/FSM.scala b/akka-actor/src/main/scala/akka/actor/FSM.scala index 5660811c00..b277142e76 100644 --- a/akka-actor/src/main/scala/akka/actor/FSM.scala +++ b/akka-actor/src/main/scala/akka/actor/FSM.scala @@ -48,6 +48,14 @@ object FSM { } } + /** + * This extractor is just convenience for matching a (S, S) pair, including a + * reminder what the new state is. 
+ */ + object -> { + def unapply[S](in: (S, S)) = Some(in) + } + case class LogEntry[S, D](stateName: S, stateData: D, event: Any) case class State[S, D](stateName: S, stateData: D, timeout: Option[Duration] = None, stopReason: Option[Reason] = None, replies: List[Any] = Nil) { @@ -174,6 +182,10 @@ trait FSM[S, D] extends Listeners { type Timeout = Option[Duration] type TransitionHandler = PartialFunction[(S, S), Unit] + // “import” so that it is visible without an import + val -> = FSM.-> + val StateTimeout = FSM.StateTimeout + val log = Logging(context.system, this) /** @@ -284,14 +296,6 @@ trait FSM[S, D] extends Listeners { */ protected final def setStateTimeout(state: S, timeout: Timeout): Unit = stateTimeouts(state) = timeout - /** - * This extractor is just convenience for matching a (S, S) pair, including a - * reminder what the new state is. - */ - object -> { - def unapply[S](in: (S, S)) = Some(in) - } - /** * Set handler which is called upon each state transition, i.e. not when * staying in the same state. This may use the pair extractor defined in the @@ -533,9 +537,6 @@ trait FSM[S, D] extends Listeners { } case class Event(event: Any, stateData: D) - object Ev { - def unapply[D](e: Event): Option[Any] = Some(e.event) - } case class StopEvent[S, D](reason: Reason, currentState: S, stateData: D) } diff --git a/akka-docs/scala/code/akka/docs/testkit/TestkitDocSpec.scala b/akka-docs/scala/code/akka/docs/testkit/TestkitDocSpec.scala index 3a4608e840..2b2cb003a9 100644 --- a/akka-docs/scala/code/akka/docs/testkit/TestkitDocSpec.scala +++ b/akka-docs/scala/code/akka/docs/testkit/TestkitDocSpec.scala @@ -89,10 +89,10 @@ class TestkitDocSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { val fsm = TestFSMRef(new Actor with FSM[Int, String] { startWith(1, "") when(1) { - case Ev("go") ⇒ goto(2) using "go" + case Event("go", _) ⇒ goto(2) using "go" } when(2) { - case Ev("back") ⇒ goto(1) using "back" + case Event("back", _) ⇒ goto(1) using "back" } }) diff --git a/akka-testkit/src/test/scala/akka/testkit/TestFSMRefSpec.scala b/akka-testkit/src/test/scala/akka/testkit/TestFSMRefSpec.scala index d2ec767504..86c6a8c7c5 100644 --- a/akka-testkit/src/test/scala/akka/testkit/TestFSMRefSpec.scala +++ b/akka-testkit/src/test/scala/akka/testkit/TestFSMRefSpec.scala @@ -12,19 +12,17 @@ import akka.util.duration._ @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) class TestFSMRefSpec extends AkkaSpec { - import FSM._ - "A TestFSMRef" must { "allow access to state data" in { val fsm = TestFSMRef(new Actor with FSM[Int, String] { startWith(1, "") when(1) { - case Ev("go") ⇒ goto(2) using "go" - case Ev(StateTimeout) ⇒ goto(2) using "timeout" + case Event("go", _) ⇒ goto(2) using "go" + case Event(StateTimeout, _) ⇒ goto(2) using "timeout" } when(2) { - case Ev("back") ⇒ goto(1) using "back" + case Event("back", _) ⇒ goto(1) using "back" } }, "test-fsm-ref-1") fsm.stateName must be(1) From 06b64680e369cb0326dc806712ff433a79e65091 Mon Sep 17 00:00:00 2001 From: Roland Date: Tue, 31 Jan 2012 21:58:31 +0100 Subject: [PATCH 74/94] doc updates for #1759 --- akka-docs/scala/fsm.rst | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/akka-docs/scala/fsm.rst b/akka-docs/scala/fsm.rst index 618381901c..2b35d21f41 100644 --- a/akka-docs/scala/fsm.rst +++ b/akka-docs/scala/fsm.rst @@ -178,7 +178,7 @@ demonstrated below: .. 
code-block:: scala
 
    when(Idle) {
-     case Ev(Start(msg)) =>         // convenience extractor when state data not needed
+     case Event(Start(msg), _) =>
        goto(Timer) using (msg, sender)
    }
 
@@ -188,9 +188,8 @@ demonstrated below:
       goto(Idle)
   }
 
-The :class:`Event(msg, data)` case class may be used directly in the pattern as
-shown in state Idle, or you may use the extractor :obj:`Ev(msg)` when the state
-data are not needed.
+The :class:`Event(msg: Any, data: D)` case class is parameterized with the data
+type held by the FSM for convenient pattern matching.
 
 Defining the Initial State
 --------------------------
@@ -216,7 +215,7 @@ do something else in this case you can specify that with
     case Event(x : X, data) =>
       log.info("Received unhandled event: " + x)
       stay
-    case Ev(msg) =>
+    case Event(msg, _) =>
       log.warning("Received unknown event: " + msg)
       goto(Error)
   }
@@ -259,7 +258,7 @@
 All modifiers can be chained to achieve a nice and concise description:
 
 .. code-block:: scala
 
    when(State) {
-     case Ev(msg) =>
+     case Event(msg, _) =>
       goto(Processing) using (msg) forMax (5 seconds) replying (WillDo)
    }
@@ -396,7 +395,7 @@ state data which is available during termination handling.
 
 .. code-block:: scala
 
    when(A) {
-     case Ev(Stop) =>
+     case Event(Stop, _) =>
       doCleanup()
       stop()
    }

From aa0b48a43b34f736869a6ed404c501ba3be8589f Mon Sep 17 00:00:00 2001
From: Patrik Nordwall
Date: Wed, 1 Feb 2012 08:49:31 +0100
Subject: [PATCH 75/94] Config dir first in classpath. See #1761

---
 akka-docs/modules/microkernel.rst  | 2 +-
 akka-kernel/src/main/dist/bin/akka | 2 +-
 scripts/samples/start              | 2 +-
 scripts/samples/start.bat          | 2 +-
 4 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/akka-docs/modules/microkernel.rst b/akka-docs/modules/microkernel.rst
index ec6eabe3ef..7600e1ebd2 100644
--- a/akka-docs/modules/microkernel.rst
+++ b/akka-docs/modules/microkernel.rst
@@ -24,7 +24,7 @@ command (on a unix-based system):
 
 .. code-block:: none
 
-   bin/start sample.kernel.hello.HelloKernel
+   bin/akka sample.kernel.hello.HelloKernel
 
 Use ``Ctrl-C`` to interrupt and exit the microkernel.
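The reason the classpath ordering matters: Typesafe Config's ``ConfigFactory.load()`` resolves ``application.conf`` from the classpath in order, so placing ``$AKKA_HOME/config`` ahead of ``lib/akka/*`` presumably lets a user-edited file shadow any defaults packaged inside the jars. A minimal sketch of code observing the effect (illustrative only, not part of this patch):

    import com.typesafe.config.ConfigFactory

    object ConfigProbe extends App {
      // With the config directory first on the classpath, settings from
      // $AKKA_HOME/config/application.conf win over any application.conf
      // bundled inside the jars under lib/akka.
      val config = ConfigFactory.load()
      println(config.getString("akka.loglevel"))
    }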
diff --git a/akka-kernel/src/main/dist/bin/akka b/akka-kernel/src/main/dist/bin/akka index 595bc6e34c..84ae2e5d78 100755 --- a/akka-kernel/src/main/dist/bin/akka +++ b/akka-kernel/src/main/dist/bin/akka @@ -19,6 +19,6 @@ declare AKKA_HOME="$(cd "$(cd "$(dirname "$0")"; pwd -P)"/..; pwd)" [ -n "$JAVA_OPTS" ] || JAVA_OPTS="-Xmx1024M -Xms1024M -Xss1M -XX:MaxPermSize=256M -XX:+UseParallelGC -XX:OnOutOfMemoryError=\"kill -9 %p\"" -[ -n "$AKKA_CLASSPATH" ] || AKKA_CLASSPATH="$AKKA_HOME/lib/scala-library.jar:$AKKA_HOME/lib/akka/*:$AKKA_HOME/config" +[ -n "$AKKA_CLASSPATH" ] || AKKA_CLASSPATH="$AKKA_HOME/lib/scala-library.jar:$AKKA_HOME/config:$AKKA_HOME/lib/akka/*" java "$JAVA_OPTS" -cp "$AKKA_CLASSPATH" -Dakka.home="$AKKA_HOME" -Dakka.kernel.quiet=$quiet akka.kernel.Main "$@" diff --git a/scripts/samples/start b/scripts/samples/start index 491c617db2..21563159f0 100755 --- a/scripts/samples/start +++ b/scripts/samples/start @@ -8,6 +8,6 @@ AKKA_HOME="$(cd "$SAMPLE"/../../../..; pwd)" [ -n "$AKKA_CLASSPATH" ] || AKKA_CLASSPATH="$AKKA_HOME/lib/scala-library.jar:$AKKA_HOME/lib/akka/*" -SAMPLE_CLASSPATH="$AKKA_CLASSPATH:$SAMPLE/lib/*:$SAMPLE/config" +SAMPLE_CLASSPATH="$SAMPLE/config:$AKKA_CLASSPATH:$SAMPLE/lib/*" java $JAVA_OPTS -cp "$SAMPLE_CLASSPATH" -Dakka.home="$SAMPLE" akka.kernel.Main diff --git a/scripts/samples/start.bat b/scripts/samples/start.bat index 1bffae4e5b..a6a3ec5e33 100644 --- a/scripts/samples/start.bat +++ b/scripts/samples/start.bat @@ -3,6 +3,6 @@ set SAMPLE=%~dp0.. set AKKA_HOME=%SAMPLE%\..\..\..\.. set JAVA_OPTS=-Xms1024M -Xmx1024M -Xss1M -XX:MaxPermSize=256M -XX:+UseParallelGC set AKKA_CLASSPATH=%AKKA_HOME%\lib\scala-library.jar;%AKKA_HOME%\lib\akka\* -set SAMPLE_CLASSPATH=%AKKA_CLASSPATH%;%SAMPLE%\lib\*;%SAMPLE%\config +set SAMPLE_CLASSPATH=%SAMPLE%\config;%AKKA_CLASSPATH%;%SAMPLE%\lib\* java %JAVA_OPTS% -cp "%SAMPLE_CLASSPATH%" -Dakka.home="%SAMPLE%" akka.kernel.Main From a950fe2d9b17f3ab18aa668113a7763b5b82bde4 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 1 Feb 2012 10:10:13 +0100 Subject: [PATCH 76/94] Switching to FJP as default in the tests --- .../src/test/scala/akka/testkit/AkkaSpec.scala | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/akka-testkit/src/test/scala/akka/testkit/AkkaSpec.scala b/akka-testkit/src/test/scala/akka/testkit/AkkaSpec.scala index 20f7e8b16a..172bdc230f 100644 --- a/akka-testkit/src/test/scala/akka/testkit/AkkaSpec.scala +++ b/akka-testkit/src/test/scala/akka/testkit/AkkaSpec.scala @@ -29,14 +29,11 @@ object AkkaSpec { stdout-loglevel = "WARNING" actor { default-dispatcher { - executor = "thread-pool-executor" - thread-pool-executor { - core-pool-size-factor = 2 - core-pool-size-min = 8 - core-pool-size-max = 8 - max-pool-size-factor = 2 - max-pool-size-min = 8 - max-pool-size-max = 8 + executor = "fork-join-executor" + fork-join-executor { + parallelism-min = 8 + parallelism-factor = 2.0 + parallelism-max = 8 } } } From 9b43134307f313e22bf0a3e095ede73c5147be20 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 1 Feb 2012 10:22:38 +0100 Subject: [PATCH 77/94] Fixing doc error --- akka-actor/src/main/scala/akka/actor/ActorRef.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/akka-actor/src/main/scala/akka/actor/ActorRef.scala b/akka-actor/src/main/scala/akka/actor/ActorRef.scala index c8a07f9779..38e8ab679f 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRef.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRef.scala @@ -57,7 +57,7 @@ import akka.event.LoggingAdapter 
* * } else if (o instanceof Request3) { * val msg = ((Request3) o).getMsg(); - * getSender().tell(other.ask(msg, 5000)); // reply with Future for holding the other’s reply (timeout 5 seconds) + * getSender().tell(ask(other, msg, 5000)); // reply with Future for holding the other’s reply (timeout 5 seconds) * * } else { * unhandled(o); From 13fb8df2465d770d0a84c2f0ba7dc7a303681e76 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Wed, 1 Feb 2012 11:26:55 +0100 Subject: [PATCH 78/94] DOC: Fixed unclear docs of OneForOneStrategy and AllForOneStrategy. --- .../src/main/scala/akka/actor/FaultHandling.scala | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/akka-actor/src/main/scala/akka/actor/FaultHandling.scala b/akka-actor/src/main/scala/akka/actor/FaultHandling.scala index 17243691b0..8a21f841bb 100644 --- a/akka-actor/src/main/scala/akka/actor/FaultHandling.scala +++ b/akka-actor/src/main/scala/akka/actor/FaultHandling.scala @@ -227,7 +227,10 @@ abstract class SupervisorStrategy { } /** - * Restart all child actors when one fails + * Applies the fault handling `Directive` (Resume, Restart, Stop) specified in the `Decider` + * to all children when one fails, as opposed to [[akka.actor.OneForOneStrategy]] that applies + * it only to the child actor that failed. + * * @param maxNrOfRetries the number of times an actor is allowed to be restarted, negative value means no limit * @param withinTimeRange duration of the time window for maxNrOfRetries, Duration.Inf means no window * @param decider = mapping from Throwable to [[akka.actor.SupervisorStrategy.Directive]], you can also use a @@ -270,7 +273,10 @@ case class AllForOneStrategy(maxNrOfRetries: Int = -1, withinTimeRange: Duration } /** - * Restart a child actor when it fails + * Applies the fault handling `Directive` (Resume, Restart, Stop) specified in the `Decider` + * to the child actor that failed, as opposed to [[akka.actor.AllForOneStrategy]] that applies + * it to all children. + * * @param maxNrOfRetries the number of times an actor is allowed to be restarted, negative value means no limit * @param withinTimeRange duration of the time window for maxNrOfRetries, Duration.Inf means no window * @param decider = mapping from Throwable to [[akka.actor.SupervisorStrategy.Directive]], you can also use a From 3363a6984d01a71aa9145cfcf993d170fdc08c2c Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 1 Feb 2012 11:39:04 +0100 Subject: [PATCH 79/94] Remove that shit --- .../akka/remote/NetworkEventStream.scala | 77 ------------------- .../akka/remote/RemoteActorRefProvider.scala | 7 -- 2 files changed, 84 deletions(-) delete mode 100644 akka-remote/src/main/scala/akka/remote/NetworkEventStream.scala diff --git a/akka-remote/src/main/scala/akka/remote/NetworkEventStream.scala b/akka-remote/src/main/scala/akka/remote/NetworkEventStream.scala deleted file mode 100644 index 7254a914f7..0000000000 --- a/akka-remote/src/main/scala/akka/remote/NetworkEventStream.scala +++ /dev/null @@ -1,77 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -package akka.remote - -import scala.collection.mutable - -import akka.actor.{ Props, Address, ActorSystemImpl, Actor } - -/** - * Stream of all kinds of network events, remote failure and connection events, cluster failure and connection events etc. - * Also provides API for sender listener management. 
- */ -object NetworkEventStream { - - private sealed trait NetworkEventStreamEvent - - private case class Register(listener: Listener, connectionAddress: Address) - extends NetworkEventStreamEvent - - private case class Unregister(listener: Listener, connectionAddress: Address) - extends NetworkEventStreamEvent - - /** - * Base trait for network event listener. - */ - trait Listener { - def notify(event: RemoteLifeCycleEvent) - } - - /** - * Channel actor with a registry of listeners. - */ - private class Channel extends Actor { - - val listeners = new mutable.HashMap[Address, mutable.Set[Listener]]() { - override def default(k: Address) = mutable.Set.empty[Listener] - } - - def receive = { - case event: RemoteClientLifeCycleEvent ⇒ - listeners(event.remoteAddress) foreach (_ notify event) - - case event: RemoteServerLifeCycleEvent ⇒ // FIXME handle RemoteServerLifeCycleEvent, ticket #1408 and #1190 - - case Register(listener, connectionAddress) ⇒ - listeners(connectionAddress) += listener - - case Unregister(listener, connectionAddress) ⇒ - listeners(connectionAddress) -= listener - - case _ ⇒ //ignore other - } - } -} - -class NetworkEventStream(system: ActorSystemImpl) { - - import NetworkEventStream._ - - // FIXME: check that this supervision is correct, ticket #1408 - private[akka] val sender = - system.systemActorOf(Props[Channel].withDispatcher("akka.remote.network-event-sender-dispatcher"), "network-event-sender") - - /** - * Registers a network event stream listener (asyncronously). - */ - def register(listener: Listener, connectionAddress: Address) = - sender ! Register(listener, connectionAddress) - - /** - * Unregisters a network event stream listener (asyncronously) . - */ - def unregister(listener: Listener, connectionAddress: Address) = - sender ! Unregister(listener, connectionAddress) -} diff --git a/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala b/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala index 4ef079457a..e94fa19be3 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala @@ -69,10 +69,6 @@ class RemoteActorRefProvider( private var _remoteDaemon: InternalActorRef = _ def remoteDaemon = _remoteDaemon - @volatile - private var _networkEventStream: NetworkEventStream = _ - def networkEventStream = _networkEventStream - def init(system: ActorSystemImpl) { local.init(system) @@ -81,9 +77,6 @@ class RemoteActorRefProvider( _serialization = SerializationExtension(system) - _networkEventStream = new NetworkEventStream(system) - system.eventStream.subscribe(networkEventStream.sender, classOf[RemoteLifeCycleEvent]) - _transport = { val fqn = remoteSettings.RemoteTransport val args = Seq( From 4753f4af02e29ecf985f45d0271d7c31cbeccb62 Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Wed, 1 Feb 2012 11:42:27 +0100 Subject: [PATCH 80/94] Remove log warning on reschedule to stopped actor. See #1760 --- akka-actor/src/main/scala/akka/actor/Scheduler.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/akka-actor/src/main/scala/akka/actor/Scheduler.scala b/akka-actor/src/main/scala/akka/actor/Scheduler.scala index eed0060e52..9ef93ef05d 100644 --- a/akka-actor/src/main/scala/akka/actor/Scheduler.scala +++ b/akka-actor/src/main/scala/akka/actor/Scheduler.scala @@ -126,7 +126,7 @@ class DefaultScheduler(hashedWheelTimer: HashedWheelTimer, receiver ! 
message // Check if the receiver is still alive and kicking before reschedule the task if (receiver.isTerminated) { - log.warning("Could not reschedule message to be sent because receiving actor has been terminated.") + log.debug("Could not reschedule message to be sent because receiving actor has been terminated.") } else { scheduleNext(timeout, delay, continuousCancellable) } From 937ecc3f502e7ebcc3240127f38351f8b010f598 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 1 Feb 2012 11:46:46 +0100 Subject: [PATCH 81/94] Adding composable actor code snippet to docs, case closed --- akka-docs/scala/actors.rst | 4 ++++ .../code/akka/docs/actor/ActorDocSpec.scala | 23 +++++++++++++++++++ 2 files changed, 27 insertions(+) diff --git a/akka-docs/scala/actors.rst b/akka-docs/scala/actors.rst index c6faa5e7e1..01ef6ade83 100644 --- a/akka-docs/scala/actors.rst +++ b/akka-docs/scala/actors.rst @@ -657,3 +657,7 @@ extend that, either through inheritance or delegation, is to use ``PartialFunction.orElse`` chaining. .. includecode:: code/akka/docs/actor/ActorDocSpec.scala#receive-orElse + +Or: + +.. includecode:: code/akka/docs/actor/ActorDocSpec.scala#receive-orElse2 \ No newline at end of file diff --git a/akka-docs/scala/code/akka/docs/actor/ActorDocSpec.scala b/akka-docs/scala/code/akka/docs/actor/ActorDocSpec.scala index a4c903b564..3f1b3cd186 100644 --- a/akka-docs/scala/code/akka/docs/actor/ActorDocSpec.scala +++ b/akka-docs/scala/code/akka/docs/actor/ActorDocSpec.scala @@ -133,6 +133,29 @@ class SpecificActor extends GenericActor { case class MyMsg(subject: String) //#receive-orElse +//#receive-orElse2 +trait ComposableActor extends Actor { + private var receives: List[Receive] = List() + protected def registerReceive(receive: Receive) { + receives = receive :: receives + } + + def receive = receives reduce { _ orElse _ } +} + +class MyComposableActor extends ComposableActor { + override def preStart() { + registerReceive({ + case "foo" ⇒ /* Do something */ + }) + + registerReceive({ + case "bar" ⇒ /* Do something */ + }) + } +} + +//#receive-orElse2 class ActorDocSpec extends AkkaSpec(Map("akka.loglevel" -> "INFO")) { "import context" in { From 2e1ad851b3dcd053d7ff32689dc37a35026c2a7b Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Wed, 1 Feb 2012 11:53:46 +0100 Subject: [PATCH 82/94] Not a running junit test, only verifies compilation --- ...tActorRefJavaSpec.java => TestActorRefJavaCompile.java} | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) rename akka-testkit/src/test/java/akka/testkit/{TestActorRefJavaSpec.java => TestActorRefJavaCompile.java} (68%) diff --git a/akka-testkit/src/test/java/akka/testkit/TestActorRefJavaSpec.java b/akka-testkit/src/test/java/akka/testkit/TestActorRefJavaCompile.java similarity index 68% rename from akka-testkit/src/test/java/akka/testkit/TestActorRefJavaSpec.java rename to akka-testkit/src/test/java/akka/testkit/TestActorRefJavaCompile.java index 73350f819a..5c13557854 100644 --- a/akka-testkit/src/test/java/akka/testkit/TestActorRefJavaSpec.java +++ b/akka-testkit/src/test/java/akka/testkit/TestActorRefJavaCompile.java @@ -7,12 +7,9 @@ package akka.testkit; import org.junit.Test; import akka.actor.Props; -import static org.junit.Assert.*; +public class TestActorRefJavaCompile { -public class TestActorRefJavaSpec { - - @Test - public void shouldBeAbleToUseApply() { + public void shouldBeAbleToCompileWhenUsingApply() { //Just a dummy call to make sure it compiles TestActorRef ref = TestActorRef.apply(new Props(), null); } From 
d4a1b38cdfd74d46fe9cb28183c77bafa07832bc Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 1 Feb 2012 12:04:42 +0100 Subject: [PATCH 83/94] Removing Timer and adding isOverdue method on Deadline --- .../src/test/scala/akka/actor/IOActor.scala | 8 ++--- .../src/main/scala/akka/util/Duration.scala | 33 +------------------ 2 files changed, 5 insertions(+), 36 deletions(-) diff --git a/akka-actor-tests/src/test/scala/akka/actor/IOActor.scala b/akka-actor-tests/src/test/scala/akka/actor/IOActor.scala index 13ed9d8c7e..62da560831 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/IOActor.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/IOActor.scala @@ -4,7 +4,7 @@ package akka.actor -import akka.util.{ ByteString, Duration, Timer } +import akka.util.{ ByteString, Duration, Deadline } import akka.util.duration._ import scala.util.continuations._ import akka.testkit._ @@ -244,13 +244,13 @@ class IOActorSpec extends AkkaSpec with DefaultTimeout { val promise = Promise[T]()(executor) - val timer = timeout match { - case Some(duration) ⇒ Some(Timer(duration)) + val timer: Option[Deadline] = timeout match { + case Some(duration) ⇒ Some(Deadline(duration)) case None ⇒ None } def check(n: Int, e: Throwable): Boolean = - (count.isEmpty || (n < count.get)) && (timer.isEmpty || timer.get.isTicking) && (filter.isEmpty || filter.get(e)) + (count.isEmpty || (n < count.get)) && (timer.isEmpty || !timer.get.isOverdue()) && (filter.isEmpty || filter.get(e)) def run(n: Int) { future onComplete { diff --git a/akka-actor/src/main/scala/akka/util/Duration.scala b/akka-actor/src/main/scala/akka/util/Duration.scala index b276e4873c..ff33c5c9a9 100644 --- a/akka-actor/src/main/scala/akka/util/Duration.scala +++ b/akka-actor/src/main/scala/akka/util/Duration.scala @@ -10,43 +10,12 @@ import java.lang.{ Double ⇒ JDouble } class TimerException(message: String) extends RuntimeException(message) -/** - * Simple timer class. - * Usage: - *
- *   import akka.util.duration._
- *   import akka.util.Timer
- *
- *   val timer = Timer(30 seconds)
- *   while (timer.isTicking) { ... }
- *
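
The deleted usage block above, together with the Deadline members added in this commit, gives a direct translation. A minimal sketch — `doWork()` is a placeholder standing in for the body of the original loop:

    import akka.util.Deadline
    import akka.util.duration._

    def doWork(): Unit = () // placeholder for the loop body

    // the Timer(30 seconds) / isTicking loop, expressed with Deadline:
    val deadline = Deadline.now + (30 seconds)
    while (!deadline.isOverdue()) { doWork() } // isOverdue() flips once time is up

    val left = deadline.timeLeft // remaining time, if needed

By patch 86 in this series a `fromNow` conversion is also in place (see the Gossiper and IOActor hunks there), so the deadline can equally be written as `(30 seconds) fromNow`.
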
- */ -case class Timer(timeout: Duration, throwExceptionOnTimeout: Boolean = false) { - val startTime = Duration(System.nanoTime, NANOSECONDS) - - def timeLeft: Duration = { - val time = timeout.toNanos - (System.nanoTime - startTime.toNanos) - if (time <= 0) Duration(0, NANOSECONDS) - else Duration(time, NANOSECONDS) - } - - /** - * Returns true while the timer is ticking. After that it either throws and exception or - * returns false. Depending on if the 'throwExceptionOnTimeout' argument is true or false. - */ - def isTicking: Boolean = { - if (!(timeout.toNanos > (System.nanoTime - startTime.toNanos))) { - if (throwExceptionOnTimeout) throw new TimerException("Time out after " + timeout) - else false - } else true - } -} - case class Deadline(time: Duration) { def +(other: Duration): Deadline = copy(time = time + other) def -(other: Duration): Deadline = copy(time = time - other) def -(other: Deadline): Duration = time - other.time def timeLeft: Duration = this - Deadline.now + def isOverdue(): Boolean = timeLeft < Duration.Zero } object Deadline { def now: Deadline = Deadline(Duration(System.nanoTime, NANOSECONDS)) From aca5693ce688d4e5cb29473f509a965807ebbc9e Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 1 Feb 2012 13:37:57 +0100 Subject: [PATCH 84/94] Restructuring the pattern internals so it is modular --- .../main/scala/akka/pattern/AskSupport.scala | 64 ++++++++- .../akka/pattern/GracefulStopSupport.scala | 47 +++++++ .../scala/akka/pattern/PipeToSupport.scala | 39 ++++- .../src/main/scala/akka/pattern/package.scala | 133 +----------------- .../src/main/scala/akka/routing/Routing.scala | 2 +- .../akka/remote/RemoteCommunicationSpec.scala | 4 +- 6 files changed, 150 insertions(+), 139 deletions(-) create mode 100644 akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala diff --git a/akka-actor/src/main/scala/akka/pattern/AskSupport.scala b/akka-actor/src/main/scala/akka/pattern/AskSupport.scala index 3e637fc81d..46ccc25307 100644 --- a/akka-actor/src/main/scala/akka/pattern/AskSupport.scala +++ b/akka-actor/src/main/scala/akka/pattern/AskSupport.scala @@ -22,7 +22,69 @@ class AskTimeoutException(message: String, cause: Throwable) extends TimeoutExce /** * This object contains implementation details of the “ask” pattern. */ -object AskSupport { +trait AskSupport { + + /** + * Import this implicit conversion to gain `?` and `ask` methods on + * [[akka.actor.ActorRef]], which will defer to the + * `ask(actorRef, message)(timeout)` method defined here. + * + * {{{ + * import akka.pattern.ask + * + * val future = actor ? message // => ask(actor, message) + * val future = actor ask message // => ask(actor, message) + * val future = actor.ask(message)(timeout) // => ask(actor, message)(timeout) + * }}} + * + * All of the above use an implicit [[akka.actor.Timeout]]. + */ + implicit def ask(actorRef: ActorRef): AskableActorRef = new AskableActorRef(actorRef) + + /** + * Sends a message asynchronously and returns a [[akka.dispatch.Future]] + * holding the eventual reply message; this means that the target actor + * needs to send the result to the `sender` reference provided. The Future + * will be completed with an [[akka.actor.AskTimeoutException]] after the + * given timeout has expired; this is independent from any timeout applied + * while awaiting a result for this future (i.e. in + * `Await.result(..., timeout)`). + * + * Warning: + * When using future callbacks, inside actors you need to carefully avoid closing over + * the containing actor’s object, i.e. 
do not call methods or access mutable state + * on the enclosing actor from within the callback. This would break the actor + * encapsulation and may introduce synchronization bugs and race conditions because + * the callback will be scheduled concurrently to the enclosing actor. Unfortunately + * there is not yet a way to detect these illegal accesses at compile time. + * + * Recommended usage: + * + * {{{ + * val f = ask(worker, request)(timeout) + * flow { + * EnrichedRequest(request, f()) + * } pipeTo nextActor + * }}} + * + * [see [[akka.dispatch.Future]] for a description of `flow`] + */ + def ask(actorRef: ActorRef, message: Any)(implicit timeout: Timeout): Future[Any] = actorRef match { + case ref: InternalActorRef if ref.isTerminated ⇒ + actorRef.tell(message) + Promise.failed(new AskTimeoutException("sending to terminated ref breaks promises"))(ref.provider.dispatcher) + case ref: InternalActorRef ⇒ + val provider = ref.provider + if (timeout.duration.length <= 0) { + actorRef.tell(message) + Promise.failed(new AskTimeoutException("not asking with negative timeout"))(provider.dispatcher) + } else { + val a = createAsker(provider, timeout) + actorRef.tell(message, a) + a.result + } + case _ ⇒ throw new IllegalArgumentException("incompatible ActorRef " + actorRef) + } /** * Implementation detail of the “ask” pattern enrichment of ActorRef diff --git a/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala b/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala new file mode 100644 index 0000000000..d6fbd31c1e --- /dev/null +++ b/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala @@ -0,0 +1,47 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ + +package akka.pattern + +import akka.actor.{ ActorRef, Actor, ActorSystem, Props, PoisonPill, Terminated, ReceiveTimeout, ActorTimeoutException } +import akka.dispatch.{ Promise, Future } +import akka.util.Duration + +trait GracefulStopSupport { + /** + * Returns a [[akka.dispatch.Future]] that will be completed with success (value `true`) when + * existing messages of the target actor has been processed and the actor has been + * terminated. + * + * Useful when you need to wait for termination or compose ordered termination of several actors. + * + * If the target actor isn't terminated within the timeout the [[akka.dispatch.Future]] + * is completed with failure [[akka.actor.ActorTimeoutException]]. + */ + def gracefulStop(target: ActorRef, timeout: Duration)(implicit system: ActorSystem): Future[Boolean] = { + if (target.isTerminated) { + Promise.successful(true) + } else { + val result = Promise[Boolean]() + system.actorOf(Props(new Actor { + // Terminated will be received when target has been stopped + context watch target + target ! 
PoisonPill + // ReceiveTimeout will be received if nothing else is received within the timeout + context setReceiveTimeout timeout + + def receive = { + case Terminated(a) if a == target ⇒ + result success true + context stop self + case ReceiveTimeout ⇒ + result failure new ActorTimeoutException( + "Failed to stop [%s] within [%s]".format(target.path, context.receiveTimeout)) + context stop self + } + })) + result + } + } +} \ No newline at end of file diff --git a/akka-actor/src/main/scala/akka/pattern/PipeToSupport.scala b/akka-actor/src/main/scala/akka/pattern/PipeToSupport.scala index f386209458..fef4a91ee8 100644 --- a/akka-actor/src/main/scala/akka/pattern/PipeToSupport.scala +++ b/akka-actor/src/main/scala/akka/pattern/PipeToSupport.scala @@ -3,13 +3,46 @@ */ package akka.pattern -import akka.actor.ActorRef import akka.dispatch.Future +import akka.actor.{ Status, ActorRef } -object PipeToSupport { +trait PipeToSupport { - class PipeableFuture[T](val future: Future[T]) { + final class PipeableFuture[T](val future: Future[T]) { def pipeTo(actorRef: ActorRef): Future[T] = akka.pattern.pipe(future, actorRef) } + /** + * Import this implicit conversion to gain the `pipeTo` method on [[akka.dispatch.Future]]: + * + * {{{ + * import akka.pattern.pipeTo + * + * Future { doExpensiveCalc() } pipeTo nextActor + * }}} + */ + implicit def pipeTo[T](future: Future[T]): PipeableFuture[T] = new PipeableFuture(future) + + /** + * Register an onComplete callback on this [[akka.dispatch.Future]] to send + * the result to the given actor reference. Returns the original Future to + * allow method chaining. + * + * Recommended usage example: + * + * {{{ + * val f = ask(worker, request)(timeout) + * flow { + * EnrichedRequest(request, f()) + * } pipeTo nextActor + * }}} + * + * [see [[akka.dispatch.Future]] for a description of `flow`] + */ + def pipe[T](future: Future[T], recipient: ActorRef): Future[T] = + future onComplete { + case Right(r) ⇒ recipient ! r + case Left(f) ⇒ recipient ! Status.Failure(f) + } + } \ No newline at end of file diff --git a/akka-actor/src/main/scala/akka/pattern/package.scala b/akka-actor/src/main/scala/akka/pattern/package.scala index 2a8c03229f..d223bf2a32 100644 --- a/akka-actor/src/main/scala/akka/pattern/package.scala +++ b/akka-actor/src/main/scala/akka/pattern/package.scala @@ -40,137 +40,6 @@ import akka.util.{ Timeout, Duration } * ask(actor, message); * }}} */ -package object pattern { - - /** - * Import this implicit conversion to gain `?` and `ask` methods on - * [[akka.actor.ActorRef]], which will defer to the - * `ask(actorRef, message)(timeout)` method defined here. - * - * {{{ - * import akka.pattern.ask - * - * val future = actor ? message // => ask(actor, message) - * val future = actor ask message // => ask(actor, message) - * val future = actor.ask(message)(timeout) // => ask(actor, message)(timeout) - * }}} - * - * All of the above use an implicit [[akka.actor.Timeout]]. - */ - implicit def ask(actorRef: ActorRef): AskSupport.AskableActorRef = new AskSupport.AskableActorRef(actorRef) - - /** - * Sends a message asynchronously and returns a [[akka.dispatch.Future]] - * holding the eventual reply message; this means that the target actor - * needs to send the result to the `sender` reference provided. The Future - * will be completed with an [[akka.actor.AskTimeoutException]] after the - * given timeout has expired; this is independent from any timeout applied - * while awaiting a result for this future (i.e. in - * `Await.result(..., timeout)`). 
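
This scaladoc moves essentially verbatim into AskSupport above. As a concrete companion, here is how the two ends of the pattern typically meet in 2.0-era user code — `worker` and `request` are placeholders, not names from the patch:

    import akka.pattern.ask
    import akka.dispatch.Await
    import akka.util.Timeout
    import akka.util.duration._

    implicit val timeout = Timeout(5 seconds)

    val future = worker ? request // or equivalently: ask(worker, request)

    future onSuccess { case reply ⇒ println(reply) }   // non-blocking
    val reply = Await.result(future, timeout.duration) // blocking; fine in tests, avoid inside actors
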
- * - * Warning: - * When using future callbacks, inside actors you need to carefully avoid closing over - * the containing actor’s object, i.e. do not call methods or access mutable state - * on the enclosing actor from within the callback. This would break the actor - * encapsulation and may introduce synchronization bugs and race conditions because - * the callback will be scheduled concurrently to the enclosing actor. Unfortunately - * there is not yet a way to detect these illegal accesses at compile time. - * - * Recommended usage: - * - * {{{ - * val f = ask(worker, request)(timeout) - * flow { - * EnrichedRequest(request, f()) - * } pipeTo nextActor - * }}} - * - * [see [[akka.dispatch.Future]] for a description of `flow`] - */ - def ask(actorRef: ActorRef, message: Any)(implicit timeout: Timeout): Future[Any] = actorRef match { - case ref: InternalActorRef if ref.isTerminated ⇒ - actorRef.tell(message) - Promise.failed(new AskTimeoutException("sending to terminated ref breaks promises"))(ref.provider.dispatcher) - case ref: InternalActorRef ⇒ - val provider = ref.provider - if (timeout.duration.length <= 0) { - actorRef.tell(message) - Promise.failed(new AskTimeoutException("not asking with negative timeout"))(provider.dispatcher) - } else { - val a = AskSupport.createAsker(provider, timeout) - actorRef.tell(message, a) - a.result - } - case _ ⇒ throw new IllegalArgumentException("incompatible ActorRef " + actorRef) - } - - /** - * Import this implicit conversion to gain the `pipeTo` method on [[akka.dispatch.Future]]: - * - * {{{ - * import akka.pattern.pipeTo - * - * Future { doExpensiveCalc() } pipeTo nextActor - * }}} - */ - implicit def pipeTo[T](future: Future[T]): PipeToSupport.PipeableFuture[T] = new PipeToSupport.PipeableFuture(future) - - /** - * Register an onComplete callback on this [[akka.dispatch.Future]] to send - * the result to the given actor reference. Returns the original Future to - * allow method chaining. - * - * Recommended usage example: - * - * {{{ - * val f = ask(worker, request)(timeout) - * flow { - * EnrichedRequest(request, f()) - * } pipeTo nextActor - * }}} - * - * [see [[akka.dispatch.Future]] for a description of `flow`] - */ - def pipe[T](future: Future[T], recipient: ActorRef): Future[T] = - future onComplete { - case Right(r) ⇒ recipient ! r - case Left(f) ⇒ recipient ! Status.Failure(f) - } - - /** - * Returns a [[akka.dispatch.Future]] that will be completed with success (value `true`) when - * existing messages of the target actor has been processed and the actor has been - * terminated. - * - * Useful when you need to wait for termination or compose ordered termination of several actors. - * - * If the target actor isn't terminated within the timeout the [[akka.dispatch.Future]] - * is completed with failure [[akka.actor.ActorTimeoutException]]. - */ - def gracefulStop(target: ActorRef, timeout: Duration)(implicit system: ActorSystem): Future[Boolean] = { - if (target.isTerminated) { - Promise.successful(true) - } else { - val result = Promise[Boolean]() - system.actorOf(Props(new Actor { - // Terminated will be received when target has been stopped - context watch target - target ! 
PoisonPill - // ReceiveTimeout will be received if nothing else is received within the timeout - context setReceiveTimeout timeout - - def receive = { - case Terminated(a) if a == target ⇒ - result success true - context stop self - case ReceiveTimeout ⇒ - result failure new ActorTimeoutException( - "Failed to stop [%s] within [%s]".format(target.path, context.receiveTimeout)) - context stop self - } - })) - result - } - } +package object pattern extends PipeToSupport with AskSupport with GracefulStopSupport { } diff --git a/akka-actor/src/main/scala/akka/routing/Routing.scala b/akka-actor/src/main/scala/akka/routing/Routing.scala index e2e6f14db7..46dcbde8d7 100644 --- a/akka-actor/src/main/scala/akka/routing/Routing.scala +++ b/akka-actor/src/main/scala/akka/routing/Routing.scala @@ -766,7 +766,7 @@ trait ScatterGatherFirstCompletedLike { this: RouterConfig ⇒ { case (sender, message) ⇒ val provider: ActorRefProvider = routeeProvider.context.asInstanceOf[ActorCell].systemImpl.provider - val asker = AskSupport.createAsker(provider, within) + val asker = akka.pattern.createAsker(provider, within) asker.result.pipeTo(sender) toAll(asker, routeeProvider.routees) } diff --git a/akka-remote/src/test/scala/akka/remote/RemoteCommunicationSpec.scala b/akka-remote/src/test/scala/akka/remote/RemoteCommunicationSpec.scala index 0f6898a239..88d80d6d81 100644 --- a/akka-remote/src/test/scala/akka/remote/RemoteCommunicationSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/RemoteCommunicationSpec.scala @@ -82,8 +82,8 @@ akka { "support ask" in { Await.result(here ? "ping", timeout.duration) match { - case ("pong", s: akka.pattern.AskSupport.PromiseActorRef) ⇒ // good - case m ⇒ fail(m + " was not (pong, AskActorRef)") + case ("pong", s: akka.pattern.PromiseActorRef) ⇒ // good + case m ⇒ fail(m + " was not (pong, AskActorRef)") } } From 75e90cccdfb05fc8501672df0cd3de1f7885f365 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 1 Feb 2012 14:04:01 +0100 Subject: [PATCH 85/94] Cleaning up the code --- .../scala/akka/dataflow/Future2Actor.scala | 2 +- .../main/scala/akka/pattern/Patterns.scala | 7 ++-- .../scala/akka/pattern/PipeToSupport.scala | 36 ++++++------------- .../src/main/scala/akka/pattern/package.scala | 2 +- .../src/main/scala/akka/routing/Routing.scala | 2 +- .../code/akka/docs/actor/ActorDocSpec.scala | 4 +-- .../docs/actor/FaultHandlingDocSample.scala | 2 +- 7 files changed, 20 insertions(+), 35 deletions(-) diff --git a/akka-actor-tests/src/test/scala/akka/dataflow/Future2Actor.scala b/akka-actor-tests/src/test/scala/akka/dataflow/Future2Actor.scala index eabe00a3b2..26f92d8a6d 100644 --- a/akka-actor-tests/src/test/scala/akka/dataflow/Future2Actor.scala +++ b/akka-actor-tests/src/test/scala/akka/dataflow/Future2Actor.scala @@ -8,7 +8,7 @@ import akka.dispatch.{ Future, Await } import akka.util.duration._ import akka.testkit.AkkaSpec import akka.testkit.DefaultTimeout -import akka.pattern.{ ask, pipeTo } +import akka.pattern.{ ask, pipe } class Future2ActorSpec extends AkkaSpec with DefaultTimeout { diff --git a/akka-actor/src/main/scala/akka/pattern/Patterns.scala b/akka-actor/src/main/scala/akka/pattern/Patterns.scala index d585d88e13..348d472e89 100644 --- a/akka-actor/src/main/scala/akka/pattern/Patterns.scala +++ b/akka-actor/src/main/scala/akka/pattern/Patterns.scala @@ -6,7 +6,7 @@ package akka.pattern object Patterns { import akka.actor.{ ActorRef, ActorSystem } import akka.dispatch.Future - import akka.pattern.{ ask ⇒ scalaAsk } + import akka.pattern.{ ask ⇒ scalaAsk, pipe 
⇒ scalaPipe } import akka.util.{ Timeout, Duration } /** @@ -86,7 +86,7 @@ object Patterns { * Patterns.pipe(transformed, nextActor); * }}} */ - def pipe[T](future: Future[T], recipient: ActorRef): Future[T] = akka.pattern.pipe(future, recipient) + def pipe[T](future: Future[T], recipient: ActorRef): Future[T] = scalaPipe(future) pipeTo recipient /** * Returns a [[akka.dispatch.Future]] that will be completed with success (value `true`) when @@ -98,7 +98,6 @@ object Patterns { * If the target actor isn't terminated within the timeout the [[akka.dispatch.Future]] * is completed with failure [[akka.actor.ActorTimeoutException]]. */ - def gracefulStop(target: ActorRef, timeout: Duration, system: ActorSystem): Future[java.lang.Boolean] = { + def gracefulStop(target: ActorRef, timeout: Duration, system: ActorSystem): Future[java.lang.Boolean] = akka.pattern.gracefulStop(target, timeout)(system).asInstanceOf[Future[java.lang.Boolean]] - } } diff --git a/akka-actor/src/main/scala/akka/pattern/PipeToSupport.scala b/akka-actor/src/main/scala/akka/pattern/PipeToSupport.scala index fef4a91ee8..66c584867a 100644 --- a/akka-actor/src/main/scala/akka/pattern/PipeToSupport.scala +++ b/akka-actor/src/main/scala/akka/pattern/PipeToSupport.scala @@ -9,7 +9,16 @@ import akka.actor.{ Status, ActorRef } trait PipeToSupport { final class PipeableFuture[T](val future: Future[T]) { - def pipeTo(actorRef: ActorRef): Future[T] = akka.pattern.pipe(future, actorRef) + def pipeTo(recipient: ActorRef): Future[T] = + future onComplete { + case Right(r) ⇒ recipient ! r + case Left(f) ⇒ recipient ! Status.Failure(f) + } + + def to(recipient: ActorRef): PipeableFuture[T] = { + pipeTo(recipient) + this + } } /** @@ -21,28 +30,5 @@ trait PipeToSupport { * Future { doExpensiveCalc() } pipeTo nextActor * }}} */ - implicit def pipeTo[T](future: Future[T]): PipeableFuture[T] = new PipeableFuture(future) - - /** - * Register an onComplete callback on this [[akka.dispatch.Future]] to send - * the result to the given actor reference. Returns the original Future to - * allow method chaining. - * - * Recommended usage example: - * - * {{{ - * val f = ask(worker, request)(timeout) - * flow { - * EnrichedRequest(request, f()) - * } pipeTo nextActor - * }}} - * - * [see [[akka.dispatch.Future]] for a description of `flow`] - */ - def pipe[T](future: Future[T], recipient: ActorRef): Future[T] = - future onComplete { - case Right(r) ⇒ recipient ! r - case Left(f) ⇒ recipient ! 
Status.Failure(f) - } - + implicit def pipe[T](future: Future[T]): PipeableFuture[T] = new PipeableFuture(future) } \ No newline at end of file diff --git a/akka-actor/src/main/scala/akka/pattern/package.scala b/akka-actor/src/main/scala/akka/pattern/package.scala index d223bf2a32..ec4786a4c0 100644 --- a/akka-actor/src/main/scala/akka/pattern/package.scala +++ b/akka-actor/src/main/scala/akka/pattern/package.scala @@ -42,4 +42,4 @@ import akka.util.{ Timeout, Duration } */ package object pattern extends PipeToSupport with AskSupport with GracefulStopSupport { -} +} \ No newline at end of file diff --git a/akka-actor/src/main/scala/akka/routing/Routing.scala b/akka-actor/src/main/scala/akka/routing/Routing.scala index 46dcbde8d7..e3c349cb89 100644 --- a/akka-actor/src/main/scala/akka/routing/Routing.scala +++ b/akka-actor/src/main/scala/akka/routing/Routing.scala @@ -10,7 +10,7 @@ import akka.util.Duration import akka.util.duration._ import com.typesafe.config.Config import akka.config.ConfigurationException -import akka.pattern.{ AskSupport, pipeTo } +import akka.pattern.pipe import scala.collection.JavaConversions.iterableAsScalaIterable /** diff --git a/akka-docs/scala/code/akka/docs/actor/ActorDocSpec.scala b/akka-docs/scala/code/akka/docs/actor/ActorDocSpec.scala index 98fa19aba2..55a205746f 100644 --- a/akka-docs/scala/code/akka/docs/actor/ActorDocSpec.scala +++ b/akka-docs/scala/code/akka/docs/actor/ActorDocSpec.scala @@ -337,7 +337,7 @@ class ActorDocSpec extends AkkaSpec(Map("akka.loglevel" -> "INFO")) { "using pattern ask / pipeTo" in { val actorA, actorB, actorC, actorD = system.actorOf(Props.empty) //#ask-pipeTo - import akka.pattern.{ ask, pipeTo, pipe } + import akka.pattern.{ ask, pipe } case class Result(x: Int, s: String, d: Double) case object Request @@ -352,7 +352,7 @@ class ActorDocSpec extends AkkaSpec(Map("akka.loglevel" -> "INFO")) { } yield Result(x, s, d) f pipeTo actorD // .. or .. 
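
The two lines that follow swap the free-standing `pipe(f, actorD)` call for `pipe(f) to actorD`, the form that survives this cleanup. Both spellings register the same onComplete callback on the Future — `someFuture` and `nextActor` below are placeholders:

    import akka.pattern.pipe

    // equivalent after this patch: a successful value is sent to the
    // target as-is, a failure arrives wrapped in Status.Failure
    someFuture pipeTo nextActor
    pipe(someFuture) to nextActor

The `to` alias is what later lets the Java API read `Patterns.pipe(future).to(nextActor)` without a second implementation (see patch 87).
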
- pipe(f, actorD) + pipe(f) to actorD //#ask-pipeTo } diff --git a/akka-docs/scala/code/akka/docs/actor/FaultHandlingDocSample.scala b/akka-docs/scala/code/akka/docs/actor/FaultHandlingDocSample.scala index 09f32eee91..d08bcb53b2 100644 --- a/akka-docs/scala/code/akka/docs/actor/FaultHandlingDocSample.scala +++ b/akka-docs/scala/code/akka/docs/actor/FaultHandlingDocSample.scala @@ -11,7 +11,7 @@ import akka.util.duration._ import akka.util.Duration import akka.util.Timeout import akka.event.LoggingReceive -import akka.pattern.{ ask, pipeTo } +import akka.pattern.{ ask, pipe } import com.typesafe.config.ConfigFactory //#imports From 985acc821c2a8428a26861a79cb6042619f888e6 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 1 Feb 2012 14:29:56 +0100 Subject: [PATCH 86/94] Fixing Deadline, removing TimeoutException and fixing bugs in Gossiper etc --- akka-actor-tests/src/test/scala/akka/actor/IOActor.scala | 4 ++-- akka-actor/src/main/scala/akka/util/Duration.scala | 7 +++---- akka-cluster/src/main/scala/akka/cluster/Gossiper.scala | 2 +- 3 files changed, 6 insertions(+), 7 deletions(-) diff --git a/akka-actor-tests/src/test/scala/akka/actor/IOActor.scala b/akka-actor-tests/src/test/scala/akka/actor/IOActor.scala index 62da560831..915a8d5fc8 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/IOActor.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/IOActor.scala @@ -245,12 +245,12 @@ class IOActorSpec extends AkkaSpec with DefaultTimeout { val promise = Promise[T]()(executor) val timer: Option[Deadline] = timeout match { - case Some(duration) ⇒ Some(Deadline(duration)) + case Some(duration) ⇒ Some(duration fromNow) case None ⇒ None } def check(n: Int, e: Throwable): Boolean = - (count.isEmpty || (n < count.get)) && (timer.isEmpty || !timer.get.isOverdue()) && (filter.isEmpty || filter.get(e)) + (count.isEmpty || (n < count.get)) && (timer.isEmpty || timer.get.hasTimeLeft()) && (filter.isEmpty || filter.get(e)) def run(n: Int) { future onComplete { diff --git a/akka-actor/src/main/scala/akka/util/Duration.scala b/akka-actor/src/main/scala/akka/util/Duration.scala index ff33c5c9a9..2b6aae1eb3 100644 --- a/akka-actor/src/main/scala/akka/util/Duration.scala +++ b/akka-actor/src/main/scala/akka/util/Duration.scala @@ -8,14 +8,13 @@ import java.util.concurrent.TimeUnit import TimeUnit._ import java.lang.{ Double ⇒ JDouble } -class TimerException(message: String) extends RuntimeException(message) - -case class Deadline(time: Duration) { +case class Deadline private (time: Duration) { def +(other: Duration): Deadline = copy(time = time + other) def -(other: Duration): Deadline = copy(time = time - other) def -(other: Deadline): Duration = time - other.time def timeLeft: Duration = this - Deadline.now - def isOverdue(): Boolean = timeLeft < Duration.Zero + def hasTimeLeft(): Boolean = !isOverdue() //Code reuse FTW + def isOverdue(): Boolean = (time.toNanos - System.nanoTime()) < 0 } object Deadline { def now: Deadline = Deadline(Duration(System.nanoTime, NANOSECONDS)) diff --git a/akka-cluster/src/main/scala/akka/cluster/Gossiper.scala b/akka-cluster/src/main/scala/akka/cluster/Gossiper.scala index 699ec4c6c8..1b9026d082 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Gossiper.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Gossiper.scala @@ -163,7 +163,7 @@ case class Gossiper(remote: RemoteActorRefProvider, system: ActorSystemImpl) { log.info("Starting cluster Gossiper...") // join the cluster by connecting to one of the seed members and retrieve current cluster state 
(Gossip) - joinCluster(Deadline(clusterSettings.MaxTimeToRetryJoiningCluster)) + joinCluster(clusterSettings.MaxTimeToRetryJoiningCluster fromNow) // start periodic gossip and cluster scrutinization val initateGossipCanceller = system.scheduler.schedule(initialDelayForGossip, gossipFrequency)(initateGossip()) From 009a1afe897315e07bec068711ad6e634cd3c55c Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 1 Feb 2012 14:54:54 +0100 Subject: [PATCH 87/94] Making createAsker private, adding docs for 'to', changing Java API to be symmetric to Java --- akka-actor/src/main/scala/akka/pattern/AskSupport.scala | 5 ++++- akka-actor/src/main/scala/akka/pattern/Patterns.scala | 4 ++-- akka-actor/src/main/scala/akka/pattern/PipeToSupport.scala | 7 ++++++- .../java/code/akka/docs/actor/UntypedActorDocTestBase.java | 2 +- .../code/akka/docs/actor/japi/FaultHandlingDocSample.java | 3 ++- 5 files changed, 15 insertions(+), 6 deletions(-) diff --git a/akka-actor/src/main/scala/akka/pattern/AskSupport.scala b/akka-actor/src/main/scala/akka/pattern/AskSupport.scala index 46ccc25307..f0e5939f96 100644 --- a/akka-actor/src/main/scala/akka/pattern/AskSupport.scala +++ b/akka-actor/src/main/scala/akka/pattern/AskSupport.scala @@ -183,7 +183,10 @@ trait AskSupport { } } - def createAsker(provider: ActorRefProvider, timeout: Timeout): PromiseActorRef = { + /** + * INTERNAL AKKA USE ONLY + */ + private[akka] def createAsker(provider: ActorRefProvider, timeout: Timeout): PromiseActorRef = { val path = provider.tempPath() val result = Promise[Any]()(provider.dispatcher) val a = new PromiseActorRef(provider, path, provider.tempContainer, result, provider.deathWatch) diff --git a/akka-actor/src/main/scala/akka/pattern/Patterns.scala b/akka-actor/src/main/scala/akka/pattern/Patterns.scala index 348d472e89..7167775b29 100644 --- a/akka-actor/src/main/scala/akka/pattern/Patterns.scala +++ b/akka-actor/src/main/scala/akka/pattern/Patterns.scala @@ -83,10 +83,10 @@ object Patterns { * // apply some transformation (i.e. enrich with request info) * final Future transformed = f.map(new akka.japi.Function() { ... 
}); * // send it on to the next stage - * Patterns.pipe(transformed, nextActor); + * Patterns.pipe(transformed).to(nextActor); * }}} */ - def pipe[T](future: Future[T], recipient: ActorRef): Future[T] = scalaPipe(future) pipeTo recipient + def pipe[T](future: Future[T]): PipeableFuture[T] = scalaPipe(future) /** * Returns a [[akka.dispatch.Future]] that will be completed with success (value `true`) when diff --git a/akka-actor/src/main/scala/akka/pattern/PipeToSupport.scala b/akka-actor/src/main/scala/akka/pattern/PipeToSupport.scala index 66c584867a..b611fd7128 100644 --- a/akka-actor/src/main/scala/akka/pattern/PipeToSupport.scala +++ b/akka-actor/src/main/scala/akka/pattern/PipeToSupport.scala @@ -25,9 +25,14 @@ trait PipeToSupport { * Import this implicit conversion to gain the `pipeTo` method on [[akka.dispatch.Future]]: * * {{{ - * import akka.pattern.pipeTo + * import akka.pattern.pipe * * Future { doExpensiveCalc() } pipeTo nextActor + * + * or + * + * pipe(someFuture) to nextActor + * * }}} */ implicit def pipe[T](future: Future[T]): PipeableFuture[T] = new PipeableFuture(future) diff --git a/akka-docs/java/code/akka/docs/actor/UntypedActorDocTestBase.java b/akka-docs/java/code/akka/docs/actor/UntypedActorDocTestBase.java index a6d246fb2b..749dd1e1d9 100644 --- a/akka-docs/java/code/akka/docs/actor/UntypedActorDocTestBase.java +++ b/akka-docs/java/code/akka/docs/actor/UntypedActorDocTestBase.java @@ -247,7 +247,7 @@ public class UntypedActorDocTestBase { } }); - pipe(transformed, actorC); + pipe(transformed).to(actorC); //#ask-pipe system.shutdown(); } diff --git a/akka-docs/java/code/akka/docs/actor/japi/FaultHandlingDocSample.java b/akka-docs/java/code/akka/docs/actor/japi/FaultHandlingDocSample.java index 4486450f43..db39a5d663 100644 --- a/akka-docs/java/code/akka/docs/actor/japi/FaultHandlingDocSample.java +++ b/akka-docs/java/code/akka/docs/actor/japi/FaultHandlingDocSample.java @@ -151,7 +151,8 @@ public class FaultHandlingDocSample { public Progress apply(CurrentCount c) { return new Progress(100.0 * c.count / totalCount); } - }), progressListener); + })) + .to(progressListener); } else { unhandled(msg); } From 9421f37f965b240dbdbe632a9594b9d5afab7f4a Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 1 Feb 2012 16:06:30 +0100 Subject: [PATCH 88/94] Ripping out ReadTimeout and adding Idle timeout and fixing issues with configured port on top of that --- .../main/java/akka/remote/RemoteProtocol.java | 752 +++++++++--------- .../src/main/protocol/RemoteProtocol.proto | 1 + akka-remote/src/main/resources/reference.conf | 20 +- .../scala/akka/remote/RemoteTransport.scala | 2 +- .../main/scala/akka/remote/netty/Client.scala | 52 +- .../main/scala/akka/remote/netty/Server.scala | 13 +- .../scala/akka/remote/netty/Settings.scala | 6 +- .../scala/akka/remote/RemoteConfigSpec.scala | 6 +- 8 files changed, 448 insertions(+), 404 deletions(-) diff --git a/akka-remote/src/main/java/akka/remote/RemoteProtocol.java b/akka-remote/src/main/java/akka/remote/RemoteProtocol.java index 90c493e176..0fcb423d1e 100644 --- a/akka-remote/src/main/java/akka/remote/RemoteProtocol.java +++ b/akka-remote/src/main/java/akka/remote/RemoteProtocol.java @@ -12,10 +12,12 @@ public final class RemoteProtocol { implements com.google.protobuf.ProtocolMessageEnum { CONNECT(0, 1), SHUTDOWN(1, 2), + HEARTBEAT(2, 3), ; public static final int CONNECT_VALUE = 1; public static final int SHUTDOWN_VALUE = 2; + public static final int HEARTBEAT_VALUE = 3; public final int getNumber() { return value; } @@ -24,6 
+26,7 @@ public final class RemoteProtocol { switch (value) { case 1: return CONNECT; case 2: return SHUTDOWN; + case 3: return HEARTBEAT; default: return null; } } @@ -54,7 +57,7 @@ public final class RemoteProtocol { } private static final CommandType[] VALUES = { - CONNECT, SHUTDOWN, + CONNECT, SHUTDOWN, HEARTBEAT, }; public static CommandType valueOf( @@ -460,7 +463,7 @@ public final class RemoteProtocol { private static Builder create() { return new Builder(); } - + public Builder clear() { super.clear(); if (messageBuilder_ == null) { @@ -477,20 +480,20 @@ public final class RemoteProtocol { bitField0_ = (bitField0_ & ~0x00000002); return this; } - + public Builder clone() { return create().mergeFrom(buildPartial()); } - + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return akka.remote.RemoteProtocol.AkkaRemoteProtocol.getDescriptor(); } - + public akka.remote.RemoteProtocol.AkkaRemoteProtocol getDefaultInstanceForType() { return akka.remote.RemoteProtocol.AkkaRemoteProtocol.getDefaultInstance(); } - + public akka.remote.RemoteProtocol.AkkaRemoteProtocol build() { akka.remote.RemoteProtocol.AkkaRemoteProtocol result = buildPartial(); if (!result.isInitialized()) { @@ -498,7 +501,7 @@ public final class RemoteProtocol { } return result; } - + private akka.remote.RemoteProtocol.AkkaRemoteProtocol buildParsed() throws com.google.protobuf.InvalidProtocolBufferException { akka.remote.RemoteProtocol.AkkaRemoteProtocol result = buildPartial(); @@ -508,7 +511,7 @@ public final class RemoteProtocol { } return result; } - + public akka.remote.RemoteProtocol.AkkaRemoteProtocol buildPartial() { akka.remote.RemoteProtocol.AkkaRemoteProtocol result = new akka.remote.RemoteProtocol.AkkaRemoteProtocol(this); int from_bitField0_ = bitField0_; @@ -533,7 +536,7 @@ public final class RemoteProtocol { onBuilt(); return result; } - + public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof akka.remote.RemoteProtocol.AkkaRemoteProtocol) { return mergeFrom((akka.remote.RemoteProtocol.AkkaRemoteProtocol)other); @@ -542,7 +545,7 @@ public final class RemoteProtocol { return this; } } - + public Builder mergeFrom(akka.remote.RemoteProtocol.AkkaRemoteProtocol other) { if (other == akka.remote.RemoteProtocol.AkkaRemoteProtocol.getDefaultInstance()) return this; if (other.hasMessage()) { @@ -554,23 +557,23 @@ public final class RemoteProtocol { this.mergeUnknownFields(other.getUnknownFields()); return this; } - + public final boolean isInitialized() { if (hasMessage()) { if (!getMessage().isInitialized()) { - + return false; } } if (hasInstruction()) { if (!getInstruction().isInitialized()) { - + return false; } } return true; } - + public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -615,9 +618,9 @@ public final class RemoteProtocol { } } } - + private int bitField0_; - + // optional .RemoteMessageProtocol message = 1; private akka.remote.RemoteProtocol.RemoteMessageProtocol message_ = akka.remote.RemoteProtocol.RemoteMessageProtocol.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< @@ -695,7 +698,7 @@ public final class RemoteProtocol { } } private com.google.protobuf.SingleFieldBuilder< - akka.remote.RemoteProtocol.RemoteMessageProtocol, akka.remote.RemoteProtocol.RemoteMessageProtocol.Builder, akka.remote.RemoteProtocol.RemoteMessageProtocolOrBuilder> + akka.remote.RemoteProtocol.RemoteMessageProtocol, 
akka.remote.RemoteProtocol.RemoteMessageProtocol.Builder, akka.remote.RemoteProtocol.RemoteMessageProtocolOrBuilder> getMessageFieldBuilder() { if (messageBuilder_ == null) { messageBuilder_ = new com.google.protobuf.SingleFieldBuilder< @@ -707,7 +710,7 @@ public final class RemoteProtocol { } return messageBuilder_; } - + // optional .RemoteControlProtocol instruction = 2; private akka.remote.RemoteProtocol.RemoteControlProtocol instruction_ = akka.remote.RemoteProtocol.RemoteControlProtocol.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< @@ -785,7 +788,7 @@ public final class RemoteProtocol { } } private com.google.protobuf.SingleFieldBuilder< - akka.remote.RemoteProtocol.RemoteControlProtocol, akka.remote.RemoteProtocol.RemoteControlProtocol.Builder, akka.remote.RemoteProtocol.RemoteControlProtocolOrBuilder> + akka.remote.RemoteProtocol.RemoteControlProtocol, akka.remote.RemoteProtocol.RemoteControlProtocol.Builder, akka.remote.RemoteProtocol.RemoteControlProtocolOrBuilder> getInstructionFieldBuilder() { if (instructionBuilder_ == null) { instructionBuilder_ = new com.google.protobuf.SingleFieldBuilder< @@ -797,42 +800,42 @@ public final class RemoteProtocol { } return instructionBuilder_; } - + // @@protoc_insertion_point(builder_scope:AkkaRemoteProtocol) } - + static { defaultInstance = new AkkaRemoteProtocol(true); defaultInstance.initFields(); } - + // @@protoc_insertion_point(class_scope:AkkaRemoteProtocol) } - + public interface RemoteMessageProtocolOrBuilder extends com.google.protobuf.MessageOrBuilder { - + // required .ActorRefProtocol recipient = 1; boolean hasRecipient(); akka.remote.RemoteProtocol.ActorRefProtocol getRecipient(); akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder getRecipientOrBuilder(); - + // required .MessageProtocol message = 2; boolean hasMessage(); akka.remote.RemoteProtocol.MessageProtocol getMessage(); akka.remote.RemoteProtocol.MessageProtocolOrBuilder getMessageOrBuilder(); - + // optional .ActorRefProtocol sender = 4; boolean hasSender(); akka.remote.RemoteProtocol.ActorRefProtocol getSender(); akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder getSenderOrBuilder(); - + // repeated .MetadataEntryProtocol metadata = 5; - java.util.List + java.util.List getMetadataList(); akka.remote.RemoteProtocol.MetadataEntryProtocol getMetadata(int index); int getMetadataCount(); - java.util.List + java.util.List getMetadataOrBuilderList(); akka.remote.RemoteProtocol.MetadataEntryProtocolOrBuilder getMetadataOrBuilder( int index); @@ -845,26 +848,26 @@ public final class RemoteProtocol { super(builder); } private RemoteMessageProtocol(boolean noInit) {} - + private static final RemoteMessageProtocol defaultInstance; public static RemoteMessageProtocol getDefaultInstance() { return defaultInstance; } - + public RemoteMessageProtocol getDefaultInstanceForType() { return defaultInstance; } - + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return akka.remote.RemoteProtocol.internal_static_RemoteMessageProtocol_descriptor; } - + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return akka.remote.RemoteProtocol.internal_static_RemoteMessageProtocol_fieldAccessorTable; } - + private int bitField0_; // required .ActorRefProtocol recipient = 1; public static final int RECIPIENT_FIELD_NUMBER = 1; @@ -878,7 +881,7 @@ public final class RemoteProtocol { public akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder getRecipientOrBuilder() { return recipient_; } - + 
// required .MessageProtocol message = 2; public static final int MESSAGE_FIELD_NUMBER = 2; private akka.remote.RemoteProtocol.MessageProtocol message_; @@ -891,7 +894,7 @@ public final class RemoteProtocol { public akka.remote.RemoteProtocol.MessageProtocolOrBuilder getMessageOrBuilder() { return message_; } - + // optional .ActorRefProtocol sender = 4; public static final int SENDER_FIELD_NUMBER = 4; private akka.remote.RemoteProtocol.ActorRefProtocol sender_; @@ -904,14 +907,14 @@ public final class RemoteProtocol { public akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder getSenderOrBuilder() { return sender_; } - + // repeated .MetadataEntryProtocol metadata = 5; public static final int METADATA_FIELD_NUMBER = 5; private java.util.List metadata_; public java.util.List getMetadataList() { return metadata_; } - public java.util.List + public java.util.List getMetadataOrBuilderList() { return metadata_; } @@ -925,7 +928,7 @@ public final class RemoteProtocol { int index) { return metadata_.get(index); } - + private void initFields() { recipient_ = akka.remote.RemoteProtocol.ActorRefProtocol.getDefaultInstance(); message_ = akka.remote.RemoteProtocol.MessageProtocol.getDefaultInstance(); @@ -936,7 +939,7 @@ public final class RemoteProtocol { public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - + if (!hasRecipient()) { memoizedIsInitialized = 0; return false; @@ -968,7 +971,7 @@ public final class RemoteProtocol { memoizedIsInitialized = 1; return true; } - + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); @@ -986,12 +989,12 @@ public final class RemoteProtocol { } getUnknownFields().writeTo(output); } - + private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; - + size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream @@ -1013,14 +1016,14 @@ public final class RemoteProtocol { memoizedSerializedSize = size; return size; } - + private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } - + public static akka.remote.RemoteProtocol.RemoteMessageProtocol parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { @@ -1087,14 +1090,14 @@ public final class RemoteProtocol { return newBuilder().mergeFrom(input, extensionRegistry) .buildParsed(); } - + public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(akka.remote.RemoteProtocol.RemoteMessageProtocol prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } - + @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { @@ -1108,17 +1111,17 @@ public final class RemoteProtocol { getDescriptor() { return akka.remote.RemoteProtocol.internal_static_RemoteMessageProtocol_descriptor; } - + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return akka.remote.RemoteProtocol.internal_static_RemoteMessageProtocol_fieldAccessorTable; } - + // Construct using akka.remote.RemoteProtocol.RemoteMessageProtocol.newBuilder() private Builder() { 
maybeForceBuilderInitialization(); } - + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); @@ -1134,7 +1137,7 @@ public final class RemoteProtocol { private static Builder create() { return new Builder(); } - + public Builder clear() { super.clear(); if (recipientBuilder_ == null) { @@ -1163,20 +1166,20 @@ public final class RemoteProtocol { } return this; } - + public Builder clone() { return create().mergeFrom(buildPartial()); } - + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return akka.remote.RemoteProtocol.RemoteMessageProtocol.getDescriptor(); } - + public akka.remote.RemoteProtocol.RemoteMessageProtocol getDefaultInstanceForType() { return akka.remote.RemoteProtocol.RemoteMessageProtocol.getDefaultInstance(); } - + public akka.remote.RemoteProtocol.RemoteMessageProtocol build() { akka.remote.RemoteProtocol.RemoteMessageProtocol result = buildPartial(); if (!result.isInitialized()) { @@ -1184,7 +1187,7 @@ public final class RemoteProtocol { } return result; } - + private akka.remote.RemoteProtocol.RemoteMessageProtocol buildParsed() throws com.google.protobuf.InvalidProtocolBufferException { akka.remote.RemoteProtocol.RemoteMessageProtocol result = buildPartial(); @@ -1194,7 +1197,7 @@ public final class RemoteProtocol { } return result; } - + public akka.remote.RemoteProtocol.RemoteMessageProtocol buildPartial() { akka.remote.RemoteProtocol.RemoteMessageProtocol result = new akka.remote.RemoteProtocol.RemoteMessageProtocol(this); int from_bitField0_ = bitField0_; @@ -1236,7 +1239,7 @@ public final class RemoteProtocol { onBuilt(); return result; } - + public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof akka.remote.RemoteProtocol.RemoteMessageProtocol) { return mergeFrom((akka.remote.RemoteProtocol.RemoteMessageProtocol)other); @@ -1245,7 +1248,7 @@ public final class RemoteProtocol { return this; } } - + public Builder mergeFrom(akka.remote.RemoteProtocol.RemoteMessageProtocol other) { if (other == akka.remote.RemoteProtocol.RemoteMessageProtocol.getDefaultInstance()) return this; if (other.hasRecipient()) { @@ -1275,7 +1278,7 @@ public final class RemoteProtocol { metadataBuilder_ = null; metadata_ = other.metadata_; bitField0_ = (bitField0_ & ~0x00000008); - metadataBuilder_ = + metadataBuilder_ = com.google.protobuf.GeneratedMessage.alwaysUseFieldBuilders ? 
getMetadataFieldBuilder() : null; } else { @@ -1286,39 +1289,39 @@ public final class RemoteProtocol { this.mergeUnknownFields(other.getUnknownFields()); return this; } - + public final boolean isInitialized() { if (!hasRecipient()) { - + return false; } if (!hasMessage()) { - + return false; } if (!getRecipient().isInitialized()) { - + return false; } if (!getMessage().isInitialized()) { - + return false; } if (hasSender()) { if (!getSender().isInitialized()) { - + return false; } } for (int i = 0; i < getMetadataCount(); i++) { if (!getMetadata(i).isInitialized()) { - + return false; } } return true; } - + public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -1378,9 +1381,9 @@ public final class RemoteProtocol { } } } - + private int bitField0_; - + // required .ActorRefProtocol recipient = 1; private akka.remote.RemoteProtocol.ActorRefProtocol recipient_ = akka.remote.RemoteProtocol.ActorRefProtocol.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< @@ -1458,7 +1461,7 @@ public final class RemoteProtocol { } } private com.google.protobuf.SingleFieldBuilder< - akka.remote.RemoteProtocol.ActorRefProtocol, akka.remote.RemoteProtocol.ActorRefProtocol.Builder, akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder> + akka.remote.RemoteProtocol.ActorRefProtocol, akka.remote.RemoteProtocol.ActorRefProtocol.Builder, akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder> getRecipientFieldBuilder() { if (recipientBuilder_ == null) { recipientBuilder_ = new com.google.protobuf.SingleFieldBuilder< @@ -1470,7 +1473,7 @@ public final class RemoteProtocol { } return recipientBuilder_; } - + // required .MessageProtocol message = 2; private akka.remote.RemoteProtocol.MessageProtocol message_ = akka.remote.RemoteProtocol.MessageProtocol.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< @@ -1548,7 +1551,7 @@ public final class RemoteProtocol { } } private com.google.protobuf.SingleFieldBuilder< - akka.remote.RemoteProtocol.MessageProtocol, akka.remote.RemoteProtocol.MessageProtocol.Builder, akka.remote.RemoteProtocol.MessageProtocolOrBuilder> + akka.remote.RemoteProtocol.MessageProtocol, akka.remote.RemoteProtocol.MessageProtocol.Builder, akka.remote.RemoteProtocol.MessageProtocolOrBuilder> getMessageFieldBuilder() { if (messageBuilder_ == null) { messageBuilder_ = new com.google.protobuf.SingleFieldBuilder< @@ -1560,7 +1563,7 @@ public final class RemoteProtocol { } return messageBuilder_; } - + // optional .ActorRefProtocol sender = 4; private akka.remote.RemoteProtocol.ActorRefProtocol sender_ = akka.remote.RemoteProtocol.ActorRefProtocol.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< @@ -1638,7 +1641,7 @@ public final class RemoteProtocol { } } private com.google.protobuf.SingleFieldBuilder< - akka.remote.RemoteProtocol.ActorRefProtocol, akka.remote.RemoteProtocol.ActorRefProtocol.Builder, akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder> + akka.remote.RemoteProtocol.ActorRefProtocol, akka.remote.RemoteProtocol.ActorRefProtocol.Builder, akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder> getSenderFieldBuilder() { if (senderBuilder_ == null) { senderBuilder_ = new com.google.protobuf.SingleFieldBuilder< @@ -1650,7 +1653,7 @@ public final class RemoteProtocol { } return senderBuilder_; } - + // repeated .MetadataEntryProtocol metadata = 5; private java.util.List metadata_ = java.util.Collections.emptyList(); @@ -1660,10 +1663,10 @@ public final class 
RemoteProtocol { bitField0_ |= 0x00000008; } } - + private com.google.protobuf.RepeatedFieldBuilder< akka.remote.RemoteProtocol.MetadataEntryProtocol, akka.remote.RemoteProtocol.MetadataEntryProtocol.Builder, akka.remote.RemoteProtocol.MetadataEntryProtocolOrBuilder> metadataBuilder_; - + public java.util.List getMetadataList() { if (metadataBuilder_ == null) { return java.util.Collections.unmodifiableList(metadata_); @@ -1801,7 +1804,7 @@ public final class RemoteProtocol { return metadataBuilder_.getMessageOrBuilder(index); } } - public java.util.List + public java.util.List getMetadataOrBuilderList() { if (metadataBuilder_ != null) { return metadataBuilder_.getMessageOrBuilderList(); @@ -1818,12 +1821,12 @@ public final class RemoteProtocol { return getMetadataFieldBuilder().addBuilder( index, akka.remote.RemoteProtocol.MetadataEntryProtocol.getDefaultInstance()); } - public java.util.List + public java.util.List getMetadataBuilderList() { return getMetadataFieldBuilder().getBuilderList(); } private com.google.protobuf.RepeatedFieldBuilder< - akka.remote.RemoteProtocol.MetadataEntryProtocol, akka.remote.RemoteProtocol.MetadataEntryProtocol.Builder, akka.remote.RemoteProtocol.MetadataEntryProtocolOrBuilder> + akka.remote.RemoteProtocol.MetadataEntryProtocol, akka.remote.RemoteProtocol.MetadataEntryProtocol.Builder, akka.remote.RemoteProtocol.MetadataEntryProtocolOrBuilder> getMetadataFieldBuilder() { if (metadataBuilder_ == null) { metadataBuilder_ = new com.google.protobuf.RepeatedFieldBuilder< @@ -1836,29 +1839,29 @@ public final class RemoteProtocol { } return metadataBuilder_; } - + // @@protoc_insertion_point(builder_scope:RemoteMessageProtocol) } - + static { defaultInstance = new RemoteMessageProtocol(true); defaultInstance.initFields(); } - + // @@protoc_insertion_point(class_scope:RemoteMessageProtocol) } - + public interface RemoteControlProtocolOrBuilder extends com.google.protobuf.MessageOrBuilder { - + // required .CommandType commandType = 1; boolean hasCommandType(); akka.remote.RemoteProtocol.CommandType getCommandType(); - + // optional string cookie = 2; boolean hasCookie(); String getCookie(); - + // optional .AddressProtocol origin = 3; boolean hasOrigin(); akka.remote.RemoteProtocol.AddressProtocol getOrigin(); @@ -1872,26 +1875,26 @@ public final class RemoteProtocol { super(builder); } private RemoteControlProtocol(boolean noInit) {} - + private static final RemoteControlProtocol defaultInstance; public static RemoteControlProtocol getDefaultInstance() { return defaultInstance; } - + public RemoteControlProtocol getDefaultInstanceForType() { return defaultInstance; } - + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return akka.remote.RemoteProtocol.internal_static_RemoteControlProtocol_descriptor; } - + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return akka.remote.RemoteProtocol.internal_static_RemoteControlProtocol_fieldAccessorTable; } - + private int bitField0_; // required .CommandType commandType = 1; public static final int COMMANDTYPE_FIELD_NUMBER = 1; @@ -1902,7 +1905,7 @@ public final class RemoteProtocol { public akka.remote.RemoteProtocol.CommandType getCommandType() { return commandType_; } - + // optional string cookie = 2; public static final int COOKIE_FIELD_NUMBER = 2; private java.lang.Object cookie_; @@ -1914,7 +1917,7 @@ public final class RemoteProtocol { if (ref instanceof String) { return (String) ref; } else { - com.google.protobuf.ByteString bs = + 
com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; String s = bs.toStringUtf8(); if (com.google.protobuf.Internal.isValidUtf8(bs)) { @@ -1926,7 +1929,7 @@ public final class RemoteProtocol { private com.google.protobuf.ByteString getCookieBytes() { java.lang.Object ref = cookie_; if (ref instanceof String) { - com.google.protobuf.ByteString b = + com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8((String) ref); cookie_ = b; return b; @@ -1934,7 +1937,7 @@ public final class RemoteProtocol { return (com.google.protobuf.ByteString) ref; } } - + // optional .AddressProtocol origin = 3; public static final int ORIGIN_FIELD_NUMBER = 3; private akka.remote.RemoteProtocol.AddressProtocol origin_; @@ -1947,7 +1950,7 @@ public final class RemoteProtocol { public akka.remote.RemoteProtocol.AddressProtocolOrBuilder getOriginOrBuilder() { return origin_; } - + private void initFields() { commandType_ = akka.remote.RemoteProtocol.CommandType.CONNECT; cookie_ = ""; @@ -1957,7 +1960,7 @@ public final class RemoteProtocol { public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - + if (!hasCommandType()) { memoizedIsInitialized = 0; return false; @@ -1971,7 +1974,7 @@ public final class RemoteProtocol { memoizedIsInitialized = 1; return true; } - + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); @@ -1986,12 +1989,12 @@ public final class RemoteProtocol { } getUnknownFields().writeTo(output); } - + private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; - + size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream @@ -2009,14 +2012,14 @@ public final class RemoteProtocol { memoizedSerializedSize = size; return size; } - + private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } - + public static akka.remote.RemoteProtocol.RemoteControlProtocol parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { @@ -2083,14 +2086,14 @@ public final class RemoteProtocol { return newBuilder().mergeFrom(input, extensionRegistry) .buildParsed(); } - + public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(akka.remote.RemoteProtocol.RemoteControlProtocol prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } - + @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { @@ -2104,17 +2107,17 @@ public final class RemoteProtocol { getDescriptor() { return akka.remote.RemoteProtocol.internal_static_RemoteControlProtocol_descriptor; } - + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return akka.remote.RemoteProtocol.internal_static_RemoteControlProtocol_fieldAccessorTable; } - + // Construct using akka.remote.RemoteProtocol.RemoteControlProtocol.newBuilder() private Builder() { maybeForceBuilderInitialization(); } - + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); @@ -2127,7 +2130,7 @@ 
public final class RemoteProtocol { private static Builder create() { return new Builder(); } - + public Builder clear() { super.clear(); commandType_ = akka.remote.RemoteProtocol.CommandType.CONNECT; @@ -2142,20 +2145,20 @@ public final class RemoteProtocol { bitField0_ = (bitField0_ & ~0x00000004); return this; } - + public Builder clone() { return create().mergeFrom(buildPartial()); } - + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return akka.remote.RemoteProtocol.RemoteControlProtocol.getDescriptor(); } - + public akka.remote.RemoteProtocol.RemoteControlProtocol getDefaultInstanceForType() { return akka.remote.RemoteProtocol.RemoteControlProtocol.getDefaultInstance(); } - + public akka.remote.RemoteProtocol.RemoteControlProtocol build() { akka.remote.RemoteProtocol.RemoteControlProtocol result = buildPartial(); if (!result.isInitialized()) { @@ -2163,7 +2166,7 @@ public final class RemoteProtocol { } return result; } - + private akka.remote.RemoteProtocol.RemoteControlProtocol buildParsed() throws com.google.protobuf.InvalidProtocolBufferException { akka.remote.RemoteProtocol.RemoteControlProtocol result = buildPartial(); @@ -2173,7 +2176,7 @@ public final class RemoteProtocol { } return result; } - + public akka.remote.RemoteProtocol.RemoteControlProtocol buildPartial() { akka.remote.RemoteProtocol.RemoteControlProtocol result = new akka.remote.RemoteProtocol.RemoteControlProtocol(this); int from_bitField0_ = bitField0_; @@ -2198,7 +2201,7 @@ public final class RemoteProtocol { onBuilt(); return result; } - + public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof akka.remote.RemoteProtocol.RemoteControlProtocol) { return mergeFrom((akka.remote.RemoteProtocol.RemoteControlProtocol)other); @@ -2207,7 +2210,7 @@ public final class RemoteProtocol { return this; } } - + public Builder mergeFrom(akka.remote.RemoteProtocol.RemoteControlProtocol other) { if (other == akka.remote.RemoteProtocol.RemoteControlProtocol.getDefaultInstance()) return this; if (other.hasCommandType()) { @@ -2222,21 +2225,21 @@ public final class RemoteProtocol { this.mergeUnknownFields(other.getUnknownFields()); return this; } - + public final boolean isInitialized() { if (!hasCommandType()) { - + return false; } if (hasOrigin()) { if (!getOrigin().isInitialized()) { - + return false; } } return true; } - + public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -2288,9 +2291,9 @@ public final class RemoteProtocol { } } } - + private int bitField0_; - + // required .CommandType commandType = 1; private akka.remote.RemoteProtocol.CommandType commandType_ = akka.remote.RemoteProtocol.CommandType.CONNECT; public boolean hasCommandType() { @@ -2314,7 +2317,7 @@ public final class RemoteProtocol { onChanged(); return this; } - + // optional string cookie = 2; private java.lang.Object cookie_ = ""; public boolean hasCookie() { @@ -2350,7 +2353,7 @@ public final class RemoteProtocol { cookie_ = value; onChanged(); } - + // optional .AddressProtocol origin = 3; private akka.remote.RemoteProtocol.AddressProtocol origin_ = akka.remote.RemoteProtocol.AddressProtocol.getDefaultInstance(); private com.google.protobuf.SingleFieldBuilder< @@ -2428,7 +2431,7 @@ public final class RemoteProtocol { } } private com.google.protobuf.SingleFieldBuilder< - akka.remote.RemoteProtocol.AddressProtocol, akka.remote.RemoteProtocol.AddressProtocol.Builder, akka.remote.RemoteProtocol.AddressProtocolOrBuilder> + 
akka.remote.RemoteProtocol.AddressProtocol, akka.remote.RemoteProtocol.AddressProtocol.Builder, akka.remote.RemoteProtocol.AddressProtocolOrBuilder> getOriginFieldBuilder() { if (originBuilder_ == null) { originBuilder_ = new com.google.protobuf.SingleFieldBuilder< @@ -2440,21 +2443,21 @@ public final class RemoteProtocol { } return originBuilder_; } - + // @@protoc_insertion_point(builder_scope:RemoteControlProtocol) } - + static { defaultInstance = new RemoteControlProtocol(true); defaultInstance.initFields(); } - + // @@protoc_insertion_point(class_scope:RemoteControlProtocol) } - + public interface ActorRefProtocolOrBuilder extends com.google.protobuf.MessageOrBuilder { - + // required string path = 1; boolean hasPath(); String getPath(); @@ -2467,26 +2470,26 @@ public final class RemoteProtocol { super(builder); } private ActorRefProtocol(boolean noInit) {} - + private static final ActorRefProtocol defaultInstance; public static ActorRefProtocol getDefaultInstance() { return defaultInstance; } - + public ActorRefProtocol getDefaultInstanceForType() { return defaultInstance; } - + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return akka.remote.RemoteProtocol.internal_static_ActorRefProtocol_descriptor; } - + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return akka.remote.RemoteProtocol.internal_static_ActorRefProtocol_fieldAccessorTable; } - + private int bitField0_; // required string path = 1; public static final int PATH_FIELD_NUMBER = 1; @@ -2499,7 +2502,7 @@ public final class RemoteProtocol { if (ref instanceof String) { return (String) ref; } else { - com.google.protobuf.ByteString bs = + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; String s = bs.toStringUtf8(); if (com.google.protobuf.Internal.isValidUtf8(bs)) { @@ -2511,7 +2514,7 @@ public final class RemoteProtocol { private com.google.protobuf.ByteString getPathBytes() { java.lang.Object ref = path_; if (ref instanceof String) { - com.google.protobuf.ByteString b = + com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8((String) ref); path_ = b; return b; @@ -2519,7 +2522,7 @@ public final class RemoteProtocol { return (com.google.protobuf.ByteString) ref; } } - + private void initFields() { path_ = ""; } @@ -2527,7 +2530,7 @@ public final class RemoteProtocol { public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - + if (!hasPath()) { memoizedIsInitialized = 0; return false; @@ -2535,7 +2538,7 @@ public final class RemoteProtocol { memoizedIsInitialized = 1; return true; } - + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); @@ -2544,12 +2547,12 @@ public final class RemoteProtocol { } getUnknownFields().writeTo(output); } - + private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; - + size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream @@ -2559,14 +2562,14 @@ public final class RemoteProtocol { memoizedSerializedSize = size; return size; } - + private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } - + public static akka.remote.RemoteProtocol.ActorRefProtocol parseFrom( 
com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { @@ -2633,14 +2636,14 @@ public final class RemoteProtocol { return newBuilder().mergeFrom(input, extensionRegistry) .buildParsed(); } - + public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(akka.remote.RemoteProtocol.ActorRefProtocol prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } - + @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { @@ -2654,17 +2657,17 @@ public final class RemoteProtocol { getDescriptor() { return akka.remote.RemoteProtocol.internal_static_ActorRefProtocol_descriptor; } - + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return akka.remote.RemoteProtocol.internal_static_ActorRefProtocol_fieldAccessorTable; } - + // Construct using akka.remote.RemoteProtocol.ActorRefProtocol.newBuilder() private Builder() { maybeForceBuilderInitialization(); } - + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); @@ -2676,27 +2679,27 @@ public final class RemoteProtocol { private static Builder create() { return new Builder(); } - + public Builder clear() { super.clear(); path_ = ""; bitField0_ = (bitField0_ & ~0x00000001); return this; } - + public Builder clone() { return create().mergeFrom(buildPartial()); } - + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return akka.remote.RemoteProtocol.ActorRefProtocol.getDescriptor(); } - + public akka.remote.RemoteProtocol.ActorRefProtocol getDefaultInstanceForType() { return akka.remote.RemoteProtocol.ActorRefProtocol.getDefaultInstance(); } - + public akka.remote.RemoteProtocol.ActorRefProtocol build() { akka.remote.RemoteProtocol.ActorRefProtocol result = buildPartial(); if (!result.isInitialized()) { @@ -2704,7 +2707,7 @@ public final class RemoteProtocol { } return result; } - + private akka.remote.RemoteProtocol.ActorRefProtocol buildParsed() throws com.google.protobuf.InvalidProtocolBufferException { akka.remote.RemoteProtocol.ActorRefProtocol result = buildPartial(); @@ -2714,7 +2717,7 @@ public final class RemoteProtocol { } return result; } - + public akka.remote.RemoteProtocol.ActorRefProtocol buildPartial() { akka.remote.RemoteProtocol.ActorRefProtocol result = new akka.remote.RemoteProtocol.ActorRefProtocol(this); int from_bitField0_ = bitField0_; @@ -2727,7 +2730,7 @@ public final class RemoteProtocol { onBuilt(); return result; } - + public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof akka.remote.RemoteProtocol.ActorRefProtocol) { return mergeFrom((akka.remote.RemoteProtocol.ActorRefProtocol)other); @@ -2736,7 +2739,7 @@ public final class RemoteProtocol { return this; } } - + public Builder mergeFrom(akka.remote.RemoteProtocol.ActorRefProtocol other) { if (other == akka.remote.RemoteProtocol.ActorRefProtocol.getDefaultInstance()) return this; if (other.hasPath()) { @@ -2745,15 +2748,15 @@ public final class RemoteProtocol { this.mergeUnknownFields(other.getUnknownFields()); return this; } - + public final boolean isInitialized() { if (!hasPath()) { - + return false; } return true; } - + public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) 
@@ -2785,9 +2788,9 @@ public final class RemoteProtocol { } } } - + private int bitField0_; - + // required string path = 1; private java.lang.Object path_ = ""; public boolean hasPath() { @@ -2823,29 +2826,29 @@ public final class RemoteProtocol { path_ = value; onChanged(); } - + // @@protoc_insertion_point(builder_scope:ActorRefProtocol) } - + static { defaultInstance = new ActorRefProtocol(true); defaultInstance.initFields(); } - + // @@protoc_insertion_point(class_scope:ActorRefProtocol) } - + public interface MessageProtocolOrBuilder extends com.google.protobuf.MessageOrBuilder { - + // required bytes message = 1; boolean hasMessage(); com.google.protobuf.ByteString getMessage(); - + // required int32 serializerId = 2; boolean hasSerializerId(); int getSerializerId(); - + // optional bytes messageManifest = 3; boolean hasMessageManifest(); com.google.protobuf.ByteString getMessageManifest(); @@ -2858,26 +2861,26 @@ public final class RemoteProtocol { super(builder); } private MessageProtocol(boolean noInit) {} - + private static final MessageProtocol defaultInstance; public static MessageProtocol getDefaultInstance() { return defaultInstance; } - + public MessageProtocol getDefaultInstanceForType() { return defaultInstance; } - + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return akka.remote.RemoteProtocol.internal_static_MessageProtocol_descriptor; } - + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return akka.remote.RemoteProtocol.internal_static_MessageProtocol_fieldAccessorTable; } - + private int bitField0_; // required bytes message = 1; public static final int MESSAGE_FIELD_NUMBER = 1; @@ -2888,7 +2891,7 @@ public final class RemoteProtocol { public com.google.protobuf.ByteString getMessage() { return message_; } - + // required int32 serializerId = 2; public static final int SERIALIZERID_FIELD_NUMBER = 2; private int serializerId_; @@ -2898,7 +2901,7 @@ public final class RemoteProtocol { public int getSerializerId() { return serializerId_; } - + // optional bytes messageManifest = 3; public static final int MESSAGEMANIFEST_FIELD_NUMBER = 3; private com.google.protobuf.ByteString messageManifest_; @@ -2908,7 +2911,7 @@ public final class RemoteProtocol { public com.google.protobuf.ByteString getMessageManifest() { return messageManifest_; } - + private void initFields() { message_ = com.google.protobuf.ByteString.EMPTY; serializerId_ = 0; @@ -2918,7 +2921,7 @@ public final class RemoteProtocol { public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - + if (!hasMessage()) { memoizedIsInitialized = 0; return false; @@ -2930,7 +2933,7 @@ public final class RemoteProtocol { memoizedIsInitialized = 1; return true; } - + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); @@ -2945,12 +2948,12 @@ public final class RemoteProtocol { } getUnknownFields().writeTo(output); } - + private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; - + size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream @@ -2968,14 +2971,14 @@ public final class RemoteProtocol { memoizedSerializedSize = size; return size; } - + private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws 
java.io.ObjectStreamException { return super.writeReplace(); } - + public static akka.remote.RemoteProtocol.MessageProtocol parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { @@ -3042,14 +3045,14 @@ public final class RemoteProtocol { return newBuilder().mergeFrom(input, extensionRegistry) .buildParsed(); } - + public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(akka.remote.RemoteProtocol.MessageProtocol prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } - + @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { @@ -3063,17 +3066,17 @@ public final class RemoteProtocol { getDescriptor() { return akka.remote.RemoteProtocol.internal_static_MessageProtocol_descriptor; } - + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return akka.remote.RemoteProtocol.internal_static_MessageProtocol_fieldAccessorTable; } - + // Construct using akka.remote.RemoteProtocol.MessageProtocol.newBuilder() private Builder() { maybeForceBuilderInitialization(); } - + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); @@ -3085,7 +3088,7 @@ public final class RemoteProtocol { private static Builder create() { return new Builder(); } - + public Builder clear() { super.clear(); message_ = com.google.protobuf.ByteString.EMPTY; @@ -3096,20 +3099,20 @@ public final class RemoteProtocol { bitField0_ = (bitField0_ & ~0x00000004); return this; } - + public Builder clone() { return create().mergeFrom(buildPartial()); } - + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return akka.remote.RemoteProtocol.MessageProtocol.getDescriptor(); } - + public akka.remote.RemoteProtocol.MessageProtocol getDefaultInstanceForType() { return akka.remote.RemoteProtocol.MessageProtocol.getDefaultInstance(); } - + public akka.remote.RemoteProtocol.MessageProtocol build() { akka.remote.RemoteProtocol.MessageProtocol result = buildPartial(); if (!result.isInitialized()) { @@ -3117,7 +3120,7 @@ public final class RemoteProtocol { } return result; } - + private akka.remote.RemoteProtocol.MessageProtocol buildParsed() throws com.google.protobuf.InvalidProtocolBufferException { akka.remote.RemoteProtocol.MessageProtocol result = buildPartial(); @@ -3127,7 +3130,7 @@ public final class RemoteProtocol { } return result; } - + public akka.remote.RemoteProtocol.MessageProtocol buildPartial() { akka.remote.RemoteProtocol.MessageProtocol result = new akka.remote.RemoteProtocol.MessageProtocol(this); int from_bitField0_ = bitField0_; @@ -3148,7 +3151,7 @@ public final class RemoteProtocol { onBuilt(); return result; } - + public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof akka.remote.RemoteProtocol.MessageProtocol) { return mergeFrom((akka.remote.RemoteProtocol.MessageProtocol)other); @@ -3157,7 +3160,7 @@ public final class RemoteProtocol { return this; } } - + public Builder mergeFrom(akka.remote.RemoteProtocol.MessageProtocol other) { if (other == akka.remote.RemoteProtocol.MessageProtocol.getDefaultInstance()) return this; if (other.hasMessage()) { @@ -3172,19 +3175,19 @@ public final class RemoteProtocol { this.mergeUnknownFields(other.getUnknownFields()); return this; } - + public 
final boolean isInitialized() { if (!hasMessage()) { - + return false; } if (!hasSerializerId()) { - + return false; } return true; } - + public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -3226,9 +3229,9 @@ public final class RemoteProtocol { } } } - + private int bitField0_; - + // required bytes message = 1; private com.google.protobuf.ByteString message_ = com.google.protobuf.ByteString.EMPTY; public boolean hasMessage() { @@ -3252,7 +3255,7 @@ public final class RemoteProtocol { onChanged(); return this; } - + // required int32 serializerId = 2; private int serializerId_ ; public boolean hasSerializerId() { @@ -3273,7 +3276,7 @@ public final class RemoteProtocol { onChanged(); return this; } - + // optional bytes messageManifest = 3; private com.google.protobuf.ByteString messageManifest_ = com.google.protobuf.ByteString.EMPTY; public boolean hasMessageManifest() { @@ -3297,25 +3300,25 @@ public final class RemoteProtocol { onChanged(); return this; } - + // @@protoc_insertion_point(builder_scope:MessageProtocol) } - + static { defaultInstance = new MessageProtocol(true); defaultInstance.initFields(); } - + // @@protoc_insertion_point(class_scope:MessageProtocol) } - + public interface MetadataEntryProtocolOrBuilder extends com.google.protobuf.MessageOrBuilder { - + // required string key = 1; boolean hasKey(); String getKey(); - + // required bytes value = 2; boolean hasValue(); com.google.protobuf.ByteString getValue(); @@ -3328,26 +3331,26 @@ public final class RemoteProtocol { super(builder); } private MetadataEntryProtocol(boolean noInit) {} - + private static final MetadataEntryProtocol defaultInstance; public static MetadataEntryProtocol getDefaultInstance() { return defaultInstance; } - + public MetadataEntryProtocol getDefaultInstanceForType() { return defaultInstance; } - + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return akka.remote.RemoteProtocol.internal_static_MetadataEntryProtocol_descriptor; } - + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return akka.remote.RemoteProtocol.internal_static_MetadataEntryProtocol_fieldAccessorTable; } - + private int bitField0_; // required string key = 1; public static final int KEY_FIELD_NUMBER = 1; @@ -3360,7 +3363,7 @@ public final class RemoteProtocol { if (ref instanceof String) { return (String) ref; } else { - com.google.protobuf.ByteString bs = + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; String s = bs.toStringUtf8(); if (com.google.protobuf.Internal.isValidUtf8(bs)) { @@ -3372,7 +3375,7 @@ public final class RemoteProtocol { private com.google.protobuf.ByteString getKeyBytes() { java.lang.Object ref = key_; if (ref instanceof String) { - com.google.protobuf.ByteString b = + com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8((String) ref); key_ = b; return b; @@ -3380,7 +3383,7 @@ public final class RemoteProtocol { return (com.google.protobuf.ByteString) ref; } } - + // required bytes value = 2; public static final int VALUE_FIELD_NUMBER = 2; private com.google.protobuf.ByteString value_; @@ -3390,7 +3393,7 @@ public final class RemoteProtocol { public com.google.protobuf.ByteString getValue() { return value_; } - + private void initFields() { key_ = ""; value_ = com.google.protobuf.ByteString.EMPTY; @@ -3399,7 +3402,7 @@ public final class RemoteProtocol { public final boolean isInitialized() { 
byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - + if (!hasKey()) { memoizedIsInitialized = 0; return false; @@ -3411,7 +3414,7 @@ public final class RemoteProtocol { memoizedIsInitialized = 1; return true; } - + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); @@ -3423,12 +3426,12 @@ public final class RemoteProtocol { } getUnknownFields().writeTo(output); } - + private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; - + size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream @@ -3442,14 +3445,14 @@ public final class RemoteProtocol { memoizedSerializedSize = size; return size; } - + private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } - + public static akka.remote.RemoteProtocol.MetadataEntryProtocol parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { @@ -3516,14 +3519,14 @@ public final class RemoteProtocol { return newBuilder().mergeFrom(input, extensionRegistry) .buildParsed(); } - + public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(akka.remote.RemoteProtocol.MetadataEntryProtocol prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } - + @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { @@ -3537,17 +3540,17 @@ public final class RemoteProtocol { getDescriptor() { return akka.remote.RemoteProtocol.internal_static_MetadataEntryProtocol_descriptor; } - + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return akka.remote.RemoteProtocol.internal_static_MetadataEntryProtocol_fieldAccessorTable; } - + // Construct using akka.remote.RemoteProtocol.MetadataEntryProtocol.newBuilder() private Builder() { maybeForceBuilderInitialization(); } - + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); @@ -3559,7 +3562,7 @@ public final class RemoteProtocol { private static Builder create() { return new Builder(); } - + public Builder clear() { super.clear(); key_ = ""; @@ -3568,20 +3571,20 @@ public final class RemoteProtocol { bitField0_ = (bitField0_ & ~0x00000002); return this; } - + public Builder clone() { return create().mergeFrom(buildPartial()); } - + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return akka.remote.RemoteProtocol.MetadataEntryProtocol.getDescriptor(); } - + public akka.remote.RemoteProtocol.MetadataEntryProtocol getDefaultInstanceForType() { return akka.remote.RemoteProtocol.MetadataEntryProtocol.getDefaultInstance(); } - + public akka.remote.RemoteProtocol.MetadataEntryProtocol build() { akka.remote.RemoteProtocol.MetadataEntryProtocol result = buildPartial(); if (!result.isInitialized()) { @@ -3589,7 +3592,7 @@ public final class RemoteProtocol { } return result; } - + private akka.remote.RemoteProtocol.MetadataEntryProtocol buildParsed() throws com.google.protobuf.InvalidProtocolBufferException { akka.remote.RemoteProtocol.MetadataEntryProtocol result = buildPartial(); 
@@ -3599,7 +3602,7 @@ public final class RemoteProtocol { } return result; } - + public akka.remote.RemoteProtocol.MetadataEntryProtocol buildPartial() { akka.remote.RemoteProtocol.MetadataEntryProtocol result = new akka.remote.RemoteProtocol.MetadataEntryProtocol(this); int from_bitField0_ = bitField0_; @@ -3616,7 +3619,7 @@ public final class RemoteProtocol { onBuilt(); return result; } - + public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof akka.remote.RemoteProtocol.MetadataEntryProtocol) { return mergeFrom((akka.remote.RemoteProtocol.MetadataEntryProtocol)other); @@ -3625,7 +3628,7 @@ public final class RemoteProtocol { return this; } } - + public Builder mergeFrom(akka.remote.RemoteProtocol.MetadataEntryProtocol other) { if (other == akka.remote.RemoteProtocol.MetadataEntryProtocol.getDefaultInstance()) return this; if (other.hasKey()) { @@ -3637,19 +3640,19 @@ public final class RemoteProtocol { this.mergeUnknownFields(other.getUnknownFields()); return this; } - + public final boolean isInitialized() { if (!hasKey()) { - + return false; } if (!hasValue()) { - + return false; } return true; } - + public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -3686,9 +3689,9 @@ public final class RemoteProtocol { } } } - + private int bitField0_; - + // required string key = 1; private java.lang.Object key_ = ""; public boolean hasKey() { @@ -3724,7 +3727,7 @@ public final class RemoteProtocol { key_ = value; onChanged(); } - + // required bytes value = 2; private com.google.protobuf.ByteString value_ = com.google.protobuf.ByteString.EMPTY; public boolean hasValue() { @@ -3748,29 +3751,29 @@ public final class RemoteProtocol { onChanged(); return this; } - + // @@protoc_insertion_point(builder_scope:MetadataEntryProtocol) } - + static { defaultInstance = new MetadataEntryProtocol(true); defaultInstance.initFields(); } - + // @@protoc_insertion_point(class_scope:MetadataEntryProtocol) } - + public interface AddressProtocolOrBuilder extends com.google.protobuf.MessageOrBuilder { - + // required string system = 1; boolean hasSystem(); String getSystem(); - + // required string hostname = 2; boolean hasHostname(); String getHostname(); - + // required uint32 port = 3; boolean hasPort(); int getPort(); @@ -3783,26 +3786,26 @@ public final class RemoteProtocol { super(builder); } private AddressProtocol(boolean noInit) {} - + private static final AddressProtocol defaultInstance; public static AddressProtocol getDefaultInstance() { return defaultInstance; } - + public AddressProtocol getDefaultInstanceForType() { return defaultInstance; } - + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return akka.remote.RemoteProtocol.internal_static_AddressProtocol_descriptor; } - + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return akka.remote.RemoteProtocol.internal_static_AddressProtocol_fieldAccessorTable; } - + private int bitField0_; // required string system = 1; public static final int SYSTEM_FIELD_NUMBER = 1; @@ -3815,7 +3818,7 @@ public final class RemoteProtocol { if (ref instanceof String) { return (String) ref; } else { - com.google.protobuf.ByteString bs = + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; String s = bs.toStringUtf8(); if (com.google.protobuf.Internal.isValidUtf8(bs)) { @@ -3827,7 +3830,7 @@ public final class RemoteProtocol { private 
com.google.protobuf.ByteString getSystemBytes() { java.lang.Object ref = system_; if (ref instanceof String) { - com.google.protobuf.ByteString b = + com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8((String) ref); system_ = b; return b; @@ -3835,7 +3838,7 @@ public final class RemoteProtocol { return (com.google.protobuf.ByteString) ref; } } - + // required string hostname = 2; public static final int HOSTNAME_FIELD_NUMBER = 2; private java.lang.Object hostname_; @@ -3847,7 +3850,7 @@ public final class RemoteProtocol { if (ref instanceof String) { return (String) ref; } else { - com.google.protobuf.ByteString bs = + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; String s = bs.toStringUtf8(); if (com.google.protobuf.Internal.isValidUtf8(bs)) { @@ -3859,7 +3862,7 @@ public final class RemoteProtocol { private com.google.protobuf.ByteString getHostnameBytes() { java.lang.Object ref = hostname_; if (ref instanceof String) { - com.google.protobuf.ByteString b = + com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8((String) ref); hostname_ = b; return b; @@ -3867,7 +3870,7 @@ public final class RemoteProtocol { return (com.google.protobuf.ByteString) ref; } } - + // required uint32 port = 3; public static final int PORT_FIELD_NUMBER = 3; private int port_; @@ -3877,7 +3880,7 @@ public final class RemoteProtocol { public int getPort() { return port_; } - + private void initFields() { system_ = ""; hostname_ = ""; @@ -3887,7 +3890,7 @@ public final class RemoteProtocol { public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - + if (!hasSystem()) { memoizedIsInitialized = 0; return false; @@ -3903,7 +3906,7 @@ public final class RemoteProtocol { memoizedIsInitialized = 1; return true; } - + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); @@ -3918,12 +3921,12 @@ public final class RemoteProtocol { } getUnknownFields().writeTo(output); } - + private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; - + size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream @@ -3941,14 +3944,14 @@ public final class RemoteProtocol { memoizedSerializedSize = size; return size; } - + private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } - + public static akka.remote.RemoteProtocol.AddressProtocol parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { @@ -4015,14 +4018,14 @@ public final class RemoteProtocol { return newBuilder().mergeFrom(input, extensionRegistry) .buildParsed(); } - + public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(akka.remote.RemoteProtocol.AddressProtocol prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } - + @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { @@ -4036,17 +4039,17 @@ public final class RemoteProtocol { getDescriptor() { return akka.remote.RemoteProtocol.internal_static_AddressProtocol_descriptor; } - + protected 
com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return akka.remote.RemoteProtocol.internal_static_AddressProtocol_fieldAccessorTable; } - + // Construct using akka.remote.RemoteProtocol.AddressProtocol.newBuilder() private Builder() { maybeForceBuilderInitialization(); } - + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); @@ -4058,7 +4061,7 @@ public final class RemoteProtocol { private static Builder create() { return new Builder(); } - + public Builder clear() { super.clear(); system_ = ""; @@ -4069,20 +4072,20 @@ public final class RemoteProtocol { bitField0_ = (bitField0_ & ~0x00000004); return this; } - + public Builder clone() { return create().mergeFrom(buildPartial()); } - + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return akka.remote.RemoteProtocol.AddressProtocol.getDescriptor(); } - + public akka.remote.RemoteProtocol.AddressProtocol getDefaultInstanceForType() { return akka.remote.RemoteProtocol.AddressProtocol.getDefaultInstance(); } - + public akka.remote.RemoteProtocol.AddressProtocol build() { akka.remote.RemoteProtocol.AddressProtocol result = buildPartial(); if (!result.isInitialized()) { @@ -4090,7 +4093,7 @@ public final class RemoteProtocol { } return result; } - + private akka.remote.RemoteProtocol.AddressProtocol buildParsed() throws com.google.protobuf.InvalidProtocolBufferException { akka.remote.RemoteProtocol.AddressProtocol result = buildPartial(); @@ -4100,7 +4103,7 @@ public final class RemoteProtocol { } return result; } - + public akka.remote.RemoteProtocol.AddressProtocol buildPartial() { akka.remote.RemoteProtocol.AddressProtocol result = new akka.remote.RemoteProtocol.AddressProtocol(this); int from_bitField0_ = bitField0_; @@ -4121,7 +4124,7 @@ public final class RemoteProtocol { onBuilt(); return result; } - + public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof akka.remote.RemoteProtocol.AddressProtocol) { return mergeFrom((akka.remote.RemoteProtocol.AddressProtocol)other); @@ -4130,7 +4133,7 @@ public final class RemoteProtocol { return this; } } - + public Builder mergeFrom(akka.remote.RemoteProtocol.AddressProtocol other) { if (other == akka.remote.RemoteProtocol.AddressProtocol.getDefaultInstance()) return this; if (other.hasSystem()) { @@ -4145,23 +4148,23 @@ public final class RemoteProtocol { this.mergeUnknownFields(other.getUnknownFields()); return this; } - + public final boolean isInitialized() { if (!hasSystem()) { - + return false; } if (!hasHostname()) { - + return false; } if (!hasPort()) { - + return false; } return true; } - + public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -4203,9 +4206,9 @@ public final class RemoteProtocol { } } } - + private int bitField0_; - + // required string system = 1; private java.lang.Object system_ = ""; public boolean hasSystem() { @@ -4241,7 +4244,7 @@ public final class RemoteProtocol { system_ = value; onChanged(); } - + // required string hostname = 2; private java.lang.Object hostname_ = ""; public boolean hasHostname() { @@ -4277,7 +4280,7 @@ public final class RemoteProtocol { hostname_ = value; onChanged(); } - + // required uint32 port = 3; private int port_ ; public boolean hasPort() { @@ -4298,25 +4301,25 @@ public final class RemoteProtocol { onChanged(); return this; } - + // 
@@protoc_insertion_point(builder_scope:AddressProtocol) } - + static { defaultInstance = new AddressProtocol(true); defaultInstance.initFields(); } - + // @@protoc_insertion_point(class_scope:AddressProtocol) } - + public interface ExceptionProtocolOrBuilder extends com.google.protobuf.MessageOrBuilder { - + // required string classname = 1; boolean hasClassname(); String getClassname(); - + // required string message = 2; boolean hasMessage(); String getMessage(); @@ -4329,26 +4332,26 @@ public final class RemoteProtocol { super(builder); } private ExceptionProtocol(boolean noInit) {} - + private static final ExceptionProtocol defaultInstance; public static ExceptionProtocol getDefaultInstance() { return defaultInstance; } - + public ExceptionProtocol getDefaultInstanceForType() { return defaultInstance; } - + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return akka.remote.RemoteProtocol.internal_static_ExceptionProtocol_descriptor; } - + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return akka.remote.RemoteProtocol.internal_static_ExceptionProtocol_fieldAccessorTable; } - + private int bitField0_; // required string classname = 1; public static final int CLASSNAME_FIELD_NUMBER = 1; @@ -4361,7 +4364,7 @@ public final class RemoteProtocol { if (ref instanceof String) { return (String) ref; } else { - com.google.protobuf.ByteString bs = + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; String s = bs.toStringUtf8(); if (com.google.protobuf.Internal.isValidUtf8(bs)) { @@ -4373,7 +4376,7 @@ public final class RemoteProtocol { private com.google.protobuf.ByteString getClassnameBytes() { java.lang.Object ref = classname_; if (ref instanceof String) { - com.google.protobuf.ByteString b = + com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8((String) ref); classname_ = b; return b; @@ -4381,7 +4384,7 @@ public final class RemoteProtocol { return (com.google.protobuf.ByteString) ref; } } - + // required string message = 2; public static final int MESSAGE_FIELD_NUMBER = 2; private java.lang.Object message_; @@ -4393,7 +4396,7 @@ public final class RemoteProtocol { if (ref instanceof String) { return (String) ref; } else { - com.google.protobuf.ByteString bs = + com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; String s = bs.toStringUtf8(); if (com.google.protobuf.Internal.isValidUtf8(bs)) { @@ -4405,7 +4408,7 @@ public final class RemoteProtocol { private com.google.protobuf.ByteString getMessageBytes() { java.lang.Object ref = message_; if (ref instanceof String) { - com.google.protobuf.ByteString b = + com.google.protobuf.ByteString b = com.google.protobuf.ByteString.copyFromUtf8((String) ref); message_ = b; return b; @@ -4413,7 +4416,7 @@ public final class RemoteProtocol { return (com.google.protobuf.ByteString) ref; } } - + private void initFields() { classname_ = ""; message_ = ""; @@ -4422,7 +4425,7 @@ public final class RemoteProtocol { public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - + if (!hasClassname()) { memoizedIsInitialized = 0; return false; @@ -4434,7 +4437,7 @@ public final class RemoteProtocol { memoizedIsInitialized = 1; return true; } - + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); @@ -4446,12 +4449,12 @@ public final class RemoteProtocol { } 
getUnknownFields().writeTo(output); } - + private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; - + size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream @@ -4465,14 +4468,14 @@ public final class RemoteProtocol { memoizedSerializedSize = size; return size; } - + private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } - + public static akka.remote.RemoteProtocol.ExceptionProtocol parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { @@ -4539,14 +4542,14 @@ public final class RemoteProtocol { return newBuilder().mergeFrom(input, extensionRegistry) .buildParsed(); } - + public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(akka.remote.RemoteProtocol.ExceptionProtocol prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } - + @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { @@ -4560,17 +4563,17 @@ public final class RemoteProtocol { getDescriptor() { return akka.remote.RemoteProtocol.internal_static_ExceptionProtocol_descriptor; } - + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return akka.remote.RemoteProtocol.internal_static_ExceptionProtocol_fieldAccessorTable; } - + // Construct using akka.remote.RemoteProtocol.ExceptionProtocol.newBuilder() private Builder() { maybeForceBuilderInitialization(); } - + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); @@ -4582,7 +4585,7 @@ public final class RemoteProtocol { private static Builder create() { return new Builder(); } - + public Builder clear() { super.clear(); classname_ = ""; @@ -4591,20 +4594,20 @@ public final class RemoteProtocol { bitField0_ = (bitField0_ & ~0x00000002); return this; } - + public Builder clone() { return create().mergeFrom(buildPartial()); } - + public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { return akka.remote.RemoteProtocol.ExceptionProtocol.getDescriptor(); } - + public akka.remote.RemoteProtocol.ExceptionProtocol getDefaultInstanceForType() { return akka.remote.RemoteProtocol.ExceptionProtocol.getDefaultInstance(); } - + public akka.remote.RemoteProtocol.ExceptionProtocol build() { akka.remote.RemoteProtocol.ExceptionProtocol result = buildPartial(); if (!result.isInitialized()) { @@ -4612,7 +4615,7 @@ public final class RemoteProtocol { } return result; } - + private akka.remote.RemoteProtocol.ExceptionProtocol buildParsed() throws com.google.protobuf.InvalidProtocolBufferException { akka.remote.RemoteProtocol.ExceptionProtocol result = buildPartial(); @@ -4622,7 +4625,7 @@ public final class RemoteProtocol { } return result; } - + public akka.remote.RemoteProtocol.ExceptionProtocol buildPartial() { akka.remote.RemoteProtocol.ExceptionProtocol result = new akka.remote.RemoteProtocol.ExceptionProtocol(this); int from_bitField0_ = bitField0_; @@ -4639,7 +4642,7 @@ public final class RemoteProtocol { onBuilt(); return result; } - + public Builder mergeFrom(com.google.protobuf.Message other) { if (other instanceof 
akka.remote.RemoteProtocol.ExceptionProtocol) { return mergeFrom((akka.remote.RemoteProtocol.ExceptionProtocol)other); @@ -4648,7 +4651,7 @@ public final class RemoteProtocol { return this; } } - + public Builder mergeFrom(akka.remote.RemoteProtocol.ExceptionProtocol other) { if (other == akka.remote.RemoteProtocol.ExceptionProtocol.getDefaultInstance()) return this; if (other.hasClassname()) { @@ -4660,19 +4663,19 @@ public final class RemoteProtocol { this.mergeUnknownFields(other.getUnknownFields()); return this; } - + public final boolean isInitialized() { if (!hasClassname()) { - + return false; } if (!hasMessage()) { - + return false; } return true; } - + public Builder mergeFrom( com.google.protobuf.CodedInputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) @@ -4709,9 +4712,9 @@ public final class RemoteProtocol { } } } - + private int bitField0_; - + // required string classname = 1; private java.lang.Object classname_ = ""; public boolean hasClassname() { @@ -4747,7 +4750,7 @@ public final class RemoteProtocol { classname_ = value; onChanged(); } - + // required string message = 2; private java.lang.Object message_ = ""; public boolean hasMessage() { @@ -4783,31 +4786,31 @@ public final class RemoteProtocol { message_ = value; onChanged(); } - + // @@protoc_insertion_point(builder_scope:ExceptionProtocol) } - + static { defaultInstance = new ExceptionProtocol(true); defaultInstance.initFields(); } - + // @@protoc_insertion_point(class_scope:ExceptionProtocol) } - + public interface DurableMailboxMessageProtocolOrBuilder extends com.google.protobuf.MessageOrBuilder { - + // required .ActorRefProtocol recipient = 1; boolean hasRecipient(); akka.remote.RemoteProtocol.ActorRefProtocol getRecipient(); akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder getRecipientOrBuilder(); - + // optional .ActorRefProtocol sender = 2; boolean hasSender(); akka.remote.RemoteProtocol.ActorRefProtocol getSender(); akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder getSenderOrBuilder(); - + // required bytes message = 3; boolean hasMessage(); com.google.protobuf.ByteString getMessage(); @@ -4820,26 +4823,26 @@ public final class RemoteProtocol { super(builder); } private DurableMailboxMessageProtocol(boolean noInit) {} - + private static final DurableMailboxMessageProtocol defaultInstance; public static DurableMailboxMessageProtocol getDefaultInstance() { return defaultInstance; } - + public DurableMailboxMessageProtocol getDefaultInstanceForType() { return defaultInstance; } - + public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { return akka.remote.RemoteProtocol.internal_static_DurableMailboxMessageProtocol_descriptor; } - + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return akka.remote.RemoteProtocol.internal_static_DurableMailboxMessageProtocol_fieldAccessorTable; } - + private int bitField0_; // required .ActorRefProtocol recipient = 1; public static final int RECIPIENT_FIELD_NUMBER = 1; @@ -4853,7 +4856,7 @@ public final class RemoteProtocol { public akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder getRecipientOrBuilder() { return recipient_; } - + // optional .ActorRefProtocol sender = 2; public static final int SENDER_FIELD_NUMBER = 2; private akka.remote.RemoteProtocol.ActorRefProtocol sender_; @@ -4866,7 +4869,7 @@ public final class RemoteProtocol { public akka.remote.RemoteProtocol.ActorRefProtocolOrBuilder getSenderOrBuilder() { return sender_; } - + // required bytes 
message = 3; public static final int MESSAGE_FIELD_NUMBER = 3; private com.google.protobuf.ByteString message_; @@ -4876,7 +4879,7 @@ public final class RemoteProtocol { public com.google.protobuf.ByteString getMessage() { return message_; } - + private void initFields() { recipient_ = akka.remote.RemoteProtocol.ActorRefProtocol.getDefaultInstance(); sender_ = akka.remote.RemoteProtocol.ActorRefProtocol.getDefaultInstance(); @@ -4886,7 +4889,7 @@ public final class RemoteProtocol { public final boolean isInitialized() { byte isInitialized = memoizedIsInitialized; if (isInitialized != -1) return isInitialized == 1; - + if (!hasRecipient()) { memoizedIsInitialized = 0; return false; @@ -4908,7 +4911,7 @@ public final class RemoteProtocol { memoizedIsInitialized = 1; return true; } - + public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { getSerializedSize(); @@ -4923,12 +4926,12 @@ public final class RemoteProtocol { } getUnknownFields().writeTo(output); } - + private int memoizedSerializedSize = -1; public int getSerializedSize() { int size = memoizedSerializedSize; if (size != -1) return size; - + size = 0; if (((bitField0_ & 0x00000001) == 0x00000001)) { size += com.google.protobuf.CodedOutputStream @@ -4946,14 +4949,14 @@ public final class RemoteProtocol { memoizedSerializedSize = size; return size; } - + private static final long serialVersionUID = 0L; @java.lang.Override protected java.lang.Object writeReplace() throws java.io.ObjectStreamException { return super.writeReplace(); } - + public static akka.remote.RemoteProtocol.DurableMailboxMessageProtocol parseFrom( com.google.protobuf.ByteString data) throws com.google.protobuf.InvalidProtocolBufferException { @@ -5020,14 +5023,14 @@ public final class RemoteProtocol { return newBuilder().mergeFrom(input, extensionRegistry) .buildParsed(); } - + public static Builder newBuilder() { return Builder.create(); } public Builder newBuilderForType() { return newBuilder(); } public static Builder newBuilder(akka.remote.RemoteProtocol.DurableMailboxMessageProtocol prototype) { return newBuilder().mergeFrom(prototype); } public Builder toBuilder() { return newBuilder(this); } - + @java.lang.Override protected Builder newBuilderForType( com.google.protobuf.GeneratedMessage.BuilderParent parent) { @@ -5041,17 +5044,17 @@ public final class RemoteProtocol { getDescriptor() { return akka.remote.RemoteProtocol.internal_static_DurableMailboxMessageProtocol_descriptor; } - + protected com.google.protobuf.GeneratedMessage.FieldAccessorTable internalGetFieldAccessorTable() { return akka.remote.RemoteProtocol.internal_static_DurableMailboxMessageProtocol_fieldAccessorTable; } - + // Construct using akka.remote.RemoteProtocol.DurableMailboxMessageProtocol.newBuilder() private Builder() { maybeForceBuilderInitialization(); } - + private Builder(com.google.protobuf.GeneratedMessage.BuilderParent parent) { super(parent); maybeForceBuilderInitialization(); @@ -5532,12 +5535,13 @@ public final class RemoteProtocol { "assname\030\001 \002(\t\022\017\n\007message\030\002 \002(\t\"y\n\035Durabl" + "eMailboxMessageProtocol\022$\n\trecipient\030\001 \002" + "(\0132\021.ActorRefProtocol\022!\n\006sender\030\002 \001(\0132\021.", - "ActorRefProtocol\022\017\n\007message\030\003 \002(\014*(\n\013Com" + - "mandType\022\013\n\007CONNECT\020\001\022\014\n\010SHUTDOWN\020\002*K\n\026R" + - "eplicationStorageType\022\r\n\tTRANSIENT\020\001\022\023\n\017" + - "TRANSACTION_LOG\020\002\022\r\n\tDATA_GRID\020\003*>\n\027Repl" + - 
"icationStrategyType\022\021\n\rWRITE_THROUGH\020\001\022\020" + - "\n\014WRITE_BEHIND\020\002B\017\n\013akka.remoteH\001" + "ActorRefProtocol\022\017\n\007message\030\003 \002(\014*7\n\013Com" + + "mandType\022\013\n\007CONNECT\020\001\022\014\n\010SHUTDOWN\020\002\022\r\n\tH" + + "EARTBEAT\020\003*K\n\026ReplicationStorageType\022\r\n\t" + + "TRANSIENT\020\001\022\023\n\017TRANSACTION_LOG\020\002\022\r\n\tDATA" + + "_GRID\020\003*>\n\027ReplicationStrategyType\022\021\n\rWR" + + "ITE_THROUGH\020\001\022\020\n\014WRITE_BEHIND\020\002B\017\n\013akka." + + "remoteH\001" }; com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner assigner = new com.google.protobuf.Descriptors.FileDescriptor.InternalDescriptorAssigner() { diff --git a/akka-remote/src/main/protocol/RemoteProtocol.proto b/akka-remote/src/main/protocol/RemoteProtocol.proto index 7fe287522d..da9414a110 100644 --- a/akka-remote/src/main/protocol/RemoteProtocol.proto +++ b/akka-remote/src/main/protocol/RemoteProtocol.proto @@ -41,6 +41,7 @@ message RemoteControlProtocol { enum CommandType { CONNECT = 1; SHUTDOWN = 2; + HEARTBEAT = 3; } /** diff --git a/akka-remote/src/main/resources/reference.conf b/akka-remote/src/main/resources/reference.conf index 943b0d7122..ff19cefa70 100644 --- a/akka-remote/src/main/resources/reference.conf +++ b/akka-remote/src/main/resources/reference.conf @@ -110,9 +110,23 @@ akka { # (O) Time between reconnect attempts for active clients reconnect-delay = 5s - # (O) Inactivity period after which active client connection is shutdown; will be - # re-established in case of new communication requests - read-timeout = 3600s + # (O) Read inactivity period (lowest resolution is seconds) + # after which active client connection is shutdown; + # will be re-established in case of new communication requests. + # A value of 0 will turn this feature off + read-timeout = 0s + + # (O) Write inactivity period (lowest resolution is seconds) + # after which active client connection is shutdown; + # will be re-established in case of new communication requests + # A value of 0 will turn this feature off + write-timeout = 10s + + # (O) Inactivity period of both reads and writes (lowest resolution is seconds) + # after which active client connection is shutdown; + # will be re-established in case of new communication requests + # A value of 0 will turn this feature off + all-timeout = 0s # (O) Maximum time window that a client should try to reconnect for reconnection-time-window = 600s diff --git a/akka-remote/src/main/scala/akka/remote/RemoteTransport.scala b/akka-remote/src/main/scala/akka/remote/RemoteTransport.scala index 3fbe5913b2..a9f1199546 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteTransport.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteTransport.scala @@ -8,9 +8,9 @@ import scala.reflect.BeanProperty import akka.actor.{ Terminated, LocalRef, InternalActorRef, AutoReceivedMessage, AddressExtractor, Address, ActorSystemImpl, ActorSystem, ActorRef } import akka.dispatch.SystemMessage import akka.event.{ LoggingAdapter, Logging } -import akka.remote.RemoteProtocol.{ RemoteMessageProtocol, RemoteControlProtocol, AkkaRemoteProtocol, ActorRefProtocol } import akka.AkkaException import akka.serialization.Serialization +import akka.remote.RemoteProtocol._ /** * Remote life-cycle events. 
diff --git a/akka-remote/src/main/scala/akka/remote/netty/Client.scala b/akka-remote/src/main/scala/akka/remote/netty/Client.scala index 2947d9db26..7c4a462774 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Client.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Client.scala @@ -10,7 +10,6 @@ import org.jboss.netty.channel.group.DefaultChannelGroup import org.jboss.netty.channel.{ ChannelHandler, StaticChannelPipeline, SimpleChannelUpstreamHandler, MessageEvent, ExceptionEvent, ChannelStateEvent, ChannelPipelineFactory, ChannelPipeline, ChannelHandlerContext, ChannelFuture, Channel } import org.jboss.netty.handler.codec.frame.{ LengthFieldPrepender, LengthFieldBasedFrameDecoder } import org.jboss.netty.handler.execution.ExecutionHandler -import org.jboss.netty.handler.timeout.{ ReadTimeoutHandler, ReadTimeoutException } import akka.remote.RemoteProtocol.{ RemoteControlProtocol, CommandType, AkkaRemoteProtocol } import akka.remote.{ RemoteProtocol, RemoteMessage, RemoteLifeCycleEvent, RemoteClientStarted, RemoteClientShutdown, RemoteClientException, RemoteClientError, RemoteClientDisconnected, RemoteClientConnected } import akka.actor.{ simpleName, Address } @@ -24,6 +23,7 @@ import java.net.InetAddress import org.jboss.netty.util.TimerTask import org.jboss.netty.util.Timeout import java.util.concurrent.TimeUnit +import org.jboss.netty.handler.timeout.{ IdleState, IdleStateEvent, IdleStateAwareChannelHandler, IdleStateHandler } class RemoteClientMessageBufferException(message: String, cause: Throwable) extends AkkaException(message, cause) { def this(msg: String) = this(msg, null) @@ -159,7 +159,7 @@ class ActiveRemoteClient private[akka] ( executionHandler = new ExecutionHandler(netty.executor) val b = new ClientBootstrap(netty.clientChannelFactory) - b.setPipelineFactory(new ActiveRemoteClientPipelineFactory(name, b, executionHandler, remoteAddress, this)) + b.setPipelineFactory(new ActiveRemoteClientPipelineFactory(name, b, executionHandler, remoteAddress, localAddress, this)) b.setOption("tcpNoDelay", true) b.setOption("keepAlive", true) b.setOption("connectTimeoutMillis", settings.ConnectionTimeout.toMillis) @@ -234,14 +234,37 @@ class ActiveRemoteClientHandler( val name: String, val bootstrap: ClientBootstrap, val remoteAddress: Address, + val localAddress: Address, val timer: HashedWheelTimer, val client: ActiveRemoteClient) - extends SimpleChannelUpstreamHandler { + extends IdleStateAwareChannelHandler { def runOnceNow(thunk: ⇒ Unit): Unit = timer.newTimeout(new TimerTask() { def run(timeout: Timeout) = try { thunk } finally { timeout.cancel() } }, 0, TimeUnit.MILLISECONDS) + override def channelIdle(ctx: ChannelHandlerContext, e: IdleStateEvent) { + import IdleState._ + + def createHeartBeat(localAddress: Address, cookie: Option[String]): AkkaRemoteProtocol = { + val beat = RemoteControlProtocol.newBuilder.setCommandType(CommandType.HEARTBEAT) + if (cookie.nonEmpty) beat.setCookie(cookie.get) + + client.netty.createControlEnvelope( + beat.setOrigin(RemoteProtocol.AddressProtocol.newBuilder + .setSystem(localAddress.system) + .setHostname(localAddress.host.get) + .setPort(localAddress.port.get) + .build).build) + } + + e.getState match { + case READER_IDLE ⇒ e.getChannel.close() + case WRITER_IDLE ⇒ e.getChannel.write(createHeartBeat(localAddress, client.netty.settings.SecureCookie)) + case ALL_IDLE ⇒ e.getChannel.close() + } + } + override def messageReceived(ctx: ChannelHandlerContext, event: MessageEvent) { try { event.getMessage match { @@ -291,18 +314,9 @@ 
class ActiveRemoteClientHandler( } override def exceptionCaught(ctx: ChannelHandlerContext, event: ExceptionEvent) = { - val cause = event.getCause - if (cause ne null) { - client.notifyListeners(RemoteClientError(cause, client.netty, client.remoteAddress)) - cause match { - case e: ReadTimeoutException ⇒ - runOnceNow { - client.netty.shutdownClientConnection(remoteAddress) // spawn in another thread - } - case e: Exception ⇒ event.getChannel.close() - } - - } else client.notifyListeners(RemoteClientError(new Exception("Unknown cause"), client.netty, client.remoteAddress)) + val cause = if (event.getCause ne null) event.getCause else new Exception("Unknown cause") + client.notifyListeners(RemoteClientError(cause, client.netty, client.remoteAddress)) + event.getChannel.close() } } @@ -311,17 +325,21 @@ class ActiveRemoteClientPipelineFactory( bootstrap: ClientBootstrap, executionHandler: ExecutionHandler, remoteAddress: Address, + localAddress: Address, client: ActiveRemoteClient) extends ChannelPipelineFactory { import client.netty.settings def getPipeline: ChannelPipeline = { - val timeout = new ReadTimeoutHandler(client.netty.timer, settings.ReadTimeout.length, settings.ReadTimeout.unit) + val timeout = new IdleStateHandler(client.netty.timer, + settings.ReadTimeout.toSeconds.toInt, + settings.WriteTimeout.toSeconds.toInt, + settings.AllTimeout.toSeconds.toInt) val lenDec = new LengthFieldBasedFrameDecoder(settings.MessageFrameSize, 0, 4, 0, 4) val lenPrep = new LengthFieldPrepender(4) val messageDec = new RemoteMessageDecoder val messageEnc = new RemoteMessageEncoder(client.netty) - val remoteClient = new ActiveRemoteClientHandler(name, bootstrap, remoteAddress, client.netty.timer, client) + val remoteClient = new ActiveRemoteClientHandler(name, bootstrap, remoteAddress, localAddress, client.netty.timer, client) new StaticChannelPipeline(timeout, lenDec, messageDec, lenPrep, messageEnc, executionHandler, remoteClient) } diff --git a/akka-remote/src/main/scala/akka/remote/netty/Server.scala b/akka-remote/src/main/scala/akka/remote/netty/Server.scala index a8bc6ef67b..d0e8aafe05 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Server.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Server.scala @@ -51,7 +51,7 @@ class NettyRemoteServer(val netty: NettyRemoteTransport) { private[akka] var channel: Channel = _ def start(): Unit = { - channel = bootstrap.bind(new InetSocketAddress(ip, settings.DesiredPortFromConfig)) + channel = bootstrap.bind(new InetSocketAddress(ip, settings.PortSelector)) openChannels.add(channel) netty.notifyListeners(RemoteServerStarted(netty)) } @@ -61,9 +61,9 @@ class NettyRemoteServer(val netty: NettyRemoteTransport) { val shutdownSignal = { val b = RemoteControlProtocol.newBuilder.setCommandType(CommandType.SHUTDOWN) b.setOrigin(RemoteProtocol.AddressProtocol.newBuilder - .setSystem(settings.systemName) - .setHostname(settings.Hostname) - .setPort(settings.DesiredPortFromConfig) + .setSystem(netty.address.system) + .setHostname(netty.address.host.get) //FIXME Should probably be settings.host + .setPort(netty.address.port.get) .build) if (settings.SecureCookie.nonEmpty) b.setCookie(settings.SecureCookie.get) @@ -187,8 +187,9 @@ class RemoteServerHandler( val inbound = Address("akka", origin.getSystem, Some(origin.getHostname), Some(origin.getPort)) val client = new PassiveRemoteClient(event.getChannel, netty, inbound) netty.bindClient(inbound, client) - case CommandType.SHUTDOWN ⇒ //Will be unbound in channelClosed - case _ ⇒ //Unknown command + 
case CommandType.SHUTDOWN ⇒ //Will be unbound in channelClosed + case CommandType.HEARTBEAT ⇒ //Other guy is still alive + case _ ⇒ //Unknown command } case _ ⇒ //ignore } diff --git a/akka-remote/src/main/scala/akka/remote/netty/Settings.scala b/akka-remote/src/main/scala/akka/remote/netty/Settings.scala index 3f7c8f83de..0db6cabf18 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Settings.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Settings.scala @@ -30,6 +30,8 @@ class NettySettings(config: Config, val systemName: String) { val ReconnectionTimeWindow = Duration(getMilliseconds("reconnection-time-window"), MILLISECONDS) val ReadTimeout = Duration(getMilliseconds("read-timeout"), MILLISECONDS) + val WriteTimeout = Duration(getMilliseconds("write-timeout"), MILLISECONDS) + val AllTimeout = Duration(getMilliseconds("all-timeout"), MILLISECONDS) val ReconnectDelay = Duration(getMilliseconds("reconnect-delay"), MILLISECONDS) val MessageFrameSize = getBytes("message-frame-size").toInt @@ -37,7 +39,9 @@ class NettySettings(config: Config, val systemName: String) { case "" ⇒ InetAddress.getLocalHost.getHostAddress case value ⇒ value } - val DesiredPortFromConfig = getInt("port") + + @deprecated("WARNING: This should only be used by professionals.") + val PortSelector = getInt("port") val ConnectionTimeout = Duration(getMilliseconds("connection-timeout"), MILLISECONDS) diff --git a/akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala b/akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala index fbeaff5b6b..9b326ae0c0 100644 --- a/akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala @@ -43,7 +43,7 @@ class RemoteConfigSpec extends AkkaSpec( RequireCookie must be(false) UsePassiveConnections must be(true) Hostname must not be "" // will be set to the local IP - DesiredPortFromConfig must be(2552) + PortSelector must be(2552) MessageFrameSize must be(1048576) ConnectionTimeout must be(2 minutes) Backlog must be(4096) @@ -52,7 +52,9 @@ class RemoteConfigSpec extends AkkaSpec( MaxChannelMemorySize must be(0) MaxTotalMemorySize must be(0) ReconnectDelay must be(5 seconds) - ReadTimeout must be(1 hour) + ReadTimeout must be(0 millis) + WriteTimeout must be(10 seconds) + AllTimeout must be(0 millis) ReconnectionTimeWindow must be(10 minutes) } From e58d0d99407ef6c910c68c43de5289abe8442f31 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 1 Feb 2012 16:20:51 +0100 Subject: [PATCH 89/94] Switching to an orchestrated teardown on READ_TIMEOUT --- akka-remote/src/main/resources/reference.conf | 3 +-- akka-remote/src/main/scala/akka/remote/netty/Client.scala | 5 ++--- akka-remote/src/main/scala/akka/remote/netty/Server.scala | 2 +- 3 files changed, 4 insertions(+), 6 deletions(-) diff --git a/akka-remote/src/main/resources/reference.conf b/akka-remote/src/main/resources/reference.conf index ff19cefa70..1158d12295 100644 --- a/akka-remote/src/main/resources/reference.conf +++ b/akka-remote/src/main/resources/reference.conf @@ -117,8 +117,7 @@ akka { read-timeout = 0s # (O) Write inactivity period (lowest resolution is seconds) - # after which active client connection is shutdown; - # will be re-established in case of new communication requests + # after which a heartbeat is sent across the wire. 
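+# An illustrative reading (sketch only) of how the three idle settings
+# cooperate, as wired to Netty's IdleStateHandler in this patch:
+#   read-timeout = 0s    # reader-idle: tear down the connection, 0 = disabled
+#   write-timeout = 10s  # writer-idle: send a heartbeat after 10s of write silence
+#   all-timeout = 0s     # all-idle: tear down the connection, 0 = disabled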
# A value of 0 will turn this feature off write-timeout = 10s diff --git a/akka-remote/src/main/scala/akka/remote/netty/Client.scala b/akka-remote/src/main/scala/akka/remote/netty/Client.scala index 7c4a462774..eafd01d91a 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Client.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Client.scala @@ -259,9 +259,8 @@ class ActiveRemoteClientHandler( } e.getState match { - case READER_IDLE ⇒ e.getChannel.close() - case WRITER_IDLE ⇒ e.getChannel.write(createHeartBeat(localAddress, client.netty.settings.SecureCookie)) - case ALL_IDLE ⇒ e.getChannel.close() + case READER_IDLE | ALL_IDLE ⇒ runOnceNow { client.netty.shutdownClientConnection(remoteAddress) } + case WRITER_IDLE ⇒ e.getChannel.write(createHeartBeat(localAddress, client.netty.settings.SecureCookie)) } } diff --git a/akka-remote/src/main/scala/akka/remote/netty/Server.scala b/akka-remote/src/main/scala/akka/remote/netty/Server.scala index d0e8aafe05..83fb378a62 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Server.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Server.scala @@ -62,7 +62,7 @@ class NettyRemoteServer(val netty: NettyRemoteTransport) { val b = RemoteControlProtocol.newBuilder.setCommandType(CommandType.SHUTDOWN) b.setOrigin(RemoteProtocol.AddressProtocol.newBuilder .setSystem(netty.address.system) - .setHostname(netty.address.host.get) //FIXME Should probably be settings.host + .setHostname(netty.address.host.get) .setPort(netty.address.port.get) .build) if (settings.SecureCookie.nonEmpty) From d0d391b1dcea86a8c7c845e6350973cb78a44d34 Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Wed, 1 Feb 2012 16:34:20 +0100 Subject: [PATCH 90/94] Renaming NettyRemoteSupport to NettyRemoteTransport everywhere --- akka-docs/java/remoting.rst | 2 +- akka-docs/scala/remoting.rst | 2 +- .../akka-sample-remote/src/main/resources/common.conf | 2 +- akka-spring/src/test/resources/akka-test.conf | 2 +- .../src/test/scala/TypedActorSpringFeatureTest.scala | 8 ++++---- .../src/test/scala/UntypedActorSpringFeatureTest.scala | 8 ++++---- 6 files changed, 12 insertions(+), 12 deletions(-) diff --git a/akka-docs/java/remoting.rst b/akka-docs/java/remoting.rst index 05101497e1..bf6f06e88b 100644 --- a/akka-docs/java/remoting.rst +++ b/akka-docs/java/remoting.rst @@ -25,7 +25,7 @@ to your ``application.conf`` file:: provider = "akka.remote.RemoteActorRefProvider" } remote { - transport = "akka.remote.netty.NettyRemoteSupport" + transport = "akka.remote.netty.NettyRemoteTransport" server { hostname = "127.0.0.1" port = 2552 diff --git a/akka-docs/scala/remoting.rst b/akka-docs/scala/remoting.rst index 2d460aa060..06fe5d59ba 100644 --- a/akka-docs/scala/remoting.rst +++ b/akka-docs/scala/remoting.rst @@ -21,7 +21,7 @@ to your ``application.conf`` file:: provider = "akka.remote.RemoteActorRefProvider" } remote { - transport = "akka.remote.netty.NettyRemoteSupport" + transport = "akka.remote.netty.NettyRemoteTransport" server { hostname = "127.0.0.1" port = 2552 diff --git a/akka-samples/akka-sample-remote/src/main/resources/common.conf b/akka-samples/akka-sample-remote/src/main/resources/common.conf index 79009c0aea..7be9ad0ca1 100644 --- a/akka-samples/akka-sample-remote/src/main/resources/common.conf +++ b/akka-samples/akka-sample-remote/src/main/resources/common.conf @@ -6,7 +6,7 @@ akka { } remote { - transport = "akka.remote.netty.NettyRemoteSupport" + transport = "akka.remote.netty.NettyRemoteTransport" server { hostname = "127.0.0.1" } diff --git 
a/akka-spring/src/test/resources/akka-test.conf b/akka-spring/src/test/resources/akka-test.conf index 806783d217..a4838f6cc7 100644 --- a/akka-spring/src/test/resources/akka-test.conf +++ b/akka-spring/src/test/resources/akka-test.conf @@ -128,7 +128,7 @@ akka { # secure-cookie = "050E0A0D0D06010A00000900040D060F0C09060B" # generate your own with '$AKKA_HOME/scripts/generate_secure_cookie.sh' or using 'Crypt.generateSecureCookie' secure-cookie = "" - layer = "akka.remote.netty.NettyRemoteSupport" + layer = "akka.remote.netty.NettyRemoteTransport" server { hostname = "localhost" # The hostname or IP that clients should connect to diff --git a/akka-spring/src/test/scala/TypedActorSpringFeatureTest.scala b/akka-spring/src/test/scala/TypedActorSpringFeatureTest.scala index 1fa4874408..8ca1a981d6 100644 --- a/akka-spring/src/test/scala/TypedActorSpringFeatureTest.scala +++ b/akka-spring/src/test/scala/TypedActorSpringFeatureTest.scala @@ -13,7 +13,7 @@ import org.springframework.context.ApplicationContext import org.springframework.context.support.ClassPathXmlApplicationContext import org.springframework.core.io.{ ClassPathResource, Resource } import org.scalatest.{ BeforeAndAfterAll, FeatureSpec } -import akka.remote.netty.NettyRemoteSupport +import akka.remote.netty.NettyRemoteTransport import akka.actor._ import akka.actor.Actor._ import java.util.concurrent.{TimeoutException, CountDownLatch} @@ -36,17 +36,17 @@ object RemoteTypedActorLog { @RunWith(classOf[JUnitRunner]) class TypedActorSpringFeatureTest extends FeatureSpec with ShouldMatchers with BeforeAndAfterAll { - var optimizeLocal_? = remote.asInstanceOf[NettyRemoteSupport].optimizeLocalScoped_? + var optimizeLocal_? = remote.asInstanceOf[NettyRemoteTransport].optimizeLocalScoped_? override def beforeAll { - remote.asInstanceOf[NettyRemoteSupport].optimizeLocal.set(false) //Can't run the test if we're eliminating all remote calls + remote.asInstanceOf[NettyRemoteTransport].optimizeLocal.set(false) //Can't run the test if we're eliminating all remote calls remote.start("localhost", 9990) val typedActor = TypedActor.newInstance(classOf[RemoteTypedActorOne], classOf[RemoteTypedActorOneImpl], 1000) remote.registerTypedActor("typed-actor-service", typedActor) } override def afterAll { - remote.asInstanceOf[NettyRemoteSupport].optimizeLocal.set(optimizeLocal_?) //Reset optimizelocal after all tests + remote.asInstanceOf[NettyRemoteTransport].optimizeLocal.set(optimizeLocal_?) //Reset optimizelocal after all tests remote.shutdown Thread.sleep(1000) diff --git a/akka-spring/src/test/scala/UntypedActorSpringFeatureTest.scala b/akka-spring/src/test/scala/UntypedActorSpringFeatureTest.scala index 66ca68dba7..92ca4c500e 100644 --- a/akka-spring/src/test/scala/UntypedActorSpringFeatureTest.scala +++ b/akka-spring/src/test/scala/UntypedActorSpringFeatureTest.scala @@ -9,7 +9,7 @@ import org.scalatest.matchers.ShouldMatchers import org.scalatest.junit.JUnitRunner import org.junit.runner.RunWith import org.springframework.context.support.ClassPathXmlApplicationContext -import akka.remote.netty.NettyRemoteSupport +import akka.remote.netty.NettyRemoteTransport import org.scalatest.{ BeforeAndAfterAll, FeatureSpec } import java.util.concurrent.CountDownLatch @@ -23,15 +23,15 @@ import akka.actor.Actor._ @RunWith(classOf[JUnitRunner]) class UntypedActorSpringFeatureTest extends FeatureSpec with ShouldMatchers with BeforeAndAfterAll { - var optimizeLocal_? = remote.asInstanceOf[NettyRemoteSupport].optimizeLocalScoped_? + var optimizeLocal_? 
= remote.asInstanceOf[NettyRemoteTransport].optimizeLocalScoped_? override def beforeAll { - remote.asInstanceOf[NettyRemoteSupport].optimizeLocal.set(false) //Can't run the test if we're eliminating all remote calls + remote.asInstanceOf[NettyRemoteTransport].optimizeLocal.set(false) //Can't run the test if we're eliminating all remote calls remote.start("localhost", 9990) } override def afterAll { - remote.asInstanceOf[NettyRemoteSupport].optimizeLocal.set(optimizeLocal_?) //Reset optimizelocal after all tests + remote.asInstanceOf[NettyRemoteTransport].optimizeLocal.set(optimizeLocal_?) //Reset optimizelocal after all tests remote.shutdown Thread.sleep(1000) From 11b30fc1883a4fc65ba3d7941ec8bb0c3551c6ec Mon Sep 17 00:00:00 2001 From: Patrik Nordwall Date: Wed, 1 Feb 2012 20:59:04 +0100 Subject: [PATCH 91/94] Fixed config in akka-sample-remote. See #1766 --- akka-samples/akka-sample-remote/src/main/resources/common.conf | 2 -- 1 file changed, 2 deletions(-) diff --git a/akka-samples/akka-sample-remote/src/main/resources/common.conf b/akka-samples/akka-sample-remote/src/main/resources/common.conf index 7be9ad0ca1..a60eef8369 100644 --- a/akka-samples/akka-sample-remote/src/main/resources/common.conf +++ b/akka-samples/akka-sample-remote/src/main/resources/common.conf @@ -1,12 +1,10 @@ akka { - version = "2.0-SNAPSHOT" actor { provider = "akka.remote.RemoteActorRefProvider" } remote { - transport = "akka.remote.netty.NettyRemoteTransport" server { hostname = "127.0.0.1" } From c201d28fbbbde9acd7e9de9b787f943478629f2c Mon Sep 17 00:00:00 2001 From: Viktor Klang Date: Thu, 2 Feb 2012 09:22:26 +0100 Subject: [PATCH 92/94] #1773 bites the dust --- akka-actor/src/main/scala/akka/dispatch/Mailbox.scala | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala b/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala index d2bc7ff01d..3097dfc05b 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala @@ -303,7 +303,7 @@ trait BoundedMessageQueueSemantics extends QueueBasedMessageQueue { final def enqueue(receiver: ActorRef, handle: Envelope) { if (pushTimeOut.length > 0) { queue.offer(handle, pushTimeOut.length, pushTimeOut.unit) || { - throw new MessageQueueAppendFailedException("Couldn't enqueue message " + handle + " to " + toString) + throw new MessageQueueAppendFailedException("Couldn't enqueue message " + handle + " to " + receiver) } } else queue put handle } From d8fa7578e1ffabb85adb869273131f43e1dbc34f Mon Sep 17 00:00:00 2001 From: Roland Date: Thu, 2 Feb 2012 11:46:52 +0100 Subject: [PATCH 93/94] move RemoteServerStarted message to NettyRemoteTransport, fixes #1771 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit - also make port=0 (auto) for two remote tests (config&deployer) - also fix remote-sample conf files to configure “netty” section --- .../main/scala/akka/remote/netty/NettyRemoteSupport.scala | 3 ++- akka-remote/src/main/scala/akka/remote/netty/Server.scala | 3 +-- .../src/test/scala/akka/remote/RemoteConfigSpec.scala | 7 +++---- .../src/test/scala/akka/remote/RemoteDeployerSpec.scala | 1 + .../akka-sample-remote/src/main/resources/application.conf | 6 +++--- .../akka-sample-remote/src/main/resources/common.conf | 2 +- 6 files changed, 11 insertions(+), 11 deletions(-) diff --git a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala 
b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala index a225dd7aa8..9ad92ca236 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala @@ -23,7 +23,7 @@ import akka.actor.{ Address, ActorSystemImpl, ActorRef } import akka.dispatch.MonitorableThreadFactory import akka.event.Logging import akka.remote.RemoteProtocol.AkkaRemoteProtocol -import akka.remote.{ RemoteTransportException, RemoteTransport, RemoteSettings, RemoteMarshallingOps, RemoteActorRefProvider, RemoteActorRef } +import akka.remote.{ RemoteTransportException, RemoteTransport, RemoteSettings, RemoteMarshallingOps, RemoteActorRefProvider, RemoteActorRef, RemoteServerStarted } /** * Provides the implementation of the Netty remote support @@ -73,6 +73,7 @@ class NettyRemoteTransport(val remoteSettings: RemoteSettings, val system: Actor def start(): Unit = { server.start() setAddressFromChannel(server.channel) + notifyListeners(RemoteServerStarted(this)) } def shutdown(): Unit = { diff --git a/akka-remote/src/main/scala/akka/remote/netty/Server.scala b/akka-remote/src/main/scala/akka/remote/netty/Server.scala index 83fb378a62..2c51875e9d 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Server.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Server.scala @@ -15,7 +15,7 @@ import org.jboss.netty.handler.codec.frame.{ LengthFieldPrepender, LengthFieldBa import org.jboss.netty.handler.execution.ExecutionHandler import akka.event.Logging import akka.remote.RemoteProtocol.{ RemoteControlProtocol, CommandType, AkkaRemoteProtocol } -import akka.remote.{ RemoteServerStarted, RemoteServerShutdown, RemoteServerError, RemoteServerClientDisconnected, RemoteServerClientConnected, RemoteServerClientClosed, RemoteProtocol, RemoteMessage } +import akka.remote.{ RemoteServerShutdown, RemoteServerError, RemoteServerClientDisconnected, RemoteServerClientConnected, RemoteServerClientClosed, RemoteProtocol, RemoteMessage } import akka.actor.Address import java.net.InetAddress import akka.actor.ActorSystemImpl @@ -53,7 +53,6 @@ class NettyRemoteServer(val netty: NettyRemoteTransport) { def start(): Unit = { channel = bootstrap.bind(new InetSocketAddress(ip, settings.PortSelector)) openChannels.add(channel) - netty.notifyListeners(RemoteServerStarted(netty)) } def shutdown() { diff --git a/akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala b/akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala index 9b326ae0c0..3074e033d7 100644 --- a/akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala @@ -13,9 +13,8 @@ import akka.remote.netty.NettyRemoteTransport class RemoteConfigSpec extends AkkaSpec( """ akka { - actor { - provider = "akka.remote.RemoteActorRefProvider" - } + actor.provider = "akka.remote.RemoteActorRefProvider" + remote.netty.port = 0 } """) { @@ -43,7 +42,7 @@ class RemoteConfigSpec extends AkkaSpec( RequireCookie must be(false) UsePassiveConnections must be(true) Hostname must not be "" // will be set to the local IP - PortSelector must be(2552) + PortSelector must be(0) MessageFrameSize must be(1048576) ConnectionTimeout must be(2 minutes) Backlog must be(4096) diff --git a/akka-remote/src/test/scala/akka/remote/RemoteDeployerSpec.scala b/akka-remote/src/test/scala/akka/remote/RemoteDeployerSpec.scala index 1b32ce7112..1b250f1ea9 100644 --- a/akka-remote/src/test/scala/akka/remote/RemoteDeployerSpec.scala +++ 
b/akka-remote/src/test/scala/akka/remote/RemoteDeployerSpec.scala
@@ -18,6 +18,7 @@ object RemoteDeployerSpec {
         remote = "akka://sys@wallace:2552"
       }
     }
+    akka.remote.netty.port = 0
     """, ConfigParseOptions.defaults)

   class RecipeActor extends Actor {
diff --git a/akka-samples/akka-sample-remote/src/main/resources/application.conf b/akka-samples/akka-sample-remote/src/main/resources/application.conf
index ce550b33eb..0fe79b9290 100644
--- a/akka-samples/akka-sample-remote/src/main/resources/application.conf
+++ b/akka-samples/akka-sample-remote/src/main/resources/application.conf
@@ -3,7 +3,7 @@ calculator {
   include "common"

   akka {
-    remote.server.port = 2552
+    remote.netty.port = 2552
   }
}
//#calculator
@@ -13,7 +13,7 @@ remotelookup {
   include "common"

   akka {
-    remote.server.port = 2553
+    remote.netty.port = 2553
   }
}
//#remotelookup
@@ -31,7 +31,7 @@ remotecreation {
       }
     }

-    remote.server.port = 2554
+    remote.netty.port = 2554
  }
}
//#remotecreation
diff --git a/akka-samples/akka-sample-remote/src/main/resources/common.conf b/akka-samples/akka-sample-remote/src/main/resources/common.conf
index a60eef8369..a3e16cf1a9 100644
--- a/akka-samples/akka-sample-remote/src/main/resources/common.conf
+++ b/akka-samples/akka-sample-remote/src/main/resources/common.conf
@@ -5,7 +5,7 @@ akka {
   }

   remote {
-    server {
+    netty {
       hostname = "127.0.0.1"
     }
   }

From 8193e61aee389b52cca88301db06129837510c7e Mon Sep 17 00:00:00 2001
From: Viktor Klang
Date: Fri, 3 Feb 2012 09:25:56 +0100
Subject: [PATCH 94/94] Renaming tryRecover to recoverWith as in SIP-14, adding
 a type parameter to foreach

---
 .../src/test/java/akka/dispatch/JavaFutureTests.java      | 4 ++--
 .../src/test/scala/akka/dispatch/FutureSpec.scala         | 8 ++++----
 akka-actor/src/main/scala/akka/dispatch/Future.scala      | 6 +++---
 .../java/code/akka/docs/future/FutureDocTestBase.java     | 2 +-
 akka-docs/java/futures.rst                                | 2 +-
 akka-docs/scala/code/akka/docs/future/FutureDocSpec.scala | 4 ++--
 akka-docs/scala/futures.rst                               | 2 +-
 7 files changed, 14 insertions(+), 14 deletions(-)

diff --git a/akka-actor-tests/src/test/java/akka/dispatch/JavaFutureTests.java b/akka-actor-tests/src/test/java/akka/dispatch/JavaFutureTests.java
index 812ff052a2..f494fd7d81 100644
--- a/akka-actor-tests/src/test/java/akka/dispatch/JavaFutureTests.java
+++ b/akka-actor-tests/src/test/java/akka/dispatch/JavaFutureTests.java
@@ -306,10 +306,10 @@ public class JavaFutureTests {
   }

   @Test
-  public void tryRecoverToMustBeCallable() {
+  public void recoverWithMustBeCallable() {
     final IllegalStateException fail = new IllegalStateException("OHNOES");
     Promise<String> p = Futures.promise(system.dispatcher());
-    Future<String> f = p.future().tryRecover(new Recover<Future<String>>() {
+    Future<String> f = p.future().recoverWith(new Recover<Future<String>>() {
       public Future<String> recover(Throwable t) throws Throwable {
         if (t == fail) return Futures.successful("foo", system.dispatcher()).future();
         else throw t;
diff --git a/akka-actor-tests/src/test/scala/akka/dispatch/FutureSpec.scala b/akka-actor-tests/src/test/scala/akka/dispatch/FutureSpec.scala
index c04a063a16..e058218f2d 100644
--- a/akka-actor-tests/src/test/scala/akka/dispatch/FutureSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/dispatch/FutureSpec.scala
@@ -302,18 +302,18 @@ class FutureSpec extends AkkaSpec with Checkers with BeforeAndAfterAll with Defa
       }
     }

-    "tryRecover from exceptions" in {
+    "recoverWith from exceptions" in {
       val o = new IllegalStateException("original")
       val r = new IllegalStateException("recovered")

       intercept[IllegalStateException] {
-        Await.result(Promise.failed[String](o) tryRecover { case _ if false == true ⇒ Promise.successful("yay!") }, timeout.duration)
+        Await.result(Promise.failed[String](o) recoverWith { case _ if false == true ⇒ Promise.successful("yay!") }, timeout.duration)
       } must be(o)

-      Await.result(Promise.failed[String](o) tryRecover { case _ ⇒ Promise.successful("yay!") }, timeout.duration) must equal("yay!")
+      Await.result(Promise.failed[String](o) recoverWith { case _ ⇒ Promise.successful("yay!") }, timeout.duration) must equal("yay!")

       intercept[IllegalStateException] {
-        Await.result(Promise.failed[String](o) tryRecover { case _ ⇒ Promise.failed[String](r) }, timeout.duration)
+        Await.result(Promise.failed[String](o) recoverWith { case _ ⇒ Promise.failed[String](r) }, timeout.duration)
       } must be(r)
     }
diff --git a/akka-actor/src/main/scala/akka/dispatch/Future.scala b/akka-actor/src/main/scala/akka/dispatch/Future.scala
index c188f66893..08a73a1925 100644
--- a/akka-actor/src/main/scala/akka/dispatch/Future.scala
+++ b/akka-actor/src/main/scala/akka/dispatch/Future.scala
@@ -482,10 +482,10 @@ sealed trait Future[+T] extends Await.Awaitable[T] {
    *
    * {{{
    * val f = Future { Int.MaxValue }
-   * Future (6 / 0) tryRecover { case e: ArithmeticException => f } // result: Int.MaxValue
+   * Future (6 / 0) recoverWith { case e: ArithmeticException => f } // result: Int.MaxValue
    * }}}
    */
-  def tryRecover[U >: T](pf: PartialFunction[Throwable, Future[U]]): Future[U] = {
+  def recoverWith[U >: T](pf: PartialFunction[Throwable, Future[U]]): Future[U] = {
     val p = Promise[U]()

     onComplete {
@@ -603,7 +603,7 @@ sealed trait Future[+T] extends Await.Awaitable[T] {
   /**
    * Same as onSuccess { case r => f(r) } but is also used in for-comprehensions
    */
-  final def foreach(f: T ⇒ Unit): Unit = onComplete {
+  final def foreach[U](f: T ⇒ U): Unit = onComplete {
     case Right(r) ⇒ f(r)
     case _        ⇒
   }
diff --git a/akka-docs/java/code/akka/docs/future/FutureDocTestBase.java b/akka-docs/java/code/akka/docs/future/FutureDocTestBase.java
index eaf6a41c27..b064eb803b 100644
--- a/akka-docs/java/code/akka/docs/future/FutureDocTestBase.java
+++ b/akka-docs/java/code/akka/docs/future/FutureDocTestBase.java
@@ -361,7 +361,7 @@ public class FutureDocTestBase {
       public Integer call() {
         return 1 / 0;
       }
-    }, system.dispatcher()).tryRecover(new Recover<Future<Integer>>() {
+    }, system.dispatcher()).recoverWith(new Recover<Future<Integer>>() {
       public Future<Integer> recover(Throwable problem) throws Throwable {
         if (problem instanceof ArithmeticException) {
           return future(new Callable<Integer>() {
diff --git a/akka-docs/java/futures.rst b/akka-docs/java/futures.rst
index 4586c58996..00f17e57df 100644
--- a/akka-docs/java/futures.rst
+++ b/akka-docs/java/futures.rst
@@ -222,7 +222,7 @@ our ``Future`` would have a result of 0. The ``recover`` method works very simil
 so multiple ``Exception``\s can be handled in this manner, and if an ``Exception``
 is not handled this way it will behave as if we hadn't used the ``recover`` method.

-You can also use the ``tryRecover`` method, which has the same relationship to ``recover`` as ``flatMap` has to ``map``,
+You can also use the ``recoverWith`` method, which has the same relationship to ``recover`` as ``flatMap`` has to ``map``,
 and is used like this:

 .. includecode:: code/akka/docs/future/FutureDocTestBase.java
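A minimal sketch of the renamed combinator, assuming an ActorSystem ``system``
whose dispatcher supplies the implicit ExecutionContext: ``recover`` maps a
failure to a plain fallback value, while ``recoverWith`` maps it to another
``Future``, the same way ``flatMap`` relates to ``map``::

  import akka.dispatch.{ Await, Future, Promise }
  import akka.util.duration._

  implicit val ec = system.dispatcher // assumed to be in scope

  // recover: Throwable ⇒ plain value
  val f1 = Future(6 / 0) recover { case _: ArithmeticException ⇒ 0 }

  // recoverWith: Throwable ⇒ Future[Int], here an already-completed Promise
  val f2 = Future(6 / 0) recoverWith {
    case _: ArithmeticException ⇒ Promise.successful(0)
  }

  Await.result(f2, 1 second) // ⇒ 0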
diff --git a/akka-docs/scala/code/akka/docs/future/FutureDocSpec.scala b/akka-docs/scala/code/akka/docs/future/FutureDocSpec.scala
index 7ddf46b66b..098fe873ad 100644
--- a/akka-docs/scala/code/akka/docs/future/FutureDocSpec.scala
+++ b/akka-docs/scala/code/akka/docs/future/FutureDocSpec.scala
@@ -267,12 +267,12 @@ class FutureDocSpec extends AkkaSpec {
     Await.result(future, 1 second) must be(0)
   }

-  "demonstrate usage of tryRecover" in {
+  "demonstrate usage of recoverWith" in {
     implicit val timeout = system.settings.ActorTimeout
     val actor = system.actorOf(Props[MyActor])
     val msg1 = -1
     //#try-recover
-    val future = akka.pattern.ask(actor, msg1) tryRecover {
+    val future = akka.pattern.ask(actor, msg1) recoverWith {
       case e: ArithmeticException ⇒ Promise.successful(0)
       case foo: IllegalArgumentException ⇒ Promise.failed[Int](new IllegalStateException("All br0ken!"))
     }
diff --git a/akka-docs/scala/futures.rst b/akka-docs/scala/futures.rst
index 0313ba7d9d..181cc9f8fa 100644
--- a/akka-docs/scala/futures.rst
+++ b/akka-docs/scala/futures.rst
@@ -244,7 +244,7 @@ our ``Future`` would have a result of 0. The ``recover`` method works very simil
 so multiple ``Exception``\s can be handled in this manner, and if an ``Exception``
 is not handled this way it will behave as if we hadn't used the ``recover`` method.

-You can also use the ``tryRecover`` method, which has the same relationship to ``recover`` as ``flatMap` has to ``map``,
+You can also use the ``recoverWith`` method, which has the same relationship to ``recover`` as ``flatMap`` has to ``map``,
 and is used like this:

 .. includecode:: code/akka/docs/future/FutureDocSpec.scala
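The loosened ``foreach`` signature is easy to miss; a short sketch under the
same assumptions (implicit ExecutionContext taken from ``system.dispatcher``)
shows what the added type parameter buys::

  import akka.dispatch.Future

  implicit val ec = system.dispatcher // assumed to be in scope

  val f = Future("foo")
  val count: String ⇒ Int = _.length

  // With the old foreach(f: String ⇒ Unit) this line would not compile,
  // since String ⇒ Int does not conform to String ⇒ Unit; the new
  // foreach[U] infers U = Int and simply discards the result.
  f foreach count

  // For-comprehensions without yield desugar to this very foreach call:
  for (s ← f) count(s)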