diff --git a/akka-actor-tests/src/test/java/akka/japi/JavaAPITestBase.java b/akka-actor-tests/src/test/java/akka/japi/JavaAPITestBase.java
index b3a092b1f9..386b849b48 100644
--- a/akka-actor-tests/src/test/java/akka/japi/JavaAPITestBase.java
+++ b/akka-actor-tests/src/test/java/akka/japi/JavaAPITestBase.java
@@ -1,8 +1,11 @@
 package akka.japi;
 
+import akka.actor.ExtendedActorSystem;
 import akka.event.LoggingAdapter;
 import akka.event.NoLogging;
+import akka.serialization.JavaSerializer;
 import org.junit.Test;
+import java.util.concurrent.Callable;
 
 import static org.junit.Assert.*;
 
@@ -54,4 +57,13 @@ public class JavaAPITestBase {
     LoggingAdapter a = NoLogging.getInstance();
     assertNotNull(a);
   }
+
+  @Test
+  public void mustBeAbleToUseCurrentSystem() {
+    assertNull(JavaSerializer.currentSystem().withValue(null, new Callable<ExtendedActorSystem>() {
+      public ExtendedActorSystem call() {
+        return JavaSerializer.currentSystem().value();
+      }
+    }));
+  }
 }
diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorSystemSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorSystemSpec.scala
index 038cb2d0e6..781b8d4cab 100644
--- a/akka-actor-tests/src/test/scala/akka/actor/ActorSystemSpec.scala
+++ b/akka-actor-tests/src/test/scala/akka/actor/ActorSystemSpec.scala
@@ -4,7 +4,6 @@
 package akka.actor
 
 import language.postfixOps
-
 import akka.testkit._
 import org.scalatest.junit.JUnitSuite
 import com.typesafe.config.ConfigFactory
@@ -15,6 +14,10 @@ import akka.util.Timeout
 import akka.japi.Util.immutableSeq
 import scala.concurrent.Future
 import akka.pattern.ask
+import akka.dispatch._
+import com.typesafe.config.Config
+import java.util.concurrent.{ LinkedBlockingQueue, BlockingQueue, TimeUnit }
+import akka.util.Switch
 
 class JavaExtensionSpec extends JavaExtension with JUnitSuite
 
@@ -68,10 +71,57 @@ object ActorSystemSpec {
     }
   }
 
+  case class FastActor(latch: TestLatch, testActor: ActorRef) extends Actor {
+    val ref1 = context.actorOf(Props.empty)
+    val ref2 = context.actorFor(ref1.path.toString)
+    testActor !
ref2.getClass + latch.countDown() + + def receive = { + case _ ⇒ + } + } + + class SlowDispatcher(_config: Config, _prerequisites: DispatcherPrerequisites) extends MessageDispatcherConfigurator(_config, _prerequisites) { + private val instance = new Dispatcher( + prerequisites, + config.getString("id"), + config.getInt("throughput"), + Duration(config.getNanoseconds("throughput-deadline-time"), TimeUnit.NANOSECONDS), + mailboxType, + configureExecutor(), + Duration(config.getMilliseconds("shutdown-timeout"), TimeUnit.MILLISECONDS)) { + val doneIt = new Switch + override protected[akka] def registerForExecution(mbox: Mailbox, hasMessageHint: Boolean, hasSystemMessageHint: Boolean): Boolean = { + val ret = super.registerForExecution(mbox, hasMessageHint, hasSystemMessageHint) + doneIt.switchOn { + TestKit.awaitCond(mbox.actor.actor != null, 1.second) + mbox.actor.actor match { + case FastActor(latch, _) ⇒ Await.ready(latch, 1.second) + } + } + ret + } + } + + /** + * Returns the same dispatcher instance for each invocation + */ + override def dispatcher(): MessageDispatcher = instance + } + + val config = s""" + akka.extensions = ["akka.actor.TestExtension"] + slow { + type="${classOf[SlowDispatcher].getName}" + }""" + } @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) -class ActorSystemSpec extends AkkaSpec("""akka.extensions = ["akka.actor.TestExtension"]""") with ImplicitSender { +class ActorSystemSpec extends AkkaSpec(ActorSystemSpec.config) with ImplicitSender { + + import ActorSystemSpec.FastActor "An ActorSystem" must { @@ -165,6 +215,11 @@ class ActorSystemSpec extends AkkaSpec("""akka.extensions = ["akka.actor.TestExt Await.result(Future.sequence(waves), timeout.duration + 5.seconds) must be === Seq("done", "done", "done") } + "find actors that just have been created" in { + system.actorOf(Props(new FastActor(TestLatch(), testActor)).withDispatcher("slow")) + expectMsgType[Class[_]] must be(classOf[LocalActorRef]) + } + "reliable deny creation of actors while shutting down" in { val system = ActorSystem() import system.dispatcher diff --git a/akka-actor-tests/src/test/scala/akka/actor/FSMActorSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/FSMActorSpec.scala index f0e5e44ad4..b4860154ea 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/FSMActorSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/FSMActorSpec.scala @@ -13,6 +13,9 @@ import akka.event._ import com.typesafe.config.ConfigFactory import scala.concurrent.Await import akka.util.Timeout +import org.scalatest.matchers.Matcher +import org.scalatest.matchers.HavePropertyMatcher +import org.scalatest.matchers.HavePropertyMatchResult object FSMActorSpec { val timeout = Timeout(2 seconds) @@ -199,6 +202,45 @@ class FSMActorSpec extends AkkaSpec(Map("akka.actor.debug.fsm" -> true)) with Im expectMsg(1 second, fsm.StopEvent(FSM.Shutdown, 1, null)) } + "cancel all timers when terminated" in { + val timerNames = List("timer-1", "timer-2", "timer-3") + + // Lazy so fsmref can refer to checkTimersActive + lazy val fsmref = TestFSMRef(new Actor with FSM[String, Null] { + startWith("not-started", null) + when("not-started") { + case Event("start", _) ⇒ goto("started") replying "starting" + } + when("started", stateTimeout = 10 seconds) { + case Event("stop", _) ⇒ stop() + } + onTransition { + case "not-started" -> "started" ⇒ + for (timerName ← timerNames) setTimer(timerName, (), 10 seconds, false) + } + onTermination { + case _ ⇒ { + checkTimersActive(false) + testActor ! 
"stopped" + } + } + }) + + def checkTimersActive(active: Boolean) { + for (timer ← timerNames) fsmref.isTimerActive(timer) must be(active) + fsmref.isStateTimerActive must be(active) + } + + checkTimersActive(false) + + fsmref ! "start" + expectMsg(1 second, "starting") + checkTimersActive(true) + + fsmref ! "stop" + expectMsg(1 second, "stopped") + } + "log events and transitions if asked to do so" in { import scala.collection.JavaConverters._ val config = ConfigFactory.parseMap(Map("akka.loglevel" -> "DEBUG", diff --git a/akka-actor-tests/src/test/scala/akka/actor/SchedulerSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/SchedulerSpec.scala index 3932df4ea3..ba34987c9c 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/SchedulerSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/SchedulerSpec.scala @@ -11,7 +11,7 @@ import akka.pattern.ask import java.util.concurrent.atomic.AtomicInteger @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) -class SchedulerSpec extends AkkaSpec with BeforeAndAfterEach with DefaultTimeout { +class SchedulerSpec extends AkkaSpec with BeforeAndAfterEach with DefaultTimeout with ImplicitSender { private val cancellables = new ConcurrentLinkedQueue[Cancellable]() import system.dispatcher @@ -33,39 +33,47 @@ class SchedulerSpec extends AkkaSpec with BeforeAndAfterEach with DefaultTimeout "schedule more than once" in { case object Tick - val countDownLatch = new CountDownLatch(3) - val tickActor = system.actorOf(Props(new Actor { - def receive = { case Tick ⇒ countDownLatch.countDown() } + case object Tock + + val tickActor, tickActor2 = system.actorOf(Props(new Actor { + var ticks = 0 + def receive = { + case Tick ⇒ + if (ticks < 3) { + sender ! Tock + ticks += 1 + } + } })) // run every 50 milliseconds collectCancellable(system.scheduler.schedule(0 milliseconds, 50 milliseconds, tickActor, Tick)) // after max 1 second it should be executed at least the 3 times already - assert(countDownLatch.await(2, TimeUnit.SECONDS)) + expectMsg(Tock) + expectMsg(Tock) + expectMsg(Tock) + expectNoMsg(500 millis) - val countDownLatch2 = new CountDownLatch(3) - - collectCancellable(system.scheduler.schedule(0 milliseconds, 50 milliseconds)(countDownLatch2.countDown())) + collectCancellable(system.scheduler.schedule(0 milliseconds, 50 milliseconds)(tickActor2 ! Tick)) // after max 1 second it should be executed at least the 3 times already - assert(countDownLatch2.await(2, TimeUnit.SECONDS)) + expectMsg(Tock) + expectMsg(Tock) + expectMsg(Tock) + expectNoMsg(500 millis) } "stop continuous scheduling if the receiving actor has been terminated" taggedAs TimingTest in { - val actor = system.actorOf(Props(new Actor { - def receive = { - case x ⇒ testActor ! x - } - })) + val actor = system.actorOf(Props(new Actor { def receive = { case x ⇒ sender ! x } })) // run immediately and then every 100 milliseconds collectCancellable(system.scheduler.schedule(0 milliseconds, 100 milliseconds, actor, "msg")) expectMsg("msg") // stop the actor and, hence, the continuous messaging from happening - actor ! 
PoisonPill + system stop actor - expectNoMsg(1 second) + expectNoMsg(500 millis) } "schedule once" in { @@ -93,19 +101,9 @@ class SchedulerSpec extends AkkaSpec with BeforeAndAfterEach with DefaultTimeout * ticket #372 */ "be cancellable" in { - object Ping - val ticks = new CountDownLatch(1) + for (_ ← 1 to 10) system.scheduler.scheduleOnce(1 second, testActor, "fail").cancel() - val actor = system.actorOf(Props(new Actor { - def receive = { case Ping ⇒ ticks.countDown() } - })) - - (1 to 10).foreach { i ⇒ - val timeout = collectCancellable(system.scheduler.scheduleOnce(1 second, actor, Ping)) - timeout.cancel() - } - - assert(ticks.await(3, TimeUnit.SECONDS) == false) //No counting down should've been made + expectNoMsg(2 seconds) } "be cancellable during initial delay" taggedAs TimingTest in { @@ -200,31 +198,24 @@ class SchedulerSpec extends AkkaSpec with BeforeAndAfterEach with DefaultTimeout case object Msg val actor = system.actorOf(Props(new Actor { - def receive = { - case Msg ⇒ ticks.countDown() - } + def receive = { case Msg ⇒ ticks.countDown() } })) val startTime = System.nanoTime() - val cancellable = system.scheduler.schedule(1 second, 300 milliseconds, actor, Msg) + collectCancellable(system.scheduler.schedule(1 second, 300 milliseconds, actor, Msg)) Await.ready(ticks, 3 seconds) - val elapsedTimeMs = (System.nanoTime() - startTime) / 1000000 - assert(elapsedTimeMs > 1600) - assert(elapsedTimeMs < 2000) // the precision is not ms exact - cancellable.cancel() + (System.nanoTime() - startTime).nanos.toMillis must be(1800L plusOrMinus 199) } "adjust for scheduler inaccuracy" taggedAs TimingTest in { val startTime = System.nanoTime val n = 33 val latch = new TestLatch(n) - system.scheduler.schedule(150.millis, 150.millis) { - latch.countDown() - } + system.scheduler.schedule(150.millis, 150.millis) { latch.countDown() } Await.ready(latch, 6.seconds) - val rate = n * 1000.0 / (System.nanoTime - startTime).nanos.toMillis - rate must be(6.66 plusOrMinus (0.4)) + // Rate + n * 1000.0 / (System.nanoTime - startTime).nanos.toMillis must be(6.66 plusOrMinus 0.4) } "not be affected by long running task" taggedAs TimingTest in { @@ -236,8 +227,8 @@ class SchedulerSpec extends AkkaSpec with BeforeAndAfterEach with DefaultTimeout latch.countDown() } Await.ready(latch, 6.seconds) - val rate = n * 1000.0 / (System.nanoTime - startTime).nanos.toMillis - rate must be(4.4 plusOrMinus (0.3)) + // Rate + n * 1000.0 / (System.nanoTime - startTime).nanos.toMillis must be(4.4 plusOrMinus 0.3) } } } diff --git a/akka-actor-tests/src/test/scala/akka/actor/TypedActorSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/TypedActorSpec.scala index 201b6c6949..7a206671d4 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/TypedActorSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/TypedActorSpec.scala @@ -94,6 +94,14 @@ object TypedActorSpec { @throws(classOf[TimeoutException]) def joptionPigdog(delay: Long): JOption[String] + def nullFuture(): Future[Any] = null + + def nullJOption(): JOption[Any] = null + + def nullOption(): Option[Any] = null + + def nullReturn(): Any = null + def incr() @throws(classOf[TimeoutException]) @@ -283,6 +291,14 @@ class TypedActorSpec extends AkkaSpec(TypedActorSpec.config) mustStop(t) } + "be able to call null returning methods" in { + val t = newFooBar + t.nullJOption() must be === JOption.none + t.nullOption() must be === None + t.nullReturn() must be === null + Await.result(t.nullFuture(), remaining) must be === null + } + "be able to call 
Future-returning methods non-blockingly" in { val t = newFooBar val f = t.futurePigdog(200) diff --git a/akka-actor-tests/src/test/scala/akka/dispatch/PriorityDispatcherSpec.scala b/akka-actor-tests/src/test/scala/akka/dispatch/PriorityDispatcherSpec.scala index 4dad37c1be..4e76c5bea6 100644 --- a/akka-actor-tests/src/test/scala/akka/dispatch/PriorityDispatcherSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/dispatch/PriorityDispatcherSpec.scala @@ -51,19 +51,28 @@ class PriorityDispatcherSpec extends AkkaSpec(PriorityDispatcherSpec.config) wit def testOrdering(dispatcherKey: String) { val msgs = (1 to 100) toList + // It's important that the actor under test is not a top level actor + // with RepointableActorRef, since messages might be queued in + // UnstartedCell and the sent to the PriorityQueue and consumed immediately + // without the ordering taking place. val actor = system.actorOf(Props(new Actor { + context.actorOf(Props(new Actor { - val acc = scala.collection.mutable.ListBuffer[Int]() + val acc = scala.collection.mutable.ListBuffer[Int]() - scala.util.Random.shuffle(msgs) foreach { m ⇒ self ! m } + scala.util.Random.shuffle(msgs) foreach { m ⇒ self ! m } - self.tell('Result, testActor) + self.tell('Result, testActor) - def receive = { - case i: Int ⇒ acc += i - case 'Result ⇒ sender ! acc.toList - } - }).withDispatcher(dispatcherKey)) + def receive = { + case i: Int ⇒ acc += i + case 'Result ⇒ sender ! acc.toList + } + }).withDispatcher(dispatcherKey)) + + def receive = Actor.emptyBehavior + + })) expectMsgType[List[_]] must be === msgs } diff --git a/akka-actor-tests/src/test/scala/akka/pattern/PatternSpec.scala b/akka-actor-tests/src/test/scala/akka/pattern/PatternSpec.scala index f1ef0564f6..0adfa56c4c 100644 --- a/akka-actor-tests/src/test/scala/akka/pattern/PatternSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/pattern/PatternSpec.scala @@ -8,6 +8,7 @@ import language.postfixOps import akka.testkit.AkkaSpec import akka.actor.{ Props, Actor } +import java.util.concurrent.TimeoutException import scala.concurrent.{ Future, Promise, Await } import scala.concurrent.duration._ @@ -39,10 +40,10 @@ class PatternSpec extends AkkaSpec { Await.ready(gracefulStop(target, 1 millis), 1 second) } - "complete Future with AskTimeoutException when actor not terminated within timeout" in { + "complete Future with TimeoutException when actor not terminated within timeout" in { val target = system.actorOf(Props[TargetActor]) target ! 
Work(250 millis) - intercept[AskTimeoutException] { Await.result(gracefulStop(target, 10 millis), 200 millis) } + intercept[TimeoutException] { Await.result(gracefulStop(target, 10 millis), 200 millis) } } } diff --git a/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala index 9a9dbe24ac..b2eeccf3bf 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala @@ -173,20 +173,21 @@ class ResizerSpec extends AkkaSpec(ResizerSpec.config) with DefaultTimeout with routeeSize(router) must be(resizer.upperBound) } - "backoff" in { + "backoff" in within(10 seconds) { val resizer = DefaultResizer( lowerBound = 1, upperBound = 5, rampupRate = 1.0, backoffRate = 1.0, - backoffThreshold = 0.20, + backoffThreshold = 0.40, pressureThreshold = 1, messagesPerResize = 1) val router = system.actorOf(Props(new Actor { def receive = { - case n: Int ⇒ Thread.sleep((n millis).dilated.toMillis) + case n: Int if n <= 0 ⇒ // done + case n: Int ⇒ Thread.sleep((n millis).dilated.toMillis) } }).withRouter(RoundRobinRouter(resizer = Some(resizer)))) @@ -202,12 +203,11 @@ class ResizerSpec extends AkkaSpec(ResizerSpec.config) with DefaultTimeout with Thread.sleep((300 millis).dilated.toMillis) // let it cool down - for (m ← 0 to 5) { - router ! 1 - Thread.sleep((500 millis).dilated.toMillis) - } + awaitCond({ + router ! 0 // trigger resize + routeeSize(router) < z + }, interval = 500.millis.dilated) - awaitCond(Try(routeeSize(router) < (z)).getOrElse(false)) } } diff --git a/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala index 22a5e66971..9d7522f950 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala @@ -118,6 +118,7 @@ class RoutingSpec extends AkkaSpec(RoutingSpec.config) with DefaultTimeout with val names = 1 to 20 map { "routee" + _ } toList actor ! 
TestRun("test", names, actors) + 1 to actors foreach { _ ⇒ val routees = expectMsgType[RouterRoutees].routees routees.map(_.path.name) must be === names diff --git a/akka-actor/src/main/java/akka/actor/AbstractActorRef.java b/akka-actor/src/main/java/akka/actor/AbstractActorRef.java index 97ef09c501..650182a457 100644 --- a/akka-actor/src/main/java/akka/actor/AbstractActorRef.java +++ b/akka-actor/src/main/java/akka/actor/AbstractActorRef.java @@ -8,10 +8,12 @@ import akka.util.Unsafe; final class AbstractActorRef { final static long cellOffset; + final static long lookupOffset; static { try { cellOffset = Unsafe.instance.objectFieldOffset(RepointableActorRef.class.getDeclaredField("_cellDoNotCallMeDirectly")); + lookupOffset = Unsafe.instance.objectFieldOffset(RepointableActorRef.class.getDeclaredField("_lookupDoNotCallMeDirectly")); } catch(Throwable t){ throw new ExceptionInInitializerError(t); } diff --git a/akka-actor/src/main/java/akka/util/internal/HashedWheelTimer.java b/akka-actor/src/main/java/akka/util/internal/HashedWheelTimer.java index e95ff9ad95..73b3cf143d 100644 --- a/akka-actor/src/main/java/akka/util/internal/HashedWheelTimer.java +++ b/akka-actor/src/main/java/akka/util/internal/HashedWheelTimer.java @@ -24,6 +24,7 @@ import java.util.concurrent.ThreadFactory; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; +import akka.util.Helpers; import scala.concurrent.duration.Duration; import scala.concurrent.duration.FiniteDuration; import akka.event.LoggingAdapter; @@ -91,7 +92,6 @@ public class HashedWheelTimer implements Timer { final ReusableIterator[] iterators; final int mask; final ReadWriteLock lock = new ReentrantReadWriteLock(); - final boolean isWindows = System.getProperty("os.name", "").toLowerCase().indexOf("win") >= 0; volatile int wheelCursor; private LoggingAdapter logger; @@ -396,7 +396,7 @@ public class HashedWheelTimer implements Timer { // the JVM if it runs on windows. // // See https://github.com/netty/netty/issues/356 - if (isWindows) { + if (Helpers.isWindows()) { sleepTimeMs = (sleepTimeMs / 10) * 10; } diff --git a/akka-actor/src/main/scala/akka/actor/Actor.scala b/akka-actor/src/main/scala/akka/actor/Actor.scala index d02f37cf7b..3706799f23 100644 --- a/akka-actor/src/main/scala/akka/actor/Actor.scala +++ b/akka-actor/src/main/scala/akka/actor/Actor.scala @@ -82,8 +82,9 @@ case class Terminated private[akka] (@BeanProperty actor: ActorRef)( * INTERNAL API * * Used for remote death watch. Failure detector publish this to the - * `eventStream` when a remote node is detected to be unreachable. - * The watcher ([[akka.actor.DeathWatch]]) subscribes to the `eventStream` + * `eventStream` when a remote node is detected to be unreachable and/or decided to + * be removed. + * The watcher ([[akka.actor.dungeon.DeathWatch]]) subscribes to the `eventStream` * and translates this event to [[akka.actor.Terminated]], which is sent itself. 
  */
 @SerialVersionUID(1L)
@@ -179,8 +180,8 @@
  *
  * @param actor is the actor whose preRestart() hook failed
  * @param cause is the exception thrown by that actor within preRestart()
- * @param origCause is the exception which caused the restart in the first place
- * @param msg is the message which was optionally passed into preRestart()
+ * @param originalCause is the exception which caused the restart in the first place
+ * @param messageOption is the message which was optionally passed into preRestart()
  */
 @SerialVersionUID(1L)
 case class PreRestartException private[akka] (actor: ActorRef, cause: Throwable, originalCause: Throwable, messageOption: Option[Any])
@@ -196,7 +197,7 @@ case class PreRestartException private[akka] (actor: ActorRef, cause: Throwable,
  *
  * @param actor is the actor whose constructor or postRestart() hook failed
  * @param cause is the exception thrown by that actor within preRestart()
- * @param origCause is the exception which caused the restart in the first place
+ * @param originalCause is the exception which caused the restart in the first place
  */
 @SerialVersionUID(1L)
 case class PostRestartException private[akka] (actor: ActorRef, cause: Throwable, originalCause: Throwable)
diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala
index 9cf2b5b3df..51f11c044c 100644
--- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala
+++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala
@@ -208,6 +208,11 @@ private[akka] trait Cell {
    * The system internals where this Cell lives.
    */
   def systemImpl: ActorSystemImpl
+  /**
+   * Start the cell: enqueued messages must not be processed before this has
+   * been called. The usual action is to attach the mailbox to a dispatcher.
+   */
+  def start(): this.type
   /**
    * Recursively suspend this actor and all its children. Must not throw exceptions.
*/ @@ -361,10 +366,10 @@ private[akka] class ActorCell( case null ⇒ faultResume(inRespToFailure) case w: WaitingForChildren ⇒ w.enqueue(message) } - case Terminate() ⇒ terminate() - case Supervise(child, uid) ⇒ supervise(child, uid) - case ChildTerminated(child) ⇒ todo = handleChildTerminated(child) - case NoMessage ⇒ // only here to suppress warning + case Terminate() ⇒ terminate() + case Supervise(child, async, uid) ⇒ supervise(child, async, uid) + case ChildTerminated(child) ⇒ todo = handleChildTerminated(child) + case NoMessage ⇒ // only here to suppress warning } } catch { case e @ (_: InterruptedException | NonFatal(_)) ⇒ handleInvokeFailure(Nil, e, "error while processing " + message) @@ -492,21 +497,21 @@ private[akka] class ActorCell( } } - private def supervise(child: ActorRef, uid: Int): Unit = if (!isTerminating) { + private def supervise(child: ActorRef, async: Boolean, uid: Int): Unit = if (!isTerminating) { // Supervise is the first thing we get from a new child, so store away the UID for later use in handleFailure() initChild(child) match { case Some(crs) ⇒ crs.uid = uid - handleSupervise(child) + handleSupervise(child, async) if (system.settings.DebugLifecycle) publish(Debug(self.path.toString, clazz(actor), "now supervising " + child)) case None ⇒ publish(Error(self.path.toString, clazz(actor), "received Supervise from unregistered child " + child + ", this will not end well")) } } // future extension point - protected def handleSupervise(child: ActorRef): Unit = child match { - case r: RepointableActorRef ⇒ r.activate() - case _ ⇒ + protected def handleSupervise(child: ActorRef, async: Boolean): Unit = child match { + case r: RepointableActorRef if async ⇒ r.point() + case _ ⇒ } final protected def clearActorFields(actorInstance: Actor): Unit = { diff --git a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala index b0b4c3d939..5a3bb7dac2 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala @@ -42,8 +42,7 @@ trait ActorRefProvider { def deadLetters: ActorRef /** - * The root path for all actors within this actor system, including remote - * address if enabled. + * The root path for all actors within this actor system, not including any remote address information. */ def rootPath: ActorPath @@ -146,6 +145,11 @@ trait ActorRefProvider { * attempt is made to verify actual reachability). */ def getExternalAddressFor(addr: Address): Option[Address] + + /** + * Obtain the external address of the default transport. + */ + def getDefaultAddress: Address } /** @@ -317,6 +321,10 @@ private[akka] object SystemGuardian { /** * Local ActorRef provider. + * + * INTERNAL API! + * + * Depending on this class is not supported, only the [[ActorRefProvider]] interface is supported. 
*/ class LocalActorRefProvider( _systemName: String, @@ -381,7 +389,7 @@ class LocalActorRefProvider( override def sendSystemMessage(message: SystemMessage): Unit = stopped ifOff { message match { - case Supervise(_, _) ⇒ // TODO register child in some map to keep track of it and enable shutdown after all dead + case Supervise(_, _, _) ⇒ // TODO register child in some map to keep track of it and enable shutdown after all dead case ChildTerminated(_) ⇒ stop() case _ ⇒ log.error(this + " received unexpected system message [" + message + "]") } @@ -585,16 +593,17 @@ class LocalActorRefProvider( if (settings.DebugRouterMisconfiguration && deployer.lookup(path).isDefined) log.warning("Configuration says that {} should be a router, but code disagrees. Remove the config or add a routerConfig to its Props.") - if (async) new RepointableActorRef(system, props, supervisor, path).initialize() + if (async) new RepointableActorRef(system, props, supervisor, path).initialize(async) else new LocalActorRef(system, props, supervisor, path) case router ⇒ val lookup = if (lookupDeploy) deployer.lookup(path) else None val fromProps = Iterator(props.deploy.copy(routerConfig = props.deploy.routerConfig withFallback router)) val d = fromProps ++ deploy.iterator ++ lookup.iterator reduce ((a, b) ⇒ b withFallback a) - val ref = new RoutedActorRef(system, props.withRouter(d.routerConfig), supervisor, path).initialize() - if (async) ref else ref.activate() + new RoutedActorRef(system, props.withRouter(d.routerConfig), supervisor, path).initialize(async) } } def getExternalAddressFor(addr: Address): Option[Address] = if (addr == rootPath.address) Some(addr) else None + + def getDefaultAddress: Address = rootPath.address } diff --git a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala index 8bada6e0ba..45025f1887 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala @@ -613,7 +613,7 @@ private[akka] class ActorSystemImpl(val name: String, applicationConfig: Config, protected def createScheduler(): Scheduler = new DefaultScheduler( new HashedWheelTimer(log, - threadFactory.copy(threadFactory.name + "-scheduler"), + threadFactory.withName(threadFactory.name + "-scheduler"), settings.SchedulerTickDuration, settings.SchedulerTicksPerWheel), log) diff --git a/akka-actor/src/main/scala/akka/actor/Deployer.scala b/akka-actor/src/main/scala/akka/actor/Deployer.scala index 0edb0a0490..8ed7dc754a 100644 --- a/akka-actor/src/main/scala/akka/actor/Deployer.scala +++ b/akka-actor/src/main/scala/akka/actor/Deployer.scala @@ -139,16 +139,24 @@ private[akka] class Deployer(val settings: ActorSystem.Settings, val dynamicAcce } def parseConfig(key: String, config: Config): Option[Deploy] = { - val deployment = config.withFallback(default) + val router = createRouterConfig(deployment.getString("router"), key, config, deployment) + Some(Deploy(key, deployment, router, NoScopeGiven)) + } + /** + * Factory method for creating `RouterConfig` + * @param routerType the configured name of the router, or FQCN + * @param key the full configuration key of the deployment section + * @param config the user defined config of the deployment, without defaults + * @param deployment the deployment config, with defaults + */ + protected def createRouterConfig(routerType: String, key: String, config: Config, deployment: Config): RouterConfig = { val routees = immutableSeq(deployment.getStringList("routees.paths")) - val 
nrOfInstances = deployment.getInt("nr-of-instances") + val resizer = if (config.hasPath("resizer")) Some(DefaultResizer(deployment.getConfig("resizer"))) else None - val resizer: Option[Resizer] = if (config.hasPath("resizer")) Some(DefaultResizer(deployment.getConfig("resizer"))) else None - - val router: RouterConfig = deployment.getString("router") match { + routerType match { case "from-code" ⇒ NoRouter case "round-robin" ⇒ RoundRobinRouter(nrOfInstances, routees, resizer) case "random" ⇒ RandomRouter(nrOfInstances, routees, resizer) @@ -170,7 +178,6 @@ private[akka] class Deployer(val settings: ActorSystem.Settings, val dynamicAcce .format(fqn, key), exception) }).get } - - Some(Deploy(key, deployment, router, NoScopeGiven)) } + } diff --git a/akka-actor/src/main/scala/akka/actor/FSM.scala b/akka-actor/src/main/scala/akka/actor/FSM.scala index 069691ce67..fb4db204c2 100644 --- a/akka-actor/src/main/scala/akka/actor/FSM.scala +++ b/akka-actor/src/main/scala/akka/actor/FSM.scala @@ -238,7 +238,7 @@ object FSM { * setTimer("tock", TockMsg, 1 second, true) // repeating * setTimer("lifetime", TerminateMsg, 1 hour, false) // single-shot * cancelTimer("tock") - * timerActive_? ("tock") + * isTimerActive("tock") * */ trait FSM[S, D] extends Listeners with ActorLogging { @@ -372,7 +372,15 @@ trait FSM[S, D] extends Listeners with ActorLogging { * timer does not exist, has previously been canceled or if it was a * single-shot timer whose message was already received. */ - final def timerActive_?(name: String) = timers contains name + @deprecated("use isTimerActive instead", "2.2") + final def timerActive_?(name: String): Boolean = isTimerActive(name) + + /** + * Inquire whether the named timer is still active. Returns true unless the + * timer does not exist, has previously been canceled or if it was a + * single-shot timer whose message was already received. + */ + final def isTimerActive(name: String): Boolean = timers contains name /** * Set state timeout explicitly. This method can safely be used from within a @@ -380,6 +388,11 @@ trait FSM[S, D] extends Listeners with ActorLogging { */ final def setStateTimeout(state: S, timeout: Timeout): Unit = stateTimeouts(state) = timeout + /** + * Internal API, used for testing. + */ + private[akka] final def isStateTimerActive = timeoutFuture.isDefined + /** * Set handler which is called upon each state transition, i.e. not when * staying in the same state. 
This may use the pair extractor defined in the @@ -634,6 +647,8 @@ trait FSM[S, D] extends Listeners with ActorLogging { case Failure(msg: AnyRef) ⇒ log.error(msg.toString) case _ ⇒ } + for (timer ← timers.values) timer.cancel() + timers.clear() val stopEvent = StopEvent(reason, currentState.stateName, currentState.stateData) if (terminateEvent.isDefinedAt(stopEvent)) terminateEvent(stopEvent) diff --git a/akka-actor/src/main/scala/akka/actor/Props.scala b/akka-actor/src/main/scala/akka/actor/Props.scala index 693ca75565..30e28834dd 100644 --- a/akka-actor/src/main/scala/akka/actor/Props.scala +++ b/akka-actor/src/main/scala/akka/actor/Props.scala @@ -4,8 +4,6 @@ package akka.actor -import language.existentials - import akka.dispatch._ import akka.japi.Creator import scala.reflect.ClassTag diff --git a/akka-actor/src/main/scala/akka/actor/RepointableActorRef.scala b/akka-actor/src/main/scala/akka/actor/RepointableActorRef.scala index 8ecb1cbb72..02aef18564 100644 --- a/akka-actor/src/main/scala/akka/actor/RepointableActorRef.scala +++ b/akka-actor/src/main/scala/akka/actor/RepointableActorRef.scala @@ -5,17 +5,18 @@ package akka.actor import java.io.ObjectStreamException +import java.util.{ LinkedList ⇒ JLinkedList, ListIterator ⇒ JListIterator } import java.util.concurrent.TimeUnit import java.util.concurrent.locks.ReentrantLock import scala.annotation.tailrec -import scala.collection.mutable.Queue import scala.concurrent.forkjoin.ThreadLocalRandom import akka.actor.dungeon.ChildrenContainer -import akka.dispatch.{ Envelope, Supervise, SystemMessage, Terminate } import akka.event.Logging.Warning import akka.util.Unsafe +import akka.dispatch._ +import util.Try /** * This actor ref starts out with some dummy cell (by default just enqueuing @@ -32,17 +33,34 @@ private[akka] class RepointableActorRef( val path: ActorPath) extends ActorRefWithCell with RepointableRef { - import AbstractActorRef.cellOffset + import AbstractActorRef.{ cellOffset, lookupOffset } + /* + * H E R E B E D R A G O N S ! + * + * There are two main functions of a Cell: message queueing and child lookup. + * When switching out the UnstartedCell for its real replacement, the former + * must be switched after all messages have been drained from the temporary + * queue into the real mailbox, while the latter must be switched before + * processing the very first message (i.e. before Cell.start()). Hence there + * are two refs here, one for each function, and they are switched just so. + */ @volatile private var _cellDoNotCallMeDirectly: Cell = _ + @volatile private var _lookupDoNotCallMeDirectly: Cell = _ def underlying: Cell = Unsafe.instance.getObjectVolatile(this, cellOffset).asInstanceOf[Cell] + def lookup = Unsafe.instance.getObjectVolatile(this, lookupOffset).asInstanceOf[Cell] @tailrec final def swapCell(next: Cell): Cell = { val old = underlying if (Unsafe.instance.compareAndSwapObject(this, cellOffset, old, next)) old else swapCell(next) } + @tailrec final def swapLookup(next: Cell): Cell = { + val old = lookup + if (Unsafe.instance.compareAndSwapObject(this, lookupOffset, old, next)) old else swapLookup(next) + } + /** * Initialize: make a dummy cell which holds just a mailbox, then tell our * supervisor that we exist so that he can create the real Cell in @@ -52,12 +70,17 @@ private[akka] class RepointableActorRef( * * This is protected so that others can have different initialization. 
*/ - def initialize(): this.type = { - val uid = ThreadLocalRandom.current.nextInt() - swapCell(new UnstartedCell(system, this, props, supervisor, uid)) - supervisor.sendSystemMessage(Supervise(this, uid)) - this - } + def initialize(async: Boolean): this.type = + underlying match { + case null ⇒ + val uid = ThreadLocalRandom.current.nextInt() + swapCell(new UnstartedCell(system, this, props, supervisor, uid)) + swapLookup(underlying) + supervisor.sendSystemMessage(Supervise(this, async, uid)) + if (!async) point() + this + case other ⇒ throw new IllegalStateException("initialize called more than once!") + } /** * This method is supposed to be called by the supervisor in handleSupervise() @@ -65,21 +88,31 @@ private[akka] class RepointableActorRef( * modification of the `underlying` field, though it is safe to send messages * at any time. */ - def activate(): this.type = { + def point(): this.type = underlying match { - case u: UnstartedCell ⇒ u.replaceWith(newCell(u)) - case _ ⇒ // this happens routinely for things which were created async=false + case u: UnstartedCell ⇒ + /* + * The problem here was that if the real actor (which will start running + * at cell.start()) creates children in its constructor, then this may + * happen before the swapCell in u.replaceWith, meaning that those + * children cannot be looked up immediately, e.g. if they shall become + * routees. + */ + val cell = newCell(u) + swapLookup(cell) + cell.start() + u.replaceWith(cell) + this + case null ⇒ throw new IllegalStateException("underlying cell is null") + case _ ⇒ this // this happens routinely for things which were created async=false } - this - } /** * This is called by activate() to obtain the cell which is to replace the * unstarted cell. The cell must be fully functional. */ - def newCell(old: Cell): Cell = - new ActorCell(system, this, props, supervisor). - init(old.asInstanceOf[UnstartedCell].uid, sendSupervise = false).start() + def newCell(old: UnstartedCell): Cell = + new ActorCell(system, this, props, supervisor).init(old.uid, sendSupervise = false) def start(): Unit = () @@ -91,7 +124,11 @@ private[akka] class RepointableActorRef( def restart(cause: Throwable): Unit = underlying.restart(cause) - def isStarted: Boolean = !underlying.isInstanceOf[UnstartedCell] + def isStarted: Boolean = underlying match { + case _: UnstartedCell ⇒ false + case null ⇒ throw new IllegalStateException("isStarted called before initialized") + case _ ⇒ true + } def isTerminated: Boolean = underlying.isTerminated @@ -107,7 +144,7 @@ private[akka] class RepointableActorRef( case ".." ⇒ getParent.getChild(name) case "" ⇒ getChild(name) case other ⇒ - underlying.getChildByName(other) match { + lookup.getChildByName(other) match { case Some(crs: ChildRestartStats) ⇒ crs.child.asInstanceOf[InternalActorRef].getChild(name) case _ ⇒ Nobody } @@ -122,117 +159,116 @@ private[akka] class RepointableActorRef( protected def writeReplace(): AnyRef = SerializedActorRef(path) } -private[akka] class UnstartedCell(val systemImpl: ActorSystemImpl, val self: RepointableActorRef, val props: Props, val supervisor: InternalActorRef, val uid: Int) - extends Cell { +private[akka] class UnstartedCell(val systemImpl: ActorSystemImpl, + val self: RepointableActorRef, + val props: Props, + val supervisor: InternalActorRef, + val uid: Int) extends Cell { /* * This lock protects all accesses to this cell’s queues. It also ensures * safe switching to the started ActorCell. 
*/ - val lock = new ReentrantLock + private[this] final val lock = new ReentrantLock - // use Envelope to keep on-send checks in the same place - val queue: Queue[Envelope] = Queue() - val systemQueue: Queue[SystemMessage] = Queue() - var suspendCount: Int = 0 + // use Envelope to keep on-send checks in the same place ACCESS MUST BE PROTECTED BY THE LOCK + private[this] final val queue = new JLinkedList[Any]() - private def timeout = system.settings.UnstartedPushTimeout.duration.toMillis + import systemImpl.settings.UnstartedPushTimeout.{ duration ⇒ timeout } - def replaceWith(cell: Cell): Unit = { - lock.lock() + def replaceWith(cell: Cell): Unit = locked { try { - /* - * The CallingThreadDispatcher nicely dives under the ReentrantLock and - * breaks things by enqueueing into stale queues from within the message - * processing which happens in-line for sendSystemMessage() and tell(). - * Since this is the only possible way to f*ck things up within this - * lock, double-tap (well, N-tap, really); concurrent modification is - * still not possible because we’re the only thread accessing the queues. - */ - while (systemQueue.nonEmpty || queue.nonEmpty) { - while (systemQueue.nonEmpty) { - val msg = systemQueue.dequeue() - cell.sendSystemMessage(msg) - } - if (queue.nonEmpty) { - val envelope = queue.dequeue() - cell.tell(envelope.message, envelope.sender) + while (!queue.isEmpty) { + queue.poll() match { + case s: SystemMessage ⇒ cell.sendSystemMessage(s) + case e: Envelope ⇒ cell.tell(e.message, e.sender) } } - } finally try + } finally { self.swapCell(cell) - finally try - for (_ ← 1 to suspendCount) cell.suspend() - finally - lock.unlock() + } } def system: ActorSystem = systemImpl - def suspend(): Unit = { - lock.lock() - try suspendCount += 1 - finally lock.unlock() - } - def resume(causedByFailure: Throwable): Unit = { - lock.lock() - try suspendCount -= 1 - finally lock.unlock() - } - def restart(cause: Throwable): Unit = { - lock.lock() - try suspendCount -= 1 - finally lock.unlock() - } + def start(): this.type = this + def suspend(): Unit = sendSystemMessage(Suspend()) + def resume(causedByFailure: Throwable): Unit = sendSystemMessage(Resume(causedByFailure)) + def restart(cause: Throwable): Unit = sendSystemMessage(Recreate(cause)) def stop(): Unit = sendSystemMessage(Terminate()) - def isTerminated: Boolean = false + def isTerminated: Boolean = locked { + val cell = self.underlying + if (cellIsReady(cell)) cell.isTerminated else false + } def parent: InternalActorRef = supervisor def childrenRefs: ChildrenContainer = ChildrenContainer.EmptyChildrenContainer def getChildByName(name: String): Option[ChildRestartStats] = None + def tell(message: Any, sender: ActorRef): Unit = { val useSender = if (sender eq Actor.noSender) system.deadLetters else sender - if (lock.tryLock(timeout, TimeUnit.MILLISECONDS)) { + if (lock.tryLock(timeout.length, timeout.unit)) { try { - if (self.underlying eq this) queue enqueue Envelope(message, useSender, system) - else self.underlying.tell(message, useSender) - } finally { - lock.unlock() - } + val cell = self.underlying + if (cellIsReady(cell)) { + cell.tell(message, useSender) + } else if (!queue.offer(Envelope(message, useSender, system))) { + system.eventStream.publish(Warning(self.path.toString, getClass, "dropping message of type " + message.getClass + " due to enqueue failure")) + system.deadLetters ! 
DeadLetter(message, useSender, self) + } + } finally lock.unlock() } else { + system.eventStream.publish(Warning(self.path.toString, getClass, "dropping message of type" + message.getClass + " due to lock timeout")) system.deadLetters ! DeadLetter(message, useSender, self) } } - def sendSystemMessage(msg: SystemMessage): Unit = { - if (lock.tryLock(timeout, TimeUnit.MILLISECONDS)) { + + // FIXME: once we have guaranteed delivery of system messages, hook this in! + def sendSystemMessage(msg: SystemMessage): Unit = + if (lock.tryLock(timeout.length, timeout.unit)) { try { - if (self.underlying eq this) systemQueue enqueue msg - else self.underlying.sendSystemMessage(msg) - } finally { - lock.unlock() - } + val cell = self.underlying + if (cellIsReady(cell)) { + cell.sendSystemMessage(msg) + } else { + // systemMessages that are sent during replace need to jump to just after the last system message in the queue, so it's processed before other messages + val wasEnqueued = if ((self.lookup ne this) && (self.underlying eq this) && !queue.isEmpty()) { + @tailrec def tryEnqueue(i: JListIterator[Any] = queue.listIterator(), insertIntoIndex: Int = -1): Boolean = + if (i.hasNext()) + tryEnqueue(i, + if (i.next().isInstanceOf[SystemMessage]) i.nextIndex() // update last sysmsg seen so far + else insertIntoIndex) // or just keep the last seen one + else if (insertIntoIndex == -1) queue.offer(msg) + else Try(queue.add(insertIntoIndex, msg)).isSuccess + tryEnqueue() + } else queue.offer(msg) + + if (!wasEnqueued) { + system.eventStream.publish(Warning(self.path.toString, getClass, "dropping system message " + msg + " due to enqueue failure")) + system.deadLetters ! DeadLetter(msg, self, self) + } + } + } finally lock.unlock() } else { - // FIXME: once we have guaranteed delivery of system messages, hook this in! system.eventStream.publish(Warning(self.path.toString, getClass, "dropping system message " + msg + " due to lock timeout")) system.deadLetters ! DeadLetter(msg, self, self) } - } + def isLocal = true - def hasMessages: Boolean = { - lock.lock() - try { - if (self.underlying eq this) !queue.isEmpty - else self.underlying.hasMessages - } finally { - lock.unlock() - } + + private[this] final def cellIsReady(cell: Cell): Boolean = (cell ne this) && (cell ne null) + + def hasMessages: Boolean = locked { + val cell = self.underlying + if (cellIsReady(cell)) cell.hasMessages else !queue.isEmpty } - def numberOfMessages: Int = { + + def numberOfMessages: Int = locked { + val cell = self.underlying + if (cellIsReady(cell)) cell.numberOfMessages else queue.size + } + + private[this] final def locked[T](body: ⇒ T): T = { lock.lock() - try { - if (self.underlying eq this) queue.size - else self.underlying.numberOfMessages - } finally { - lock.unlock() - } + try body finally lock.unlock() } } diff --git a/akka-actor/src/main/scala/akka/actor/Scheduler.scala b/akka-actor/src/main/scala/akka/actor/Scheduler.scala index 2f8310a071..bbb830110d 100644 --- a/akka-actor/src/main/scala/akka/actor/Scheduler.scala +++ b/akka-actor/src/main/scala/akka/actor/Scheduler.scala @@ -15,6 +15,7 @@ import akka.util.internal._ import concurrent.ExecutionContext import scala.concurrent.duration.FiniteDuration +// The Scheduler trait is included in the documentation. KEEP THE LINES SHORT!!! //#scheduler /** * An Akka scheduler service. 
This one needs one special behavior: if @@ -38,7 +39,7 @@ trait Scheduler { initialDelay: FiniteDuration, interval: FiniteDuration, receiver: ActorRef, - message: Any)(implicit executor: ExecutionContext): Cancellable + message: Any)(implicit executor: ExecutionContext, sender: ActorRef = Actor.noSender): Cancellable /** * Schedules a function to be run repeatedly with an initial delay and a @@ -50,7 +51,8 @@ trait Scheduler { */ def schedule( initialDelay: FiniteDuration, - interval: FiniteDuration)(f: ⇒ Unit)(implicit executor: ExecutionContext): Cancellable + interval: FiniteDuration)(f: ⇒ Unit)( + implicit executor: ExecutionContext): Cancellable /** * Schedules a function to be run repeatedly with an initial delay and @@ -93,7 +95,8 @@ trait Scheduler { * Scala API */ def scheduleOnce( - delay: FiniteDuration)(f: ⇒ Unit)(implicit executor: ExecutionContext): Cancellable + delay: FiniteDuration)(f: ⇒ Unit)( + implicit executor: ExecutionContext): Cancellable } //#scheduler @@ -133,7 +136,7 @@ class DefaultScheduler(hashedWheelTimer: HashedWheelTimer, log: LoggingAdapter) override def schedule(initialDelay: FiniteDuration, delay: FiniteDuration, receiver: ActorRef, - message: Any)(implicit executor: ExecutionContext): Cancellable = { + message: Any)(implicit executor: ExecutionContext, sender: ActorRef = Actor.noSender): Cancellable = { val continuousCancellable = new ContinuousCancellable continuousCancellable.init( hashedWheelTimer.newTimeout( diff --git a/akka-actor/src/main/scala/akka/actor/TypedActor.scala b/akka-actor/src/main/scala/akka/actor/TypedActor.scala index cc12ed07a2..7509cd758d 100644 --- a/akka-actor/src/main/scala/akka/actor/TypedActor.scala +++ b/akka-actor/src/main/scala/akka/actor/TypedActor.scala @@ -128,9 +128,13 @@ object TypedActor extends ExtensionId[TypedActorExtension] with ExtensionIdProvi case class MethodCall(method: Method, parameters: Array[AnyRef]) { def isOneWay = method.getReturnType == java.lang.Void.TYPE - def returnsFuture_? = classOf[Future[_]].isAssignableFrom(method.getReturnType) - def returnsJOption_? = classOf[akka.japi.Option[_]].isAssignableFrom(method.getReturnType) - def returnsOption_? = classOf[scala.Option[_]].isAssignableFrom(method.getReturnType) + def returnsFuture = classOf[Future[_]] isAssignableFrom method.getReturnType + def returnsJOption = classOf[akka.japi.Option[_]] isAssignableFrom method.getReturnType + def returnsOption = classOf[scala.Option[_]] isAssignableFrom method.getReturnType + + @deprecated("use returnsFuture instead", "2.2") def returnsFuture_? = returnsFuture + @deprecated("use returnsJOption instead", "2.2") def returnsJOption_? = returnsJOption + @deprecated("use returnsOption instead", "2.2") def returnsOption_? = returnsOption /** * Invokes the Method on the supplied instance @@ -196,6 +200,9 @@ object TypedActor extends ExtensionId[TypedActorExtension] with ExtensionIdProvi private val selfReference = new ThreadLocal[AnyRef] private val currentContext = new ThreadLocal[ActorContext] + @SerialVersionUID(1L) + private case object NullResponse + /** * Returns the reference to the proxy when called inside a method call in a TypedActor * @@ -296,14 +303,17 @@ object TypedActor extends ExtensionId[TypedActorExtension] with ExtensionIdProvi if (m.isOneWay) m(me) else { try { - if (m.returnsFuture_?) { - val s = sender - m(me).asInstanceOf[Future[Any]] onComplete { - case Failure(f) ⇒ s ! Status.Failure(f) - case Success(r) ⇒ s ! r - } - } else { - sender ! 
m(me) + val s = sender + m(me) match { + case f: Future[_] if m.returnsFuture ⇒ + implicit val dispatcher = context.dispatcher + f onComplete { + case Success(null) ⇒ s ! NullResponse + case Success(result) ⇒ s ! result + case Failure(f) ⇒ s ! Status.Failure(f) + } + case null ⇒ s ! NullResponse + case result ⇒ s ! result } } catch { case NonFatal(e) ⇒ @@ -391,6 +401,7 @@ object TypedActor extends ExtensionId[TypedActorExtension] with ExtensionIdProvi * INTERNAL USE ONLY */ private[akka] class TypedActorInvocationHandler(@transient val extension: TypedActorExtension, @transient val actorVar: AtomVar[ActorRef], @transient val timeout: Timeout) extends InvocationHandler with Serializable { + def actor = actorVar.get @throws(classOf[Throwable]) def invoke(proxy: AnyRef, method: Method, args: Array[AnyRef]): AnyRef = method.getName match { @@ -398,17 +409,24 @@ object TypedActor extends ExtensionId[TypedActorExtension] with ExtensionIdProvi case "equals" ⇒ (args.length == 1 && (proxy eq args(0)) || actor == extension.getActorRefFor(args(0))).asInstanceOf[AnyRef] //Force boxing of the boolean case "hashCode" ⇒ actor.hashCode.asInstanceOf[AnyRef] case _ ⇒ + implicit val dispatcher = extension.system.dispatcher import akka.pattern.ask MethodCall(method, args) match { - case m if m.isOneWay ⇒ actor ! m; null //Null return value - case m if m.returnsFuture_? ⇒ ask(actor, m)(timeout) - case m if m.returnsJOption_? || m.returnsOption_? ⇒ + case m if m.isOneWay ⇒ actor ! m; null //Null return value + case m if m.returnsFuture ⇒ ask(actor, m)(timeout) map { + case NullResponse ⇒ null + case other ⇒ other + } + case m if m.returnsJOption || m.returnsOption ⇒ val f = ask(actor, m)(timeout) (try { Await.ready(f, timeout.duration).value } catch { case _: TimeoutException ⇒ None }) match { - case None | Some(Success(null)) ⇒ if (m.returnsJOption_?) JOption.none[Any] else None - case Some(t: Try[_]) ⇒ t.get.asInstanceOf[AnyRef] + case None | Some(Success(NullResponse)) ⇒ if (m.returnsJOption) JOption.none[Any] else None + case Some(t: Try[_]) ⇒ t.get.asInstanceOf[AnyRef] } - case m ⇒ Await.result(ask(actor, m)(timeout), timeout.duration).asInstanceOf[AnyRef] + case m ⇒ Await.result(ask(actor, m)(timeout), timeout.duration) match { + case NullResponse ⇒ null + case other ⇒ other.asInstanceOf[AnyRef] + } } } @throws(classOf[ObjectStreamException]) private def writeReplace(): AnyRef = SerializedTypedActorInvocationHandler(actor, timeout.duration) @@ -605,7 +623,7 @@ case class ContextualTypedActorFactory(typedActor: TypedActorExtension, actorFac override def isTypedActor(proxyOrNot: AnyRef): Boolean = typedActor.isTypedActor(proxyOrNot) } -class TypedActorExtension(system: ExtendedActorSystem) extends TypedActorFactory with Extension { +class TypedActorExtension(val system: ExtendedActorSystem) extends TypedActorFactory with Extension { import TypedActor._ //Import the goodies from the companion object protected def actorFactory: ActorRefFactory = system protected def typedActor = this @@ -655,8 +673,8 @@ class TypedActorExtension(system: ExtendedActorSystem) extends TypedActorFactory /** * INTERNAL USE ONLY */ - private[akka] def invocationHandlerFor(typedActor_? : AnyRef): TypedActorInvocationHandler = - if ((typedActor_? ne null) && Proxy.isProxyClass(typedActor_?.getClass)) typedActor_? match { + private[akka] def invocationHandlerFor(@deprecatedName('typedActor_?) 
typedActor: AnyRef): TypedActorInvocationHandler = + if ((typedActor ne null) && Proxy.isProxyClass(typedActor.getClass)) typedActor match { case null ⇒ null case other ⇒ Proxy.getInvocationHandler(other) match { case null ⇒ null diff --git a/akka-actor/src/main/scala/akka/actor/dungeon/Children.scala b/akka-actor/src/main/scala/akka/actor/dungeon/Children.scala index 74e06f3034..ba856206ea 100644 --- a/akka-actor/src/main/scala/akka/actor/dungeon/Children.scala +++ b/akka-actor/src/main/scala/akka/actor/dungeon/Children.scala @@ -52,19 +52,24 @@ private[akka] trait Children { this: ActorCell ⇒ } final def stop(actor: ActorRef): Unit = { - val started = actor match { - case r: RepointableRef ⇒ r.isStarted - case _ ⇒ true + if (childrenRefs.getByRef(actor).isDefined) { + @tailrec def shallDie(ref: ActorRef): Boolean = { + val c = childrenRefs + swapChildrenRefs(c, c.shallDie(ref)) || shallDie(ref) + } + + if (actor match { + case r: RepointableRef ⇒ r.isStarted + case _ ⇒ true + }) shallDie(actor) } - if (childrenRefs.getByRef(actor).isDefined && started) shallDie(actor) actor.asInstanceOf[InternalActorRef].stop() } /* * low level CAS helpers */ - - @inline private def swapChildrenRefs(oldChildren: ChildrenContainer, newChildren: ChildrenContainer): Boolean = + @inline private final def swapChildrenRefs(oldChildren: ChildrenContainer, newChildren: ChildrenContainer): Boolean = Unsafe.instance.compareAndSwapObject(this, AbstractActorCell.childrenOffset, oldChildren, newChildren) @tailrec final def reserveChild(name: String): Boolean = { @@ -89,18 +94,6 @@ private[akka] trait Children { this: ActorCell ⇒ } } - @tailrec final protected def shallDie(ref: ActorRef): Boolean = { - val c = childrenRefs - swapChildrenRefs(c, c.shallDie(ref)) || shallDie(ref) - } - - @tailrec final private def removeChild(ref: ActorRef): ChildrenContainer = { - val c = childrenRefs - val n = c.remove(ref) - if (swapChildrenRefs(c, n)) n - else removeChild(ref) - } - @tailrec final protected def setChildrenTerminationReason(reason: ChildrenContainer.SuspendReason): Boolean = { childrenRefs match { case c: ChildrenContainer.TerminatingChildrenContainer ⇒ @@ -143,10 +136,18 @@ private[akka] trait Children { this: ActorCell ⇒ protected def getAllChildStats: immutable.Iterable[ChildRestartStats] = childrenRefs.stats protected def removeChildAndGetStateChange(child: ActorRef): Option[SuspendReason] = { - childrenRefs match { + @tailrec def removeChild(ref: ActorRef): ChildrenContainer = { + val c = childrenRefs + val n = c.remove(ref) + if (swapChildrenRefs(c, n)) n else removeChild(ref) + } + + childrenRefs match { // The match must be performed BEFORE the removeChild case TerminatingChildrenContainer(_, _, reason) ⇒ - val newContainer = removeChild(child) - if (!newContainer.isInstanceOf[TerminatingChildrenContainer]) Some(reason) else None + removeChild(child) match { + case _: TerminatingChildrenContainer ⇒ None + case _ ⇒ Some(reason) + } case _ ⇒ removeChild(child) None diff --git a/akka-actor/src/main/scala/akka/actor/dungeon/Dispatch.scala b/akka-actor/src/main/scala/akka/actor/dungeon/Dispatch.scala index aefd2bcc55..469aac78c2 100644 --- a/akka-actor/src/main/scala/akka/actor/dungeon/Dispatch.scala +++ b/akka-actor/src/main/scala/akka/actor/dungeon/Dispatch.scala @@ -55,7 +55,7 @@ private[akka] trait Dispatch { this: ActorCell ⇒ if (sendSupervise) { // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ - parent.sendSystemMessage(akka.dispatch.Supervise(self, uid)) + 
parent.sendSystemMessage(akka.dispatch.Supervise(self, async = false, uid)) parent ! NullMessage // read ScalaDoc of NullMessage to see why } this diff --git a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala index 1b9de36e77..f8fa5e1046 100644 --- a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala @@ -108,7 +108,7 @@ private[akka] case class Terminate() extends SystemMessage // sent to self from /** * INTERNAL API */ -private[akka] case class Supervise(child: ActorRef, uid: Int) extends SystemMessage // sent to supervisor ActorRef from ActorCell.start +private[akka] case class Supervise(child: ActorRef, async: Boolean, uid: Int) extends SystemMessage // sent to supervisor ActorRef from ActorCell.start /** * INTERNAL API */ @@ -450,7 +450,6 @@ abstract class MessageDispatcherConfigurator(val config: Config, val prerequisit } class ThreadPoolExecutorConfigurator(config: Config, prerequisites: DispatcherPrerequisites) extends ExecutorServiceConfigurator(config, prerequisites) { - import ThreadPoolConfigBuilder.conf_? val threadPoolConfig: ThreadPoolConfig = createThreadPoolConfigBuilder(config, prerequisites).config @@ -461,15 +460,15 @@ class ThreadPoolExecutorConfigurator(config: Config, prerequisites: DispatcherPr .setCorePoolSizeFromFactor(config getInt "core-pool-size-min", config getDouble "core-pool-size-factor", config getInt "core-pool-size-max") .setMaxPoolSizeFromFactor(config getInt "max-pool-size-min", config getDouble "max-pool-size-factor", config getInt "max-pool-size-max") .configure( - conf_?(Some(config getInt "task-queue-size") flatMap { + Some(config getInt "task-queue-size") flatMap { case size if size > 0 ⇒ Some(config getString "task-queue-type") map { case "array" ⇒ ThreadPoolConfig.arrayBlockingQueue(size, false) //TODO config fairness? case "" | "linked" ⇒ ThreadPoolConfig.linkedBlockingQueue(size) case x ⇒ throw new IllegalArgumentException("[%s] is not a valid task-queue-type [array|linked]!" 
format x) - } + } map { qf ⇒ (q: ThreadPoolConfigBuilder) ⇒ q.setQueueFactory(qf) } case _ ⇒ None - })(queueFactory ⇒ _.setQueueFactory(queueFactory))) + }) } def createExecutorServiceFactory(id: String, threadFactory: ThreadFactory): ExecutorServiceFactory = @@ -527,7 +526,7 @@ class ForkJoinExecutorConfigurator(config: Config, prerequisites: DispatcherPrer val tf = threadFactory match { case m: MonitorableThreadFactory ⇒ // add the dispatcher id to the thread names - m.copy(m.name + "-" + id) + m.withName(m.name + "-" + id) case other ⇒ other } new ForkJoinExecutorServiceFactory( diff --git a/akka-actor/src/main/scala/akka/dispatch/PinnedDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/PinnedDispatcher.scala index 52d5587597..eb5b2686c3 100644 --- a/akka-actor/src/main/scala/akka/dispatch/PinnedDispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/PinnedDispatcher.scala @@ -20,7 +20,7 @@ class PinnedDispatcher( _id: String, _mailboxType: MailboxType, _shutdownTimeout: FiniteDuration, - _threadPoolConfig: ThreadPoolConfig = ThreadPoolConfig()) + _threadPoolConfig: ThreadPoolConfig) extends Dispatcher(_prerequisites, _id, Int.MaxValue, diff --git a/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala b/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala index 9d06a7b74c..8fb4d6dc4f 100644 --- a/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala +++ b/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala @@ -92,17 +92,13 @@ case class ThreadPoolConfig(allowCorePoolTimeout: Boolean = ThreadPoolConfig.def val tf = threadFactory match { case m: MonitorableThreadFactory ⇒ // add the dispatcher id to the thread names - m.copy(m.name + "-" + id) + m.withName(m.name + "-" + id) case other ⇒ other } new ThreadPoolExecutorServiceFactory(tf) } } -object ThreadPoolConfigBuilder { - def conf_?[T](opt: Option[T])(fun: (T) ⇒ ThreadPoolConfigBuilder ⇒ ThreadPoolConfigBuilder): Option[(ThreadPoolConfigBuilder) ⇒ ThreadPoolConfigBuilder] = opt map fun -} - /** * A DSL to configure and create a MessageDispatcher with a ThreadPoolExecutor */ @@ -183,9 +179,9 @@ object MonitorableThreadFactory { case class MonitorableThreadFactory(name: String, daemonic: Boolean, contextClassLoader: Option[ClassLoader], - exceptionHandler: Thread.UncaughtExceptionHandler = MonitorableThreadFactory.doNothing) + exceptionHandler: Thread.UncaughtExceptionHandler = MonitorableThreadFactory.doNothing, + protected val counter: AtomicLong = new AtomicLong) extends ThreadFactory with ForkJoinPool.ForkJoinWorkerThreadFactory { - protected val counter = new AtomicLong def newThread(pool: ForkJoinPool): ForkJoinWorkerThread = { val t = wire(new MonitorableThreadFactory.AkkaForkJoinWorkerThread(pool)) @@ -196,6 +192,8 @@ case class MonitorableThreadFactory(name: String, def newThread(runnable: Runnable): Thread = wire(new Thread(runnable, name + "-" + counter.incrementAndGet())) + def withName(newName: String): MonitorableThreadFactory = copy(newName) + protected def wire[T <: Thread](t: T): T = { t.setUncaughtExceptionHandler(exceptionHandler) t.setDaemon(daemonic) diff --git a/akka-actor/src/main/scala/akka/pattern/AskSupport.scala b/akka-actor/src/main/scala/akka/pattern/AskSupport.scala index 2ff45b0290..eef089a85c 100644 --- a/akka-actor/src/main/scala/akka/pattern/AskSupport.scala +++ b/akka-actor/src/main/scala/akka/pattern/AskSupport.scala @@ -73,7 +73,15 @@ trait AskSupport { * * See [[scala.concurrent.Future]] for a description of `flow` */ - def ask(actorRef: 
ActorRef, message: Any)(implicit timeout: Timeout): Future[Any] = actorRef match { + def ask(actorRef: ActorRef, message: Any)(implicit timeout: Timeout): Future[Any] = actorRef ? message +} + +/* + * Implementation class of the “ask” pattern enrichment of ActorRef + */ +final class AskableActorRef(val actorRef: ActorRef) extends AnyVal { + + def ask(message: Any)(implicit timeout: Timeout): Future[Any] = actorRef match { case ref: InternalActorRef if ref.isTerminated ⇒ actorRef ! message Future.failed[Any](new AskTimeoutException("Recipient[%s] had already been terminated." format actorRef)) @@ -88,71 +96,7 @@ trait AskSupport { case _ ⇒ Future.failed[Any](new IllegalArgumentException("Unsupported type of ActorRef for the recipient. Question not sent to [%s]" format actorRef)) } - /** - * Implementation detail of the “ask” pattern enrichment of ActorRef - */ - private[akka] final class AskableActorRef(val actorRef: ActorRef) { - - /** - * Sends a message asynchronously and returns a [[scala.concurrent.Future]] - * holding the eventual reply message; this means that the target actor - * needs to send the result to the `sender` reference provided. The Future - * will be completed with an [[akka.pattern.AskTimeoutException]] after the - * given timeout has expired; this is independent from any timeout applied - * while awaiting a result for this future (i.e. in - * `Await.result(..., timeout)`). - * - * Warning: - * When using future callbacks, inside actors you need to carefully avoid closing over - * the containing actor’s object, i.e. do not call methods or access mutable state - * on the enclosing actor from within the callback. This would break the actor - * encapsulation and may introduce synchronization bugs and race conditions because - * the callback will be scheduled concurrently to the enclosing actor. Unfortunately - * there is not yet a way to detect these illegal accesses at compile time. - * - * Recommended usage: - * - * {{{ - * flow { - * val f = worker.ask(request)(timeout) - * EnrichedRequest(request, f()) - * } pipeTo nextActor - * }}} - * - * See the [[scala.concurrent.Future]] companion object for a description of `flow` - */ - def ask(message: Any)(implicit timeout: Timeout): Future[Any] = akka.pattern.ask(actorRef, message)(timeout) - - /** - * Sends a message asynchronously and returns a [[scala.concurrent.Future]] - * holding the eventual reply message; this means that the target actor - * needs to send the result to the `sender` reference provided. The Future - * will be completed with an [[akka.pattern.AskTimeoutException]] after the - * given timeout has expired; this is independent from any timeout applied - * while awaiting a result for this future (i.e. in - * `Await.result(..., timeout)`). - * - * Warning: - * When using future callbacks, inside actors you need to carefully avoid closing over - * the containing actor’s object, i.e. do not call methods or access mutable state - * on the enclosing actor from within the callback. This would break the actor - * encapsulation and may introduce synchronization bugs and race conditions because - * the callback will be scheduled concurrently to the enclosing actor. Unfortunately - * there is not yet a way to detect these illegal accesses at compile time. - * - * Recommended usage: - * - * {{{ - * flow { - * val f = worker ? 
request - * EnrichedRequest(request, f()) - * } pipeTo nextActor - * }}} - * - * See the [[scala.concurrent.Future]] companion object for a description of `flow` - */ - def ?(message: Any)(implicit timeout: Timeout): Future[Any] = akka.pattern.ask(actorRef, message)(timeout) - } + def ?(message: Any)(implicit timeout: Timeout): Future[Any] = ask(message)(timeout) } /** diff --git a/akka-actor/src/main/scala/akka/routing/ConsistentHashingRouter.scala b/akka-actor/src/main/scala/akka/routing/ConsistentHashingRouter.scala index 0214c6736e..e88195f577 100644 --- a/akka-actor/src/main/scala/akka/routing/ConsistentHashingRouter.scala +++ b/akka-actor/src/main/scala/akka/routing/ConsistentHashingRouter.scala @@ -237,7 +237,7 @@ trait ConsistentHashingLike { this: RouterConfig ⇒ } val log = Logging(routeeProvider.context.system, routeeProvider.context.self) - val selfAddress = routeeProvider.context.system.asInstanceOf[ExtendedActorSystem].provider.rootPath.address + val selfAddress = routeeProvider.context.system.asInstanceOf[ExtendedActorSystem].provider.getDefaultAddress val vnodes = if (virtualNodesFactor == 0) routeeProvider.context.system.settings.DefaultVirtualNodesFactor else virtualNodesFactor diff --git a/akka-actor/src/main/scala/akka/routing/Routing.scala b/akka-actor/src/main/scala/akka/routing/Routing.scala index 8c3f059a40..8c2c81bac2 100644 --- a/akka-actor/src/main/scala/akka/routing/Routing.scala +++ b/akka-actor/src/main/scala/akka/routing/Routing.scala @@ -7,7 +7,6 @@ import language.implicitConversions import language.postfixOps import scala.collection.immutable -import scala.collection.JavaConverters.iterableAsScalaIterableConverter import scala.concurrent.duration._ import akka.actor._ import akka.ConfigurationException @@ -17,12 +16,14 @@ import akka.japi.Util.immutableSeq import com.typesafe.config.Config import java.util.concurrent.atomic.{ AtomicLong, AtomicBoolean } import java.util.concurrent.TimeUnit +import akka.event.Logging.Warning import scala.concurrent.forkjoin.ThreadLocalRandom import scala.annotation.tailrec +import akka.event.Logging.Warning /** * A RoutedActorRef is an ActorRef that has a set of connected ActorRef and it uses a Router to - * send a message to on (or more) of these actors. + * send a message to one (or more) of these actors. 
*/ private[akka] class RoutedActorRef(_system: ActorSystemImpl, _props: Props, _supervisor: InternalActorRef, _path: ActorPath) extends RepointableActorRef(_system, _props, _supervisor, _path) { @@ -36,11 +37,11 @@ private[akka] class RoutedActorRef(_system: ActorSystemImpl, _props: Props, _sup _props.routerConfig.verifyConfig() - override def newCell(old: Cell): Cell = new RoutedActorCell(system, this, props, supervisor, old.asInstanceOf[UnstartedCell].uid) + override def newCell(old: UnstartedCell): Cell = new RoutedActorCell(system, this, props, supervisor).init(old.uid, sendSupervise = false) } -private[akka] class RoutedActorCell(_system: ActorSystemImpl, _ref: InternalActorRef, _props: Props, _supervisor: InternalActorRef, _uid: Int) +private[akka] class RoutedActorCell(_system: ActorSystemImpl, _ref: InternalActorRef, _props: Props, _supervisor: InternalActorRef) extends ActorCell( _system, _ref, @@ -71,14 +72,12 @@ private[akka] class RoutedActorCell(_system: ActorSystemImpl, _ref: InternalActo r } - init(_uid, sendSupervise = false).start() - /* * end of construction */ def applyRoute(sender: ActorRef, message: Any): immutable.Iterable[Destination] = message match { - case _: AutoReceivedMessage ⇒ Destination(self, self) :: Nil + case _: AutoReceivedMessage ⇒ Destination(sender, self) :: Nil case CurrentRoutees ⇒ sender ! RouterRoutees(_routees); Nil case msg if route.isDefinedAt(sender, msg) ⇒ route(sender, message) case _ ⇒ Nil @@ -91,7 +90,7 @@ private[akka] class RoutedActorCell(_system: ActorSystemImpl, _ref: InternalActo * `RouterConfig.createRoute` and `Resizer.resize` */ private[akka] def addRoutees(newRoutees: immutable.Iterable[ActorRef]): Unit = { - _routees = _routees ++ newRoutees + _routees ++= newRoutees // subscribe to Terminated messages for all route destinations, to be handled by Router actor newRoutees foreach watch } @@ -106,31 +105,39 @@ private[akka] class RoutedActorCell(_system: ActorSystemImpl, _ref: InternalActo _routees = abandonedRoutees.foldLeft(_routees) { (xs, x) ⇒ unwatch(x); xs.filterNot(_ == x) } } + /** + * Send the message to the destinations defined by the `route` function. + * + * If the message is a [[akka.routing.RouterEnvelope]] it will be + * unwrapped before sent to the destinations. + * + * When [[akka.routing.CurrentRoutees]] is sent to the RoutedActorRef it + * replies with [[akka.routing.RouterRoutees]]. + * + * Resize is triggered when messages are sent to the routees, and the + * resizer is invoked asynchronously, i.e. not necessarily before the + * message has been sent. 
+ */ override def tell(message: Any, sender: ActorRef): Unit = { - resize() - val s = if (sender eq null) system.deadLetters else sender - val msg = message match { case wrapped: RouterEnvelope ⇒ wrapped.message case m ⇒ m } - - applyRoute(s, message) match { - case Destination(_, x) :: Nil if x == self ⇒ super.tell(message, s) - case refs ⇒ - refs foreach (p ⇒ - if (p.recipient == self) super.tell(msg, p.sender) - else p.recipient.!(msg)(p.sender)) + applyRoute(s, message) foreach { + case Destination(snd, `self`) ⇒ + super.tell(msg, snd) + case Destination(snd, recipient) ⇒ + resize() // only resize when the message target is one of the routees + recipient.tell(msg, snd) } } - def resize(): Unit = { + def resize(): Unit = for (r ← routerConfig.resizer) { if (r.isTimeForResize(resizeCounter.getAndIncrement()) && resizeInProgress.compareAndSet(false, true)) super.tell(Router.Resize, self) } - } } /** @@ -199,7 +206,9 @@ trait RouterConfig { /** * Routers with dynamically resizable number of routees return the [[akka.routing.Resizer]] - * to use. + * to use. The resizer is invoked once when the router is created, before any messages can + * be sent to it. Resize is also triggered when messages are sent to the routees, and the + * resizer is invoked asynchronously, i.e. not necessarily before the message has been sent. */ def resizer: Option[Resizer] = None diff --git a/akka-actor/src/main/scala/akka/serialization/Serialization.scala b/akka-actor/src/main/scala/akka/serialization/Serialization.scala index 071195c11e..e0e52e8189 100644 --- a/akka-actor/src/main/scala/akka/serialization/Serialization.scala +++ b/akka-actor/src/main/scala/akka/serialization/Serialization.scala @@ -32,7 +32,7 @@ object Serialization { private final def configToMap(path: String): Map[String, String] = { import scala.collection.JavaConverters._ - config.getConfig(path).root.unwrapped.asScala.mapValues(_.toString).toMap + config.getConfig(path).root.unwrapped.asScala.toMap map { case (k, v) ⇒ (k -> v.toString) } } } } @@ -58,16 +58,16 @@ class Serialization(val system: ExtendedActorSystem) extends Extension { * using the optional type hint to the Serializer and the optional ClassLoader ot load it into. * Returns either the resulting object or an Exception if one was thrown. */ - def deserialize(bytes: Array[Byte], serializerId: Int, clazz: Option[Class[_]]): Try[AnyRef] = - Try(serializerByIdentity(serializerId).fromBinary(bytes, clazz)) + def deserialize[T](bytes: Array[Byte], serializerId: Int, clazz: Option[Class[_ <: T]]): Try[T] = + Try(serializerByIdentity(serializerId).fromBinary(bytes, clazz).asInstanceOf[T]) /** * Deserializes the given array of bytes using the specified type to look up what Serializer should be used. * You can specify an optional ClassLoader to load the object into. * Returns either the resulting object or an Exception if one was thrown. */ - def deserialize(bytes: Array[Byte], clazz: Class[_]): Try[AnyRef] = - Try(serializerFor(clazz).fromBinary(bytes, Some(clazz))) + def deserialize[T](bytes: Array[Byte], clazz: Class[T]): Try[T] = + Try(serializerFor(clazz).fromBinary(bytes, Some(clazz)).asInstanceOf[T]) /** * Returns the Serializer configured for the given object, returns the NullSerializer if it's null. 
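The typed `deserialize` overloads above return `Try[T]` instead of `Try[AnyRef]`, so callers no longer have to cast the result themselves. A minimal round-trip sketch through the serialization extension; the `MyMessage` payload and the system name are illustrative assumptions, not part of the patch:

  import scala.util.Try
  import akka.actor.ActorSystem
  import akka.serialization.SerializationExtension

  // Hypothetical payload; Scala case classes are java.io.Serializable,
  // so the default JavaSerializer can handle it.
  case class MyMessage(text: String)

  val system = ActorSystem("serialization-example")
  val serialization = SerializationExtension(system)

  val msg = MyMessage("hello")
  val bytes: Array[Byte] = serialization.findSerializerFor(msg).toBinary(msg)

  // The typed overload already yields Try[MyMessage]; no asInstanceOf at the call site.
  val restored: Try[MyMessage] = serialization.deserialize(bytes, classOf[MyMessage])
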
diff --git a/akka-actor/src/main/scala/akka/serialization/Serializer.scala b/akka-actor/src/main/scala/akka/serialization/Serializer.scala index 5670ba61ba..bdf8adbf85 100644 --- a/akka-actor/src/main/scala/akka/serialization/Serializer.scala +++ b/akka-actor/src/main/scala/akka/serialization/Serializer.scala @@ -5,9 +5,11 @@ package akka.serialization */ import java.io.{ ObjectOutputStream, ByteArrayOutputStream, ObjectInputStream, ByteArrayInputStream } +import java.util.concurrent.Callable import akka.util.ClassLoaderObjectInputStream import akka.actor.ExtendedActorSystem import scala.util.DynamicVariable +import akka.serialization.JavaSerializer.CurrentSystem /** * A Serializer represents a bimap between an object and an array of bytes representing that object. @@ -93,9 +95,22 @@ object JavaSerializer { * currentSystem.withValue(system) { * ...code... * } + * + * or + * + * currentSystem.withValue(system, callable) */ - val currentSystem = new DynamicVariable[ExtendedActorSystem](null) - + val currentSystem = new CurrentSystem + final class CurrentSystem extends DynamicVariable[ExtendedActorSystem](null) { + /** + * Java API + * @param value - the current value under the call to callable.call() + * @param callable - the operation to be performed + * @tparam S - the return type + * @return the result of callable.call() + */ + def withValue[S](value: ExtendedActorSystem, callable: Callable[S]): S = super.withValue[S](value)(callable.call) + } } /** diff --git a/akka-actor/src/main/scala/akka/util/ByteString.scala b/akka-actor/src/main/scala/akka/util/ByteString.scala index afffa75ac4..70f929c11a 100644 --- a/akka-actor/src/main/scala/akka/util/ByteString.scala +++ b/akka-actor/src/main/scala/akka/util/ByteString.scala @@ -45,6 +45,11 @@ object ByteString { */ def apply(string: String, charset: String): ByteString = CompactByteString(string, charset) + /** + * Creates a new ByteString by copying a byte array. + */ + def fromArray(array: Array[Byte]): ByteString = apply(array) + /** * Creates a new ByteString by copying length bytes starting at offset from * an Array. 
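The `fromArray` alias above, together with the `fromString` variants added in the following hunk, simply delegates to the existing `apply` overloads; the named factories are presumably easier to discover and to call from Java. A small illustrative sketch:

  import akka.util.ByteString

  val fromBytes = ByteString.fromArray(Array[Byte](1, 2, 3))            // copies the whole array
  val slice     = ByteString.fromArray("hello".getBytes("UTF-8"), 1, 3) // copies "ell"
  val utf8      = ByteString.fromString("hello")                        // UTF-8 encoded
  val latin1    = ByteString.fromString("hello", "ISO-8859-1")          // explicit charset
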
@@ -52,6 +57,16 @@ object ByteString { def fromArray(array: Array[Byte], offset: Int, length: Int): ByteString = CompactByteString.fromArray(array, offset, length) + /** + * Creates a new ByteString which will contain the UTF-8 representation of the given String + */ + def fromString(string: String): ByteString = apply(string) + + /** + * Creates a new ByteString which will contain the representation of the given String in the given charset + */ + def fromString(string: String, charset: String): ByteString = apply(string, charset) + val empty: ByteString = CompactByteString(Array.empty[Byte]) def newBuilder: ByteStringBuilder = new ByteStringBuilder @@ -282,6 +297,12 @@ sealed abstract class ByteString extends IndexedSeq[Byte] with IndexedSeqOptimiz override def indexWhere(p: Byte ⇒ Boolean): Int = iterator.indexWhere(p) override def indexOf[B >: Byte](elem: B): Int = iterator.indexOf(elem) + /** + * JAVA API + * @return this ByteString copied into a byte array + */ + protected[ByteString] def toArray: Array[Byte] = toArray[Byte] // protected[ByteString] == public to Java but hidden to Scala * fnizz * + override def toArray[B >: Byte](implicit arg0: ClassTag[B]): Array[B] = iterator.toArray override def copyToArray[B >: Byte](xs: Array[B], start: Int, len: Int): Unit = iterator.copyToArray(xs, start, len) diff --git a/akka-actor/src/main/scala/akka/util/Helpers.scala b/akka-actor/src/main/scala/akka/util/Helpers.scala index 8c0cfec86c..430d4582ae 100644 --- a/akka-actor/src/main/scala/akka/util/Helpers.scala +++ b/akka-actor/src/main/scala/akka/util/Helpers.scala @@ -9,6 +9,8 @@ import java.util.regex.Pattern object Helpers { + val isWindows: Boolean = System.getProperty("os.name", "").toLowerCase.indexOf("win") >= 0 + def makePattern(s: String): Pattern = Pattern.compile("^\\Q" + s.replace("?", "\\E.\\Q").replace("*", "\\E.*\\Q") + "\\E$") def compareIdentityHash(a: AnyRef, b: AnyRef): Int = { diff --git a/akka-actor/src/main/scala/akka/util/SubclassifiedIndex.scala b/akka-actor/src/main/scala/akka/util/SubclassifiedIndex.scala index d0ee67c1fb..236f645864 100644 --- a/akka-actor/src/main/scala/akka/util/SubclassifiedIndex.scala +++ b/akka-actor/src/main/scala/akka/util/SubclassifiedIndex.scala @@ -32,7 +32,7 @@ private[akka] object SubclassifiedIndex { val kids = subkeys flatMap (_ addValue value) if (!(values contains value)) { values += value - kids :+ ((key, values)) + kids :+ ((key, Set(value))) } else kids } diff --git a/akka-camel/src/main/scala/akka/camel/ActorNotRegisteredException.scala b/akka-camel/src/main/scala/akka/camel/ActorNotRegisteredException.scala index 7a303e47b3..29889d8bf6 100644 --- a/akka-camel/src/main/scala/akka/camel/ActorNotRegisteredException.scala +++ b/akka-camel/src/main/scala/akka/camel/ActorNotRegisteredException.scala @@ -3,7 +3,6 @@ package akka.camel * Thrown to indicate that the actor referenced by an endpoint URI cannot be * found in the actor system. 
* - * @author Martin Krasser */ class ActorNotRegisteredException(uri: String) extends RuntimeException { override def getMessage: String = "Actor [%s] doesn't exist" format uri diff --git a/akka-camel/src/main/scala/akka/camel/CamelMessage.scala b/akka-camel/src/main/scala/akka/camel/CamelMessage.scala index 70fd61bd2a..c9dc32e597 100644 --- a/akka-camel/src/main/scala/akka/camel/CamelMessage.scala +++ b/akka-camel/src/main/scala/akka/camel/CamelMessage.scala @@ -5,7 +5,7 @@ package akka.camel import java.util.{ Map ⇒ JMap, Set ⇒ JSet } -import org.apache.camel.{ CamelContext, Message ⇒ JCamelMessage } +import org.apache.camel.{ CamelContext, Message ⇒ JCamelMessage, StreamCache } import akka.AkkaException import scala.reflect.ClassTag import scala.util.Try @@ -14,7 +14,6 @@ import akka.dispatch.Mapper /** * An immutable representation of a Camel message. - * @author Martin Krasser */ case class CamelMessage(body: Any, headers: Map[String, Any]) { def this(body: Any, headers: JMap[String, Any]) = this(body, headers.toMap) //for Java @@ -108,7 +107,21 @@ case class CamelMessage(body: Any, headers: Map[String, Any]) { * Java API * */ - def getBodyAs[T](clazz: Class[T], camelContext: CamelContext): T = camelContext.getTypeConverter.mandatoryConvertTo[T](clazz, body) + def getBodyAs[T](clazz: Class[T], camelContext: CamelContext): T = { + val result = camelContext.getTypeConverter.mandatoryConvertTo[T](clazz, body) + // to be able to re-read a StreamCache we must "undo" the side effect by resetting the StreamCache + resetStreamCache() + result + } + + /** + * Reset StreamCache body. Nothing is done if the body is not a StreamCache. + * See http://camel.apache.org/stream-caching.html + */ + def resetStreamCache(): Unit = body match { + case stream: StreamCache ⇒ stream.reset + case _ ⇒ + } /** * Returns a new CamelMessage with a new body, while keeping the same headers. @@ -138,7 +151,6 @@ case class CamelMessage(body: Any, headers: Map[String, Any]) { /** * Companion object of CamelMessage class. * - * @author Martin Krasser */ object CamelMessage { @@ -182,7 +194,7 @@ object CamelMessage { /** * Positive acknowledgement message (used for application-acknowledged message receipts). * When `autoAck` is set to false in the [[akka.camel.Consumer]], you can send an `Ack` to the sender of the CamelMessage. - * @author Martin Krasser + * */ case object Ack { /** Java API to get the Ack singleton */ diff --git a/akka-camel/src/main/scala/akka/camel/Consumer.scala b/akka-camel/src/main/scala/akka/camel/Consumer.scala index 2915235745..19ddc85b59 100644 --- a/akka-camel/src/main/scala/akka/camel/Consumer.scala +++ b/akka-camel/src/main/scala/akka/camel/Consumer.scala @@ -13,7 +13,7 @@ import akka.dispatch.Mapper /** * Mixed in by Actor implementations that consume message from Camel endpoints. * - * @author Martin Krasser + * */ trait Consumer extends Actor with CamelSupport { import Consumer._ diff --git a/akka-camel/src/main/scala/akka/camel/Producer.scala b/akka-camel/src/main/scala/akka/camel/Producer.scala index 683ff4f20f..ca05f7a45d 100644 --- a/akka-camel/src/main/scala/akka/camel/Producer.scala +++ b/akka-camel/src/main/scala/akka/camel/Producer.scala @@ -13,8 +13,6 @@ import org.apache.camel.processor.SendProcessor /** * Support trait for producing messages to Camel endpoints. 
- * - * @author Martin Krasser */ trait ProducerSupport extends Actor with CamelSupport { private[this] var messages = Map[ActorRef, Any]() @@ -66,7 +64,7 @@ trait ProducerSupport extends Actor with CamelSupport { for ( child ← producerChild; (sender, msg) ← messages - ) child.tell(msg, sender) + ) child.tell(transformOutgoingMessage(msg), sender) Map() } } @@ -78,7 +76,7 @@ trait ProducerSupport extends Actor with CamelSupport { case msg ⇒ producerChild match { - case Some(child) ⇒ child forward msg + case Some(child) ⇒ child forward transformOutgoingMessage(msg) case None ⇒ messages += (sender -> msg) } } @@ -110,7 +108,7 @@ trait ProducerSupport extends Actor with CamelSupport { private class ProducerChild(endpoint: Endpoint, processor: SendProcessor) extends Actor { def receive = { case msg @ (_: FailureResult | _: MessageResult) ⇒ context.parent forward msg - case msg ⇒ produce(endpoint, processor, transformOutgoingMessage(msg), if (oneway) ExchangePattern.InOnly else ExchangePattern.InOut) + case msg ⇒ produce(endpoint, processor, msg, if (oneway) ExchangePattern.InOnly else ExchangePattern.InOut) } /** * Initiates a message exchange of given pattern with the endpoint specified by @@ -160,20 +158,20 @@ trait Producer extends ProducerSupport { this: Actor ⇒ /** * For internal use only. - * @author Martin Krasser + * */ private case class MessageResult(message: CamelMessage) extends NoSerializationVerificationNeeded /** * For internal use only. - * @author Martin Krasser + * */ private case class FailureResult(cause: Throwable, headers: Map[String, Any] = Map.empty) extends NoSerializationVerificationNeeded /** * A one-way producer. * - * @author Martin Krasser + * */ trait Oneway extends Producer { this: Actor ⇒ override def oneway: Boolean = true diff --git a/akka-camel/src/main/scala/akka/camel/internal/ActivationTracker.scala b/akka-camel/src/main/scala/akka/camel/internal/ActivationTracker.scala index 43ca2701c6..9beb6a8894 100644 --- a/akka-camel/src/main/scala/akka/camel/internal/ActivationTracker.scala +++ b/akka-camel/src/main/scala/akka/camel/internal/ActivationTracker.scala @@ -6,15 +6,14 @@ package akka.camel.internal import akka.actor._ import collection.mutable.WeakHashMap -import akka.camel._ -import internal.ActivationProtocol._ +import akka.camel.internal.ActivationProtocol._ /** * For internal use only. An actor that tracks activation and de-activation of endpoints. */ -private[akka] final class ActivationTracker extends Actor with ActorLogging { - val activations = new WeakHashMap[ActorRef, ActivationStateMachine] +private[camel] class ActivationTracker extends Actor with ActorLogging { + val activations = new WeakHashMap[ActorRef, ActivationStateMachine] /** * A state machine that keeps track of the endpoint activation status of an actor. */ @@ -22,7 +21,6 @@ private[akka] final class ActivationTracker extends Actor with ActorLogging { type State = PartialFunction[ActivationMessage, Unit] var receive: State = notActivated() - /** * Not activated state * @return a partial function that handles messages in the 'not activated' state @@ -68,8 +66,12 @@ private[akka] final class ActivationTracker extends Actor with ActorLogging { * @return a partial function that handles messages in the 'de-activated' state */ def deactivated: State = { + // deactivated means it was activated at some point, so tell sender it was activated case AwaitActivation(ref) ⇒ sender ! EndpointActivated(ref) case AwaitDeActivation(ref) ⇒ sender ! EndpointDeActivated(ref) + //resurrected at restart. 
+ case msg @ EndpointActivated(ref) ⇒ + receive = activated(Nil) } /** @@ -80,6 +82,7 @@ private[akka] final class ActivationTracker extends Actor with ActorLogging { def failedToActivate(cause: Throwable): State = { case AwaitActivation(ref) ⇒ sender ! EndpointFailedToActivate(ref, cause) case AwaitDeActivation(ref) ⇒ sender ! EndpointFailedToActivate(ref, cause) + case EndpointDeActivated(_) ⇒ // the de-register at termination always sends a de-activated when the cleanup is done. ignoring. } /** @@ -90,6 +93,7 @@ private[akka] final class ActivationTracker extends Actor with ActorLogging { def failedToDeActivate(cause: Throwable): State = { case AwaitActivation(ref) ⇒ sender ! EndpointActivated(ref) case AwaitDeActivation(ref) ⇒ sender ! EndpointFailedToDeActivate(ref, cause) + case EndpointDeActivated(_) ⇒ // the de-register at termination always sends a de-activated when the cleanup is done. ignoring. } } @@ -114,4 +118,4 @@ private[camel] case class AwaitActivation(ref: ActorRef) extends ActivationMessa * For internal use only. * @param ref the actorRef */ -private[camel] case class AwaitDeActivation(ref: ActorRef) extends ActivationMessage(ref) \ No newline at end of file +private[camel] case class AwaitDeActivation(ref: ActorRef) extends ActivationMessage(ref) diff --git a/akka-camel/src/main/scala/akka/camel/internal/CamelExchangeAdapter.scala b/akka-camel/src/main/scala/akka/camel/internal/CamelExchangeAdapter.scala index 1d16c3003e..b6a991d4d5 100644 --- a/akka-camel/src/main/scala/akka/camel/internal/CamelExchangeAdapter.scala +++ b/akka-camel/src/main/scala/akka/camel/internal/CamelExchangeAdapter.scala @@ -11,7 +11,7 @@ import akka.camel.{ FailureResult, AkkaCamelException, CamelMessage } * This adapter is used to convert to immutable messages to be used with Actors, and convert the immutable messages back * to org.apache.camel.Message when using Camel. * - * @author Martin Krasser + * */ private[camel] class CamelExchangeAdapter(val exchange: Exchange) { /** diff --git a/akka-camel/src/main/scala/akka/camel/internal/CamelSupervisor.scala b/akka-camel/src/main/scala/akka/camel/internal/CamelSupervisor.scala index b19bdbc0a2..bbad41e02f 100644 --- a/akka-camel/src/main/scala/akka/camel/internal/CamelSupervisor.scala +++ b/akka-camel/src/main/scala/akka/camel/internal/CamelSupervisor.scala @@ -115,9 +115,9 @@ private[camel] class Registry(activationTracker: ActorRef) extends Actor with Ca case msg @ Register(producer, _, None) ⇒ if (!producers(producer)) { producers += producer - producerRegistrar forward msg parent ! AddWatch(producer) } + producerRegistrar forward msg case DeRegister(actorRef) ⇒ producers.find(_ == actorRef).foreach { p ⇒ deRegisterProducer(p) @@ -155,6 +155,8 @@ private[camel] class ProducerRegistrar(activationTracker: ActorRef) extends Acto } catch { case NonFatal(e) ⇒ throw new ActorActivationException(producer, e) } + } else { + camelObjects.get(producer).foreach { case (endpoint, processor) ⇒ producer ! 
CamelProducerObjects(endpoint, processor) } } case DeRegister(producer) ⇒ camelObjects.get(producer).foreach { diff --git a/akka-camel/src/main/scala/akka/camel/internal/ConsumerActorRouteBuilder.scala b/akka-camel/src/main/scala/akka/camel/internal/ConsumerActorRouteBuilder.scala index 2caf952c6a..a27c23ec2f 100644 --- a/akka-camel/src/main/scala/akka/camel/internal/ConsumerActorRouteBuilder.scala +++ b/akka-camel/src/main/scala/akka/camel/internal/ConsumerActorRouteBuilder.scala @@ -16,7 +16,7 @@ import org.apache.camel.model.RouteDefinition * * @param endpointUri endpoint URI of the consumer actor. * - * @author Martin Krasser + * */ private[camel] class ConsumerActorRouteBuilder(endpointUri: String, consumer: ActorRef, config: ConsumerConfig, settings: CamelSettings) extends RouteBuilder { diff --git a/akka-camel/src/main/scala/akka/camel/internal/component/ActorComponent.scala b/akka-camel/src/main/scala/akka/camel/internal/component/ActorComponent.scala index 7400de9810..2585b970c9 100644 --- a/akka-camel/src/main/scala/akka/camel/internal/component/ActorComponent.scala +++ b/akka-camel/src/main/scala/akka/camel/internal/component/ActorComponent.scala @@ -31,7 +31,7 @@ import scala.util.{ Failure, Success, Try } * Messages are sent to [[akka.camel.Consumer]] actors through a [[akka.camel.internal.component.ActorEndpoint]] that * this component provides. * - * @author Martin Krasser + * */ private[camel] class ActorComponent(camel: Camel, system: ActorSystem) extends DefaultComponent { /** @@ -52,7 +52,7 @@ private[camel] class ActorComponent(camel: Camel, system: ActorSystem) extends D * [actorPath]?[options]%s, * where [actorPath] refers to the actor path to the actor. * - * @author Martin Krasser + * */ private[camel] class ActorEndpoint(uri: String, comp: ActorComponent, @@ -104,7 +104,7 @@ private[camel] trait ActorEndpointConfig { * @see akka.camel.component.ActorComponent * @see akka.camel.component.ActorEndpoint * - * @author Martin Krasser + * */ private[camel] class ActorProducer(val endpoint: ActorEndpoint, camel: Camel) extends DefaultProducer(endpoint) with AsyncProcessor { /** diff --git a/akka-camel/src/main/scala/akka/camel/javaapi/UntypedProducerActor.scala b/akka-camel/src/main/scala/akka/camel/javaapi/UntypedProducerActor.scala index cd353e04a0..7688df5130 100644 --- a/akka-camel/src/main/scala/akka/camel/javaapi/UntypedProducerActor.scala +++ b/akka-camel/src/main/scala/akka/camel/javaapi/UntypedProducerActor.scala @@ -12,7 +12,7 @@ import org.apache.camel.impl.DefaultCamelContext /** * Subclass this abstract class to create an untyped producer actor. This class is meant to be used from Java. 
* - * @author Martin Krasser + * */ abstract class UntypedProducerActor extends UntypedActor with ProducerSupport { /** diff --git a/akka-camel/src/test/java/akka/camel/ConsumerJavaTestBase.java b/akka-camel/src/test/java/akka/camel/ConsumerJavaTestBase.java index e8b178a463..d8aec8a761 100644 --- a/akka-camel/src/test/java/akka/camel/ConsumerJavaTestBase.java +++ b/akka-camel/src/test/java/akka/camel/ConsumerJavaTestBase.java @@ -19,7 +19,7 @@ import java.util.concurrent.TimeUnit; import akka.testkit.AkkaSpec; import static org.junit.Assert.*; /** - * @author Martin Krasser + * */ public class ConsumerJavaTestBase { diff --git a/akka-camel/src/test/java/akka/camel/MessageJavaTestBase.java b/akka-camel/src/test/java/akka/camel/MessageJavaTestBase.java index 95cdc5007b..d805a8b2c1 100644 --- a/akka-camel/src/test/java/akka/camel/MessageJavaTestBase.java +++ b/akka-camel/src/test/java/akka/camel/MessageJavaTestBase.java @@ -8,6 +8,7 @@ import akka.actor.ActorSystem; import akka.dispatch.Mapper; import akka.japi.Function; import org.apache.camel.NoTypeConversionAvailableException; +import org.apache.camel.converter.stream.InputStreamCache; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; @@ -18,7 +19,7 @@ import java.util.*; import static org.junit.Assert.assertEquals; /** - * @author Martin Krasser + * */ public class MessageJavaTestBase { static Camel camel; @@ -100,6 +101,14 @@ public class MessageJavaTestBase { message("test1" , createMap("A", "1")).withHeaders(createMap("C", "3"))); } + @Test + public void shouldBeAbleToReReadStreamCacheBody() throws Exception { + CamelMessage msg = new CamelMessage(new InputStreamCache("test1".getBytes("utf-8")), empty); + assertEquals("test1", msg.getBodyAs(String.class, camel.context())); + // re-read + assertEquals("test1", msg.getBodyAs(String.class, camel.context())); + } + private static Set createSet(String... 
entries) { HashSet set = new HashSet(); set.addAll(Arrays.asList(entries)); diff --git a/akka-camel/src/test/java/akka/camel/SampleErrorHandlingConsumer.java b/akka-camel/src/test/java/akka/camel/SampleErrorHandlingConsumer.java index e8a057e1ac..92fb124a11 100644 --- a/akka-camel/src/test/java/akka/camel/SampleErrorHandlingConsumer.java +++ b/akka-camel/src/test/java/akka/camel/SampleErrorHandlingConsumer.java @@ -15,7 +15,7 @@ import scala.Option; import scala.concurrent.duration.FiniteDuration; /** - * @author Martin Krasser + * */ public class SampleErrorHandlingConsumer extends UntypedConsumerActor { private static Mapper> mapper = new Mapper>() { diff --git a/akka-camel/src/test/java/akka/camel/SampleUntypedConsumer.java b/akka-camel/src/test/java/akka/camel/SampleUntypedConsumer.java index be293c21b9..030c951cc9 100644 --- a/akka-camel/src/test/java/akka/camel/SampleUntypedConsumer.java +++ b/akka-camel/src/test/java/akka/camel/SampleUntypedConsumer.java @@ -7,7 +7,7 @@ package akka.camel; import akka.camel.javaapi.UntypedConsumerActor; /** - * @author Martin Krasser + * */ public class SampleUntypedConsumer extends UntypedConsumerActor { diff --git a/akka-camel/src/test/java/akka/camel/SampleUntypedForwardingProducer.java b/akka-camel/src/test/java/akka/camel/SampleUntypedForwardingProducer.java index 375ef36835..b99a7ecc31 100644 --- a/akka-camel/src/test/java/akka/camel/SampleUntypedForwardingProducer.java +++ b/akka-camel/src/test/java/akka/camel/SampleUntypedForwardingProducer.java @@ -6,7 +6,7 @@ package akka.camel; import akka.camel.javaapi.UntypedProducerActor; /** - * @author Martin Krasser + * */ public class SampleUntypedForwardingProducer extends UntypedProducerActor { diff --git a/akka-camel/src/test/java/akka/camel/SampleUntypedReplyingProducer.java b/akka-camel/src/test/java/akka/camel/SampleUntypedReplyingProducer.java index 039494fd00..c47187d1da 100644 --- a/akka-camel/src/test/java/akka/camel/SampleUntypedReplyingProducer.java +++ b/akka-camel/src/test/java/akka/camel/SampleUntypedReplyingProducer.java @@ -7,7 +7,7 @@ package akka.camel; import akka.camel.javaapi.UntypedProducerActor; /** - * @author Martin Krasser + * */ public class SampleUntypedReplyingProducer extends UntypedProducerActor { diff --git a/akka-camel/src/test/scala/akka/camel/ConsumerIntegrationTest.scala b/akka-camel/src/test/scala/akka/camel/ConsumerIntegrationTest.scala index acc72ff9b1..6462e0b191 100644 --- a/akka-camel/src/test/scala/akka/camel/ConsumerIntegrationTest.scala +++ b/akka-camel/src/test/scala/akka/camel/ConsumerIntegrationTest.scala @@ -30,7 +30,7 @@ class ConsumerIntegrationTest extends WordSpec with MustMatchers with NonSharedC "Consumer must throw FailedToCreateRouteException, while awaiting activation, if endpoint is invalid" in { filterEvents(EventFilter[ActorActivationException](occurrences = 1)) { - val actorRef = system.actorOf(Props(new TestActor(uri = "some invalid uri"))) + val actorRef = system.actorOf(Props(new TestActor(uri = "some invalid uri")), "invalidActor") intercept[FailedToCreateRouteException] { Await.result(camel.activationFutureFor(actorRef), defaultTimeoutDuration) } diff --git a/akka-camel/src/test/scala/akka/camel/MessageScalaTest.scala b/akka-camel/src/test/scala/akka/camel/MessageScalaTest.scala index dd73027624..cbf0190e91 100644 --- a/akka-camel/src/test/scala/akka/camel/MessageScalaTest.scala +++ b/akka-camel/src/test/scala/akka/camel/MessageScalaTest.scala @@ -5,11 +5,11 @@ package akka.camel import java.io.InputStream - import 
org.apache.camel.NoTypeConversionAvailableException import akka.camel.TestSupport.{ SharedCamelSystem } import org.scalatest.FunSuite import org.scalatest.matchers.MustMatchers +import org.apache.camel.converter.stream.InputStreamCache class MessageScalaTest extends FunSuite with MustMatchers with SharedCamelSystem { implicit def camelContext = camel.context @@ -44,12 +44,17 @@ class MessageScalaTest extends FunSuite with MustMatchers with SharedCamelSystem test("mustSetBodyAndPreserveHeaders") { CamelMessage("test1", Map("A" -> "1")).copy(body = "test2") must be( CamelMessage("test2", Map("A" -> "1"))) - } test("mustSetHeadersAndPreserveBody") { CamelMessage("test1", Map("A" -> "1")).copy(headers = Map("C" -> "3")) must be( CamelMessage("test1", Map("C" -> "3"))) + } + test("mustBeAbleToReReadStreamCacheBody") { + val msg = CamelMessage(new InputStreamCache("test1".getBytes("utf-8")), Map.empty) + msg.bodyAs[String] must be("test1") + // re-read + msg.bodyAs[String] must be("test1") } } diff --git a/akka-camel/src/test/scala/akka/camel/ProducerFeatureTest.scala b/akka-camel/src/test/scala/akka/camel/ProducerFeatureTest.scala index 7cf9e92464..58cd0713d6 100644 --- a/akka-camel/src/test/scala/akka/camel/ProducerFeatureTest.scala +++ b/akka-camel/src/test/scala/akka/camel/ProducerFeatureTest.scala @@ -19,13 +19,20 @@ import scala.concurrent.duration._ import akka.util.Timeout import org.scalatest.matchers.MustMatchers import akka.testkit._ +import akka.actor.Status.Failure /** * Tests the features of the Camel Producer. */ -class ProducerFeatureTest extends WordSpec with BeforeAndAfterAll with BeforeAndAfterEach with SharedCamelSystem with MustMatchers { +class ProducerFeatureTest extends TestKit(ActorSystem("test", AkkaSpec.testConf)) with WordSpec with BeforeAndAfterAll with BeforeAndAfterEach with MustMatchers { import ProducerFeatureTest._ + implicit def camel = CamelExtension(system) + + override protected def afterAll() { + super.afterAll() + system.shutdown() + } val camelContext = camel.context // to make testing equality of messages easier, otherwise the breadcrumb shows up in the result. 
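The stream-cache tests above rely on the `resetStreamCache` handling added to `CamelMessage`: after a body conversion the cached stream is reset, so the same message can be read more than once. A minimal Scala sketch, assuming an `ActorSystem` named `system` with the Camel extension is in scope; the `InputStreamCache` body is only for illustration:

  import akka.camel.{ CamelExtension, CamelMessage }
  import org.apache.camel.converter.stream.InputStreamCache

  implicit val camelContext = CamelExtension(system).context

  val msg = CamelMessage(new InputStreamCache("payload".getBytes("utf-8")), Map.empty)
  val first  = msg.bodyAs[String] // reads the stream; the cache is reset after the conversion
  val second = msg.bodyAs[String] // succeeds again because of the reset; first == second == "payload"
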
@@ -40,9 +47,8 @@ class ProducerFeatureTest extends WordSpec with BeforeAndAfterAll with BeforeAnd "produce a message and receive normal response" in { val producer = system.actorOf(Props(new TestProducer("direct:producer-test-2", true)), name = "direct-producer-2") val message = CamelMessage("test", Map(CamelMessage.MessageExchangeId -> "123")) - val future = producer.ask(message)(timeoutDuration) - val expected = CamelMessage("received TEST", Map(CamelMessage.MessageExchangeId -> "123")) - Await.result(future, timeoutDuration) must be === expected + producer.tell(message, testActor) + expectMsg(CamelMessage("received TEST", Map(CamelMessage.MessageExchangeId -> "123"))) stopGracefully(producer) } @@ -65,12 +71,17 @@ class ProducerFeatureTest extends WordSpec with BeforeAndAfterAll with BeforeAnd case _: AkkaCamelException ⇒ Stop } }), name = "prod-anonymous-supervisor") - val producer = Await.result[ActorRef](supervisor.ask(Props(new TestProducer("direct:producer-test-2"))).mapTo[ActorRef], timeoutDuration) + + supervisor.tell(Props(new TestProducer("direct:producer-test-2")), testActor) + val producer = receiveOne(timeoutDuration).asInstanceOf[ActorRef] val message = CamelMessage("fail", Map(CamelMessage.MessageExchangeId -> "123")) filterEvents(EventFilter[AkkaCamelException](occurrences = 1)) { - val e = intercept[AkkaCamelException] { Await.result(producer.ask(message)(timeoutDuration), timeoutDuration) } - e.getMessage must be("failure") - e.headers must be(Map(CamelMessage.MessageExchangeId -> "123")) + producer.tell(message, testActor) + expectMsgPF(timeoutDuration) { + case Failure(e: AkkaCamelException) ⇒ + e.getMessage must be("failure") + e.headers must be(Map(CamelMessage.MessageExchangeId -> "123")) + } } Await.ready(latch, timeoutDuration) deadActor must be(Some(producer)) @@ -101,15 +112,8 @@ class ProducerFeatureTest extends WordSpec with BeforeAndAfterAll with BeforeAnd "produce message to direct:producer-test-3 and receive normal response" in { val producer = system.actorOf(Props(new TestProducer("direct:producer-test-3")), name = "direct-producer-test-3") val message = CamelMessage("test", Map(CamelMessage.MessageExchangeId -> "123")) - val future = producer.ask(message)(timeoutDuration) - - Await.result(future, timeoutDuration) match { - case result: CamelMessage ⇒ - // a normal response must have been returned by the producer - val expected = CamelMessage("received test", Map(CamelMessage.MessageExchangeId -> "123")) - result must be(expected) - case unexpected ⇒ fail("Actor responded with unexpected message:" + unexpected) - } + producer.tell(message, testActor) + expectMsg(CamelMessage("received test", Map(CamelMessage.MessageExchangeId -> "123"))) stopGracefully(producer) } @@ -118,9 +122,12 @@ class ProducerFeatureTest extends WordSpec with BeforeAndAfterAll with BeforeAnd val message = CamelMessage("fail", Map(CamelMessage.MessageExchangeId -> "123")) filterEvents(EventFilter[AkkaCamelException](occurrences = 1)) { - val e = intercept[AkkaCamelException] { Await.result(producer.ask(message)(timeoutDuration), timeoutDuration) } - e.getMessage must be("failure") - e.headers must be(Map(CamelMessage.MessageExchangeId -> "123")) + producer.tell(message, testActor) + expectMsgPF(timeoutDuration) { + case Failure(e: AkkaCamelException) ⇒ + e.getMessage must be("failure") + e.headers must be(Map(CamelMessage.MessageExchangeId -> "123")) + } } stopGracefully(producer) } @@ -129,15 +136,8 @@ class ProducerFeatureTest extends WordSpec with BeforeAndAfterAll with BeforeAnd 
val target = system.actorOf(Props[ReplyingForwardTarget], name = "reply-forwarding-target") val producer = system.actorOf(Props(new TestForwarder("direct:producer-test-2", target)), name = "direct-producer-test-2-forwarder") val message = CamelMessage("test", Map(CamelMessage.MessageExchangeId -> "123")) - val future = producer.ask(message)(timeoutDuration) - - Await.result(future, timeoutDuration) match { - case result: CamelMessage ⇒ - // a normal response must have been returned by the forward target - val expected = CamelMessage("received test", Map(CamelMessage.MessageExchangeId -> "123", "test" -> "result")) - result must be(expected) - case unexpected ⇒ fail("Actor responded with unexpected message:" + unexpected) - } + producer.tell(message, testActor) + expectMsg(CamelMessage("received test", Map(CamelMessage.MessageExchangeId -> "123", "test" -> "result"))) stopGracefully(target, producer) } @@ -147,9 +147,12 @@ class ProducerFeatureTest extends WordSpec with BeforeAndAfterAll with BeforeAnd val message = CamelMessage("fail", Map(CamelMessage.MessageExchangeId -> "123")) filterEvents(EventFilter[AkkaCamelException](occurrences = 1)) { - val e = intercept[AkkaCamelException] { Await.result(producer.ask(message)(timeoutDuration), timeoutDuration) } - e.getMessage must be("failure") - e.headers must be(Map(CamelMessage.MessageExchangeId -> "123", "test" -> "failure")) + producer.tell(message, testActor) + expectMsgPF(timeoutDuration) { + case Failure(e: AkkaCamelException) ⇒ + e.getMessage must be("failure") + e.headers must be(Map(CamelMessage.MessageExchangeId -> "123", "test" -> "failure")) + } } stopGracefully(target, producer) } @@ -180,13 +183,8 @@ class ProducerFeatureTest extends WordSpec with BeforeAndAfterAll with BeforeAnd val producer = system.actorOf(Props(new TestForwarder("direct:producer-test-3", target)), name = "direct-producer-test-3-to-replying-actor") val message = CamelMessage("test", Map(CamelMessage.MessageExchangeId -> "123")) - val future = producer.ask(message)(timeoutDuration) - Await.result(future, timeoutDuration) match { - case message: CamelMessage ⇒ - val expected = CamelMessage("received test", Map(CamelMessage.MessageExchangeId -> "123", "test" -> "result")) - message must be(expected) - case unexpected ⇒ fail("Actor responded with unexpected message:" + unexpected) - } + producer.tell(message, testActor) + expectMsg(CamelMessage("received test", Map(CamelMessage.MessageExchangeId -> "123", "test" -> "result"))) stopGracefully(target, producer) } @@ -196,9 +194,12 @@ class ProducerFeatureTest extends WordSpec with BeforeAndAfterAll with BeforeAnd val message = CamelMessage("fail", Map(CamelMessage.MessageExchangeId -> "123")) filterEvents(EventFilter[AkkaCamelException](occurrences = 1)) { - val e = intercept[AkkaCamelException] { Await.result(producer.ask(message)(timeoutDuration), timeoutDuration) } - e.getMessage must be("failure") - e.headers must be(Map(CamelMessage.MessageExchangeId -> "123", "test" -> "failure")) + producer.tell(message, testActor) + expectMsgPF(timeoutDuration) { + case Failure(e: AkkaCamelException) ⇒ + e.getMessage must be("failure") + e.headers must be(Map(CamelMessage.MessageExchangeId -> "123", "test" -> "failure")) + } } stopGracefully(target, producer) } @@ -224,6 +225,36 @@ class ProducerFeatureTest extends WordSpec with BeforeAndAfterAll with BeforeAnd } stopGracefully(target, producer) } + + "keep producing messages after error" in { + import TestSupport._ + val consumer = start(new 
IntermittentErrorConsumer("direct:intermittentTest-1"), "intermittentTest-error-consumer") + val producer = start(new SimpleProducer("direct:intermittentTest-1"), "intermittentTest-producer") + filterEvents(EventFilter[AkkaCamelException](occurrences = 1)) { + val futureFailed = producer.tell("fail", testActor) + expectMsgPF(timeoutDuration) { + case Failure(e) ⇒ + e.getMessage must be("fail") + } + producer.tell("OK", testActor) + expectMsg("OK") + } + stop(consumer) + stop(producer) + } + "be able to transform outgoing messages and have a valid sender reference" in { + import TestSupport._ + filterEvents(EventFilter[Exception](occurrences = 1)) { + val producerSupervisor = system.actorOf(Props(new ProducerSupervisor(Props(new ChildProducer("mock:mock", true)))), "ignore-deadletter-sender-ref-test") + mockEndpoint.reset() + producerSupervisor.tell(CamelMessage("test", Map()), testActor) + producerSupervisor.tell(CamelMessage("err", Map()), testActor) + mockEndpoint.expectedMessageCount(1) + mockEndpoint.expectedBodiesReceived("TEST") + expectMsg("TEST") + system.stop(producerSupervisor) + } + } } private def mockEndpoint = camel.context.getEndpoint("mock:mock", classOf[MockEndpoint]) @@ -236,9 +267,52 @@ class ProducerFeatureTest extends WordSpec with BeforeAndAfterAll with BeforeAnd } object ProducerFeatureTest { + class ProducerSupervisor(childProps: Props) extends Actor { + override def supervisorStrategy = SupervisorStrategy.stoppingStrategy + val child = context.actorOf(childProps, "producer-supervisor-child") + val duration = 10 seconds + implicit val timeout = Timeout(duration) + implicit val ec = context.system.dispatcher + Await.ready(CamelExtension(context.system).activationFutureFor(child), timeout.duration) + def receive = { + case msg: CamelMessage ⇒ + child forward (msg) + case (aref: ActorRef, msg: String) ⇒ + aref ! msg + } + } + class ChildProducer(uri: String, upper: Boolean = false) extends Actor with Producer { + override def oneway = true + + var lastSender: Option[ActorRef] = None + var lastMessage: Option[String] = None + def endpointUri = uri + + override def transformOutgoingMessage(msg: Any) = msg match { + case msg: CamelMessage ⇒ if (upper) msg.mapBody { + body: String ⇒ + if (body == "err") throw new Exception("Crash!") + val upperMsg = body.toUpperCase + lastSender = Some(sender) + lastMessage = Some(upperMsg) + } + else msg + } + + override def postStop() { + for (msg ← lastMessage; aref ← lastSender) context.parent ! (aref, msg) + super.postStop() + } + } + class TestProducer(uri: String, upper: Boolean = false) extends Actor with Producer { def endpointUri = uri + override def preRestart(reason: Throwable, message: Option[Any]) { + //overriding on purpose so it doesn't try to deRegister and reRegister at restart, + // which would cause a deadletter message in the test output. + } + override protected def transformOutgoingMessage(msg: Any) = msg match { case msg: CamelMessage ⇒ if (upper) msg.mapBody { body: String ⇒ body.toUpperCase @@ -303,4 +377,18 @@ object ProducerFeatureTest { } } + class SimpleProducer(override val endpointUri: String) extends Producer { + override protected def transformResponse(msg: Any) = msg match { + case m: CamelMessage ⇒ m.bodyAs[String] + case m: Any ⇒ m + } + } + + class IntermittentErrorConsumer(override val endpointUri: String) extends Consumer { + def receive = { + case msg: CamelMessage if msg.bodyAs[String] == "fail" ⇒ sender ! Failure(new Exception("fail")) + case msg: CamelMessage ⇒ sender ! 
msg + } + } + } diff --git a/akka-camel/src/test/scala/akka/camel/internal/ActivationTrackerTest.scala b/akka-camel/src/test/scala/akka/camel/internal/ActivationTrackerTest.scala index 99ebeafed7..783e7ab9a5 100644 --- a/akka-camel/src/test/scala/akka/camel/internal/ActivationTrackerTest.scala +++ b/akka-camel/src/test/scala/akka/camel/internal/ActivationTrackerTest.scala @@ -109,6 +109,14 @@ class ActivationTrackerTest extends TestKit(ActorSystem("test")) with WordSpec w awaiting.verifyActivated() } + + "send activation message when an actor is activated, deactivated and activated again" taggedAs TimingTest in { + publish(EndpointActivated(actor.ref)) + publish(EndpointDeActivated(actor.ref)) + publish(EndpointActivated(actor.ref)) + awaiting.awaitActivation() + awaiting.verifyActivated() + } } class Awaiting(actor: TestProbe) { diff --git a/akka-cluster/src/main/resources/reference.conf b/akka-cluster/src/main/resources/reference.conf index a1215f4563..f8d18a516b 100644 --- a/akka-cluster/src/main/resources/reference.conf +++ b/akka-cluster/src/main/resources/reference.conf @@ -28,6 +28,12 @@ akka { # formed in case of network partition. auto-down = off + # Minimum required number of members before the leader changes member status + # of 'Joining' members to 'Up'. Typically used together with + # 'Cluster.registerOnMemberUp' to defer some action, such as starting actors, + # until the cluster has reached a certain size. + min-nr-of-members = 1 + # Enable or disable JMX MBeans for management of the cluster jmx.enabled = on @@ -70,7 +76,7 @@ akka { failure-detector { # FQCN of the failure detector implementation. - # It must implement akka.cluster.akka.cluster and + # It must implement akka.cluster.FailureDetector and # have constructor with akka.actor.ActorSystem and # akka.cluster.ClusterSettings parameters implementation-class = "akka.cluster.AccrualFailureDetector" @@ -106,22 +112,32 @@ akka { max-sample-size = 1000 } - # Uses JMX and Hyperic SIGAR, if SIGAR is on the classpath. metrics { # Enable or disable metrics collector for load-balancing nodes. enabled = on - # How often metrics is sampled on a node. - metrics-interval = 3s + # FQCN of the metrics collector implementation. + # It must implement akka.cluster.cluster.MetricsCollector and + # have constructor with akka.actor.ActorSystem parameter. + # The default SigarMetricsCollector uses JMX and Hyperic SIGAR, if SIGAR + # is on the classpath, otherwise only JMX. + collector-class = "akka.cluster.SigarMetricsCollector" + + # How often metrics are sampled on a node. + # Shorter interval will collect the metrics more often. + collect-interval = 3s # How often a node publishes metrics information. gossip-interval = 3s # How quickly the exponential weighting of past data is decayed compared to - # new data. - # If set to 0 data streaming over time will be turned off. - # Set higher to increase the bias toward newer values - rate-of-decay = 10 + # new data. Set lower to increase the bias toward newer values. + # The relevance of each data sample is halved for every passing half-life duration, + # i.e. after 4 times the half-life, a data sample’s relevance is reduced to 6% of + # its original relevance. The initial relevance of a data sample is given by + # 1 – 0.5 ^ (collect-interval / half-life). 
+ # See http://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average + moving-average-half-life = 12s } # If the tick-duration of the default scheduler is longer than the @@ -143,6 +159,16 @@ akka { } # Default configuration for routers + actor.deployment.default { + # MetricsSelector to use + # - available: "mix", "heap", "cpu", "load" + # - or: Fully qualified class name of the MetricsSelector class. + # The class must extend akka.cluster.routing.MetricsSelector + # and have a constructor with com.typesafe.config.Config + # parameter. + # - default is "mix" + metrics-selector = mix + } actor.deployment.default.cluster { # enable cluster aware router that deploys to nodes in the cluster enabled = off @@ -169,4 +195,5 @@ akka { routees-path = "" } + } diff --git a/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala b/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala index 7293e54c67..feb950a9a8 100644 --- a/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala +++ b/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala @@ -6,7 +6,7 @@ package akka.cluster import akka.actor.{ ActorSystem, Address, ExtendedActorSystem } import akka.event.Logging -import scala.collection.immutable.Map +import scala.collection.immutable import scala.annotation.tailrec import java.util.concurrent.atomic.AtomicReference import java.util.concurrent.TimeUnit.NANOSECONDS @@ -233,7 +233,7 @@ private[cluster] object HeartbeatHistory { */ def apply(maxSampleSize: Int): HeartbeatHistory = HeartbeatHistory( maxSampleSize = maxSampleSize, - intervals = IndexedSeq.empty, + intervals = immutable.IndexedSeq.empty, intervalSum = 0L, squaredIntervalSum = 0L) @@ -248,7 +248,7 @@ private[cluster] object HeartbeatHistory { */ private[cluster] case class HeartbeatHistory private ( maxSampleSize: Int, - intervals: IndexedSeq[Long], + intervals: immutable.IndexedSeq[Long], intervalSum: Long, squaredIntervalSum: Long) { diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index cfe1f0ddab..54d3e6bfa3 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -17,7 +17,7 @@ import akka.util._ import scala.concurrent.duration._ import scala.concurrent.forkjoin.ThreadLocalRandom import scala.annotation.tailrec -import scala.collection.immutable.SortedSet +import scala.collection.immutable import java.io.Closeable import java.util.concurrent.atomic.AtomicBoolean import java.util.concurrent.atomic.AtomicReference @@ -67,7 +67,7 @@ class Cluster(val system: ExtendedActorSystem) extends Extension { format(system, other.getClass.getName)) } - private val _isRunning = new AtomicBoolean(true) + private val _isTerminated = new AtomicBoolean(false) private val log = Logging(system, "Cluster") log.info("Cluster Node [{}] - is starting up...", selfAddress) @@ -95,7 +95,7 @@ class Cluster(val system: ExtendedActorSystem) extends Extension { new DefaultScheduler( new HashedWheelTimer(log, system.threadFactory match { - case tf: MonitorableThreadFactory ⇒ tf.copy(name = tf.name + "-cluster-scheduler") + case tf: MonitorableThreadFactory ⇒ tf.withName(tf.name + "-cluster-scheduler") case tf ⇒ tf }, SchedulerTickDuration, @@ -108,7 +108,7 @@ class Cluster(val system: ExtendedActorSystem) extends Extension { override def close(): Unit = () // we are using system.scheduler, which we are not responsible for closing override def 
schedule(initialDelay: FiniteDuration, interval: FiniteDuration, - receiver: ActorRef, message: Any)(implicit executor: ExecutionContext): Cancellable = + receiver: ActorRef, message: Any)(implicit executor: ExecutionContext, sender: ActorRef = Actor.noSender): Cancellable = systemScheduler.schedule(initialDelay, interval, receiver, message) override def schedule(initialDelay: FiniteDuration, interval: FiniteDuration)(f: ⇒ Unit)(implicit executor: ExecutionContext): Cancellable = @@ -169,9 +169,9 @@ class Cluster(val system: ExtendedActorSystem) extends Extension { // ====================================================== /** - * Returns true if the cluster node is up and running, false if it is shut down. + * Returns true if this cluster instance has be shutdown. */ - def isRunning: Boolean = _isRunning.get + def isTerminated: Boolean = _isTerminated.get /** * Subscribe to cluster domain events. @@ -232,6 +232,24 @@ class Cluster(val system: ExtendedActorSystem) extends Extension { def down(address: Address): Unit = clusterCore ! ClusterUserAction.Down(address) + /** + * The supplied thunk will be run, once, when current cluster member is `Up`. + * Typically used together with configuration option `akka.cluster.min-nr-of-members' + * to defer some action, such as starting actors, until the cluster has reached + * a certain size. + */ + def registerOnMemberUp[T](code: ⇒ T): Unit = + registerOnMemberUp(new Runnable { def run = code }) + + /** + * The supplied callback will be run, once, when current cluster member is `Up`. + * Typically used together with configuration option `akka.cluster.min-nr-of-members' + * to defer some action, such as starting actors, until the cluster has reached + * a certain size. + * JAVA API + */ + def registerOnMemberUp(callback: Runnable): Unit = clusterDaemons ! InternalClusterAction.AddOnMemberUpListener(callback) + // ======================================================== // ===================== INTERNAL API ===================== // ======================================================== @@ -241,7 +259,7 @@ class Cluster(val system: ExtendedActorSystem) extends Extension { * in config. Especially useful from tests when Addresses are unknown * before startup time. */ - private[cluster] def joinSeedNodes(seedNodes: IndexedSeq[Address]): Unit = + private[cluster] def joinSeedNodes(seedNodes: immutable.IndexedSeq[Address]): Unit = clusterCore ! InternalClusterAction.JoinSeedNodes(seedNodes) /** @@ -253,7 +271,7 @@ class Cluster(val system: ExtendedActorSystem) extends Extension { * to go through graceful handoff process `LEAVE -> EXITING -> REMOVED -> SHUTDOWN`. 
*/ private[cluster] def shutdown(): Unit = { - if (_isRunning.compareAndSet(true, false)) { + if (_isTerminated.compareAndSet(false, true)) { log.info("Cluster Node [{}] - Shutting down cluster Node and cluster daemons...", selfAddress) system.stop(clusterDaemons) @@ -268,4 +286,3 @@ class Cluster(val system: ExtendedActorSystem) extends Extension { } } - diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterActorRefProvider.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterActorRefProvider.scala index 024dfdc00c..5adb57615a 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterActorRefProvider.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterActorRefProvider.scala @@ -18,16 +18,29 @@ import akka.actor.Props import akka.actor.Scheduler import akka.actor.Scope import akka.actor.Terminated -import akka.cluster.routing.ClusterRouterConfig -import akka.cluster.routing.ClusterRouterSettings import akka.dispatch.ChildTerminated import akka.event.EventStream +import akka.japi.Util.immutableSeq import akka.remote.RemoteActorRefProvider import akka.remote.RemoteDeployer import akka.remote.routing.RemoteRouterConfig +import akka.routing.RouterConfig +import akka.routing.DefaultResizer +import akka.cluster.routing.ClusterRouterConfig +import akka.cluster.routing.ClusterRouterSettings +import akka.cluster.routing.AdaptiveLoadBalancingRouter +import akka.cluster.routing.MixMetricsSelector +import akka.cluster.routing.HeapMetricsSelector +import akka.cluster.routing.SystemLoadAverageMetricsSelector +import akka.cluster.routing.CpuMetricsSelector +import akka.cluster.routing.MetricsSelector /** * INTERNAL API + * + * The `ClusterActorRefProvider` will load the [[akka.cluster.Cluster]] + * extension, i.e. the cluster will automatically be started when + * the `ClusterActorRefProvider` is used. 
*/ class ClusterActorRefProvider( _systemName: String, @@ -42,10 +55,17 @@ class ClusterActorRefProvider( override def init(system: ActorSystemImpl): Unit = { super.init(system) + // initialize/load the Cluster extension + Cluster(system) + remoteDeploymentWatcher = system.systemActorOf(Props[RemoteDeploymentWatcher], "RemoteDeploymentWatcher") } - override val deployer: ClusterDeployer = new ClusterDeployer(settings, dynamicAccess) + /** + * Factory method to make it possible to override deployer in subclass + * Creates a new instance every time + */ + override protected def createDeployer: ClusterDeployer = new ClusterDeployer(settings, dynamicAccess) /** * This method is overridden here to keep track of remote deployed actors to @@ -108,6 +128,36 @@ private[akka] class ClusterDeployer(_settings: ActorSystem.Settings, _pm: Dynami case None ⇒ None } } + + override protected def createRouterConfig(routerType: String, key: String, config: Config, deployment: Config): RouterConfig = { + val routees = immutableSeq(deployment.getStringList("routees.paths")) + val nrOfInstances = deployment.getInt("nr-of-instances") + val resizer = if (config.hasPath("resizer")) Some(DefaultResizer(deployment.getConfig("resizer"))) else None + + routerType match { + case "adaptive" ⇒ + val metricsSelector = deployment.getString("metrics-selector") match { + case "mix" ⇒ MixMetricsSelector + case "heap" ⇒ HeapMetricsSelector + case "cpu" ⇒ CpuMetricsSelector + case "load" ⇒ SystemLoadAverageMetricsSelector + case fqn ⇒ + val args = List(classOf[Config] -> deployment) + dynamicAccess.createInstanceFor[MetricsSelector](fqn, args).recover({ + case exception ⇒ throw new IllegalArgumentException( + ("Cannot instantiate metrics-selector [%s], defined in [%s], " + + "make sure it extends [akka.cluster.routing.MetricsSelector] and " + + "has constructor with [com.typesafe.config.Config] parameter") + .format(fqn, key), exception) + }).get + } + + AdaptiveLoadBalancingRouter(metricsSelector, nrOfInstances, routees, resizer) + + case _ ⇒ super.createRouterConfig(routerType, key, config, deployment) + } + + } } @SerialVersionUID(1L) diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala index 9a69922521..50644e431c 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala @@ -3,9 +3,12 @@ */ package akka.cluster -import scala.collection.immutable.SortedSet +import language.existentials +import language.postfixOps +import scala.collection.immutable import scala.concurrent.duration._ import scala.concurrent.forkjoin.ThreadLocalRandom +import scala.util.control.NonFatal import akka.actor.{ Actor, ActorLogging, ActorRef, Address, Cancellable, Props, ReceiveTimeout, RootActorPath, Scheduler } import akka.actor.Status.Failure import akka.event.EventStream @@ -13,8 +16,6 @@ import akka.pattern.ask import akka.util.Timeout import akka.cluster.MemberStatus._ import akka.cluster.ClusterEvent._ -import language.existentials -import language.postfixOps /** * Base trait for all cluster messages. All ClusterMessage's are serializable. @@ -61,7 +62,7 @@ private[cluster] object InternalClusterAction { * Command to initiate the process to join the specified * seed nodes. */ - case class JoinSeedNodes(seedNodes: IndexedSeq[Address]) + case class JoinSeedNodes(seedNodes: immutable.IndexedSeq[Address]) /** * Start message of the process to join one of the seed nodes. 
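The `adaptive` case in `createRouterConfig` above reads only the `metrics-selector`, `nr-of-instances`, `routees.paths` and optional `resizer` keys from the deployment section. A hedged sketch of such a deployment block (the router path and values are invented; other cluster-router settings are omitted):

```scala
import com.typesafe.config.ConfigFactory

object AdaptiveDeploymentSketch {
  // parsed the same way as application.conf; "mix" selects MixMetricsSelector,
  // a fully qualified class name would instead be instantiated reflectively
  val deployment = ConfigFactory.parseString("""
    akka.actor.deployment {
      "/metricsAwareRouter" {
        router = adaptive
        metrics-selector = mix
        nr-of-instances = 10
        routees.paths = ["/user/backend"]
      }
    }
    """)
}
```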
@@ -104,6 +105,12 @@ private[cluster] object InternalClusterAction { case object GetClusterCoreRef + /** + * Command to [[akka.cluster.ClusterDaemon]] to create a + * [[akka.cluster.OnMemberUpListener]]. + */ + case class AddOnMemberUpListener(callback: Runnable) + sealed trait SubscriptionMessage case class Subscribe(subscriber: ActorRef, to: Class[_]) extends SubscriptionMessage case class Unsubscribe(subscriber: ActorRef, to: Option[Class[_]]) extends SubscriptionMessage @@ -113,10 +120,12 @@ private[cluster] object InternalClusterAction { */ case class PublishCurrentClusterState(receiver: Option[ActorRef]) extends SubscriptionMessage - case class PublishChanges(oldGossip: Gossip, newGossip: Gossip) - case class PublishEvent(event: ClusterDomainEvent) - case object PublishDone - + sealed trait PublishMessage + case class PublishChanges(newGossip: Gossip) extends PublishMessage + case class PublishEvent(event: ClusterDomainEvent) extends PublishMessage + case object PublishStart extends PublishMessage + case object PublishDone extends PublishMessage + case object PublishDoneFinished extends PublishMessage } /** @@ -160,6 +169,8 @@ private[cluster] final class ClusterDaemon(settings: ClusterSettings) extends Ac def receive = { case InternalClusterAction.GetClusterCoreRef ⇒ sender ! core + case InternalClusterAction.AddOnMemberUpListener(code) ⇒ + context.actorOf(Props(new OnMemberUpListener(code))) } } @@ -256,7 +267,7 @@ private[cluster] final class ClusterCoreDaemon(publisher: ActorRef) extends Acto def initJoin(): Unit = sender ! InitJoinAck(selfAddress) - def joinSeedNodes(seedNodes: IndexedSeq[Address]): Unit = { + def joinSeedNodes(seedNodes: immutable.IndexedSeq[Address]): Unit = { // only the node which is named first in the list of seed nodes will join itself if (seedNodes.isEmpty || seedNodes.head == selfAddress) self ! JoinTo(selfAddress) @@ -271,14 +282,14 @@ private[cluster] final class ClusterCoreDaemon(publisher: ActorRef) extends Acto */ def join(address: Address): Unit = { if (!latestGossip.members.exists(_.address == address)) { - val localGossip = latestGossip // wipe our state since a node that joins a cluster must be empty latestGossip = Gossip() - // wipe the failure detector since we are starting fresh and shouldn't care about the past failureDetector.reset() + // wipe the publisher since we are starting fresh + publisher ! PublishStart - publish(localGossip) + publish(latestGossip) heartbeatSender ! JoinInProgress(address, Deadline.now + JoinTimeout) context.become(initialized) @@ -293,18 +304,16 @@ private[cluster] final class ClusterCoreDaemon(publisher: ActorRef) extends Acto * State transition to JOINING - new node joining.
*/ def joining(node: Address): Unit = { - val localGossip = latestGossip - val localMembers = localGossip.members - val localUnreachable = localGossip.overview.unreachable + val localMembers = latestGossip.members + val localUnreachable = latestGossip.overview.unreachable val alreadyMember = localMembers.exists(_.address == node) - val isUnreachable = localGossip.overview.isNonDownUnreachable(node) + val isUnreachable = latestGossip.overview.isNonDownUnreachable(node) if (!alreadyMember && !isUnreachable) { - // remove the node from the 'unreachable' set in case it is a DOWN node that is rejoining cluster val (rejoiningMember, newUnreachableMembers) = localUnreachable partition { _.address == node } - val newOverview = localGossip.overview copy (unreachable = newUnreachableMembers) + val newOverview = latestGossip.overview copy (unreachable = newUnreachableMembers) // remove the node from the failure detector if it is a DOWN node that is rejoining cluster if (rejoiningMember.nonEmpty) failureDetector.remove(node) @@ -312,7 +321,7 @@ private[cluster] final class ClusterCoreDaemon(publisher: ActorRef) extends Acto // add joining node as Joining // add self in case someone else joins before self has joined (Set discards duplicates) val newMembers = localMembers + Member(node, Joining) + Member(selfAddress, Joining) - val newGossip = localGossip copy (overview = newOverview, members = newMembers) + val newGossip = latestGossip copy (overview = newOverview, members = newMembers) val versionedGossip = newGossip :+ vclockNode val seenVersionedGossip = versionedGossip seen selfAddress @@ -326,7 +335,7 @@ private[cluster] final class ClusterCoreDaemon(publisher: ActorRef) extends Acto gossipTo(node) } - publish(localGossip) + publish(latestGossip) } } @@ -334,10 +343,9 @@ private[cluster] final class ClusterCoreDaemon(publisher: ActorRef) extends Acto * State transition to LEAVING. 
*/ def leaving(address: Address): Unit = { - val localGossip = latestGossip - if (localGossip.members.exists(_.address == address)) { // only try to update if the node is available (in the member ring) - val newMembers = localGossip.members map { member ⇒ if (member.address == address) Member(address, Leaving) else member } // mark node as LEAVING - val newGossip = localGossip copy (members = newMembers) + if (latestGossip.members.exists(_.address == address)) { // only try to update if the node is available (in the member ring) + val newMembers = latestGossip.members map { member ⇒ if (member.address == address) Member(address, Leaving) else member } // mark node as LEAVING + val newGossip = latestGossip copy (members = newMembers) val versionedGossip = newGossip :+ vclockNode val seenVersionedGossip = versionedGossip seen selfAddress @@ -345,7 +353,7 @@ private[cluster] final class ClusterCoreDaemon(publisher: ActorRef) extends Acto latestGossip = seenVersionedGossip log.info("Cluster Node [{}] - Marked address [{}] as LEAVING", selfAddress, address) - publish(localGossip) + publish(latestGossip) } } @@ -368,10 +376,9 @@ private[cluster] final class ClusterCoreDaemon(publisher: ActorRef) extends Acto */ def removing(address: Address): Unit = { log.info("Cluster Node [{}] - Node has been REMOVED by the leader - shutting down...", selfAddress) - val localGossip = latestGossip // just cleaning up the gossip state latestGossip = Gossip() - publish(localGossip) + publish(latestGossip) context.become(removed) // make sure the final (removed) state is published // before shutting down @@ -426,7 +433,7 @@ private[cluster] final class ClusterCoreDaemon(publisher: ActorRef) extends Acto val versionedGossip = newGossip :+ vclockNode latestGossip = versionedGossip seen selfAddress - publish(localGossip) + publish(latestGossip) } /** @@ -515,7 +522,7 @@ private[cluster] final class ClusterCoreDaemon(publisher: ActorRef) extends Acto } stats = stats.incrementReceivedGossipCount - publish(localGossip) + publish(latestGossip) if (envelope.conversation && (conflict || (winningGossip ne remoteGossip) || (latestGossip ne remoteGossip))) { @@ -602,12 +609,18 @@ private[cluster] final class ClusterCoreDaemon(publisher: ActorRef) extends Acto if (localGossip.convergence) { // we have convergence - so we can't have unreachable nodes + val numberOfMembers = localMembers.size + def isJoiningToUp(m: Member): Boolean = m.status == Joining && numberOfMembers >= MinNrOfMembers + // transform the node member ring val newMembers = localMembers collect { - // 1. Move JOINING => UP (once all nodes have seen that this node is JOINING e.g. we have a convergence) - case member if member.status == Joining ⇒ member copy (status = Up) - // 2. Move LEAVING => EXITING (once we have a convergence on LEAVING *and* if we have a successful partition handoff) - case member if member.status == Leaving && hasPartionHandoffCompletedSuccessfully ⇒ member copy (status = Exiting) + // 1. Move JOINING => UP (once all nodes have seen that this node is JOINING, i.e. we have a convergence) + // and minimum number of nodes have joined the cluster + case member if isJoiningToUp(member) ⇒ member copy (status = Up) + // 2. Move LEAVING => EXITING (once we have a convergence on LEAVING + // *and* if we have a successful partition handoff) + case member if member.status == Leaving && hasPartionHandoffCompletedSuccessfully ⇒ + member copy (status = Exiting) // 3. 
Everyone else that is not Exiting stays as they are case member if member.status != Exiting ⇒ member // 4. Move EXITING => REMOVED - e.g. remove the nodes from the 'members' set/node ring and seen table @@ -621,10 +634,10 @@ private[cluster] final class ClusterCoreDaemon(publisher: ActorRef) extends Acto // Repeat the checking for transitions between JOINING -> UP, LEAVING -> EXITING, EXITING -> REMOVED // to check for state-changes and to store away removed and exiting members for later notification // 1. check for state-changes to update - // 2. store away removed and exiting members so we can separate the pure state changes (that can be retried on collision) and the side-effecting message sending + // 2. store away removed and exiting members so we can separate the pure state changes val (removedMembers, newMembers1) = localMembers partition (_.status == Exiting) - val (upMembers, newMembers2) = newMembers1 partition (_.status == Joining) + val (upMembers, newMembers2) = newMembers1 partition (isJoiningToUp(_)) val exitingMembers = newMembers2 filter (_.status == Leaving && hasPartionHandoffCompletedSuccessfully) @@ -715,7 +728,7 @@ private[cluster] final class ClusterCoreDaemon(publisher: ActorRef) extends Acto log.info("Cluster Node [{}] - Leader is marking unreachable node [{}] as DOWN", selfAddress, member.address) } - publish(localGossip) + publish(latestGossip) } } } @@ -752,7 +765,7 @@ private[cluster] final class ClusterCoreDaemon(publisher: ActorRef) extends Acto log.error("Cluster Node [{}] - Marking node(s) as UNREACHABLE [{}]", selfAddress, newlyDetectedUnreachableMembers.mkString(", ")) - publish(localGossip) + publish(latestGossip) } } } @@ -763,14 +776,14 @@ private[cluster] final class ClusterCoreDaemon(publisher: ActorRef) extends Acto def isSingletonCluster: Boolean = latestGossip.isSingletonCluster - def isAvailable: Boolean = latestGossip.isAvailable(selfAddress) + def isAvailable: Boolean = !latestGossip.isUnreachable(selfAddress) /** * Gossips latest gossip to a random member in the set of members passed in as argument. * * @return the used [[akka.actor.Address] if any */ - private def gossipToRandomNodeOf(addresses: IndexedSeq[Address]): Option[Address] = { + private def gossipToRandomNodeOf(addresses: immutable.IndexedSeq[Address]): Option[Address] = { log.debug("Cluster Node [{}] - Selecting random node to gossip to [{}]", selfAddress, addresses.mkString(", ")) // filter out myself val peer = selectRandomNode(addresses filterNot (_ == selfAddress)) @@ -790,8 +803,8 @@ private[cluster] final class ClusterCoreDaemon(publisher: ActorRef) extends Acto def gossipTo(address: Address, gossipMsg: GossipEnvelope): Unit = if (address != selfAddress) coreSender ! SendClusterMessage(address, gossipMsg) - def publish(oldGossip: Gossip): Unit = { - publisher ! PublishChanges(oldGossip, latestGossip) + def publish(newGossip: Gossip): Unit = { + publisher ! PublishChanges(newGossip) if (PublishStatsInterval == Duration.Zero) publishInternalStats() } @@ -823,7 +836,7 @@ private[cluster] final class ClusterCoreDaemon(publisher: ActorRef) extends Acto * 5. 
seed3 retries the join procedure and gets acks from seed2 first, and then joins to seed2 * */ -private[cluster] final class JoinSeedNodeProcess(seedNodes: IndexedSeq[Address]) extends Actor with ActorLogging { +private[cluster] final class JoinSeedNodeProcess(seedNodes: immutable.IndexedSeq[Address]) extends Actor with ActorLogging { import InternalClusterAction._ def selfAddress = Cluster(context.system).selfAddress @@ -877,6 +890,42 @@ private[cluster] final class ClusterCoreSender extends Actor with ActorLogging { } } +/** + * INTERNAL API + * + * The supplied callback will be run, once, when current cluster member is `Up`. + */ +private[cluster] class OnMemberUpListener(callback: Runnable) extends Actor with ActorLogging { + import ClusterEvent._ + val cluster = Cluster(context.system) + // subscribe to MemberUp, re-subscribe when restart + override def preStart(): Unit = + cluster.subscribe(self, classOf[MemberUp]) + override def postStop(): Unit = + cluster.unsubscribe(self) + + def receive = { + case state: CurrentClusterState ⇒ + if (state.members.exists(isSelfUp(_))) + done() + case MemberUp(m) ⇒ + if (isSelfUp(m)) + done() + } + + def done(): Unit = { + try callback.run() catch { + case NonFatal(e) ⇒ log.error(e, "OnMemberUp callback failed with [{}]", e.getMessage) + } finally { + context stop self + } + } + + def isSelfUp(m: Member): Boolean = + m.address == cluster.selfAddress && m.status == MemberStatus.Up + +} + /** * INTERNAL API */ diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala index 8d87f3fe53..c896e721cc 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala @@ -4,12 +4,16 @@ package akka.cluster import language.postfixOps -import scala.collection.immutable.SortedSet +import scala.collection.immutable +import scala.collection.immutable.{ VectorBuilder, SortedSet } import akka.actor.{ Actor, ActorLogging, ActorRef, Address } import akka.cluster.ClusterEvent._ import akka.cluster.MemberStatus._ import akka.event.EventStream import akka.actor.AddressTerminated +import java.lang.Iterable +import akka.japi.Util.immutableSeq +import akka.util.Collections.EmptyImmutableSeq /** * Domain events published to the event bus. @@ -28,9 +32,8 @@ object ClusterEvent { * Current snapshot state of the cluster. Sent to new subscriber. */ case class CurrentClusterState( - members: SortedSet[Member] = SortedSet.empty, + members: immutable.SortedSet[Member] = immutable.SortedSet.empty, unreachable: Set[Member] = Set.empty, - convergence: Boolean = false, seenBy: Set[Address] = Set.empty, leader: Option[Address] = None) extends ClusterDomainEvent { @@ -47,19 +50,15 @@ object ClusterEvent { * Java API * Read only */ - def getUnreachable: java.util.Set[Member] = { - import scala.collection.JavaConverters._ - unreachable.asJava - } + def getUnreachable: java.util.Set[Member] = + scala.collection.JavaConverters.setAsJavaSetConverter(unreachable).asJava /** * Java API * Read only */ - def getSeenBy: java.util.Set[Address] = { - import scala.collection.JavaConverters._ - seenBy.asJava - } + def getSeenBy: java.util.Set[Address] = + scala.collection.JavaConverters.setAsJavaSetConverter(seenBy).asJava /** * Java API @@ -76,57 +75,47 @@ object ClusterEvent { } /** - * A new member joined the cluster. + * A new member joined the cluster. Only published after convergence. 
*/ case class MemberJoined(member: Member) extends MemberEvent { if (member.status != Joining) throw new IllegalArgumentException("Expected Joining status, got: " + member) } /** - * Member status changed to Up + * Member status changed to Up. Only published after convergence. */ case class MemberUp(member: Member) extends MemberEvent { if (member.status != Up) throw new IllegalArgumentException("Expected Up status, got: " + member) } /** - * Member status changed to Leaving + * Member status changed to Leaving. Only published after convergence. */ case class MemberLeft(member: Member) extends MemberEvent { if (member.status != Leaving) throw new IllegalArgumentException("Expected Leaving status, got: " + member) } /** - * Member status changed to Exiting + * Member status changed to Exiting. Only published after convergence. */ case class MemberExited(member: Member) extends MemberEvent { if (member.status != Exiting) throw new IllegalArgumentException("Expected Exiting status, got: " + member) } /** - * A member is considered as unreachable by the failure detector. - */ - case class MemberUnreachable(member: Member) extends MemberEvent - - /** - * Member status changed to Down + * Member status changed to Down. Only published after convergence. */ case class MemberDowned(member: Member) extends MemberEvent { if (member.status != Down) throw new IllegalArgumentException("Expected Down status, got: " + member) } /** - * Member completely removed from the cluster + * Member completely removed from the cluster. Only published after convergence. */ case class MemberRemoved(member: Member) extends MemberEvent { if (member.status != Removed) throw new IllegalArgumentException("Expected Removed status, got: " + member) } - /** - * Cluster convergence state changed. - */ - case class ConvergenceChanged(convergence: Boolean) extends ClusterDomainEvent - /** * Leader of the cluster members changed. Only published after convergence. */ @@ -138,12 +127,23 @@ object ClusterEvent { def getLeader: Address = leader orNull } + /** + * A member is considered as unreachable by the failure detector. + */ + case class UnreachableMember(member: Member) extends ClusterDomainEvent + /** * INTERNAL API * - * Current snapshot of cluster member metrics. Published to subscribers. + * Current snapshot of cluster node metrics. Published to subscribers. 
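The member events above are only published after convergence, while `UnreachableMember` is published as soon as the failure detector reports it; a `CurrentClusterState` snapshot is sent to new subscribers. A hedged subscriber sketch (the actor name and log lines are illustrative):

```scala
import akka.actor.{ Actor, ActorLogging }
import akka.cluster.Cluster
import akka.cluster.ClusterEvent._

class SimpleClusterListener extends Actor with ActorLogging {
  val cluster = Cluster(context.system)

  // the first message after subscribing is a CurrentClusterState snapshot,
  // followed by the individual domain events
  override def preStart(): Unit = cluster.subscribe(self, classOf[ClusterDomainEvent])
  override def postStop(): Unit = cluster.unsubscribe(self)

  def receive = {
    case state: CurrentClusterState ⇒ log.info("Current members: {}", state.members.mkString(", "))
    case MemberUp(member)           ⇒ log.info("Member is Up: {}", member.address)
    case UnreachableMember(member)  ⇒ log.info("Member detected as unreachable: {}", member.address)
    case _: ClusterDomainEvent      ⇒ // not interesting here
  }
}
```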
*/ - case class ClusterMetricsChanged(nodes: Set[NodeMetrics]) extends ClusterDomainEvent + case class ClusterMetricsChanged(nodeMetrics: Set[NodeMetrics]) extends ClusterDomainEvent { + /** + * Java API + */ + def getNodeMetrics: java.lang.Iterable[NodeMetrics] = + scala.collection.JavaConverters.asJavaIterableConverter(nodeMetrics).asJava + } /** * INTERNAL API @@ -159,56 +159,75 @@ object ClusterEvent { /** * INTERNAL API */ - private[cluster] def diff(oldGossip: Gossip, newGossip: Gossip): IndexedSeq[ClusterDomainEvent] = { - val newMembers = newGossip.members -- oldGossip.members + private[cluster] def diffUnreachable(oldGossip: Gossip, newGossip: Gossip): immutable.Seq[UnreachableMember] = + if (newGossip eq oldGossip) Nil + else { + val newUnreachable = newGossip.overview.unreachable -- oldGossip.overview.unreachable + val unreachableEvents = newUnreachable map UnreachableMember - val membersGroupedByAddress = (newGossip.members.toList ++ oldGossip.members.toList).groupBy(_.address) - val changedMembers = membersGroupedByAddress collect { - case (_, newMember :: oldMember :: Nil) if newMember.status != oldMember.status ⇒ newMember + immutable.Seq.empty ++ unreachableEvents } - val memberEvents = (newMembers ++ changedMembers) map { m ⇒ - if (m.status == Joining) MemberJoined(m) - else if (m.status == Up) MemberUp(m) - else if (m.status == Leaving) MemberLeft(m) - else if (m.status == Exiting) MemberExited(m) - else throw new IllegalStateException("Unexpected member status: " + m) + /** + * INTERNAL API. + */ + private[cluster] def diffMemberEvents(oldGossip: Gossip, newGossip: Gossip): immutable.Seq[MemberEvent] = + if (newGossip eq oldGossip) Nil + else { + val newMembers = newGossip.members -- oldGossip.members + val membersGroupedByAddress = List(newGossip.members, oldGossip.members).flatten.groupBy(_.address) + val changedMembers = membersGroupedByAddress collect { + case (_, newMember :: oldMember :: Nil) if newMember.status != oldMember.status ⇒ newMember + } + val memberEvents = (newMembers ++ changedMembers) map { m ⇒ + m.status match { + case Joining ⇒ MemberJoined(m) + case Up ⇒ MemberUp(m) + case Leaving ⇒ MemberLeft(m) + case Exiting ⇒ MemberExited(m) + case _ ⇒ throw new IllegalStateException("Unexpected member status: " + m) + } + } + + val allNewUnreachable = newGossip.overview.unreachable -- oldGossip.overview.unreachable + val newDowned = allNewUnreachable filter { _.status == Down } + val downedEvents = newDowned map MemberDowned + + val unreachableGroupedByAddress = + List(newGossip.overview.unreachable, oldGossip.overview.unreachable).flatten.groupBy(_.address) + val unreachableDownMembers = unreachableGroupedByAddress collect { + case (_, newMember :: oldMember :: Nil) if newMember.status == Down && newMember.status != oldMember.status ⇒ + newMember + } + val unreachableDownedEvents = unreachableDownMembers map MemberDowned + + val removedEvents = (oldGossip.members -- newGossip.members -- newGossip.overview.unreachable) map { m ⇒ + MemberRemoved(m.copy(status = Removed)) + } + + (new VectorBuilder[MemberEvent]() ++= memberEvents ++= downedEvents ++= unreachableDownedEvents + ++= removedEvents).result() } - val allNewUnreachable = newGossip.overview.unreachable -- oldGossip.overview.unreachable - val (newDowned, newUnreachable) = allNewUnreachable partition { _.status == Down } - val downedEvents = newDowned map MemberDowned - val unreachableEvents = newUnreachable map MemberUnreachable + /** + * INTERNAL API + */ + private[cluster] def diffLeader(oldGossip: 
Gossip, newGossip: Gossip): immutable.Seq[LeaderChanged] = + if (newGossip.leader != oldGossip.leader) List(LeaderChanged(newGossip.leader)) + else Nil - val unreachableGroupedByAddress = - (newGossip.overview.unreachable.toList ++ oldGossip.overview.unreachable.toList).groupBy(_.address) - val unreachableDownMembers = unreachableGroupedByAddress collect { - case (_, newMember :: oldMember :: Nil) if newMember.status == Down && newMember.status != oldMember.status ⇒ - newMember + /** + * INTERNAL API + */ + private[cluster] def diffSeen(oldGossip: Gossip, newGossip: Gossip): immutable.Seq[SeenChanged] = + if (newGossip eq oldGossip) Nil + else { + val newConvergence = newGossip.convergence + val newSeenBy = newGossip.seenBy + if (newConvergence != oldGossip.convergence || newSeenBy != oldGossip.seenBy) + List(SeenChanged(newConvergence, newSeenBy)) + else Nil } - val unreachableDownedEvents = unreachableDownMembers map MemberDowned - - val removedEvents = (oldGossip.members -- newGossip.members -- newGossip.overview.unreachable) map { m ⇒ - MemberRemoved(m.copy(status = Removed)) - } - - val newConvergence = newGossip.convergence - val convergenceChanged = newConvergence != oldGossip.convergence - val convergenceEvents = if (convergenceChanged) Seq(ConvergenceChanged(newConvergence)) else Seq.empty - - val leaderEvents = - if (newGossip.leader != oldGossip.leader) Seq(LeaderChanged(newGossip.leader)) - else Seq.empty - - val newSeenBy = newGossip.seenBy - val seenEvents = - if (convergenceChanged || newSeenBy != oldGossip.seenBy) Seq(SeenChanged(newConvergence, newSeenBy)) - else Seq.empty - - memberEvents.toIndexedSeq ++ unreachableEvents ++ downedEvents ++ unreachableDownedEvents ++ removedEvents ++ - leaderEvents ++ convergenceEvents ++ seenEvents - } - } /** @@ -220,34 +239,30 @@ private[cluster] final class ClusterDomainEventPublisher extends Actor with Acto import InternalClusterAction._ var latestGossip: Gossip = Gossip() - - // Keep track of LeaderChanged event. Should not be published until - // convergence, and it should only be published when leader actually - // changed to another node. 3 states: - // - None: No LeaderChanged detected yet, nothing published yet - // - Some(Left): Stashed LeaderChanged to be published later, when convergence - // - Some(Right): Latest published LeaderChanged - var leaderChangedState: Option[Either[LeaderChanged, LeaderChanged]] = None + var latestConvergedGossip: Gossip = Gossip() + var memberEvents: immutable.Seq[MemberEvent] = immutable.Seq.empty def receive = { - case PublishChanges(oldGossip, newGossip) ⇒ publishChanges(oldGossip, newGossip) + case PublishChanges(newGossip) ⇒ publishChanges(newGossip) case currentStats: CurrentInternalStats ⇒ publishInternalStats(currentStats) case PublishCurrentClusterState(receiver) ⇒ publishCurrentClusterState(receiver) case Subscribe(subscriber, to) ⇒ subscribe(subscriber, to) case Unsubscribe(subscriber, to) ⇒ unsubscribe(subscriber, to) case PublishEvent(event) ⇒ publish(event) - case PublishDone ⇒ sender ! PublishDone + case PublishStart ⇒ publishStart() + case PublishDone ⇒ publishDone(sender) } def eventStream: EventStream = context.system.eventStream def publishCurrentClusterState(receiver: Option[ActorRef]): Unit = { + // The state is a mix of converged and latest gossip to mimic what you + // would have seen if you were listening to the events.
val state = CurrentClusterState( - members = latestGossip.members, + members = latestConvergedGossip.members, unreachable = latestGossip.overview.unreachable, - convergence = latestGossip.convergence, seenBy = latestGossip.seenBy, - leader = latestGossip.leader) + leader = latestConvergedGossip.leader) receiver match { case Some(ref) ⇒ ref ! state case None ⇒ publish(state) @@ -264,47 +279,43 @@ private[cluster] final class ClusterDomainEventPublisher extends Actor with Acto case Some(c) ⇒ eventStream.unsubscribe(subscriber, c) } - def publishChanges(oldGossip: Gossip, newGossip: Gossip): Unit = { + def publishChanges(newGossip: Gossip): Unit = { + val oldGossip = latestGossip // keep the latestGossip to be sent to new subscribers latestGossip = newGossip - diff(oldGossip, newGossip) foreach { event ⇒ - event match { - case x @ LeaderChanged(_) if leaderChangedState == Some(Right(x)) ⇒ - // skip, this leader has already been published - - case x @ LeaderChanged(_) if oldGossip.convergence && newGossip.convergence ⇒ - // leader changed and immediate convergence - leaderChangedState = Some(Right(x)) - publish(x) - - case x: LeaderChanged ⇒ - // publish later, when convergence - leaderChangedState = Some(Left(x)) - - case ConvergenceChanged(true) ⇒ - // now it's convergence, publish eventual stashed LeaderChanged event - leaderChangedState match { - case Some(Left(x)) ⇒ - leaderChangedState = Some(Right(x)) - publish(x) - - case _ ⇒ // nothing stashed - } - publish(event) - - case MemberUnreachable(m) ⇒ - publish(event) - // notify DeathWatch about unreachable node - publish(AddressTerminated(m.address)) - - case _ ⇒ - // all other events - publish(event) - } + // first publish the diffUnreachable between the last two gossips + diffUnreachable(oldGossip, newGossip) foreach { event ⇒ + publish(event) + // notify DeathWatch about unreachable node + publish(AddressTerminated(event.member.address)) } + // buffer up the MemberEvents waiting for convergence + memberEvents ++= diffMemberEvents(oldGossip, newGossip) + // if we have convergence then publish the MemberEvents and possibly a LeaderChanged + if (newGossip.convergence) { + val previousConvergedGossip = latestConvergedGossip + latestConvergedGossip = newGossip + memberEvents foreach publish + memberEvents = immutable.Seq.empty + diffLeader(previousConvergedGossip, latestConvergedGossip) foreach publish + } + // publish internal SeenState for testing purposes + diffSeen(oldGossip, newGossip) foreach publish } def publishInternalStats(currentStats: CurrentInternalStats): Unit = publish(currentStats) def publish(event: AnyRef): Unit = eventStream publish event + + def publishStart(): Unit = clearState() + + def publishDone(receiver: ActorRef): Unit = { + clearState() + receiver ! 
PublishDoneFinished + } + + def clearState(): Unit = { + latestGossip = Gossip() + latestConvergedGossip = Gossip() + } } diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterHeartbeat.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterHeartbeat.scala index 8dfd6ba04b..3ada580bb2 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterHeartbeat.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterHeartbeat.scala @@ -5,7 +5,7 @@ package akka.cluster import language.postfixOps -import scala.collection.immutable.SortedSet +import scala.collection.immutable import scala.annotation.tailrec import scala.concurrent.duration._ import java.net.URLEncoder @@ -96,7 +96,10 @@ private[cluster] final class ClusterHeartbeatSender extends Actor with ActorLogg val heartbeatTask = scheduler.schedule(PeriodicTasksInitialDelay max HeartbeatInterval, HeartbeatInterval, self, HeartbeatTick) - override def preStart(): Unit = cluster.subscribe(self, classOf[MemberEvent]) + override def preStart(): Unit = { + cluster.subscribe(self, classOf[MemberEvent]) + cluster.subscribe(self, classOf[UnreachableMember]) + } override def postStop(): Unit = { heartbeatTask.cancel() @@ -112,7 +115,7 @@ private[cluster] final class ClusterHeartbeatSender extends Actor with ActorLogg def receive = { case HeartbeatTick ⇒ heartbeat() case s: CurrentClusterState ⇒ reset(s) - case MemberUnreachable(m) ⇒ removeMember(m) + case UnreachableMember(m) ⇒ removeMember(m) case MemberRemoved(m) ⇒ removeMember(m) case e: MemberEvent ⇒ addMember(e.member) case JoinInProgress(a, d) ⇒ addJoinInProgress(a, d) diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterJmx.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterJmx.scala index 4eb27e836e..ae023263c8 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterJmx.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterJmx.scala @@ -16,17 +16,70 @@ import javax.management.InstanceNotFoundException * Interface for the cluster JMX MBean. */ trait ClusterNodeMBean { + + /** + * Member status for this node. + */ def getMemberStatus: String + + /** + * Comma separated addresses of member nodes, sorted in the cluster ring order. + * The address format is `akka://actor-system-name@hostname:port` + */ + def getMembers: String + + /** + * Comma separated addresses of unreachable member nodes. + * The address format is `akka://actor-system-name@hostname:port` + */ + def getUnreachable: String + + /* + * String that will list all nodes in the node ring as follows: + * {{{ + * Members: + * Member(address = akka://system0@localhost:5550, status = Up) + * Member(address = akka://system1@localhost:5551, status = Up) + * Unreachable: + * Member(address = akka://system2@localhost:5553, status = Down) + * }}} + */ def getClusterStatus: String + + /** + * Get the address of the current leader. + * The address format is `akka://actor-system-name@hostname:port` + */ def getLeader: String + /** + * Does the cluster consist of only one member? + */ def isSingleton: Boolean - def isConvergence: Boolean - def isAvailable: Boolean - def isRunning: Boolean + /** + * Returns true if the node is not unreachable and not `Down` + * and not `Removed`. + */ + def isAvailable: Boolean + + /** + * Try to join this cluster node with the node specified by 'address'. + * The address format is `akka://actor-system-name@hostname:port`. + * A 'Join(thisNodeAddress)' command is sent to the node to join. 
+ */ def join(address: String) + + /** + * Send command to issue state transition to LEAVING for the node specified by 'address'. + * The address format is `akka://actor-system-name@hostname:port` + */ def leave(address: String) + + /** + * Send command to DOWN the node specified by 'address'. + * The address format is `akka://actor-system-name@hostname:port` + */ def down(address: String) } @@ -47,34 +100,26 @@ private[akka] class ClusterJmx(cluster: Cluster, log: LoggingAdapter) { // JMX attributes (bean-style) - /* - * Sends a string to the JMX client that will list all nodes in the node ring as follows: - * {{{ - * Members: - * Member(address = akka://system0@localhost:5550, status = Up) - * Member(address = akka://system1@localhost:5551, status = Up) - * Unreachable: - * Member(address = akka://system2@localhost:5553, status = Down) - * }}} - */ def getClusterStatus: String = { val unreachable = clusterView.unreachableMembers "\nMembers:\n\t" + clusterView.members.mkString("\n\t") + { if (unreachable.nonEmpty) "\nUnreachable:\n\t" + unreachable.mkString("\n\t") else "" } } + def getMembers: String = + clusterView.members.toSeq.map(_.address).mkString(",") + + def getUnreachable: String = + clusterView.unreachableMembers.map(_.address).mkString(",") + def getMemberStatus: String = clusterView.status.toString - def getLeader: String = clusterView.leader.toString + def getLeader: String = clusterView.leader.fold("")(_.toString) def isSingleton: Boolean = clusterView.isSingletonCluster - def isConvergence: Boolean = clusterView.convergence - def isAvailable: Boolean = clusterView.isAvailable - def isRunning: Boolean = clusterView.isRunning - // JMX commands def join(address: String) = cluster.join(AddressFromURIString(address)) diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterMetricsCollector.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterMetricsCollector.scala index a3abd94316..2a7951a667 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterMetricsCollector.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterMetricsCollector.scala @@ -4,34 +4,39 @@ package akka.cluster -import scala.language.postfixOps +import java.io.Closeable +import java.lang.System.{ currentTimeMillis ⇒ newTimestamp } +import java.lang.management.{ OperatingSystemMXBean, MemoryMXBean, ManagementFactory } +import java.lang.reflect.InvocationTargetException +import java.lang.reflect.Method +import scala.collection.immutable import scala.concurrent.duration._ -import scala.collection.immutable.{ SortedSet, Map } import scala.concurrent.forkjoin.ThreadLocalRandom import scala.util.{ Try, Success, Failure } -import scala.math.ScalaNumericAnyConversions -import runtime.{ ScalaNumberProxy, RichLong, RichDouble, RichInt } - -import akka.actor._ -import akka.event.LoggingAdapter +import akka.ConfigurationException +import akka.actor.Actor +import akka.actor.ActorLogging +import akka.actor.ActorRef +import akka.actor.ActorSystem +import akka.actor.Address +import akka.actor.DynamicAccess +import akka.actor.ExtendedActorSystem import akka.cluster.MemberStatus.Up - -import java.lang.management.{ OperatingSystemMXBean, MemoryMXBean, ManagementFactory } -import java.lang.reflect.Method -import java.lang.System.{ currentTimeMillis ⇒ newTimestamp } +import akka.event.Logging +import java.lang.management.MemoryUsage /** * INTERNAL API. * - * This strategy is primarily for load-balancing of nodes. It controls metrics sampling + * Cluster metrics is primarily for load-balancing of nodes. 
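The bean-style getters documented above are exposed as JMX attributes. A hedged sketch of reading them in-process; it assumes the `akka:type=Cluster` object name under which `ClusterJmx` registers the MBean:

```scala
import java.lang.management.ManagementFactory
import javax.management.ObjectName

object ClusterMBeanReader {
  def main(args: Array[String]): Unit = {
    val mbeanServer = ManagementFactory.getPlatformMBeanServer
    val clusterMBeanName = new ObjectName("akka:type=Cluster") // assumed object name
    // getMembers/getUnreachable are exposed as the "Members"/"Unreachable" attributes
    val members = mbeanServer.getAttribute(clusterMBeanName, "Members").asInstanceOf[String]
    val unreachable = mbeanServer.getAttribute(clusterMBeanName, "Unreachable").asInstanceOf[String]
    println(s"members=[$members] unreachable=[$unreachable]")
  }
}
```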
It controls metrics sampling * at a regular frequency, prepares highly variable data for further analysis by other entities, - * and publishes the latest cluster metrics data around the node ring to assist in determining - * the need to redirect traffic to the least-loaded nodes. + * and publishes the latest cluster metrics data around the node ring and local eventStream + * to assist in determining the need to redirect traffic to the least-loaded nodes. * * Metrics sampling is delegated to the [[akka.cluster.MetricsCollector]]. * - * Calculation of statistical data for each monitored process is delegated to the - * [[akka.cluster.DataStream]] for exponential smoothing, with additional decay factor. + * Smoothing of the data for each monitored process is delegated to the + * [[akka.cluster.EWMA]] for exponential weighted moving average. */ private[cluster] class ClusterMetricsCollector(publisher: ActorRef) extends Actor with ActorLogging { @@ -46,17 +51,17 @@ private[cluster] class ClusterMetricsCollector(publisher: ActorRef) extends Acto /** * The node ring gossipped that contains only members that are Up. */ - var nodes: SortedSet[Address] = SortedSet.empty + var nodes: immutable.SortedSet[Address] = immutable.SortedSet.empty /** * The latest metric values with their statistical data. */ - var latestGossip: MetricsGossip = MetricsGossip(MetricsRateOfDecay) + var latestGossip: MetricsGossip = MetricsGossip.empty /** * The metrics collector that samples data on the node. */ - val collector: MetricsCollector = MetricsCollector(selfAddress, log, context.system.asInstanceOf[ExtendedActorSystem].dynamicAccess) + val collector: MetricsCollector = MetricsCollector(context.system.asInstanceOf[ExtendedActorSystem], settings) /** * Start periodic gossip to random nodes in cluster @@ -72,6 +77,7 @@ private[cluster] class ClusterMetricsCollector(publisher: ActorRef) extends Acto override def preStart(): Unit = { cluster.subscribe(self, classOf[MemberEvent]) + cluster.subscribe(self, classOf[UnreachableMember]) log.info("Metrics collection has started successfully on node [{}]", selfAddress) } @@ -79,8 +85,9 @@ private[cluster] class ClusterMetricsCollector(publisher: ActorRef) extends Acto case GossipTick ⇒ gossip() case MetricsTick ⇒ collect() case state: CurrentClusterState ⇒ receiveState(state) - case MemberUp(m) ⇒ receiveMember(m) - case e: MemberEvent ⇒ removeMember(e) + case MemberUp(m) ⇒ addMember(m) + case e: MemberEvent ⇒ removeMember(e.member) + case UnreachableMember(m) ⇒ removeMember(m) case msg: MetricsGossipEnvelope ⇒ receiveGossip(msg) } @@ -94,21 +101,22 @@ private[cluster] class ClusterMetricsCollector(publisher: ActorRef) extends Acto /** * Adds a member to the node ring. */ - def receiveMember(member: Member): Unit = nodes += member.address + def addMember(member: Member): Unit = nodes += member.address /** * Removes a member from the member node ring. */ - def removeMember(event: MemberEvent): Unit = { - nodes -= event.member.address - latestGossip = latestGossip remove event.member.address + def removeMember(member: Member): Unit = { + nodes -= member.address + latestGossip = latestGossip remove member.address publish() } /** * Updates the initial node ring for those nodes that are [[akka.cluster.MemberStatus.Up]]. 
*/ - def receiveState(state: CurrentClusterState): Unit = nodes = state.members collect { case m if m.status == Up ⇒ m.address } + def receiveState(state: CurrentClusterState): Unit = + nodes = state.members collect { case m if m.status == Up ⇒ m.address } /** * Samples the latest metrics for the node, updates metrics statistics in @@ -123,27 +131,33 @@ private[cluster] class ClusterMetricsCollector(publisher: ActorRef) extends Acto /** * Receives changes from peer nodes, merges remote with local gossip nodes, then publishes - * changes to the event stream for load balancing router consumption, and gossips to peers. + * changes to the event stream for load balancing router consumption, and gossip back. */ def receiveGossip(envelope: MetricsGossipEnvelope): Unit = { - val remoteGossip = envelope.gossip - - if (remoteGossip != latestGossip) { - latestGossip = latestGossip merge remoteGossip - publish() - gossipTo(envelope.from) - } + // remote node might not have same view of member nodes, this side should only care + // about nodes that are known here, otherwise removed nodes can come back + val otherGossip = envelope.gossip.filter(nodes) + latestGossip = latestGossip merge otherGossip + publish() + if (!envelope.reply) + replyGossipTo(envelope.from) } /** * Gossip to peer nodes. */ - def gossip(): Unit = selectRandomNode((nodes - selfAddress).toIndexedSeq) foreach gossipTo + def gossip(): Unit = selectRandomNode((nodes - selfAddress).toVector) foreach gossipTo def gossipTo(address: Address): Unit = - context.actorFor(self.path.toStringWithAddress(address)) ! MetricsGossipEnvelope(selfAddress, latestGossip) + sendGossip(address, MetricsGossipEnvelope(selfAddress, latestGossip, reply = false)) - def selectRandomNode(addresses: IndexedSeq[Address]): Option[Address] = + def replyGossipTo(address: Address): Unit = + sendGossip(address, MetricsGossipEnvelope(selfAddress, latestGossip, reply = true)) + + def sendGossip(address: Address, envelope: MetricsGossipEnvelope): Unit = + context.actorFor(self.path.toStringWithAddress(address)) ! envelope + + def selectRandomNode(addresses: immutable.IndexedSeq[Address]): Option[Address] = if (addresses.isEmpty) None else Some(addresses(ThreadLocalRandom.current nextInt addresses.size)) /** @@ -153,61 +167,50 @@ private[cluster] class ClusterMetricsCollector(publisher: ActorRef) extends Acto } +/** + * INTERNAL API + */ +private[cluster] object MetricsGossip { + val empty = MetricsGossip(Set.empty[NodeMetrics]) +} + /** * INTERNAL API * * @param nodes metrics per node */ -private[cluster] case class MetricsGossip(rateOfDecay: Int, nodes: Set[NodeMetrics] = Set.empty) { +private[cluster] case class MetricsGossip(nodes: Set[NodeMetrics]) { /** * Removes nodes if their correlating node ring members are not [[akka.cluster.MemberStatus.Up]] */ def remove(node: Address): MetricsGossip = copy(nodes = nodes filterNot (_.address == node)) + /** + * Only the nodes that are in the `includeNodes` Set. + */ + def filter(includeNodes: Set[Address]): MetricsGossip = + copy(nodes = nodes filter { includeNodes contains _.address }) + /** * Adds new remote [[akka.cluster.NodeMetrics]] and merges existing from a remote gossip. 
*/ - def merge(remoteGossip: MetricsGossip): MetricsGossip = { - val remoteNodes = remoteGossip.nodes.map(n ⇒ n.address -> n).toMap - val toMerge = nodeKeys intersect remoteNodes.keySet - val onlyInRemote = remoteNodes.keySet -- nodeKeys - val onlyInLocal = nodeKeys -- remoteNodes.keySet + def merge(otherGossip: MetricsGossip): MetricsGossip = + otherGossip.nodes.foldLeft(this) { (gossip, nodeMetrics) ⇒ gossip :+ nodeMetrics } - val seen = nodes.collect { - case n if toMerge contains n.address ⇒ n merge remoteNodes(n.address) - case n if onlyInLocal contains n.address ⇒ n - } - - val unseen = remoteGossip.nodes.collect { case n if onlyInRemote contains n.address ⇒ n } - - copy(nodes = seen ++ unseen) + /** + * Adds new local [[akka.cluster.NodeMetrics]], or merges an existing. + */ + def :+(newNodeMetrics: NodeMetrics): MetricsGossip = nodeMetricsFor(newNodeMetrics.address) match { + case Some(existingNodeMetrics) ⇒ + copy(nodes = nodes - existingNodeMetrics + (existingNodeMetrics merge newNodeMetrics)) + case None ⇒ copy(nodes = nodes + newNodeMetrics) } /** - * Adds new local [[akka.cluster.NodeMetrics]] and initializes the data, or merges an existing. + * Returns [[akka.cluster.NodeMetrics]] for a node if exists. */ - def :+(data: NodeMetrics): MetricsGossip = { - val previous = metricsFor(data) - val names = previous map (_.name) - - val (toMerge: Set[Metric], unseen: Set[Metric]) = data.metrics partition (a ⇒ names contains a.name) - val initialized = unseen.map(_.initialize(rateOfDecay)) - val merged = toMerge flatMap (latest ⇒ previous.collect { case peer if latest same peer ⇒ peer :+ latest }) - - val refreshed = nodes filterNot (_.address == data.address) - copy(nodes = refreshed + data.copy(metrics = initialized ++ merged)) - } - - /** - * Returns a set of [[akka.actor.Address]] for a given node set. - */ - def nodeKeys: Set[Address] = nodes map (_.address) - - /** - * Returns metrics for a node if exists. - */ - def metricsFor(node: NodeMetrics): Set[Metric] = nodes flatMap (n ⇒ if (n same node) n.metrics else Set.empty[Metric]) + def nodeMetricsFor(address: Address): Option[NodeMetrics] = nodes find { n ⇒ n.address == address } } @@ -215,7 +218,31 @@ private[cluster] case class MetricsGossip(rateOfDecay: Int, nodes: Set[NodeMetri * INTERNAL API * Envelope adding a sender address to the gossip. */ -private[cluster] case class MetricsGossipEnvelope(from: Address, gossip: MetricsGossip) extends ClusterMessage +private[cluster] case class MetricsGossipEnvelope(from: Address, gossip: MetricsGossip, reply: Boolean) + extends ClusterMessage + +object EWMA { + /** + * math.log(2) + */ + private val LogOf2 = 0.69315 + + /** + * Calculate the alpha (decay factor) used in [[akka.cluster.EWMA]] + * from specified half-life and interval between observations. + * Half-life is the interval over which the weights decrease by a factor of two. + * The relevance of each data sample is halved for every passing half-life duration, + * i.e. after 4 times the half-life, a data sample’s relevance is reduced to 6% of + * its original relevance. The initial relevance of a data sample is given by + * 1 – 0.5 ^ (collect-interval / half-life). 
+ */ + def alpha(halfLife: FiniteDuration, collectInterval: FiniteDuration): Double = { + val halfLifeMillis = halfLife.toMillis + require(halfLife.toMillis > 0, "halfLife must be > 0 s") + val decayRate = LogOf2 / halfLifeMillis + 1 - math.exp(-decayRate * collectInterval.toMillis) + } +} /** * The exponentially weighted moving average (EWMA) approach captures short-term @@ -223,176 +250,282 @@ private[cluster] case class MetricsGossipEnvelope(from: Address, gossip: Metrics * of its alpha, or decay factor, this provides a statistical streaming data model * that is exponentially biased towards newer entries. * + * http://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average + * * An EWMA only needs the most recent forecast value to be kept, as opposed to a standard * moving average model. * * INTERNAL API * - * @param decay sets how quickly the exponential weighting decays for past data compared to new data + * @param alpha decay factor, sets how quickly the exponential weighting decays for past data compared to new data, + * see http://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average * - * @param ewma the current exponentially weighted moving average, e.g. Y(n - 1), or, + * @param value the current exponentially weighted moving average, e.g. Y(n - 1), or, * the sampled value resulting from the previous smoothing iteration. * This value is always used as the previous EWMA to calculate the new EWMA. * - * @param timestamp the most recent time of sampling - * - * @param startTime the time of initial sampling for this data stream */ -private[cluster] case class DataStream(decay: Int, ewma: ScalaNumericAnyConversions, startTime: Long, timestamp: Long) - extends ClusterMessage with MetricNumericConverter { +private[cluster] case class EWMA(value: Double, alpha: Double) extends ClusterMessage { - /** - * The rate at which the weights of past observations - * decay as they become more distant. - */ - private val α = 2 / decay + 1 + require(0.0 <= alpha && alpha <= 1.0, "alpha must be between 0.0 and 1.0") /** * Calculates the exponentially weighted moving average for a given monitored data set. - * The datam can be too large to fit into an int or long, thus we use ScalaNumericAnyConversions, - * and defer to BigInt or BigDecimal. * * @param xn the new data point - * @return an new [[akka.cluster.DataStream]] with the updated yn and timestamp + * @return a new [[akka.cluster.EWMA]] with the updated value */ - def :+(xn: ScalaNumericAnyConversions): DataStream = convert(xn) fold ( - nl ⇒ copy(ewma = BigInt(α * nl + 1 - α * ewma.longValue()), timestamp = newTimestamp), - nd ⇒ copy(ewma = BigDecimal(α * nd + 1 - α * ewma.doubleValue()), timestamp = newTimestamp)) - - /** - * The duration of observation for this data stream - */ - def duration: FiniteDuration = (timestamp - startTime) millis + def :+(xn: Double): EWMA = { + val newValue = (alpha * xn) + (1 - alpha) * value + if (newValue == value) this // no change + else copy(value = newValue) + } } /** - * INTERNAL API + * Metrics key/value. * - * Companion object of DataStream class. - */ -private[cluster] object DataStream { - - def apply(decay: Int, data: ScalaNumericAnyConversions): Option[DataStream] = if (decay > 0) - Some(DataStream(decay, data, newTimestamp, newTimestamp)) else None - -} - -/** - * INTERNAL API + * Equality of Metric is based on its name. 
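A small worked sketch of the smoothing above; it assumes placement in the `akka.cluster` package, since `EWMA` is internal API, and the half-life, interval and observations are made-up numbers:

```scala
package akka.cluster

import scala.concurrent.duration._

object EwmaSketch extends App {
  // half-life 12 s with a 3 s collect interval gives
  // alpha = 1 - exp(-(ln 2 / 12000) * 3000) ≈ 0.159
  val alpha = EWMA.alpha(halfLife = 12.seconds, collectInterval = 3.seconds)

  // fold a few observations into the moving average, starting from 100.0
  val smoothed = List(100.0, 110.0, 90.0).foldLeft(EWMA(value = 100.0, alpha = alpha)) {
    (ewma, observation) ⇒ ewma :+ observation
  }
  println(f"alpha=$alpha%.3f smoothed=${smoothed.value}%.2f")
}
```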
* * @param name the metric name - * - * @param value the metric value, which may or may not be defined - * + * @param value the metric value, which may or may not be defined, it must be a valid numerical value, + * see [[akka.cluster.MetricNumericConverter.defined()]] * @param average the data stream of the metric value, for trending over time. Metrics that are already - * averages (e.g. system load average) or finite (e.g. as total cores), are not trended. + * averages (e.g. system load average) or finite (e.g. as number of processors), are not trended. */ -private[cluster] case class Metric(name: String, value: Option[ScalaNumericAnyConversions], average: Option[DataStream]) +case class Metric private (name: String, value: Number, private val average: Option[EWMA]) extends ClusterMessage with MetricNumericConverter { - /** - * Returns the metric with a new data stream for data trending if eligible, - * otherwise returns the unchanged metric. - */ - def initialize(decay: Int): Metric = if (initializable) copy(average = DataStream(decay, value.get)) else this + require(defined(value), s"Invalid Metric [$name] value [$value]") /** * If defined ( [[akka.cluster.MetricNumericConverter.defined()]] ), updates the new * data point, and if defined, updates the data stream. Returns the updated metric. */ - def :+(latest: Metric): Metric = latest.value match { - case Some(v) if this same latest ⇒ average match { - case Some(previous) ⇒ copy(value = Some(v), average = Some(previous :+ v)) - case None if latest.average.isDefined ⇒ copy(value = Some(v), average = latest.average) - case None if !latest.average.isDefined ⇒ copy(value = Some(v)) - } - case None ⇒ this + def :+(latest: Metric): Metric = if (this sameAs latest) average match { + case Some(avg) ⇒ copy(value = latest.value, average = Some(avg :+ latest.value.doubleValue)) + case None if latest.average.isDefined ⇒ copy(value = latest.value, average = latest.average) + case _ ⇒ copy(value = latest.value) + } + else this + + /** + * The numerical value of the average, if defined, otherwise the latest value + */ + def smoothValue: Double = average match { + case Some(avg) ⇒ avg.value + case None ⇒ value.doubleValue } /** - * @see [[akka.cluster.MetricNumericConverter.defined()]] + * @return true if this value is smoothed */ - def isDefined: Boolean = value match { - case Some(a) ⇒ defined(a) - case None ⇒ false - } + def isSmooth: Boolean = average.isDefined /** * Returns true if that is tracking the same metric as this. */ - def same(that: Metric): Boolean = name == that.name + def sameAs(that: Metric): Boolean = name == that.name - /** - * Returns true if the metric requires initialization. - */ - def initializable: Boolean = trendable && isDefined && average.isEmpty - - /** - * Returns true if the metric is a value applicable for trending. - */ - def trendable: Boolean = !(Metric.noStream contains name) - -} - -/** - * INTERNAL API - * - * Companion object of Metric class. - */ -private[cluster] object Metric extends MetricNumericConverter { - - /** - * The metrics that are already averages or finite are not trended over time. - */ - private val noStream = Set("system-load-average", "total-cores", "processors") - - /** - * Evaluates validity of value based on whether it is available (SIGAR on classpath) - * or defined for the OS (JMX). If undefined we set the value option to None and do not modify - * the latest sampled metric to avoid skewing the statistical trend. 
- */ - def apply(name: String, value: Option[ScalaNumericAnyConversions]): Metric = value match { - case Some(v) if defined(v) ⇒ Metric(name, value, None) - case _ ⇒ Metric(name, None, None) + override def hashCode = name.## + override def equals(obj: Any) = obj match { + case other: Metric ⇒ sameAs(other) + case _ ⇒ false + } + +} + +/** + * Factory for creating valid Metric instances. + */ +object Metric extends MetricNumericConverter { + + /** + * Creates a new Metric instance if the value is valid, otherwise None + * is returned. Invalid numeric values are negative and NaN/Infinite. + */ + def create(name: String, value: Number, decayFactor: Option[Double]): Option[Metric] = + if (defined(value)) Some(new Metric(name, value, ceateEWMA(value.doubleValue, decayFactor))) + else None + + /** + * Creates a new Metric instance if the Try is successful and the value is valid, + * otherwise None is returned. Invalid numeric values are negative and NaN/Infinite. + */ + def create(name: String, value: Try[Number], decayFactor: Option[Double]): Option[Metric] = value match { + case Success(v) ⇒ create(name, v, decayFactor) + case Failure(_) ⇒ None + } + + private def ceateEWMA(value: Double, decayFactor: Option[Double]): Option[EWMA] = decayFactor match { + case Some(alpha) ⇒ Some(EWMA(value, alpha)) + case None ⇒ None } } /** - * INTERNAL API - * * The snapshot of current sampled health metrics for any monitored process. * Collected and gossipped at regular intervals for dynamic cluster management strategies. * - * For the JVM memory. The amount of used and committed memory will always be <= max if max is defined. - * A memory allocation may fail if it attempts to increase the used memory such that used > committed - * even if used <= max is true (e.g. when the system virtual memory is low). - * - * The system is possibly nearing a bottleneck if the system load average is nearing in cpus/cores. + * Equality of NodeMetrics is based on its address. * * @param address [[akka.actor.Address]] of the node the metrics are gathered at - * - * @param timestamp the time of sampling - * - * @param metrics the array of sampled [[akka.actor.Metric]] + * @param timestamp the time of sampling, in milliseconds since midnight, January 1, 1970 UTC + * @param metrics the set of sampled [[akka.actor.Metric]] */ -private[cluster] case class NodeMetrics(address: Address, timestamp: Long, metrics: Set[Metric] = Set.empty[Metric]) extends ClusterMessage { +case class NodeMetrics(address: Address, timestamp: Long, metrics: Set[Metric] = Set.empty[Metric]) extends ClusterMessage { /** * Returns the most recent data. */ - def merge(that: NodeMetrics): NodeMetrics = if (this updatable that) copy(metrics = that.metrics, timestamp = that.timestamp) else this + def merge(that: NodeMetrics): NodeMetrics = { + require(address == that.address, s"merge only allowed for same address, [$address] != [$that.address]") + if (timestamp >= that.timestamp) this // that is older + else { + // equality is based on the name of the Metric and Set doesn't replace existing element + copy(metrics = that.metrics ++ metrics, timestamp = that.timestamp) + } + } + + def metric(key: String): Option[Metric] = metrics.collectFirst { case m if m.name == key ⇒ m } /** - * Returns true if that address is the same as this and its metric set is more recent. 
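A hedged sketch of the `Metric.create` factory and the `NodeMetrics.merge` semantics introduced above (the address, timestamps and sampled values are invented):

```scala
import akka.actor.Address
import akka.cluster.{ Metric, NodeMetrics }

object NodeMetricsSketch extends App {
  val address = Address("akka", "ClusterSystem", "host1", 2552)

  // create returns None for negative or NaN/Infinite values;
  // decayFactor = Some(alpha) enables EWMA smoothing, None keeps the raw value
  val heapUsed: Option[Metric] = Metric.create("heap-memory-used", 256000000L, decayFactor = Some(0.18))
  val processors: Option[Metric] = Metric.create("processors", 8, decayFactor = None)

  val older = NodeMetrics(address, timestamp = 1L, metrics = Set(heapUsed, processors).flatten)
  val newer = older.copy(timestamp = 2L)

  // merge keeps the newest sample; Metric equality is by name, so names are not duplicated
  assert((older merge newer).timestamp == 2L)
}
```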
+ * Java API */ - def updatable(that: NodeMetrics): Boolean = (this same that) && (that.timestamp > timestamp) + def getMetrics: java.lang.Iterable[Metric] = + scala.collection.JavaConverters.asJavaIterableConverter(metrics).asJava /** * Returns true if that address is the same as this */ - def same(that: NodeMetrics): Boolean = address == that.address + def sameAs(that: NodeMetrics): Boolean = address == that.address + + override def hashCode = address.## + override def equals(obj: Any) = obj match { + case other: NodeMetrics ⇒ sameAs(other) + case _ ⇒ false + } + +} + +/** + * Definitions of the built-in standard metrics. + * + * The following extractors and data structures makes it easy to consume the + * [[akka.cluster.NodeMetrics]] in for example load balancers. + */ +object StandardMetrics { + + // Constants for the heap related Metric names + final val HeapMemoryUsed = "heap-memory-used" + final val HeapMemoryCommitted = "heap-memory-committed" + final val HeapMemoryMax = "heap-memory-max" + + // Constants for the cpu related Metric names + final val SystemLoadAverage = "system-load-average" + final val Processors = "processors" + final val CpuCombined = "cpu-combined" + + object HeapMemory { + + /** + * Given a NodeMetrics it returns the HeapMemory data if the nodeMetrics contains + * necessary heap metrics. + * @return if possible a tuple matching the HeapMemory constructor parameters + */ + def unapply(nodeMetrics: NodeMetrics): Option[(Address, Long, Long, Long, Option[Long])] = { + for { + used ← nodeMetrics.metric(HeapMemoryUsed) + committed ← nodeMetrics.metric(HeapMemoryCommitted) + } yield (nodeMetrics.address, nodeMetrics.timestamp, + used.smoothValue.longValue, committed.smoothValue.longValue, + nodeMetrics.metric(HeapMemoryMax).map(_.smoothValue.longValue)) + } + + } + + /** + * Java API to extract HeapMemory data from nodeMetrics, if the nodeMetrics + * contains necessary heap metrics, otherwise it returns null. + */ + def extractHeapMemory(nodeMetrics: NodeMetrics): HeapMemory = nodeMetrics match { + case HeapMemory(address, timestamp, used, committed, max) ⇒ + // note that above extractor returns tuple + HeapMemory(address, timestamp, used, committed, max) + case _ ⇒ null + } + + /** + * The amount of used and committed memory will always be <= max if max is defined. + * A memory allocation may fail if it attempts to increase the used memory such that used > committed + * even if used <= max is true (e.g. when the system virtual memory is low). + * + * @param address [[akka.actor.Address]] of the node the metrics are gathered at + * @param timestamp the time of sampling, in milliseconds since midnight, January 1, 1970 UTC + * @param used the current sum of heap memory used from all heap memory pools (in bytes) + * @param committed the current sum of heap memory guaranteed to be available to the JVM + * from all heap memory pools (in bytes). Committed will always be greater than or equal to used. + * @param max the maximum amount of memory (in bytes) that can be used for JVM memory management. + * Can be undefined on some OS. + */ + case class HeapMemory(address: Address, timestamp: Long, used: Long, committed: Long, max: Option[Long]) { + require(committed > 0L, "committed heap expected to be > 0 bytes") + require(max.isEmpty || max.get > 0L, "max heap expected to be > 0 bytes") + } + + object Cpu { + + /** + * Given a NodeMetrics it returns the Cpu data if the nodeMetrics contains + * necessary cpu metrics. 
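// --- Illustrative sketch, not part of the patch: consuming NodeMetrics with the extractor. ---
// logHeap is a hypothetical helper; nodeMetrics would typically come from a
// ClusterMetricsChanged event. HeapMemory.unapply above yields
// (address, timestamp, used, committed, max) when the heap metrics are present.
import akka.cluster.NodeMetrics
import akka.cluster.StandardMetrics.HeapMemory

def logHeap(nodeMetrics: NodeMetrics): Unit = nodeMetrics match {
  case HeapMemory(address, timestamp, used, committed, max) ⇒
    println(s"$address uses ${used / 1024 / 1024} MB of ${committed / 1024 / 1024} MB committed heap")
  case _ ⇒ // this sample carried no heap metrics
}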
+ * @return if possible a tuple matching the Cpu constructor parameters + */ + def unapply(nodeMetrics: NodeMetrics): Option[(Address, Long, Option[Double], Option[Double], Int)] = { + for { + processors ← nodeMetrics.metric(Processors) + } yield (nodeMetrics.address, nodeMetrics.timestamp, + nodeMetrics.metric(SystemLoadAverage).map(_.smoothValue), + nodeMetrics.metric(CpuCombined).map(_.smoothValue), processors.value.intValue) + } + + } + + /** + * Java API to extract Cpu data from nodeMetrics, if the nodeMetrics + * contains necessary cpu metrics, otherwise it returns null. + */ + def extractCpu(nodeMetrics: NodeMetrics): Cpu = nodeMetrics match { + case Cpu(address, timestamp, systemLoadAverage, cpuCombined, processors) ⇒ + // note that above extractor returns tuple + Cpu(address, timestamp, systemLoadAverage, cpuCombined, processors) + case _ ⇒ null + } + + /** + * @param address [[akka.actor.Address]] of the node the metrics are gathered at + * @param timestamp the time of sampling, in milliseconds since midnight, January 1, 1970 UTC + * @param systemLoadAverage OS-specific average load on the CPUs in the system, for the past 1 minute, + * The system is possibly nearing a bottleneck if the system load average is nearing number of cpus/cores. + * @param cpuCombined combined CPU sum of User + Sys + Nice + Wait, in percentage ([0.0 - 1.0]. This + * metric can describe the amount of time the CPU spent executing code during n-interval and how + * much more it could theoretically. + * @param processors the number of available processors + */ + case class Cpu( + address: Address, + timestamp: Long, + systemLoadAverage: Option[Double], + cpuCombined: Option[Double], + processors: Int) { + + cpuCombined match { + case Some(x) ⇒ require(0.0 <= x && x <= 1.0, s"cpuCombined must be between [0.0 - 1.0], was [$x]") + case None ⇒ + } + + } } @@ -405,97 +538,199 @@ private[cluster] case class NodeMetrics(address: Address, timestamp: Long, metri private[cluster] trait MetricNumericConverter { /** - * A defined value is neither a -1 or NaN/Infinite: + * An defined value is neither negative nor NaN/Infinite: *
  • JMX system load average and max heap can be 'undefined' for certain OS, in which case a -1 is returned
  • SIGAR combined CPU can occasionally return a NaN or Infinite (known bug)
*/ - def defined(value: ScalaNumericAnyConversions): Boolean = - convert(value) fold (a ⇒ value.underlying != -1, b ⇒ !(b.isNaN || b.isInfinite)) + def defined(value: Number): Boolean = convertNumber(value) match { + case Left(a) ⇒ a >= 0 + case Right(b) ⇒ !(b < 0.0 || b.isNaN || b.isInfinite) + } /** * May involve rounding or truncation. */ - def convert(from: ScalaNumericAnyConversions): Either[Long, Double] = from match { - case n: BigInt ⇒ Left(n.longValue()) - case n: BigDecimal ⇒ Right(n.doubleValue()) - case n: RichInt ⇒ Left(n.abs) - case n: RichLong ⇒ Left(n.self) - case n: RichDouble ⇒ Right(n.self) + def convertNumber(from: Any): Either[Long, Double] = from match { + case n: Int ⇒ Left(n) + case n: Long ⇒ Left(n) + case n: Double ⇒ Right(n) + case n: Float ⇒ Right(n) + case n: BigInt ⇒ Left(n.longValue) + case n: BigDecimal ⇒ Right(n.doubleValue) + case x ⇒ throw new IllegalArgumentException(s"Not a number [$x]") } } /** * INTERNAL API - * - * Loads JVM metrics through JMX monitoring beans. If Hyperic SIGAR is on the classpath, this - * loads wider and more accurate range of metrics in combination with SIGAR's native OS library. - * - * FIXME switch to Scala reflection - * - * @param sigar the optional org.hyperic.Sigar instance + */ +private[cluster] trait MetricsCollector extends Closeable { + /** + * Samples and collects new data points. + */ + def sample: NodeMetrics +} + +/** + * Loads JVM and system metrics through JMX monitoring beans. * * @param address The [[akka.actor.Address]] of the node being sampled + * @param decay how quickly the exponential weighting of past data is decayed */ -private[cluster] class MetricsCollector private (private val sigar: Option[AnyRef], address: Address) extends MetricNumericConverter { +class JmxMetricsCollector(address: Address, decayFactor: Double) extends MetricsCollector { + import StandardMetrics._ + + private def this(cluster: Cluster) = + this(cluster.selfAddress, + EWMA.alpha(cluster.settings.MetricsMovingAverageHalfLife, cluster.settings.MetricsInterval)) + + /** + * This constructor is used when creating an instance from configured FQCN + */ + def this(system: ActorSystem) = this(Cluster(system)) + + private val decayFactorOption = Some(decayFactor) private val memoryMBean: MemoryMXBean = ManagementFactory.getMemoryMXBean private val osMBean: OperatingSystemMXBean = ManagementFactory.getOperatingSystemMXBean - private val LoadAverage: Option[Method] = createMethodFrom(sigar, "getLoadAverage") - - private val CpuList: Option[Method] = createMethodFrom(sigar, "getCpuInfoList").map(m ⇒ m) - - private val NetInterfaces: Option[Method] = createMethodFrom(sigar, "getNetInterfaceList") - - private val Cpu: Option[Method] = createMethodFrom(sigar, "getCpuPerc") - - private val CombinedCpu: Option[Method] = Try(Cpu.get.getReturnType.getMethod("getCombined")).toOption - /** * Samples and collects new data points. - * - * @return [[akka.cluster.NodeMetrics]] + * Creates a new instance each time. */ - def sample: NodeMetrics = NodeMetrics(address, newTimestamp, Set(cpuCombined, totalCores, - systemLoadAverage, used, committed, max, processors, networkMaxRx, networkMaxTx)) + def sample: NodeMetrics = NodeMetrics(address, newTimestamp, metrics) + + def metrics: Set[Metric] = { + val heap = heapMemoryUsage + Set(systemLoadAverage, heapUsed(heap), heapCommitted(heap), heapMax(heap), processors).flatten + } /** - * (SIGAR / JMX) Returns the OS-specific average system load on the CPUs in the system, for the past 1 minute. 
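// --- Illustrative sketch, not part of the patch: the validity rules of defined()/convertNumber(). ---
// MetricNumericConverter is private[cluster], so this only compiles inside the akka.cluster
// package (e.g. in a test); the concrete numbers are made up.
object DefinedCheck extends MetricNumericConverter {
  assert(!defined(-1))            // the JMX 'unavailable' marker -1 is rejected
  assert(defined(0.25))           // a plausible combined CPU fraction is accepted
  assert(!defined(Double.NaN))    // the occasional SIGAR NaN is rejected
  assert(convertNumber(42) == Left(42L))
  assert(convertNumber(0.5) == Right(0.5))
}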
- * On some systems the JMX OS system load average may not be available, in which case a Metric with - * undefined value is returned. - * Hyperic SIGAR provides more precise values, thus, if the library is on the classpath, it is the default. + * JMX Returns the OS-specific average load on the CPUs in the system, for the past 1 minute. + * On some systems the JMX OS system load average may not be available, in which case a -1 is + * returned from JMX, and None is returned from this method. + * Creates a new instance each time. */ - def systemLoadAverage: Metric = Metric("system-load-average", - Try(LoadAverage.get.invoke(sigar.get).asInstanceOf[Array[Double]].toSeq.head).getOrElse( - osMBean.getSystemLoadAverage) match { - case x if x < 0 ⇒ None // load average may be unavailable on some platform - case x ⇒ Some(BigDecimal(x)) - }) + def systemLoadAverage: Option[Metric] = Metric.create( + name = SystemLoadAverage, + value = osMBean.getSystemLoadAverage, + decayFactor = None) /** * (JMX) Returns the number of available processors + * Creates a new instance each time. */ - def processors: Metric = Metric("processors", Some(BigInt(osMBean.getAvailableProcessors))) + def processors: Option[Metric] = Metric.create( + name = Processors, + value = osMBean.getAvailableProcessors, + decayFactor = None) + + /** + * Current heap to be passed in to heapUsed, heapCommitted and heapMax + */ + def heapMemoryUsage: MemoryUsage = memoryMBean.getHeapMemoryUsage /** * (JMX) Returns the current sum of heap memory used from all heap memory pools (in bytes). + * Creates a new instance each time. */ - def used: Metric = Metric("heap-memory-used", Some(BigInt(memoryMBean.getHeapMemoryUsage.getUsed))) + def heapUsed(heap: MemoryUsage): Option[Metric] = Metric.create( + name = HeapMemoryUsed, + value = heap.getUsed, + decayFactor = decayFactorOption) /** * (JMX) Returns the current sum of heap memory guaranteed to be available to the JVM - * from all heap memory pools (in bytes). Committed will always be greater - * than or equal to used. + * from all heap memory pools (in bytes). + * Creates a new instance each time. */ - def committed: Metric = Metric("heap-memory-committed", Some(BigInt(memoryMBean.getHeapMemoryUsage.getCommitted))) + def heapCommitted(heap: MemoryUsage): Option[Metric] = Metric.create( + name = HeapMemoryCommitted, + value = heap.getCommitted, + decayFactor = decayFactorOption) /** * (JMX) Returns the maximum amount of memory (in bytes) that can be used - * for JVM memory management. If undefined, returns -1. + * for JVM memory management. If not defined the metrics value is None, i.e. + * never negative. + * Creates a new instance each time. */ - def max: Metric = Metric("heap-memory-max", Some(BigInt(memoryMBean.getHeapMemoryUsage.getMax))) + def heapMax(heap: MemoryUsage): Option[Metric] = Metric.create( + name = HeapMemoryMax, + value = heap.getMax, + decayFactor = None) + + override def close(): Unit = () + +} + +/** + * Loads metrics through Hyperic SIGAR and JMX monitoring beans. This + * loads wider and more accurate range of metrics compared to JmxMetricsCollector + * by using SIGAR's native OS library. + * + * The constructor will by design throw exception if org.hyperic.sigar.Sigar can't be loaded, due + * to missing classes or native libraries. 
+ * + * TODO switch to Scala reflection + * + * @param address The [[akka.actor.Address]] of the node being sampled + * @param decay how quickly the exponential weighting of past data is decayed + * @param sigar the org.hyperic.Sigar instance + */ +class SigarMetricsCollector(address: Address, decayFactor: Double, sigar: AnyRef) + extends JmxMetricsCollector(address, decayFactor) { + + import StandardMetrics._ + + private def this(cluster: Cluster) = + this(cluster.selfAddress, + EWMA.alpha(cluster.settings.MetricsMovingAverageHalfLife, cluster.settings.MetricsInterval), + cluster.system.dynamicAccess.createInstanceFor[AnyRef]("org.hyperic.sigar.Sigar", Nil).get) + + /** + * This constructor is used when creating an instance from configured FQCN + */ + def this(system: ActorSystem) = this(Cluster(system)) + + private val decayFactorOption = Some(decayFactor) + + private val EmptyClassArray: Array[(Class[_])] = Array.empty[(Class[_])] + private val LoadAverage: Option[Method] = createMethodFrom(sigar, "getLoadAverage") + private val Cpu: Option[Method] = createMethodFrom(sigar, "getCpuPerc") + private val CombinedCpu: Option[Method] = Try(Cpu.get.getReturnType.getMethod("getCombined")).toOption + + // Do something initially, in constructor, to make sure that the native library can be loaded. + // This will by design throw exception if sigar isn't usable + val pid: Long = createMethodFrom(sigar, "getPid") match { + case Some(method) ⇒ + try method.invoke(sigar).asInstanceOf[Long] catch { + case e: InvocationTargetException if e.getCause.isInstanceOf[LinkageError] ⇒ + // native libraries not in place + // don't throw fatal LinkageError, but something harmless + throw new IllegalArgumentException(e.getCause.toString) + case e: InvocationTargetException ⇒ throw e.getCause + } + case None ⇒ throw new IllegalArgumentException("Wrong version of Sigar, expected 'getPid' method") + } + + override def metrics: Set[Metric] = { + super.metrics.filterNot(_.name == SystemLoadAverage) ++ Set(systemLoadAverage, cpuCombined).flatten + } + + /** + * (SIGAR / JMX) Returns the OS-specific average load on the CPUs in the system, for the past 1 minute. + * On some systems the JMX OS system load average may not be available, in which case a -1 is returned + * from JMX, which means that None is returned from this method. + * Hyperic SIGAR provides more precise values, thus, if the library is on the classpath, it is the default. + * Creates a new instance each time. + */ + override def systemLoadAverage: Option[Metric] = Metric.create( + name = SystemLoadAverage, + value = Try(LoadAverage.get.invoke(sigar).asInstanceOf[Array[AnyRef]](0).asInstanceOf[Number]), + decayFactor = None) orElse super.systemLoadAverage /** * (SIGAR) Returns the combined CPU sum of User + Sys + Nice + Wait, in percentage. This metric can describe @@ -504,68 +739,51 @@ private[cluster] class MetricsCollector private (private val sigar: Option[AnyRe * * In the data stream, this will sometimes return with a valid metric value, and sometimes as a NaN or Infinite. * Documented bug https://bugzilla.redhat.com/show_bug.cgi?id=749121 and several others. + * + * Creates a new instance each time. */ - def cpuCombined: Metric = Metric("cpu-combined", Try(BigDecimal(CombinedCpu.get.invoke(Cpu.get.invoke(sigar.get)).asInstanceOf[Double])).toOption) - - /** - * (SIGAR) Returns the total number of cores. 
- */ - def totalCores: Metric = Metric("total-cores", Try(BigInt(CpuList.get.invoke(sigar.get).asInstanceOf[Array[AnyRef]].map(cpu ⇒ - createMethodFrom(Some(cpu), "getTotalCores").get.invoke(cpu).asInstanceOf[Int]).head)).toOption) - //Array[Int].head - if this would differ on some servers, expose all. In testing each int was always equal. - - /** - * (SIGAR) Returns the max network IO read/write value, in bytes, for network latency evaluation. - */ - def networkMaxRx: Metric = networkMaxFor("getRxBytes", "network-max-rx") - - /** - * (SIGAR) Returns the max network IO tx value, in bytes. - */ - def networkMaxTx: Metric = networkMaxFor("getTxBytes", "network-max-tx") - - /** - * Returns the network stats per interface. - */ - def networkStats: Map[String, AnyRef] = Try(NetInterfaces.get.invoke(sigar.get).asInstanceOf[Array[String]].map(arg ⇒ - arg -> (createMethodFrom(sigar, "getNetInterfaceStat", Array(classOf[String])).get.invoke(sigar.get, arg))).toMap) getOrElse Map.empty[String, AnyRef] - - /** - * Returns true if SIGAR is successfully installed on the classpath, otherwise false. - */ - def isSigar: Boolean = sigar.isDefined + def cpuCombined: Option[Metric] = Metric.create( + name = CpuCombined, + value = Try(CombinedCpu.get.invoke(Cpu.get.invoke(sigar)).asInstanceOf[Number]), + decayFactor = decayFactorOption) /** * Releases any native resources associated with this instance. */ - def close(): Unit = if (isSigar) Try(createMethodFrom(sigar, "close").get.invoke(sigar.get)) getOrElse Unit + override def close(): Unit = Try(createMethodFrom(sigar, "close").get.invoke(sigar)) - /** - * Returns the max bytes for the given method in metric for metric from the network interface stats. - */ - private def networkMaxFor(method: String, metric: String): Metric = Metric(metric, Try(Some(BigInt( - networkStats.collect { case (_, a) ⇒ createMethodFrom(Some(a), method).get.invoke(a).asInstanceOf[Long] }.max))) getOrElse None) - - private def createMethodFrom(ref: Option[AnyRef], method: String, types: Array[(Class[_])] = Array.empty[(Class[_])]): Option[Method] = - Try(ref.get.getClass.getMethod(method, types: _*)).toOption + private def createMethodFrom(ref: AnyRef, method: String, types: Array[(Class[_])] = EmptyClassArray): Option[Method] = + Try(ref.getClass.getMethod(method, types: _*)).toOption } /** * INTERNAL API - * Companion object of MetricsCollector class. + * Factory to create configured MetricsCollector. + * If instantiation of SigarMetricsCollector fails (missing class or native library) + * it falls back to use JmxMetricsCollector. */ private[cluster] object MetricsCollector { - def apply(address: Address, log: LoggingAdapter, dynamicAccess: DynamicAccess): MetricsCollector = - dynamicAccess.createInstanceFor[AnyRef]("org.hyperic.sigar.Sigar", Nil) match { - case Success(identity) ⇒ new MetricsCollector(Some(identity), address) - case Failure(e) ⇒ - log.debug(e.toString) - log.info("Hyperic SIGAR was not found on the classpath or not installed properly. " + - "Metrics will be retreived from MBeans, and may be incorrect on some platforms. 
" + - "To increase metric accuracy add the 'sigar.jar' to the classpath and the appropriate" + - "platform-specific native libary to 'java.library.path'.") - new MetricsCollector(None, address) + def apply(system: ExtendedActorSystem, settings: ClusterSettings): MetricsCollector = { + import settings.{ MetricsCollectorClass ⇒ fqcn } + def log = Logging(system, "MetricsCollector") + if (fqcn == classOf[SigarMetricsCollector].getName) { + Try(new SigarMetricsCollector(system)) match { + case Success(sigarCollector) ⇒ sigarCollector + case Failure(e) ⇒ + log.info("Metrics will be retreived from MBeans, and may be incorrect on some platforms. " + + "To increase metric accuracy add the 'sigar.jar' to the classpath and the appropriate " + + "platform-specific native libary to 'java.library.path'. Reason: " + + e.toString) + new JmxMetricsCollector(system) + } + + } else { + system.dynamicAccess.createInstanceFor[MetricsCollector](fqcn, List(classOf[ActorSystem] -> system)). + recover { + case e ⇒ throw new ConfigurationException("Could not create custom metrics collector [" + fqcn + "] due to:" + e.toString) + }.get } + } } diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterReadView.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterReadView.scala index 5920ac3dca..831d72f9c8 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterReadView.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterReadView.scala @@ -5,7 +5,7 @@ package akka.cluster import java.io.Closeable -import scala.collection.immutable.SortedSet +import scala.collection.immutable import akka.actor.{ Actor, ActorRef, ActorSystemImpl, Address, Props } import akka.cluster.ClusterEvent._ import akka.actor.PoisonPill @@ -45,25 +45,25 @@ private[akka] class ClusterReadView(cluster: Cluster) extends Closeable { override def postStop(): Unit = cluster.unsubscribe(self) def receive = { - case SeenChanged(convergence, seenBy) ⇒ - state = state.copy(convergence = convergence, seenBy = seenBy) - case MemberRemoved(member) ⇒ - state = state.copy(members = state.members - member, unreachable = state.unreachable - member) - case MemberUnreachable(member) ⇒ - // replace current member with new member (might have different status, only address is used in equals) - state = state.copy(members = state.members - member, unreachable = state.unreachable - member + member) - case MemberDowned(member) ⇒ - // replace current member with new member (might have different status, only address is used in equals) - state = state.copy(members = state.members - member, unreachable = state.unreachable - member + member) - case event: MemberEvent ⇒ - // replace current member with new member (might have different status, only address is used in equals) - state = state.copy(members = state.members - event.member + event.member) - case LeaderChanged(leader) ⇒ state = state.copy(leader = leader) - case ConvergenceChanged(convergence) ⇒ state = state.copy(convergence = convergence) - case s: CurrentClusterState ⇒ state = s - case CurrentInternalStats(stats) ⇒ _latestStats = stats - case ClusterMetricsChanged(nodes) ⇒ _clusterMetrics = nodes - case _ ⇒ // ignore, not interesting + case e: ClusterDomainEvent ⇒ e match { + case SeenChanged(convergence, seenBy) ⇒ + state = state.copy(seenBy = seenBy) + case MemberRemoved(member) ⇒ + state = state.copy(members = state.members - member, unreachable = state.unreachable - member) + case UnreachableMember(member) ⇒ + // replace current member with new member (might have different status, only address 
is used in equals) + state = state.copy(members = state.members - member, unreachable = state.unreachable - member + member) + case MemberDowned(member) ⇒ + // replace current member with new member (might have different status, only address is used in equals) + state = state.copy(members = state.members - member, unreachable = state.unreachable - member + member) + case event: MemberEvent ⇒ + // replace current member with new member (might have different status, only address is used in equals) + state = state.copy(members = state.members - event.member + event.member) + case LeaderChanged(leader) ⇒ state = state.copy(leader = leader) + case s: CurrentClusterState ⇒ state = s + case CurrentInternalStats(stats) ⇒ _latestStats = stats + case ClusterMetricsChanged(nodes) ⇒ _clusterMetrics = nodes + } } }).withDispatcher(cluster.settings.UseDispatcher), name = "clusterEventBusListener") } @@ -74,14 +74,14 @@ private[akka] class ClusterReadView(cluster: Cluster) extends Closeable { } /** - * Returns true if the cluster node is up and running, false if it is shut down. + * Returns true if this cluster instance has be shutdown. */ - def isRunning: Boolean = cluster.isRunning + def isTerminated: Boolean = cluster.isTerminated /** * Current cluster members, sorted by address. */ - def members: SortedSet[Member] = state.members + def members: immutable.SortedSet[Member] = state.members /** * Members that has been detected as unreachable. @@ -108,21 +108,19 @@ private[akka] class ClusterReadView(cluster: Cluster) extends Closeable { def leader: Option[Address] = state.leader /** - * Is this node a singleton cluster? + * Does the cluster consist of only one member? */ def isSingletonCluster: Boolean = members.size == 1 /** - * Checks if we have a cluster convergence. - */ - def convergence: Boolean = state.convergence - - /** - * Returns true if the node is UP or JOINING. + * Returns true if the node is not unreachable and not `Down` + * and not `Removed`. */ def isAvailable: Boolean = { val myself = self - !unreachableMembers.contains(myself) && !myself.status.isUnavailable + !unreachableMembers.contains(myself) && + myself.status != MemberStatus.Down && + myself.status != MemberStatus.Removed } /** @@ -148,4 +146,4 @@ private[akka] class ClusterReadView(cluster: Cluster) extends Closeable { eventBusListener ! 
PoisonPill } -} \ No newline at end of file +} diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala index 88c408e70c..5b5c26ae33 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala @@ -3,15 +3,16 @@ */ package akka.cluster +import scala.collection.immutable import com.typesafe.config.Config import scala.concurrent.duration.Duration import java.util.concurrent.TimeUnit.MILLISECONDS import akka.ConfigurationException -import scala.collection.JavaConverters._ import akka.actor.Address import akka.actor.AddressFromURIString import akka.dispatch.Dispatchers import scala.concurrent.duration.FiniteDuration +import akka.japi.Util.immutableSeq class ClusterSettings(val config: Config, val systemName: String) { import config._ @@ -45,7 +46,8 @@ class ClusterSettings(val config: Config, val systemName: String) { require(n > 0, "failure-detector.monitored-by-nr-of-members must be > 0"); n } - final val SeedNodes: IndexedSeq[Address] = getStringList("akka.cluster.seed-nodes").asScala.map { case AddressFromURIString(addr) ⇒ addr }.toIndexedSeq + final val SeedNodes: immutable.IndexedSeq[Address] = + immutableSeq(getStringList("akka.cluster.seed-nodes")).map { case AddressFromURIString(addr) ⇒ addr }.toVector final val SeedNodeTimeout: FiniteDuration = Duration(getMilliseconds("akka.cluster.seed-node-timeout"), MILLISECONDS) final val PeriodicTasksInitialDelay: FiniteDuration = Duration(getMilliseconds("akka.cluster.periodic-tasks-initial-delay"), MILLISECONDS) final val GossipInterval: FiniteDuration = Duration(getMilliseconds("akka.cluster.gossip-interval"), MILLISECONDS) @@ -54,6 +56,10 @@ class ClusterSettings(val config: Config, val systemName: String) { final val PublishStatsInterval: FiniteDuration = Duration(getMilliseconds("akka.cluster.publish-stats-interval"), MILLISECONDS) final val AutoJoin: Boolean = getBoolean("akka.cluster.auto-join") final val AutoDown: Boolean = getBoolean("akka.cluster.auto-down") + final val MinNrOfMembers: Int = { + val n = getInt("akka.cluster.min-nr-of-members") + require(n > 0, "min-nr-of-members must be > 0"); n + } final val JmxEnabled: Boolean = getBoolean("akka.cluster.jmx.enabled") final val JoinTimeout: FiniteDuration = Duration(getMilliseconds("akka.cluster.join-timeout"), MILLISECONDS) final val UseDispatcher: String = getString("akka.cluster.use-dispatcher") match { @@ -69,9 +75,16 @@ class ClusterSettings(val config: Config, val systemName: String) { callTimeout = Duration(getMilliseconds("akka.cluster.send-circuit-breaker.call-timeout"), MILLISECONDS), resetTimeout = Duration(getMilliseconds("akka.cluster.send-circuit-breaker.reset-timeout"), MILLISECONDS)) final val MetricsEnabled: Boolean = getBoolean("akka.cluster.metrics.enabled") - final val MetricsInterval: FiniteDuration = Duration(getMilliseconds("akka.cluster.metrics.metrics-interval"), MILLISECONDS) + final val MetricsCollectorClass: String = getString("akka.cluster.metrics.collector-class") + final val MetricsInterval: FiniteDuration = { + val d = Duration(getMilliseconds("akka.cluster.metrics.collect-interval"), MILLISECONDS) + require(d > Duration.Zero, "metrics.collect-interval must be > 0"); d + } final val MetricsGossipInterval: FiniteDuration = Duration(getMilliseconds("akka.cluster.metrics.gossip-interval"), MILLISECONDS) - final val MetricsRateOfDecay: Int = getInt("akka.cluster.metrics.rate-of-decay") + 
final val MetricsMovingAverageHalfLife: FiniteDuration = { + val d = Duration(getMilliseconds("akka.cluster.metrics.moving-average-half-life"), MILLISECONDS) + require(d > Duration.Zero, "metrics.moving-average-half-life must be > 0"); d + } } case class CircuitBreakerSettings(maxFailures: Int, callTimeout: FiniteDuration, resetTimeout: FiniteDuration) diff --git a/akka-cluster/src/main/scala/akka/cluster/Gossip.scala b/akka-cluster/src/main/scala/akka/cluster/Gossip.scala index be734703be..1f96434995 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Gossip.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Gossip.scala @@ -5,14 +5,14 @@ package akka.cluster import akka.actor.Address -import scala.collection.immutable.SortedSet +import scala.collection.immutable import MemberStatus._ /** * Internal API */ private[cluster] object Gossip { - val emptyMembers: SortedSet[Member] = SortedSet.empty + val emptyMembers: immutable.SortedSet[Member] = immutable.SortedSet.empty } /** @@ -50,7 +50,7 @@ private[cluster] object Gossip { */ private[cluster] case class Gossip( overview: GossipOverview = GossipOverview(), - members: SortedSet[Member] = Gossip.emptyMembers, // sorted set of members with their status, sorted by address + members: immutable.SortedSet[Member] = Gossip.emptyMembers, // sorted set of members with their status, sorted by address version: VectorClock = VectorClock()) // vector clock version extends ClusterMessage // is a serializable cluster message with Versioned[Gossip] { @@ -135,7 +135,7 @@ private[cluster] case class Gossip( * Checks if we have a cluster convergence. If there are any unreachable nodes then we can't have a convergence - * waiting for user to act (issuing DOWN) or leader to act (issuing DOWN through auto-down). * - * @return Some(convergedGossip) if convergence have been reached and None if not + * @return true if convergence have been reached and false if not */ def convergence: Boolean = { val unreachable = overview.unreachable @@ -151,8 +151,10 @@ private[cluster] case class Gossip( def allMembersInSeen = members.forall(m ⇒ seen.contains(m.address)) def seenSame: Boolean = - if (seen.isEmpty) false - else { + if (seen.isEmpty) { + // if both seen and members are empty, then every(no)body has seen the same thing + members.isEmpty + } else { val values = seen.values val seenHead = values.head values.forall(_ == seenHead) @@ -168,15 +170,10 @@ private[cluster] case class Gossip( def isSingletonCluster: Boolean = members.size == 1 /** - * Returns true if the node is UP or JOINING. + * Returns true if the node is in the unreachable set */ - def isAvailable(address: Address): Boolean = !isUnavailable(address) - - def isUnavailable(address: Address): Boolean = { - val isUnreachable = overview.unreachable exists { _.address == address } - val hasUnavailableMemberStatus = members exists { m ⇒ m.status.isUnavailable && m.address == address } - isUnreachable || hasUnavailableMemberStatus - } + def isUnreachable(address: Address): Boolean = + overview.unreachable exists { _.address == address } def member(address: Address): Member = { members.find(_.address == address).orElse(overview.unreachable.find(_.address == address)). 
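// --- Illustrative sketch, not part of the patch: what the moving-average half-life setting means. ---
// Assuming the conventional relation alpha = 1 - 0.5^(collectInterval / halfLife) (the real
// conversion lives in EWMA.alpha, which is not shown in this hunk), a 12 s half-life sampled
// every 3 s gives a smoothing constant of roughly 0.16. The durations are example values.
import scala.concurrent.duration._

def alphaFor(halfLife: FiniteDuration, collectInterval: FiniteDuration): Double =
  1.0 - math.pow(0.5, collectInterval.toMillis.toDouble / halfLife.toMillis)

val alpha = alphaFor(halfLife = 12.seconds, collectInterval = 3.seconds) // ≈ 0.159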
diff --git a/akka-cluster/src/main/scala/akka/cluster/Member.scala b/akka-cluster/src/main/scala/akka/cluster/Member.scala index f8a064977d..1ee4aae804 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Member.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Member.scala @@ -6,7 +6,7 @@ package akka.cluster import language.implicitConversions -import scala.collection.immutable.SortedSet +import scala.collection.immutable import scala.collection.GenTraversableOnce import akka.actor.Address import MemberStatus._ @@ -87,13 +87,7 @@ object Member { * * Can be one of: Joining, Up, Leaving, Exiting and Down. */ -abstract class MemberStatus extends ClusterMessage { - - /** - * Using the same notion for 'unavailable' as 'non-convergence': DOWN - */ - def isUnavailable: Boolean = this == Down -} +abstract class MemberStatus extends ClusterMessage object MemberStatus { case object Joining extends MemberStatus diff --git a/akka-cluster/src/main/scala/akka/cluster/VectorClock.scala b/akka-cluster/src/main/scala/akka/cluster/VectorClock.scala index ed6724058f..baef66f26c 100644 --- a/akka-cluster/src/main/scala/akka/cluster/VectorClock.scala +++ b/akka-cluster/src/main/scala/akka/cluster/VectorClock.scala @@ -160,7 +160,7 @@ case class VectorClock( * Compare two vector clocks. The outcomes will be one of the following: *

* {{{ - * 1. Clock 1 is BEFORE (>) Clock 2 if there exists an i such that c1(i) <= c(2) and there does not exist a j such that c1(j) > c2(j). + * 1. Clock 1 is BEFORE (>) Clock 2 if there exists an i such that c1(i) <= c2(i) and there does not exist a j such that c1(j) > c2(j). * 2. Clock 1 is CONCURRENT (<>) to Clock 2 if there exists an i, j such that c1(i) < c2(i) and c1(j) > c2(j). * 3. Clock 1 is AFTER (<) Clock 2 otherwise. * }}} diff --git a/akka-cluster/src/main/scala/akka/cluster/routing/AdaptiveLoadBalancingRouter.scala b/akka-cluster/src/main/scala/akka/cluster/routing/AdaptiveLoadBalancingRouter.scala new file mode 100644 index 0000000000..60a9c5b6a7 --- /dev/null +++ b/akka-cluster/src/main/scala/akka/cluster/routing/AdaptiveLoadBalancingRouter.scala @@ -0,0 +1,434 @@ +/* + * Copyright (C) 2009-2012 Typesafe Inc. + */ + +package akka.cluster.routing + +import java.util.Arrays + +import scala.concurrent.forkjoin.ThreadLocalRandom +import scala.collection.immutable +import akka.actor.Actor +import akka.actor.ActorRef +import akka.actor.Address +import akka.actor.OneForOneStrategy +import akka.actor.Props +import akka.actor.SupervisorStrategy +import akka.dispatch.Dispatchers +import akka.cluster.Cluster +import akka.cluster.ClusterEvent.ClusterMetricsChanged +import akka.cluster.ClusterEvent.CurrentClusterState +import akka.cluster.NodeMetrics +import akka.cluster.StandardMetrics.Cpu +import akka.cluster.StandardMetrics.HeapMemory +import akka.event.Logging +import akka.japi.Util.immutableSeq +import akka.routing.Broadcast +import akka.routing.Destination +import akka.routing.FromConfig +import akka.routing.NoRouter +import akka.routing.Resizer +import akka.routing.Route +import akka.routing.RouteeProvider +import akka.routing.RouterConfig + +object AdaptiveLoadBalancingRouter { + private val escalateStrategy: SupervisorStrategy = OneForOneStrategy() { + case _ ⇒ SupervisorStrategy.Escalate + } +} + +/** + * A Router that performs load balancing of messages to cluster nodes based on + * cluster metric data. + * + * It uses random selection of routees based probabilities derived from + * the remaining capacity of corresponding node. + * + * Please note that providing both 'nrOfInstances' and 'routees' does not make logical + * sense as this means that the router should both create new actors and use the 'routees' + * actor(s). In this case the 'nrOfInstances' will be ignored and the 'routees' will be used. + *
+ * The configuration parameter trumps the constructor arguments. This means that + * if you provide either 'nrOfInstances' or 'routees' during instantiation they will + * be ignored if the router is defined in the configuration file for the actor being used. + * + *

+ * <h1>Supervision Setup</h1>

+ * + * The router creates a “head” actor which supervises and/or monitors the + * routees. Instances are created as children of this actor, hence the + * children are not supervised by the parent of the router. Common choices are + * to always escalate (meaning that fault handling is always applied to all + * children simultaneously; this is the default) or use the parent’s strategy, + * which will result in routed children being treated individually, but it is + * possible as well to use Routers to give different supervisor strategies to + * different groups of children. + * + * @param metricsSelector decides what probability to use for selecting a routee, based + * on remaining capacity as indicated by the node metrics + * @param routees string representation of the actor paths of the routees that will be looked up + * using `actorFor` in [[akka.actor.ActorRefProvider]] + */ +@SerialVersionUID(1L) +case class AdaptiveLoadBalancingRouter( + metricsSelector: MetricsSelector = MixMetricsSelector, + nrOfInstances: Int = 0, routees: immutable.Iterable[String] = Nil, + override val resizer: Option[Resizer] = None, + val routerDispatcher: String = Dispatchers.DefaultDispatcherId, + val supervisorStrategy: SupervisorStrategy = AdaptiveLoadBalancingRouter.escalateStrategy) + extends RouterConfig with AdaptiveLoadBalancingRouterLike { + + /** + * Constructor that sets nrOfInstances to be created. + * Java API + * @param selector the selector is responsible for producing weighted mix of routees from the node metrics + * @param nr number of routees to create + */ + def this(selector: MetricsSelector, nr: Int) = this(metricsSelector = selector, nrOfInstances = nr) + + /** + * Constructor that sets the routees to be used. + * Java API + * @param selector the selector is responsible for producing weighted mix of routees from the node metrics + * @param routeePaths string representation of the actor paths of the routees that will be looked up + * using `actorFor` in [[akka.actor.ActorRefProvider]] + */ + def this(selector: MetricsSelector, routeePaths: java.lang.Iterable[String]) = + this(metricsSelector = selector, routees = immutableSeq(routeePaths)) + + /** + * Constructor that sets the resizer to be used. + * Java API + * @param selector the selector is responsible for producing weighted mix of routees from the node metrics + */ + def this(selector: MetricsSelector, resizer: Resizer) = + this(metricsSelector = selector, resizer = Some(resizer)) + + /** + * Java API for setting routerDispatcher + */ + def withDispatcher(dispatcherId: String): AdaptiveLoadBalancingRouter = + copy(routerDispatcher = dispatcherId) + + /** + * Java API for setting the supervisor strategy to be used for the “head” + * Router actor. + */ + def withSupervisorStrategy(strategy: SupervisorStrategy): AdaptiveLoadBalancingRouter = + copy(supervisorStrategy = strategy) + + /** + * Uses the resizer of the given RouterConfig if this RouterConfig + * doesn't have one, i.e. the resizer defined in code is used if + * resizer was not defined in config. + */ + override def withFallback(other: RouterConfig): RouterConfig = other match { + case _: FromConfig | _: NoRouter ⇒ this + case otherRouter: AdaptiveLoadBalancingRouter ⇒ + val useResizer = + if (this.resizer.isEmpty && otherRouter.resizer.isDefined) otherRouter.resizer + else this.resizer + copy(resizer = useResizer) + case _ ⇒ throw new IllegalArgumentException("Expected AdaptiveLoadBalancingRouter, got [%s]".format(other)) + } + +} + +/** + * INTERNAL API. 
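// --- Illustrative usage sketch, not part of the patch. ---
// Worker, the system name and nrOfInstances = 10 are hypothetical; AdaptiveLoadBalancingRouter
// and HeapMetricsSelector are the types introduced above. Routees on nodes with more free
// heap are selected with higher probability.
import akka.actor.{ Actor, ActorSystem, Props }
import akka.cluster.routing.{ AdaptiveLoadBalancingRouter, HeapMetricsSelector }

class Worker extends Actor {
  def receive = { case job ⇒ sender ! job }
}

val system  = ActorSystem("example")
val backend = system.actorOf(
  Props[Worker].withRouter(AdaptiveLoadBalancingRouter(HeapMetricsSelector, nrOfInstances = 10)),
  name = "backend")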
+ * + * This strategy is a metrics-aware router which performs load balancing of messages to + * cluster nodes based on cluster metric data. It consumes [[akka.cluster.ClusterMetricsChanged]] + * events and the [[akka.cluster.routing.MetricsSelector]] creates an mix of + * weighted routees based on the node metrics. Messages are routed randomly to the + * weighted routees, i.e. nodes with lower load are more likely to be used than nodes with + * higher load + */ +trait AdaptiveLoadBalancingRouterLike { this: RouterConfig ⇒ + + def metricsSelector: MetricsSelector + + def nrOfInstances: Int + + def routees: immutable.Iterable[String] + + def routerDispatcher: String + + override def createRoute(routeeProvider: RouteeProvider): Route = { + if (resizer.isEmpty) { + if (routees.isEmpty) routeeProvider.createRoutees(nrOfInstances) + else routeeProvider.registerRouteesFor(routees) + } + + val log = Logging(routeeProvider.context.system, routeeProvider.context.self) + + // The current weighted routees, if any. Weights are produced by the metricsSelector + // via the metricsListener Actor. It's only updated by the actor, but accessed from + // the threads of the senders. + @volatile var weightedRoutees: Option[WeightedRoutees] = None + + // subscribe to ClusterMetricsChanged and update weightedRoutees + val metricsListener = routeeProvider.context.actorOf(Props(new Actor { + + val cluster = Cluster(context.system) + + override def preStart(): Unit = cluster.subscribe(self, classOf[ClusterMetricsChanged]) + override def postStop(): Unit = cluster.unsubscribe(self) + + def receive = { + case ClusterMetricsChanged(metrics) ⇒ receiveMetrics(metrics) + case _: CurrentClusterState ⇒ // ignore + } + + def receiveMetrics(metrics: Set[NodeMetrics]): Unit = { + // this is the only place from where weightedRoutees is updated + weightedRoutees = Some(new WeightedRoutees(routeeProvider.routees, cluster.selfAddress, + metricsSelector.weights(metrics))) + } + + }).withDispatcher(routerDispatcher), name = "metricsListener") + + def getNext(): ActorRef = weightedRoutees match { + case Some(weighted) ⇒ + if (weighted.isEmpty) routeeProvider.context.system.deadLetters + else weighted(ThreadLocalRandom.current.nextInt(weighted.total) + 1) + case None ⇒ + val currentRoutees = routeeProvider.routees + if (currentRoutees.isEmpty) routeeProvider.context.system.deadLetters + else currentRoutees(ThreadLocalRandom.current.nextInt(currentRoutees.size)) + } + + { + case (sender, message) ⇒ + message match { + case Broadcast(msg) ⇒ toAll(sender, routeeProvider.routees) + case msg ⇒ List(Destination(sender, getNext())) + } + } + } +} + +/** + * MetricsSelector that uses the heap metrics. + * Low heap capacity => small weight. + */ +@SerialVersionUID(1L) +case object HeapMetricsSelector extends CapacityMetricsSelector { + /** + * Java API: get the singleton instance + */ + def getInstance = this + + override def capacity(nodeMetrics: Set[NodeMetrics]): Map[Address, Double] = { + nodeMetrics.collect { + case HeapMemory(address, _, used, committed, max) ⇒ + val capacity = max match { + case None ⇒ (committed - used).toDouble / committed + case Some(m) ⇒ (m - used).toDouble / m + } + (address, capacity) + }.toMap + } +} + +/** + * MetricsSelector that uses the combined CPU metrics. + * Combined CPU is sum of User + Sys + Nice + Wait, in percentage. + * Low cpu capacity => small weight. 
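// --- Worked example, not part of the patch, with made-up numbers. ---
// HeapMetricsSelector above computes capacity as (max - used) / max (or against committed
// when max is undefined); the combined-CPU capacity below it is 1.0 - cpuCombined.
val heapCapacity = (1024L - 768L).toDouble / 1024L // 0.25: a quarter of the heap is still free
val cpuCapacity  = 1.0 - 0.75                      // a CPU that is 75 % busy has 0.25 capacity left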
+ */ +@SerialVersionUID(1L) +case object CpuMetricsSelector extends CapacityMetricsSelector { + /** + * Java API: get the singleton instance + */ + def getInstance = this + + override def capacity(nodeMetrics: Set[NodeMetrics]): Map[Address, Double] = { + nodeMetrics.collect { + case Cpu(address, _, _, Some(cpuCombined), _) ⇒ + val capacity = 1.0 - cpuCombined + (address, capacity) + }.toMap + } +} + +/** + * MetricsSelector that uses the system load average metrics. + * System load average is OS-specific average load on the CPUs in the system, + * for the past 1 minute. The system is possibly nearing a bottleneck if the + * system load average is nearing number of cpus/cores. + * Low load average capacity => small weight. + */ +@SerialVersionUID(1L) +case object SystemLoadAverageMetricsSelector extends CapacityMetricsSelector { + /** + * Java API: get the singleton instance + */ + def getInstance = this + + override def capacity(nodeMetrics: Set[NodeMetrics]): Map[Address, Double] = { + nodeMetrics.collect { + case Cpu(address, _, Some(systemLoadAverage), _, processors) ⇒ + val capacity = 1.0 - math.min(1.0, systemLoadAverage / processors) + (address, capacity) + }.toMap + } +} + +/** + * Singleton instance of the default MixMetricsSelector, which uses [akka.cluster.routing.HeapMetricsSelector], + * [akka.cluster.routing.CpuMetricsSelector], and [akka.cluster.routing.SystemLoadAverageMetricsSelector] + */ +@SerialVersionUID(1L) +object MixMetricsSelector extends MixMetricsSelectorBase( + Vector(HeapMetricsSelector, CpuMetricsSelector, SystemLoadAverageMetricsSelector)) { + + /** + * Java API: get the default singleton instance + */ + def getInstance = this +} + +/** + * MetricsSelector that combines other selectors and aggregates their capacity + * values. By default it uses [akka.cluster.routing.HeapMetricsSelector], + * [akka.cluster.routing.CpuMetricsSelector], and [akka.cluster.routing.SystemLoadAverageMetricsSelector] + */ +@SerialVersionUID(1L) +case class MixMetricsSelector( + selectors: immutable.IndexedSeq[CapacityMetricsSelector]) + extends MixMetricsSelectorBase(selectors) + +/** + * Base class for MetricsSelector that combines other selectors and aggregates their capacity. + */ +@SerialVersionUID(1L) +abstract class MixMetricsSelectorBase(selectors: immutable.IndexedSeq[CapacityMetricsSelector]) + extends CapacityMetricsSelector { + + /** + * Java API + */ + def this(selectors: java.lang.Iterable[CapacityMetricsSelector]) = this(immutableSeq(selectors).toVector) + + override def capacity(nodeMetrics: Set[NodeMetrics]): Map[Address, Double] = { + val combined: immutable.IndexedSeq[(Address, Double)] = selectors.flatMap(_.capacity(nodeMetrics).toSeq) + // aggregated average of the capacities by address + combined.foldLeft(Map.empty[Address, (Double, Int)].withDefaultValue((0.0, 0))) { + case (acc, (address, capacity)) ⇒ + val (sum, count) = acc(address) + acc + (address -> (sum + capacity, count + 1)) + }.map { + case (addr, (sum, count)) ⇒ (addr -> sum / count) + } + } + +} + +/** + * A MetricsSelector is responsible for producing weights from the node metrics. + */ +@SerialVersionUID(1L) +trait MetricsSelector extends Serializable { + /** + * The weights per address, based on the the nodeMetrics. + */ + def weights(nodeMetrics: Set[NodeMetrics]): Map[Address, Int] +} + +/** + * A MetricsSelector producing weights from remaining capacity. + * The weights are typically proportional to the remaining capacity. 
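// --- Worked example, not part of the patch, with made-up capacities. ---
// MixMetricsSelector averages the capacity each underlying selector reports per address:
// a node with heap capacity 0.5 and cpu capacity 0.25 ends up with mixed capacity 0.375.
val reported = Seq("node1" -> 0.5, "node1" -> 0.25, "node2" -> 0.75)
val mixed = reported.groupBy(_._1).map { case (addr, cs) ⇒ addr -> cs.map(_._2).sum / cs.size }
// Map(node1 -> 0.375, node2 -> 0.75)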
+ */ +abstract class CapacityMetricsSelector extends MetricsSelector { + + /** + * Remaining capacity for each node. The value is between + * 0.0 and 1.0, where 0.0 means no remaining capacity (full + * utilization) and 1.0 means full remaining capacity (zero + * utilization). + */ + def capacity(nodeMetrics: Set[NodeMetrics]): Map[Address, Double] + + /** + * Converts the capacity values to weights. The node with lowest + * capacity gets weight 1 (lowest usable capacity is 1%) and other + * nodes gets weights proportional to their capacity compared to + * the node with lowest capacity. + */ + def weights(capacity: Map[Address, Double]): Map[Address, Int] = { + if (capacity.isEmpty) Map.empty[Address, Int] + else { + val (_, min) = capacity.minBy { case (_, c) ⇒ c } + // lowest usable capacity is 1% (>= 0.5% will be rounded to weight 1), also avoids div by zero + val divisor = math.max(0.01, min) + capacity map { case (addr, c) ⇒ (addr -> math.round((c) / divisor).toInt) } + } + } + + /** + * The weights per address, based on the capacity produced by + * the nodeMetrics. + */ + override def weights(nodeMetrics: Set[NodeMetrics]): Map[Address, Int] = + weights(capacity(nodeMetrics)) + +} + +/** + * INTERNAL API + * + * Pick routee based on its weight. Higher weight, higher probability. + */ +private[cluster] class WeightedRoutees(refs: immutable.IndexedSeq[ActorRef], selfAddress: Address, weights: Map[Address, Int]) { + + // fill an array of same size as the refs with accumulated weights, + // binarySearch is used to pick the right bucket from a requested value + // from 1 to the total sum of the used weights. + private val buckets: Array[Int] = { + def fullAddress(actorRef: ActorRef): Address = actorRef.path.address match { + case Address(_, _, None, None) ⇒ selfAddress + case a ⇒ a + } + val buckets = Array.ofDim[Int](refs.size) + val meanWeight = if (weights.isEmpty) 1 else weights.values.sum / weights.size + val w = weights.withDefaultValue(meanWeight) // we don’t necessarily have metrics for all addresses + var i = 0 + var sum = 0 + refs foreach { ref ⇒ + sum += w(fullAddress(ref)) + buckets(i) = sum + i += 1 + } + buckets + } + + def isEmpty: Boolean = buckets.length == 0 + + def total: Int = { + require(!isEmpty, "WeightedRoutees must not be used when empty") + buckets(buckets.length - 1) + } + + /** + * Pick the routee matching a value, from 1 to total. 
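// --- Worked example, not part of the patch, with made-up capacities. ---
// weights() above uses divisor = max(0.01, lowest capacity) and weight = round(capacity / divisor).
val capacities = Map("a" -> 0.1, "b" -> 0.3)           // addresses abbreviated to plain strings
val divisor    = math.max(0.01, capacities.values.min) // 0.1
val weights    = capacities.map { case (addr, c) ⇒ addr -> math.round(c / divisor).toInt }
// Map(a -> 1, b -> 3): WeightedRoutees then accumulates these into buckets [1, 4] (total 4), so a
// random value of 1 picks routee "a" and values 2..4 pick "b", i.e. "b" gets 3 of every 4 messages.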
+ */ + def apply(value: Int): ActorRef = { + require(1 <= value && value <= total, "value must be between [1 - %s]" format total) + refs(idx(Arrays.binarySearch(buckets, value))) + } + + /** + * Converts the result of Arrays.binarySearch into a index in the buckets array + * see documentation of Arrays.binarySearch for what it returns + */ + private def idx(i: Int): Int = { + if (i >= 0) i // exact match + else { + val j = math.abs(i + 1) + if (j >= buckets.length) throw new IndexOutOfBoundsException( + "Requested index [%s] is > max index [%s]".format(i, buckets.length)) + else j + } + } +} \ No newline at end of file diff --git a/akka-cluster/src/main/scala/akka/cluster/routing/ClusterRouterConfig.scala b/akka-cluster/src/main/scala/akka/cluster/routing/ClusterRouterConfig.scala index ee6cd88a7d..59c88c9fee 100644 --- a/akka-cluster/src/main/scala/akka/cluster/routing/ClusterRouterConfig.scala +++ b/akka-cluster/src/main/scala/akka/cluster/routing/ClusterRouterConfig.scala @@ -127,7 +127,7 @@ case class ClusterRouterSettings private[akka] ( if (isRouteesPathDefined && maxInstancesPerNode != 1) throw new IllegalArgumentException("maxInstancesPerNode of cluster router must be 1 when routeesPath is defined") - val routeesPathElements: Iterable[String] = routeesPath match { + val routeesPathElements: immutable.Iterable[String] = routeesPath match { case RelativeActorPath(elements) ⇒ elements case _ ⇒ throw new IllegalArgumentException("routeesPath [%s] is not a valid relative actor path" format routeesPath) @@ -248,9 +248,11 @@ private[akka] class ClusterRouteeProvider( */ private[akka] class ClusterRouterActor extends Router { - // subscribe to cluster changes, MemberEvent // re-subscribe when restart - override def preStart(): Unit = cluster.subscribe(self, classOf[MemberEvent]) + override def preStart(): Unit = { + cluster.subscribe(self, classOf[MemberEvent]) + cluster.subscribe(self, classOf[UnreachableMember]) + } override def postStop(): Unit = cluster.unsubscribe(self) // lazy to not interfere with RoutedActorCell initialization @@ -264,6 +266,19 @@ private[akka] class ClusterRouterActor extends Router { def fullAddress(actorRef: ActorRef): Address = routeeProvider.fullAddress(actorRef) + def unregisterRoutees(member: Member) = { + val address = member.address + routeeProvider.nodes -= address + + // unregister routees that live on that node + val affectedRoutes = routeeProvider.routees.filter(fullAddress(_) == address) + routeeProvider.unregisterRoutees(affectedRoutes) + + // createRoutees will not create more than createRoutees and maxInstancesPerNode + // this is useful when totalInstances < upNodes.size + routeeProvider.createRoutees() + } + override def routerReceive: Receive = { case s: CurrentClusterState ⇒ import Member.addressOrdering @@ -278,17 +293,10 @@ private[akka] class ClusterRouterActor extends Router { case other: MemberEvent ⇒ // other events means that it is no longer interesting, such as - // MemberJoined, MemberLeft, MemberExited, MemberUnreachable, MemberRemoved - val address = other.member.address - routeeProvider.nodes -= address - - // unregister routees that live on that node - val affectedRoutes = routeeProvider.routees.filter(fullAddress(_) == address) - routeeProvider.unregisterRoutees(affectedRoutes) - - // createRoutees will not create more than createRoutees and maxInstancesPerNode - // this is useful when totalInstances < upNodes.size - routeeProvider.createRoutees() + // MemberJoined, MemberLeft, MemberExited, MemberRemoved + 
unregisterRoutees(other.member) + case UnreachableMember(m) ⇒ + unregisterRoutees(m) } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterDeathWatchSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterDeathWatchSpec.scala index 3c1b41c950..d711aec55f 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterDeathWatchSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterDeathWatchSpec.scala @@ -3,6 +3,8 @@ */ package akka.cluster +import language.postfixOps +import scala.concurrent.duration._ import com.typesafe.config.ConfigFactory import org.scalatest.BeforeAndAfter import akka.remote.testkit.MultiNodeConfig @@ -54,7 +56,7 @@ abstract class ClusterDeathWatchSpec } "An actor watching a remote actor in the cluster" must { - "receive Terminated when watched node becomes unreachable" taggedAs LongRunningTest in { + "receive Terminated when watched node becomes Down" taggedAs LongRunningTest in within(20 seconds) { awaitClusterUp(roles: _*) enterBarrier("cluster-up") @@ -76,10 +78,12 @@ abstract class ClusterDeathWatchSpec watchEstablished.await enterBarrier("watch-established") expectMsg(path2) - expectNoMsg + expectNoMsg(2 seconds) enterBarrier("second-terminated") markNodeAsUnavailable(third) + awaitCond(clusterView.unreachableMembers.exists(_.address == address(third))) + cluster.down(third) expectMsg(path3) enterBarrier("third-terminated") @@ -91,6 +95,8 @@ abstract class ClusterDeathWatchSpec enterBarrier("watch-established") runOn(third) { markNodeAsUnavailable(second) + awaitCond(clusterView.unreachableMembers.exists(_.address == address(second))) + cluster.down(second) } enterBarrier("second-terminated") enterBarrier("third-terminated") @@ -132,7 +138,7 @@ abstract class ClusterDeathWatchSpec enterBarrier("after-3") } - "be able to shutdown system when using remote deployed actor on node that crash" taggedAs LongRunningTest in { + "be able to shutdown system when using remote deployed actor on node that crash" taggedAs LongRunningTest in within(20 seconds) { runOn(fourth) { val hello = system.actorOf(Props[Hello], "hello") hello.isInstanceOf[RemoteActorRef] must be(true) @@ -141,6 +147,9 @@ abstract class ClusterDeathWatchSpec enterBarrier("hello-deployed") markNodeAsUnavailable(first) + awaitCond(clusterView.unreachableMembers.exists(_.address == address(first))) + cluster.down(first) + val t = expectMsgType[Terminated] t.actor must be(hello) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterMetricsDataStreamingOffSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterMetricsDataStreamingOffSpec.scala deleted file mode 100644 index 33ed0b8f6b..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterMetricsDataStreamingOffSpec.scala +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Copyright (C) 2009-2012 Typesafe Inc. 
- */ - -package akka.cluster - -import scala.language.postfixOps -import scala.concurrent.duration._ -import akka.remote.testkit.{ MultiNodeSpec, MultiNodeConfig } -import com.typesafe.config.ConfigFactory -import akka.testkit.LongRunningTest - -object ClusterMetricsDataStreamingOffMultiJvmSpec extends MultiNodeConfig { - val first = role("first") - val second = role("second") - commonConfig(ConfigFactory.parseString("akka.cluster.metrics.rate-of-decay = 0") - .withFallback(MultiNodeClusterSpec.clusterConfigWithFailureDetectorPuppet)) -} -class ClusterMetricsDataStreamingOffMultiJvmNode1 extends ClusterMetricsDataStreamingOffSpec -class ClusterMetricsDataStreamingOffMultiJvmNode2 extends ClusterMetricsDataStreamingOffSpec - -abstract class ClusterMetricsDataStreamingOffSpec extends MultiNodeSpec(ClusterMetricsDataStreamingOffMultiJvmSpec) with MultiNodeClusterSpec with MetricSpec { - "Cluster metrics" must { - "not collect stream metric data" taggedAs LongRunningTest in within(30 seconds) { - awaitClusterUp(roles: _*) - awaitCond(clusterView.clusterMetrics.size == roles.size) - awaitCond(clusterView.clusterMetrics.flatMap(_.metrics).filter(_.trendable).forall(_.average.isEmpty)) - enterBarrier("after") - } - } -} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterMetricsSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterMetricsSpec.scala index e04d3612d3..6712502312 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterMetricsSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterMetricsSpec.scala @@ -28,9 +28,11 @@ class ClusterMetricsMultiJvmNode3 extends ClusterMetricsSpec class ClusterMetricsMultiJvmNode4 extends ClusterMetricsSpec class ClusterMetricsMultiJvmNode5 extends ClusterMetricsSpec -abstract class ClusterMetricsSpec extends MultiNodeSpec(ClusterMetricsMultiJvmSpec) with MultiNodeClusterSpec with MetricSpec { +abstract class ClusterMetricsSpec extends MultiNodeSpec(ClusterMetricsMultiJvmSpec) with MultiNodeClusterSpec { import ClusterMetricsMultiJvmSpec._ + def isSigar(collector: MetricsCollector): Boolean = collector.isInstanceOf[SigarMetricsCollector] + "Cluster metrics" must { "periodically collect metrics on each node, publish ClusterMetricsChanged to the event stream, " + "and gossip metrics around the node ring" taggedAs LongRunningTest in within(60 seconds) { @@ -38,9 +40,8 @@ abstract class ClusterMetricsSpec extends MultiNodeSpec(ClusterMetricsMultiJvmSp enterBarrier("cluster-started") awaitCond(clusterView.members.filter(_.status == MemberStatus.Up).size == roles.size) awaitCond(clusterView.clusterMetrics.size == roles.size) - assertInitialized(cluster.settings.MetricsRateOfDecay, collectNodeMetrics(clusterView.clusterMetrics).toSet) - val collector = MetricsCollector(cluster.selfAddress, log, system.asInstanceOf[ExtendedActorSystem].dynamicAccess) - clusterView.clusterMetrics.foreach(n ⇒ assertExpectedSampleSize(collector.isSigar, cluster.settings.MetricsRateOfDecay, n)) + val collector = MetricsCollector(cluster.system, cluster.settings) + collector.sample.metrics.size must be > (3) enterBarrier("after") } "reflect the correct number of node metrics in cluster view" taggedAs LongRunningTest in within(30 seconds) { diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala index b2a9453035..ae5dea869e 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala +++ 
b/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala @@ -76,8 +76,6 @@ abstract class ConvergenceSpec(multiNodeConfig: ConvergenceMultiNodeConfig) // still one unreachable clusterView.unreachableMembers.size must be(1) clusterView.unreachableMembers.head.address must be(thirdAddress) - // and therefore no convergence - clusterView.convergence must be(false) } } @@ -94,23 +92,33 @@ abstract class ConvergenceSpec(multiNodeConfig: ConvergenceMultiNodeConfig) def memberStatus(address: Address): Option[MemberStatus] = clusterView.members.collectFirst { case m if m.address == address ⇒ m.status } - def assertNotMovedUp: Unit = { + def assertNotMovedUp(joining: Boolean): Unit = { within(20 seconds) { - awaitCond(clusterView.members.size == 3) + if (joining) awaitCond(clusterView.members.size == 0) + else awaitCond(clusterView.members.size == 2) awaitSeenSameState(first, second, fourth) - memberStatus(first) must be(Some(MemberStatus.Up)) - memberStatus(second) must be(Some(MemberStatus.Up)) + if (joining) memberStatus(first) must be(None) + else memberStatus(first) must be(Some(MemberStatus.Up)) + if (joining) memberStatus(second) must be(None) + else memberStatus(second) must be(Some(MemberStatus.Up)) // leader is not allowed to move the new node to Up - memberStatus(fourth) must be(Some(MemberStatus.Joining)) - // still no convergence - clusterView.convergence must be(false) + memberStatus(fourth) must be(None) } } - runOn(first, second, fourth) { + enterBarrier("after-join") + + runOn(first, second) { for (n ← 1 to 5) { - log.debug("assertNotMovedUp#" + n) - assertNotMovedUp + assertNotMovedUp(joining = false) + // wait and then check again + Thread.sleep(1.second.dilated.toMillis) + } + } + + runOn(fourth) { + for (n ← 1 to 5) { + assertNotMovedUp(joining = true) // wait and then check again Thread.sleep(1.second.dilated.toMillis) } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinSeedNodeSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinSeedNodeSpec.scala index 4bec3ceb7d..464b627944 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinSeedNodeSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinSeedNodeSpec.scala @@ -3,6 +3,7 @@ */ package akka.cluster +import scala.collection.immutable import com.typesafe.config.ConfigFactory import org.scalatest.BeforeAndAfter import akka.remote.testkit.MultiNodeConfig @@ -35,7 +36,7 @@ abstract class JoinSeedNodeSpec import JoinSeedNodeMultiJvmSpec._ - def seedNodes: IndexedSeq[Address] = IndexedSeq(seed1, seed2, seed3) + def seedNodes: immutable.IndexedSeq[Address] = Vector(seed1, seed2, seed3) "A cluster with seed nodes" must { "be able to start the seed nodes concurrently" taggedAs LongRunningTest in { diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LargeClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LargeClusterSpec.scala index 156552ed1a..97711b30de 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LargeClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LargeClusterSpec.scala @@ -177,7 +177,6 @@ abstract class LargeClusterSpec Await.ready(latch, remaining) - awaitCond(clusterNodes.forall(_.readView.convergence)) val counts = clusterNodes.map(gossipCount(_)) val formattedStats = "mean=%s min=%s max=%s".format(counts.sum / clusterNodes.size, counts.min, counts.max) log.info("Convergence of [{}] nodes reached, it took [{}], received [{}] gossip messages per node", @@ -274,7 +273,7 @@ abstract class LargeClusterSpec } "detect 
failure and auto-down crashed nodes in second-datacenter" taggedAs LongRunningTest in { - val unreachableNodes = nodesPerDatacenter + val downedNodes = nodesPerDatacenter val liveNodes = nodesPerDatacenter * 4 within(30.seconds + 3.seconds * liveNodes) { @@ -289,22 +288,19 @@ abstract class LargeClusterSpec val latch = TestLatch(nodesPerDatacenter) systems foreach { sys ⇒ Cluster(sys).subscribe(sys.actorOf(Props(new Actor { - var gotUnreachable = Set.empty[Member] + var gotDowned = Set.empty[Member] def receive = { case state: CurrentClusterState ⇒ - gotUnreachable = state.unreachable - checkDone() - case MemberUnreachable(m) if !latch.isOpen ⇒ - gotUnreachable = gotUnreachable + m + gotDowned = gotDowned ++ state.unreachable.filter(_.status == Down) checkDone() case MemberDowned(m) if !latch.isOpen ⇒ - gotUnreachable = gotUnreachable + m + gotDowned = gotDowned + m checkDone() case _ ⇒ // not interesting } - def checkDone(): Unit = if (gotUnreachable.size == unreachableNodes) { - log.info("Detected [{}] unreachable nodes in [{}], it took [{}], received [{}] gossip messages", - unreachableNodes, Cluster(sys).selfAddress, tookMillis, gossipCount(Cluster(sys))) + def checkDone(): Unit = if (gotDowned.size == downedNodes) { + log.info("Detected [{}] downed nodes in [{}], it took [{}], received [{}] gossip messages", + downedNodes, Cluster(sys).selfAddress, tookMillis, gossipCount(Cluster(sys))) latch.countDown() } })), classOf[ClusterDomainEvent]) @@ -318,7 +314,6 @@ abstract class LargeClusterSpec runOn(firstDatacenter, thirdDatacenter, fourthDatacenter, fifthDatacenter) { Await.ready(latch, remaining) - awaitCond(systems.forall(Cluster(_).readView.convergence)) val mergeCount = systems.map(sys ⇒ Cluster(sys).readView.latestStats.mergeCount).sum val counts = systems.map(sys ⇒ gossipCount(Cluster(sys))) val formattedStats = "mean=%s min=%s max=%s".format(counts.sum / nodesPerDatacenter, counts.min, counts.max) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderLeavingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderLeavingSpec.scala index ddbab3edbb..acaf909d57 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderLeavingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderLeavingSpec.scala @@ -57,7 +57,7 @@ abstract class LeaderLeavingSpec enterBarrier("leader-left") // verify that the LEADER is shut down - awaitCond(!cluster.isRunning) + awaitCond(cluster.isTerminated) // verify that the LEADER is REMOVED awaitCond(clusterView.status == Removed) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MBeanSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MBeanSpec.scala new file mode 100644 index 0000000000..e6d83f881e --- /dev/null +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MBeanSpec.scala @@ -0,0 +1,149 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. 
+ */ +package akka.cluster + +import language.postfixOps +import com.typesafe.config.ConfigFactory +import scala.concurrent.duration._ +import java.lang.management.ManagementFactory +import javax.management.InstanceNotFoundException +import javax.management.ObjectName +import akka.remote.testkit.MultiNodeConfig +import akka.remote.testkit.MultiNodeSpec +import akka.testkit._ +import scala.util.Try + +object MBeanMultiJvmSpec extends MultiNodeConfig { + val first = role("first") + val second = role("second") + val third = role("third") + val fourth = role("fourth") + + commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(""" + akka.cluster.jmx.enabled = on + """)).withFallback(MultiNodeClusterSpec.clusterConfig)) + +} + +class MBeanMultiJvmNode1 extends MBeanSpec +class MBeanMultiJvmNode2 extends MBeanSpec +class MBeanMultiJvmNode3 extends MBeanSpec +class MBeanMultiJvmNode4 extends MBeanSpec + +abstract class MBeanSpec + extends MultiNodeSpec(MBeanMultiJvmSpec) + with MultiNodeClusterSpec { + + import MBeanMultiJvmSpec._ + import ClusterEvent._ + + val mbeanName = new ObjectName("akka:type=Cluster") + lazy val mbeanServer = ManagementFactory.getPlatformMBeanServer + + "Cluster MBean" must { + "expose attributes" taggedAs LongRunningTest in { + val info = mbeanServer.getMBeanInfo(mbeanName) + info.getAttributes.map(_.getName).toSet must be(Set( + "ClusterStatus", "Members", "Unreachable", "MemberStatus", "Leader", "Singleton", "Available")) + enterBarrier("after-1") + } + + "expose operations" taggedAs LongRunningTest in { + val info = mbeanServer.getMBeanInfo(mbeanName) + info.getOperations.map(_.getName).toSet must be(Set( + "join", "leave", "down")) + enterBarrier("after-2") + } + + "change attributes after startup" taggedAs LongRunningTest in { + runOn(first) { + mbeanServer.getAttribute(mbeanName, "Available").asInstanceOf[Boolean] must be(false) + mbeanServer.getAttribute(mbeanName, "Singleton").asInstanceOf[Boolean] must be(false) + mbeanServer.getAttribute(mbeanName, "Leader") must be("") + mbeanServer.getAttribute(mbeanName, "Members") must be("") + mbeanServer.getAttribute(mbeanName, "Unreachable") must be("") + mbeanServer.getAttribute(mbeanName, "MemberStatus") must be("Removed") + } + awaitClusterUp(first) + runOn(first) { + awaitCond(mbeanServer.getAttribute(mbeanName, "MemberStatus") == "Up") + awaitCond(mbeanServer.getAttribute(mbeanName, "Leader") == address(first).toString) + mbeanServer.getAttribute(mbeanName, "Singleton").asInstanceOf[Boolean] must be(true) + mbeanServer.getAttribute(mbeanName, "Members") must be(address(first).toString) + mbeanServer.getAttribute(mbeanName, "Unreachable") must be("") + mbeanServer.getAttribute(mbeanName, "Available").asInstanceOf[Boolean] must be(true) + } + enterBarrier("after-3") + } + + "support join" taggedAs LongRunningTest in { + runOn(second, third, fourth) { + mbeanServer.invoke(mbeanName, "join", Array(address(first).toString), Array("java.lang.String")) + } + enterBarrier("joined") + + awaitUpConvergence(4) + assertMembers(clusterView.members, roles.map(address(_)): _*) + awaitCond(mbeanServer.getAttribute(mbeanName, "MemberStatus") == "Up") + val expectedMembers = roles.sorted.map(address(_)).mkString(",") + awaitCond(mbeanServer.getAttribute(mbeanName, "Members") == expectedMembers) + val expectedLeader = address(roleOfLeader()) + awaitCond(mbeanServer.getAttribute(mbeanName, "Leader") == expectedLeader.toString) + mbeanServer.getAttribute(mbeanName, "Singleton").asInstanceOf[Boolean] must be(false) + 
+ enterBarrier("after-4") + } + + "support down" taggedAs LongRunningTest in { + val fourthAddress = address(fourth) + runOn(first) { + testConductor.shutdown(fourth, 0).await + } + enterBarrier("fourth-shutdown") + + runOn(first, second, third) { + awaitCond(mbeanServer.getAttribute(mbeanName, "Unreachable") == fourthAddress.toString) + val expectedMembers = Seq(first, second, third).sorted.map(address(_)).mkString(",") + awaitCond(mbeanServer.getAttribute(mbeanName, "Members") == expectedMembers) + } + enterBarrier("fourth-unreachable") + + runOn(second) { + mbeanServer.invoke(mbeanName, "down", Array(fourthAddress.toString), Array("java.lang.String")) + } + enterBarrier("fourth-down") + + runOn(first, second, third) { + awaitUpConvergence(3, canNotBePartOfMemberRing = List(fourthAddress)) + assertMembers(clusterView.members, first, second, third) + } + + enterBarrier("after-5") + } + + "support leave" taggedAs LongRunningTest in within(20 seconds) { + runOn(second) { + mbeanServer.invoke(mbeanName, "leave", Array(address(third).toString), Array("java.lang.String")) + } + enterBarrier("third-left") + runOn(first, second) { + awaitUpConvergence(2) + assertMembers(clusterView.members, first, second) + val expectedMembers = Seq(first, second).sorted.map(address(_)).mkString(",") + awaitCond(mbeanServer.getAttribute(mbeanName, "Members") == expectedMembers) + } + runOn(third) { + awaitCond(cluster.isTerminated) + // mbean should be unregistered, i.e. throw InstanceNotFoundException + awaitCond(Try { mbeanServer.getMBeanInfo(mbeanName); false } recover { + case e: InstanceNotFoundException ⇒ true + case _ ⇒ false + } get) + } + + enterBarrier("after-6") + } + + } +} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MinMembersBeforeUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MinMembersBeforeUpSpec.scala new file mode 100644 index 0000000000..46891bbc49 --- /dev/null +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MinMembersBeforeUpSpec.scala @@ -0,0 +1,81 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ +package akka.cluster + +import com.typesafe.config.ConfigFactory +import org.scalatest.BeforeAndAfter +import scala.collection.immutable.SortedSet +import scala.concurrent.duration._ +import akka.remote.testkit.MultiNodeConfig +import akka.remote.testkit.MultiNodeSpec +import akka.testkit._ +import java.util.concurrent.atomic.AtomicReference +import akka.actor.Props +import akka.actor.Actor +import akka.cluster.MemberStatus._ + +object MinMembersBeforeUpMultiJvmSpec extends MultiNodeConfig { + val first = role("first") + val second = role("second") + val third = role("third") + + commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(""" + # turn off unreachable reaper + akka.cluster.min-nr-of-members = 3""")). 
+ withFallback(MultiNodeClusterSpec.clusterConfigWithFailureDetectorPuppet)) +} + +class MinMembersBeforeUpMultiJvmNode1 extends MinMembersBeforeUpSpec +class MinMembersBeforeUpMultiJvmNode2 extends MinMembersBeforeUpSpec +class MinMembersBeforeUpMultiJvmNode3 extends MinMembersBeforeUpSpec + +abstract class MinMembersBeforeUpSpec + extends MultiNodeSpec(MinMembersBeforeUpMultiJvmSpec) + with MultiNodeClusterSpec { + + import MinMembersBeforeUpMultiJvmSpec._ + import ClusterEvent._ + + "Cluster leader" must { + "wait with moving members to UP until minimum number of members have joined" taggedAs LongRunningTest in { + + val onUpLatch = TestLatch(1) + cluster.registerOnMemberUp(onUpLatch.countDown()) + + runOn(first) { + startClusterNode() + awaitCond(clusterView.status == Joining) + } + enterBarrier("first-started") + + onUpLatch.isOpen must be(false) + + runOn(second) { + cluster.join(first) + } + runOn(first, second) { + val expectedAddresses = Set(first, second) map address + awaitCond(clusterView.members.map(_.address) == expectedAddresses) + clusterView.members.map(_.status) must be(Set(Joining)) + // and it should not change + 1 to 5 foreach { _ ⇒ + Thread.sleep(1000) + clusterView.members.map(_.address) must be(expectedAddresses) + clusterView.members.map(_.status) must be(Set(Joining)) + } + } + enterBarrier("second-joined") + + runOn(third) { + cluster.join(first) + } + awaitClusterUp(first, second, third) + + onUpLatch.await + + enterBarrier("after-1") + } + + } +} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala index d696f9b62b..fd2714005b 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala @@ -64,7 +64,7 @@ trait MultiNodeClusterSpec extends Suite with STMultiNodeSpec { self: MultiNodeS def muteLog(sys: ActorSystem = system): Unit = { if (!sys.log.isDebugEnabled) { Seq(".*Metrics collection has started successfully.*", - ".*Hyperic SIGAR was not found on the classpath.*", + ".*Metrics will be retreived from MBeans.*", ".*Cluster Node.* - registered cluster JMX MBean.*", ".*Cluster Node.* - is starting up.*", ".*Shutting down cluster Node.*", @@ -223,7 +223,9 @@ trait MultiNodeClusterSpec extends Suite with STMultiNodeSpec { self: MultiNodeS within(timeout) { awaitCond(clusterView.members.size == numberOfMembers) awaitCond(clusterView.members.forall(_.status == MemberStatus.Up)) - awaitCond(clusterView.convergence) + // clusterView.leader is updated by LeaderChanged, await that to be updated also + val expectedLeader = clusterView.members.headOption.map(_.address) + awaitCond(clusterView.leader == expectedLeader) if (!canNotBePartOfMemberRing.isEmpty) // don't run this on an empty set awaitCond( canNotBePartOfMemberRing forall (address ⇒ !(clusterView.members exists (_.address == address)))) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala index d7eaddf402..2dfddc330f 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala @@ -51,7 +51,7 @@ abstract class NodeLeavingAndExitingAndBeingRemovedSpec runOn(second) { // verify that the second node is shut down and has 
status REMOVED - awaitCond(!cluster.isRunning, reaperWaitingTime) + awaitCond(cluster.isTerminated, reaperWaitingTime) awaitCond(clusterView.status == MemberStatus.Removed, reaperWaitingTime) } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala index 86e5dd71e9..336acc2769 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala @@ -43,7 +43,6 @@ abstract class NodeMembershipSpec awaitCond { clusterView.members.forall(_.status == MemberStatus.Up) } - awaitCond(clusterView.convergence) } enterBarrier("after-1") @@ -60,7 +59,6 @@ abstract class NodeMembershipSpec awaitCond { clusterView.members.forall(_.status == MemberStatus.Up) } - awaitCond(clusterView.convergence) enterBarrier("after-2") } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/SplitBrainSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/SplitBrainSpec.scala index 95b362e6b3..4fe1f551aa 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/SplitBrainSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/SplitBrainSpec.scala @@ -59,7 +59,7 @@ abstract class SplitBrainSpec(multiNodeConfig: SplitBrainMultiNodeConfig) val side1 = Vector(first, second) val side2 = Vector(third, fourth, fifth) - "A cluster of 5 members" must { + "A cluster of 5 members" ignore { "reach initial convergence" taggedAs LongRunningTest in { awaitClusterUp(first, second, third, fourth, fifth) diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/TransitionSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/TransitionSpec.scala index 3fc7432f98..4c9054d3d1 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/TransitionSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/TransitionSpec.scala @@ -114,7 +114,6 @@ abstract class TransitionSpec startClusterNode() awaitCond(clusterView.isSingletonCluster) awaitMemberStatus(myself, Joining) - awaitCond(clusterView.convergence) leaderActions() awaitMemberStatus(myself, Up) } @@ -133,14 +132,13 @@ abstract class TransitionSpec awaitMemberStatus(first, Up) awaitMemberStatus(second, Joining) awaitCond(seenLatestGossip == Set(first, second)) - clusterView.convergence must be(true) } enterBarrier("convergence-joining-2") runOn(leader(first, second)) { leaderActions() awaitMemberStatus(first, Up) - awaitMemberStatus(second, Up) + awaitMemberStatus(second, Joining) } enterBarrier("leader-actions-2") @@ -150,7 +148,6 @@ abstract class TransitionSpec awaitMemberStatus(second, Up) awaitCond(seenLatestGossip == Set(first, second)) awaitMemberStatus(first, Up) - clusterView.convergence must be(true) } enterBarrier("after-2") @@ -163,10 +160,7 @@ abstract class TransitionSpec } runOn(second, third) { // gossip chat from the join will synchronize the views - awaitMembers(first, second, third) - awaitMemberStatus(third, Joining) awaitCond(seenLatestGossip == Set(second, third)) - clusterView.convergence must be(false) } enterBarrier("third-joined-second") @@ -177,7 +171,6 @@ abstract class TransitionSpec awaitMemberStatus(third, Joining) awaitMemberStatus(second, Up) awaitCond(seenLatestGossip == Set(first, second, third)) - clusterView.convergence must be(true) } first gossipTo third @@ -187,7 +180,6 @@ abstract class TransitionSpec awaitMemberStatus(second, Up) awaitMemberStatus(third, Joining) awaitCond(seenLatestGossip == Set(first, second, third)) - 
clusterView.convergence must be(true) } enterBarrier("convergence-joining-3") @@ -196,16 +188,15 @@ abstract class TransitionSpec leaderActions() awaitMemberStatus(first, Up) awaitMemberStatus(second, Up) - awaitMemberStatus(third, Up) + awaitMemberStatus(third, Joining) } enterBarrier("leader-actions-3") // leader gossipTo first non-leader leader(first, second, third) gossipTo nonLeader(first, second, third).head runOn(nonLeader(first, second, third).head) { - awaitMemberStatus(third, Up) + awaitMemberStatus(third, Joining) awaitCond(seenLatestGossip == Set(leader(first, second, third), myself)) - clusterView.convergence must be(false) } // first non-leader gossipTo the other non-leader @@ -217,7 +208,6 @@ abstract class TransitionSpec runOn(nonLeader(first, second, third).tail.head) { awaitMemberStatus(third, Up) awaitCond(seenLatestGossip == Set(first, second, third)) - clusterView.convergence must be(true) } // first non-leader gossipTo the leader @@ -227,7 +217,6 @@ abstract class TransitionSpec awaitMemberStatus(second, Up) awaitMemberStatus(third, Up) awaitCond(seenLatestGossip == Set(first, second, third)) - clusterView.convergence must be(true) } enterBarrier("after-3") @@ -247,12 +236,10 @@ abstract class TransitionSpec runOn(first, third) { awaitCond(clusterView.unreachableMembers.contains(Member(second, Up))) - awaitCond(!clusterView.convergence) } runOn(first) { cluster.down(second) - awaitMemberStatus(second, Down) } enterBarrier("after-second-down") @@ -263,7 +250,6 @@ abstract class TransitionSpec awaitCond(clusterView.unreachableMembers.contains(Member(second, Down))) awaitMemberStatus(second, Down) awaitCond(seenLatestGossip == Set(first, third)) - clusterView.convergence must be(true) } enterBarrier("after-6") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/UnreachableNodeRejoinsClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/UnreachableNodeRejoinsClusterSpec.scala index 23c0b5009d..45760a3bcd 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/UnreachableNodeRejoinsClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/UnreachableNodeRejoinsClusterSpec.scala @@ -67,7 +67,7 @@ abstract class UnreachableNodeRejoinsClusterSpec(multiNodeConfig: UnreachableNod enterBarrier("after_" + endBarrierNumber) } - "A cluster of " + roles.size + " members" must { + "A cluster of " + roles.size + " members" ignore { "reach initial convergence" taggedAs LongRunningTest in { awaitClusterUp(roles: _*) @@ -101,7 +101,6 @@ abstract class UnreachableNodeRejoinsClusterSpec(multiNodeConfig: UnreachableNod members.forall(_.status == MemberStatus.Up) }) clusterView.unreachableMembers.map(_.address) must be((allButVictim map address).toSet) - clusterView.convergence must be(false) } } @@ -119,8 +118,6 @@ abstract class UnreachableNodeRejoinsClusterSpec(multiNodeConfig: UnreachableNod // still one unreachable clusterView.unreachableMembers.size must be(1) clusterView.unreachableMembers.head.address must be(node(victim).address) - // and therefore no convergence - clusterView.convergence must be(false) } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/AdaptiveLoadBalancingRouterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/AdaptiveLoadBalancingRouterSpec.scala new file mode 100644 index 0000000000..723ef6b8ec --- /dev/null +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/AdaptiveLoadBalancingRouterSpec.scala @@ -0,0 +1,218 @@ +/* + * Copyright (C) 2009-2012 Typesafe Inc. 
+ */ + +package akka.cluster.routing + +import language.postfixOps +import java.lang.management.ManagementFactory +import scala.concurrent.Await +import scala.concurrent.duration._ +import com.typesafe.config.Config +import com.typesafe.config.ConfigFactory + +import akka.actor._ +import akka.cluster.Cluster +import akka.cluster.MultiNodeClusterSpec +import akka.cluster.NodeMetrics +import akka.pattern.ask +import akka.remote.testkit.{ MultiNodeSpec, MultiNodeConfig } +import akka.routing.CurrentRoutees +import akka.routing.FromConfig +import akka.routing.RouterRoutees +import akka.testkit.{ LongRunningTest, DefaultTimeout, ImplicitSender } + +object AdaptiveLoadBalancingRouterMultiJvmSpec extends MultiNodeConfig { + + class Routee extends Actor { + def receive = { + case _ ⇒ sender ! Reply(Cluster(context.system).selfAddress) + } + } + + class Memory extends Actor with ActorLogging { + var usedMemory: Array[Array[Int]] = _ + def receive = { + case AllocateMemory ⇒ + val heap = ManagementFactory.getMemoryMXBean.getHeapMemoryUsage + // getMax can be undefined (-1) + val max = math.max(heap.getMax, heap.getCommitted) + val used = heap.getUsed + log.debug("used heap before: [{}] bytes, of max [{}]", used, heap.getMax) + // allocate 70% of free space + val allocateBytes = (0.7 * (max - used)).toInt + val numberOfArrays = allocateBytes / 1024 + usedMemory = Array.ofDim(numberOfArrays, 248) // each 248 element Int array will use ~ 1 kB + log.debug("used heap after: [{}] bytes", ManagementFactory.getMemoryMXBean.getHeapMemoryUsage.getUsed) + sender ! "done" + } + } + + case object AllocateMemory + case class Reply(address: Address) + + val first = role("first") + val second = role("second") + val third = role("third") + + commonConfig(debugConfig(on = false).withFallback(ConfigFactory.parseString(""" + akka.cluster.metrics.collect-interval = 1s + akka.cluster.metrics.gossip-interval = 1s + akka.cluster.metrics.moving-average-half-life = 2s + akka.actor.deployment { + /router3 = { + router = adaptive + metrics-selector = cpu + nr-of-instances = 9 + } + /router4 = { + router = adaptive + metrics-selector = "akka.cluster.routing.TestCustomMetricsSelector" + nr-of-instances = 10 + cluster { + enabled = on + max-nr-of-instances-per-node = 2 + } + } + } + """)).withFallback(MultiNodeClusterSpec.clusterConfig)) + +} + +class TestCustomMetricsSelector(config: Config) extends MetricsSelector { + override def weights(nodeMetrics: Set[NodeMetrics]): Map[Address, Int] = Map.empty +} + +class AdaptiveLoadBalancingRouterMultiJvmNode1 extends AdaptiveLoadBalancingRouterSpec +class AdaptiveLoadBalancingRouterMultiJvmNode2 extends AdaptiveLoadBalancingRouterSpec +class AdaptiveLoadBalancingRouterMultiJvmNode3 extends AdaptiveLoadBalancingRouterSpec + +abstract class AdaptiveLoadBalancingRouterSpec extends MultiNodeSpec(AdaptiveLoadBalancingRouterMultiJvmSpec) + with MultiNodeClusterSpec + with ImplicitSender with DefaultTimeout { + import AdaptiveLoadBalancingRouterMultiJvmSpec._ + + def currentRoutees(router: ActorRef) = + Await.result(router ? 
CurrentRoutees, remaining).asInstanceOf[RouterRoutees].routees + + def receiveReplies(expectedReplies: Int): Map[Address, Int] = { + val zero = Map.empty[Address, Int] ++ roles.map(address(_) -> 0) + (receiveWhile(5 seconds, messages = expectedReplies) { + case Reply(address) ⇒ address + }).foldLeft(zero) { + case (replyMap, address) ⇒ replyMap + (address -> (replyMap(address) + 1)) + } + } + + /** + * Fills in self address for local ActorRef + */ + def fullAddress(actorRef: ActorRef): Address = actorRef.path.address match { + case Address(_, _, None, None) ⇒ cluster.selfAddress + case a ⇒ a + } + + def startRouter(name: String): ActorRef = { + val router = system.actorOf(Props[Routee].withRouter(ClusterRouterConfig( + local = AdaptiveLoadBalancingRouter(HeapMetricsSelector), + settings = ClusterRouterSettings(totalInstances = 10, maxInstancesPerNode = 1))), name) + awaitCond { + // it may take some time until router receives cluster member events + currentRoutees(router).size == roles.size + } + currentRoutees(router).map(fullAddress).toSet must be(roles.map(address).toSet) + router + } + + "A cluster with a AdaptiveLoadBalancingRouter" must { + "start cluster nodes" taggedAs LongRunningTest in { + awaitClusterUp(roles: _*) + enterBarrier("after-1") + } + + "use all nodes in the cluster when not overloaded" taggedAs LongRunningTest in { + runOn(first) { + val router1 = startRouter("router1") + + // collect some metrics before we start + Thread.sleep(cluster.settings.MetricsInterval.toMillis * 10) + + val iterationCount = 100 + 1 to iterationCount foreach { _ ⇒ + router1 ! "hit" + // wait a while between each message, since metrics is collected periodically + Thread.sleep(10) + } + + val replies = receiveReplies(iterationCount) + + replies(first) must be > (0) + replies(second) must be > (0) + replies(third) must be > (0) + replies.values.sum must be(iterationCount) + + } + + enterBarrier("after-2") + } + + "prefer node with more free heap capacity" taggedAs LongRunningTest in { + System.gc() + enterBarrier("gc") + + runOn(second) { + within(20.seconds) { + system.actorOf(Props[Memory], "memory") ! AllocateMemory + expectMsg("done") + } + } + enterBarrier("heap-allocated") + + runOn(first) { + val router2 = startRouter("router2") + router2 + + // collect some metrics before we start + Thread.sleep(cluster.settings.MetricsInterval.toMillis * 10) + + val iterationCount = 3000 + 1 to iterationCount foreach { _ ⇒ + router2 ! 
"hit" + } + + val replies = receiveReplies(iterationCount) + + replies(third) must be > (replies(second)) + replies.values.sum must be(iterationCount) + + } + + enterBarrier("after-3") + } + + "create routees from configuration" taggedAs LongRunningTest in { + runOn(first) { + val router3 = system.actorOf(Props[Memory].withRouter(FromConfig()), "router3") + awaitCond { + // it may take some time until router receives cluster member events + currentRoutees(router3).size == 9 + } + currentRoutees(router3).map(fullAddress).toSet must be(Set(address(first))) + } + enterBarrier("after-4") + } + + "create routees from cluster.enabled configuration" taggedAs LongRunningTest in { + runOn(first) { + val router4 = system.actorOf(Props[Memory].withRouter(FromConfig()), "router4") + awaitCond { + // it may take some time until router receives cluster member events + currentRoutees(router4).size == 6 + } + currentRoutees(router4).map(fullAddress).toSet must be(Set( + address(first), address(second), address(third))) + } + enterBarrier("after-5") + } + } +} diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala index 2d1a6542bd..ce7f7a4a70 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala @@ -36,6 +36,7 @@ class ClusterConfigSpec extends AkkaSpec { JoinTimeout must be(60 seconds) AutoJoin must be(true) AutoDown must be(false) + MinNrOfMembers must be(1) JmxEnabled must be(true) UseDispatcher must be(Dispatchers.DefaultDispatcherId) GossipDifferentViewProbability must be(0.8 plusOrMinus 0.0001) @@ -47,9 +48,10 @@ class ClusterConfigSpec extends AkkaSpec { callTimeout = 2 seconds, resetTimeout = 30 seconds)) MetricsEnabled must be(true) + MetricsCollectorClass must be(classOf[SigarMetricsCollector].getName) MetricsInterval must be(3 seconds) MetricsGossipInterval must be(3 seconds) - MetricsRateOfDecay must be(10) + MetricsMovingAverageHalfLife must be(12 seconds) } } } diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventPublisherSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventPublisherSpec.scala index 188c91505c..602ffadd8b 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventPublisherSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventPublisherSpec.scala @@ -18,21 +18,9 @@ import akka.testkit.ImplicitSender import akka.actor.ActorRef import akka.testkit.TestProbe -object ClusterDomainEventPublisherSpec { - val config = """ - akka.cluster.auto-join = off - akka.actor.provider = "akka.cluster.ClusterActorRefProvider" - akka.remote.log-remote-lifecycle-events = off - akka.remote.netty.port = 0 - """ - - case class GossipTo(address: Address) -} - @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) -class ClusterDomainEventPublisherSpec extends AkkaSpec(ClusterDomainEventPublisherSpec.config) +class ClusterDomainEventPublisherSpec extends AkkaSpec with BeforeAndAfterEach with ImplicitSender { - import ClusterDomainEventPublisherSpec._ var publisher: ActorRef = _ val a1 = Member(Address("akka", "sys", "a", 2552), Up) @@ -54,6 +42,10 @@ class ClusterDomainEventPublisherSpec extends AkkaSpec(ClusterDomainEventPublish override def beforeEach(): Unit = { publisher = system.actorOf(Props[ClusterDomainEventPublisher]) + publisher ! 
PublishChanges(g0) + expectMsg(MemberUp(a1)) + expectMsg(LeaderChanged(Some(a1.address))) + expectMsgType[SeenChanged] } override def afterEach(): Unit = { @@ -62,59 +54,63 @@ class ClusterDomainEventPublisherSpec extends AkkaSpec(ClusterDomainEventPublish "ClusterDomainEventPublisher" must { - "publish MemberUp when member status changed to Up" in { - publisher ! PublishChanges(g1, g2) - expectMsg(MemberUp(c2)) - expectMsg(ConvergenceChanged(false)) + "not publish MemberUp when there is no convergence" in { + publisher ! PublishChanges(g2) expectMsgType[SeenChanged] } - "publish convergence true when all seen it" in { - publisher ! PublishChanges(g2, g3) - expectMsg(ConvergenceChanged(true)) + "publish MemberEvents when there is convergence" in { + publisher ! PublishChanges(g2) + expectMsgType[SeenChanged] + publisher ! PublishChanges(g3) + expectMsg(MemberUp(b1)) + expectMsg(MemberUp(c2)) expectMsgType[SeenChanged] } "publish leader changed when new leader after convergence" in { - publisher ! PublishChanges(g3, g4) - expectMsg(MemberUp(d1)) - expectMsg(ConvergenceChanged(false)) + publisher ! PublishChanges(g4) expectMsgType[SeenChanged] expectNoMsg(1 second) - publisher ! PublishChanges(g4, g5) + publisher ! PublishChanges(g5) + expectMsg(MemberUp(d1)) + expectMsg(MemberUp(b1)) + expectMsg(MemberUp(c2)) expectMsg(LeaderChanged(Some(d1.address))) - expectMsg(ConvergenceChanged(true)) expectMsgType[SeenChanged] } "publish leader changed when new leader and convergence both before and after" in { // convergence both before and after - publisher ! PublishChanges(g3, g5) + publisher ! PublishChanges(g3) + expectMsg(MemberUp(b1)) + expectMsg(MemberUp(c2)) + expectMsgType[SeenChanged] + publisher ! PublishChanges(g5) expectMsg(MemberUp(d1)) expectMsg(LeaderChanged(Some(d1.address))) expectMsgType[SeenChanged] } "not publish leader changed when not convergence" in { - publisher ! PublishChanges(g2, g4) - expectMsg(MemberUp(d1)) + publisher ! PublishChanges(g4) + expectMsgType[SeenChanged] expectNoMsg(1 second) } "not publish leader changed when changed convergence but still same leader" in { - publisher ! PublishChanges(g2, g5) + publisher ! PublishChanges(g5) expectMsg(MemberUp(d1)) + expectMsg(MemberUp(b1)) + expectMsg(MemberUp(c2)) expectMsg(LeaderChanged(Some(d1.address))) - expectMsg(ConvergenceChanged(true)) expectMsgType[SeenChanged] - publisher ! PublishChanges(g5, g4) - expectMsg(ConvergenceChanged(false)) + publisher ! PublishChanges(g4) expectMsgType[SeenChanged] - publisher ! PublishChanges(g4, g5) - expectMsg(ConvergenceChanged(true)) + publisher ! PublishChanges(g5) expectMsgType[SeenChanged] } @@ -131,12 +127,12 @@ class ClusterDomainEventPublisherSpec extends AkkaSpec(ClusterDomainEventPublish publisher ! Subscribe(subscriber.ref, classOf[ClusterDomainEvent]) subscriber.expectMsgType[CurrentClusterState] publisher ! Unsubscribe(subscriber.ref, Some(classOf[ClusterDomainEvent])) - publisher ! PublishChanges(Gossip(members = SortedSet(a1)), Gossip(members = SortedSet(a1, b1))) + publisher ! 
PublishChanges(g3) subscriber.expectNoMsg(1 second) // but testActor is still subscriber expectMsg(MemberUp(b1)) + expectMsg(MemberUp(c2)) + expectMsgType[SeenChanged] } - } - } diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventSpec.scala index 3a4e3ee3a4..8be81496df 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventSpec.scala @@ -17,6 +17,7 @@ class ClusterDomainEventSpec extends WordSpec with MustMatchers { val a1 = Member(Address("akka", "sys", "a", 2552), Up) val a2 = Member(Address("akka", "sys", "a", 2552), Joining) + val a3 = Member(Address("akka", "sys", "a", 2552), Removed) val b1 = Member(Address("akka", "sys", "b", 2552), Up) val b2 = Member(Address("akka", "sys", "b", 2552), Removed) val b3 = Member(Address("akka", "sys", "b", 2552), Down) @@ -28,61 +29,82 @@ class ClusterDomainEventSpec extends WordSpec with MustMatchers { val e2 = Member(Address("akka", "sys", "e", 2552), Up) val e3 = Member(Address("akka", "sys", "e", 2552), Down) + def converge(gossip: Gossip): (Gossip, Set[Address]) = + ((gossip, Set.empty[Address]) /: gossip.members) { (gs, m) ⇒ (gs._1.seen(m.address), gs._2 + m.address) } + "Domain events" must { - "be produced for new members" in { + "be empty for the same gossip" in { val g1 = Gossip(members = SortedSet(a1)) - val g2 = Gossip(members = SortedSet(a1, b1, e1)) - diff(g1, g2) must be(Seq(MemberUp(b1), MemberJoined(e1))) + diffUnreachable(g1, g1) must be(Seq.empty) + } + + "be produced for new members" in { + val (g1, _) = converge(Gossip(members = SortedSet(a1))) + val (g2, s2) = converge(Gossip(members = SortedSet(a1, b1, e1))) + + diffMemberEvents(g1, g2) must be(Seq(MemberUp(b1), MemberJoined(e1))) + diffUnreachable(g1, g2) must be(Seq.empty) + diffSeen(g1, g2) must be(Seq(SeenChanged(convergence = true, seenBy = s2))) } "be produced for changed status of members" in { - val g1 = Gossip(members = SortedSet(a2, b1, c2)) - val g2 = Gossip(members = SortedSet(a1, b1, c1, e1)) + val (g1, _) = converge(Gossip(members = SortedSet(a2, b1, c2))) + val (g2, s2) = converge(Gossip(members = SortedSet(a1, b1, c1, e1))) - diff(g1, g2) must be(Seq(MemberUp(a1), MemberLeft(c1), MemberJoined(e1))) + diffMemberEvents(g1, g2) must be(Seq(MemberUp(a1), MemberLeft(c1), MemberJoined(e1))) + diffUnreachable(g1, g2) must be(Seq.empty) + diffSeen(g1, g2) must be(Seq(SeenChanged(convergence = true, seenBy = s2))) } - "be produced for unreachable members" in { - val g1 = Gossip(members = SortedSet(a1, b1), overview = GossipOverview(unreachable = Set(c2))) - val g2 = Gossip(members = SortedSet(a1), overview = GossipOverview(unreachable = Set(b1, c2))) - - diff(g1, g2) must be(Seq(MemberUnreachable(b1))) - } - - "be produced for downed members" in { + "be produced for members in unreachable" in { val g1 = Gossip(members = SortedSet(a1, b1), overview = GossipOverview(unreachable = Set(c2, e2))) val g2 = Gossip(members = SortedSet(a1), overview = GossipOverview(unreachable = Set(c2, b3, e3))) - diff(g1, g2) must be(Seq(MemberDowned(b3), MemberDowned(e3))) + diffMemberEvents(g1, g2) must be(Seq(MemberDowned(b3), MemberDowned(e3))) + diffUnreachable(g1, g2) must be(Seq(UnreachableMember(b3))) + diffSeen(g1, g2) must be(Seq.empty) + } + + "be produced for downed members" in { + val (g1, _) = converge(Gossip(members = SortedSet(a1, b1))) + val (g2, _) = converge(Gossip(members = SortedSet(a1, b1), 
overview = GossipOverview(unreachable = Set(e3)))) + + diffMemberEvents(g1, g2) must be(Seq(MemberDowned(e3))) + diffUnreachable(g1, g2) must be(Seq(UnreachableMember(e3))) + diffSeen(g1, g2) must be(Seq.empty) } "be produced for removed members" in { - val g1 = Gossip(members = SortedSet(a1, d1), overview = GossipOverview(unreachable = Set(c2))) - val g2 = Gossip(members = SortedSet(a1), overview = GossipOverview(unreachable = Set(c2))) + val (g1, _) = converge(Gossip(members = SortedSet(a1, d1))) + val (g2, s2) = converge(Gossip(members = SortedSet(a1))) - diff(g1, g2) must be(Seq(MemberRemoved(d2))) + diffMemberEvents(g1, g2) must be(Seq(MemberRemoved(d2))) + diffUnreachable(g1, g2) must be(Seq.empty) + diffSeen(g1, g2) must be(Seq(SeenChanged(convergence = true, seenBy = s2))) } "be produced for convergence changes" in { val g1 = Gossip(members = SortedSet(a1, b1, e1)).seen(a1.address).seen(b1.address).seen(e1.address) val g2 = Gossip(members = SortedSet(a1, b1, e1)).seen(a1.address).seen(b1.address) - diff(g1, g2) must be(Seq(ConvergenceChanged(false), - SeenChanged(convergence = false, seenBy = Set(a1.address, b1.address)))) - diff(g2, g1) must be(Seq(ConvergenceChanged(true), - SeenChanged(convergence = true, seenBy = Set(a1.address, b1.address, e1.address)))) + diffMemberEvents(g1, g2) must be(Seq.empty) + diffUnreachable(g1, g2) must be(Seq.empty) + diffSeen(g1, g2) must be(Seq(SeenChanged(convergence = false, seenBy = Set(a1.address, b1.address)))) + diffMemberEvents(g2, g1) must be(Seq.empty) + diffUnreachable(g2, g1) must be(Seq.empty) + diffSeen(g2, g1) must be(Seq(SeenChanged(convergence = true, seenBy = Set(a1.address, b1.address, e1.address)))) } "be produced for leader changes" in { - val g1 = Gossip(members = SortedSet(a1, b1, e1)) - val g2 = Gossip(members = SortedSet(b1, e1), overview = GossipOverview(unreachable = Set(a1))) - val g3 = g2.copy(overview = GossipOverview()).seen(b1.address).seen(e1.address) + val (g1, _) = converge(Gossip(members = SortedSet(a1, b1, e1))) + val (g2, s2) = converge(Gossip(members = SortedSet(b1, e1))) - diff(g1, g2) must be(Seq(MemberUnreachable(a1), LeaderChanged(Some(b1.address)))) - diff(g2, g3) must be(Seq(ConvergenceChanged(true), - SeenChanged(convergence = true, seenBy = Set(b1.address, e1.address)))) + diffMemberEvents(g1, g2) must be(Seq(MemberRemoved(a3))) + diffUnreachable(g1, g2) must be(Seq.empty) + diffSeen(g1, g2) must be(Seq(SeenChanged(convergence = true, seenBy = s2))) + diffLeader(g1, g2) must be(Seq(LeaderChanged(Some(b1.address)))) } } } diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala index d010c823e8..008c98d4b7 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala @@ -68,7 +68,6 @@ class ClusterSpec extends AkkaSpec(ClusterSpec.config) with ImplicitSender { clusterView.self.address must be(selfAddress) clusterView.members.map(_.address) must be(Set(selfAddress)) clusterView.status must be(MemberStatus.Joining) - clusterView.convergence must be(true) leaderActions() awaitCond(clusterView.status == MemberStatus.Up) } diff --git a/akka-cluster/src/test/scala/akka/cluster/DataStreamSpec.scala b/akka-cluster/src/test/scala/akka/cluster/DataStreamSpec.scala deleted file mode 100644 index f77c9fdcdb..0000000000 --- a/akka-cluster/src/test/scala/akka/cluster/DataStreamSpec.scala +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Copyright (C) 2009-2012 Typesafe Inc. 
- */ - -package akka.cluster - -import language.postfixOps -import scala.concurrent.duration._ - -import akka.testkit.{ LongRunningTest, AkkaSpec } - -@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) -class DataStreamSpec extends AkkaSpec(MetricsEnabledSpec.config) with AbstractClusterMetricsSpec with MetricNumericConverter { - import system.dispatcher - - val collector = createMetricsCollector - val DefaultRateOfDecay = 10 - - "DataStream" must { - - "calculate the ewma for multiple, variable, data streams" taggedAs LongRunningTest in { - val firstDataSet = collector.sample.metrics.collect { case m if m.trendable && m.isDefined ⇒ m.initialize(DefaultRateOfDecay) } - var streamingDataSet = firstDataSet - - val cancellable = system.scheduler.schedule(0 seconds, 100 millis) { - streamingDataSet = collector.sample.metrics.flatMap(latest ⇒ streamingDataSet.collect { - case streaming if (latest.trendable && latest.isDefined) && (latest same streaming) - && (latest.value.get != streaming.value.get) ⇒ { - val updatedDataStream = streaming.average.get :+ latest.value.get - updatedDataStream.timestamp must be > (streaming.average.get.timestamp) - updatedDataStream.duration.length must be > (streaming.average.get.duration.length) - updatedDataStream.ewma must not be (streaming.average.get.ewma) - updatedDataStream.ewma must not be (latest.value.get) - streaming.copy(value = latest.value, average = Some(updatedDataStream)) - } - }) - } - awaitCond(firstDataSet.size == streamingDataSet.size, longDuration) - cancellable.cancel() - - val finalDataSet = streamingDataSet.map(m ⇒ m.name -> m).toMap - firstDataSet map { - first ⇒ - val newMetric = finalDataSet(first.name) - val e1 = first.average.get - val e2 = newMetric.average.get - - if (first.value.get != newMetric.value.get) { - e2.ewma must not be (first.value.get) - e2.ewma must not be (newMetric.value.get) - } - if (first.value.get.longValue > newMetric.value.get.longValue) e1.ewma.longValue must be > e2.ewma.longValue - else if (first.value.get.longValue < newMetric.value.get.longValue) e1.ewma.longValue must be < e2.ewma.longValue - } - } - - "data streaming is disabled if the decay is set to 0" in { - val data = collector.sample.metrics map (_.initialize(0)) - data foreach (_.average.isEmpty must be(true)) - } - } -} diff --git a/akka-cluster/src/test/scala/akka/cluster/EWMASpec.scala b/akka-cluster/src/test/scala/akka/cluster/EWMASpec.scala new file mode 100644 index 0000000000..ed954b7bb6 --- /dev/null +++ b/akka-cluster/src/test/scala/akka/cluster/EWMASpec.scala @@ -0,0 +1,101 @@ +/* + * Copyright (C) 2009-2012 Typesafe Inc. 
+ */
+
+package akka.cluster
+
+import language.postfixOps
+import scala.concurrent.duration._
+import akka.testkit.{ LongRunningTest, AkkaSpec }
+import scala.concurrent.forkjoin.ThreadLocalRandom
+
+@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner])
+class EWMASpec extends AkkaSpec(MetricsEnabledSpec.config) with MetricsCollectorFactory {
+  import system.dispatcher
+
+  val collector = createMetricsCollector
+
+  "DataStream" must {
+
+    "calculate same ewma for constant values" in {
+      val ds = EWMA(value = 100.0, alpha = 0.18) :+
+        100.0 :+ 100.0 :+ 100.0
+      ds.value must be(100.0 plusOrMinus 0.001)
+    }
+
+    "calculate correct ewma for normal decay" in {
+      val d0 = EWMA(value = 1000.0, alpha = 2.0 / (1 + 10))
+      d0.value must be(1000.0 plusOrMinus 0.01)
+      val d1 = d0 :+ 10.0
+      d1.value must be(820.0 plusOrMinus 0.01)
+      val d2 = d1 :+ 10.0
+      d2.value must be(672.73 plusOrMinus 0.01)
+      val d3 = d2 :+ 10.0
+      d3.value must be(552.23 plusOrMinus 0.01)
+      val d4 = d3 :+ 10.0
+      d4.value must be(453.64 plusOrMinus 0.01)
+
+      val dn = (1 to 100).foldLeft(d0)((d, _) ⇒ d :+ 10.0)
+      dn.value must be(10.0 plusOrMinus 0.1)
+    }
+
+    "calculate ewma for alpha 1.0, max bias towards latest value" in {
+      val d0 = EWMA(value = 100.0, alpha = 1.0)
+      d0.value must be(100.0 plusOrMinus 0.01)
+      val d1 = d0 :+ 1.0
+      d1.value must be(1.0 plusOrMinus 0.01)
+      val d2 = d1 :+ 57.0
+      d2.value must be(57.0 plusOrMinus 0.01)
+      val d3 = d2 :+ 10.0
+      d3.value must be(10.0 plusOrMinus 0.01)
+    }
+
+    "calculate alpha from half-life and collect interval" in {
+      // according to http://en.wikipedia.org/wiki/Moving_average#Exponential_moving_average
+      val expectedAlpha = 0.1
+      // alpha = 2.0 / (1 + N)
+      val n = 19
+      val halfLife = n.toDouble / 2.8854
+      val collectInterval = 1.second
+      val halfLifeDuration = (halfLife * 1000).millis
+      EWMA.alpha(halfLifeDuration, collectInterval) must be(expectedAlpha plusOrMinus 0.001)
+    }
+
+    "calculate sane alpha from short half-life" in {
+      val alpha = EWMA.alpha(1.millis, 3.seconds)
+      alpha must be <= (1.0)
+      alpha must be >= (0.0)
+      alpha must be(1.0 plusOrMinus 0.001)
+    }
+
+    "calculate sane alpha from long half-life" in {
+      val alpha = EWMA.alpha(1.day, 3.seconds)
+      alpha must be <= (1.0)
+      alpha must be >= (0.0)
+      alpha must be(0.0 plusOrMinus 0.001)
+    }
+
+    "calculate the ewma for multiple, variable, data streams" taggedAs LongRunningTest in {
+      var streamingDataSet = Map.empty[String, Metric]
+      var usedMemory = Array.empty[Byte]
+      (1 to 50) foreach { _ ⇒
+        // wait a while between each message to give the metrics a chance to change
+        Thread.sleep(100)
+        usedMemory = usedMemory ++ Array.fill(1024)(ThreadLocalRandom.current.nextInt(127).toByte)
+        val changes = collector.sample.metrics.flatMap { latest ⇒
+          streamingDataSet.get(latest.name) match {
+            case None ⇒ Some(latest)
+            case Some(previous) ⇒
+              if (latest.isSmooth && latest.value != previous.value) {
+                val updated = previous :+ latest
+                updated.isSmooth must be(true)
+                updated.smoothValue must not be (previous.smoothValue)
+                Some(updated)
+              } else None
+          }
+        }
+        streamingDataSet ++= changes.map(m ⇒ m.name -> m)
+      }
+    }
+  }
+}
diff --git a/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala b/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala
index f873f5f252..f1da0dcf0e 100644
--- a/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala
+++ b/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala
@@ -28,8 +28,11 @@ class GossipSpec extends WordSpec with MustMatchers {
 
   "A Gossip" must {
 
-    "merge members by 
status priority" in { + "reach convergence when it's empty" in { + Gossip().convergence must be(true) + } + "merge members by status priority" in { val g1 = Gossip(members = SortedSet(a1, c1, e1)) val g2 = Gossip(members = SortedSet(a2, c2, e2)) @@ -44,7 +47,6 @@ class GossipSpec extends WordSpec with MustMatchers { } "merge unreachable by status priority" in { - val g1 = Gossip(members = Gossip.emptyMembers, overview = GossipOverview(unreachable = Set(a1, b1, c1, d1))) val g2 = Gossip(members = Gossip.emptyMembers, overview = GossipOverview(unreachable = Set(a2, b2, c2, d2))) diff --git a/akka-cluster/src/test/scala/akka/cluster/MetricNumericConverterSpec.scala b/akka-cluster/src/test/scala/akka/cluster/MetricNumericConverterSpec.scala index 1f23da769c..f572b13233 100644 --- a/akka-cluster/src/test/scala/akka/cluster/MetricNumericConverterSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/MetricNumericConverterSpec.scala @@ -4,40 +4,35 @@ package akka.cluster -import akka.testkit.{ ImplicitSender, AkkaSpec } +import org.scalatest.WordSpec +import org.scalatest.matchers.MustMatchers +import akka.cluster.StandardMetrics._ +import scala.util.Failure @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) -class MetricNumericConverterSpec extends AkkaSpec(MetricsEnabledSpec.config) with MetricNumericConverter with ImplicitSender with AbstractClusterMetricsSpec { +class MetricNumericConverterSpec extends WordSpec with MustMatchers with MetricNumericConverter { "MetricNumericConverter" must { - val collector = createMetricsCollector - "convert " in { - convert(0).isLeft must be(true) - convert(1).left.get must be(1) - convert(1L).isLeft must be(true) - convert(0.0).isRight must be(true) + "convert" in { + convertNumber(0).isLeft must be(true) + convertNumber(1).left.get must be(1) + convertNumber(1L).isLeft must be(true) + convertNumber(0.0).isRight must be(true) } "define a new metric" in { - val metric = Metric("heap-memory-used", Some(0L)) - metric.initializable must be(true) - metric.name must not be (null) - metric.average.isEmpty must be(true) - metric.trendable must be(true) - - if (collector.isSigar) { - val cores = collector.totalCores - cores.isDefined must be(true) - cores.value.get.intValue must be > (0) - cores.initializable must be(false) - } + val Some(metric) = Metric.create(HeapMemoryUsed, 256L, decayFactor = Some(0.18)) + metric.name must be(HeapMemoryUsed) + metric.value must be(256L) + metric.isSmooth must be(true) + metric.smoothValue must be(256.0 plusOrMinus 0.0001) } "define an undefined value with a None " in { - Metric("x", Some(-1)).value.isDefined must be(false) - Metric("x", Some(java.lang.Double.NaN)).value.isDefined must be(false) - Metric("x", None).isDefined must be(false) + Metric.create("x", -1, None).isDefined must be(false) + Metric.create("x", java.lang.Double.NaN, None).isDefined must be(false) + Metric.create("x", Failure(new RuntimeException), None).isDefined must be(false) } "recognize whether a metric value is defined" in { @@ -47,6 +42,7 @@ class MetricNumericConverterSpec extends AkkaSpec(MetricsEnabledSpec.config) wit "recognize whether a metric value is not defined" in { defined(-1) must be(false) + defined(-1.0) must be(false) defined(Double.NaN) must be(false) } } diff --git a/akka-cluster/src/test/scala/akka/cluster/MetricValuesSpec.scala b/akka-cluster/src/test/scala/akka/cluster/MetricValuesSpec.scala new file mode 100644 index 0000000000..8a38b59da6 --- /dev/null +++ 
b/akka-cluster/src/test/scala/akka/cluster/MetricValuesSpec.scala @@ -0,0 +1,69 @@ +/* + * Copyright (C) 2009-2012 Typesafe Inc. + */ + +package akka.cluster + +import scala.util.Try +import akka.actor.Address +import akka.testkit.AkkaSpec +import akka.cluster.StandardMetrics._ + +@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) +class MetricValuesSpec extends AkkaSpec(MetricsEnabledSpec.config) with MetricsCollectorFactory { + + val collector = createMetricsCollector + + val node1 = NodeMetrics(Address("akka", "sys", "a", 2554), 1, collector.sample.metrics) + val node2 = NodeMetrics(Address("akka", "sys", "a", 2555), 1, collector.sample.metrics) + + val nodes: Seq[NodeMetrics] = { + (1 to 100).foldLeft(List(node1, node2)) { (nodes, _) ⇒ + nodes map { n ⇒ + n.copy(metrics = collector.sample.metrics.flatMap(latest ⇒ n.metrics.collect { + case streaming if latest sameAs streaming ⇒ streaming :+ latest + })) + } + } + } + + "NodeMetrics.MetricValues" must { + "extract expected metrics for load balancing" in { + val stream1 = node2.metric(HeapMemoryCommitted).get.value.longValue + val stream2 = node1.metric(HeapMemoryUsed).get.value.longValue + stream1 must be >= (stream2) + } + + "extract expected MetricValue types for load balancing" in { + nodes foreach { node ⇒ + node match { + case HeapMemory(address, _, used, committed, Some(max)) ⇒ + committed must be >= (used) + used must be <= (max) + committed must be <= (max) + // extract is the java api + StandardMetrics.extractHeapMemory(node) must not be (null) + case HeapMemory(address, _, used, committed, None) ⇒ + used must be > (0L) + committed must be > (0L) + // extract is the java api + StandardMetrics.extractCpu(node) must not be (null) + } + + node match { + case Cpu(address, _, systemLoadAverageOption, cpuCombinedOption, processors) ⇒ + processors must be > (0) + if (systemLoadAverageOption.isDefined) + systemLoadAverageOption.get must be >= (0.0) + if (cpuCombinedOption.isDefined) { + cpuCombinedOption.get must be <= (1.0) + cpuCombinedOption.get must be >= (0.0) + } + // extract is the java api + StandardMetrics.extractCpu(node) must not be (null) + } + } + } + } + +} \ No newline at end of file diff --git a/akka-cluster/src/test/scala/akka/cluster/MetricsCollectorSpec.scala b/akka-cluster/src/test/scala/akka/cluster/MetricsCollectorSpec.scala index 609975db6a..2ce3892645 100644 --- a/akka-cluster/src/test/scala/akka/cluster/MetricsCollectorSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/MetricsCollectorSpec.scala @@ -1,4 +1,5 @@ /* + * Copyright (C) 2009-2012 Typesafe Inc. 
*/ @@ -13,57 +14,48 @@ import scala.util.{ Success, Try, Failure } import akka.actor._ import akka.testkit._ +import akka.cluster.StandardMetrics._ import org.scalatest.WordSpec import org.scalatest.matchers.MustMatchers object MetricsEnabledSpec { val config = """ akka.cluster.metrics.enabled = on - akka.cluster.metrics.metrics-interval = 1 s + akka.cluster.metrics.collect-interval = 1 s akka.cluster.metrics.gossip-interval = 1 s - akka.cluster.metrics.rate-of-decay = 10 akka.actor.provider = "akka.remote.RemoteActorRefProvider" """ } @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) -class MetricsCollectorSpec extends AkkaSpec(MetricsEnabledSpec.config) with ImplicitSender with AbstractClusterMetricsSpec with MetricSpec { +class MetricsCollectorSpec extends AkkaSpec(MetricsEnabledSpec.config) with ImplicitSender with MetricsCollectorFactory { import system.dispatcher val collector = createMetricsCollector "Metric must" must { - "create and initialize a new metric or merge an existing one" in { - for (i ← 0 to samples) { - val metrics = collector.sample.metrics - assertCreatedUninitialized(metrics) - assertInitialized(window, metrics map (_.initialize(window))) - } - } "merge 2 metrics that are tracking the same metric" in { - for (i ← 0 to samples) { + for (i ← 1 to 20) { val sample1 = collector.sample.metrics val sample2 = collector.sample.metrics - var merged = sample2 flatMap (latest ⇒ sample1 collect { - case peer if latest same peer ⇒ { + val merged12 = sample2 flatMap (latest ⇒ sample1 collect { + case peer if latest sameAs peer ⇒ val m = peer :+ latest - assertMerged(latest, peer, m) + m.value must be(latest.value) + m.isSmooth must be(peer.isSmooth || latest.isSmooth) m - } }) - val sample3 = collector.sample.metrics map (_.initialize(window)) - val sample4 = collector.sample.metrics map (_.initialize(window)) - merged = sample4 flatMap (latest ⇒ sample3 collect { - case peer if latest same peer ⇒ { + val sample3 = collector.sample.metrics + val sample4 = collector.sample.metrics + val merged34 = sample4 flatMap (latest ⇒ sample3 collect { + case peer if latest sameAs peer ⇒ val m = peer :+ latest - assertMerged(latest, peer, m) + m.value must be(latest.value) + m.isSmooth must be(peer.isSmooth || latest.isSmooth) m - } }) - merged.size must be(sample3.size) - merged.size must be(sample4.size) } } } @@ -76,158 +68,65 @@ class MetricsCollectorSpec extends AkkaSpec(MetricsEnabledSpec.config) with Impl "collect accurate metrics for a node" in { val sample = collector.sample - assertExpectedSampleSize(collector.isSigar, window, sample) - val metrics = sample.metrics.collect { case m if m.isDefined ⇒ (m.name, m.value.get) } - val used = metrics collectFirst { case ("heap-memory-used", b) ⇒ b } - val committed = metrics collectFirst { case ("heap-memory-committed", b) ⇒ b } + val metrics = sample.metrics.collect { case m ⇒ (m.name, m.value) } + val used = metrics collectFirst { case (HeapMemoryUsed, b) ⇒ b } + val committed = metrics collectFirst { case (HeapMemoryCommitted, b) ⇒ b } metrics foreach { - case ("total-cores", b) ⇒ b.intValue must be > (0) - case ("network-max-rx", b) ⇒ b.longValue must be > (0L) - case ("network-max-tx", b) ⇒ b.longValue must be > (0L) - case ("system-load-average", b) ⇒ b.doubleValue must be >= (0.0) - case ("processors", b) ⇒ b.intValue must be >= (0) - case ("heap-memory-used", b) ⇒ b.longValue must be >= (0L) - case ("heap-memory-committed", b) ⇒ b.longValue must be > (0L) - case ("cpu-combined", b) ⇒ - b.doubleValue must be <= 
(1.0) - b.doubleValue must be >= (0.0) - case ("heap-memory-max", b) ⇒ + case (SystemLoadAverage, b) ⇒ b.doubleValue must be >= (0.0) + case (Processors, b) ⇒ b.intValue must be >= (0) + case (HeapMemoryUsed, b) ⇒ b.longValue must be >= (0L) + case (HeapMemoryCommitted, b) ⇒ b.longValue must be > (0L) + case (HeapMemoryMax, b) ⇒ + b.longValue must be > (0L) used.get.longValue must be <= (b.longValue) committed.get.longValue must be <= (b.longValue) - } - } + case (CpuCombined, b) ⇒ + b.doubleValue must be <= (1.0) + b.doubleValue must be >= (0.0) - "collect SIGAR metrics if it is on the classpath" in { - if (collector.isSigar) { - // combined cpu may or may not be defined on a given sampling - // systemLoadAverage is SIGAR present - collector.systemLoadAverage.isDefined must be(true) - collector.networkStats.nonEmpty must be(true) - collector.networkMaxRx.isDefined must be(true) - collector.networkMaxTx.isDefined must be(true) - collector.totalCores.isDefined must be(true) } } "collect JMX metrics" in { // heap max may be undefined depending on the OS - // systemLoadAverage is JMX if SIGAR not present, but not available on all OS - collector.used.isDefined must be(true) - collector.committed.isDefined must be(true) - collector.processors.isDefined must be(true) + // systemLoadAverage is JMX when SIGAR not present, but + // it's not present on all platforms + val c = collector.asInstanceOf[JmxMetricsCollector] + val heap = c.heapMemoryUsage + c.heapUsed(heap).isDefined must be(true) + c.heapCommitted(heap).isDefined must be(true) + c.processors.isDefined must be(true) } - "collect [" + samples + "] node metrics samples in an acceptable duration" taggedAs LongRunningTest in { - val latch = TestLatch(samples) - val task = system.scheduler.schedule(0 seconds, interval) { + "collect 50 node metrics samples in an acceptable duration" taggedAs LongRunningTest in within(7 seconds) { + (1 to 50) foreach { _ ⇒ val sample = collector.sample - assertCreatedUninitialized(sample.metrics) - assertExpectedSampleSize(collector.isSigar, window, sample) - latch.countDown() + sample.metrics.size must be >= (3) + Thread.sleep(100) } - Await.ready(latch, longDuration) - task.cancel() } } } -trait MetricSpec extends WordSpec with MustMatchers { +/** + * Used when testing metrics without full cluster + */ +trait MetricsCollectorFactory { this: AkkaSpec ⇒ - def assertMasterMetricsAgainstGossipMetrics(master: Set[NodeMetrics], gossip: MetricsGossip): Unit = { - val masterMetrics = collectNodeMetrics(master) - val gossipMetrics = collectNodeMetrics(gossip.nodes) - gossipMetrics.size must be(masterMetrics.size plusOrMinus 1) // combined cpu - } + private def extendedActorSystem = system.asInstanceOf[ExtendedActorSystem] - def assertExpectedNodeAddresses(gossip: MetricsGossip, nodes: Set[NodeMetrics]): Unit = - gossip.nodes.map(_.address) must be(nodes.map(_.address)) + def selfAddress = extendedActorSystem.provider.rootPath.address - def assertExpectedSampleSize(isSigar: Boolean, gossip: MetricsGossip): Unit = - gossip.nodes.foreach(n ⇒ assertExpectedSampleSize(isSigar, gossip.rateOfDecay, n)) + val defaultDecayFactor = 2.0 / (1 + 10) - def assertCreatedUninitialized(gossip: MetricsGossip): Unit = - gossip.nodes.foreach(n ⇒ assertCreatedUninitialized(n.metrics.filterNot(_.trendable))) + def createMetricsCollector: MetricsCollector = + Try(new SigarMetricsCollector(selfAddress, defaultDecayFactor, + extendedActorSystem.dynamicAccess.createInstanceFor[AnyRef]("org.hyperic.sigar.Sigar", Nil))). 
+ recover { + case e ⇒ + log.debug("Metrics will be retreived from MBeans, Sigar failed to load. Reason: " + e) + new JmxMetricsCollector(selfAddress, defaultDecayFactor) + }.get - def assertInitialized(gossip: MetricsGossip): Unit = - gossip.nodes.foreach(n ⇒ assertInitialized(gossip.rateOfDecay, n.metrics)) - - def assertCreatedUninitialized(metrics: Set[Metric]): Unit = { - metrics.size must be > (0) - metrics foreach { m ⇒ - m.average.isEmpty must be(true) - if (m.value.isDefined) m.isDefined must be(true) - if (m.initializable) (m.trendable && m.isDefined && m.average.isEmpty) must be(true) - } - } - - def assertInitialized(decay: Int, metrics: Set[Metric]): Unit = if (decay > 0) metrics.filter(_.trendable) foreach { m ⇒ - m.initializable must be(false) - if (m.isDefined) m.average.isDefined must be(true) - } - - def assertMerged(latest: Metric, peer: Metric, merged: Metric): Unit = if (latest same peer) { - if (latest.isDefined) { - if (peer.isDefined) { - merged.isDefined must be(true) - merged.value.get must be(latest.value.get) - if (latest.trendable) { - if (latest.initializable) merged.average.isEmpty must be(true) - else merged.average.isDefined must be(true) - } - } else { - merged.isDefined must be(true) - merged.value.get must be(latest.value.get) - if (latest.average.isDefined) merged.average.get must be(latest.average.get) - else merged.average.isEmpty must be(true) - } - } else { - if (peer.isDefined) { - merged.isDefined must be(true) - merged.value.get must be(peer.value.get) - if (peer.trendable) { - if (peer.initializable) merged.average.isEmpty must be(true) - else merged.average.isDefined must be(true) - } - } else { - merged.isDefined must be(false) - merged.average.isEmpty must be(true) - } - } - } - - def assertExpectedSampleSize(isSigar: Boolean, decay: Int, node: NodeMetrics): Unit = { - node.metrics.size must be(9) - val metrics = node.metrics.filter(_.isDefined) - if (isSigar) { // combined cpu + jmx max heap - metrics.size must be >= (7) - metrics.size must be <= (9) - } else { // jmx max heap - metrics.size must be >= (4) - metrics.size must be <= (5) - } - - if (decay > 0) metrics.collect { case m if m.trendable && (!m.initializable) ⇒ m }.foreach(_.average.isDefined must be(true)) - } - - def collectNodeMetrics(nodes: Set[NodeMetrics]): immutable.Seq[Metric] = - nodes.foldLeft(Vector[Metric]()) { - case (r, n) ⇒ r ++ n.metrics.filter(_.isDefined) - } + def isSigar(collector: MetricsCollector): Boolean = collector.isInstanceOf[SigarMetricsCollector] } - -trait AbstractClusterMetricsSpec extends DefaultTimeout { - this: AkkaSpec ⇒ - - val selfAddress = new Address("akka", "localhost") - - val window = 49 - - val interval: FiniteDuration = 100 millis - - val longDuration = 120 seconds // for long running tests - - val samples = 100 - - def createMetricsCollector: MetricsCollector = MetricsCollector(selfAddress, log, system.asInstanceOf[ExtendedActorSystem].dynamicAccess) - -} \ No newline at end of file diff --git a/akka-cluster/src/test/scala/akka/cluster/MetricsGossipSpec.scala b/akka-cluster/src/test/scala/akka/cluster/MetricsGossipSpec.scala index 9a782b528a..6d54a69bc2 100644 --- a/akka-cluster/src/test/scala/akka/cluster/MetricsGossipSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/MetricsGossipSpec.scala @@ -12,95 +12,95 @@ import akka.actor.Address import java.lang.System.{ currentTimeMillis ⇒ newTimestamp } @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) -class MetricsGossipSpec extends 
AkkaSpec(MetricsEnabledSpec.config) with ImplicitSender with AbstractClusterMetricsSpec with MetricSpec { +class MetricsGossipSpec extends AkkaSpec(MetricsEnabledSpec.config) with ImplicitSender with MetricsCollectorFactory { val collector = createMetricsCollector "A MetricsGossip" must { - "add and initialize new NodeMetrics" in { + "add new NodeMetrics" in { val m1 = NodeMetrics(Address("akka", "sys", "a", 2554), newTimestamp, collector.sample.metrics) val m2 = NodeMetrics(Address("akka", "sys", "a", 2555), newTimestamp, collector.sample.metrics) - var localGossip = MetricsGossip(window) - localGossip :+= m1 - localGossip.nodes.size must be(1) - localGossip.nodeKeys.size must be(localGossip.nodes.size) - assertMasterMetricsAgainstGossipMetrics(Set(m1), localGossip) - assertExpectedSampleSize(collector.isSigar, localGossip) - assertInitialized(localGossip.rateOfDecay, collectNodeMetrics(localGossip.nodes).toSet) + m1.metrics.size must be > (3) + m2.metrics.size must be > (3) - localGossip :+= m2 - localGossip.nodes.size must be(2) - localGossip.nodeKeys.size must be(localGossip.nodes.size) - assertMasterMetricsAgainstGossipMetrics(Set(m1, m2), localGossip) - assertExpectedSampleSize(collector.isSigar, localGossip) - assertInitialized(localGossip.rateOfDecay, collectNodeMetrics(localGossip.nodes).toSet) + val g1 = MetricsGossip.empty :+ m1 + g1.nodes.size must be(1) + g1.nodeMetricsFor(m1.address).map(_.metrics) must be(Some(m1.metrics)) + + val g2 = g1 :+ m2 + g2.nodes.size must be(2) + g2.nodeMetricsFor(m1.address).map(_.metrics) must be(Some(m1.metrics)) + g2.nodeMetricsFor(m2.address).map(_.metrics) must be(Some(m2.metrics)) } "merge peer metrics" in { val m1 = NodeMetrics(Address("akka", "sys", "a", 2554), newTimestamp, collector.sample.metrics) val m2 = NodeMetrics(Address("akka", "sys", "a", 2555), newTimestamp, collector.sample.metrics) - var remoteGossip = MetricsGossip(window) - remoteGossip :+= m1 - remoteGossip :+= m2 - remoteGossip.nodes.size must be(2) - val beforeMergeNodes = remoteGossip.nodes + val g1 = MetricsGossip.empty :+ m1 :+ m2 + g1.nodes.size must be(2) + val beforeMergeNodes = g1.nodes - val m2Updated = m2 copy (metrics = collector.sample.metrics, timestamp = newTimestamp) - remoteGossip :+= m2Updated // merge peers - remoteGossip.nodes.size must be(2) - assertMasterMetricsAgainstGossipMetrics(beforeMergeNodes, remoteGossip) - assertExpectedSampleSize(collector.isSigar, remoteGossip) - remoteGossip.nodes collect { case peer if peer.address == m2.address ⇒ peer.timestamp must be(m2Updated.timestamp) } + val m2Updated = m2 copy (metrics = collector.sample.metrics, timestamp = m2.timestamp + 1000) + val g2 = g1 :+ m2Updated // merge peers + g2.nodes.size must be(2) + g2.nodeMetricsFor(m1.address).map(_.metrics) must be(Some(m1.metrics)) + g2.nodeMetricsFor(m2.address).map(_.metrics) must be(Some(m2Updated.metrics)) + g2.nodes collect { case peer if peer.address == m2.address ⇒ peer.timestamp must be(m2Updated.timestamp) } } "merge an existing metric set for a node and update node ring" in { val m1 = NodeMetrics(Address("akka", "sys", "a", 2554), newTimestamp, collector.sample.metrics) val m2 = NodeMetrics(Address("akka", "sys", "a", 2555), newTimestamp, collector.sample.metrics) val m3 = NodeMetrics(Address("akka", "sys", "a", 2556), newTimestamp, collector.sample.metrics) - val m2Updated = m2 copy (metrics = collector.sample.metrics, timestamp = newTimestamp) + val m2Updated = m2 copy (metrics = collector.sample.metrics, timestamp = m2.timestamp + 1000) - var 
localGossip = MetricsGossip(window) - localGossip :+= m1 - localGossip :+= m2 + val g1 = MetricsGossip.empty :+ m1 :+ m2 + val g2 = MetricsGossip.empty :+ m3 :+ m2Updated - var remoteGossip = MetricsGossip(window) - remoteGossip :+= m3 - remoteGossip :+= m2Updated - - localGossip.nodeKeys.contains(m1.address) must be(true) - remoteGossip.nodeKeys.contains(m3.address) must be(true) + g1.nodes.map(_.address) must be(Set(m1.address, m2.address)) // must contain nodes 1,3, and the most recent version of 2 - val mergedGossip = localGossip merge remoteGossip - mergedGossip.nodes.size must be(3) - assertExpectedNodeAddresses(mergedGossip, Set(m1, m2, m3)) - assertExpectedSampleSize(collector.isSigar, mergedGossip) - assertCreatedUninitialized(mergedGossip) - assertInitialized(mergedGossip) - mergedGossip.nodes.find(_.address == m2.address).get.timestamp must be(m2Updated.timestamp) + val mergedGossip = g1 merge g2 + mergedGossip.nodes.map(_.address) must be(Set(m1.address, m2.address, m3.address)) + mergedGossip.nodeMetricsFor(m1.address).map(_.metrics) must be(Some(m1.metrics)) + mergedGossip.nodeMetricsFor(m2.address).map(_.metrics) must be(Some(m2Updated.metrics)) + mergedGossip.nodeMetricsFor(m3.address).map(_.metrics) must be(Some(m3.metrics)) + mergedGossip.nodes.foreach(_.metrics.size must be > (3)) + mergedGossip.nodeMetricsFor(m2.address).map(_.timestamp) must be(Some(m2Updated.timestamp)) } "get the current NodeMetrics if it exists in the local nodes" in { val m1 = NodeMetrics(Address("akka", "sys", "a", 2554), newTimestamp, collector.sample.metrics) - var localGossip = MetricsGossip(window) - localGossip :+= m1 - localGossip.metricsFor(m1).nonEmpty must be(true) + val g1 = MetricsGossip.empty :+ m1 + g1.nodeMetricsFor(m1.address).map(_.metrics) must be(Some(m1.metrics)) } "remove a node if it is no longer Up" in { val m1 = NodeMetrics(Address("akka", "sys", "a", 2554), newTimestamp, collector.sample.metrics) val m2 = NodeMetrics(Address("akka", "sys", "a", 2555), newTimestamp, collector.sample.metrics) - var localGossip = MetricsGossip(window) - localGossip :+= m1 - localGossip :+= m2 + val g1 = MetricsGossip.empty :+ m1 :+ m2 + g1.nodes.size must be(2) + val g2 = g1 remove m1.address + g2.nodes.size must be(1) + g2.nodes.exists(_.address == m1.address) must be(false) + g2.nodeMetricsFor(m1.address) must be(None) + g2.nodeMetricsFor(m2.address).map(_.metrics) must be(Some(m2.metrics)) + } - localGossip.nodes.size must be(2) - localGossip = localGossip remove m1.address - localGossip.nodes.size must be(1) - localGossip.nodes.exists(_.address == m1.address) must be(false) + "filter nodes" in { + val m1 = NodeMetrics(Address("akka", "sys", "a", 2554), newTimestamp, collector.sample.metrics) + val m2 = NodeMetrics(Address("akka", "sys", "a", 2555), newTimestamp, collector.sample.metrics) + + val g1 = MetricsGossip.empty :+ m1 :+ m2 + g1.nodes.size must be(2) + val g2 = g1 filter Set(m2.address) + g2.nodes.size must be(1) + g2.nodes.exists(_.address == m1.address) must be(false) + g2.nodeMetricsFor(m1.address) must be(None) + g2.nodeMetricsFor(m2.address).map(_.metrics) must be(Some(m2.metrics)) } } } diff --git a/akka-cluster/src/test/scala/akka/cluster/NodeMetricsSpec.scala b/akka-cluster/src/test/scala/akka/cluster/NodeMetricsSpec.scala index 5d58bc84e5..7e80a04d64 100644 --- a/akka-cluster/src/test/scala/akka/cluster/NodeMetricsSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/NodeMetricsSpec.scala @@ -4,51 +4,44 @@ package akka.cluster -import akka.testkit.AkkaSpec +import 
org.scalatest.WordSpec +import org.scalatest.matchers.MustMatchers import akka.actor.Address @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) -class NodeMetricsSpec extends AkkaSpec with AbstractClusterMetricsSpec with MetricSpec { - - val collector = createMetricsCollector +class NodeMetricsSpec extends WordSpec with MustMatchers { val node1 = Address("akka", "sys", "a", 2554) - val node2 = Address("akka", "sys", "a", 2555) "NodeMetrics must" must { - "recognize updatable nodes" in { - (NodeMetrics(node1, 0) updatable NodeMetrics(node1, 1)) must be(true) - } - - "recognize non-updatable nodes" in { - (NodeMetrics(node1, 1) updatable NodeMetrics(node2, 0)) must be(false) - } "return correct result for 2 'same' nodes" in { - (NodeMetrics(node1, 0) same NodeMetrics(node1, 0)) must be(true) + (NodeMetrics(node1, 0) sameAs NodeMetrics(node1, 0)) must be(true) } "return correct result for 2 not 'same' nodes" in { - (NodeMetrics(node1, 0) same NodeMetrics(node2, 0)) must be(false) + (NodeMetrics(node1, 0) sameAs NodeMetrics(node2, 0)) must be(false) } "merge 2 NodeMetrics by most recent" in { - val sample1 = NodeMetrics(node1, 1, collector.sample.metrics) - val sample2 = NodeMetrics(node1, 2, collector.sample.metrics) + val sample1 = NodeMetrics(node1, 1, Set(Metric.create("a", 10, None), Metric.create("b", 20, None)).flatten) + val sample2 = NodeMetrics(node1, 2, Set(Metric.create("a", 11, None), Metric.create("c", 30, None)).flatten) val merged = sample1 merge sample2 merged.timestamp must be(sample2.timestamp) - merged.metrics must be(sample2.metrics) + merged.metric("a").map(_.value) must be(Some(11)) + merged.metric("b").map(_.value) must be(Some(20)) + merged.metric("c").map(_.value) must be(Some(30)) } "not merge 2 NodeMetrics if master is more recent" in { - val sample1 = NodeMetrics(node1, 1, collector.sample.metrics) - val sample2 = NodeMetrics(node2, 0, sample1.metrics) + val sample1 = NodeMetrics(node1, 1, Set(Metric.create("a", 10, None), Metric.create("b", 20, None)).flatten) + val sample2 = NodeMetrics(node1, 0, Set(Metric.create("a", 11, None), Metric.create("c", 30, None)).flatten) - val merged = sample2 merge sample2 // older and not same - merged.timestamp must be(sample2.timestamp) - merged.metrics must be(sample2.metrics) + val merged = sample1 merge sample2 // older and not same + merged.timestamp must be(sample1.timestamp) + merged.metrics must be(sample1.metrics) } } } diff --git a/akka-cluster/src/test/scala/akka/cluster/routing/MetricsSelectorSpec.scala b/akka-cluster/src/test/scala/akka/cluster/routing/MetricsSelectorSpec.scala new file mode 100644 index 0000000000..5b5b92d950 --- /dev/null +++ b/akka-cluster/src/test/scala/akka/cluster/routing/MetricsSelectorSpec.scala @@ -0,0 +1,118 @@ +/* + * Copyright (C) 2009-2012 Typesafe Inc. 
+ */ + +package akka.cluster.routing + +import org.scalatest.WordSpec +import org.scalatest.matchers.MustMatchers + +import akka.actor.Address +import akka.cluster.Metric +import akka.cluster.NodeMetrics +import akka.cluster.StandardMetrics._ + +@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) +class MetricsSelectorSpec extends WordSpec with MustMatchers { + + val abstractSelector = new CapacityMetricsSelector { + override def capacity(nodeMetrics: Set[NodeMetrics]): Map[Address, Double] = Map.empty + } + + val a1 = Address("akka", "sys", "a1", 2551) + val b1 = Address("akka", "sys", "b1", 2551) + val c1 = Address("akka", "sys", "c1", 2551) + val d1 = Address("akka", "sys", "d1", 2551) + + val decayFactor = Some(0.18) + + val nodeMetricsA = NodeMetrics(a1, System.currentTimeMillis, Set( + Metric.create(HeapMemoryUsed, 128, decayFactor), + Metric.create(HeapMemoryCommitted, 256, decayFactor), + Metric.create(HeapMemoryMax, 512, None), + Metric.create(CpuCombined, 0.1, decayFactor), + Metric.create(SystemLoadAverage, 0.5, None), + Metric.create(Processors, 8, None)).flatten) + + val nodeMetricsB = NodeMetrics(b1, System.currentTimeMillis, Set( + Metric.create(HeapMemoryUsed, 256, decayFactor), + Metric.create(HeapMemoryCommitted, 512, decayFactor), + Metric.create(HeapMemoryMax, 1024, None), + Metric.create(CpuCombined, 0.5, decayFactor), + Metric.create(SystemLoadAverage, 1.0, None), + Metric.create(Processors, 16, None)).flatten) + + val nodeMetricsC = NodeMetrics(c1, System.currentTimeMillis, Set( + Metric.create(HeapMemoryUsed, 1024, decayFactor), + Metric.create(HeapMemoryCommitted, 1024, decayFactor), + Metric.create(HeapMemoryMax, 1024, None), + Metric.create(CpuCombined, 1.0, decayFactor), + Metric.create(SystemLoadAverage, 16.0, None), + Metric.create(Processors, 16, None)).flatten) + + val nodeMetricsD = NodeMetrics(d1, System.currentTimeMillis, Set( + Metric.create(HeapMemoryUsed, 511, decayFactor), + Metric.create(HeapMemoryCommitted, 512, decayFactor), + Metric.create(HeapMemoryMax, 512, None), + Metric.create(Processors, 2, decayFactor)).flatten) + + val nodeMetrics = Set(nodeMetricsA, nodeMetricsB, nodeMetricsC, nodeMetricsD) + + "CapacityMetricsSelector" must { + + "calculate weights from capacity" in { + val capacity = Map(a1 -> 0.6, b1 -> 0.3, c1 -> 0.1) + val weights = abstractSelector.weights(capacity) + weights must be(Map(c1 -> 1, b1 -> 3, a1 -> 6)) + } + + "handle low and zero capacity" in { + val capacity = Map(a1 -> 0.0, b1 -> 1.0, c1 -> 0.005, d1 -> 0.004) + val weights = abstractSelector.weights(capacity) + weights must be(Map(a1 -> 0, b1 -> 100, c1 -> 1, d1 -> 0)) + } + + } + + "HeapMetricsSelector" must { + "calculate capacity of heap metrics" in { + val capacity = HeapMetricsSelector.capacity(nodeMetrics) + capacity(a1) must be(0.75 plusOrMinus 0.0001) + capacity(b1) must be(0.75 plusOrMinus 0.0001) + capacity(c1) must be(0.0 plusOrMinus 0.0001) + capacity(d1) must be(0.001953125 plusOrMinus 0.0001) + } + } + + "CpuMetricsSelector" must { + "calculate capacity of cpuCombined metrics" in { + val capacity = CpuMetricsSelector.capacity(nodeMetrics) + capacity(a1) must be(0.9 plusOrMinus 0.0001) + capacity(b1) must be(0.5 plusOrMinus 0.0001) + capacity(c1) must be(0.0 plusOrMinus 0.0001) + capacity.contains(d1) must be(false) + } + } + + "SystemLoadAverageMetricsSelector" must { + "calculate capacity of systemLoadAverage metrics" in { + val capacity = SystemLoadAverageMetricsSelector.capacity(nodeMetrics) + capacity(a1) must be(0.9375 plusOrMinus 
0.0001) + capacity(b1) must be(0.9375 plusOrMinus 0.0001) + capacity(c1) must be(0.0 plusOrMinus 0.0001) + capacity.contains(d1) must be(false) + } + } + + "MixMetricsSelector" must { + "aggregate capacity of all metrics" in { + val capacity = MixMetricsSelector.capacity(nodeMetrics) + capacity(a1) must be((0.75 + 0.9 + 0.9375) / 3 plusOrMinus 0.0001) + capacity(b1) must be((0.75 + 0.5 + 0.9375) / 3 plusOrMinus 0.0001) + capacity(c1) must be((0.0 + 0.0 + 0.0) / 3 plusOrMinus 0.0001) + capacity(d1) must be((0.001953125) / 1 plusOrMinus 0.0001) + } + } + +} + diff --git a/akka-cluster/src/test/scala/akka/cluster/routing/WeightedRouteesSpec.scala b/akka-cluster/src/test/scala/akka/cluster/routing/WeightedRouteesSpec.scala new file mode 100644 index 0000000000..f34b81c5ec --- /dev/null +++ b/akka-cluster/src/test/scala/akka/cluster/routing/WeightedRouteesSpec.scala @@ -0,0 +1,87 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ + +package akka.cluster.routing + +import com.typesafe.config.ConfigFactory + +import akka.actor.Address +import akka.actor.RootActorPath +import akka.testkit.AkkaSpec + +@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) +class WeightedRouteesSpec extends AkkaSpec(ConfigFactory.parseString(""" + akka.actor.provider = "akka.cluster.ClusterActorRefProvider" + akka.remote.netty.port = 0 + """)) { + + val a1 = Address("akka", "sys", "a1", 2551) + val b1 = Address("akka", "sys", "b1", 2551) + val c1 = Address("akka", "sys", "c1", 2551) + val d1 = Address("akka", "sys", "d1", 2551) + + val refA = system.actorFor(RootActorPath(a1) / "user" / "a") + val refB = system.actorFor(RootActorPath(b1) / "user" / "b") + val refC = system.actorFor(RootActorPath(c1) / "user" / "c") + + "WeightedRoutees" must { + + "allocate weighted refs" in { + val weights = Map(a1 -> 1, b1 -> 3, c1 -> 10) + val refs = Vector(refA, refB, refC) + val weighted = new WeightedRoutees(refs, a1, weights) + + weighted(1) must be(refA) + 2 to 4 foreach { weighted(_) must be(refB) } + 5 to 14 foreach { weighted(_) must be(refC) } + weighted.total must be(14) + } + + "check boundaries" in { + val empty = new WeightedRoutees(Vector(), a1, Map.empty) + empty.isEmpty must be(true) + intercept[IllegalArgumentException] { + empty.total + } + val weighted = new WeightedRoutees(Vector(refA, refB, refC), a1, Map.empty) + weighted.total must be(3) + intercept[IllegalArgumentException] { + weighted(0) + } + intercept[IllegalArgumentException] { + weighted(4) + } + } + + "allocate refs for undefined weight" in { + val weights = Map(a1 -> 1, b1 -> 7) + val refs = Vector(refA, refB, refC) + val weighted = new WeightedRoutees(refs, a1, weights) + + weighted(1) must be(refA) + 2 to 8 foreach { weighted(_) must be(refB) } + // undefined, uses the mean of the weights, i.e. 
4 + 9 to 12 foreach { weighted(_) must be(refC) } + weighted.total must be(12) + } + + "allocate weighted local refs" in { + val weights = Map(a1 -> 2, b1 -> 1, c1 -> 10) + val refs = Vector(testActor, refB, refC) + val weighted = new WeightedRoutees(refs, a1, weights) + + 1 to 2 foreach { weighted(_) must be(testActor) } + 3 to weighted.total foreach { weighted(_) must not be (testActor) } + } + + "not allocate ref with weight zero" in { + val weights = Map(a1 -> 0, b1 -> 2, c1 -> 10) + val refs = Vector(refA, refB, refC) + val weighted = new WeightedRoutees(refs, a1, weights) + + 1 to weighted.total foreach { weighted(_) must not be (refA) } + } + + } +} diff --git a/akka-contrib/docs/jul.rst b/akka-contrib/docs/jul.rst new file mode 100644 index 0000000000..b0d1c0a668 --- /dev/null +++ b/akka-contrib/docs/jul.rst @@ -0,0 +1,17 @@ +Java Logging (JUL) +================= + +This extension module provides a logging backend which uses the `java.util.logging` (j.u.l) +API to do the endpoint logging for `akka.event.Logging`. + +Provided with this module is an implementation of `akka.event.LoggingAdapter` which is independent of any `ActorSystem` being in place. This means that j.u.l can be used as the backend, via the Akka Logging API, for both Actor and non-Actor codebases. + +To enable j.u.l as the `akka.event.Logging` backend, use the following Akka config: + + event-handlers = ["akka.contrib.jul.JavaLoggingEventHandler"] + +To access the `akka.event.Logging` API from non-Actor code, mix in `akka.contrib.jul.JavaLogging`. + +This module is preferred over SLF4J with its JDK14 backend, due to integration issues resulting in the incorrect handling of `threadId`, `className` and `methodName`. + +This extension module was contributed by Sam Halliday. \ No newline at end of file diff --git a/akka-contrib/src/main/scala/akka/contrib/jul/JulEventHandler.scala b/akka-contrib/src/main/scala/akka/contrib/jul/JulEventHandler.scala new file mode 100644 index 0000000000..68ce0ed973 --- /dev/null +++ b/akka-contrib/src/main/scala/akka/contrib/jul/JulEventHandler.scala @@ -0,0 +1,133 @@ +package akka.contrib.jul + +import akka.event.Logging._ +import akka.actor._ +import akka.event.LoggingAdapter +import java.util.logging +import concurrent.{ ExecutionContext, Future } + +/** + * Makes the Akka `Logging` API available as the `log` + * field, using `java.util.logging` as the backend. + * + * This trait does not require an `ActorSystem` and is + * encouraged to be used as a general purpose Scala + * logging API. + * + * For `Actor`s, use `ActorLogging` instead. + */ +trait JavaLogging { + + @transient + protected lazy val log = new JavaLoggingAdapter { + def logger = logging.Logger.getLogger(JavaLogging.this.getClass.getName) + } +} + +/** + * `java.util.logging` EventHandler. + */ +class JavaLoggingEventHandler extends Actor { + + def receive = { + case event @ Error(cause, logSource, logClass, message) ⇒ + log(logging.Level.SEVERE, cause, logSource, logClass, message, event) + + case event @ Warning(logSource, logClass, message) ⇒ + log(logging.Level.WARNING, null, logSource, logClass, message, event) + + case event @ Info(logSource, logClass, message) ⇒ + log(logging.Level.INFO, null, logSource, logClass, message, event) + + case event @ Debug(logSource, logClass, message) ⇒ + log(logging.Level.CONFIG, null, logSource, logClass, message, event) + + case InitializeLogger(_) ⇒ + sender ! 
LoggerInitialized + } + + @inline + def log(level: logging.Level, cause: Throwable, logSource: String, logClass: Class[_], message: Any, event: LogEvent) { + val logger = logging.Logger.getLogger(logSource) + val record = new logging.LogRecord(level, message.toString) + record.setLoggerName(logger.getName) + record.setThrown(cause) + record.setThreadID(event.thread.getId.toInt) + record.setSourceClassName(logClass.getName) + record.setSourceMethodName(null) // lost forever + logger.log(record) + } +} + +trait JavaLoggingAdapter extends LoggingAdapter { + + def logger: logging.Logger + + /** Override-able option for asynchronous logging */ + def loggingExecutionContext: Option[ExecutionContext] = None + + def isErrorEnabled = logger.isLoggable(logging.Level.SEVERE) + + def isWarningEnabled = logger.isLoggable(logging.Level.WARNING) + + def isInfoEnabled = logger.isLoggable(logging.Level.INFO) + + def isDebugEnabled = logger.isLoggable(logging.Level.CONFIG) + + protected def notifyError(message: String) { + log(logging.Level.SEVERE, null, message) + } + + protected def notifyError(cause: Throwable, message: String) { + log(logging.Level.SEVERE, cause, message) + } + + protected def notifyWarning(message: String) { + log(logging.Level.WARNING, null, message) + } + + protected def notifyInfo(message: String) { + log(logging.Level.INFO, null, message) + } + + protected def notifyDebug(message: String) { + log(logging.Level.CONFIG, null, message) + } + + @inline + def log(level: logging.Level, cause: Throwable, message: String) { + val record = new logging.LogRecord(level, message) + record.setLoggerName(logger.getName) + record.setThrown(cause) + updateSource(record) + + if (loggingExecutionContext.isDefined) { + implicit val context = loggingExecutionContext.get + Future(logger.log(record)).onFailure { + case thrown: Throwable ⇒ thrown.printStackTrace() + } + } else + logger.log(record) + } + + // it is unfortunate that this workaround is needed + private def updateSource(record: logging.LogRecord) { + val stack = Thread.currentThread.getStackTrace + val source = stack.find { + frame ⇒ + val cname = frame.getClassName + !cname.startsWith("akka.contrib.jul.") && + !cname.startsWith("akka.event.LoggingAdapter") && + !cname.startsWith("java.lang.reflect.") && + !cname.startsWith("sun.reflect.") + } + if (source.isDefined) { + record.setSourceClassName(source.get.getClassName) + record.setSourceMethodName(source.get.getMethodName) + } else { + record.setSourceClassName(null) + record.setSourceMethodName(null) + } + } + +} \ No newline at end of file diff --git a/akka-contrib/src/multi-jvm/scala/akka/contrib/pattern/ReliableProxySpec.scala b/akka-contrib/src/multi-jvm/scala/akka/contrib/pattern/ReliableProxySpec.scala index f201613cc3..deef1871c2 100644 --- a/akka-contrib/src/multi-jvm/scala/akka/contrib/pattern/ReliableProxySpec.scala +++ b/akka-contrib/src/multi-jvm/scala/akka/contrib/pattern/ReliableProxySpec.scala @@ -41,62 +41,71 @@ class ReliableProxySpec extends MultiNodeSpec(ReliableProxySpec) with STMultiNod } } - runOn(remote) { - system.actorOf(Props(new Actor { - def receive = { - case x ⇒ testActor ! 
x - } - }), "echo") - } + @volatile var target: ActorRef = system.deadLetters + @volatile var proxy: ActorRef = system.deadLetters - val target = system.actorFor(node(remote) / "user" / "echo") - - var proxy: ActorRef = _ def expectState(s: State) = expectMsg(FSM.CurrentState(proxy, s)) def expectTransition(s1: State, s2: State) = expectMsg(FSM.Transition(proxy, s1, s2)) - - runOn(local) { - //#demo - import akka.contrib.pattern.ReliableProxy - - proxy = system.actorOf(Props(new ReliableProxy(target, 100.millis)), "proxy") - //#demo - proxy ! FSM.SubscribeTransitionCallBack(testActor) - expectState(Idle) - //#demo - proxy ! "hello" - //#demo - expectTransition(Idle, Active) - expectTransition(Active, Idle) - } - runOn(remote) { - expectMsg("hello") - } + + def sendN(n: Int) = (1 to n) foreach (proxy ! _) + def expectN(n: Int) = (1 to n) foreach { n ⇒ expectMsg(n); lastSender must be === target } "A ReliableProxy" must { + "initialize properly" in { + runOn(remote) { + target = system.actorOf(Props(new Actor { + def receive = { + case x ⇒ testActor ! x + } + }), "echo") + } + + enterBarrier("initialize") + + runOn(local) { + //#demo + import akka.contrib.pattern.ReliableProxy + + target = system.actorFor(node(remote) / "user" / "echo") + proxy = system.actorOf(Props(new ReliableProxy(target, 100.millis)), "proxy") + //#demo + proxy ! FSM.SubscribeTransitionCallBack(testActor) + expectState(Idle) + //#demo + proxy ! "hello" + //#demo + expectTransition(Idle, Active) + expectTransition(Active, Idle) + } + + runOn(remote) { + expectMsg("hello") + } + } + "forward messages in sequence" in { runOn(local) { - (1 to 100) foreach (proxy ! _) + sendN(100) expectTransition(Idle, Active) expectTransition(Active, Idle) } runOn(remote) { within(1 second) { - (1 to 100) foreach { n ⇒ expectMsg(n); lastSender must be === target } + expectN(100) } } enterBarrier("test1a") runOn(local) { - (1 to 100) foreach (proxy ! _) + sendN(100) expectTransition(Idle, Active) expectTransition(Active, Idle) } runOn(remote) { within(1 second) { - (1 to 100) foreach { n ⇒ expectMsg(n); lastSender must be === target } + expectN(100) } } @@ -106,7 +115,7 @@ class ReliableProxySpec extends MultiNodeSpec(ReliableProxySpec) with STMultiNod "retry when sending fails" in { runOn(local) { testConductor.blackhole(local, remote, Direction.Send).await - (1 to 100) foreach (proxy ! _) + sendN(100) within(1 second) { expectTransition(Idle, Active) expectNoMsg @@ -127,7 +136,7 @@ class ReliableProxySpec extends MultiNodeSpec(ReliableProxySpec) with STMultiNod } runOn(remote) { within(1 second) { - (1 to 100) foreach { n ⇒ expectMsg(n); lastSender must be === target } + expectN(100) } } @@ -137,7 +146,7 @@ class ReliableProxySpec extends MultiNodeSpec(ReliableProxySpec) with STMultiNod "retry when receiving fails" in { runOn(local) { testConductor.blackhole(local, remote, Direction.Receive).await - (1 to 100) foreach (proxy ! _) + sendN(100) within(1 second) { expectTransition(Idle, Active) expectNoMsg @@ -145,7 +154,7 @@ class ReliableProxySpec extends MultiNodeSpec(ReliableProxySpec) with STMultiNod } runOn(remote) { within(1 second) { - (1 to 100) foreach { n ⇒ expectMsg(n); lastSender must be === target } + expectN(100) } } @@ -162,7 +171,7 @@ class ReliableProxySpec extends MultiNodeSpec(ReliableProxySpec) with STMultiNod "resend across a slow link" in { runOn(local) { testConductor.throttle(local, remote, Direction.Send, rateMBit = 0.1).await - (1 to 50) foreach (proxy ! 
_) + sendN(50) within(5 seconds) { expectTransition(Idle, Active) expectTransition(Active, Idle) @@ -170,7 +179,7 @@ class ReliableProxySpec extends MultiNodeSpec(ReliableProxySpec) with STMultiNod } runOn(remote) { within(5 seconds) { - (1 to 50) foreach { n ⇒ expectMsg(n); lastSender must be === target } + expectN(50) } } @@ -179,7 +188,7 @@ class ReliableProxySpec extends MultiNodeSpec(ReliableProxySpec) with STMultiNod runOn(local) { testConductor.throttle(local, remote, Direction.Send, rateMBit = -1).await testConductor.throttle(local, remote, Direction.Receive, rateMBit = 0.1).await - (1 to 50) foreach (proxy ! _) + sendN(50) within(5 seconds) { expectTransition(Idle, Active) expectTransition(Active, Idle) @@ -187,7 +196,7 @@ class ReliableProxySpec extends MultiNodeSpec(ReliableProxySpec) with STMultiNod } runOn(remote) { within(1 second) { - (1 to 50) foreach { n ⇒ expectMsg(n); lastSender must be === target } + expectN(50) } } diff --git a/akka-contrib/src/test/scala/akka/contrib/jul/JulEventHandlerSpec.scala b/akka-contrib/src/test/scala/akka/contrib/jul/JulEventHandlerSpec.scala new file mode 100644 index 0000000000..9508b75f8d --- /dev/null +++ b/akka-contrib/src/test/scala/akka/contrib/jul/JulEventHandlerSpec.scala @@ -0,0 +1,76 @@ +package akka.contrib.jul + +import com.typesafe.config.ConfigFactory +import akka.actor.{ ActorSystem, Actor, ActorLogging, Props } +import akka.testkit.AkkaSpec +import java.util.logging +import java.io.ByteArrayInputStream + +object JavaLoggingEventHandlerSpec { + + val config = ConfigFactory.parseString(""" + akka { + loglevel = INFO + event-handlers = ["akka.contrib.jul.JavaLoggingEventHandler"] + }""") + + class LogProducer extends Actor with ActorLogging { + def receive = { + case e: Exception ⇒ + log.error(e, e.getMessage) + case (s: String, x: Int) ⇒ + log.info(s, x) + } + } +} + +@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) +class JavaLoggingEventHandlerSpec extends AkkaSpec(JavaLoggingEventHandlerSpec.config) { + + val logger = logging.Logger.getLogger("akka://JavaLoggingEventHandlerSpec/user/log") + logger.setUseParentHandlers(false) // turn off output of test LogRecords + logger.addHandler(new logging.Handler { + def publish(record: logging.LogRecord) { + testActor ! record + } + + def flush() {} + def close() {} + }) + + val producer = system.actorOf(Props[JavaLoggingEventHandlerSpec.LogProducer], name = "log") + + "JavaLoggingEventHandler" must { + + "log error with stackTrace" in { + producer ! new RuntimeException("Simulated error") + + val record = expectMsgType[logging.LogRecord] + + record must not be (null) + record.getMillis must not be (0) + record.getThreadID must not be (0) + record.getLevel must be(logging.Level.SEVERE) + record.getMessage must be("Simulated error") + record.getThrown.isInstanceOf[RuntimeException] must be(true) + record.getSourceClassName must be("akka.contrib.jul.JavaLoggingEventHandlerSpec$LogProducer") + record.getSourceMethodName must be(null) + } + + "log info without stackTrace" in { + producer ! 
("{} is the magic number", 3) + + val record = expectMsgType[logging.LogRecord] + + record must not be (null) + record.getMillis must not be (0) + record.getThreadID must not be (0) + record.getLevel must be(logging.Level.INFO) + record.getMessage must be("3 is the magic number") + record.getThrown must be(null) + record.getSourceClassName must be("akka.contrib.jul.JavaLoggingEventHandlerSpec$LogProducer") + record.getSourceMethodName must be(null) + } + } + +} diff --git a/akka-contrib/src/test/scala/akka/contrib/throttle/TimerBasedThrottlerSpec.scala b/akka-contrib/src/test/scala/akka/contrib/throttle/TimerBasedThrottlerSpec.scala index cf13f42a24..7304df1448 100644 --- a/akka-contrib/src/test/scala/akka/contrib/throttle/TimerBasedThrottlerSpec.scala +++ b/akka-contrib/src/test/scala/akka/contrib/throttle/TimerBasedThrottlerSpec.scala @@ -46,7 +46,8 @@ class TimerBasedThrottlerSpec extends TestKit(ActorSystem("TimerBasedThrottlerSp } })) // The throttler for this example, setting the rate - val throttler = system.actorOf(Props(new TimerBasedThrottler(3 msgsPer (1.second.dilated)))) + val throttler = system.actorOf(Props(new TimerBasedThrottler( + 3 msgsPer (1.second.dilated)))) // Set the target throttler ! SetTarget(Some(printer)) // These three messages will be sent to the echoer immediately diff --git a/akka-docs/rst/cluster/cluster-usage-java.rst b/akka-docs/rst/cluster/cluster-usage-java.rst index c674bff3a6..0b418350b2 100644 --- a/akka-docs/rst/cluster/cluster-usage-java.rst +++ b/akka-docs/rst/cluster/cluster-usage-java.rst @@ -31,20 +31,18 @@ A Simple Cluster Example ^^^^^^^^^^^^^^^^^^^^^^^^ The following small program together with its configuration starts an ``ActorSystem`` -with the Cluster extension enabled. It joins the cluster and logs some membership events. +with the Cluster enabled. It joins the cluster and logs some membership events. Try it out: 1. Add the following ``application.conf`` in your project, place it in ``src/main/resources``: -.. literalinclude:: ../../../akka-samples/akka-sample-cluster/src/main/resources/application.conf - :language: none +.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/resources/application.conf#cluster To enable cluster capabilities in your Akka project you should, at a minimum, add the :ref:`remoting-java` settings, but with ``akka.cluster.ClusterActorRefProvider``. -The ``akka.cluster.seed-nodes`` and cluster extension should normally also be added to your -``application.conf`` file. +The ``akka.cluster.seed-nodes`` should normally also be added to your ``application.conf`` file. The seed nodes are configured contact points for initial, automatic, join of the cluster. @@ -241,6 +239,25 @@ frontend nodes and 3 backend nodes:: .. note:: The above example should probably be designed as two separate, frontend/backend, clusters, when there is a `cluster client for decoupling clusters `_. +How To Startup when Cluster Size Reached +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +A common use case is to start actors after the cluster has been initialized, +members have joined, and the cluster has reached a certain size. + +With a configuration option you can define required number of members +before the leader changes member status of 'Joining' members to 'Up'. + +.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/resources/factorial.conf#min-nr-of-members + +You can start the actors in a ``registerOnMemberUp`` callback, which will +be invoked when the current member status is changed tp 'Up', i.e. 
the cluster +has at least the defined number of members. + +.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/java/sample/cluster/factorial/japi/FactorialFrontendMain.java#registerOnUp + +This callback can be used for other things than starting actors. + Failure Detector ^^^^^^^^^^^^^^^^ @@ -438,6 +455,107 @@ service nodes and 1 client:: .. note:: The above example, especially the last part, will be simplified when the cluster handles automatic actor partitioning. +Cluster Metrics +^^^^^^^^^^^^^^^ + +The member nodes of the cluster collects system health metrics and publishes that to other nodes and to +registered subscribers. This information is primarily used for load-balancing routers. + +Hyperic Sigar +------------- + +The built-in metrics is gathered from JMX MBeans, and optionally you can use `Hyperic Sigar `_ +for a wider and more accurate range of metrics compared to what can be retrieved from ordinary MBeans. +Sigar is using a native OS library. To enable usage of Sigar you need to add the directory of the native library to +``-Djava.libarary.path=`` add the following dependency:: + + + org.hyperic + sigar + @sigarVersion@ + + + + +Adaptive Load Balancing +----------------------- + +The ``AdaptiveLoadBalancingRouter`` performs load balancing of messages to cluster nodes based on the cluster metrics data. +It uses random selection of routees with probabilities derived from the remaining capacity of the corresponding node. +It can be configured to use a specific MetricsSelector to produce the probabilities, a.k.a. weights: + +* ``heap`` / ``HeapMetricsSelector`` - Used and max JVM heap memory. Weights based on remaining heap capacity; (max - used) / max +* ``load`` / ``SystemLoadAverageMetricsSelector`` - System load average for the past 1 minute, corresponding value can be found in ``top`` of Linux systems. The system is possibly nearing a bottleneck if the system load average is nearing number of cpus/cores. Weights based on remaining load capacity; 1 - (load / processors) +* ``cpu`` / ``CpuMetricsSelector`` - CPU utilization in percentage, sum of User + Sys + Nice + Wait. Weights based on remaining cpu capacity; 1 - utilization +* ``mix`` / ``MixMetricsSelector`` - Combines heap, cpu and load. Weights based on mean of remaining capacity of the combined selectors. +* Any custom implementation of ``akka.cluster.routing.MetricsSelector`` + +The collected metrics values are smoothed with `exponential weighted moving average `_. In the :ref:`cluster_configuration_java` you can adjust how quickly past data is decayed compared to new data. + +Let's take a look at this router in action. + +In this example the following imports are used: + +.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/java/sample/cluster/factorial/japi/FactorialBackend.java#imports + +The backend worker that performs the factorial calculation: + +.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/java/sample/cluster/factorial/japi/FactorialBackend.java#backend + +The frontend that receives user jobs and delegates to the backends via the router: + +.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/java/sample/cluster/factorial/japi/FactorialFrontend.java#frontend + + +As you can see, the router is defined in the same way as other routers, and in this case it's configured as follows: + +.. 
includecode:: ../../../akka-samples/akka-sample-cluster/src/main/resources/application.conf#adaptive-router + +It's only router type ``adaptive`` and the ``metrics-selector`` that is specific to this router, other things work +in the same way as other routers. + +The same type of router could also have been defined in code: + +.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/java/sample/cluster/factorial/japi/FactorialFrontend.java#router-lookup-in-code + +.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/java/sample/cluster/factorial/japi/FactorialFrontend.java#router-deploy-in-code + +This example is included in ``akka-samples/akka-sample-cluster`` and you can try it by copying the +`source <@github@/akka-samples/akka-sample-cluster>`_ to your +maven project, defined as in :ref:`cluster_simple_example_java`. +Run it by starting nodes in different terminal windows. For example, starting 3 backend nodes and +one frontend:: + + mvn exec:java \ + -Dexec.mainClass="sample.cluster.factorial.FactorialBackendMain" \ + -Dexec.args="2551" + + mvn exec:java \ + -Dexec.mainClass="sample.cluster.factorial.FactorialBackendMain" \ + -Dexec.args="2552" + + mvn exec:java \ + -Dexec.mainClass="sample.cluster.factorial.FactorialBackendMain" + + mvn exec:java \ + -Dexec.mainClass="sample.cluster.factorial.FactorialFrontendMain" + +Press ctrl-c in the terminal window of the frontend to stop the factorial calculations. + + +Subscribe to Metrics Events +--------------------------- + +It's possible to subscribe to the metrics events directly to implement other functionality. + +.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/java/sample/cluster/factorial/japi/MetricsListener.java#metrics-listener + +Custom Metrics Collector +------------------------ + +You can plug-in your own metrics collector instead of +``akka.cluster.SigarMetricsCollector`` or ``akka.cluster.JmxMetricsCollector``. Look at those two implementations +for inspiration. The implementation class can be defined in the :ref:`cluster_configuration_java`. .. _cluster_jmx_java: @@ -474,15 +592,16 @@ Run it without parameters to see instructions about how to use the script:: leave - Sends a request for node with URL to LEAVE the cluster down - Sends a request for marking node with URL as DOWN member-status - Asks the member node for its current status + members - Asks the cluster for addresses of current members + unreachable - Asks the cluster for addresses of unreachable members cluster-status - Asks the cluster for its current status (member ring, unavailable nodes, meta data etc.) leader - Asks the cluster who the current leader is is-singleton - Checks if the cluster is a singleton cluster (single node cluster) is-available - Checks if the member node is available - is-running - Checks if the member node is running - has-convergence - Checks if there is a cluster convergence - Where the should be on the format of 'akka://actor-system-name@hostname:port' + Where the should be on the format of + 'akka://actor-system-name@hostname:port' Examples: bin/akka-cluster localhost:9999 is-available bin/akka-cluster localhost:9999 join akka://MySystem@darkstar:2552 @@ -522,7 +641,7 @@ introduce the extra overhead of another thread. 
:: # shorter tick-duration of default scheduler when using cluster - akka.scheduler.tick-duration.tick-duration = 33ms + akka.scheduler.tick-duration = 33ms diff --git a/akka-docs/rst/cluster/cluster-usage-scala.rst b/akka-docs/rst/cluster/cluster-usage-scala.rst index 31ce7e7191..717d084d96 100644 --- a/akka-docs/rst/cluster/cluster-usage-scala.rst +++ b/akka-docs/rst/cluster/cluster-usage-scala.rst @@ -25,20 +25,18 @@ A Simple Cluster Example ^^^^^^^^^^^^^^^^^^^^^^^^ The following small program together with its configuration starts an ``ActorSystem`` -with the Cluster extension enabled. It joins the cluster and logs some membership events. +with the Cluster enabled. It joins the cluster and logs some membership events. Try it out: 1. Add the following ``application.conf`` in your project, place it in ``src/main/resources``: -.. literalinclude:: ../../../akka-samples/akka-sample-cluster/src/main/resources/application.conf - :language: none +.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/resources/application.conf#cluster To enable cluster capabilities in your Akka project you should, at a minimum, add the :ref:`remoting-scala` settings, but with ``akka.cluster.ClusterActorRefProvider``. -The ``akka.cluster.seed-nodes`` and cluster extension should normally also be added to your -``application.conf`` file. +The ``akka.cluster.seed-nodes`` should normally also be added to your ``application.conf`` file. The seed nodes are configured contact points for initial, automatic, join of the cluster. @@ -214,6 +212,25 @@ frontend nodes and 3 backend nodes:: .. note:: The above example should probably be designed as two separate, frontend/backend, clusters, when there is a `cluster client for decoupling clusters `_. +How To Startup when Cluster Size Reached +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +A common use case is to start actors after the cluster has been initialized, +members have joined, and the cluster has reached a certain size. + +With a configuration option you can define required number of members +before the leader changes member status of 'Joining' members to 'Up'. + +.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/resources/factorial.conf#min-nr-of-members + +You can start the actors in a ``registerOnMemberUp`` callback, which will +be invoked when the current member status is changed tp 'Up', i.e. the cluster +has at least the defined number of members. + +.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/scala/sample/cluster/factorial/FactorialSample.scala#registerOnUp + +This callback can be used for other things than starting actors. + Failure Detector ^^^^^^^^^^^^^^^^ @@ -265,6 +282,8 @@ This is how the curve looks like for ``acceptable-heartbeat-pause`` configured t .. image:: images/phi3.png +.. _cluster_aware_routers_scala: + Cluster Aware Routers ^^^^^^^^^^^^^^^^^^^^^ @@ -397,6 +416,97 @@ service nodes and 1 client:: .. note:: The above example, especially the last part, will be simplified when the cluster handles automatic actor partitioning. +Cluster Metrics +^^^^^^^^^^^^^^^ + +The member nodes of the cluster collects system health metrics and publishes that to other nodes and to +registered subscribers. This information is primarily used for load-balancing routers. + +Hyperic Sigar +------------- + +The built-in metrics is gathered from JMX MBeans, and optionally you can use `Hyperic Sigar `_ +for a wider and more accurate range of metrics compared to what can be retrieved from ordinary MBeans. 
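Editor's note (not part of the patch): the JMX-versus-Sigar choice described here is exactly what the new ``MetricsCollectorFactory`` test helper earlier in this diff encodes: try to load Sigar reflectively and fall back to the JMX-only collector when the native library is not available. A minimal Scala sketch, reusing the constructors as that helper calls them (class and method names are taken from this patch and may differ in other versions)::

    import scala.util.Try
    import akka.actor.ExtendedActorSystem
    import akka.cluster.{ JmxMetricsCollector, MetricsCollector, SigarMetricsCollector }

    // Sketch only: constructor signatures mirror MetricsCollectorFactory in this patch.
    def metricsCollector(system: ExtendedActorSystem, decayFactor: Double): MetricsCollector = {
      val address = system.provider.rootPath.address
      Try(new SigarMetricsCollector(address, decayFactor,
        system.dynamicAccess.createInstanceFor[AnyRef]("org.hyperic.sigar.Sigar", Nil))).
        recover {
          // Sigar (native library) not on the library path: fall back to plain JMX metrics
          case _ ⇒ new JmxMetricsCollector(address, decayFactor)
        }.get
    }
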
+Sigar is using a native OS library. To enable usage of Sigar you need to add the directory of the native library to +``-Djava.libarary.path=`` add the following dependency:: + + "org.hyperic" % "sigar" % "@sigarVersion@" + + +Adaptive Load Balancing +----------------------- + +The ``AdaptiveLoadBalancingRouter`` performs load balancing of messages to cluster nodes based on the cluster metrics data. +It uses random selection of routees with probabilities derived from the remaining capacity of the corresponding node. +It can be configured to use a specific MetricsSelector to produce the probabilities, a.k.a. weights: + +* ``heap`` / ``HeapMetricsSelector`` - Used and max JVM heap memory. Weights based on remaining heap capacity; (max - used) / max +* ``load`` / ``SystemLoadAverageMetricsSelector`` - System load average for the past 1 minute, corresponding value can be found in ``top`` of Linux systems. The system is possibly nearing a bottleneck if the system load average is nearing number of cpus/cores. Weights based on remaining load capacity; 1 - (load / processors) +* ``cpu`` / ``CpuMetricsSelector`` - CPU utilization in percentage, sum of User + Sys + Nice + Wait. Weights based on remaining cpu capacity; 1 - utilization +* ``mix`` / ``MixMetricsSelector`` - Combines heap, cpu and load. Weights based on mean of remaining capacity of the combined selectors. +* Any custom implementation of ``akka.cluster.routing.MetricsSelector`` + +The collected metrics values are smoothed with `exponential weighted moving average `_. In the :ref:`cluster_configuration_scala` you can adjust how quickly past data is decayed compared to new data. + +Let's take a look at this router in action. + +In this example the following imports are used: + +.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/scala/sample/cluster/factorial/FactorialSample.scala#imports + +The backend worker that performs the factorial calculation: + +.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/scala/sample/cluster/factorial/FactorialSample.scala#backend + +The frontend that receives user jobs and delegates to the backends via the router: + +.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/scala/sample/cluster/factorial/FactorialSample.scala#frontend + + +As you can see, the router is defined in the same way as other routers, and in this case it's configured as follows: + +.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/resources/application.conf#adaptive-router + +It's only router type ``adaptive`` and the ``metrics-selector`` that is specific to this router, other things work +in the same way as other routers. + +The same type of router could also have been defined in code: + +.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/scala/sample/cluster/factorial/FactorialSample.scala#router-lookup-in-code + +.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/scala/sample/cluster/factorial/FactorialSample.scala#router-deploy-in-code + +This example is included in ``akka-samples/akka-sample-cluster`` +and you can try by starting nodes in different terminal windows. 
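Editor's note (not part of the patch): the selector list above ends with "any custom implementation of ``akka.cluster.routing.MetricsSelector``". Assuming the ``CapacityMetricsSelector`` contract shown in ``MetricsSelectorSpec`` earlier in this diff (a capacity value per ``Address``, higher meaning more headroom), a hypothetical heap-only selector could look like the sketch below; the run instructions then continue right after it::

    import akka.actor.Address
    import akka.cluster.NodeMetrics
    import akka.cluster.StandardMetrics.HeapMemory
    import akka.cluster.routing.CapacityMetricsSelector

    // Hypothetical example: weight nodes by remaining heap, (max - used) / max,
    // the same formula the text above gives for HeapMetricsSelector. Nodes
    // without a defined max heap are simply left out of the capacity map.
    object RemainingHeapSelector extends CapacityMetricsSelector {
      override def capacity(nodeMetrics: Set[NodeMetrics]): Map[Address, Double] =
        nodeMetrics.collect {
          case HeapMemory(address, _, used, _, Some(max)) ⇒
            address -> ((max - used).toDouble / max)
        }.toMap
    }
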
For example, starting 3 backend nodes and one frontend:: + + sbt + + project akka-sample-cluster-experimental + + run-main sample.cluster.factorial.FactorialBackend 2551 + + run-main sample.cluster.factorial.FactorialBackend 2552 + + run-main sample.cluster.factorial.FactorialBackend + + run-main sample.cluster.factorial.FactorialFrontend + +Press ctrl-c in the terminal window of the frontend to stop the factorial calculations. + +Subscribe to Metrics Events +--------------------------- + +It's possible to subscribe to the metrics events directly to implement other functionality. + +.. includecode:: ../../../akka-samples/akka-sample-cluster/src/main/scala/sample/cluster/factorial/FactorialSample.scala#metrics-listener + +Custom Metrics Collector +------------------------ + +You can plug-in your own metrics collector instead of +``akka.cluster.SigarMetricsCollector`` or ``akka.cluster.JmxMetricsCollector``. Look at those two implementations +for inspiration. The implementation class can be defined in the :ref:`cluster_configuration_scala`. + How to Test ^^^^^^^^^^^ @@ -488,15 +598,16 @@ Run it without parameters to see instructions about how to use the script:: leave - Sends a request for node with URL to LEAVE the cluster down - Sends a request for marking node with URL as DOWN member-status - Asks the member node for its current status + members - Asks the cluster for addresses of current members + unreachable - Asks the cluster for addresses of unreachable members cluster-status - Asks the cluster for its current status (member ring, unavailable nodes, meta data etc.) leader - Asks the cluster who the current leader is is-singleton - Checks if the cluster is a singleton cluster (single node cluster) is-available - Checks if the member node is available - is-running - Checks if the member node is running - has-convergence - Checks if there is a cluster convergence - Where the should be on the format of 'akka://actor-system-name@hostname:port' + Where the should be on the format of + 'akka://actor-system-name@hostname:port' Examples: bin/akka-cluster localhost:9999 is-available bin/akka-cluster localhost:9999 join akka://MySystem@darkstar:2552 @@ -536,7 +647,7 @@ introduce the extra overhead of another thread. :: # shorter tick-duration of default scheduler when using cluster - akka.scheduler.tick-duration.tick-duration = 33ms + akka.scheduler.tick-duration = 33ms diff --git a/akka-docs/rst/cluster/cluster.rst b/akka-docs/rst/cluster/cluster.rst index 1190da953a..dfcb4f0a42 100644 --- a/akka-docs/rst/cluster/cluster.rst +++ b/akka-docs/rst/cluster/cluster.rst @@ -84,9 +84,9 @@ Gossip The cluster membership used in Akka is based on Amazon's `Dynamo`_ system and particularly the approach taken in Basho's' `Riak`_ distributed database. Cluster membership is communicated using a `Gossip Protocol`_, where the current -state of the cluster is gossiped randomly through the cluster. Joining a cluster -is initiated by issuing a ``Join`` command to one of the nodes in the cluster to -join. +state of the cluster is gossiped randomly through the cluster, with preference to +members that have not seen the latest version. Joining a cluster is initiated +by issuing a ``Join`` command to one of the nodes in the cluster to join. .. _Gossip Protocol: http://en.wikipedia.org/wiki/Gossip_protocol .. _Dynamo: http://www.allthingsdistributed.com/files/amazon-dynamo-sosp2007.pdf @@ -209,8 +209,7 @@ node to initiate a round of gossip with. 
The choice of node is random but can also include extra gossiping nodes with either newer or older state versions. The gossip overview contains the current state version for all nodes and also a -list of unreachable nodes. Whenever a node receives a gossip overview it updates -the `Failure Detector`_ with the liveness information. +list of unreachable nodes. The nodes defined as ``seed`` nodes are just regular member nodes whose only "special role" is to function as contact points in the cluster. diff --git a/akka-docs/rst/dev/documentation.rst b/akka-docs/rst/dev/documentation.rst index a9a87f013e..b990a6bbf3 100644 --- a/akka-docs/rst/dev/documentation.rst +++ b/akka-docs/rst/dev/documentation.rst @@ -127,7 +127,7 @@ Add texlive bin to $PATH: :: - /usr/local/texlive/2010basic/bin/universal-darwin + /usr/local/texlive/2012basic/bin/universal-darwin Add missing tex packages: @@ -140,10 +140,3 @@ Add missing tex packages: sudo tlmgr install wrapfig sudo tlmgr install helvetic sudo tlmgr install courier - -Link the akka pygments style: - -:: - - cd /usr/local/Cellar/python/2.7.1/lib/python2.7/site-packages/pygments/styles - ln -s /path/to/akka/akka-docs/themes/akka/pygments/akka.py akka.py diff --git a/akka-docs/rst/general/jmm.rst b/akka-docs/rst/general/jmm.rst index 085a347451..dc0c87e2a4 100644 --- a/akka-docs/rst/general/jmm.rst +++ b/akka-docs/rst/general/jmm.rst @@ -13,9 +13,9 @@ Prior to Java 5, the Java Memory Model (JMM) was ill defined. It was possible to shared memory was accessed by multiple threads, such as: * a thread not seeing values written by other threads: a visibility problem -* a thread observing 'impossible' behavior of other threads, caused by instructions not being executed in the order - -expected: an instruction reordering problem. +* a thread observing 'impossible' behavior of other threads, caused by + instructions not being executed in the order expected: an instruction + reordering problem. With the implementation of JSR 133 in Java 5, a lot of these issues have been resolved. The JMM is a set of rules based on the "happens-before" relation, which constrain when one memory access must happen before another, and conversely, @@ -120,4 +120,4 @@ Since Akka runs on the JVM there are still some rules to be followed. } } -* Messages **should** be immutable, this is to avoid the shared mutable state trap. \ No newline at end of file +* Messages **should** be immutable, this is to avoid the shared mutable state trap. diff --git a/akka-docs/rst/intro/why-akka.rst b/akka-docs/rst/intro/why-akka.rst index 85789fdf19..e11cfee187 100644 --- a/akka-docs/rst/intro/why-akka.rst +++ b/akka-docs/rst/intro/why-akka.rst @@ -24,7 +24,7 @@ and then there's the whole package, the Akka Microkernel, which is a standalone container to deploy your Akka application in. With CPUs growing more and more cores every cycle, Akka is the alternative that provides outstanding performance even if you're only running it on one machine. Akka also supplies a wide array -of concurrency-paradigms, allowing for users to choose the right tool for the +of concurrency-paradigms, allowing users to choose the right tool for the job. diff --git a/akka-docs/rst/java/camel.rst b/akka-docs/rst/java/camel.rst index 9eff4ae561..4825e4e4a1 100644 --- a/akka-docs/rst/java/camel.rst +++ b/akka-docs/rst/java/camel.rst @@ -388,6 +388,8 @@ URI options The following URI options are supported: +.. 
tabularcolumns:: |l|l|l|L| + +--------------+----------+---------+------------------------------------------------+ | Name | Type | Default | Description | +==============+==========+=========+================================================+ diff --git a/akka-docs/rst/java/code/docs/actor/SchedulerDocTestBase.java b/akka-docs/rst/java/code/docs/actor/SchedulerDocTestBase.java index 0b3d55f33f..66b5181ba8 100644 --- a/akka-docs/rst/java/code/docs/actor/SchedulerDocTestBase.java +++ b/akka-docs/rst/java/code/docs/actor/SchedulerDocTestBase.java @@ -79,7 +79,7 @@ public class SchedulerDocTestBase { //to the tickActor after 0ms repeating every 50ms Cancellable cancellable = system.scheduler().schedule(Duration.Zero(), Duration.create(50, TimeUnit.MILLISECONDS), tickActor, "Tick", - system.dispatcher()); + system.dispatcher(), null); //This cancels further Ticks to be sent cancellable.cancel(); diff --git a/akka-docs/rst/java/code/docs/actor/japi/FaultHandlingDocSample.java b/akka-docs/rst/java/code/docs/actor/japi/FaultHandlingDocSample.java index 5b7a3073c3..37d1da703a 100644 --- a/akka-docs/rst/java/code/docs/actor/japi/FaultHandlingDocSample.java +++ b/akka-docs/rst/java/code/docs/actor/japi/FaultHandlingDocSample.java @@ -148,7 +148,7 @@ public class FaultHandlingDocSample { progressListener = getSender(); getContext().system().scheduler().schedule( Duration.Zero(), Duration.create(1, "second"), getSelf(), Do, - getContext().dispatcher() + getContext().dispatcher(), null ); } else if (msg.equals(Do)) { counterService.tell(new Increment(1), getSelf()); diff --git a/akka-docs/rst/java/code/docs/future/FutureDocTestBase.java b/akka-docs/rst/java/code/docs/future/FutureDocTestBase.java index fb49a9a48d..975814ded2 100644 --- a/akka-docs/rst/java/code/docs/future/FutureDocTestBase.java +++ b/akka-docs/rst/java/code/docs/future/FutureDocTestBase.java @@ -43,10 +43,10 @@ import scala.concurrent.ExecutionContext$; //#imports8 import static akka.pattern.Patterns.after; +import java.util.Arrays; //#imports8 import java.util.ArrayList; -import java.util.Arrays; import java.util.List; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; @@ -564,7 +564,7 @@ public class FutureDocTestBase { return "foo"; } }, ec); - Future result = future.either(delayed); + Future result = Futures.firstCompletedOf(Arrays.asList(future, delayed), ec); //#after Await.result(result, Duration.create(2, SECONDS)); } diff --git a/akka-docs/rst/java/code/docs/jrouting/RouterViaProgramExample.java b/akka-docs/rst/java/code/docs/jrouting/RouterViaProgramExample.java index 7065524e52..a3a48f300c 100644 --- a/akka-docs/rst/java/code/docs/jrouting/RouterViaProgramExample.java +++ b/akka-docs/rst/java/code/docs/jrouting/RouterViaProgramExample.java @@ -70,7 +70,7 @@ public class RouterViaProgramExample { int upperBound = 15; DefaultResizer resizer = new DefaultResizer(lowerBound, upperBound); ActorRef router3 = system.actorOf( - new Props(ExampleActor.class).withRouter(new RoundRobinRouter(nrOfInstances))); + new Props(ExampleActor.class).withRouter(new RoundRobinRouter(resizer))); //#programmaticRoutingWithResizer for (int i = 1; i <= 6; i++) { router3.tell(new ExampleActor.Message(i), null); diff --git a/akka-docs/rst/java/code/docs/pattern/SchedulerPatternTest.java b/akka-docs/rst/java/code/docs/pattern/SchedulerPatternTest.java index e712eee146..bb3eb8b777 100644 --- a/akka-docs/rst/java/code/docs/pattern/SchedulerPatternTest.java +++ 
b/akka-docs/rst/java/code/docs/pattern/SchedulerPatternTest.java @@ -35,7 +35,7 @@ public class SchedulerPatternTest { private final Cancellable tick = getContext().system().scheduler().schedule( Duration.create(500, TimeUnit.MILLISECONDS), Duration.create(1000, TimeUnit.MILLISECONDS), - getSelf(), "tick", getContext().dispatcher()); + getSelf(), "tick", getContext().dispatcher(), null); //#schedule-constructor // this variable and constructor is declared here to not show up in the docs final ActorRef target; diff --git a/akka-docs/rst/java/code/docs/pattern/SupervisedAsk.java b/akka-docs/rst/java/code/docs/pattern/SupervisedAsk.java new file mode 100644 index 0000000000..248b8acd42 --- /dev/null +++ b/akka-docs/rst/java/code/docs/pattern/SupervisedAsk.java @@ -0,0 +1,108 @@ +package docs.pattern; + +import java.util.concurrent.TimeoutException; + +import scala.concurrent.Future; +import scala.concurrent.duration.Duration; + +import akka.actor.ActorKilledException; +import akka.actor.ActorRef; +import akka.actor.ActorRefFactory; +import akka.actor.Cancellable; +import akka.actor.OneForOneStrategy; +import akka.actor.Props; +import akka.actor.Scheduler; +import akka.actor.Status; +import akka.actor.SupervisorStrategy; +import akka.actor.SupervisorStrategy.Directive; +import akka.actor.Terminated; +import akka.actor.UntypedActor; +import akka.japi.Function; +import akka.pattern.Patterns; +import akka.util.Timeout; + +public class SupervisedAsk { + + private static class AskParam { + Props props; + Object message; + Timeout timeout; + + AskParam(Props props, Object message, Timeout timeout) { + this.props = props; + this.message = message; + this.timeout = timeout; + } + } + + private static class AskTimeout { + } + + public static class AskSupervisorCreator extends UntypedActor { + + @Override + public void onReceive(Object message) throws Exception { + if (message instanceof AskParam) { + ActorRef supervisor = getContext().actorOf( + Props.apply(AskSupervisor.class)); + supervisor.forward(message, getContext()); + } else { + unhandled(message); + } + } + } + + public static class AskSupervisor extends UntypedActor { + private ActorRef targetActor; + private ActorRef caller; + private AskParam askParam; + private Cancellable timeoutMessage; + + @Override + public SupervisorStrategy supervisorStrategy() { + return new OneForOneStrategy(0, Duration.Zero(), + new Function() { + public Directive apply(Throwable cause) { + caller.tell(new Status.Failure(cause), self()); + return SupervisorStrategy.stop(); + } + }); + } + + @Override + public void onReceive(Object message) throws Exception { + if (message instanceof AskParam) { + askParam = (AskParam) message; + caller = getSender(); + targetActor = getContext().actorOf(askParam.props); + getContext().watch(targetActor); + targetActor.forward(askParam.message, getContext()); + Scheduler scheduler = getContext().system().scheduler(); + timeoutMessage = scheduler.scheduleOnce(askParam.timeout.duration(), + self(), new AskTimeout(), context().dispatcher()); + } else if (message instanceof Terminated) { + Throwable ex = new ActorKilledException("Target actor terminated."); + caller.tell(new Status.Failure(ex), self()); + timeoutMessage.cancel(); + getContext().stop(self()); + } else if (message instanceof AskTimeout) { + Throwable ex = new TimeoutException("Target actor timed out after " + + askParam.timeout.toString()); + caller.tell(new Status.Failure(ex), self()); + getContext().stop(self()); + } else + unhandled(message); + } + } + + public static 
Future askOf(ActorRef supervisorCreator, Props props, + Object message, Timeout timeout) { + AskParam param = new AskParam(props, message, timeout); + return Patterns.ask(supervisorCreator, param, timeout); + } + + synchronized public static ActorRef createSupervisorCreator( + ActorRefFactory factory) { + return factory.actorOf(Props.apply(AskSupervisorCreator.class)); + } +} \ No newline at end of file diff --git a/akka-docs/rst/java/code/docs/pattern/SupervisedAskSpec.java b/akka-docs/rst/java/code/docs/pattern/SupervisedAskSpec.java new file mode 100644 index 0000000000..4486f56e98 --- /dev/null +++ b/akka-docs/rst/java/code/docs/pattern/SupervisedAskSpec.java @@ -0,0 +1,28 @@ +package docs.pattern; + +import scala.concurrent.Await; +import scala.concurrent.Future; +import akka.actor.ActorRef; +import akka.actor.ActorRefFactory; +import akka.actor.Props; +import akka.actor.UntypedActor; +import akka.util.Timeout; + +public class SupervisedAskSpec { + + public Object execute(Class someActor, + Object message, Timeout timeout, ActorRefFactory actorSystem) + throws Exception { + // example usage + try { + ActorRef supervisorCreator = SupervisedAsk + .createSupervisorCreator(actorSystem); + Future finished = SupervisedAsk.askOf(supervisorCreator, + Props.apply(someActor), message, timeout); + return Await.result(finished, timeout.duration()); + } catch (Exception e) { + // exception propagated by supervision + throw e; + } + } +} diff --git a/akka-docs/rst/java/code/docs/serialization/SerializationDocTestBase.java b/akka-docs/rst/java/code/docs/serialization/SerializationDocTestBase.java index 78c85f3e9e..db46031584 100644 --- a/akka-docs/rst/java/code/docs/serialization/SerializationDocTestBase.java +++ b/akka-docs/rst/java/code/docs/serialization/SerializationDocTestBase.java @@ -138,12 +138,7 @@ public class SerializationDocTestBase { } public Address getAddress() { - final ActorRefProvider provider = system.provider(); - if (provider instanceof RemoteActorRefProvider) { - return ((RemoteActorRefProvider) provider).transport().defaultAddress(); - } else { - throw new UnsupportedOperationException("need RemoteActorRefProvider"); - } + return system.provider().getDefaultAddress(); } } diff --git a/akka-docs/rst/java/code/docs/zeromq/ZeromqDocTestBase.java b/akka-docs/rst/java/code/docs/zeromq/ZeromqDocTestBase.java index d9d09a9bac..d0020c0a5a 100644 --- a/akka-docs/rst/java/code/docs/zeromq/ZeromqDocTestBase.java +++ b/akka-docs/rst/java/code/docs/zeromq/ZeromqDocTestBase.java @@ -16,7 +16,7 @@ import akka.zeromq.Subscribe; import akka.zeromq.Unsubscribe; //#import-unsub-topic-socket //#import-pub-topic -import akka.zeromq.Frame; +import akka.util.ByteString; import akka.zeromq.ZMQMessage; //#import-pub-topic @@ -96,9 +96,12 @@ public class ZeromqDocTestBase { byte[] payload = new byte[0]; //#pub-topic - pubSocket.tell(new ZMQMessage(new Frame("foo.bar"), new Frame(payload)), null); + pubSocket.tell(ZMQMessage.withFrames(ByteString.fromString("foo.bar"), ByteString.fromArray(payload)), null); //#pub-topic + system.stop(subSocket); + system.stop(subTopicSocket); + //#high-watermark ActorRef highWatermarkSocket = ZeroMQExtension.get(system).newRouterSocket( new SocketOption[] { new Listener(listener), @@ -133,7 +136,7 @@ public class ZeromqDocTestBase { private boolean checkZeroMQInstallation() { try { ZeroMQVersion v = ZeroMQExtension.get(system).version(); - return (v.major() == 2 && v.minor() == 1); + return (v.major() >= 3 || (v.major() >= 2 && v.minor() >= 1)); } catch (LinkageError e) 
{ return false; } @@ -195,7 +198,7 @@ public class ZeromqDocTestBase { public void preStart() { getContext().system().scheduler() .schedule(Duration.create(1, "second"), Duration.create(1, "second"), - getSelf(), TICK, getContext().dispatcher()); + getSelf(), TICK, getContext().dispatcher(), null); } @Override @@ -210,18 +213,23 @@ public class ZeromqDocTestBase { long timestamp = System.currentTimeMillis(); // use akka SerializationExtension to convert to bytes - byte[] heapPayload = ser.serializerFor(Heap.class).toBinary( - new Heap(timestamp, currentHeap.getUsed(), currentHeap.getMax())); + ByteString heapTopic = ByteString.fromString("health.heap", "UTF-8"); + ByteString heapPayload = ByteString.fromArray( + ser.serialize( + new Heap(timestamp, + currentHeap.getUsed(), + currentHeap.getMax()) + ).get()); // the first frame is the topic, second is the message - pubSocket.tell(new ZMQMessage(new Frame("health.heap"), - new Frame(heapPayload)), getSelf()); + pubSocket.tell(ZMQMessage.withFrames(heapTopic, heapPayload), getSelf()); // use akka SerializationExtension to convert to bytes - byte[] loadPayload = ser.serializerFor(Load.class).toBinary( - new Load(timestamp, os.getSystemLoadAverage())); + ByteString loadTopic = ByteString.fromString("health.load", "UTF-8"); + ByteString loadPayload = ByteString.fromArray( + ser.serialize(new Load(timestamp, os.getSystemLoadAverage())).get() + ); // the first frame is the topic, second is the message - pubSocket.tell(new ZMQMessage(new Frame("health.load"), - new Frame(loadPayload)), getSelf()); + pubSocket.tell(ZMQMessage.withFrames(loadTopic, loadPayload), getSelf()); } else { unhandled(message); } @@ -245,13 +253,14 @@ public class ZeromqDocTestBase { public void onReceive(Object message) { if (message instanceof ZMQMessage) { ZMQMessage m = (ZMQMessage) message; + String topic = m.frame(0).utf8String(); // the first frame is the topic, second is the message - if (m.firstFrameAsString().equals("health.heap")) { - Heap heap = (Heap) ser.serializerFor(Heap.class).fromBinary(m.payload(1)); + if ("health.heap".equals(topic)) { + Heap heap = ser.deserialize(m.frame(1).toArray(), Heap.class).get(); log.info("Used heap {} bytes, at {}", heap.used, timestampFormat.format(new Date(heap.timestamp))); - } else if (m.firstFrameAsString().equals("health.load")) { - Load load = (Load) ser.serializerFor(Load.class).fromBinary(m.payload(1)); + } else if ("health.load".equals(topic)) { + Load load = ser.deserialize(m.frame(1).toArray(), Load.class).get(); log.info("Load average {}, at {}", load.loadAverage, timestampFormat.format(new Date(load.timestamp))); } @@ -279,9 +288,10 @@ public class ZeromqDocTestBase { public void onReceive(Object message) { if (message instanceof ZMQMessage) { ZMQMessage m = (ZMQMessage) message; + String topic = m.frame(0).utf8String(); // the first frame is the topic, second is the message - if (m.firstFrameAsString().equals("health.heap")) { - Heap heap = (Heap) ser.serializerFor(Heap.class).fromBinary(m.payload(1)); + if ("health.heap".equals(topic)) { + Heap heap = ser.deserialize(m.frame(1).toArray(), Heap.class).get(); if (((double) heap.used / heap.max) > 0.9) { count += 1; } else { diff --git a/akka-docs/rst/java/event-bus.rst b/akka-docs/rst/java/event-bus.rst index faecd1d209..fa71e356d8 100644 --- a/akka-docs/rst/java/event-bus.rst +++ b/akka-docs/rst/java/event-bus.rst @@ -185,7 +185,7 @@ at runtime:: system.eventStream.setLogLevel(Logging.DebugLevel()); -This means that log events for a level which will not be logged 
are not +This means that log events for a level which will not be logged are typically not dispatched at all (unless manual subscriptions to the respective event class have been done) diff --git a/akka-docs/rst/java/fault-tolerance.rst b/akka-docs/rst/java/fault-tolerance.rst index 3794ebd3fe..9cb9d234fd 100644 --- a/akka-docs/rst/java/fault-tolerance.rst +++ b/akka-docs/rst/java/fault-tolerance.rst @@ -24,9 +24,6 @@ sample as it is easy to follow the log output to understand what is happening in fault-tolerance-sample -.. includecode:: code/docs/actor/japi/FaultHandlingDocSample.java#all - :exclude: imports,messages,dummydb - Creating a Supervisor Strategy ------------------------------ diff --git a/akka-docs/rst/java/howto.rst b/akka-docs/rst/java/howto.rst index 922d318c75..b15a18a38c 100644 --- a/akka-docs/rst/java/howto.rst +++ b/akka-docs/rst/java/howto.rst @@ -1,4 +1,3 @@ - .. _howto-java: ###################### @@ -48,6 +47,43 @@ and schedule the initial message send again. .. includecode:: code/docs/pattern/SchedulerPatternTest.java#schedule-receive +Single-Use Actor Trees with High-Level Error Reporting +====================================================== + +*Contributed by: Rick Latrine* + +A nice way to enter the actor world from Java is the use of Patterns.ask(). +This method starts a temporary actor to forward the message and collect the result from the actor to be "asked". +In case of errors within the asked actor the default supervision handling will take over. +The caller of Patterns.ask() will *not* be notified. + +If that caller is interested in such an exception, it must make sure that the asked actor replies with Status.Failure(Throwable). +Behind the asked actor a complex actor hierarchy might be spawned to accomplish the asynchronous work. +In that case supervision is the established way to control error handling. + +Unfortunately the asked actor must then know about supervision and must catch the exceptions. +Such an actor is unlikely to be reused in a different actor hierarchy and contains crippled try/catch blocks. + +This pattern provides a way to encapsulate supervision and error propagation to the temporary actor. +The promise returned by Patterns.ask() is then fulfilled as a failure, including the exception. + +Let's have a look at the example code: + +.. includecode:: code/docs/pattern/SupervisedAsk.java + +In the askOf method the SupervisorCreator is sent the user message. +The SupervisorCreator creates a SupervisorActor and forwards the message. +This prevents the actor system from overloading due to actor creations. +The SupervisorActor is responsible for creating the user actor, forwarding the message, and handling actor termination and supervision. +Additionally, the SupervisorActor stops the user actor if the execution time has expired. + +In case of an exception the supervisor tells the temporary actor which exception was thrown. +Afterwards the actor hierarchy is stopped. + +Finally we are able to execute an actor and receive the results or exceptions. + +.. includecode:: code/docs/pattern/SupervisedAskSpec.java + Template Pattern ================ diff --git a/akka-docs/rst/java/microkernel.rst b/akka-docs/rst/java/microkernel.rst index db7c547b89..832e02ef90 100644 --- a/akka-docs/rst/java/microkernel.rst +++ b/akka-docs/rst/java/microkernel.rst @@ -19,11 +19,7 @@ Put your application jar in the ``deploy`` directory to have it automatically loaded. To start the kernel use the scripts in the ``bin`` directory, passing the boot -classes for your application. 
- -There is a simple example of an application setup for running with the -microkernel included in the akka download. This can be run with the following -command (on a unix-based system): +classes for your application. Example command (on a unix-based system): .. code-block:: none diff --git a/akka-docs/rst/java/remoting.rst b/akka-docs/rst/java/remoting.rst index aab5a1b013..fae73cfde7 100644 --- a/akka-docs/rst/java/remoting.rst +++ b/akka-docs/rst/java/remoting.rst @@ -122,6 +122,15 @@ actor systems has to have a JAR containing the class. object, which in most cases is not serializable. It is best to make a static inner class which implements :class:`UntypedActorFactory`. +.. note:: + + You can use asterisks as wildcard matches for the actor path sections, so you could specify: + ``/*/sampleActor`` and that would match all ``sampleActor`` on that level in the hierarchy. + You can also use a wildcard in the last position to match all actors at a certain level: + ``/someParent/*``. Non-wildcard matches always have higher priority than wildcards, so: + ``/foo/bar`` is considered **more specific** than ``/foo/*`` and only the highest priority match is used. + Please note that it **cannot** be used to partially match a section, like this: ``/foo*/bar``, ``/f*o/bar`` etc. + .. warning:: *Caveat:* Remote deployment ties both systems together in a tight fashion, diff --git a/akka-docs/rst/java/routing.rst b/akka-docs/rst/java/routing.rst index 4d21bdd187..9f74e6d902 100644 --- a/akka-docs/rst/java/routing.rst +++ b/akka-docs/rst/java/routing.rst @@ -66,7 +66,7 @@ In addition to being able to supply looked-up remote actors as routees, you can make the router deploy its created children on a set of remote hosts; this will be done in round-robin fashion. In order to do that, wrap the router configuration in a :class:`RemoteRouterConfig`, attaching the remote addresses of -the nodes to deploy to. Naturally, this requires your to include the +the nodes to deploy to. Naturally, this requires you to include the ``akka-remote`` module on your classpath: .. includecode:: code/docs/jrouting/RouterViaProgramExample.java#remoteRoutees @@ -114,7 +114,7 @@ Routers vs. Supervision ^^^^^^^^^^^^^^^^^^^^^^^ As explained in the previous section, routers create new actor instances as -children of the “head” router, who therefor also is their supervisor. The +children of the “head” router, who therefore also is their supervisor. The supervisor strategy of this actor can be configured by means of the :meth:`RouterConfig.supervisorStrategy` property, which is supported for all built-in router types. It defaults to “always escalate”, which leads to the @@ -434,7 +434,7 @@ Configured Custom Router It is possible to define configuration properties for custom routers. In the ``router`` property of the deployment configuration you define the fully qualified class name of the router class. The router class must extend -``akka.routing.CustomRouterConfig`` and and have constructor with ``com.typesafe.config.Config`` parameter. +``akka.routing.CustomRouterConfig`` and have a constructor with one ``com.typesafe.config.Config`` parameter. The deployment section of the configuration is passed to the constructor. 
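To make the custom-router configuration concrete, here is a hedged sketch (Scala, using the Typesafe Config library that Akka depends on); the class name ``com.example.MyCustomRouter`` and the ``answer-count`` property are invented purely for illustration::

    import com.typesafe.config.ConfigFactory

    object CustomRouterDeploymentSketch extends App {
      // The `router` property holds the fully qualified class name of the custom
      // router, and this whole deployment section is the Config instance that is
      // passed to the router's constructor, so additional properties such as
      // `answer-count` can be read from it there.
      val deployment = ConfigFactory.parseString("""
        akka.actor.deployment {
          /myService/workerRouter {
            router = "com.example.MyCustomRouter"  # invented class name
            nr-of-instances = 5
            answer-count = 3                        # invented custom property
          }
        }
        """)

      println(deployment.root().render())
    }

Inside such a router class the same section would then be available through its ``Config`` constructor parameter, for example via ``config.getInt("answer-count")``.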
Custom Resizer diff --git a/akka-docs/rst/java/serialization.rst b/akka-docs/rst/java/serialization.rst index caec1ba325..4668597c4f 100644 --- a/akka-docs/rst/java/serialization.rst +++ b/akka-docs/rst/java/serialization.rst @@ -149,16 +149,12 @@ concrete address handy you can create a dummy one for the right protocol using ``new Address(protocol, "", "", 0)`` (assuming that the actual transport used is as lenient as Akka’s RemoteActorRefProvider). -There is a possible simplification available if you are just using the default -:class:`NettyRemoteTransport` with the :meth:`RemoteActorRefProvider`, which is -enabled by the fact that this combination has just a single remote address: +There is also a default remote address which is the one used by cluster support +(and typical systems have just this one); you can get it like this: .. includecode:: code/docs/serialization/SerializationDocTestBase.java :include: external-address-default -This solution has to be adapted once other providers are used (like the planned -extensions for clustering). - Deep serialization of Actors ---------------------------- diff --git a/akka-docs/rst/java/untyped-actors.rst b/akka-docs/rst/java/untyped-actors.rst index f10fc30787..2acc8bbee1 100644 --- a/akka-docs/rst/java/untyped-actors.rst +++ b/akka-docs/rst/java/untyped-actors.rst @@ -127,10 +127,11 @@ UntypedActor API The :class:`UntypedActor` class defines only one abstract method, the above mentioned :meth:`onReceive(Object message)`, which implements the behavior of the actor. -If the current actor behavior does not match a received message, -:meth:`unhandled` is called, which by default publishes a ``new +If the current actor behavior does not match a received message, it's recommended that +you call the :meth:`unhandled` method, which by default publishes a ``new akka.actor.UnhandledMessage(message, sender, recipient)`` on the actor system’s -event stream. +event stream (set configuration item ``akka.actor.debug.unhandled`` to ``on`` +to have them converted into actual Debug messages). In addition, it offers: diff --git a/akka-docs/rst/modules/durable-mailbox.rst b/akka-docs/rst/modules/durable-mailbox.rst index f76cee0dbd..7fa5aa2480 100644 --- a/akka-docs/rst/modules/durable-mailbox.rst +++ b/akka-docs/rst/modules/durable-mailbox.rst @@ -96,7 +96,8 @@ added in concrete subclass like this: To use ``DurableMailboxDocSpec`` add this dependency:: - "com.typesafe.akka" %% "akka-mailboxes-common" % "@version@" classifier "test" @crossString@ + "com.typesafe.akka" %% "akka-mailboxes-common" % + "@version@" classifier "test" @crossString@ For more inspiration you can look at the old implementations based on Redis, MongoDB, Beanstalk, and ZooKeeper, which can be found in Akka git repository tag diff --git a/akka-docs/rst/project/links.rst b/akka-docs/rst/project/links.rst index 54734e0e5c..842042cdec 100644 --- a/akka-docs/rst/project/links.rst +++ b/akka-docs/rst/project/links.rst @@ -73,7 +73,8 @@ Make sure that you add the repository to the sbt resolvers:: Define the library dependencies with the timestamp as version. 
For example:: - libraryDependencies += "com.typesafe.akka" % "akka-remote_@binVersion@" % "2.1-20121016-001042" + libraryDependencies += "com.typesafe.akka" % "akka-remote_@binVersion@" % + "2.1-20121016-001042" maven definition of snapshot repository --------------------------------------- diff --git a/akka-docs/rst/project/migration-guide-2.0.x-2.1.x.rst b/akka-docs/rst/project/migration-guide-2.0.x-2.1.x.rst index e9458c7614..fcd35ff1ee 100644 --- a/akka-docs/rst/project/migration-guide-2.0.x-2.1.x.rst +++ b/akka-docs/rst/project/migration-guide-2.0.x-2.1.x.rst @@ -4,27 +4,35 @@ Migration Guide 2.0.x to 2.1.x ################################ -The 2.1 release contains several structural changes that require some -simple, mechanical source-level changes in client code. Several things have -been moved to Scala standard library, such as ``Future``, and some package -names have been changed in Remoting and Durable Mailboxes. +Some parts of the 2.0 API have changed in the Akka 2.1 release. This guide lists the changes and +explains what you will need to do to upgrade your program to work with Akka 2.1. -When migrating from 1.3.x to 2.1.x you should first follow the instructions for -migrating `1.3.x to 2.0.x `_. +Migrating from Akka 2.0.x to Akka 2.1.x is relatively straightforward. In Akka 2.1 the API has +undergone some basic housekeeping, for example some package names have changed, but otherwise usage +is largely unchanged. User programs will generally only need simple, mechanical changes in order to +work with Akka 2.1. + +If you are migrating from Akka 1.3.x you will need to follow the instructions for +`migrating from Akka 1.3.x to 2.0.x `_ +before following the instructions in this guide. Scala Version ============= +Akka 2.1 uses a new version of Scala. Change your project build and dependencies to Scala version ``@scalaVersion@``. Config Dependency ================= -`Typesafe config `_ library is a normal -dependency of akka-actor and it is no longer embedded in ``akka-actor.jar``. -If your are using a build tool with dependency resolution, such as sbt or maven you -will not notice the difference, but if you have manually constructed classpaths -you need to add `config-1.0.0.jar `_. +Akka's configuration system has graduated from Akka to become the `Typesafe config +`_ project. The configuration system was previously embedded +within ``akka-actor.jar``; now it is specified as a dependency of ``akka-actor.jar``. + +If you are using a build tool with automatic dependency resolution, such as sbt or Maven, then you +will not notice a difference. Otherwise you will need to ensure that +`config-1.0.0.jar `_ +is present on your classpath. Pieces Moved to Scala Standard Library ====================================== @@ -48,7 +56,8 @@ Search Replace with Scheduler Dispatcher ==================== -The ``ExecutionContext`` to use for running scheduled tasks must be specified. +The ``ExecutionContext`` to use for running scheduled tasks must now be specified. +You can use an Akka ``Dispatcher`` for this purpose. 
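For instance, a minimal Scala sketch (the actor system name, delay, and task are made up; the point is only that an ``ExecutionContext`` such as the system's default dispatcher has to be in scope)::

    import scala.concurrent.duration._
    import akka.actor.ActorSystem

    object SchedulerDispatcherSketch extends App {
      val system = ActorSystem("SchedulerSketch")
      // the system's default dispatcher serves as the ExecutionContext
      // that the scheduler now requires
      import system.dispatcher

      system.scheduler.scheduleOnce(10.seconds) {
        println("tick")
      }
    }

The Java variant below shows the same change with the dispatcher passed explicitly.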
Scala: @@ -66,8 +75,9 @@ Java: :: // Use this Actors' Dispatcher as ExecutionContext - getContext().system().scheduler().scheduleOnce(Duration.create(10, TimeUnit.SECONDS)", - getSelf(), new Reconnect(), getContext().getDispatcher()); + getContext().system().scheduler().scheduleOnce(Duration.create( + 10, TimeUnit.SECONDS), getSelf(), new Reconnect(), + getContext().getDispatcher()); // Use ActorSystem's default Dispatcher as ExecutionContext system.scheduler().scheduleOnce(Duration.create(50, TimeUnit.MILLISECONDS), @@ -79,7 +89,7 @@ Java: }, system.dispatcher()); -API Changes of Future - Scala +API Changes to Future - Scala ============================= v2.0:: @@ -105,7 +115,7 @@ v2.1:: -API Changes of Future - Java +API Changes to Future - Java ============================ v2.0:: @@ -181,17 +191,17 @@ v2.1:: } }, ec); -API changes of DynamicAccess +API changes to DynamicAccess ============================ -All methods with scala.Either[Throwable, X] have been changed to used scala.util.Try[X]. +All methods with scala.Either[Throwable, X] have been changed to use scala.util.Try[X]. DynamicAccess.withErrorHandling has been removed since scala.util.Try now fulfills that role. -API changes of Serialization +API changes to Serialization ============================ -All methods with scala.Either[Throwable, X] have been changed to used scala.util.Try[X]. +All methods with scala.Either[Throwable, X] have been changed to use scala.util.Try[X]. Empty Props =========== @@ -230,14 +240,14 @@ v2.1 Scala:: Failing Send ============ -When failing to send to a remote actor or actor with bounded or durable mailbox the message will -silently be delivered to ``ActorSystem.deadletters`` instead of throwing an exception. +When failing to send to a remote actor or an actor with a bounded or durable mailbox the message will +now be silently delivered to ``ActorSystem.deadletters`` instead of throwing an exception. Graceful Stop Exception ======================= If the target actor of ``akka.pattern.gracefulStop`` isn't terminated within the -timeout the ``Future`` is completed with failure ``akka.pattern.AskTimeoutException``. +timeout then the ``Future`` is completed with a failure of ``akka.pattern.AskTimeoutException``. In 2.0 it was ``akka.actor.ActorTimeoutException``. getInstance for Singletons - Java @@ -275,23 +285,23 @@ v2.1:: log-remote-lifecycle-events =========================== -Default value of akka.remote.log-remote-lifecycle-events has changed to **on**. -If you don't want these in the log you need to add this to your configuration:: +The default value of akka.remote.log-remote-lifecycle-events has changed to **on**. +If you don't want these events in the log then you need to add this to your configuration:: akka.remote.log-remote-lifecycle-events = off Stash postStop ============== -Both Actors and UntypedActors using ``Stash`` now overrides postStop to make sure that -stashed messages are put into the dead letters when the actor stops, make sure you call +Both Actors and UntypedActors using ``Stash`` now override postStop to make sure that +stashed messages are put into the dead letters when the actor stops. Make sure you call super.postStop if you override it. -Forward of Terminated message -============================= +Forwarding Terminated messages +============================== -Forward of ``Terminated`` message is no longer supported. Instead, if you forward -``Terminated`` you should send the information in you own message. 
+Forwarding ``Terminated`` messages is no longer supported. Instead, if you forward +``Terminated`` you should send the information in your own message. v2.0:: @@ -312,12 +322,12 @@ v2.1:: } -Custom Router or Resizer -======================== +Custom Routers and Resizers +=========================== The API of ``RouterConfig``, ``RouteeProvider`` and ``Resizer`` has been cleaned up. If you use these to build your own router functionality the -compiler will tell you you to do some adjustments. +compiler will tell you if you need to make adjustments. v2.0:: @@ -362,19 +372,20 @@ v2.1:: Duration and Timeout ==================== -The Duration class in the scala library is an improved version of the previous -:class:`akka.util.Duration`. Among others it keeps the static type of -:class:`FiniteDuration` more consistently, which has been used to tighten APIs. -The advantage is that instead of runtime exceptions you’ll get compiler errors -telling you if you try to pass a possibly non-finite duration where it does not -belong. +The :class:`akka.util.Duration` class has been moved into the Scala library under +the ``scala.concurrent.duration`` package. Several changes have been made to tighten +up the duration and timeout API. + +:class:`FiniteDuration` is now used more consistently throught the API. +The advantage is that if you try to pass a possibly non-finite duration where +it does not belong you’ll get compile errors instead of runtime exceptions. The main source incompatibility is that you may have to change the declared type of fields from ``Duration`` to ``FiniteDuration`` (factory methods already return the more precise type wherever possible). -Another change is that ``Duration.parse`` was not accepted by the scala-library -maintainers, use ``Duration.create`` instead. +Another change is that ``Duration.parse`` was not accepted by the Scala library +maintainers; use ``Duration.create`` instead. v2.0:: @@ -392,38 +403,49 @@ Package Name Changes in Remoting The package name of all classes in the ``akka-remote.jar`` artifact now starts with ``akka.remote``. This has been done to enable OSGi bundles that don't have conflicting package names. -Change the following import statements. Please note that the serializers are often referenced from configuration. +Change the following import statements. Please note that serializers are often referenced from +configuration files. + +Search -> Replace with:: + + akka.routing.RemoteRouterConfig -> + akka.remote.routing.RemoteRouterConfig + + akka.serialization.ProtobufSerializer -> + akka.remote.serialization.ProtobufSerializer + + akka.serialization.DaemonMsgCreateSerializer -> + akka.remote.serialization.DaemonMsgCreateSerializer -================================================ ======================================================= -Search Replace with -================================================ ======================================================= -``akka.routing.RemoteRouterConfig`` ``akka.remote.routing.RemoteRouterConfig`` -``akka.serialization.ProtobufSerializer`` ``akka.remote.serialization.ProtobufSerializer`` -``akka.serialization.DaemonMsgCreateSerializer`` ``akka.remote.serialization.DaemonMsgCreateSerializer`` -================================================ ======================================================= Package Name Changes in Durable Mailboxes ========================================= -The package name of all classes in the ``akka-file-mailbox.jar`` artifact now starts with ``akka.actor.mailbox.filebased``. 
+The package names of all classes in the ``akka-file-mailbox.jar`` artifact now start with ``akka.actor.mailbox.filebased``. This has been done to enable OSGi bundles that don't have conflicting package names. Change the following import statements. Please note that the ``FileBasedMailboxType`` is often referenced from configuration. -================================================ ========================================================= -Search Replace with -================================================ ========================================================= -``akka.actor.mailbox.FileBasedMailboxType`` ``akka.actor.mailbox.filebased.FileBasedMailboxType`` -``akka.actor.mailbox.FileBasedMailboxSettings`` ``akka.actor.mailbox.filebased.FileBasedMailboxSettings`` -``akka.actor.mailbox.FileBasedMessageQueue`` ``akka.actor.mailbox.filebased.FileBasedMessageQueue`` -``akka.actor.mailbox.filequeue.*`` ``akka.actor.mailbox.filebased.filequeue.*`` -================================================ ========================================================= +Search -> Replace with:: + + akka.actor.mailbox.FileBasedMailboxType -> + akka.actor.mailbox.filebased.FileBasedMailboxType + + akka.actor.mailbox.FileBasedMailboxSettings -> + akka.actor.mailbox.filebased.FileBasedMailboxSettings + + akka.actor.mailbox.FileBasedMessageQueue -> + akka.actor.mailbox.filebased.FileBasedMessageQueue + + akka.actor.mailbox.filequeue.* -> + akka.actor.mailbox.filebased.filequeue.* + Actor Receive Timeout ===================== The API for setting and querying the receive timeout has been made more -consisten in always taking and returning a ``Duration``, the wrapping in +consistent in always taking and returning a ``Duration``; the wrapping in ``Option`` has been removed. (Samples for Java, Scala sources are affected in exactly the same way.) @@ -445,7 +467,7 @@ v2.1:: ConsistentHash ============== -``akka.routing.ConsistentHash`` has been changed to an immutable data structure. +``akka.routing.ConsistentHash`` has been changed into an immutable data structure. v2.0:: diff --git a/akka-docs/rst/project/migration-guide-2.1.x-2.2.x.rst b/akka-docs/rst/project/migration-guide-2.1.x-2.2.x.rst index 80bdccd803..8baa78b249 100644 --- a/akka-docs/rst/project/migration-guide-2.1.x-2.2.x.rst +++ b/akka-docs/rst/project/migration-guide-2.1.x-2.2.x.rst @@ -23,4 +23,33 @@ Search Replace with ==================================== ==================================== If you need to convert from Java to ``scala.collection.immutable.Seq`` or ``scala.collection.immutable.Iterable`` you should use ``akka.japi.Util.immutableSeq(…)``, -and if you need to convert from Scala you can simply switch to using immutable collections yourself or use the ``to[immutable.]`` method. \ No newline at end of file +and if you need to convert from Scala you can simply switch to using immutable collections yourself or use the ``to[immutable.]`` method. + +API changes to FSM and TestFSMRef +================================= + +The ``timerActive_?`` method has been deprecated in both the ``FSM`` trait and the ``TestFSMRef`` +class. You should now use the ``isTimerActive`` method instead. The old method will remain +throughout 2.2.x. It will be removed in Akka 2.3. + + +ThreadPoolConfigBuilder +======================= + +``akka.dispatch.ThreadPoolConfigBuilder`` companion object has been removed, +and with it the ``conf_?`` method that was essentially only a type-inferencer aid for creation +of optional transformations on ``ThreadPoolConfigBuilder``. 
+Instead use: ``option.map(o => (t: ThreadPoolConfigBuilder) => t.op(o))``. + +Scheduler +========= + +Akka's ``Scheduler`` has been augmented to also include a ``sender`` when scheduling to send messages. This works out of the box for Scala users, +but Java users will need to provide the ``sender`` manually; as usual, use ``null`` to designate "no sender", which behaves just as it did before the change. + +ZeroMQ ByteString +================= + +``akka.zeromq.Frame`` and the use of ``Seq[Byte]`` in the API have been removed and are replaced by ``akka.util.ByteString``. + +``ZMQMessage.firstFrameAsString`` has been removed; please use ``ZMQMessage.frames`` or ``ZMQMessage.frame(int)`` to access the frames. diff --git a/akka-docs/rst/scala/actors.rst b/akka-docs/rst/scala/actors.rst index e52d9ba973..4f44497485 100644 --- a/akka-docs/rst/scala/actors.rst +++ b/akka-docs/rst/scala/actors.rst @@ -234,8 +234,8 @@ If the current actor behavior does not match a received message, :meth:`unhandled` is called, which by default publishes an ``akka.actor.UnhandledMessage(message, sender, recipient)`` on the actor system’s event stream (set configuration item -``akka.event-handler-startup-timeout`` to ``true`` to have them converted into -actual Debug messages) +``akka.actor.debug.unhandled`` to ``on`` to have them converted into +actual Debug messages). In addition, it offers: diff --git a/akka-docs/rst/scala/camel.rst b/akka-docs/rst/scala/camel.rst index c1a0e35789..c556827a69 100644 --- a/akka-docs/rst/scala/camel.rst +++ b/akka-docs/rst/scala/camel.rst @@ -384,6 +384,8 @@ URI options The following URI options are supported: +.. tabularcolumns:: |l|l|l|L| + +--------------+----------+---------+-------------------------------------------+ | Name | Type | Default | Description | +==============+==========+=========+===========================================+ diff --git a/akka-docs/rst/scala/code/docs/actor/FSMDocSpec.scala b/akka-docs/rst/scala/code/docs/actor/FSMDocSpec.scala index cc88416b0e..15821419d4 100644 --- a/akka-docs/rst/scala/code/docs/actor/FSMDocSpec.scala +++ b/akka-docs/rst/scala/code/docs/actor/FSMDocSpec.scala @@ -189,6 +189,15 @@ class FSMDocSpec extends MyFavoriteTestFrameWorkPlusAkkaTestKit { } //#fsm-code-elided + "demonstrate NullFunction" in { + class A extends Actor with FSM[Int, Null] { + val SomeState = 0 + //#NullFunction + when(SomeState)(FSM.NullFunction) + //#NullFunction + } + } + "batch correctly" in { val buncher = system.actorOf(Props(new Buncher)) buncher ! 
SetTarget(testActor) diff --git a/akka-docs/rst/scala/code/docs/future/FutureDocSpec.scala b/akka-docs/rst/scala/code/docs/future/FutureDocSpec.scala index 48feec13d9..a80f920a6b 100644 --- a/akka-docs/rst/scala/code/docs/future/FutureDocSpec.scala +++ b/akka-docs/rst/scala/code/docs/future/FutureDocSpec.scala @@ -417,7 +417,7 @@ class FutureDocSpec extends AkkaSpec { val delayed = after(200 millis, using = system.scheduler)(Future.failed( new IllegalStateException("OHNOES"))) val future = Future { Thread.sleep(1000); "foo" } - val result = future either delayed + val result = Future firstCompletedOf Seq(future, delayed) //#after intercept[IllegalStateException] { Await.result(result, 2 second) } } diff --git a/akka-docs/rst/scala/code/docs/serialization/SerializationDocSpec.scala b/akka-docs/rst/scala/code/docs/serialization/SerializationDocSpec.scala index e428497039..1607556ab2 100644 --- a/akka-docs/rst/scala/code/docs/serialization/SerializationDocSpec.scala +++ b/akka-docs/rst/scala/code/docs/serialization/SerializationDocSpec.scala @@ -2,19 +2,6 @@ * Copyright (C) 2009-2012 Typesafe Inc. */ -//#extract-transport -package object akka { - // needs to be inside the akka package because accessing unsupported API ! - def transportOf(system: actor.ExtendedActorSystem): remote.RemoteTransport = - system.provider match { - case r: remote.RemoteActorRefProvider ⇒ r.transport - case _ ⇒ - throw new UnsupportedOperationException( - "this method requires the RemoteActorRefProvider to be configured") - } -} -//#extract-transport - package docs.serialization { import org.scalatest.matchers.MustMatchers @@ -216,7 +203,7 @@ package docs.serialization { object ExternalAddress extends ExtensionKey[ExternalAddressExt] class ExternalAddressExt(system: ExtendedActorSystem) extends Extension { - def addressForAkka: Address = akka.transportOf(system).defaultAddress + def addressForAkka: Address = system.provider.getDefaultAddress } def serializeAkkaDefault(ref: ActorRef): String = diff --git a/akka-docs/rst/scala/code/docs/testkit/TestkitDocSpec.scala b/akka-docs/rst/scala/code/docs/testkit/TestkitDocSpec.scala index 028c4efbaa..da3fd43270 100644 --- a/akka-docs/rst/scala/code/docs/testkit/TestkitDocSpec.scala +++ b/akka-docs/rst/scala/code/docs/testkit/TestkitDocSpec.scala @@ -110,11 +110,11 @@ class TestkitDocSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { fsm.setState(stateName = 1) assert(fsm.stateName == 1) - assert(fsm.timerActive_?("test") == false) + assert(fsm.isTimerActive("test") == false) fsm.setTimer("test", 12, 10 millis, true) - assert(fsm.timerActive_?("test") == true) + assert(fsm.isTimerActive("test") == true) fsm.cancelTimer("test") - assert(fsm.timerActive_?("test") == false) + assert(fsm.isTimerActive("test") == false) //#test-fsm-ref } @@ -232,7 +232,7 @@ class TestkitDocSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { //#test-probe-forward } - "demonstrate " in { + "demonstrate calling thread dispatcher" in { //#calling-thread-dispatcher import akka.testkit.CallingThreadDispatcher val ref = system.actorOf(Props[MyActor].withDispatcher(CallingThreadDispatcher.Id)) diff --git a/akka-docs/rst/scala/code/docs/zeromq/ZeromqDocSpec.scala b/akka-docs/rst/scala/code/docs/zeromq/ZeromqDocSpec.scala index 28ff2e3d34..ca2db8f675 100644 --- a/akka-docs/rst/scala/code/docs/zeromq/ZeromqDocSpec.scala +++ b/akka-docs/rst/scala/code/docs/zeromq/ZeromqDocSpec.scala @@ -6,8 +6,8 @@ package docs.zeromq import language.postfixOps import scala.concurrent.duration._ -import 
scala.collection.immutable import akka.actor.{ Actor, Props } +import akka.util.ByteString import akka.testkit._ import akka.zeromq.{ ZeroMQVersion, ZeroMQExtension, SocketType, Bind } import java.text.SimpleDateFormat @@ -29,7 +29,8 @@ object ZeromqDocSpec { class HealthProbe extends Actor { - val pubSocket = ZeroMQExtension(context.system).newSocket(SocketType.Pub, Bind("tcp://127.0.0.1:1235")) + val pubSocket = ZeroMQExtension(context.system).newSocket(SocketType.Pub, + Bind("tcp://127.0.0.1:1235")) val memory = ManagementFactory.getMemoryMXBean val os = ManagementFactory.getOperatingSystemMXBean val ser = SerializationExtension(context.system) @@ -52,12 +53,12 @@ object ZeromqDocSpec { val heapPayload = ser.serialize(Heap(timestamp, currentHeap.getUsed, currentHeap.getMax)).get // the first frame is the topic, second is the message - pubSocket ! ZMQMessage(immutable.Seq(Frame("health.heap"), Frame(heapPayload))) + pubSocket ! ZMQMessage(ByteString("health.heap"), ByteString(heapPayload)) // use akka SerializationExtension to convert to bytes val loadPayload = ser.serialize(Load(timestamp, os.getSystemLoadAverage)).get // the first frame is the topic, second is the message - pubSocket ! ZMQMessage(immutable.Seq(Frame("health.load"), Frame(loadPayload))) + pubSocket ! ZMQMessage(ByteString("health.load"), ByteString(loadPayload)) } } //#health @@ -72,14 +73,14 @@ object ZeromqDocSpec { def receive = { // the first frame is the topic, second is the message - case m: ZMQMessage if m.firstFrameAsString == "health.heap" ⇒ - val Heap(timestamp, used, max) = ser.deserialize(m.payload(1), + case m: ZMQMessage if m.frames(0).utf8String == "health.heap" ⇒ + val Heap(timestamp, used, max) = ser.deserialize(m.frames(1).toArray, classOf[Heap]).get log.info("Used heap {} bytes, at {}", used, timestampFormat.format(new Date(timestamp))) - case m: ZMQMessage if m.firstFrameAsString == "health.load" ⇒ - val Load(timestamp, loadAverage) = ser.deserialize(m.payload(1), + case m: ZMQMessage if m.frames(0).utf8String == "health.load" ⇒ + val Load(timestamp, loadAverage) = ser.deserialize(m.frames(1).toArray, classOf[Load]).get log.info("Load average {}, at {}", loadAverage, timestampFormat.format(new Date(timestamp))) @@ -97,9 +98,8 @@ object ZeromqDocSpec { def receive = { // the first frame is the topic, second is the message - case m: ZMQMessage if m.firstFrameAsString == "health.heap" ⇒ - val Heap(timestamp, used, max) = ser.deserialize(m.payload(1), - classOf[Heap]).get + case m: ZMQMessage if m.frames(0).utf8String == "health.heap" ⇒ + val Heap(timestamp, used, max) = ser.deserialize(m.frames(1).toArray, classOf[Heap]).get if ((used.toDouble / max) > 0.9) count += 1 else count = 0 if (count > 10) log.warning("Need more memory, using {} %", @@ -146,7 +146,7 @@ class ZeromqDocSpec extends AkkaSpec("akka.loglevel=INFO") { val payload = Array.empty[Byte] //#pub-topic - pubSocket ! ZMQMessage(Frame("foo.bar"), Frame(payload)) + pubSocket ! 
ZMQMessage(ByteString("foo.bar"), ByteString(payload)) //#pub-topic system.stop(subSocket) @@ -187,8 +187,9 @@ class ZeromqDocSpec extends AkkaSpec("akka.loglevel=INFO") { def checkZeroMQInstallation() = try { ZeroMQExtension(system).version match { - case ZeroMQVersion(2, 1, _) ⇒ Unit - case version ⇒ pending + case ZeroMQVersion(2, x, _) if x >= 1 ⇒ Unit + case ZeroMQVersion(y, _, _) if y >= 3 ⇒ Unit + case version ⇒ pending } } catch { case e: LinkageError ⇒ pending diff --git a/akka-docs/rst/scala/fault-tolerance.rst b/akka-docs/rst/scala/fault-tolerance.rst index f0e3952c99..6b6559e647 100644 --- a/akka-docs/rst/scala/fault-tolerance.rst +++ b/akka-docs/rst/scala/fault-tolerance.rst @@ -24,9 +24,6 @@ sample as it is easy to follow the log output to understand what is happening in fault-tolerance-sample -.. includecode:: code/docs/actor/FaultHandlingDocSample.scala#all - :exclude: imports,messages,dummydb - Creating a Supervisor Strategy ------------------------------ diff --git a/akka-docs/rst/scala/fsm.rst b/akka-docs/rst/scala/fsm.rst index 4ace396a14..f30c4f36dc 100644 --- a/akka-docs/rst/scala/fsm.rst +++ b/akka-docs/rst/scala/fsm.rst @@ -179,6 +179,18 @@ demonstrated below: The :class:`Event(msg: Any, data: D)` case class is parameterized with the data type held by the FSM for convenient pattern matching. +.. warning:: + + It is required that you define handlers for each of the possible FSM states, + otherwise there will be failures when trying to switch to undeclared states. + +It is recommended practice to declare the states as objects extending a +sealed trait and then verify that there is a ``when`` clause for each of the +states. If you want to leave the handling of a state “unhandled” (more below), +it still needs to be declared like this: + +.. includecode:: code/docs/actor/FSMDocSpec.scala#NullFunction + Defining the Initial State -------------------------- @@ -359,7 +371,7 @@ which is guaranteed to work immediately, meaning that the scheduled message will not be processed after this call even if the timer already fired and queued it. The status of any timer may be inquired with - :func:`timerActive_?(name)` + :func:`isTimerActive(name)` These named timers complement state timeouts because they are not affected by intervening reception of other messages. diff --git a/akka-docs/rst/scala/io.rst b/akka-docs/rst/scala/io.rst index 866fa8bffc..abeb6b729c 100644 --- a/akka-docs/rst/scala/io.rst +++ b/akka-docs/rst/scala/io.rst @@ -138,9 +138,9 @@ Receiving messages from the ``IOManager``: IO.Iteratee ^^^^^^^^^^^ -Included with Akka's IO support is a basic implementation of ``Iteratee``\s. ``Iteratee``\s are an effective way of handling a stream of data without needing to wait for all the data to arrive. This is especially useful when dealing with non blocking IO since we will usually receive data in chunks which may not include enough information to process, or it may contain much more data then we currently need. +Included with Akka's IO support is a basic implementation of ``Iteratee``\s. ``Iteratee``\s are an effective way of handling a stream of data without needing to wait for all the data to arrive. This is especially useful when dealing with non blocking IO since we will usually receive data in chunks which may not include enough information to process, or it may contain much more data than we currently need. -This ``Iteratee`` implementation is much more basic then what is usually found. There is only support for ``ByteString`` input, and enumerators aren't used. 
The reason for this limited implementation is to reduce the amount of explicit type signatures needed and to keep things simple. It is important to note that Akka's ``Iteratee``\s are completely optional, incoming data can be handled in any way, including other ``Iteratee`` libraries. +This ``Iteratee`` implementation is much more basic than what is usually found. There is only support for ``ByteString`` input, and enumerators aren't used. The reason for this limited implementation is to reduce the amount of explicit type signatures needed and to keep things simple. It is important to note that Akka's ``Iteratee``\s are completely optional, incoming data can be handled in any way, including other ``Iteratee`` libraries. ``Iteratee``\s work by processing the data that it is given and returning either the result (with any unused input) or a continuation if more input is needed. They are monadic, so methods like ``flatMap`` can be used to pass the result of an ``Iteratee`` to another. @@ -204,7 +204,7 @@ Following the path we read in the query (if it exists): .. includecode:: code/docs/io/HTTPServer.scala :include: read-query -It is much simpler then reading the path since we aren't doing any parsing of the query since there is no standard format of the query string. +It is much simpler than reading the path since we aren't doing any parsing of the query since there is no standard format of the query string. Both the path and query used the ``readUriPart`` ``Iteratee``, which is next: diff --git a/akka-docs/rst/scala/microkernel.rst b/akka-docs/rst/scala/microkernel.rst index 51e0bf4ac4..5a1908346a 100644 --- a/akka-docs/rst/scala/microkernel.rst +++ b/akka-docs/rst/scala/microkernel.rst @@ -19,11 +19,7 @@ Put your application jar in the ``deploy`` directory to have it automatically loaded. To start the kernel use the scripts in the ``bin`` directory, passing the boot -classes for your application. - -There is a simple example of an application setup for running with the -microkernel included in the akka download. This can be run with the following -command (on a unix-based system): +classes for your application. Example command (on a unix-based system): .. code-block:: none diff --git a/akka-docs/rst/scala/remoting.rst b/akka-docs/rst/scala/remoting.rst index db5ddbf170..cf12c93c60 100644 --- a/akka-docs/rst/scala/remoting.rst +++ b/akka-docs/rst/scala/remoting.rst @@ -129,6 +129,15 @@ actor systems has to have a JAR containing the class. most cases is not serializable. It is best to create a factory method in the companion object of the actor’s class. +.. note:: + + You can use asterisks as wildcard matches for the actor paths, so you could specify: + ``/*/sampleActor`` and that would match all ``sampleActor`` on that level in the hierarchy. + You can also use wildcard in the last position to match all actors at a certain level: + ``/someParent/*``. Non-wildcard matches always have higher priority to match than wildcards, so: + ``/foo/bar`` is considered **more specific** than ``/foo/*`` and only the highest priority match is used. + Please note that it **cannot** be used to partially match section, like this: ``/foo*/bar``, ``/f*o/bar`` etc. + .. 
warning:: *Caveat:* Remote deployment ties both systems together in a tight fashion, diff --git a/akka-docs/rst/scala/routing.rst b/akka-docs/rst/scala/routing.rst index 9dc356c98c..f04223b5d3 100644 --- a/akka-docs/rst/scala/routing.rst +++ b/akka-docs/rst/scala/routing.rst @@ -66,7 +66,7 @@ In addition to being able to supply looked-up remote actors as routees, you can make the router deploy its created children on a set of remote hosts; this will be done in round-robin fashion. In order to do that, wrap the router configuration in a :class:`RemoteRouterConfig`, attaching the remote addresses of -the nodes to deploy to. Naturally, this requires your to include the +the nodes to deploy to. Naturally, this requires you to include the ``akka-remote`` module on your classpath: .. includecode:: code/docs/routing/RouterViaProgramExample.scala#remoteRoutees @@ -430,7 +430,7 @@ Configured Custom Router It is possible to define configuration properties for custom routers. In the ``router`` property of the deployment configuration you define the fully qualified class name of the router class. The router class must extend -``akka.routing.RouterConfig`` and and have constructor with ``com.typesafe.config.Config`` parameter. +``akka.routing.RouterConfig`` and have a constructor with one ``com.typesafe.config.Config`` parameter. The deployment section of the configuration is passed to the constructor. Custom Resizer diff --git a/akka-docs/rst/scala/serialization.rst b/akka-docs/rst/scala/serialization.rst index 10283b441f..70a02faecd 100644 --- a/akka-docs/rst/scala/serialization.rst +++ b/akka-docs/rst/scala/serialization.rst @@ -138,24 +138,12 @@ concrete address handy you can create a dummy one for the right protocol using ``Address(protocol, "", "", 0)`` (assuming that the actual transport used is as lenient as Akka’s RemoteActorRefProvider). -There is a possible simplification available if you are just using the default -:class:`NettyRemoteTransport` with the :meth:`RemoteActorRefProvider`, which is -enabled by the fact that this combination has just a single remote address. -This approach relies on internal API, which means that it is not guaranteed to -be supported in future versions. To make this caveat more obvious, some bridge -code in the ``akka`` package is required to make it work: - -.. includecode:: code/docs/serialization/SerializationDocSpec.scala - :include: extract-transport - -And with this, the address extraction goes like this: +There is also a default remote address which is the one used by cluster support +(and typical systems have just this one); you can get it like this: .. includecode:: code/docs/serialization/SerializationDocSpec.scala :include: external-address-default -This solution has to be adapted once other providers are used (like the planned -extensions for clustering). - Deep serialization of Actors ---------------------------- diff --git a/akka-kernel/src/main/dist/bin/akka-cluster b/akka-kernel/src/main/dist/bin/akka-cluster index 0cbff520dd..e544772a13 100755 --- a/akka-kernel/src/main/dist/bin/akka-cluster +++ b/akka-kernel/src/main/dist/bin/akka-cluster @@ -16,7 +16,7 @@ declare AKKA_HOME="$(cd "$(cd "$(dirname "$0")"; pwd -P)"/..; pwd)" -[ -n "$JMX_CLIENT_CLASSPATH" ] || JMX_CLIENT_CLASSPATH="$AKKA_HOME/lib/akka/akka-kernel-*" +[ -n "$JMX_CLIENT_CLASSPATH" ] || JMX_CLIENT_CLASSPATH="$AKKA_HOME/lib/akka/akka-kernel*" # NOTE: The 'cmdline-jmxclient' is available as part of the Akka distribution. 
JMX_CLIENT="java -cp $JMX_CLIENT_CLASSPATH akka.jmx.Client -" @@ -103,6 +103,32 @@ case "$2" in $JMX_CLIENT $HOST akka:type=Cluster ClusterStatus ;; + members) + if [ $# -ne 2 ]; then + echo "Usage: $SELF members" + exit 1 + fi + + ensureNodeIsRunningAndAvailable + shift + + echo "Querying members" + $JMX_CLIENT $HOST akka:type=Cluster Members + ;; + + unreachable) + if [ $# -ne 2 ]; then + echo "Usage: $SELF unreachable" + exit 1 + fi + + ensureNodeIsRunningAndAvailable + shift + + echo "Querying unreachable members" + $JMX_CLIENT $HOST akka:type=Cluster Unreachable + ;; + leader) if [ $# -ne 2 ]; then echo "Usage: $SELF leader" @@ -129,19 +155,6 @@ case "$2" in $JMX_CLIENT $HOST akka:type=Cluster Singleton ;; - has-convergence) - if [ $# -ne 2 ]; then - echo "Usage: $SELF is-convergence" - exit 1 - fi - - ensureNodeIsRunningAndAvailable - shift - - echo "Checking for cluster convergence" - $JMX_CLIENT $HOST akka:type=Cluster Convergence - ;; - is-available) if [ $# -ne 2 ]; then echo "Usage: $SELF is-available" @@ -155,19 +168,6 @@ case "$2" in $JMX_CLIENT $HOST akka:type=Cluster Available ;; - is-running) - if [ $# -ne 2 ]; then - echo "Usage: $SELF is-running" - exit 1 - fi - - ensureNodeIsRunningAndAvailable - shift - - echo "Checking if member node on $HOST is RUNNING" - $JMX_CLIENT $HOST akka:type=Cluster Running - ;; - *) printf "Usage: bin/$SELF ...\n" printf "\n" @@ -176,12 +176,12 @@ case "$2" in printf "%26s - %s\n" "leave " "Sends a request for node with URL to LEAVE the cluster" printf "%26s - %s\n" "down " "Sends a request for marking node with URL as DOWN" printf "%26s - %s\n" member-status "Asks the member node for its current status" + printf "%26s - %s\n" members "Asks the cluster for addresses of current members" + printf "%26s - %s\n" unreachable "Asks the cluster for addresses of unreachable members" printf "%26s - %s\n" cluster-status "Asks the cluster for its current status (member ring, unavailable nodes, meta data etc.)" printf "%26s - %s\n" leader "Asks the cluster who the current leader is" printf "%26s - %s\n" is-singleton "Checks if the cluster is a singleton cluster (single node cluster)" printf "%26s - %s\n" is-available "Checks if the member node is available" - printf "%26s - %s\n" is-running "Checks if the member node is running" - printf "%26s - %s\n" has-convergence "Checks if there is a cluster convergence" printf "Where the should be on the format of 'akka://actor-system-name@hostname:port'\n" printf "\n" printf "Examples: bin/$SELF localhost:9999 is-available\n" diff --git a/akka-osgi/src/test/scala/akka/osgi/PojoSRTestSupport.scala b/akka-osgi/src/test/scala/akka/osgi/PojoSRTestSupport.scala index 1c70d03d7b..d1d77daf1e 100644 --- a/akka-osgi/src/test/scala/akka/osgi/PojoSRTestSupport.scala +++ b/akka-osgi/src/test/scala/akka/osgi/PojoSRTestSupport.scala @@ -10,21 +10,22 @@ import org.apache.commons.io.IOUtils.copy import org.osgi.framework._ import java.net.URL - import java.util.jar.JarInputStream import java.io._ import org.scalatest.{ BeforeAndAfterAll, Suite } import java.util.{ UUID, Date, ServiceLoader, HashMap } import scala.reflect.ClassTag import scala.collection.immutable +import scala.concurrent.duration._ +import scala.annotation.tailrec /** * Trait that provides support for building akka-osgi tests using PojoSR */ trait PojoSRTestSupport extends Suite with BeforeAndAfterAll { - val MAX_WAIT_TIME = 12800 - val START_WAIT_TIME = 50 + val MaxWaitDuration = 12800.millis + val SleepyTime = 50.millis /** * All bundles being found on the test 
classpath are automatically installed and started in the PojoSR runtime. @@ -69,16 +70,21 @@ trait PojoSRTestSupport extends Suite with BeforeAndAfterAll { def serviceForType[T](implicit t: ClassTag[T]): T = context.getService(awaitReference(t.runtimeClass)).asInstanceOf[T] - def awaitReference(serviceType: Class[_]): ServiceReference = awaitReference(serviceType, START_WAIT_TIME) + def awaitReference(serviceType: Class[_]): ServiceReference = awaitReference(serviceType, SleepyTime) - def awaitReference(serviceType: Class[_], wait: Long): ServiceReference = { - val option = Option(context.getServiceReference(serviceType.getName)) - Thread.sleep(wait) //FIXME No sleep please - option match { - case Some(reference) ⇒ reference - case None if (wait > MAX_WAIT_TIME) ⇒ fail("Gave up waiting for service of type %s".format(serviceType)) - case None ⇒ awaitReference(serviceType, wait * 2) + def awaitReference(serviceType: Class[_], wait: FiniteDuration): ServiceReference = { + + @tailrec def poll(step: Duration, deadline: Deadline): ServiceReference = context.getServiceReference(serviceType.getName) match { + case null ⇒ + if (deadline.isOverdue()) fail("Gave up waiting for service of type %s".format(serviceType)) + else { + Thread.sleep((step min deadline.timeLeft max Duration.Zero).toMillis) + poll(step, deadline) + } + case some ⇒ some } + + poll(wait, Deadline.now + MaxWaitDuration) } protected def buildTestBundles(builders: immutable.Seq[BundleDescriptorBuilder]): immutable.Seq[BundleDescriptor] = diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/RemoteConnection.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/RemoteConnection.scala index db212e7cbf..adca9518b4 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/RemoteConnection.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/RemoteConnection.scala @@ -13,6 +13,7 @@ import org.jboss.netty.handler.timeout.{ ReadTimeoutHandler, ReadTimeoutExceptio import java.net.InetSocketAddress import java.util.concurrent.Executors import akka.event.Logging +import akka.util.Helpers /** * INTERNAL API. 
@@ -59,7 +60,7 @@ private[akka] object RemoteConnection { poolSize) val bootstrap = new ServerBootstrap(socketfactory) bootstrap.setPipelineFactory(new TestConductorPipelineFactory(handler)) - bootstrap.setOption("reuseAddress", true) + bootstrap.setOption("reuseAddress", !Helpers.isWindows) bootstrap.setOption("child.tcpNoDelay", true) bootstrap.bind(sockaddr) } diff --git a/akka-remote-tests/src/main/scala/akka/remote/testkit/MultiNodeSpec.scala b/akka-remote-tests/src/main/scala/akka/remote/testkit/MultiNodeSpec.scala index f4e34c5a34..1ccb919a2d 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testkit/MultiNodeSpec.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testkit/MultiNodeSpec.scala @@ -103,7 +103,7 @@ abstract class MultiNodeConfig { else ConfigFactory.empty val configs = (_nodeConf get myself).toList ::: _commonConf.toList ::: transportConfig :: MultiNodeSpec.nodeConfig :: MultiNodeSpec.baseConfig :: Nil - configs reduce (_ withFallback _) + configs reduceLeft (_ withFallback _) } private[testkit] def deployments(node: RoleName): immutable.Seq[String] = (_deployments get node getOrElse Nil) ++ _allDeploy diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/RoundRobinRoutedRemoteActorSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/RoundRobinRoutedRemoteActorSpec.scala index 52c2fa56b8..a75b983f07 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/RoundRobinRoutedRemoteActorSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/RoundRobinRoutedRemoteActorSpec.scala @@ -12,8 +12,10 @@ import akka.actor.PoisonPill import akka.actor.Address import scala.concurrent.Await import akka.pattern.ask -import akka.remote.testkit.{STMultiNodeSpec, MultiNodeConfig, MultiNodeSpec} +import akka.remote.testkit.{ STMultiNodeSpec, MultiNodeConfig, MultiNodeSpec } import akka.routing.Broadcast +import akka.routing.CurrentRoutees +import akka.routing.RouterRoutees import akka.routing.RoundRobinRouter import akka.routing.RoutedActorRef import akka.routing.Resizer @@ -59,7 +61,7 @@ class RoundRobinRoutedRemoteActorMultiJvmNode3 extends RoundRobinRoutedRemoteAct class RoundRobinRoutedRemoteActorMultiJvmNode4 extends RoundRobinRoutedRemoteActorSpec class RoundRobinRoutedRemoteActorSpec extends MultiNodeSpec(RoundRobinRoutedRemoteActorMultiJvmSpec) - with STMultiNodeSpec with ImplicitSender with DefaultTimeout { + with STMultiNodeSpec with ImplicitSender with DefaultTimeout { import RoundRobinRoutedRemoteActorMultiJvmSpec._ def initialParticipants = 4 @@ -105,7 +107,7 @@ class RoundRobinRoutedRemoteActorSpec extends MultiNodeSpec(RoundRobinRoutedRemo } "A new remote actor configured with a RoundRobin router and Resizer" must { - "be locally instantiated on a remote node after several resize rounds" taggedAs LongRunningTest in { + "be locally instantiated on a remote node after several resize rounds" taggedAs LongRunningTest in within(5 seconds) { runOn(first, second, third) { enterBarrier("start", "broadcast-end", "end", "done") @@ -117,22 +119,21 @@ class RoundRobinRoutedRemoteActorSpec extends MultiNodeSpec(RoundRobinRoutedRemo resizer = Some(new TestResizer))), "service-hello2") actor.isInstanceOf[RoutedActorRef] must be(true) - val iterationCount = 9 + actor ! CurrentRoutees + expectMsgType[RouterRoutees].routees.size must be(1) val repliesFrom: Set[ActorRef] = - (for { - i ← 0 until iterationCount - } yield { + (for (n ← 2 to 8) yield { actor ! 
"hit" - receiveOne(5 seconds) match { case ref: ActorRef ⇒ ref } + awaitCond(Await.result(actor ? CurrentRoutees, remaining).asInstanceOf[RouterRoutees].routees.size == n) + expectMsgType[ActorRef] }).toSet enterBarrier("broadcast-end") actor ! Broadcast(PoisonPill) enterBarrier("end") - // at least more than one actor per node - repliesFrom.size must be > (3) + repliesFrom.size must be(7) val repliesFromAddresses = repliesFrom.map(_.path.address) repliesFromAddresses must be === (Set(node(first), node(second), node(third)).map(_.address)) diff --git a/akka-remote/src/main/resources/reference.conf b/akka-remote/src/main/resources/reference.conf index 65027d4e42..0d23116770 100644 --- a/akka-remote/src/main/resources/reference.conf +++ b/akka-remote/src/main/resources/reference.conf @@ -215,6 +215,11 @@ akka { # (I) EXPERIMENTAL If "" then the specified dispatcher # will be used to accept inbound connections, and perform IO. If "" then # dedicated threads will be used. + # + # CAUTION: This might lead to the used dispatcher not shutting down properly! + # - may prevent the JVM from shutting down normally + # - may leak threads when shutting down an ActorSystem + # use-dispatcher-for-io = "" # (I) The hostname or ip to bind the remoting to, @@ -242,6 +247,11 @@ akka { # (I) Sets the size of the connection backlog backlog = 4096 + # (I) Sets the SO_REUSE_ADDR flag, valid values are "on", "off" and "off-for-windows" + # due to the following Windows bug: http://bugs.sun.com/bugdatabase/view_bug.do?bug_id=4476378 + # "off-for-windows" of course means that it's "on" for all other platforms + reuse-address = off-for-windows + # (I) Length in akka.time-unit how long core threads will be kept alive if # idling execution-pool-keepalive = 60s @@ -326,7 +336,8 @@ akka { # Example: ["TLS_RSA_WITH_AES_128_CBC_SHA", "TLS_RSA_WITH_AES_256_CBC_SHA"] # You need to install the JCE Unlimited Strength Jurisdiction Policy # Files to use AES 256. - # More info here: http://docs.oracle.com/javase/7/docs/technotes/guides/security/SunProviders.html#SunJCEProvider + # More info here: + # http://docs.oracle.com/javase/7/docs/technotes/guides/security/SunProviders.html#SunJCEProvider enabled-algorithms = ["TLS_RSA_WITH_AES_128_CBC_SHA"] # Using /dev/./urandom is only necessary when using SHA1PRNG on Linux to diff --git a/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala b/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala index 02d8b5d837..1c75200a5f 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala @@ -63,6 +63,10 @@ object RemoteActorRefProvider { /** * Remote ActorRefProvider. Starts up actor on remote node and creates a RemoteActorRef representing it. + * + * INTERNAL API! + * + * Depending on this class is not supported, only the [[ActorRefProvider]] interface is supported. 
*/ class RemoteActorRefProvider( val systemName: String, @@ -74,7 +78,13 @@ class RemoteActorRefProvider( val remoteSettings: RemoteSettings = new RemoteSettings(settings.config, systemName) - val deployer: RemoteDeployer = new RemoteDeployer(settings, dynamicAccess) + override val deployer: Deployer = createDeployer + + /** + * Factory method to make it possible to override deployer in subclass + * Creates a new instance every time + */ + protected def createDeployer: RemoteDeployer = new RemoteDeployer(settings, dynamicAccess) private val local = new LocalActorRefProvider(systemName, settings, eventStream, scheduler, dynamicAccess, deployer) @@ -82,9 +92,7 @@ class RemoteActorRefProvider( private var _log = local.log def log: LoggingAdapter = _log - @volatile - private var _rootPath = local.rootPath - override def rootPath: ActorPath = _rootPath + override def rootPath: ActorPath = local.rootPath override def deadLetters: InternalActorRef = local.deadLetters // these are only available after init() @@ -148,11 +156,6 @@ class RemoteActorRefProvider( // this enables reception of remote requests transport.start() - _rootPath = RootActorPath(local.rootPath.address.copy( - protocol = transport.defaultAddress.protocol, - host = transport.defaultAddress.host, - port = transport.defaultAddress.port)) - val remoteClientLifeCycleHandler = system.systemActorOf(Props(new Actor { def receive = { case RemoteClientError(cause, remote, address) ⇒ remote.shutdownClientConnection(address) @@ -296,6 +299,8 @@ class RemoteActorRefProvider( } } + def getDefaultAddress: Address = transport.address + private def hasAddress(address: Address): Boolean = address == local.rootPath.address || address == rootPath.address || transport.addresses(address) diff --git a/akka-remote/src/main/scala/akka/remote/netty/Client.scala b/akka-remote/src/main/scala/akka/remote/netty/Client.scala index 8411d312cc..372cc6dd0c 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Client.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Client.scala @@ -44,8 +44,6 @@ private[akka] abstract class RemoteClient private[akka] (val netty: NettyRemoteT def shutdown(): Boolean - def isBoundTo(address: Address): Boolean = remoteAddress == address - /** * Converts the message to the wireprotocol and sends the message across the wire */ diff --git a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala index 6c4a7b9c9b..b465782aa0 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala @@ -252,13 +252,13 @@ private[akka] class NettyRemoteTransport(_system: ExtendedActorSystem, _provider } } - def bindClient(remoteAddress: Address, client: RemoteClient, putIfAbsent: Boolean = false): Boolean = { + def bindClient(remoteAddress: Address, client: RemoteClient): Boolean = { clientsLock.writeLock().lock() try { - if (putIfAbsent && remoteClients.contains(remoteAddress)) false + if (remoteClients.contains(remoteAddress)) false else { client.connect() - remoteClients.put(remoteAddress, client).foreach(_.shutdown()) + remoteClients.put(remoteAddress, client) true } } finally { @@ -266,17 +266,7 @@ private[akka] class NettyRemoteTransport(_system: ExtendedActorSystem, _provider } } - def unbindClient(remoteAddress: Address): Unit = { - clientsLock.writeLock().lock() - try { - remoteClients foreach { - case (k, v) ⇒ - if (v.isBoundTo(remoteAddress)) { 
v.shutdown(); remoteClients.remove(k) } - } - } finally { - clientsLock.writeLock().unlock() - } - } + def unbindClient(remoteAddress: Address): Unit = shutdownClientConnection(remoteAddress) def shutdownClientConnection(remoteAddress: Address): Unit = { clientsLock.writeLock().lock() diff --git a/akka-remote/src/main/scala/akka/remote/netty/Server.scala b/akka-remote/src/main/scala/akka/remote/netty/Server.scala index dfa97f78ce..077d30f9ba 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Server.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Server.scala @@ -37,7 +37,7 @@ private[akka] class NettyRemoteServer(val netty: NettyRemoteTransport) { b.setOption("backlog", settings.Backlog) b.setOption("tcpNoDelay", true) b.setOption("child.keepAlive", true) - b.setOption("reuseAddress", true) + b.setOption("reuseAddress", settings.ReuseAddress) settings.ReceiveBufferSize.foreach(sz ⇒ b.setOption("receiveBufferSize", sz)) settings.SendBufferSize.foreach(sz ⇒ b.setOption("sendBufferSize", sz)) settings.WriteBufferHighWaterMark.foreach(sz ⇒ b.setOption("writeBufferHighWaterMark", sz)) diff --git a/akka-remote/src/main/scala/akka/remote/netty/Settings.scala b/akka-remote/src/main/scala/akka/remote/netty/Settings.scala index 951ff31e59..0a27914734 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Settings.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Settings.scala @@ -11,6 +11,7 @@ import akka.ConfigurationException import akka.japi.Util.immutableSeq import scala.concurrent.duration.FiniteDuration import akka.dispatch.ThreadPoolConfig +import akka.util.Helpers private[akka] class NettySettings(config: Config, val systemName: String) { @@ -72,6 +73,11 @@ private[akka] class NettySettings(config: Config, val systemName: String) { val Backlog: Int = getInt("backlog") + val ReuseAddress: Boolean = getString("reuse-address") match { + case "off-for-windows" ⇒ !Helpers.isWindows + case _ ⇒ getBoolean("reuse-address") + } + val ExecutionPoolKeepalive: FiniteDuration = Duration(getMilliseconds("execution-pool-keepalive"), MILLISECONDS) val ExecutionPoolSize: Int = getInt("execution-pool-size") match { diff --git a/akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala b/akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala index f1d9ac2397..e07251048c 100644 --- a/akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala @@ -9,6 +9,7 @@ import akka.testkit.AkkaSpec import akka.actor.ExtendedActorSystem import scala.concurrent.duration._ import akka.remote.netty.NettyRemoteTransport +import akka.util.Helpers @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) class RemoteConfigSpec extends AkkaSpec( @@ -50,6 +51,7 @@ class RemoteConfigSpec extends AkkaSpec( MessageFrameSize must be(1048576) ConnectionTimeout must be(2 minutes) Backlog must be(4096) + ReuseAddress must be(!Helpers.isWindows) ExecutionPoolKeepalive must be(1 minute) ExecutionPoolSize must be(4) MaxChannelMemorySize must be(0) @@ -89,6 +91,10 @@ class RemoteConfigSpec extends AkkaSpec( pool.getDouble("pool-size-factor") must equal(1.0) pool.getInt("pool-size-max") must equal(8) } + + { + c.getString("reuse-address") must be("off-for-windows") + } } } } diff --git a/akka-samples/akka-sample-cluster/sigar/libsigar-amd64-freebsd-6.so b/akka-samples/akka-sample-cluster/sigar/libsigar-amd64-freebsd-6.so new file mode 100644 index 0000000000..3e94f0d2bf Binary files /dev/null and 
b/akka-samples/akka-sample-cluster/sigar/libsigar-amd64-freebsd-6.so differ diff --git a/akka-samples/akka-sample-cluster/sigar/libsigar-amd64-linux.so b/akka-samples/akka-sample-cluster/sigar/libsigar-amd64-linux.so new file mode 100644 index 0000000000..5a2e4c24fe Binary files /dev/null and b/akka-samples/akka-sample-cluster/sigar/libsigar-amd64-linux.so differ diff --git a/akka-samples/akka-sample-cluster/sigar/libsigar-amd64-solaris.so b/akka-samples/akka-sample-cluster/sigar/libsigar-amd64-solaris.so new file mode 100644 index 0000000000..6396482a43 Binary files /dev/null and b/akka-samples/akka-sample-cluster/sigar/libsigar-amd64-solaris.so differ diff --git a/akka-samples/akka-sample-cluster/sigar/libsigar-ia64-hpux-11.sl b/akka-samples/akka-sample-cluster/sigar/libsigar-ia64-hpux-11.sl new file mode 100644 index 0000000000..d92ea4a96a Binary files /dev/null and b/akka-samples/akka-sample-cluster/sigar/libsigar-ia64-hpux-11.sl differ diff --git a/akka-samples/akka-sample-cluster/sigar/libsigar-ia64-linux.so b/akka-samples/akka-sample-cluster/sigar/libsigar-ia64-linux.so new file mode 100644 index 0000000000..2bd2fc8e32 Binary files /dev/null and b/akka-samples/akka-sample-cluster/sigar/libsigar-ia64-linux.so differ diff --git a/akka-samples/akka-sample-cluster/sigar/libsigar-pa-hpux-11.sl b/akka-samples/akka-sample-cluster/sigar/libsigar-pa-hpux-11.sl new file mode 100644 index 0000000000..0dfd8a1122 Binary files /dev/null and b/akka-samples/akka-sample-cluster/sigar/libsigar-pa-hpux-11.sl differ diff --git a/akka-samples/akka-sample-cluster/sigar/libsigar-ppc-aix-5.so b/akka-samples/akka-sample-cluster/sigar/libsigar-ppc-aix-5.so new file mode 100644 index 0000000000..7d4b519921 Binary files /dev/null and b/akka-samples/akka-sample-cluster/sigar/libsigar-ppc-aix-5.so differ diff --git a/akka-samples/akka-sample-cluster/sigar/libsigar-ppc-linux.so b/akka-samples/akka-sample-cluster/sigar/libsigar-ppc-linux.so new file mode 100644 index 0000000000..4394b1b00f Binary files /dev/null and b/akka-samples/akka-sample-cluster/sigar/libsigar-ppc-linux.so differ diff --git a/akka-samples/akka-sample-cluster/sigar/libsigar-ppc64-aix-5.so b/akka-samples/akka-sample-cluster/sigar/libsigar-ppc64-aix-5.so new file mode 100644 index 0000000000..35fd828808 Binary files /dev/null and b/akka-samples/akka-sample-cluster/sigar/libsigar-ppc64-aix-5.so differ diff --git a/akka-samples/akka-sample-cluster/sigar/libsigar-ppc64-linux.so b/akka-samples/akka-sample-cluster/sigar/libsigar-ppc64-linux.so new file mode 100644 index 0000000000..a1ba2529c9 Binary files /dev/null and b/akka-samples/akka-sample-cluster/sigar/libsigar-ppc64-linux.so differ diff --git a/akka-samples/akka-sample-cluster/sigar/libsigar-s390x-linux.so b/akka-samples/akka-sample-cluster/sigar/libsigar-s390x-linux.so new file mode 100644 index 0000000000..c275f4ac69 Binary files /dev/null and b/akka-samples/akka-sample-cluster/sigar/libsigar-s390x-linux.so differ diff --git a/akka-samples/akka-sample-cluster/sigar/libsigar-sparc-solaris.so b/akka-samples/akka-sample-cluster/sigar/libsigar-sparc-solaris.so new file mode 100644 index 0000000000..aa847d2b54 Binary files /dev/null and b/akka-samples/akka-sample-cluster/sigar/libsigar-sparc-solaris.so differ diff --git a/akka-samples/akka-sample-cluster/sigar/libsigar-sparc64-solaris.so b/akka-samples/akka-sample-cluster/sigar/libsigar-sparc64-solaris.so new file mode 100644 index 0000000000..6c4fe809c5 Binary files /dev/null and 
b/akka-samples/akka-sample-cluster/sigar/libsigar-sparc64-solaris.so differ diff --git a/akka-samples/akka-sample-cluster/sigar/libsigar-universal-macosx.dylib b/akka-samples/akka-sample-cluster/sigar/libsigar-universal-macosx.dylib new file mode 100644 index 0000000000..27ab107111 Binary files /dev/null and b/akka-samples/akka-sample-cluster/sigar/libsigar-universal-macosx.dylib differ diff --git a/akka-samples/akka-sample-cluster/sigar/libsigar-universal64-macosx.dylib b/akka-samples/akka-sample-cluster/sigar/libsigar-universal64-macosx.dylib new file mode 100644 index 0000000000..0c721fecf3 Binary files /dev/null and b/akka-samples/akka-sample-cluster/sigar/libsigar-universal64-macosx.dylib differ diff --git a/akka-samples/akka-sample-cluster/sigar/libsigar-x86-freebsd-5.so b/akka-samples/akka-sample-cluster/sigar/libsigar-x86-freebsd-5.so new file mode 100644 index 0000000000..8c50c6117a Binary files /dev/null and b/akka-samples/akka-sample-cluster/sigar/libsigar-x86-freebsd-5.so differ diff --git a/akka-samples/akka-sample-cluster/sigar/libsigar-x86-freebsd-6.so b/akka-samples/akka-sample-cluster/sigar/libsigar-x86-freebsd-6.so new file mode 100644 index 0000000000..f0800274a6 Binary files /dev/null and b/akka-samples/akka-sample-cluster/sigar/libsigar-x86-freebsd-6.so differ diff --git a/akka-samples/akka-sample-cluster/sigar/libsigar-x86-linux.so b/akka-samples/akka-sample-cluster/sigar/libsigar-x86-linux.so new file mode 100644 index 0000000000..a0b64eddb0 Binary files /dev/null and b/akka-samples/akka-sample-cluster/sigar/libsigar-x86-linux.so differ diff --git a/akka-samples/akka-sample-cluster/sigar/libsigar-x86-solaris.so b/akka-samples/akka-sample-cluster/sigar/libsigar-x86-solaris.so new file mode 100644 index 0000000000..c6452e5655 Binary files /dev/null and b/akka-samples/akka-sample-cluster/sigar/libsigar-x86-solaris.so differ diff --git a/akka-samples/akka-sample-cluster/sigar/sigar-amd64-winnt.dll b/akka-samples/akka-sample-cluster/sigar/sigar-amd64-winnt.dll new file mode 100644 index 0000000000..1ec8a0353e Binary files /dev/null and b/akka-samples/akka-sample-cluster/sigar/sigar-amd64-winnt.dll differ diff --git a/akka-samples/akka-sample-cluster/sigar/sigar-x86-winnt.dll b/akka-samples/akka-sample-cluster/sigar/sigar-x86-winnt.dll new file mode 100644 index 0000000000..6afdc0166c Binary files /dev/null and b/akka-samples/akka-sample-cluster/sigar/sigar-x86-winnt.dll differ diff --git a/akka-samples/akka-sample-cluster/sigar/sigar-x86-winnt.lib b/akka-samples/akka-sample-cluster/sigar/sigar-x86-winnt.lib new file mode 100644 index 0000000000..04924a1fc1 Binary files /dev/null and b/akka-samples/akka-sample-cluster/sigar/sigar-x86-winnt.lib differ diff --git a/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/factorial/japi/FactorialBackend.java b/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/factorial/japi/FactorialBackend.java new file mode 100644 index 0000000000..b1f813f684 --- /dev/null +++ b/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/factorial/japi/FactorialBackend.java @@ -0,0 +1,49 @@ +package sample.cluster.factorial.japi; + +//#imports +import java.math.BigInteger; +import java.util.concurrent.Callable; +import scala.concurrent.Future; +import akka.actor.UntypedActor; +import akka.dispatch.Mapper; +import static akka.dispatch.Futures.future; +import static akka.pattern.Patterns.pipe; +//#imports + +//#backend +public class FactorialBackend extends UntypedActor { + + @Override + public void onReceive(Object 
message) { + if (message instanceof Integer) { + final Integer n = (Integer) message; + Future<BigInteger> f = future(new Callable<BigInteger>() { + public BigInteger call() { + return factorial(n); + } + }, getContext().dispatcher()); + + Future<FactorialResult> result = f.map( + new Mapper<BigInteger, FactorialResult>() { + public FactorialResult apply(BigInteger factorial) { + return new FactorialResult(n, factorial); + } + }, getContext().dispatcher()); + + pipe(result, getContext().dispatcher()).to(getSender()); + + } else { + unhandled(message); + } + } + + BigInteger factorial(int n) { + BigInteger acc = BigInteger.ONE; + for (int i = 1; i <= n; ++i) { + acc = acc.multiply(BigInteger.valueOf(i)); + } + return acc; + } +} +//#backend + diff --git a/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/factorial/japi/FactorialBackendMain.java b/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/factorial/japi/FactorialBackendMain.java new file mode 100644 index 0000000000..8acb6aad26 --- /dev/null +++ b/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/factorial/japi/FactorialBackendMain.java @@ -0,0 +1,23 @@ +package sample.cluster.factorial.japi; + +import com.typesafe.config.ConfigFactory; +import akka.actor.ActorSystem; +import akka.actor.Props; + +public class FactorialBackendMain { + + public static void main(String[] args) throws Exception { + // Override the configuration of the port + // when specified as program argument + if (args.length > 0) + System.setProperty("akka.remote.netty.port", args[0]); + + ActorSystem system = ActorSystem.create("ClusterSystem", ConfigFactory.load("factorial")); + + system.actorOf(new Props(FactorialBackend.class), "factorialBackend"); + + system.actorOf(new Props(MetricsListener.class), "metricsListener"); + + } + +} diff --git a/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/factorial/japi/FactorialFrontend.java b/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/factorial/japi/FactorialFrontend.java new file mode 100644 index 0000000000..13af688739 --- /dev/null +++ b/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/factorial/japi/FactorialFrontend.java @@ -0,0 +1,90 @@ +package sample.cluster.factorial.japi; + +import akka.actor.UntypedActor; +import akka.actor.ActorRef; +import akka.actor.Props; +import akka.event.Logging; +import akka.event.LoggingAdapter; +import akka.routing.FromConfig; +import akka.cluster.routing.AdaptiveLoadBalancingRouter; +import akka.cluster.routing.ClusterRouterConfig; +import akka.cluster.routing.ClusterRouterSettings; +import akka.cluster.routing.HeapMetricsSelector; +import akka.cluster.routing.SystemLoadAverageMetricsSelector; + +//#frontend +public class FactorialFrontend extends UntypedActor { + final int upToN; + final boolean repeat; + + LoggingAdapter log = Logging.getLogger(getContext().system(), this); + + ActorRef backend = getContext().actorOf( + new Props(FactorialBackend.class).withRouter(FromConfig.getInstance()), + "factorialBackendRouter"); + + public FactorialFrontend(int upToN, boolean repeat) { + this.upToN = upToN; + this.repeat = repeat; + } + + @Override + public void preStart() { + sendJobs(); + } + + @Override + public void onReceive(Object message) { + if (message instanceof FactorialResult) { + FactorialResult result = (FactorialResult) message; + if (result.n == upToN) { + log.debug("{}! 
= {}", result.n, result.factorial); + if (repeat) sendJobs(); + } + + } else { + unhandled(message); + } + } + + void sendJobs() { + log.info("Starting batch of factorials up to [{}]", upToN); + for (int n = 1; n <= upToN; n++) { + backend.tell(n, getSelf()); + } + } + +} +//#frontend + + +//not used, only for documentation +abstract class FactorialFrontend2 extends UntypedActor { + //#router-lookup-in-code + int totalInstances = 100; + String routeesPath = "/user/statsWorker"; + boolean allowLocalRoutees = true; + ActorRef backend = getContext().actorOf( + new Props(FactorialBackend.class).withRouter(new ClusterRouterConfig( + new AdaptiveLoadBalancingRouter(HeapMetricsSelector.getInstance(), 0), + new ClusterRouterSettings( + totalInstances, routeesPath, allowLocalRoutees))), + "factorialBackendRouter2"); + //#router-lookup-in-code +} + +//not used, only for documentation +abstract class StatsService3 extends UntypedActor { + //#router-deploy-in-code + int totalInstances = 100; + int maxInstancesPerNode = 3; + boolean allowLocalRoutees = false; + ActorRef backend = getContext().actorOf( + new Props(FactorialBackend.class).withRouter(new ClusterRouterConfig( + new AdaptiveLoadBalancingRouter( + SystemLoadAverageMetricsSelector.getInstance(), 0), + new ClusterRouterSettings( + totalInstances, maxInstancesPerNode, allowLocalRoutees))), + "factorialBackendRouter3"); + //#router-deploy-in-code +} \ No newline at end of file diff --git a/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/factorial/japi/FactorialFrontendMain.java b/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/factorial/japi/FactorialFrontendMain.java new file mode 100644 index 0000000000..e22ad6c2cb --- /dev/null +++ b/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/factorial/japi/FactorialFrontendMain.java @@ -0,0 +1,32 @@ +package sample.cluster.factorial.japi; + +import com.typesafe.config.ConfigFactory; +import akka.actor.ActorSystem; +import akka.actor.Props; +import akka.actor.UntypedActor; +import akka.actor.UntypedActorFactory; +import akka.cluster.Cluster; + +public class FactorialFrontendMain { + + public static void main(String[] args) throws Exception { + final int upToN = (args.length == 0 ? 
200 : Integer.valueOf(args[0])); + + final ActorSystem system = ActorSystem.create("ClusterSystem", ConfigFactory.load("factorial")); + system.log().info("Factorials will start when 3 members in the cluster."); + //#registerOnUp + Cluster.get(system).registerOnMemberUp(new Runnable() { + @Override + public void run() { + system.actorOf(new Props(new UntypedActorFactory() { + @Override + public UntypedActor create() { + return new FactorialFrontend(upToN, true); + } + }), "factorialFrontend"); + } + }); + //#registerOnUp + } + +} diff --git a/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/factorial/japi/FactorialResult.java b/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/factorial/japi/FactorialResult.java new file mode 100644 index 0000000000..0cb74b6b54 --- /dev/null +++ b/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/factorial/japi/FactorialResult.java @@ -0,0 +1,14 @@ +package sample.cluster.factorial.japi; + +import java.math.BigInteger; +import java.io.Serializable; + +public class FactorialResult implements Serializable { + public final int n; + public final BigInteger factorial; + + FactorialResult(int n, BigInteger factorial) { + this.n = n; + this.factorial = factorial; + } +} \ No newline at end of file diff --git a/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/factorial/japi/MetricsListener.java b/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/factorial/japi/MetricsListener.java new file mode 100644 index 0000000000..3acbf3e4c0 --- /dev/null +++ b/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/factorial/japi/MetricsListener.java @@ -0,0 +1,68 @@ +package sample.cluster.factorial.japi; + +//#metrics-listener +import akka.actor.UntypedActor; +import akka.cluster.Cluster; +import akka.cluster.ClusterEvent.ClusterMetricsChanged; +import akka.cluster.ClusterEvent.CurrentClusterState; +import akka.cluster.NodeMetrics; +import akka.cluster.StandardMetrics; +import akka.cluster.StandardMetrics.HeapMemory; +import akka.cluster.StandardMetrics.Cpu; +import akka.event.Logging; +import akka.event.LoggingAdapter; + +public class MetricsListener extends UntypedActor { + LoggingAdapter log = Logging.getLogger(getContext().system(), this); + + Cluster cluster = Cluster.get(getContext().system()); + + //subscribe to ClusterMetricsChanged + @Override + public void preStart() { + cluster.subscribe(getSelf(), ClusterMetricsChanged.class); + } + + //re-subscribe when restart + @Override + public void postStop() { + cluster.unsubscribe(getSelf()); + } + + + @Override + public void onReceive(Object message) { + if (message instanceof ClusterMetricsChanged) { + ClusterMetricsChanged clusterMetrics = (ClusterMetricsChanged) message; + for (NodeMetrics nodeMetrics : clusterMetrics.getNodeMetrics()) { + if (nodeMetrics.address().equals(cluster.selfAddress())) { + logHeap(nodeMetrics); + logCpu(nodeMetrics); + } + } + + } else if (message instanceof CurrentClusterState) { + // ignore + + } else { + unhandled(message); + } + } + + void logHeap(NodeMetrics nodeMetrics) { + HeapMemory heap = StandardMetrics.extractHeapMemory(nodeMetrics); + if (heap != null) { + log.info("Used heap: {} MB", ((double) heap.used()) / 1024 / 1024); + } + } + + void logCpu(NodeMetrics nodeMetrics) { + Cpu cpu = StandardMetrics.extractCpu(nodeMetrics); + if (cpu != null && cpu.systemLoadAverage().isDefined()) { + log.info("Load: {} ({} processors)", cpu.systemLoadAverage().get(), + cpu.processors()); + } + } + +} +//#metrics-listener \ No 
newline at end of file diff --git a/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/simple/japi/SimpleClusterListener.java b/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/simple/japi/SimpleClusterListener.java index 24770318c1..40e595e653 100644 --- a/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/simple/japi/SimpleClusterListener.java +++ b/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/simple/japi/SimpleClusterListener.java @@ -4,8 +4,8 @@ import akka.actor.UntypedActor; import akka.cluster.ClusterEvent.ClusterDomainEvent; import akka.cluster.ClusterEvent.CurrentClusterState; import akka.cluster.ClusterEvent.MemberJoined; -import akka.cluster.ClusterEvent.MemberUnreachable; import akka.cluster.ClusterEvent.MemberUp; +import akka.cluster.ClusterEvent.UnreachableMember; import akka.event.Logging; import akka.event.LoggingAdapter; @@ -26,8 +26,8 @@ public class SimpleClusterListener extends UntypedActor { MemberUp mUp = (MemberUp) message; log.info("Member is Up: {}", mUp.member()); - } else if (message instanceof MemberUnreachable) { - MemberUnreachable mUnreachable = (MemberUnreachable) message; + } else if (message instanceof UnreachableMember) { + UnreachableMember mUnreachable = (UnreachableMember) message; log.info("Member detected as unreachable: {}", mUnreachable.member()); } else if (message instanceof ClusterDomainEvent) { diff --git a/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/stats/japi/StatsAggregator.java b/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/stats/japi/StatsAggregator.java index 81b5b74ce5..0716cc38ec 100644 --- a/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/stats/japi/StatsAggregator.java +++ b/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/stats/japi/StatsAggregator.java @@ -25,7 +25,7 @@ public class StatsAggregator extends UntypedActor { @Override public void preStart() { - getContext().setReceiveTimeout(Duration.create(5, TimeUnit.SECONDS)); + getContext().setReceiveTimeout(Duration.create(3, TimeUnit.SECONDS)); } @Override diff --git a/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/stats/japi/StatsFacade.java b/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/stats/japi/StatsFacade.java index 89582882fa..15a271027c 100644 --- a/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/stats/japi/StatsFacade.java +++ b/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/stats/japi/StatsFacade.java @@ -49,7 +49,7 @@ public class StatsFacade extends UntypedActor { } else if (message instanceof StatsJob) { StatsJob job = (StatsJob) message; - Future f = ask(currentMaster, job, new Timeout(10, SECONDS)). + Future f = ask(currentMaster, job, new Timeout(5, SECONDS)). 
recover(new Recover() { public Object recover(Throwable t) { return new JobFailed("Service unavailable, try again later"); diff --git a/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/stats/japi/StatsSampleClient.java b/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/stats/japi/StatsSampleClient.java index bb3f52e248..2a89390677 100644 --- a/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/stats/japi/StatsSampleClient.java +++ b/akka-samples/akka-sample-cluster/src/main/java/sample/cluster/stats/japi/StatsSampleClient.java @@ -38,7 +38,7 @@ public class StatsSampleClient extends UntypedActor { .system() .scheduler() .schedule(interval, interval, getSelf(), "tick", - getContext().dispatcher()); + getContext().dispatcher(), null); } //subscribe to cluster changes, MemberEvent diff --git a/akka-samples/akka-sample-cluster/src/main/resources/application.conf b/akka-samples/akka-sample-cluster/src/main/resources/application.conf index 62554a65cf..507191b79c 100644 --- a/akka-samples/akka-sample-cluster/src/main/resources/application.conf +++ b/akka-samples/akka-sample-cluster/src/main/resources/application.conf @@ -1,3 +1,4 @@ +# //#cluster akka { actor { provider = "akka.cluster.ClusterActorRefProvider" @@ -11,8 +12,6 @@ akka { } } - extensions = ["akka.cluster.Cluster"] - cluster { seed-nodes = [ "akka://ClusterSystem@127.0.0.1:2551", @@ -20,4 +19,23 @@ akka { auto-down = on } -} \ No newline at end of file +} +# //#cluster + +# //#adaptive-router +akka.actor.deployment { + /factorialFrontend/factorialBackendRouter = { + router = adaptive + # metrics-selector = heap + # metrics-selector = load + # metrics-selector = cpu + metrics-selector = mix + nr-of-instances = 100 + cluster { + enabled = on + routees-path = "/user/factorialBackend" + allow-local-routees = off + } + } +} +# //#adaptive-router \ No newline at end of file diff --git a/akka-samples/akka-sample-cluster/src/main/resources/factorial.conf b/akka-samples/akka-sample-cluster/src/main/resources/factorial.conf new file mode 100644 index 0000000000..17e82db15d --- /dev/null +++ b/akka-samples/akka-sample-cluster/src/main/resources/factorial.conf @@ -0,0 +1,5 @@ +include "application" + +# //#min-nr-of-members +akka.cluster.min-nr-of-members = 3 +# //#min-nr-of-members \ No newline at end of file diff --git a/akka-samples/akka-sample-cluster/src/main/scala/sample/cluster/factorial/FactorialSample.scala b/akka-samples/akka-sample-cluster/src/main/scala/sample/cluster/factorial/FactorialSample.scala new file mode 100644 index 0000000000..d75dcb96e1 --- /dev/null +++ b/akka-samples/akka-sample-cluster/src/main/scala/sample/cluster/factorial/FactorialSample.scala @@ -0,0 +1,167 @@ +package sample.cluster.factorial + +//#imports +import scala.annotation.tailrec +import scala.concurrent.Future +import com.typesafe.config.ConfigFactory +import akka.actor.Actor +import akka.actor.ActorLogging +import akka.actor.ActorRef +import akka.actor.ActorSystem +import akka.actor.Props +import akka.pattern.pipe +import akka.routing.FromConfig + +//#imports + +import akka.cluster.Cluster +import akka.cluster.ClusterEvent.CurrentClusterState +import akka.cluster.ClusterEvent.MemberUp + +object FactorialFrontend { + def main(args: Array[String]): Unit = { + val upToN = if (args.isEmpty) 200 else args(0).toInt + + val system = ActorSystem("ClusterSystem", ConfigFactory.load("factorial")) + system.log.info("Factorials will start when 3 members in the cluster.") + //#registerOnUp + Cluster(system) registerOnMemberUp { + 
system.actorOf(Props(new FactorialFrontend(upToN, repeat = true)), + name = "factorialFrontend") + } + //#registerOnUp + } +} + +//#frontend +class FactorialFrontend(upToN: Int, repeat: Boolean) extends Actor with ActorLogging { + + val backend = context.actorOf(Props[FactorialBackend].withRouter(FromConfig), + name = "factorialBackendRouter") + + override def preStart(): Unit = sendJobs() + + def receive = { + case (n: Int, factorial: BigInt) ⇒ + if (n == upToN) { + log.debug("{}! = {}", n, factorial) + if (repeat) sendJobs() + } + } + + def sendJobs(): Unit = { + log.info("Starting batch of factorials up to [{}]", upToN) + 1 to upToN foreach { backend ! _ } + } +} +//#frontend + +object FactorialBackend { + def main(args: Array[String]): Unit = { + // Override the configuration of the port + // when specified as program argument + if (args.nonEmpty) System.setProperty("akka.remote.netty.port", args(0)) + + val system = ActorSystem("ClusterSystem", ConfigFactory.load("factorial")) + system.actorOf(Props[FactorialBackend], name = "factorialBackend") + + system.actorOf(Props[MetricsListener], name = "metricsListener") + } +} + +//#backend +class FactorialBackend extends Actor with ActorLogging { + + import context.dispatcher + + def receive = { + case (n: Int) ⇒ + Future(factorial(n)) map { result ⇒ (n, result) } pipeTo sender + } + + def factorial(n: Int): BigInt = { + @tailrec def factorialAcc(acc: BigInt, n: Int): BigInt = { + if (n <= 1) acc + else factorialAcc(acc * n, n - 1) + } + factorialAcc(BigInt(1), n) + } + +} +//#backend + +//#metrics-listener +import akka.cluster.Cluster +import akka.cluster.ClusterEvent.ClusterMetricsChanged +import akka.cluster.ClusterEvent.CurrentClusterState +import akka.cluster.NodeMetrics +import akka.cluster.StandardMetrics.HeapMemory +import akka.cluster.StandardMetrics.Cpu + +class MetricsListener extends Actor with ActorLogging { + val selfAddress = Cluster(context.system).selfAddress + + // subscribe to ClusterMetricsChanged + // re-subscribe when restart + override def preStart(): Unit = + Cluster(context.system).subscribe(self, classOf[ClusterMetricsChanged]) + override def postStop(): Unit = + Cluster(context.system).unsubscribe(self) + + def receive = { + case ClusterMetricsChanged(clusterMetrics) ⇒ + clusterMetrics.filter(_.address == selfAddress) foreach { nodeMetrics ⇒ + logHeap(nodeMetrics) + logCpu(nodeMetrics) + } + case state: CurrentClusterState ⇒ // ignore + } + + def logHeap(nodeMetrics: NodeMetrics): Unit = nodeMetrics match { + case HeapMemory(address, timestamp, used, committed, max) ⇒ + log.info("Used heap: {} MB", used.doubleValue / 1024 / 1024) + case _ ⇒ // no heap info + } + + def logCpu(nodeMetrics: NodeMetrics): Unit = nodeMetrics match { + case Cpu(address, timestamp, Some(systemLoadAverage), cpuCombined, processors) ⇒ + log.info("Load: {} ({} processors)", systemLoadAverage, processors) + case _ ⇒ // no cpu info + } +} + +//#metrics-listener + +// not used, only for documentation +abstract class FactorialFrontend2 extends Actor { + //#router-lookup-in-code + import akka.cluster.routing.ClusterRouterConfig + import akka.cluster.routing.ClusterRouterSettings + import akka.cluster.routing.AdaptiveLoadBalancingRouter + import akka.cluster.routing.HeapMetricsSelector + + val backend = context.actorOf(Props[FactorialBackend].withRouter( + ClusterRouterConfig(AdaptiveLoadBalancingRouter(HeapMetricsSelector), + ClusterRouterSettings( + totalInstances = 100, routeesPath = "/user/statsWorker", + allowLocalRoutees = true))), + name = 
"factorialBackendRouter2") + //#router-lookup-in-code +} + +// not used, only for documentation +abstract class FactorialFrontend3 extends Actor { + //#router-deploy-in-code + import akka.cluster.routing.ClusterRouterConfig + import akka.cluster.routing.ClusterRouterSettings + import akka.cluster.routing.AdaptiveLoadBalancingRouter + import akka.cluster.routing.SystemLoadAverageMetricsSelector + + val backend = context.actorOf(Props[FactorialBackend].withRouter( + ClusterRouterConfig(AdaptiveLoadBalancingRouter( + SystemLoadAverageMetricsSelector), ClusterRouterSettings( + totalInstances = 100, maxInstancesPerNode = 3, + allowLocalRoutees = false))), + name = "factorialBackendRouter3") + //#router-deploy-in-code +} \ No newline at end of file diff --git a/akka-samples/akka-sample-cluster/src/main/scala/sample/cluster/simple/SimpleClusterApp.scala b/akka-samples/akka-sample-cluster/src/main/scala/sample/cluster/simple/SimpleClusterApp.scala index bdf669eb99..6bdec7cfba 100644 --- a/akka-samples/akka-sample-cluster/src/main/scala/sample/cluster/simple/SimpleClusterApp.scala +++ b/akka-samples/akka-sample-cluster/src/main/scala/sample/cluster/simple/SimpleClusterApp.scala @@ -22,7 +22,7 @@ object SimpleClusterApp { log.info("Member joined: {}", member) case MemberUp(member) ⇒ log.info("Member is Up: {}", member) - case MemberUnreachable(member) ⇒ + case UnreachableMember(member) ⇒ log.info("Member detected as unreachable: {}", member) case _: ClusterDomainEvent ⇒ // ignore diff --git a/akka-samples/akka-sample-cluster/src/main/scala/sample/cluster/stats/StatsSample.scala b/akka-samples/akka-sample-cluster/src/main/scala/sample/cluster/stats/StatsSample.scala index d35c475780..de7eff456c 100644 --- a/akka-samples/akka-sample-cluster/src/main/scala/sample/cluster/stats/StatsSample.scala +++ b/akka-samples/akka-sample-cluster/src/main/scala/sample/cluster/stats/StatsSample.scala @@ -15,10 +15,7 @@ import akka.actor.ReceiveTimeout import akka.actor.RelativeActorPath import akka.actor.RootActorPath import akka.cluster.Cluster -import akka.cluster.ClusterEvent.CurrentClusterState -import akka.cluster.ClusterEvent.LeaderChanged -import akka.cluster.ClusterEvent.MemberEvent -import akka.cluster.ClusterEvent.MemberUp +import akka.cluster.ClusterEvent._ import akka.cluster.MemberStatus import akka.routing.FromConfig import akka.routing.ConsistentHashingRouter.ConsistentHashableEnvelope @@ -54,7 +51,7 @@ class StatsService extends Actor { class StatsAggregator(expectedResults: Int, replyTo: ActorRef) extends Actor { var results = IndexedSeq.empty[Int] - context.setReceiveTimeout(5 seconds) + context.setReceiveTimeout(3 seconds) def receive = { case wordCount: Int ⇒ @@ -106,7 +103,7 @@ class StatsFacade extends Actor with ActorLogging { case job: StatsJob if currentMaster.isEmpty ⇒ sender ! JobFailed("Service unavailable, try again later") case job: StatsJob ⇒ - implicit val timeout = Timeout(10.seconds) + implicit val timeout = Timeout(5.seconds) currentMaster foreach { _ ? 
job recover { case _ ⇒ JobFailed("Service unavailable, try again later") @@ -219,7 +216,10 @@ class StatsSampleClient(servicePath: String) extends Actor { var nodes = Set.empty[Address] - override def preStart(): Unit = cluster.subscribe(self, classOf[MemberEvent]) + override def preStart(): Unit = { + cluster.subscribe(self, classOf[MemberEvent]) + cluster.subscribe(self, classOf[UnreachableMember]) + } override def postStop(): Unit = { cluster.unsubscribe(self) tickTask.cancel() @@ -237,8 +237,9 @@ class StatsSampleClient(servicePath: String) extends Actor { println(failed) case state: CurrentClusterState ⇒ nodes = state.members.collect { case m if m.status == MemberStatus.Up ⇒ m.address } - case MemberUp(m) ⇒ nodes += m.address - case other: MemberEvent ⇒ nodes -= other.member.address + case MemberUp(m) ⇒ nodes += m.address + case other: MemberEvent ⇒ nodes -= other.member.address + case UnreachableMember(m) ⇒ nodes -= m.address } } diff --git a/akka-samples/akka-sample-cluster/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSingleMasterSpec.scala b/akka-samples/akka-sample-cluster/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSingleMasterSpec.scala index 7acd2ff202..5f1c9728a3 100644 --- a/akka-samples/akka-sample-cluster/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSingleMasterSpec.scala +++ b/akka-samples/akka-sample-cluster/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSingleMasterSpec.scala @@ -33,6 +33,8 @@ object StatsSampleSingleMasterSpecConfig extends MultiNodeConfig { akka.actor.provider = "akka.cluster.ClusterActorRefProvider" akka.remote.log-remote-lifecycle-events = off akka.cluster.auto-join = off + # don't use sigar for tests, native lib not in path + akka.cluster.metrics.collector-class = akka.cluster.JmxMetricsCollector #//#router-deploy-config akka.actor.deployment { /statsFacade/statsService/workerRouter { @@ -72,7 +74,6 @@ abstract class StatsSampleSingleMasterSpec extends MultiNodeSpec(StatsSampleSing expectMsgClass(classOf[CurrentClusterState]) Cluster(system) join node(first).address - system.actorOf(Props[StatsFacade], "statsFacade") expectMsgAllOf( MemberUp(Member(node(first).address, MemberStatus.Up)), @@ -80,11 +81,13 @@ abstract class StatsSampleSingleMasterSpec extends MultiNodeSpec(StatsSampleSing MemberUp(Member(node(third).address, MemberStatus.Up))) Cluster(system).unsubscribe(testActor) + + system.actorOf(Props[StatsFacade], "statsFacade") testConductor.enter("all-up") } - "show usage of the statsFacade" in within(15 seconds) { + "show usage of the statsFacade" in within(20 seconds) { val facade = system.actorFor(RootActorPath(node(third).address) / "user" / "statsFacade") // eventually the service should be ok, diff --git a/akka-samples/akka-sample-cluster/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSpec.scala b/akka-samples/akka-sample-cluster/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSpec.scala index b9eeef3159..7d9fbda51b 100644 --- a/akka-samples/akka-sample-cluster/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSpec.scala +++ b/akka-samples/akka-sample-cluster/src/multi-jvm/scala/sample/cluster/stats/StatsSampleSpec.scala @@ -27,6 +27,8 @@ object StatsSampleSpecConfig extends MultiNodeConfig { akka.actor.provider = "akka.cluster.ClusterActorRefProvider" akka.remote.log-remote-lifecycle-events = off akka.cluster.auto-join = off + # don't use sigar for tests, native lib not in path + akka.cluster.metrics.collector-class = akka.cluster.JmxMetricsCollector #//#router-lookup-config akka.actor.deployment { 
       /statsService/workerRouter {
@@ -71,7 +73,7 @@ abstract class StatsSampleSpec extends MultiNodeSpec(StatsSampleSpecConfig)
 
   override def afterAll() = multiNodeSpecAfterAll()
 
-//#abstract-test
+  //#abstract-test
 
   "The stats sample" must {
@@ -80,7 +82,7 @@ abstract class StatsSampleSpec extends MultiNodeSpec(StatsSampleSpecConfig)
       Cluster(system).subscribe(testActor, classOf[MemberUp])
       expectMsgClass(classOf[CurrentClusterState])
-    //#addresses
+      //#addresses
       val firstAddress = node(first).address
       val secondAddress = node(second).address
       val thirdAddress = node(third).address
@@ -104,34 +106,37 @@ abstract class StatsSampleSpec extends MultiNodeSpec(StatsSampleSpecConfig)
     }
     //#startup-cluster
 
-    //#test-statsService
     "show usage of the statsService from one node" in within(15 seconds) {
       runOn(second) {
-        val service = system.actorFor(node(third) / "user" / "statsService")
-        service ! StatsJob("this is the text that will be analyzed")
-        val meanWordLength = expectMsgPF() {
-          case StatsResult(meanWordLength) ⇒ meanWordLength
-        }
-        meanWordLength must be(3.875 plusOrMinus 0.001)
+        assertServiceOk
       }
       testConductor.enter("done-2")
     }
-    //#test-statsService
-
-    "show usage of the statsService from all nodes" in within(15 seconds) {
-      val service = system.actorFor(node(third) / "user" / "statsService")
-      service ! StatsJob("this is the text that will be analyzed")
-      val meanWordLength = expectMsgPF() {
-        case StatsResult(meanWordLength) ⇒ meanWordLength
-      }
-      meanWordLength must be(3.875 plusOrMinus 0.001)
+    def assertServiceOk: Unit = {
+      val service = system.actorFor(node(third) / "user" / "statsService")
+      // eventually the service should be ok,
+      // first attempts might fail because worker actors are not started yet
+      awaitCond {
+        service ! StatsJob("this is the text that will be analyzed")
+        expectMsgPF() {
+          case unavailable: JobFailed ⇒ false
+          case StatsResult(meanWordLength) ⇒
+            meanWordLength must be(3.875 plusOrMinus 0.001)
+            true
+        }
+      }
+
+    }
+    //#test-statsService
+
+    "show usage of the statsService from all nodes" in within(15 seconds) {
+      assertServiceOk
       testConductor.enter("done-3")
     }
-
   }
 }
\ No newline at end of file
diff --git a/akka-samples/akka-sample-cluster/src/multi-jvm/scala/sample/cluster/stats/japi/StatsSampleJapiSpec.scala b/akka-samples/akka-sample-cluster/src/multi-jvm/scala/sample/cluster/stats/japi/StatsSampleJapiSpec.scala
index 1f4624674b..4583dac90e 100644
--- a/akka-samples/akka-sample-cluster/src/multi-jvm/scala/sample/cluster/stats/japi/StatsSampleJapiSpec.scala
+++ b/akka-samples/akka-sample-cluster/src/multi-jvm/scala/sample/cluster/stats/japi/StatsSampleJapiSpec.scala
@@ -31,6 +33,8 @@ object StatsSampleJapiSpecConfig extends MultiNodeConfig {
     akka.actor.provider = "akka.cluster.ClusterActorRefProvider"
     akka.remote.log-remote-lifecycle-events = off
     akka.cluster.auto-join = off
+    # don't use sigar for tests, native lib not in path
+    akka.cluster.metrics.collector-class = akka.cluster.JmxMetricsCollector
     akka.actor.deployment {
       /statsService/workerRouter {
         router = consistent-hashing
@@ -88,33 +90,36 @@ abstract class StatsSampleJapiSpec extends MultiNodeSpec(StatsSampleJapiSpecConf
 
       testConductor.enter("all-up")
     }
-
     "show usage of the statsService from one node" in within(15 seconds) {
       runOn(second) {
-        val service = system.actorFor(node(third) / "user" / "statsService")
-        service ! new StatsJob("this is the text that will be analyzed")
-        val meanWordLength = expectMsgPF() {
-          case r: StatsResult ⇒ r.getMeanWordLength
-        }
-        meanWordLength must be(3.875 plusOrMinus 0.001)
+        assertServiceOk
       }
       testConductor.enter("done-2")
     }
-    //#test-statsService
-
-    "show usage of the statsService from all nodes" in within(15 seconds) {
+
+    def assertServiceOk: Unit = {
       val service = system.actorFor(node(third) / "user" / "statsService")
-      service ! new StatsJob("this is the text that will be analyzed")
-      val meanWordLength = expectMsgPF() {
-        case r: StatsResult ⇒ r.getMeanWordLength
+      // eventually the service should be ok,
+      // first attempts might fail because worker actors are not started yet
+      awaitCond {
+        service ! new StatsJob("this is the text that will be analyzed")
+        expectMsgPF() {
+          case unavailable: JobFailed ⇒ false
+          case r: StatsResult ⇒
+            r.getMeanWordLength must be(3.875 plusOrMinus 0.001)
+            true
+        }
       }
-      meanWordLength must be(3.875 plusOrMinus 0.001)
+    }
+    //#test-statsService
+
+    "show usage of the statsService from all nodes" in within(15 seconds) {
+      assertServiceOk
       testConductor.enter("done-3")
     }
-
   }
 }
\ No newline at end of file
diff --git a/akka-samples/akka-sample-cluster/src/multi-jvm/scala/sample/cluster/stats/japi/StatsSampleSingleMasterJapiSpec.scala b/akka-samples/akka-sample-cluster/src/multi-jvm/scala/sample/cluster/stats/japi/StatsSampleSingleMasterJapiSpec.scala
index 9299e007c4..ca69c1ae6c 100644
--- a/akka-samples/akka-sample-cluster/src/multi-jvm/scala/sample/cluster/stats/japi/StatsSampleSingleMasterJapiSpec.scala
+++ b/akka-samples/akka-sample-cluster/src/multi-jvm/scala/sample/cluster/stats/japi/StatsSampleSingleMasterJapiSpec.scala
@@ -34,6 +34,8 @@ object StatsSampleSingleMasterJapiSpecConfig extends MultiNodeConfig {
     akka.actor.provider = "akka.cluster.ClusterActorRefProvider"
     akka.remote.log-remote-lifecycle-events = off
     akka.cluster.auto-join = off
+    # don't use sigar for tests, native lib not in path
+    akka.cluster.metrics.collector-class = akka.cluster.JmxMetricsCollector
     akka.actor.deployment {
       /statsFacade/statsService/workerRouter {
         router = consistent-hashing
@@ -71,7 +73,6 @@ abstract class StatsSampleSingleMasterJapiSpec extends MultiNodeSpec(StatsSample
       expectMsgClass(classOf[CurrentClusterState])
       Cluster(system) join node(first).address
-      system.actorOf(Props[StatsFacade], "statsFacade")
 
       expectMsgAllOf(
         MemberUp(Member(node(first).address, MemberStatus.Up)),
@@ -80,10 +81,12 @@ abstract class StatsSampleSingleMasterJapiSpec extends MultiNodeSpec(StatsSample
 
       Cluster(system).unsubscribe(testActor)
 
+      system.actorOf(Props[StatsFacade], "statsFacade")
+
       testConductor.enter("all-up")
     }
 
-    "show usage of the statsFacade" in within(15 seconds) {
+    "show usage of the statsFacade" in within(20 seconds) {
       val facade = system.actorFor(RootActorPath(node(third).address) / "user" / "statsFacade")
       // eventually the service should be ok,
diff --git a/akka-samples/akka-sample-cluster/src/multi-jvm/scala/sample/cluster/transformation/TransformationSampleSpec.scala b/akka-samples/akka-sample-cluster/src/multi-jvm/scala/sample/cluster/transformation/TransformationSampleSpec.scala
index 8129bad77e..0e4403b285 100644
--- a/akka-samples/akka-sample-cluster/src/multi-jvm/scala/sample/cluster/transformation/TransformationSampleSpec.scala
+++ b/akka-samples/akka-sample-cluster/src/multi-jvm/scala/sample/cluster/transformation/TransformationSampleSpec.scala
@@ -29,6 +29,8 @@ object TransformationSampleSpecConfig extends MultiNodeConfig {
     akka.actor.provider = 
"akka.cluster.ClusterActorRefProvider" akka.remote.log-remote-lifecycle-events = off akka.cluster.auto-join = off + # don't use sigar for tests, native lib not in path + akka.cluster.metrics.collector-class = akka.cluster.JmxMetricsCollector """)) } @@ -88,6 +90,8 @@ abstract class TransformationSampleSpec extends MultiNodeSpec(TransformationSamp Cluster(system) join node(frontend1).address system.actorOf(Props[TransformationFrontend], name = "frontend") } + testConductor.enter("frontend2-started") + runOn(backend2, backend3) { Cluster(system) join node(backend1).address system.actorOf(Props[TransformationBackend], name = "backend") diff --git a/akka-samples/akka-sample-cluster/src/multi-jvm/scala/sample/cluster/transformation/japi/TransformationSampleJapiSpec.scala b/akka-samples/akka-sample-cluster/src/multi-jvm/scala/sample/cluster/transformation/japi/TransformationSampleJapiSpec.scala index a37458a129..bf6fdaf19c 100644 --- a/akka-samples/akka-sample-cluster/src/multi-jvm/scala/sample/cluster/transformation/japi/TransformationSampleJapiSpec.scala +++ b/akka-samples/akka-sample-cluster/src/multi-jvm/scala/sample/cluster/transformation/japi/TransformationSampleJapiSpec.scala @@ -30,6 +30,8 @@ object TransformationSampleJapiSpecConfig extends MultiNodeConfig { akka.actor.provider = "akka.cluster.ClusterActorRefProvider" akka.remote.log-remote-lifecycle-events = off akka.cluster.auto-join = off + # don't use sigar for tests, native lib not in path + akka.cluster.metrics.collector-class = akka.cluster.JmxMetricsCollector """)) } @@ -89,6 +91,7 @@ abstract class TransformationSampleJapiSpec extends MultiNodeSpec(Transformation Cluster(system) join node(frontend1).address system.actorOf(Props[TransformationFrontend], name = "frontend") } + testConductor.enter("frontend2-started") runOn(backend2, backend3) { Cluster(system) join node(backend1).address system.actorOf(Props[TransformationBackend], name = "backend") diff --git a/akka-sbt-plugin/sample/project/Build.scala b/akka-sbt-plugin/sample/project/Build.scala index 6fbf075359..5223e097bd 100644 --- a/akka-sbt-plugin/sample/project/Build.scala +++ b/akka-sbt-plugin/sample/project/Build.scala @@ -7,7 +7,7 @@ import akka.sbt.AkkaKernelPlugin.{ Dist, outputDirectory, distJvmOptions} object HelloKernelBuild extends Build { val Organization = "akka.sample" val Version = "2.2-SNAPSHOT" - val ScalaVersion = "2.10.0-RC1" + val ScalaVersion = "2.10.0-RC3" lazy val HelloKernel = Project( id = "hello-kernel", @@ -52,7 +52,7 @@ object Dependency { val Akka = "2.2-SNAPSHOT" } - val akkaKernel = "com.typesafe.akka" %% "akka-kernel" % V.Akka cross CrossVersion.full - val akkaSlf4j = "com.typesafe.akka" %% "akka-slf4j" % V.Akka cross CrossVersion.full - val logback = "ch.qos.logback" % "logback-classic" % "1.0.0" + val akkaKernel = "com.typesafe.akka" %% "akka-kernel" % V.Akka cross CrossVersion.full + val akkaSlf4j = "com.typesafe.akka" %% "akka-slf4j" % V.Akka cross CrossVersion.full + val logback = "ch.qos.logback" % "logback-classic" % "1.0.0" } diff --git a/akka-sbt-plugin/src/main/scala/AkkaKernelPlugin.scala b/akka-sbt-plugin/src/main/scala/AkkaKernelPlugin.scala index f000763a70..d63a22d213 100644 --- a/akka-sbt-plugin/src/main/scala/AkkaKernelPlugin.scala +++ b/akka-sbt-plugin/src/main/scala/AkkaKernelPlugin.scala @@ -19,6 +19,7 @@ object AkkaKernelPlugin extends Plugin { configSourceDirs: Seq[File], distJvmOptions: String, distMainClass: String, + distBootClass: String, libFilter: File ⇒ Boolean, additionalLibs: Seq[File]) @@ -30,8 +31,12 @@ 
object AkkaKernelPlugin extends Plugin { val configSourceDirs = TaskKey[Seq[File]]("config-source-directories", "Configuration files are copied from these directories") - val distJvmOptions = SettingKey[String]("kernel-jvm-options", "JVM parameters to use in start script") - val distMainClass = SettingKey[String]("kernel-main-class", "Kernel main class to use in start script") + val distJvmOptions = SettingKey[String]("kernel-jvm-options", + "JVM parameters to use in start script") + val distMainClass = SettingKey[String]("kernel-main-class", + "main class to use in start script, defaults to akka.kernel.Main to load an akka.kernel.Bootable") + val distBootClass = SettingKey[String]("kernel-boot-class", + "class implementing akka.kernel.Bootable, which gets loaded by the default 'distMainClass'") val libFilter = SettingKey[File ⇒ Boolean]("lib-filter", "Filter of dependency jar files") val additionalLibs = TaskKey[Seq[File]]("additional-libs", "Additional dependency jar files") @@ -50,9 +55,10 @@ object AkkaKernelPlugin extends Plugin { configSourceDirs <<= defaultConfigSourceDirs, distJvmOptions := "-Xms1024M -Xmx1024M -Xss1M -XX:MaxPermSize=256M -XX:+UseParallelGC", distMainClass := "akka.kernel.Main", + distBootClass := "", libFilter := { f ⇒ true }, additionalLibs <<= defaultAdditionalLibs, - distConfig <<= (outputDirectory, configSourceDirs, distJvmOptions, distMainClass, libFilter, additionalLibs) map DistConfig)) ++ + distConfig <<= (outputDirectory, configSourceDirs, distJvmOptions, distMainClass, distBootClass, libFilter, additionalLibs) map DistConfig)) ++ Seq(dist <<= (dist in Dist), distNeedsPackageBin) private def distTask: Initialize[Task[File]] = @@ -69,7 +75,7 @@ object AkkaKernelPlugin extends Plugin { log.info("Creating distribution %s ..." 
format conf.outputDirectory) IO.createDirectory(conf.outputDirectory) - Scripts(conf.distJvmOptions, conf.distMainClass).writeScripts(distBinPath) + Scripts(conf.distJvmOptions, conf.distMainClass, conf.distBootClass).writeScripts(distBinPath) copyDirectories(conf.configSourceDirs, distConfigPath) copyJars(tgt, distDeployPath) @@ -97,7 +103,8 @@ object AkkaKernelPlugin extends Plugin { def isKernelProject(dependencies: Seq[ModuleID]): Boolean = { dependencies.exists { d ⇒ - (d.organization == "com.typesafe.akka" || d.organization == "se.scalablesolutions.akka") && d.name == "akka-kernel" + (d.organization == "com.typesafe.akka" || d.organization == "se.scalablesolutions.akka") && + (d.name == "akka-kernel" || d.name.startsWith("akka-kernel_")) } } @@ -109,7 +116,7 @@ object AkkaKernelPlugin extends Plugin { Seq.empty[File] } - private case class Scripts(jvmOptions: String, mainClass: String) { + private case class Scripts(jvmOptions: String, mainClass: String, bootClass: String) { def writeScripts(to: File) = { scripts.map { script ⇒ @@ -131,8 +138,8 @@ object AkkaKernelPlugin extends Plugin { |AKKA_CLASSPATH="$AKKA_HOME/config:$AKKA_HOME/lib/*" |JAVA_OPTS="%s" | - |java $JAVA_OPTS -cp "$AKKA_CLASSPATH" -Dakka.home="$AKKA_HOME" %s "$@" - |""".stripMargin.format(jvmOptions, mainClass) + |java $JAVA_OPTS -cp "$AKKA_CLASSPATH" -Dakka.home="$AKKA_HOME" %s%s "$@" + |""".stripMargin.format(jvmOptions, mainClass, if (bootClass.nonEmpty) " " + bootClass else "") private def distBatScript = """|@echo off @@ -140,8 +147,8 @@ object AkkaKernelPlugin extends Plugin { |set AKKA_CLASSPATH=%%AKKA_HOME%%\config;%%AKKA_HOME%%\lib\* |set JAVA_OPTS=%s | - |java %%JAVA_OPTS%% -cp "%%AKKA_CLASSPATH%%" -Dakka.home="%%AKKA_HOME%%" %s %%* - |""".stripMargin.format(jvmOptions, mainClass) + |java %%JAVA_OPTS%% -cp "%%AKKA_CLASSPATH%%" -Dakka.home="%%AKKA_HOME%%" %s%s %%* + |""".stripMargin.format(jvmOptions, mainClass, if (bootClass.nonEmpty) " " + bootClass else "") private def setExecutable(target: File, executable: Boolean): Option[String] = { val success = target.setExecutable(executable, false) diff --git a/akka-testkit/src/main/resources/reference.conf b/akka-testkit/src/main/resources/reference.conf index 17da88c22e..7adeb68331 100644 --- a/akka-testkit/src/main/resources/reference.conf +++ b/akka-testkit/src/main/resources/reference.conf @@ -15,7 +15,8 @@ akka { # all required messages are received filter-leeway = 3s - # duration to wait in expectMsg and friends outside of within() block by default + # duration to wait in expectMsg and friends outside of within() block + # by default single-expect-default = 3s # The timeout that is added as an implicit by DefaultTimeout trait diff --git a/akka-testkit/src/main/scala/akka/testkit/TestFSMRef.scala b/akka-testkit/src/main/scala/akka/testkit/TestFSMRef.scala index c60d9d85f4..a6d2cbf39e 100644 --- a/akka-testkit/src/main/scala/akka/testkit/TestFSMRef.scala +++ b/akka-testkit/src/main/scala/akka/testkit/TestFSMRef.scala @@ -75,11 +75,18 @@ class TestFSMRef[S, D, T <: Actor]( */ def cancelTimer(name: String) { fsm.cancelTimer(name) } + @deprecated("Use isTimerActive", "2.2") + def timerActive_?(name: String): Boolean = isTimerActive(name) + + /** + * Proxy for FSM.isTimerActive. + */ + def isTimerActive(name: String) = fsm.isTimerActive(name) + /** * Proxy for FSM.timerActive_?. 
*/ - def timerActive_?(name: String) = fsm.timerActive_?(name) - + def isStateTimerActive = fsm.isStateTimerActive } object TestFSMRef { diff --git a/akka-testkit/src/test/scala/akka/testkit/TestFSMRefSpec.scala b/akka-testkit/src/test/scala/akka/testkit/TestFSMRefSpec.scala index 6ed7d51708..41efe55e6d 100644 --- a/akka-testkit/src/test/scala/akka/testkit/TestFSMRefSpec.scala +++ b/akka-testkit/src/test/scala/akka/testkit/TestFSMRefSpec.scala @@ -51,11 +51,11 @@ class TestFSMRefSpec extends AkkaSpec { case x ⇒ stay } }, "test-fsm-ref-2") - fsm.timerActive_?("test") must be(false) + fsm.isTimerActive("test") must be(false) fsm.setTimer("test", 12, 10 millis, true) - fsm.timerActive_?("test") must be(true) + fsm.isTimerActive("test") must be(true) fsm.cancelTimer("test") - fsm.timerActive_?("test") must be(false) + fsm.isTimerActive("test") must be(false) } } } diff --git a/akka-zeromq/src/main/scala/akka/zeromq/ConcurrentSocketActor.scala b/akka-zeromq/src/main/scala/akka/zeromq/ConcurrentSocketActor.scala index a9efa56c1e..f91ea2e318 100644 --- a/akka-zeromq/src/main/scala/akka/zeromq/ConcurrentSocketActor.scala +++ b/akka-zeromq/src/main/scala/akka/zeromq/ConcurrentSocketActor.scala @@ -14,6 +14,7 @@ import scala.collection.mutable.ListBuffer import scala.util.control.NonFatal import akka.event.Logging import java.util.concurrent.TimeUnit +import akka.util.ByteString private[zeromq] object ConcurrentSocketActor { private sealed trait PollMsg @@ -29,7 +30,6 @@ private[zeromq] object ConcurrentSocketActor { private[zeromq] class ConcurrentSocketActor(params: immutable.Seq[SocketOption]) extends Actor { import ConcurrentSocketActor._ - private val noBytes = Array[Byte]() private val zmqContext = params collectFirst { case c: Context ⇒ c } getOrElse DefaultContext private var deserializer = params collectFirst { case d: Deserializer ⇒ d } getOrElse new ZMQMessageDeserializer @@ -41,7 +41,7 @@ private[zeromq] class ConcurrentSocketActor(params: immutable.Seq[SocketOption]) private val socket: Socket = zmqContext.socket(socketType) private val poller: Poller = zmqContext.poller - private val pendingSends = new ListBuffer[immutable.Seq[Frame]] + private val pendingSends = new ListBuffer[immutable.Seq[ByteString]] def receive = { case m: PollMsg ⇒ doPoll(m) @@ -152,13 +152,13 @@ private[zeromq] class ConcurrentSocketActor(params: immutable.Seq[SocketOption]) } } finally notifyListener(Closed) - @tailrec private def flushMessage(i: immutable.Seq[Frame]): Boolean = + @tailrec private def flushMessage(i: immutable.Seq[ByteString]): Boolean = if (i.isEmpty) true else { val head = i.head val tail = i.tail - if (socket.send(head.payload.toArray, if (tail.nonEmpty) JZMQ.SNDMORE else 0)) flushMessage(tail) + if (socket.send(head.toArray, if (tail.nonEmpty) JZMQ.SNDMORE else 0)) flushMessage(tail) else { pendingSends.prepend(i) // Reenqueue the rest of the message so the next flush takes care of it self ! 
Flush @@ -199,7 +199,7 @@ private[zeromq] class ConcurrentSocketActor(params: immutable.Seq[SocketOption]) case frames ⇒ notifyListener(deserializer(frames)); doPoll(mode, togo - 1) } - @tailrec private def receiveMessage(mode: PollMsg, currentFrames: Vector[Frame] = Vector.empty): immutable.Seq[Frame] = + @tailrec private def receiveMessage(mode: PollMsg, currentFrames: Vector[ByteString] = Vector.empty): immutable.Seq[ByteString] = if (mode == PollCareful && (poller.poll(0) <= 0)) { if (currentFrames.isEmpty) currentFrames else throw new IllegalStateException("Received partial transmission!") } else { @@ -207,7 +207,7 @@ private[zeromq] class ConcurrentSocketActor(params: immutable.Seq[SocketOption]) case null ⇒ /*EAGAIN*/ if (currentFrames.isEmpty) currentFrames else receiveMessage(mode, currentFrames) case bytes ⇒ - val frames = currentFrames :+ Frame(if (bytes.length == 0) noBytes else bytes) + val frames = currentFrames :+ ByteString(bytes) if (socket.hasReceiveMore) receiveMessage(mode, frames) else frames } } diff --git a/akka-zeromq/src/main/scala/akka/zeromq/SocketOption.scala b/akka-zeromq/src/main/scala/akka/zeromq/SocketOption.scala index b70c245327..b74760b5c3 100644 --- a/akka-zeromq/src/main/scala/akka/zeromq/SocketOption.scala +++ b/akka-zeromq/src/main/scala/akka/zeromq/SocketOption.scala @@ -9,6 +9,10 @@ import scala.concurrent.duration._ import scala.collection.immutable import org.zeromq.{ ZMQ ⇒ JZMQ } import org.zeromq.ZMQ.{ Poller, Socket } +import akka.japi.Util.immutableSeq +import akka.util.ByteString +import akka.util.Collections.EmptyImmutableSeq +import annotation.varargs /** * Marker trait representing request messages for zeromq @@ -37,7 +41,7 @@ sealed trait SocketConnectOption extends SocketOption { * A base trait for pubsub options for the ZeroMQ socket */ sealed trait PubSubOption extends SocketOption { - def payload: immutable.Seq[Byte] + def payload: ByteString } /** @@ -80,7 +84,7 @@ class Context(numIoThreads: Int) extends SocketMeta { * A base trait for message deserializers */ trait Deserializer extends SocketOption { - def apply(frames: immutable.Seq[Frame]): Any + def apply(frames: immutable.Seq[ByteString]): Any } /** @@ -173,12 +177,15 @@ case class Bind(endpoint: String) extends SocketConnectOption * * @param payload the topic to subscribe to */ -case class Subscribe(payload: immutable.Seq[Byte]) extends PubSubOption { - def this(topic: String) = this(topic.getBytes("UTF-8").to[immutable.Seq]) +case class Subscribe(payload: ByteString) extends PubSubOption { + def this(topic: String) = this(ByteString(topic)) } object Subscribe { - def apply(topic: String): Subscribe = new Subscribe(topic) - val all = Subscribe("") + val all = Subscribe(ByteString.empty) + def apply(topic: String): Subscribe = topic match { + case null | "" ⇒ all + case t ⇒ new Subscribe(t) + } } /** @@ -190,8 +197,8 @@ object Subscribe { * * @param payload */ -case class Unsubscribe(payload: immutable.Seq[Byte]) extends PubSubOption { - def this(topic: String) = this(topic.getBytes("UTF-8").to[immutable.Seq]) +case class Unsubscribe(payload: ByteString) extends PubSubOption { + def this(topic: String) = this(ByteString(topic)) } object Unsubscribe { def apply(topic: String): Unsubscribe = new Unsubscribe(topic) @@ -201,33 +208,34 @@ object Unsubscribe { * Send a message over the zeromq socket * @param frames */ -case class Send(frames: immutable.Seq[Frame]) extends Request +case class Send(frames: immutable.Seq[ByteString]) extends Request /** * A message received over the 
zeromq socket * @param frames */ -case class ZMQMessage(frames: immutable.Seq[Frame]) { - - def this(frame: Frame) = this(List(frame)) - def this(frame1: Frame, frame2: Frame) = this(List(frame1, frame2)) - def this(frameArray: Array[Frame]) = this(frameArray.to[immutable.Seq]) - - /** - * Convert the bytes in the first frame to a String, using specified charset. - */ - def firstFrameAsString(charsetName: String): String = new String(frames.head.payload.toArray, charsetName) - /** - * Convert the bytes in the first frame to a String, using "UTF-8" charset. - */ - def firstFrameAsString: String = firstFrameAsString("UTF-8") - - def payload(frameIndex: Int): Array[Byte] = frames(frameIndex).payload.toArray +case class ZMQMessage(frames: immutable.Seq[ByteString]) { + def frame(frameIndex: Int): ByteString = frames(frameIndex) } object ZMQMessage { - def apply(bytes: Array[Byte]): ZMQMessage = new ZMQMessage(List(Frame(bytes))) - def apply(frames: Frame*): ZMQMessage = new ZMQMessage(frames.to[immutable.Seq]) - def apply(message: Message): ZMQMessage = apply(message.toByteArray) + val empty = new ZMQMessage(EmptyImmutableSeq) + + /** + * Scala API + * @param frames the frames of the returned ZMQMessage + * @return a ZMQMessage with the given frames + */ + def apply(frames: ByteString*): ZMQMessage = + if ((frames eq null) || frames.length == 0) empty else new ZMQMessage(frames.to[immutable.Seq]) + + /** + * Java API + * @param frames the frames of the returned ZMQMessage + * @return a ZMQMessage with the given frames + */ + @varargs def withFrames(frames: ByteString*): ZMQMessage = apply(frames: _*) + + def apply[T](frames: T*)(implicit converter: T ⇒ ByteString): ZMQMessage = apply(frames map converter: _*) } /** diff --git a/akka-zeromq/src/main/scala/akka/zeromq/ZMQMessageDeserializer.scala b/akka-zeromq/src/main/scala/akka/zeromq/ZMQMessageDeserializer.scala index d0141bf515..3325fc2c4b 100644 --- a/akka-zeromq/src/main/scala/akka/zeromq/ZMQMessageDeserializer.scala +++ b/akka-zeromq/src/main/scala/akka/zeromq/ZMQMessageDeserializer.scala @@ -4,24 +4,11 @@ package akka.zeromq import scala.collection.immutable - -object Frame { - def apply(bytes: Array[Byte]): Frame = new Frame(bytes) - def apply(text: String): Frame = new Frame(text) -} - -/** - * A single message frame of a zeromq message - * @param payload - */ -case class Frame(payload: immutable.Seq[Byte]) { - def this(bytes: Array[Byte]) = this(bytes.to[immutable.Seq]) - def this(text: String) = this(text.getBytes("UTF-8")) -} +import akka.util.ByteString /** * Deserializes ZeroMQ messages into an immutable sequence of frames */ class ZMQMessageDeserializer extends Deserializer { - def apply(frames: immutable.Seq[Frame]): ZMQMessage = ZMQMessage(frames) + def apply(frames: immutable.Seq[ByteString]): ZMQMessage = ZMQMessage(frames) } diff --git a/akka-zeromq/src/test/scala/akka/zeromq/ConcurrentSocketActorSpec.scala b/akka-zeromq/src/test/scala/akka/zeromq/ConcurrentSocketActorSpec.scala index 6feaffd6d6..e17ca6fb1f 100644 --- a/akka-zeromq/src/test/scala/akka/zeromq/ConcurrentSocketActorSpec.scala +++ b/akka-zeromq/src/test/scala/akka/zeromq/ConcurrentSocketActorSpec.scala @@ -9,7 +9,7 @@ import org.scalatest.matchers.MustMatchers import akka.testkit.{ TestProbe, DefaultTimeout, AkkaSpec } import scala.concurrent.duration._ import akka.actor.{ Cancellable, Actor, Props, ActorRef } -import akka.util.Timeout +import akka.util.{ ByteString, Timeout } class ConcurrentSocketActorSpec extends AkkaSpec { @@ -51,7 +51,7 @@ class 
ConcurrentSocketActorSpec extends AkkaSpec { val msgGenerator = system.scheduler.schedule(100 millis, 10 millis, new Runnable { var number = 0 def run() { - publisher ! ZMQMessage(Frame(number.toString), Frame(Nil)) + publisher ! ZMQMessage(ByteString(number.toString), ByteString.empty) number += 1 } }) @@ -60,9 +60,9 @@ class ConcurrentSocketActorSpec extends AkkaSpec { subscriberProbe.expectMsg(Connecting) val msgNumbers = subscriberProbe.receiveWhile(2 seconds) { case msg: ZMQMessage if msg.frames.size == 2 ⇒ - msg.payload(1).length must be(0) + msg.frames(1).length must be(0) msg - }.map(_.firstFrameAsString.toInt) + }.map(m ⇒ m.frames(0).utf8String.toInt) msgNumbers.length must be > 0 msgNumbers must equal(for (i ← msgNumbers.head to msgNumbers.last) yield i) } finally { @@ -88,8 +88,8 @@ class ConcurrentSocketActorSpec extends AkkaSpec { try { replierProbe.expectMsg(Connecting) - val request = ZMQMessage(Frame("Request")) - val reply = ZMQMessage(Frame("Reply")) + val request = ZMQMessage(ByteString("Request")) + val reply = ZMQMessage(ByteString("Reply")) requester ! request replierProbe.expectMsg(request) @@ -112,7 +112,7 @@ class ConcurrentSocketActorSpec extends AkkaSpec { try { pullerProbe.expectMsg(Connecting) - val message = ZMQMessage(Frame("Pushed message")) + val message = ZMQMessage(ByteString("Pushed message")) pusher ! message pullerProbe.expectMsg(message) @@ -146,7 +146,7 @@ class ConcurrentSocketActorSpec extends AkkaSpec { case _ ⇒ val payload = "%s".format(messageNumber) messageNumber += 1 - actorRef ! ZMQMessage(payload.getBytes) + actorRef ! ZMQMessage(ByteString(payload)) } } } diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala index ef67a662be..db30222621 100644 --- a/project/AkkaBuild.scala +++ b/project/AkkaBuild.scala @@ -30,7 +30,8 @@ object AkkaBuild extends Build { organization := "com.typesafe.akka", version := "2.2-SNAPSHOT", // FIXME: use 2.10.0 for final - scalaVersion := System.getProperty("akka.scalaVersion", "2.10.0-RC2") + // Also change ScalaVersion in akka-sbt-plugin/sample/project/Build.scala + scalaVersion := System.getProperty("akka.scalaVersion", "2.10.0-RC3") ) lazy val akka = Project( @@ -51,7 +52,7 @@ object AkkaBuild extends Build { |import com.typesafe.config.ConfigFactory |import scala.concurrent.duration._ |import akka.util.Timeout - |val config = ConfigFactory.parseString("akka.stdout-loglevel=INFO,akka.loglevel=DEBUG") + |val config = ConfigFactory.parseString("akka.stdout-loglevel=INFO,akka.loglevel=DEBUG,pinned{type=PinnedDispatcher,executor=thread-pool-executor,throughput=1000}") |val remoteConfig = ConfigFactory.parseString("akka.remote.netty{port=0,use-dispatcher-for-io=akka.actor.default-dispatcher,execution-pool-size=0},akka.actor.provider=akka.remote.RemoteActorRefProvider").withFallback(config) |var system: ActorSystem = null |implicit def _system = system @@ -97,7 +98,7 @@ object AkkaBuild extends Build { id = "akka-testkit", base = file("akka-testkit"), dependencies = Seq(actor), - settings = defaultSettings ++ Seq( + settings = defaultSettings ++ OSGi.testkit ++ Seq( libraryDependencies ++= Dependencies.testkit, initialCommands += "import akka.testkit._", previousArtifact := akkaPreviousArtifact("akka-testkit") @@ -325,7 +326,13 @@ object AkkaBuild extends Build { base = file("akka-samples/akka-sample-cluster"), dependencies = Seq(cluster, remoteTests % "test", testkit % "test"), settings = sampleSettings ++ multiJvmSettings ++ experimentalSettings ++ Seq( + // sigar is in Typesafe repo + resolvers += 
"Typesafe Repo" at "http://repo.typesafe.com/typesafe/releases/", libraryDependencies ++= Dependencies.clusterSample, + javaOptions in run ++= Seq( + "-Djava.library.path=./sigar", + "-Xms128m", "-Xmx1024m"), + Keys.fork in run := true, // disable parallel tests parallelExecution in Test := false, extraOptions in MultiJvm <<= (sourceDirectory in MultiJvm) { src => @@ -446,7 +453,9 @@ object AkkaBuild extends Build { case key: String if key.startsWith("multinode.") => "-D" + key + "=" + System.getProperty(key) case key: String if key.startsWith("akka.") => "-D" + key + "=" + System.getProperty(key) } - akkaProperties ::: (if (getBoolean("sbt.log.noformat")) List("-Dakka.test.nocolor=true") else Nil) + + "-Xmx256m" :: akkaProperties ::: + (if (getBoolean("sbt.log.noformat")) List("-Dakka.test.nocolor=true") else Nil) } // for excluding tests by name use system property: -Dakka.test.names.exclude=TimingSpec @@ -549,6 +558,7 @@ object AkkaBuild extends Build { case BinVer(bv) => bv case _ => s }), + "sigarVersion" -> Dependencies.Compile.sigar.revision, "github" -> "http://github.com/akka/akka/tree/%s".format((if (isSnapshot) "master" else "v" + v)) ) }, @@ -621,13 +631,13 @@ object AkkaBuild extends Build { val fileMailbox = exports(Seq("akka.actor.mailbox.filebased.*")) - val mailboxesCommon = exports(Seq("akka.actor.mailbox.*")) + val mailboxesCommon = exports(Seq("akka.actor.mailbox.*"), imports = Seq(protobufImport())) val osgi = exports(Seq("akka.osgi")) ++ Seq(OsgiKeys.privatePackage := Seq("akka.osgi.impl")) val osgiAries = exports() ++ Seq(OsgiKeys.privatePackage := Seq("akka.osgi.aries.*")) - val remote = exports(Seq("akka.remote.*")) + val remote = exports(Seq("akka.remote.*"), imports = Seq(protobufImport())) val slf4j = exports(Seq("akka.event.slf4j.*")) @@ -635,16 +645,19 @@ object AkkaBuild extends Build { val transactor = exports(Seq("akka.transactor.*")) - val zeroMQ = exports(Seq("akka.zeromq.*")) + val testkit = exports(Seq("akka.testkit.*")) - def exports(packages: Seq[String] = Seq()) = osgiSettings ++ Seq( - OsgiKeys.importPackage := defaultImports, + val zeroMQ = exports(Seq("akka.zeromq.*"), imports = Seq(protobufImport()) ) + + def exports(packages: Seq[String] = Seq(), imports: Seq[String] = Nil) = osgiSettings ++ Seq( + OsgiKeys.importPackage := imports ++ defaultImports, OsgiKeys.exportPackage := packages ) def defaultImports = Seq("!sun.misc", akkaImport(), configImport(), scalaImport(), "*") def akkaImport(packageName: String = "akka.*") = "%s;version=\"[2.1,2.2)\"".format(packageName) def configImport(packageName: String = "com.typesafe.config.*") = "%s;version=\"[0.4.1,1.1.0)\"".format(packageName) + def protobufImport(packageName: String = "com.google.protobuf.*") = "%s;version=\"[2.4.0,2.5.0)\"".format(packageName) def scalaImport(packageName: String = "scala.*") = "%s;version=\"[2.10,2.11)\"".format(packageName) } } @@ -672,6 +685,9 @@ object Dependencies { // Camel Sample val camelJetty = "org.apache.camel" % "camel-jetty" % camelCore.revision // ApacheV2 + // Cluster Sample + val sigar = "org.hyperic" % "sigar" % "1.6.4" // ApacheV2 + // Test object Test { @@ -680,7 +696,7 @@ object Dependencies { val junit = "junit" % "junit" % "4.10" % "test" // Common Public License 1.0 val logback = "ch.qos.logback" % "logback-classic" % "1.0.7" % "test" // EPL 1.0 / LGPL 2.1 val mockito = "org.mockito" % "mockito-all" % "1.8.1" % "test" // MIT - val scalatest = "org.scalatest" % "scalatest" % "1.8-B2" % "test" cross CrossVersion.full // ApacheV2 + val scalatest = 
"org.scalatest" % "scalatest" % "1.8-B1" % "test" cross CrossVersion.full // ApacheV2 val scalacheck = "org.scalacheck" % "scalacheck" % "1.10.0" % "test" cross CrossVersion.full // New BSD val ariesProxy = "org.apache.aries.proxy" % "org.apache.aries.proxy.impl" % "0.3" % "test" // ApacheV2 val pojosr = "com.googlecode.pojosr" % "de.kalpatec.pojosr.framework" % "0.1.4" % "test" // ApacheV2 @@ -728,7 +744,7 @@ object Dependencies { val zeroMQ = Seq(protobuf, zeroMQClient, Test.scalatest, Test.junit) - val clusterSample = Seq(Test.scalatest) + val clusterSample = Seq(Test.scalatest, sigar) val contrib = Seq(Test.junitIntf) diff --git a/project/Dist.scala b/project/Dist.scala index 53fd40fed2..86d4263346 100644 --- a/project/Dist.scala +++ b/project/Dist.scala @@ -57,7 +57,6 @@ object Dist { val base = unzipped / ("akka-" + version) val distBase = projectBase / "akka-kernel" / "src" / "main" / "dist" val deploy = base / "deploy" - val deployReadme = deploy / "readme" val doc = base / "doc" / "akka" val api = doc / "api" val docs = doc / "docs"