diff --git a/.gitignore b/.gitignore index 980881bca9..0863ff8a05 100755 --- a/.gitignore +++ b/.gitignore @@ -1,72 +1,73 @@ -*.vim -*~ *# -src_managed -activemq-data -project/akka-build.properties -project/plugins/project -project/boot/* -*/project/build/target +*.iml +*.ipr +*.iws +*.pyc +*.tm.epoch +*.vim */project/boot +*/project/build/target */project/project.target.config-classes -lib_managed -etags -tags +*~ +.#* +.*.swp +.DS_Store +.cache +.cache +.classpath +.codefellow +.ensime* +.eprj +.history +.idea +.manager +.multi-jvm +.project +.scala_dependencies +.scalastyle +.settings .tags .tags_sorted_by_file -TAGS -akka.tmproj -reports -target -deploy/*.jar -.history -data -out -logs -.#* -.codefellow -storage -.ensime* -_dump -.manager -manifest.mf -semantic.cache -tm*.log -tm*.lck -tm.out -*.tm.epoch -.DS_Store -*.iws -*.ipr -*.iml -run-codefellow -.project -.settings -.classpath -.cache -.idea -.scala_dependencies -.cache -multiverse.log -.eprj -.*.swp -akka-docs/_build/ -akka-docs/rst_preprocessed/ -akka-contrib/rst_preprocessed/ -*.pyc -akka-docs/exts/ -_akka_cluster/ +.target +.worksheet Makefile +TAGS +_akka_cluster/ +_dump +_mb +activemq-data +akka-contrib/rst_preprocessed/ +akka-docs/_build/ +akka-docs/exts/ +akka-docs/rst_preprocessed/ +akka-osgi/src/main/resources/*.conf akka.sublime-project akka.sublime-workspace -.target -.multi-jvm -_mb -schoir.props -worker*.log -mongoDB/ -redis/ +akka.tmproj beanstalk/ -.scalastyle bin/ -.worksheet +data +deploy/*.jar +etags +lib_managed +logs +manifest.mf +mongoDB/ +multiverse.log +out +project/akka-build.properties +project/boot/* +project/plugins/project +redis/ +reports +run-codefellow +schoir.props +semantic.cache +src_managed +storage +tags +target +tm*.lck +tm*.log +tm.out +worker*.log diff --git a/akka-actor-tests/src/test/java/akka/actor/JavaAPI.java b/akka-actor-tests/src/test/java/akka/actor/JavaAPI.java index 80563b679f..16b92e9abc 100644 --- a/akka-actor-tests/src/test/java/akka/actor/JavaAPI.java 
+++ b/akka-actor-tests/src/test/java/akka/actor/JavaAPI.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.actor; diff --git a/akka-actor-tests/src/test/java/akka/actor/JavaExtension.java b/akka-actor-tests/src/test/java/akka/actor/JavaExtension.java index b87ace5849..5a054a81b8 100644 --- a/akka-actor-tests/src/test/java/akka/actor/JavaExtension.java +++ b/akka-actor-tests/src/test/java/akka/actor/JavaExtension.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.actor; diff --git a/akka-actor-tests/src/test/java/akka/actor/NonPublicClass.java b/akka-actor-tests/src/test/java/akka/actor/NonPublicClass.java index 047492e00f..ac21542c46 100644 --- a/akka-actor-tests/src/test/java/akka/actor/NonPublicClass.java +++ b/akka-actor-tests/src/test/java/akka/actor/NonPublicClass.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.actor; @@ -14,4 +14,4 @@ class MyNonPublicActorClass extends UntypedActor { @Override public void onReceive(Object msg) { getSender().tell(msg, getSelf()); } -} \ No newline at end of file +} diff --git a/akka-actor-tests/src/test/java/akka/routing/CustomRouteTest.java b/akka-actor-tests/src/test/java/akka/routing/CustomRouteTest.java index c0ccd4de26..2eca39d70b 100644 --- a/akka-actor-tests/src/test/java/akka/routing/CustomRouteTest.java +++ b/akka-actor-tests/src/test/java/akka/routing/CustomRouteTest.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.routing; diff --git a/akka-actor-tests/src/test/java/akka/util/JavaDuration.java b/akka-actor-tests/src/test/java/akka/util/JavaDuration.java index 326afb8543..34e7c61353 100644 --- a/akka-actor-tests/src/test/java/akka/util/JavaDuration.java +++ b/akka-actor-tests/src/test/java/akka/util/JavaDuration.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.util; diff --git a/akka-actor-tests/src/test/scala/akka/Messages.scala b/akka-actor-tests/src/test/scala/akka/Messages.scala index 32f80c7b24..1a4867374a 100644 --- a/akka-actor-tests/src/test/scala/akka/Messages.scala +++ b/akka-actor-tests/src/test/scala/akka/Messages.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorConfigurationVerificationSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorConfigurationVerificationSpec.scala index c130d23149..1466c9e969 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ActorConfigurationVerificationSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ActorConfigurationVerificationSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.actor diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorDSLSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorDSLSpec.scala index 2aba0e18d4..29ad6775f3 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ActorDSLSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ActorDSLSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.actor diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorFireForgetRequestReplySpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorFireForgetRequestReplySpec.scala index 93e17d3192..277b844319 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ActorFireForgetRequestReplySpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ActorFireForgetRequestReplySpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.actor diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorLifeCycleSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorLifeCycleSpec.scala index 430a64172a..bcfc9fa0e8 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ActorLifeCycleSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ActorLifeCycleSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.actor @@ -119,6 +119,16 @@ class ActorLifeCycleSpec extends AkkaSpec with BeforeAndAfterEach with ImplicitS system.stop(supervisor) } + "log failues in postStop" in { + val a = system.actorOf(Props(new Actor { + def receive = Actor.emptyBehavior + override def postStop { throw new Exception("hurrah") } + })) + EventFilter[Exception]("hurrah", occurrences = 1) intercept { + a ! PoisonPill + } + } + "clear the behavior stack upon restart" in { case class Become(recv: ActorContext ⇒ Receive) val a = system.actorOf(Props(new Actor { diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorLookupSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorLookupSpec.scala index 4d19f5ea9e..02b69f83d8 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ActorLookupSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ActorLookupSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.actor @@ -312,4 +312,4 @@ class ActorLookupSpec extends AkkaSpec with DefaultTimeout { } -} \ No newline at end of file +} diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorRefSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorRefSpec.scala index a1da055cf6..ae6532acea 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ActorRefSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ActorRefSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.actor @@ -145,7 +145,7 @@ class ActorRefSpec extends AkkaSpec with DefaultTimeout { def contextStackMustBeEmpty = ActorCell.contextStack.get.headOption must be === None - filterException[akka.actor.ActorInitializationException] { + EventFilter[ActorInitializationException](occurrences = 1) intercept { intercept[akka.actor.ActorInitializationException] { wrap(result ⇒ actorOf(Props(new Actor { @@ -155,49 +155,63 @@ class ActorRefSpec extends AkkaSpec with DefaultTimeout { } contextStackMustBeEmpty + } + EventFilter[ActorInitializationException](occurrences = 1) intercept { intercept[akka.actor.ActorInitializationException] { wrap(result ⇒ actorOf(Props(promiseIntercept(new FailingOuterActor(actorOf(Props(new InnerActor))))(result)))) } contextStackMustBeEmpty + } + EventFilter[ActorInitializationException](occurrences = 1) intercept { intercept[akka.actor.ActorInitializationException] { wrap(result ⇒ actorOf(Props(new OuterActor(actorOf(Props(promiseIntercept(new FailingInnerActor)(result))))))) } contextStackMustBeEmpty + } + EventFilter[ActorInitializationException](occurrences = 1) intercept { intercept[akka.actor.ActorInitializationException] { wrap(result ⇒ actorOf(Props(promiseIntercept(new FailingInheritingOuterActor(actorOf(Props(new InnerActor))))(result)))) } contextStackMustBeEmpty + } + EventFilter[ActorInitializationException](occurrences = 2) intercept { 
intercept[akka.actor.ActorInitializationException] { wrap(result ⇒ actorOf(Props(new FailingOuterActor(actorOf(Props(promiseIntercept(new FailingInheritingInnerActor)(result))))))) } contextStackMustBeEmpty + } + EventFilter[ActorInitializationException](occurrences = 2) intercept { intercept[akka.actor.ActorInitializationException] { wrap(result ⇒ actorOf(Props(new FailingInheritingOuterActor(actorOf(Props(promiseIntercept(new FailingInheritingInnerActor)(result))))))) } contextStackMustBeEmpty + } + EventFilter[ActorInitializationException](occurrences = 2) intercept { intercept[akka.actor.ActorInitializationException] { wrap(result ⇒ actorOf(Props(new FailingInheritingOuterActor(actorOf(Props(promiseIntercept(new FailingInnerActor)(result))))))) } contextStackMustBeEmpty + } + EventFilter[ActorInitializationException](occurrences = 1) intercept { intercept[akka.actor.ActorInitializationException] { wrap(result ⇒ actorOf(Props(new OuterActor(actorOf(Props(new InnerActor { @@ -206,21 +220,27 @@ class ActorRefSpec extends AkkaSpec with DefaultTimeout { } contextStackMustBeEmpty + } + EventFilter[ActorInitializationException](occurrences = 2) intercept { intercept[akka.actor.ActorInitializationException] { wrap(result ⇒ actorOf(Props(new FailingOuterActor(actorOf(Props(promiseIntercept(new FailingInheritingInnerActor)(result))))))) } contextStackMustBeEmpty + } + EventFilter[ActorInitializationException](occurrences = 1) intercept { intercept[akka.actor.ActorInitializationException] { wrap(result ⇒ actorOf(Props(new OuterActor(actorOf(Props(promiseIntercept(new FailingInheritingInnerActor)(result))))))) } contextStackMustBeEmpty + } + EventFilter[ActorInitializationException](occurrences = 1) intercept { intercept[akka.actor.ActorInitializationException] { wrap(result ⇒ actorOf(Props(new OuterActor(actorOf(Props(promiseIntercept({ new InnerActor; new InnerActor })(result))))))) diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorSystemSpec.scala 
b/akka-actor-tests/src/test/scala/akka/actor/ActorSystemSpec.scala index 781b8d4cab..486761279a 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ActorSystemSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ActorSystemSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.actor @@ -270,6 +270,7 @@ class ActorSystemSpec extends AkkaSpec(ActorSystemSpec.config) with ImplicitSend val t = probe.expectMsg(Terminated(a)(existenceConfirmed = true, addressTerminated = false)) t.existenceConfirmed must be(true) t.addressTerminated must be(false) + system.shutdown() } "shut down when /user escalates" in { diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorTimeoutSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorTimeoutSpec.scala index 965a99319d..bd78f849c8 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ActorTimeoutSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ActorTimeoutSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.actor diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorWithBoundedStashSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorWithBoundedStashSpec.scala index 4d95bf02f6..ae683b648b 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ActorWithBoundedStashSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ActorWithBoundedStashSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.actor diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorWithStashSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorWithStashSpec.scala index c4d9248d88..57f08041a0 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ActorWithStashSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ActorWithStashSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.actor diff --git a/akka-actor-tests/src/test/scala/akka/actor/DeathWatchSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/DeathWatchSpec.scala index d01848943f..54d30d4b65 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/DeathWatchSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/DeathWatchSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.actor diff --git a/akka-actor-tests/src/test/scala/akka/actor/DeployerSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/DeployerSpec.scala index 954337431c..f5f916a90d 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/DeployerSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/DeployerSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.actor diff --git a/akka-actor-tests/src/test/scala/akka/actor/FSMActorSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/FSMActorSpec.scala index b503ddede4..a89c6a931e 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/FSMActorSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/FSMActorSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.actor diff --git a/akka-actor-tests/src/test/scala/akka/actor/FSMTimingSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/FSMTimingSpec.scala index e5436d4e9c..c90dde277d 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/FSMTimingSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/FSMTimingSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.actor diff --git a/akka-actor-tests/src/test/scala/akka/actor/FSMTransitionSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/FSMTransitionSpec.scala index 04a0eea352..783efc8f1f 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/FSMTransitionSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/FSMTransitionSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.actor @@ -87,7 +87,7 @@ class FSMTransitionSpec extends AkkaSpec with ImplicitSender { "make previous and next state data available in onTransition" in { val fsm = system.actorOf(Props(new OtherFSM(testActor))) - within(300 millis) { + within(1 second) { fsm ! "tick" expectMsg((0, 1)) } diff --git a/akka-actor-tests/src/test/scala/akka/actor/ForwardActorSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ForwardActorSpec.scala index 40c652c3ec..965e55f897 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ForwardActorSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ForwardActorSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.actor diff --git a/akka-actor-tests/src/test/scala/akka/actor/HotSwapSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/HotSwapSpec.scala index 120caa3e93..354f23c76f 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/HotSwapSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/HotSwapSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.actor diff --git a/akka-actor-tests/src/test/scala/akka/actor/IOActor.scala b/akka-actor-tests/src/test/scala/akka/actor/IOActor.scala index 5cd9075e38..fffc6896d9 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/IOActor.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/IOActor.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.actor diff --git a/akka-actor-tests/src/test/scala/akka/actor/JavaAPISpec.scala b/akka-actor-tests/src/test/scala/akka/actor/JavaAPISpec.scala index ca7210beaf..35f852ea77 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/JavaAPISpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/JavaAPISpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.actor diff --git a/akka-actor-tests/src/test/scala/akka/actor/LocalActorRefProviderSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/LocalActorRefProviderSpec.scala index 4cb432aa23..314739e78e 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/LocalActorRefProviderSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/LocalActorRefProviderSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.actor diff --git a/akka-actor-tests/src/test/scala/akka/actor/ReceiveTimeoutSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ReceiveTimeoutSpec.scala index f34dbda9e3..0e77d4a7a9 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ReceiveTimeoutSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ReceiveTimeoutSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.actor diff --git a/akka-actor-tests/src/test/scala/akka/actor/RelativeActorPathSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/RelativeActorPathSpec.scala index 6870a36125..fd076463c7 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/RelativeActorPathSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/RelativeActorPathSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.actor diff --git a/akka-actor-tests/src/test/scala/akka/actor/RestartStrategySpec.scala b/akka-actor-tests/src/test/scala/akka/actor/RestartStrategySpec.scala index 190c738f83..169c84b8e5 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/RestartStrategySpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/RestartStrategySpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.actor diff --git a/akka-actor-tests/src/test/scala/akka/actor/SchedulerSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/SchedulerSpec.scala index ba34987c9c..8d12f58a46 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/SchedulerSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/SchedulerSpec.scala @@ -1,37 +1,42 @@ +/** + * Copyright (C) 2009-2013 Typesafe Inc. 
+ */ + package akka.actor import language.postfixOps - -import org.scalatest.BeforeAndAfterEach -import scala.concurrent.duration._ -import java.util.concurrent.{ CountDownLatch, ConcurrentLinkedQueue, TimeUnit } -import akka.testkit._ -import scala.concurrent.Await -import akka.pattern.ask +import java.io.Closeable +import java.util.concurrent._ import java.util.concurrent.atomic.AtomicInteger +import scala.concurrent.{ future, Await, ExecutionContext } +import scala.concurrent.duration._ +import scala.concurrent.forkjoin.ThreadLocalRandom +import scala.util.Try +import scala.util.control.NonFatal +import org.scalatest.BeforeAndAfterEach +import com.typesafe.config.{ Config, ConfigFactory } +import akka.pattern.ask +import akka.testkit._ -@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) -class SchedulerSpec extends AkkaSpec with BeforeAndAfterEach with DefaultTimeout with ImplicitSender { - private val cancellables = new ConcurrentLinkedQueue[Cancellable]() +object SchedulerSpec { + val testConf = ConfigFactory.parseString(""" + akka.scheduler.implementation = akka.actor.DefaultScheduler + akka.scheduler.ticks-per-wheel = 32 + """).withFallback(AkkaSpec.testConf) + + val testConfRevolver = ConfigFactory.parseString(""" + akka.scheduler.implementation = akka.actor.LightArrayRevolverScheduler + """).withFallback(testConf) +} + +trait SchedulerSpec extends BeforeAndAfterEach with DefaultTimeout with ImplicitSender { this: AkkaSpec ⇒ import system.dispatcher - def collectCancellable(c: Cancellable): Cancellable = { - cancellables.add(c) - c - } - - override def afterEach { - while (cancellables.peek() ne null) { - for (c ← Option(cancellables.poll())) { - c.cancel() - c.isCancelled must be === true - } - } - } + def collectCancellable(c: Cancellable): Cancellable "A Scheduler" must { - "schedule more than once" in { + "schedule more than once" taggedAs TimingTest in { case object Tick case object Tock @@ -76,7 +81,7 @@ class SchedulerSpec extends 
AkkaSpec with BeforeAndAfterEach with DefaultTimeout expectNoMsg(500 millis) } - "schedule once" in { + "schedule once" taggedAs TimingTest in { case object Tick val countDownLatch = new CountDownLatch(3) val tickActor = system.actorOf(Props(new Actor { @@ -100,7 +105,7 @@ class SchedulerSpec extends AkkaSpec with BeforeAndAfterEach with DefaultTimeout /** * ticket #372 */ - "be cancellable" in { + "be cancellable" taggedAs TimingTest in { for (_ ← 1 to 10) system.scheduler.scheduleOnce(1 second, testActor, "fail").cancel() expectNoMsg(2 seconds) @@ -124,12 +129,12 @@ class SchedulerSpec extends AkkaSpec with BeforeAndAfterEach with DefaultTimeout "be cancellable after initial delay" taggedAs TimingTest in { val ticks = new AtomicInteger - val initialDelay = 20.milliseconds.dilated - val delay = 200.milliseconds.dilated + val initialDelay = 90.milliseconds.dilated + val delay = 500.milliseconds.dilated val timeout = collectCancellable(system.scheduler.schedule(initialDelay, delay) { ticks.incrementAndGet() }) - Thread.sleep((initialDelay + 100.milliseconds.dilated).toMillis) + Thread.sleep((initialDelay + 200.milliseconds.dilated).toMillis) timeout.cancel() Thread.sleep((delay + 100.milliseconds.dilated).toMillis) @@ -139,7 +144,7 @@ class SchedulerSpec extends AkkaSpec with BeforeAndAfterEach with DefaultTimeout /** * ticket #307 */ - "pick up schedule after actor restart" in { + "pick up schedule after actor restart" taggedAs TimingTest in { object Ping object Crash @@ -169,7 +174,7 @@ class SchedulerSpec extends AkkaSpec with BeforeAndAfterEach with DefaultTimeout Await.ready(pingLatch, 5 seconds) } - "never fire prematurely" in { + "never fire prematurely" taggedAs TimingTest in { val ticks = new TestLatch(300) case class Msg(ts: Long) @@ -205,7 +210,10 @@ class SchedulerSpec extends AkkaSpec with BeforeAndAfterEach with DefaultTimeout collectCancellable(system.scheduler.schedule(1 second, 300 milliseconds, actor, Msg)) Await.ready(ticks, 3 seconds) - 
(System.nanoTime() - startTime).nanos.toMillis must be(1800L plusOrMinus 199) + // LARS is a bit more aggressive in scheduling recurring tasks at the right + // frequency and may execute them a little earlier; the actual expected timing + // is 1599ms on a fast machine or 1699ms on a loaded one (plus some room for jenkins) + (System.nanoTime() - startTime).nanos.toMillis must be(1750L plusOrMinus 250) } "adjust for scheduler inaccuracy" taggedAs TimingTest in { @@ -230,5 +238,276 @@ class SchedulerSpec extends AkkaSpec with BeforeAndAfterEach with DefaultTimeout // Rate n * 1000.0 / (System.nanoTime - startTime).nanos.toMillis must be(4.4 plusOrMinus 0.3) } + + "handle timeouts equal to multiple of wheel period" taggedAs TimingTest in { + val timeout = 3200 milliseconds + val barrier = TestLatch() + import system.dispatcher + val job = system.scheduler.scheduleOnce(timeout)(barrier.countDown()) + try { + Await.ready(barrier, 5000 milliseconds) + } finally { + job.cancel() + } + } + + "survive being stressed without cancellation" taggedAs TimingTest in { + val r = ThreadLocalRandom.current() + val N = 100000 + for (_ ← 1 to N) { + val next = r.nextInt(3000) + val now = System.nanoTime + system.scheduler.scheduleOnce(next.millis) { + val stop = System.nanoTime + testActor ! 
(stop - now - next * 1000000L) + } + } + val latencies = within(5.seconds) { + for (i ← 1 to N) yield try expectMsgType[Long] catch { + case NonFatal(e) ⇒ throw new Exception(s"failed expecting the $i-th latency", e) + } + } + val histogram = latencies groupBy (_ / 100000000L) + for (k ← histogram.keys.toSeq.sorted) { + system.log.info(f"${k * 100}%3d: ${histogram(k).size}") + } + } } } + +class DefaultSchedulerSpec extends AkkaSpec(SchedulerSpec.testConf) with SchedulerSpec { + private val cancellables = new ConcurrentLinkedQueue[Cancellable]() + + "A HashedWheelTimer" must { + + "not mess up long timeouts" taggedAs LongRunningTest in { + val longish = Long.MaxValue.nanos + val barrier = TestLatch() + import system.dispatcher + val job = system.scheduler.scheduleOnce(longish)(barrier.countDown()) + intercept[TimeoutException] { + // this used to fire after 46 seconds due to wrap-around + Await.ready(barrier, 90 seconds) + } + job.cancel() + } + + } + + def collectCancellable(c: Cancellable): Cancellable = { + cancellables.add(c) + c + } + + override def afterEach { + while (cancellables.peek() ne null) { + for (c ← Option(cancellables.poll())) { + c.cancel() + c.isCancelled must be === true + } + } + } +} + +class LightArrayRevolverSchedulerSpec extends AkkaSpec(SchedulerSpec.testConfRevolver) with SchedulerSpec { + + def collectCancellable(c: Cancellable): Cancellable = c + + "A LightArrayRevolverScheduler" must { + + "survive being stressed with cancellation" taggedAs TimingTest in { + import system.dispatcher + val r = ThreadLocalRandom.current + val N = 1000000 + val tasks = for (_ ← 1 to N) yield { + val next = r.nextInt(3000) + val now = System.nanoTime + system.scheduler.scheduleOnce(next.millis) { + val stop = System.nanoTime + testActor ! 
(stop - now - next * 1000000L) + } + } + // get somewhat into the middle of things + Thread.sleep(500) + val cancellations = for (t ← tasks) yield { + t.cancel() + if (t.isCancelled) 1 else 0 + } + val cancelled = cancellations.sum + println(cancelled) + val latencies = within(5.seconds) { + for (i ← 1 to (N - cancelled)) yield try expectMsgType[Long] catch { + case NonFatal(e) ⇒ throw new Exception(s"failed expecting the $i-th latency", e) + } + } + val histogram = latencies groupBy (_ / 100000000L) + for (k ← histogram.keys.toSeq.sorted) { + system.log.info(f"${k * 100}%3d: ${histogram(k).size}") + } + expectNoMsg(1.second) + } + + "survive vicious enqueueing" in { + withScheduler(config = ConfigFactory.parseString("akka.scheduler.ticks-per-wheel=2")) { (sched, driver) ⇒ + import driver._ + import system.dispatcher + val counter = new AtomicInteger + val terminated = future { + var rounds = 0 + while (Try(sched.scheduleOnce(Duration.Zero)(())(localEC)).isSuccess) { + Thread.sleep(1) + driver.wakeUp(step) + rounds += 1 + } + rounds + } + def delay = if (ThreadLocalRandom.current.nextBoolean) step * 2 else step + val N = 1000000 + (1 to N) foreach (_ ⇒ sched.scheduleOnce(delay)(counter.incrementAndGet())) + sched.close() + Await.result(terminated, 3.seconds.dilated) must be > 10 + awaitCond(counter.get == N) + } + } + + "execute multiple jobs at once when expiring multiple buckets" in { + withScheduler() { (sched, driver) ⇒ + implicit def ec = localEC + import driver._ + val start = step / 2 + (0 to 3) foreach (i ⇒ sched.scheduleOnce(start + step * i, testActor, "hello")) + expectNoMsg(step) + wakeUp(step) + expectWait(step) + wakeUp(step * 4 + step / 2) + expectWait(step / 2) + (0 to 3) foreach (_ ⇒ expectMsg(Duration.Zero, "hello")) + } + } + + "correctly wrap around wheel rounds" in { + withScheduler(config = ConfigFactory.parseString("akka.scheduler.ticks-per-wheel=2")) { (sched, driver) ⇒ + implicit def ec = localEC + import driver._ + val start = step / 2 + 
(0 to 3) foreach (i ⇒ sched.scheduleOnce(start + step * i, probe.ref, "hello")) + probe.expectNoMsg(step) + wakeUp(step) + expectWait(step) + // the following are no for-comp to see which iteration fails + wakeUp(step) + probe.expectMsg("hello") + expectWait(step) + wakeUp(step) + probe.expectMsg("hello") + expectWait(step) + wakeUp(step) + probe.expectMsg("hello") + expectWait(step) + wakeUp(step) + probe.expectMsg("hello") + expectWait(step) + wakeUp(step) + expectWait(step) + } + } + + "correctly execute jobs when clock wraps around" in { + withScheduler(Long.MaxValue - 200000000L) { (sched, driver) ⇒ + implicit def ec = localEC + import driver._ + val start = step / 2 + (0 to 3) foreach (i ⇒ sched.scheduleOnce(start + step * i, testActor, "hello")) + expectNoMsg(step) + wakeUp(step) + expectWait(step) + // the following are no for-comp to see which iteration fails + wakeUp(step) + expectMsg("hello") + expectWait(step) + wakeUp(step) + expectMsg("hello") + expectWait(step) + wakeUp(step) + expectMsg("hello") + expectWait(step) + wakeUp(step) + expectMsg("hello") + expectWait(step) + wakeUp(step) + expectWait(step) + } + } + + "reliably reject jobs when shutting down" in { + withScheduler() { (sched, driver) ⇒ + import system.dispatcher + val counter = new AtomicInteger + future { Thread.sleep(5); sched.close() } + val headroom = 200 + var overrun = headroom + val cap = 1000000 + val (success, failure) = Iterator + .continually(Try(sched.scheduleOnce(100.millis)(counter.incrementAndGet()))) + .take(cap) + .takeWhile(_.isSuccess || { overrun -= 1; overrun >= 0 }) + .partition(_.isSuccess) + val s = success.size + s must be < cap + awaitCond(s == counter.get, message = s"$s was not ${counter.get}") + failure.size must be === headroom + } + } + } + + trait Driver { + def wakeUp(d: FiniteDuration): Unit + def expectWait(): FiniteDuration + def expectWait(d: FiniteDuration) { expectWait() must be(d) } + def probe: TestProbe + def step: FiniteDuration + } + + val 
localEC = new ExecutionContext { + def execute(runnable: Runnable) { runnable.run() } + def reportFailure(t: Throwable) { t.printStackTrace() } + } + + def withScheduler(start: Long = 0L, config: Config = ConfigFactory.empty)(thunk: (Scheduler with Closeable, Driver) ⇒ Unit): Unit = { + import akka.actor.{ LightArrayRevolverScheduler ⇒ LARS } + val lbq = new LinkedBlockingQueue[Long] + val prb = TestProbe() + val tf = system.asInstanceOf[ActorSystemImpl].threadFactory + val sched = + new { @volatile var time = start } with LARS(config.withFallback(system.settings.config), log, tf) { + override protected def clock(): Long = { + // println(s"clock=$time") + time + } + override protected def waitNanos(ns: Long): Unit = { + // println(s"waiting $ns") + prb.ref ! ns + try time += lbq.take() + catch { + case _: InterruptedException ⇒ Thread.currentThread.interrupt() + } + } + } + val driver = new Driver { + def wakeUp(d: FiniteDuration) { lbq.offer(d.toNanos) } + def expectWait(): FiniteDuration = probe.expectMsgType[Long].nanos + def probe = prb + def step = sched.TickDuration + } + driver.expectWait() + try thunk(sched, driver) + catch { + case NonFatal(ex) ⇒ + try sched.close() + catch { case _: Exception ⇒ } + throw ex + } + sched.close() + } + +} diff --git a/akka-actor-tests/src/test/scala/akka/actor/Supervisor.scala b/akka-actor-tests/src/test/scala/akka/actor/Supervisor.scala index 80524ea9dc..6b16cdfbb6 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/Supervisor.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/Supervisor.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.actor diff --git a/akka-actor-tests/src/test/scala/akka/actor/SupervisorHierarchySpec.scala b/akka-actor-tests/src/test/scala/akka/actor/SupervisorHierarchySpec.scala index 722105ebec..095305dce4 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/SupervisorHierarchySpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/SupervisorHierarchySpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.actor diff --git a/akka-actor-tests/src/test/scala/akka/actor/SupervisorMiscSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/SupervisorMiscSpec.scala index 070a5aba51..52b50491ee 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/SupervisorMiscSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/SupervisorMiscSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.actor diff --git a/akka-actor-tests/src/test/scala/akka/actor/SupervisorSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/SupervisorSpec.scala index eafb47c47d..7b7c36e0a5 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/SupervisorSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/SupervisorSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.actor diff --git a/akka-actor-tests/src/test/scala/akka/actor/SupervisorTreeSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/SupervisorTreeSpec.scala index 96e063a383..f10ae024f3 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/SupervisorTreeSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/SupervisorTreeSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.actor diff --git a/akka-actor-tests/src/test/scala/akka/actor/Ticket669Spec.scala b/akka-actor-tests/src/test/scala/akka/actor/Ticket669Spec.scala index cca4652de9..f04b01d0ae 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/Ticket669Spec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/Ticket669Spec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.actor diff --git a/akka-actor-tests/src/test/scala/akka/actor/TypedActorSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/TypedActorSpec.scala index 5522eb0b67..83e4e194d0 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/TypedActorSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/TypedActorSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.actor @@ -211,7 +211,7 @@ class TypedActorSpec extends AkkaSpec(TypedActorSpec.config) import TypedActorSpec._ - def newFooBar: Foo = newFooBar(Duration(2, "s")) + def newFooBar: Foo = newFooBar(timeout.duration) def newFooBar(d: FiniteDuration): Foo = TypedActor(system).typedActorOf(TypedProps[Bar](classOf[Foo], classOf[Bar]).withTimeout(Timeout(d))) @@ -221,7 +221,7 @@ class TypedActorSpec extends AkkaSpec(TypedActorSpec.config) def newStacked(): Stacked = TypedActor(system).typedActorOf( - TypedProps[StackedImpl](classOf[Stacked], classOf[StackedImpl]).withTimeout(Timeout(2000))) + TypedProps[StackedImpl](classOf[Stacked], classOf[StackedImpl]).withTimeout(timeout)) def mustStop(typedActor: AnyRef) = TypedActor(system).stop(typedActor) must be(true) @@ -296,7 +296,7 @@ class TypedActorSpec extends AkkaSpec(TypedActorSpec.config) t.nullJOption() must be === JOption.none t.nullOption() must be === None t.nullReturn() must be === null - Await.result(t.nullFuture(), remaining) must be === null + Await.result(t.nullFuture(), timeout.duration) must be === null } "be able to call 
Future-returning methods non-blockingly" in { @@ -307,11 +307,11 @@ class TypedActorSpec extends AkkaSpec(TypedActorSpec.config) mustStop(t) } - "be able to call multiple Future-returning methods non-blockingly" in { + "be able to call multiple Future-returning methods non-blockingly" in within(timeout.duration) { val t = newFooBar val futures = for (i ← 1 to 20) yield (i, t.futurePigdog(20, i)) for ((i, f) ← futures) { - Await.result(f, timeout.duration) must be("Pigdog" + i) + Await.result(f, remaining) must be("Pigdog" + i) } mustStop(t) } @@ -330,11 +330,11 @@ class TypedActorSpec extends AkkaSpec(TypedActorSpec.config) mustStop(t) } - "be able to compose futures without blocking" in { - val t, t2 = newFooBar(2 seconds) + "be able to compose futures without blocking" in within(timeout.duration) { + val t, t2 = newFooBar(remaining) val f = t.futureComposePigdogFrom(t2) f.isCompleted must be(false) - Await.result(f, timeout.duration) must equal("PIGDOG") + Await.result(f, remaining) must equal("PIGDOG") mustStop(t) mustStop(t2) } @@ -391,13 +391,13 @@ class TypedActorSpec extends AkkaSpec(TypedActorSpec.config) mustStop(t) } - "be able to support implementation only typed actors" in { + "be able to support implementation only typed actors" in within(timeout.duration) { val t: Foo = TypedActor(system).typedActorOf(TypedProps[Bar]()) val f = t.futurePigdog(200) val f2 = t.futurePigdog(0) f2.isCompleted must be(false) f.isCompleted must be(false) - Await.result(f, timeout.duration) must equal(Await.result(f2, timeout.duration)) + Await.result(f, remaining) must equal(Await.result(f2, remaining)) mustStop(t) } @@ -408,13 +408,13 @@ class TypedActorSpec extends AkkaSpec(TypedActorSpec.config) mustStop(t) } - "be able to use balancing dispatcher" in { + "be able to use balancing dispatcher" in within(timeout.duration) { val thais = for (i ← 1 to 60) yield newFooBar("pooled-dispatcher", 6 seconds) val iterator = new CyclicIterator(thais) val results = for (i ← 1 to 120) 
yield (i, iterator.next.futurePigdog(200L, i)) - for ((i, r) ← results) Await.result(r, timeout.duration) must be("Pigdog" + i) + for ((i, r) ← results) Await.result(r, remaining) must be("Pigdog" + i) for (t ← thais) mustStop(t) } diff --git a/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala index a736003421..75e4718718 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.actor.dispatch @@ -80,21 +80,21 @@ object ActorModelSpec { } def receive = { - case AwaitLatch(latch) ⇒ ack; latch.await(); busy.switchOff() - case Meet(sign, wait) ⇒ ack; sign.countDown(); wait.await(); busy.switchOff() - case Wait(time) ⇒ ack; Thread.sleep(time); busy.switchOff() - case WaitAck(time, l) ⇒ ack; Thread.sleep(time); l.countDown(); busy.switchOff() - case Reply(msg) ⇒ ack; sender ! msg; busy.switchOff() - case TryReply(msg) ⇒ ack; sender.tell(msg, null); busy.switchOff() - case Forward(to, msg) ⇒ ack; to.forward(msg); busy.switchOff() - case CountDown(latch) ⇒ ack; latch.countDown(); busy.switchOff() - case Increment(count) ⇒ ack; count.incrementAndGet(); busy.switchOff() - case CountDownNStop(l) ⇒ ack; l.countDown(); context.stop(self); busy.switchOff() - case Restart ⇒ ack; busy.switchOff(); throw new Exception("Restart requested") - case Interrupt ⇒ ack; sender ! Status.Failure(new ActorInterruptedException(new InterruptedException("Ping!"))); busy.switchOff(); throw new InterruptedException("Ping!") - case InterruptNicely(msg) ⇒ ack; sender ! 
msg; busy.switchOff(); Thread.currentThread().interrupt() - case ThrowException(e: Throwable) ⇒ ack; busy.switchOff(); throw e - case DoubleStop ⇒ ack; context.stop(self); context.stop(self); busy.switchOff + case AwaitLatch(latch) ⇒ { ack; latch.await(); busy.switchOff() } + case Meet(sign, wait) ⇒ { ack; sign.countDown(); wait.await(); busy.switchOff() } + case Wait(time) ⇒ { ack; Thread.sleep(time); busy.switchOff() } + case WaitAck(time, l) ⇒ { ack; Thread.sleep(time); l.countDown(); busy.switchOff() } + case Reply(msg) ⇒ { ack; sender ! msg; busy.switchOff() } + case TryReply(msg) ⇒ { ack; sender.tell(msg, null); busy.switchOff() } + case Forward(to, msg) ⇒ { ack; to.forward(msg); busy.switchOff() } + case CountDown(latch) ⇒ { ack; latch.countDown(); busy.switchOff() } + case Increment(count) ⇒ { ack; count.incrementAndGet(); busy.switchOff() } + case CountDownNStop(l) ⇒ { ack; l.countDown(); context.stop(self); busy.switchOff() } + case Restart ⇒ { ack; busy.switchOff(); throw new Exception("Restart requested") } + case Interrupt ⇒ { ack; sender ! Status.Failure(new ActorInterruptedException(new InterruptedException("Ping!"))); busy.switchOff(); throw new InterruptedException("Ping!") } + case InterruptNicely(msg) ⇒ { ack; sender ! 
msg; busy.switchOff(); Thread.currentThread().interrupt() } + case ThrowException(e: Throwable) ⇒ { ack; busy.switchOff(); throw e } + case DoubleStop ⇒ { ack; context.stop(self); context.stop(self); busy.switchOff } } } @@ -229,16 +229,17 @@ object ActorModelSpec { } } - @tailrec def await(until: Long)(condition: ⇒ Boolean): Unit = if (System.currentTimeMillis() <= until) { - var done = false - try { - done = condition - if (!done) Thread.sleep(25) - } catch { - case e: InterruptedException ⇒ - } - if (!done) await(until)(condition) - } else throw new AssertionError("await failed") + @tailrec def await(until: Long)(condition: ⇒ Boolean): Unit = + if (System.currentTimeMillis() <= until) { + var done = false + try { + done = condition + if (!done) Thread.sleep(25) + } catch { + case e: InterruptedException ⇒ + } + if (!done) await(until)(condition) + } else throw new AssertionError("await failed") } abstract class ActorModelSpec(config: String) extends AkkaSpec(config) with DefaultTimeout { @@ -414,17 +415,28 @@ abstract class ActorModelSpec(config: String) extends AkkaSpec(config) with Defa val a = newTestActor(dispatcher.id) val f1 = a ? Reply("foo") val f2 = a ? Reply("bar") - val f3 = try { a ? Interrupt } catch { case ie: InterruptedException ⇒ Promise.failed(new ActorInterruptedException(ie)).future } + val f3 = a ? Interrupt + Thread.interrupted() // CallingThreadDispatcher may necessitate this val f4 = a ? Reply("foo2") - val f5 = try { a ? Interrupt } catch { case ie: InterruptedException ⇒ Promise.failed(new ActorInterruptedException(ie)).future } + val f5 = a ? Interrupt + Thread.interrupted() // CallingThreadDispatcher may necessitate this val f6 = a ? 
Reply("bar2") + val c = system.scheduler.scheduleOnce(2.seconds) { + import collection.JavaConverters._ + Thread.getAllStackTraces().asScala foreach { + case (thread, stack) ⇒ + println(s"$thread:") + stack foreach (s ⇒ println(s"\t$s")) + } + } assert(Await.result(f1, remaining) === "foo") assert(Await.result(f2, remaining) === "bar") assert(Await.result(f4, remaining) === "foo2") assert(intercept[ActorInterruptedException](Await.result(f3, remaining)).getCause.getMessage === "Ping!") assert(Await.result(f6, remaining) === "bar2") assert(intercept[ActorInterruptedException](Await.result(f5, remaining)).getCause.getMessage === "Ping!") + c.cancel() } } diff --git a/akka-actor-tests/src/test/scala/akka/actor/dispatch/DispatchersSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/dispatch/DispatchersSpec.scala index 39612fe409..6c39655706 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/dispatch/DispatchersSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/dispatch/DispatchersSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.actor.dispatch diff --git a/akka-actor-tests/src/test/scala/akka/config/ConfigSpec.scala b/akka-actor-tests/src/test/scala/akka/config/ConfigSpec.scala index 9a43631894..0a6984eea4 100644 --- a/akka-actor-tests/src/test/scala/akka/config/ConfigSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/config/ConfigSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.config @@ -32,10 +32,8 @@ class ConfigSpec extends AkkaSpec(ConfigFactory.defaultReference(ActorSystem.fin settings.SerializeAllMessages must equal(false) getInt("akka.scheduler.ticks-per-wheel") must equal(512) - settings.SchedulerTicksPerWheel must equal(512) - getMilliseconds("akka.scheduler.tick-duration") must equal(100) - settings.SchedulerTickDuration must equal(100 millis) + getString("akka.scheduler.implementation") must equal("akka.actor.LightArrayRevolverScheduler") getBoolean("akka.daemonic") must be(false) settings.Daemonicity must be(false) diff --git a/akka-actor-tests/src/test/scala/akka/dataflow/Future2Actor.scala b/akka-actor-tests/src/test/scala/akka/dataflow/Future2Actor.scala index bc225933fe..572f0b5ef4 100644 --- a/akka-actor-tests/src/test/scala/akka/dataflow/Future2Actor.scala +++ b/akka-actor-tests/src/test/scala/akka/dataflow/Future2Actor.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.dataflow diff --git a/akka-actor-tests/src/test/scala/akka/dispatch/ExecutionContextSpec.scala b/akka-actor-tests/src/test/scala/akka/dispatch/ExecutionContextSpec.scala index 742805883b..830a19eb29 100644 --- a/akka-actor-tests/src/test/scala/akka/dispatch/ExecutionContextSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/dispatch/ExecutionContextSpec.scala @@ -4,6 +4,7 @@ import java.util.concurrent.{ ExecutorService, Executor, Executors } import java.util.concurrent.atomic.AtomicInteger import scala.concurrent._ import akka.testkit.{ TestLatch, AkkaSpec, DefaultTimeout } +import akka.util.SerializedSuspendableExecutionContext @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) class ExecutionContextSpec extends AkkaSpec with DefaultTimeout { @@ -81,4 +82,82 @@ class ExecutionContextSpec extends AkkaSpec with DefaultTimeout { Await.ready(latch, timeout.duration) } } + + "A SerializedSuspendableExecutionContext" must { + "be suspendable and resumable" in { + val sec = SerializedSuspendableExecutionContext(1)(ExecutionContext.global) + val counter = new AtomicInteger(0) + def perform(f: Int ⇒ Int) = sec execute new Runnable { def run = counter.set(f(counter.get)) } + perform(_ + 1) + perform(x ⇒ { sec.suspend(); x * 2 }) + awaitCond(counter.get == 2) + perform(_ + 4) + perform(_ * 2) + sec.size must be === 2 + Thread.sleep(500) + sec.size must be === 2 + counter.get must be === 2 + sec.resume() + awaitCond(counter.get == 12) + perform(_ * 2) + awaitCond(counter.get == 24) + sec.isEmpty must be === true + } + + "execute 'throughput' number of tasks per sweep" in { + val submissions = new AtomicInteger(0) + val counter = new AtomicInteger(0) + val underlying = new ExecutionContext { + override def execute(r: Runnable) { submissions.incrementAndGet(); ExecutionContext.global.execute(r) } + override def reportFailure(t: Throwable) { ExecutionContext.global.reportFailure(t) } + } + val throughput = 25 + val sec = 
SerializedSuspendableExecutionContext(throughput)(underlying) + sec.suspend() + def perform(f: Int ⇒ Int) = sec execute new Runnable { def run = counter.set(f(counter.get)) } + + val total = 1000 + 1 to total foreach { _ ⇒ perform(_ + 1) } + sec.size() must be === total + sec.resume() + awaitCond(counter.get == total) + submissions.get must be === (total / throughput) + sec.isEmpty must be === true + } + + "execute tasks in serial" in { + val sec = SerializedSuspendableExecutionContext(1)(ExecutionContext.global) + val total = 10000 + val counter = new AtomicInteger(0) + def perform(f: Int ⇒ Int) = sec execute new Runnable { def run = counter.set(f(counter.get)) } + + 1 to total foreach { i ⇒ perform(c ⇒ if (c == (i - 1)) c + 1 else c) } + awaitCond(counter.get == total) + sec.isEmpty must be === true + } + + "should relinquish thread when suspended" in { + val submissions = new AtomicInteger(0) + val counter = new AtomicInteger(0) + val underlying = new ExecutionContext { + override def execute(r: Runnable) { submissions.incrementAndGet(); ExecutionContext.global.execute(r) } + override def reportFailure(t: Throwable) { ExecutionContext.global.reportFailure(t) } + } + val throughput = 25 + val sec = SerializedSuspendableExecutionContext(throughput)(underlying) + sec.suspend() + def perform(f: Int ⇒ Int) = sec execute new Runnable { def run = counter.set(f(counter.get)) } + perform(_ + 1) + 1 to 10 foreach { _ ⇒ perform(identity) } + perform(x ⇒ { sec.suspend(); x * 2 }) + perform(_ + 8) + sec.size must be === 13 + sec.resume() + awaitCond(counter.get == 2) + sec.resume() + awaitCond(counter.get == 10) + sec.isEmpty must be === true + submissions.get must be === 2 + } + } } diff --git a/akka-actor-tests/src/test/scala/akka/dispatch/MailboxConfigSpec.scala b/akka-actor-tests/src/test/scala/akka/dispatch/MailboxConfigSpec.scala index 94954ab4d8..a79a3c9ab8 100644 --- a/akka-actor-tests/src/test/scala/akka/dispatch/MailboxConfigSpec.scala +++ 
b/akka-actor-tests/src/test/scala/akka/dispatch/MailboxConfigSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.dispatch diff --git a/akka-actor-tests/src/test/scala/akka/event/EventBusSpec.scala b/akka-actor-tests/src/test/scala/akka/event/EventBusSpec.scala index 0f7799adc0..d93fdbb4f6 100644 --- a/akka-actor-tests/src/test/scala/akka/event/EventBusSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/event/EventBusSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.event diff --git a/akka-actor-tests/src/test/scala/akka/event/EventStreamSpec.scala b/akka-actor-tests/src/test/scala/akka/event/EventStreamSpec.scala index 442d35f194..af70e23ab5 100644 --- a/akka-actor-tests/src/test/scala/akka/event/EventStreamSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/event/EventStreamSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.event diff --git a/akka-actor-tests/src/test/scala/akka/event/LoggingReceiveSpec.scala b/akka-actor-tests/src/test/scala/akka/event/LoggingReceiveSpec.scala index d7ce93e997..d61fd1496b 100644 --- a/akka-actor-tests/src/test/scala/akka/event/LoggingReceiveSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/event/LoggingReceiveSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.event diff --git a/akka-actor-tests/src/test/scala/akka/pattern/AskSpec.scala b/akka-actor-tests/src/test/scala/akka/pattern/AskSpec.scala index 8f3f7f0510..73e707e3ac 100644 --- a/akka-actor-tests/src/test/scala/akka/pattern/AskSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/pattern/AskSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.pattern @@ -70,4 +70,4 @@ class AskSpec extends AkkaSpec { } -} \ No newline at end of file +} diff --git a/akka-actor-tests/src/test/scala/akka/pattern/CircuitBreakerMTSpec.scala b/akka-actor-tests/src/test/scala/akka/pattern/CircuitBreakerMTSpec.scala index 34cb3d4ef8..57fc716682 100644 --- a/akka-actor-tests/src/test/scala/akka/pattern/CircuitBreakerMTSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/pattern/CircuitBreakerMTSpec.scala @@ -1,11 +1,12 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.pattern import akka.testkit._ +import scala.collection.immutable import scala.concurrent.duration._ -import scala.concurrent.{ Promise, Future, Await } +import scala.concurrent.{ Future, Await } import scala.annotation.tailrec class CircuitBreakerMTSpec extends AkkaSpec { @@ -13,42 +14,49 @@ class CircuitBreakerMTSpec extends AkkaSpec { "A circuit breaker being called by many threads" must { val callTimeout = 1.second.dilated val resetTimeout = 2.seconds.dilated - val breaker = new CircuitBreaker(system.scheduler, 5, callTimeout, resetTimeout) + val maxFailures = 5 + val breaker = new CircuitBreaker(system.scheduler, maxFailures, callTimeout, resetTimeout) + val numberOfTestCalls = 100 def openBreaker(): Unit = { - @tailrec def call(attemptsLeft: Int): Unit = { - attemptsLeft must be > (0) - if (Await.result(breaker.withCircuitBreaker(Future(throw new RuntimeException("FAIL"))) recover { - case _: CircuitBreakerOpenException ⇒ false - case _ ⇒ true - }, remaining)) call(attemptsLeft - 1) + // returns true if the breaker is open + def failingCall(): Boolean = + Await.result(breaker.withCircuitBreaker(Future(throw new RuntimeException("FAIL"))) recover { + case _: CircuitBreakerOpenException ⇒ true + case _ ⇒ false + }, remaining) + + // fire some failing calls + 1 to (maxFailures + 1) foreach { _ ⇒ failingCall() } + // and then continue with failing calls until the breaker is open + 
awaitCond(failingCall()) + } + + def testCallsWithBreaker(): immutable.IndexedSeq[Future[String]] = { + val aFewActive = new TestLatch(5) + for (_ ← 1 to numberOfTestCalls) yield breaker.withCircuitBreaker(Future { + aFewActive.countDown() + Await.ready(aFewActive, 5.seconds.dilated) + "succeed" + }) recoverWith { + case _: CircuitBreakerOpenException ⇒ + aFewActive.countDown() + Future.successful("CBO") } - call(10) } "allow many calls while in closed state with no errors" in { - - val futures = for (i ← 1 to 100) yield breaker.withCircuitBreaker(Future { Thread.sleep(10); "succeed" }) - + val futures = testCallsWithBreaker() val result = Await.result(Future.sequence(futures), 5.second.dilated) - - result.size must be(100) + result.size must be(numberOfTestCalls) result.toSet must be === Set("succeed") - } "transition to open state upon reaching failure limit and fail-fast" in { openBreaker() - - val futures = for (i ← 1 to 100) yield breaker.withCircuitBreaker(Future { - Thread.sleep(10); "success" - }) recoverWith { - case _: CircuitBreakerOpenException ⇒ Promise.successful("CBO").future - } - + val futures = testCallsWithBreaker() val result = Await.result(Future.sequence(futures), 5.second.dilated) - - result.size must be(100) + result.size must be(numberOfTestCalls) result.toSet must be === Set("CBO") } @@ -58,17 +66,12 @@ class CircuitBreakerMTSpec extends AkkaSpec { openBreaker() + // breaker should become half-open after a while Await.ready(halfOpenLatch, resetTimeout + 1.seconds.dilated) - val futures = for (i ← 1 to 100) yield breaker.withCircuitBreaker(Future { - Thread.sleep(10); "succeed" - }) recoverWith { - case _: CircuitBreakerOpenException ⇒ Promise.successful("CBO").future - } - + val futures = testCallsWithBreaker() val result = Await.result(Future.sequence(futures), 5.second.dilated) - - result.size must be(100) + result.size must be(numberOfTestCalls) result.toSet must be === Set("succeed", "CBO") } @@ -76,20 +79,20 @@ class 
CircuitBreakerMTSpec extends AkkaSpec { val halfOpenLatch = new TestLatch(1) breaker.onHalfOpen(halfOpenLatch.countDown()) openBreaker() - Await.ready(halfOpenLatch, 5.seconds.dilated) - Await.ready(breaker.withCircuitBreaker(Future("succeed")), resetTimeout) - val futures = (1 to 100) map { - i ⇒ - breaker.withCircuitBreaker(Future { Thread.sleep(10); "succeed" }) recoverWith { - case _: CircuitBreakerOpenException ⇒ Promise.successful("CBO").future - } - } + // breaker should become half-open after a while + Await.ready(halfOpenLatch, resetTimeout + 1.seconds.dilated) + // one successful call should close the latch + val closedLatch = new TestLatch(1) + breaker.onClose(closedLatch.countDown()) + breaker.withCircuitBreaker(Future("succeed")) + Await.ready(closedLatch, 5.seconds.dilated) + + val futures = testCallsWithBreaker() val result = Await.result(Future.sequence(futures), 5.second.dilated) - - result.size must be(100) + result.size must be(numberOfTestCalls) result.toSet must be === Set("succeed") } } -} \ No newline at end of file +} diff --git a/akka-actor-tests/src/test/scala/akka/pattern/CircuitBreakerSpec.scala b/akka-actor-tests/src/test/scala/akka/pattern/CircuitBreakerSpec.scala index 954fefb58d..05a5fcb5c6 100644 --- a/akka-actor-tests/src/test/scala/akka/pattern/CircuitBreakerSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/pattern/CircuitBreakerSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.pattern @@ -10,7 +10,7 @@ import scala.concurrent.duration._ import akka.testkit._ import org.scalatest.BeforeAndAfter import akka.actor.{ ActorSystem, Scheduler } -import concurrent.{ ExecutionContext, Future, Await } +import scala.concurrent.{ ExecutionContext, Future, Await } object CircuitBreakerSpec { diff --git a/akka-actor-tests/src/test/scala/akka/pattern/PatternSpec.scala b/akka-actor-tests/src/test/scala/akka/pattern/PatternSpec.scala index 0e4333e04e..028463c8bc 100644 --- a/akka-actor-tests/src/test/scala/akka/pattern/PatternSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/pattern/PatternSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.pattern diff --git a/akka-actor-tests/src/test/scala/akka/routing/ConfiguredLocalRoutingSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/ConfiguredLocalRoutingSpec.scala index 9f3c121d86..4f45bf9257 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/ConfiguredLocalRoutingSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/ConfiguredLocalRoutingSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.routing diff --git a/akka-actor-tests/src/test/scala/akka/routing/ConsistentHashingRouterSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/ConsistentHashingRouterSpec.scala index 867da83bd7..a68f08f845 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/ConsistentHashingRouterSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/ConsistentHashingRouterSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.routing diff --git a/akka-actor-tests/src/test/scala/akka/routing/CustomRouteSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/CustomRouteSpec.scala index 00bd46f430..6363325797 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/CustomRouteSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/CustomRouteSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.routing @@ -49,4 +49,4 @@ class CustomRouteSpec extends AkkaSpec { } } -} \ No newline at end of file +} diff --git a/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala index c9136248e3..398a931ca6 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.routing diff --git a/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala index 9d7522f950..23445ae1a0 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.routing diff --git a/akka-actor-tests/src/test/scala/akka/serialization/SerializeSpec.scala b/akka-actor-tests/src/test/scala/akka/serialization/SerializeSpec.scala index c49dc8037f..8e1ccd1bdc 100644 --- a/akka-actor-tests/src/test/scala/akka/serialization/SerializeSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/serialization/SerializeSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.serialization @@ -8,17 +8,20 @@ import language.postfixOps import akka.testkit.{ AkkaSpec, EventFilter } import akka.actor._ +import akka.dispatch._ import java.io._ import scala.concurrent.Await import akka.util.Timeout import scala.concurrent.duration._ import scala.reflect.BeanInfo import com.google.protobuf.Message +import com.typesafe.config._ import akka.pattern.ask +import org.apache.commons.codec.binary.Hex.{ encodeHex, decodeHex } -object SerializeSpec { +object SerializationTests { - val config = """ + val serializeConf = """ akka { actor { serializers { @@ -26,13 +29,13 @@ object SerializeSpec { } serialization-bindings { - "akka.serialization.SerializeSpec$Person" = java - "akka.serialization.SerializeSpec$Address" = java - "akka.serialization.TestSerializble" = test - "akka.serialization.SerializeSpec$PlainMessage" = test - "akka.serialization.SerializeSpec$A" = java - "akka.serialization.SerializeSpec$B" = test - "akka.serialization.SerializeSpec$D" = test + "akka.serialization.SerializationTests$Person" = java + "akka.serialization.SerializationTests$Address" = java + "akka.serialization.TestSerializable" = test + "akka.serialization.SerializationTests$PlainMessage" = test + "akka.serialization.SerializationTests$A" = java + "akka.serialization.SerializationTests$B" = test + "akka.serialization.SerializationTests$D" = test } } } @@ -45,11 +48,11 @@ object SerializeSpec { case class Record(id: Int, person: Person) - class SimpleMessage(s: String) extends TestSerializble + class SimpleMessage(s: String) extends TestSerializable class ExtendedSimpleMessage(s: String, i: Int) extends SimpleMessage(s) - trait AnotherInterface extends TestSerializble + trait AnotherInterface extends TestSerializable class AnotherMessage extends AnotherInterface @@ -67,11 +70,67 @@ object SerializeSpec { class D extends A class E extends D + val verifySerializabilityConf = """ + akka { + actor { + serialize-messages = on + serialize-creators = on + } + } + 
""" + + class FooActor extends Actor { + def receive = { + case s: String ⇒ sender ! s + } + } + + class FooUntypedActor extends UntypedActor { + def onReceive(message: Any) {} + } + + class NonSerializableActor(system: ActorSystem) extends Actor { + def receive = { + case s: String ⇒ sender ! s + } + } + + def mostlyReferenceSystem: ActorSystem = { + val referenceConf = ConfigFactory.defaultReference() + val mostlyReferenceConf = AkkaSpec.testConf.withFallback(referenceConf) + ActorSystem("SerializationSystem", mostlyReferenceConf) + } + + val systemMessageMultiSerializerConf = """ + akka { + actor { + serializers { + test = "akka.serialization.TestSerializer" + } + + serialization-bindings { + "akka.dispatch.SystemMessage" = test + } + } + } + """ + + val systemMessageClasses = List[Class[_]]( + classOf[Create], + classOf[Recreate], + classOf[Suspend], + classOf[Resume], + classOf[Terminate], + classOf[Supervise], + classOf[ChildTerminated], + classOf[Watch], + classOf[Unwatch], + NoMessage.getClass) } @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) -class SerializeSpec extends AkkaSpec(SerializeSpec.config) { - import SerializeSpec._ +class SerializeSpec extends AkkaSpec(SerializationTests.serializeConf) { + import SerializationTests._ val ser = SerializationExtension(system) import ser._ @@ -156,7 +215,7 @@ class SerializeSpec extends AkkaSpec(SerializeSpec.config) { "give warning for message with several bindings" in { EventFilter.warning(start = "Multiple serializers found", occurrences = 1) intercept { - ser.serializerFor(classOf[Both]).getClass must be(classOf[TestSerializer]) + ser.serializerFor(classOf[Both]).getClass must (be(classOf[TestSerializer]) or be(classOf[JavaSerializer])) } } @@ -164,7 +223,7 @@ class SerializeSpec extends AkkaSpec(SerializeSpec.config) { ser.serializerFor(classOf[A]).getClass must be(classOf[JavaSerializer]) ser.serializerFor(classOf[B]).getClass must be(classOf[TestSerializer]) EventFilter.warning(start = 
"Multiple serializers found", occurrences = 1) intercept { - ser.serializerFor(classOf[C]).getClass must be(classOf[JavaSerializer]) + ser.serializerFor(classOf[C]).getClass must (be(classOf[TestSerializer]) or be(classOf[JavaSerializer])) } } @@ -194,36 +253,9 @@ class SerializeSpec extends AkkaSpec(SerializeSpec.config) { } } -object VerifySerializabilitySpec { - val conf = """ - akka { - actor { - serialize-messages = on - serialize-creators = on - } - } - """ - - class FooActor extends Actor { - def receive = { - case s: String ⇒ sender ! s - } - } - - class FooUntypedActor extends UntypedActor { - def onReceive(message: Any) {} - } - - class NonSerializableActor(system: ActorSystem) extends Actor { - def receive = { - case s: String ⇒ sender ! s - } - } -} - @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) -class VerifySerializabilitySpec extends AkkaSpec(VerifySerializabilitySpec.conf) { - import VerifySerializabilitySpec._ +class VerifySerializabilitySpec extends AkkaSpec(SerializationTests.verifySerializabilityConf) { + import SerializationTests._ implicit val timeout = Timeout(5 seconds) "verify config" in { @@ -260,7 +292,98 @@ class VerifySerializabilitySpec extends AkkaSpec(VerifySerializabilitySpec.conf) } } -trait TestSerializble +@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) +class ReferenceSerializationSpec extends AkkaSpec(SerializationTests.mostlyReferenceSystem) { + import SerializationTests._ + + val ser = SerializationExtension(system) + def serializerMustBe(toSerialize: Class[_], expectedSerializer: Class[_]) = + ser.serializerFor(toSerialize).getClass must be(expectedSerializer) + + "Serialization settings from reference.conf" must { + + "declare Serializable classes to be use JavaSerializer" in { + serializerMustBe(classOf[Serializable], classOf[JavaSerializer]) + serializerMustBe(classOf[String], classOf[JavaSerializer]) + for (smc ← systemMessageClasses) { + serializerMustBe(smc, 
classOf[JavaSerializer]) + } + } + + "declare Array[Byte] to use ByteArraySerializer" in { + serializerMustBe(classOf[Array[Byte]], classOf[ByteArraySerializer]) + } + + "not support serialization for other classes" in { + intercept[NotSerializableException] { ser.serializerFor(classOf[Object]) } + } + + } +} + +@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) +class SerializationCompatibilitySpec extends AkkaSpec(SerializationTests.mostlyReferenceSystem) { + import SerializationTests._ + + val ser = SerializationExtension(system) + + "Cross-version serialization compatibility" must { + def verify(obj: Any, asExpected: String): Unit = + String.valueOf(encodeHex(ser.serialize(obj, obj.getClass).get)) must be(asExpected) + + "be preserved for the Create SystemMessage" in { + verify(Create(1234), "aced00057372000c7363616c612e5475706c6532bc7daadf46211a990200024c00025f317400124c6a6176612f6c616e672f4f626a6563743b4c00025f3271007e0001787073720014616b6b612e64697370617463682e437265617465bcdf9f7f2675038d0200014900037569647870000004d27671007e0003") + } + "be preserved for the Recreate SystemMessage" in { + verify(Recreate(null), "aced00057372000c7363616c612e5475706c6532bc7daadf46211a990200024c00025f317400124c6a6176612f6c616e672f4f626a6563743b4c00025f3271007e0001787073720016616b6b612e64697370617463682e52656372656174650987c65c8d378a800200014c000563617573657400154c6a6176612f6c616e672f5468726f7761626c653b7870707671007e0003") + } + "be preserved for the Suspend SystemMessage" in { + verify(Suspend(), "aced00057372000c7363616c612e5475706c6532bc7daadf46211a990200024c00025f317400124c6a6176612f6c616e672f4f626a6563743b4c00025f3271007e0001787073720015616b6b612e64697370617463682e53757370656e6464e531d5d134b59902000078707671007e0003") + } + "be preserved for the Resume SystemMessage" in { + verify(Resume(null), 
"aced00057372000c7363616c612e5475706c6532bc7daadf46211a990200024c00025f317400124c6a6176612f6c616e672f4f626a6563743b4c00025f3271007e0001787073720014616b6b612e64697370617463682e526573756d65dc5e646d445fcb010200014c000f63617573656442794661696c7572657400154c6a6176612f6c616e672f5468726f7761626c653b7870707671007e0003") + } + "be preserved for the Terminate SystemMessage" in { + verify(Terminate(), "aced00057372000c7363616c612e5475706c6532bc7daadf46211a990200024c00025f317400124c6a6176612f6c616e672f4f626a6563743b4c00025f3271007e0001787073720017616b6b612e64697370617463682e5465726d696e61746509d66ca68318700f02000078707671007e0003") + } + "be preserved for the Supervise SystemMessage" in { + verify(Supervise(FakeActorRef("child"), true, 2468), "aced00057372000c7363616c612e5475706c6532bc7daadf46211a990200024c00025f317400124c6a6176612f6c616e672f4f626a6563743b4c00025f3271007e0001787073720017616b6b612e64697370617463682e5375706572766973652d0b363f56ab5feb0200035a00056173796e634900037569644c00056368696c647400154c616b6b612f6163746f722f4163746f725265663b787001000009a47372001f616b6b612e73657269616c697a6174696f6e2e46616b654163746f7252656600000000000000010200014c00046e616d657400124c6a6176612f6c616e672f537472696e673b7872001b616b6b612e6163746f722e496e7465726e616c4163746f725265660d0aa2ca1e82097602000078720013616b6b612e6163746f722e4163746f72526566c3585dde655f469402000078707400056368696c647671007e0003") + } + "be preserved for the ChildTerminated SystemMessage" in { + verify(ChildTerminated(FakeActorRef("child")), 
"aced00057372000c7363616c612e5475706c6532bc7daadf46211a990200024c00025f317400124c6a6176612f6c616e672f4f626a6563743b4c00025f3271007e000178707372001d616b6b612e64697370617463682e4368696c645465726d696e617465644c84222437ed5db40200014c00056368696c647400154c616b6b612f6163746f722f4163746f725265663b78707372001f616b6b612e73657269616c697a6174696f6e2e46616b654163746f7252656600000000000000010200014c00046e616d657400124c6a6176612f6c616e672f537472696e673b7872001b616b6b612e6163746f722e496e7465726e616c4163746f725265660d0aa2ca1e82097602000078720013616b6b612e6163746f722e4163746f72526566c3585dde655f469402000078707400056368696c647671007e0003") + } + "be preserved for the Watch SystemMessage" in { + verify(Watch(FakeActorRef("watchee"), FakeActorRef("watcher")), "aced00057372000c7363616c612e5475706c6532bc7daadf46211a990200024c00025f317400124c6a6176612f6c616e672f4f626a6563743b4c00025f3271007e0001787073720013616b6b612e64697370617463682e57617463682e1e65bc74394fc40200024c0007776174636865657400154c616b6b612f6163746f722f4163746f725265663b4c00077761746368657271007e000478707372001f616b6b612e73657269616c697a6174696f6e2e46616b654163746f7252656600000000000000010200014c00046e616d657400124c6a6176612f6c616e672f537472696e673b7872001b616b6b612e6163746f722e496e7465726e616c4163746f725265660d0aa2ca1e82097602000078720013616b6b612e6163746f722e4163746f72526566c3585dde655f46940200007870740007776174636865657371007e0006740007776174636865727671007e0003") + } + "be preserved for the Unwatch SystemMessage" in { + verify(Unwatch(FakeActorRef("watchee"), FakeActorRef("watcher")), 
"aced00057372000c7363616c612e5475706c6532bc7daadf46211a990200024c00025f317400124c6a6176612f6c616e672f4f626a6563743b4c00025f3271007e0001787073720015616b6b612e64697370617463682e556e776174636858501f7ee63dc2100200024c0007776174636865657400154c616b6b612f6163746f722f4163746f725265663b4c00077761746368657271007e000478707372001f616b6b612e73657269616c697a6174696f6e2e46616b654163746f7252656600000000000000010200014c00046e616d657400124c6a6176612f6c616e672f537472696e673b7872001b616b6b612e6163746f722e496e7465726e616c4163746f725265660d0aa2ca1e82097602000078720013616b6b612e6163746f722e4163746f72526566c3585dde655f46940200007870740007776174636865657371007e0006740007776174636865727671007e0003") + } + "be preserved for the NoMessage SystemMessage" in { + verify(NoMessage, "aced00057372000c7363616c612e5475706c6532bc7daadf46211a990200024c00025f317400124c6a6176612f6c616e672f4f626a6563743b4c00025f3271007e0001787073720018616b6b612e64697370617463682e4e6f4d65737361676524b401a3610ccb70dd02000078707671007e0003") + } + } +} + +@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) +class OverriddenSystemMessageSerializationSpec extends AkkaSpec(SerializationTests.systemMessageMultiSerializerConf) { + import SerializationTests._ + + val ser = SerializationExtension(system) + + "Overridden SystemMessage serialization" must { + + "resolve to a single serializer" in { + EventFilter.warning(start = "Multiple serializers found", occurrences = 0) intercept { + for (smc ← systemMessageClasses) { + ser.serializerFor(smc).getClass must be(classOf[TestSerializer]) + } + } + } + + } +} + +trait TestSerializable class TestSerializer extends Serializer { def includeManifest: Boolean = false @@ -273,3 +396,26 @@ class TestSerializer extends Serializer { def fromBinary(bytes: Array[Byte], clazz: Option[Class[_]]): AnyRef = null } + +@SerialVersionUID(1) +case class FakeThrowable(msg: String) extends Throwable(msg) with Serializable { + override def fillInStackTrace = null +} + +@SerialVersionUID(1) 
+case class FakeActorRef(name: String) extends InternalActorRef with ActorRefScope { + override def path = RootActorPath(Address("proto", "SomeSystem"), name) + override def forward(message: Any)(implicit context: ActorContext) = ??? + override def isTerminated = ??? + override def start() = ??? + override def resume(causedByFailure: Throwable) = ??? + override def suspend() = ??? + override def restart(cause: Throwable) = ??? + override def stop() = ??? + override def sendSystemMessage(message: SystemMessage) = ??? + override def provider = ??? + override def getParent = ??? + override def getChild(name: Iterator[String]) = ??? + override def isLocal = ??? + override def !(message: Any)(implicit sender: ActorRef = Actor.noSender) = ??? +} \ No newline at end of file diff --git a/akka-actor-tests/src/test/scala/akka/testkit/CallingThreadDispatcherModelSpec.scala b/akka-actor-tests/src/test/scala/akka/testkit/CallingThreadDispatcherModelSpec.scala index 5b023054d4..c23102c35f 100644 --- a/akka-actor-tests/src/test/scala/akka/testkit/CallingThreadDispatcherModelSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/testkit/CallingThreadDispatcherModelSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.testkit diff --git a/akka-actor-tests/src/test/scala/akka/util/ByteStringSpec.scala b/akka-actor-tests/src/test/scala/akka/util/ByteStringSpec.scala index a5f65b6a7e..117de28242 100644 --- a/akka-actor-tests/src/test/scala/akka/util/ByteStringSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/util/ByteStringSpec.scala @@ -269,9 +269,19 @@ class ByteStringSpec extends WordSpec with MustMatchers with Checkers { (b.compact eq b) } } + + "asByteBuffers" in { + check { (a: ByteString) ⇒ if (a.isCompact) a.asByteBuffers.size == 1 && a.asByteBuffers.head == a.asByteBuffer else a.asByteBuffers.size > 0 } + check { (a: ByteString) ⇒ a.asByteBuffers.foldLeft(ByteString.empty) { (bs, bb) ⇒ bs ++ ByteString(bb) } == a } + check { (a: ByteString) ⇒ a.asByteBuffers.forall(_.isReadOnly) } + check { (a: ByteString) ⇒ + import scala.collection.JavaConverters.iterableAsScalaIterableConverter; + a.asByteBuffers.zip(a.getByteBuffers().asScala).forall(x ⇒ x._1 == x._2) + } + } } "behave like a Vector" when { - "concatenating" in { check { (a: ByteString, b: ByteString) ⇒ likeVectors(a, b) { (a, b) ⇒ (a ++ b) } } } + "concatenating" in { check { (a: ByteString, b: ByteString) ⇒ likeVectors(a, b) { _ ++ _ } } } "calling apply" in { check { slice: ByteStringSlice ⇒ diff --git a/akka-actor-tests/src/test/scala/akka/util/DurationSpec.scala b/akka-actor-tests/src/test/scala/akka/util/DurationSpec.scala index ca285274aa..71c3baedb9 100644 --- a/akka-actor-tests/src/test/scala/akka/util/DurationSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/util/DurationSpec.scala @@ -1,39 +1,16 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.util import language.postfixOps -import org.scalatest.WordSpec -import org.scalatest.matchers.MustMatchers import scala.concurrent.duration._ -import scala.concurrent.Await -import java.util.concurrent.TimeUnit._ import akka.testkit.AkkaSpec -import akka.testkit.TestLatch -import java.util.concurrent.TimeoutException -import akka.testkit.LongRunningTest class DurationSpec extends AkkaSpec { - "A HashedWheelTimer" must { - - "not mess up long timeouts" taggedAs LongRunningTest in { - val longish = Long.MaxValue.nanos - val barrier = TestLatch() - import system.dispatcher - val job = system.scheduler.scheduleOnce(longish)(barrier.countDown()) - intercept[TimeoutException] { - // this used to fire after 46 seconds due to wrap-around - Await.ready(barrier, 90 seconds) - } - job.cancel() - } - - } - "Duration" must { "form a one-dimensional vector field" in { diff --git a/akka-actor-tests/src/test/scala/akka/util/IndexSpec.scala b/akka-actor-tests/src/test/scala/akka/util/IndexSpec.scala index d2d87b4c14..16374b9c10 100644 --- a/akka-actor-tests/src/test/scala/akka/util/IndexSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/util/IndexSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.util @@ -129,4 +129,4 @@ class IndexSpec extends AkkaSpec with MustMatchers with DefaultTimeout { tasks.foreach(Await.result(_, timeout.duration)) } } -} \ No newline at end of file +} diff --git a/akka-actor-tests/src/test/scala/akka/util/JavaDurationSpec.scala b/akka-actor-tests/src/test/scala/akka/util/JavaDurationSpec.scala index 2278873b56..f9bd2ace91 100644 --- a/akka-actor-tests/src/test/scala/akka/util/JavaDurationSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/util/JavaDurationSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.util diff --git a/akka-actor-tests/src/test/scala/akka/util/SwitchSpec.scala b/akka-actor-tests/src/test/scala/akka/util/SwitchSpec.scala index cf272cba88..10fca828a1 100644 --- a/akka-actor-tests/src/test/scala/akka/util/SwitchSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/util/SwitchSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.util diff --git a/akka-actor/src/main/java/akka/actor/AbstractActorRef.java b/akka-actor/src/main/java/akka/actor/AbstractActorRef.java index 650182a457..65d2834694 100644 --- a/akka-actor/src/main/java/akka/actor/AbstractActorRef.java +++ b/akka-actor/src/main/java/akka/actor/AbstractActorRef.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.actor; diff --git a/akka-actor/src/main/java/akka/actor/AbstractScheduler.java b/akka-actor/src/main/java/akka/actor/AbstractScheduler.java new file mode 100644 index 0000000000..fdf5fe9001 --- /dev/null +++ b/akka-actor/src/main/java/akka/actor/AbstractScheduler.java @@ -0,0 +1,53 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ + +package akka.actor; + +import scala.concurrent.ExecutionContext; +import scala.concurrent.duration.FiniteDuration; + +//#scheduler +/** + * An Akka scheduler service. This one needs one special behavior: if + * Closeable, it MUST execute all outstanding tasks upon .close() in order + * to properly shutdown all dispatchers. + * + * Furthermore, this timer service MUST throw IllegalStateException if it + * cannot schedule a task. Once scheduled, the task MUST be executed. If + * executed upon close(), the task may execute before its timeout. 
+ * + * Scheduler implementation are loaded reflectively at ActorSystem start-up + * with the following constructor arguments: + * 1) the system’s com.typesafe.config.Config (from system.settings.config) + * 2) a akka.event.LoggingAdapter + * 3) a java.util.concurrent.ThreadFactory + */ +public abstract class AbstractScheduler extends AbstractSchedulerBase { + + /** + * Schedules a function to be run repeatedly with an initial delay and + * a frequency. E.g. if you would like the function to be run after 2 + * seconds and thereafter every 100ms you would set delay = Duration(2, + * TimeUnit.SECONDS) and interval = Duration(100, TimeUnit.MILLISECONDS) + */ + @Override + public abstract Cancellable schedule(FiniteDuration initialDelay, + FiniteDuration interval, Runnable runnable, ExecutionContext executor); + + /** + * Schedules a Runnable to be run once with a delay, i.e. a time period that + * has to pass before the runnable is executed. + */ + @Override + public abstract Cancellable scheduleOnce(FiniteDuration delay, Runnable runnable, + ExecutionContext executor); + + /** + * The maximum supported task frequency of this scheduler, i.e. the inverse + * of the minimum time interval between executions of a recurring task, in Hz. + */ + @Override + public abstract double maxFrequency(); +} +//#scheduler diff --git a/akka-actor/src/main/java/akka/actor/dungeon/AbstractActorCell.java b/akka-actor/src/main/java/akka/actor/dungeon/AbstractActorCell.java index 6735b6e2cb..4f9771b256 100644 --- a/akka-actor/src/main/java/akka/actor/dungeon/AbstractActorCell.java +++ b/akka-actor/src/main/java/akka/actor/dungeon/AbstractActorCell.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.actor.dungeon; diff --git a/akka-actor/src/main/java/akka/dispatch/AbstractMailbox.java b/akka-actor/src/main/java/akka/dispatch/AbstractMailbox.java index c4ac378fad..d299a3886a 100644 --- a/akka-actor/src/main/java/akka/dispatch/AbstractMailbox.java +++ b/akka-actor/src/main/java/akka/dispatch/AbstractMailbox.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.dispatch; diff --git a/akka-actor/src/main/java/akka/dispatch/AbstractMessageDispatcher.java b/akka-actor/src/main/java/akka/dispatch/AbstractMessageDispatcher.java index 183812428e..415670968b 100644 --- a/akka-actor/src/main/java/akka/dispatch/AbstractMessageDispatcher.java +++ b/akka-actor/src/main/java/akka/dispatch/AbstractMessageDispatcher.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.dispatch; diff --git a/akka-actor/src/main/java/akka/pattern/AbstractCircuitBreaker.java b/akka-actor/src/main/java/akka/pattern/AbstractCircuitBreaker.java index 44482bb357..6d2f0dbf41 100644 --- a/akka-actor/src/main/java/akka/pattern/AbstractCircuitBreaker.java +++ b/akka-actor/src/main/java/akka/pattern/AbstractCircuitBreaker.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.pattern; diff --git a/akka-actor/src/main/java/akka/pattern/AbstractPromiseActorRef.java b/akka-actor/src/main/java/akka/pattern/AbstractPromiseActorRef.java index bb0f03861c..4c55a4c219 100644 --- a/akka-actor/src/main/java/akka/pattern/AbstractPromiseActorRef.java +++ b/akka-actor/src/main/java/akka/pattern/AbstractPromiseActorRef.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.pattern; @@ -18,4 +18,4 @@ final class AbstractPromiseActorRef { throw new ExceptionInInitializerError(t); } } -} \ No newline at end of file +} diff --git a/akka-actor/src/main/java/akka/util/internal/ConcurrentIdentityHashMap.java b/akka-actor/src/main/java/akka/util/internal/ConcurrentIdentityHashMap.java deleted file mode 100644 index eb83c98f35..0000000000 --- a/akka-actor/src/main/java/akka/util/internal/ConcurrentIdentityHashMap.java +++ /dev/null @@ -1,1417 +0,0 @@ -/* - * Copyright 2009 Red Hat, Inc. - * - * Red Hat licenses this file to you under the Apache License, version 2.0 - * (the "License"); you may not use this file except in compliance with the - * License. You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - */ -/* - * Written by Doug Lea with assistance from members of JCP JSR-166 - * Expert Group and released to the public domain, as explained at - * http://creativecommons.org/licenses/publicdomain - */ -package akka.util.internal; - -import java.util.AbstractCollection; -import java.util.AbstractMap; -import java.util.AbstractSet; -import java.util.Collection; -import java.util.Enumeration; -import java.util.Iterator; -import java.util.Map; -import java.util.Map.Entry; // Needed by sbt doc - do not remove -import java.util.NoSuchElementException; -import java.util.Set; -import java.util.concurrent.ConcurrentMap; -import java.util.concurrent.locks.ReentrantLock; - - -/** - * An alternative identity-comparing {@link java.util.concurrent.ConcurrentMap} which is similar to - * {@link java.util.concurrent.ConcurrentHashMap}. 
- * - * @author The Netty Project - * @author Doug Lea - * @author Jason T. Greene - * @author Trustin Lee - * @version $Rev: 2371 $, $Date: 2010-10-19 15:00:42 +0900 (Tue, 19 Oct 2010) $ - * - * @param the type of keys maintained by this map - * @param the type of mapped values - */ -public final class ConcurrentIdentityHashMap extends AbstractMap - implements ConcurrentMap{ - - /** - * The default initial capacity for this table, used when not otherwise - * specified in a constructor. - */ - static final int DEFAULT_INITIAL_CAPACITY = 16; - - /** - * The default load factor for this table, used when not otherwise specified - * in a constructor. - */ - static final float DEFAULT_LOAD_FACTOR = 0.75f; - - /** - * The default concurrency level for this table, used when not otherwise - * specified in a constructor. - */ - static final int DEFAULT_CONCURRENCY_LEVEL = 16; - - /** - * The maximum capacity, used if a higher value is implicitly specified by - * either of the constructors with arguments. MUST be a power of two - * <= 1<<30 to ensure that entries are indexable using integers. - */ - static final int MAXIMUM_CAPACITY = 1 << 30; - - /** - * The maximum number of segments to allow; used to bound constructor - * arguments. - */ - static final int MAX_SEGMENTS = 1 << 16; // slightly conservative - - /** - * Number of unsynchronized retries in size and containsValue methods before - * resorting to locking. This is used to avoid unbounded retries if tables - * undergo continuous modification which would make it impossible to obtain - * an accurate result. - */ - static final int RETRIES_BEFORE_LOCK = 2; - - /* ---------------- Fields -------------- */ - - /** - * Mask value for indexing into segments. The upper bits of a key's hash - * code are used to choose the segment. - */ - final int segmentMask; - - /** - * Shift value for indexing within segments. 
- */ - final int segmentShift; - - /** - * The segments, each of which is a specialized hash table - */ - final Segment[] segments; - - Set keySet; - Set> entrySet; - Collection values; - - /* ---------------- Small Utilities -------------- */ - - /** - * Applies a supplemental hash function to a given hashCode, which defends - * against poor quality hash functions. This is critical because - * ConcurrentReferenceHashMap uses power-of-two length hash tables, that - * otherwise encounter collisions for hashCodes that do not differ in lower - * or upper bits. - */ - private static int hash(int h) { - // Spread bits to regularize both segment and index locations, - // using variant of single-word Wang/Jenkins hash. - h += h << 15 ^ 0xffffcd7d; - h ^= h >>> 10; - h += h << 3; - h ^= h >>> 6; - h += (h << 2) + (h << 14); - return h ^ h >>> 16; - } - - /** - * Returns the segment that should be used for key with given hash. - * - * @param hash the hash code for the key - * @return the segment - */ - final Segment segmentFor(int hash) { - return segments[hash >>> segmentShift & segmentMask]; - } - - private int hashOf(Object key) { - return hash(System.identityHashCode(key)); - } - - /** - * ConcurrentReferenceHashMap list entry. Note that this is never exported - * out as a user-visible Map.Entry. - * - * Because the value field is volatile, not final, it is legal wrt - * the Java Memory Model for an unsynchronized reader to see null - * instead of initial value when read via a data race. Although a - * reordering leading to this is not likely to ever actually - * occur, the Segment.readValueUnderLock method is used as a - * backup in case a null (pre-initialized) value is ever seen in - * an unsynchronized access method. 
- */ - static final class HashEntry { - final Object key; - final int hash; - volatile Object value; - final HashEntry next; - - HashEntry( - K key, int hash, HashEntry next, V value) { - this.hash = hash; - this.next = next; - this.key = key; - this.value = value; - } - - @SuppressWarnings("unchecked") - final K key() { - return (K) key; - } - - @SuppressWarnings("unchecked") - final V value() { - return (V) value; - } - - final void setValue(V value) { - this.value = value; - } - - @SuppressWarnings("unchecked") - static final HashEntry[] newArray(int i) { - return new HashEntry[i]; - } - } - - /** - * Segments are specialized versions of hash tables. This subclasses from - * ReentrantLock opportunistically, just to simplify some locking and avoid - * separate construction. - */ - static final class Segment extends ReentrantLock { - /* - * Segments maintain a table of entry lists that are ALWAYS kept in a - * consistent state, so can be read without locking. Next fields of - * nodes are immutable (final). All list additions are performed at the - * front of each bin. This makes it easy to check changes, and also fast - * to traverse. When nodes would otherwise be changed, new nodes are - * created to replace them. This works well for hash tables since the - * bin lists tend to be short. (The average length is less than two for - * the default load factor threshold.) - * - * Read operations can thus proceed without locking, but rely on - * selected uses of volatiles to ensure that completed write operations - * performed by other threads are noticed. For most purposes, the - * "count" field, tracking the number of elements, serves as that - * volatile variable ensuring visibility. This is convenient because - * this field needs to be read in many read operations anyway: - * - * - All (unsynchronized) read operations must first read the - * "count" field, and should not look at table entries if - * it is 0. 
- * - * - All (synchronized) write operations should write to - * the "count" field after structurally changing any bin. - * The operations must not take any action that could even - * momentarily cause a concurrent read operation to see - * inconsistent data. This is made easier by the nature of - * the read operations in Map. For example, no operation - * can reveal that the table has grown but the threshold - * has not yet been updated, so there are no atomicity - * requirements for this with respect to reads. - * - * As a guide, all critical volatile reads and writes to the count field - * are marked in code comments. - */ - - private static final long serialVersionUID = 5207829234977119743L; - - /** - * The number of elements in this segment's region. - */ - transient volatile int count; - - /** - * Number of updates that alter the size of the table. This is used - * during bulk-read methods to make sure they see a consistent snapshot: - * If modCounts change during a traversal of segments computing size or - * checking containsValue, then we might have an inconsistent view of - * state so (usually) must retry. - */ - int modCount; - - /** - * The table is rehashed when its size exceeds this threshold. - * (The value of this field is always (capacity * loadFactor).) - */ - int threshold; - - /** - * The per-segment table. - */ - transient volatile HashEntry[] table; - - /** - * The load factor for the hash table. Even though this value is same - * for all segments, it is replicated to avoid needing links to outer - * object. - */ - final float loadFactor; - - Segment(int initialCapacity, float lf) { - loadFactor = lf; - setTable(HashEntry. newArray(initialCapacity)); - } - - @SuppressWarnings("unchecked") - static final Segment[] newArray(int i) { - return new Segment[i]; - } - - private boolean keyEq(Object src, Object dest) { - return src == dest; - } - - /** - * Sets table to new HashEntry array. Call only while holding lock or in - * constructor. 
- */ - void setTable(HashEntry[] newTable) { - threshold = (int) (newTable.length * loadFactor); - table = newTable; - } - - /** - * Returns properly casted first entry of bin for given hash. - */ - HashEntry getFirst(int hash) { - HashEntry[] tab = table; - return tab[hash & tab.length - 1]; - } - - HashEntry newHashEntry( - K key, int hash, HashEntry next, V value) { - return new HashEntry(key, hash, next, value); - } - - /** - * Reads value field of an entry under lock. Called if value field ever - * appears to be null. This is possible only if a compiler happens to - * reorder a HashEntry initialization with its table assignment, which - * is legal under memory model but is not known to ever occur. - */ - V readValueUnderLock(HashEntry e) { - lock(); - try { - return e.value(); - } finally { - unlock(); - } - } - - /* Specialized implementations of map methods */ - - V get(Object key, int hash) { - if (count != 0) { // read-volatile - HashEntry e = getFirst(hash); - while (e != null) { - if (e.hash == hash && keyEq(key, e.key())) { - V opaque = e.value(); - if (opaque != null) { - return opaque; - } - - return readValueUnderLock(e); // recheck - } - e = e.next; - } - } - return null; - } - - boolean containsKey(Object key, int hash) { - if (count != 0) { // read-volatile - HashEntry e = getFirst(hash); - while (e != null) { - if (e.hash == hash && keyEq(key, e.key())) { - return true; - } - e = e.next; - } - } - return false; - } - - boolean containsValue(Object value) { - if (count != 0) { // read-volatile - HashEntry[] tab = table; - int len = tab.length; - for (int i = 0; i < len; i ++) { - for (HashEntry e = tab[i]; e != null; e = e.next) { - V opaque = e.value(); - V v; - - if (opaque == null) { - v = readValueUnderLock(e); // recheck - } else { - v = opaque; - } - - if (value.equals(v)) { - return true; - } - } - } - } - return false; - } - - boolean replace(K key, int hash, V oldValue, V newValue) { - lock(); - try { - HashEntry e = getFirst(hash); - 
while (e != null && (e.hash != hash || !keyEq(key, e.key()))) { - e = e.next; - } - - boolean replaced = false; - if (e != null && oldValue.equals(e.value())) { - replaced = true; - e.setValue(newValue); - } - return replaced; - } finally { - unlock(); - } - } - - V replace(K key, int hash, V newValue) { - lock(); - try { - HashEntry e = getFirst(hash); - while (e != null && (e.hash != hash || !keyEq(key, e.key()))) { - e = e.next; - } - - V oldValue = null; - if (e != null) { - oldValue = e.value(); - e.setValue(newValue); - } - return oldValue; - } finally { - unlock(); - } - } - - V put(K key, int hash, V value, boolean onlyIfAbsent) { - lock(); - try { - int c = count; - if (c ++ > threshold) { // ensure capacity - int reduced = rehash(); - if (reduced > 0) { - count = (c -= reduced) - 1; // write-volatile - } - } - - HashEntry[] tab = table; - int index = hash & tab.length - 1; - HashEntry first = tab[index]; - HashEntry e = first; - while (e != null && (e.hash != hash || !keyEq(key, e.key()))) { - e = e.next; - } - - V oldValue; - if (e != null) { - oldValue = e.value(); - if (!onlyIfAbsent) { - e.setValue(value); - } - } else { - oldValue = null; - ++ modCount; - tab[index] = newHashEntry(key, hash, first, value); - count = c; // write-volatile - } - return oldValue; - } finally { - unlock(); - } - } - - int rehash() { - HashEntry[] oldTable = table; - int oldCapacity = oldTable.length; - if (oldCapacity >= MAXIMUM_CAPACITY) { - return 0; - } - - /* - * Reclassify nodes in each list to new Map. Because we are using - * power-of-two expansion, the elements from each bin must either - * stay at same index, or move with a power of two offset. We - * eliminate unnecessary node creation by catching cases where old - * nodes can be reused because their next fields won't change. - * Statistically, at the default threshold, only about one-sixth of - * them need cloning when a table doubles. 
The nodes they replace - * will be garbage collectable as soon as they are no longer - * referenced by any reader thread that may be in the midst of - * traversing table right now. - */ - - HashEntry[] newTable = HashEntry.newArray(oldCapacity << 1); - threshold = (int) (newTable.length * loadFactor); - int sizeMask = newTable.length - 1; - int reduce = 0; - for (int i = 0; i < oldCapacity; i ++) { - // We need to guarantee that any existing reads of old Map can - // proceed. So we cannot yet null out each bin. - HashEntry e = oldTable[i]; - - if (e != null) { - HashEntry next = e.next; - int idx = e.hash & sizeMask; - - // Single node on list - if (next == null) { - newTable[idx] = e; - } else { - // Reuse trailing consecutive sequence at same slot - HashEntry lastRun = e; - int lastIdx = idx; - for (HashEntry last = next; last != null; last = last.next) { - int k = last.hash & sizeMask; - if (k != lastIdx) { - lastIdx = k; - lastRun = last; - } - } - newTable[lastIdx] = lastRun; - // Clone all remaining nodes - for (HashEntry p = e; p != lastRun; p = p.next) { - // Skip GC'd weak references - K key = p.key(); - if (key == null) { - reduce ++; - continue; - } - int k = p.hash & sizeMask; - HashEntry n = newTable[k]; - newTable[k] = newHashEntry(key, p.hash, n, p.value()); - } - } - } - } - table = newTable; - return reduce; - } - - /** - * Remove; match on key only if value null, else match both. 
- */ - V remove(Object key, int hash, Object value, boolean refRemove) { - lock(); - try { - int c = count - 1; - HashEntry[] tab = table; - int index = hash & tab.length - 1; - HashEntry first = tab[index]; - HashEntry e = first; - // a reference remove operation compares the Reference instance - while (e != null && key != e.key && - (refRemove || hash != e.hash || !keyEq(key, e.key()))) { - e = e.next; - } - - V oldValue = null; - if (e != null) { - V v = e.value(); - if (value == null || value.equals(v)) { - oldValue = v; - // All entries following removed node can stay in list, - // but all preceding ones need to be cloned. - ++ modCount; - HashEntry newFirst = e.next; - for (HashEntry p = first; p != e; p = p.next) { - K pKey = p.key(); - if (pKey == null) { // Skip GC'd keys - c --; - continue; - } - - newFirst = newHashEntry( - pKey, p.hash, newFirst, p.value()); - } - tab[index] = newFirst; - count = c; // write-volatile - } - } - return oldValue; - } finally { - unlock(); - } - } - - void clear() { - if (count != 0) { - lock(); - try { - HashEntry[] tab = table; - for (int i = 0; i < tab.length; i ++) { - tab[i] = null; - } - ++ modCount; - count = 0; // write-volatile - } finally { - unlock(); - } - } - } - } - - /* ---------------- Public operations -------------- */ - - /** - * Creates a new, empty map with the specified initial capacity, load factor - * and concurrency level. - * - * @param initialCapacity the initial capacity. The implementation performs - * internal sizing to accommodate this many elements. - * @param loadFactor the load factor threshold, used to control resizing. - * Resizing may be performed when the average number of - * elements per bin exceeds this threshold. - * @param concurrencyLevel the estimated number of concurrently updating - * threads. The implementation performs internal - * sizing to try to accommodate this many threads. 
- * @throws IllegalArgumentException if the initial capacity is negative or - * the load factor or concurrencyLevel are - * nonpositive. - */ - public ConcurrentIdentityHashMap( - int initialCapacity, float loadFactor, - int concurrencyLevel) { - if (!(loadFactor > 0) || initialCapacity < 0 || concurrencyLevel <= 0) { - throw new IllegalArgumentException(); - } - - if (concurrencyLevel > MAX_SEGMENTS) { - concurrencyLevel = MAX_SEGMENTS; - } - - // Find power-of-two sizes best matching arguments - int sshift = 0; - int ssize = 1; - while (ssize < concurrencyLevel) { - ++ sshift; - ssize <<= 1; - } - segmentShift = 32 - sshift; - segmentMask = ssize - 1; - this.segments = Segment.newArray(ssize); - - if (initialCapacity > MAXIMUM_CAPACITY) { - initialCapacity = MAXIMUM_CAPACITY; - } - int c = initialCapacity / ssize; - if (c * ssize < initialCapacity) { - ++ c; - } - int cap = 1; - while (cap < c) { - cap <<= 1; - } - - for (int i = 0; i < this.segments.length; ++ i) { - this.segments[i] = new Segment(cap, loadFactor); - } - } - - - /** - * Creates a new, empty map with the specified initial capacity and load - * factor and with the default reference types (weak keys, strong values), - * and concurrencyLevel (16). - * - * @param initialCapacity The implementation performs internal sizing to - * accommodate this many elements. - * @param loadFactor the load factor threshold, used to control resizing. - * Resizing may be performed when the average number of - * elements per bin exceeds this threshold. - * @throws IllegalArgumentException if the initial capacity of elements is - * negative or the load factor is - * nonpositive - */ - public ConcurrentIdentityHashMap(int initialCapacity, float loadFactor) { - this(initialCapacity, loadFactor, DEFAULT_CONCURRENCY_LEVEL); - } - - /** - * Creates a new, empty map with the specified initial capacity, and with - * default reference types (weak keys, strong values), load factor (0.75) - * and concurrencyLevel (16). 
- * - * @param initialCapacity the initial capacity. The implementation performs - * internal sizing to accommodate this many elements. - * @throws IllegalArgumentException if the initial capacity of elements is - * negative. - */ - public ConcurrentIdentityHashMap(int initialCapacity) { - this(initialCapacity, DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL); - } - - /** - * Creates a new, empty map with a default initial capacity (16), reference - * types (weak keys, strong values), default load factor (0.75) and - * concurrencyLevel (16). - */ - public ConcurrentIdentityHashMap() { - this(DEFAULT_INITIAL_CAPACITY, DEFAULT_LOAD_FACTOR, DEFAULT_CONCURRENCY_LEVEL); - } - - /** - * Creates a new map with the same mappings as the given map. The map is - * created with a capacity of 1.5 times the number of mappings in the given - * map or 16 (whichever is greater), and a default load factor (0.75) and - * concurrencyLevel (16). - * - * @param m the map - */ - public ConcurrentIdentityHashMap(Map m) { - this(Math.max((int) (m.size() / DEFAULT_LOAD_FACTOR) + 1, - DEFAULT_INITIAL_CAPACITY), DEFAULT_LOAD_FACTOR, - DEFAULT_CONCURRENCY_LEVEL); - putAll(m); - } - - /** - * Returns true if this map contains no key-value mappings. - * - * @return true if this map contains no key-value mappings - */ - @Override - public boolean isEmpty() { - final Segment[] segments = this.segments; - /* - * We keep track of per-segment modCounts to avoid ABA problems in which - * an element in one segment was added and in another removed during - * traversal, in which case the table was never actually empty at any - * point. Note the similar use of modCounts in the size() and - * containsValue() methods, which are the only other methods also - * susceptible to ABA problems. 
- */ - int[] mc = new int[segments.length]; - int mcsum = 0; - for (int i = 0; i < segments.length; ++ i) { - if (segments[i].count != 0) { - return false; - } else { - mcsum += mc[i] = segments[i].modCount; - } - } - // If mcsum happens to be zero, then we know we got a snapshot before - // any modifications at all were made. This is probably common enough - // to bother tracking. - if (mcsum != 0) { - for (int i = 0; i < segments.length; ++ i) { - if (segments[i].count != 0 || mc[i] != segments[i].modCount) { - return false; - } - } - } - return true; - } - - /** - * Returns the number of key-value mappings in this map. If the map contains - * more than Integer.MAX_VALUE elements, returns - * Integer.MAX_VALUE. - * - * @return the number of key-value mappings in this map - */ - @Override - public int size() { - final Segment[] segments = this.segments; - long sum = 0; - long check = 0; - int[] mc = new int[segments.length]; - // Try a few times to get accurate count. On failure due to continuous - // async changes in table, resort to locking. 
- for (int k = 0; k < RETRIES_BEFORE_LOCK; ++ k) { - check = 0; - sum = 0; - int mcsum = 0; - for (int i = 0; i < segments.length; ++ i) { - sum += segments[i].count; - mcsum += mc[i] = segments[i].modCount; - } - if (mcsum != 0) { - for (int i = 0; i < segments.length; ++ i) { - check += segments[i].count; - if (mc[i] != segments[i].modCount) { - check = -1; // force retry - break; - } - } - } - if (check == sum) { - break; - } - } - if (check != sum) { // Resort to locking all segments - sum = 0; - for (int i = 0; i < segments.length; ++ i) { - segments[i].lock(); - } - for (int i = 0; i < segments.length; ++ i) { - sum += segments[i].count; - } - for (int i = 0; i < segments.length; ++ i) { - segments[i].unlock(); - } - } - if (sum > Integer.MAX_VALUE) { - return Integer.MAX_VALUE; - } else { - return (int) sum; - } - } - - /** - * Returns the value to which the specified key is mapped, or {@code null} - * if this map contains no mapping for the key. - * - *

More formally, if this map contains a mapping from a key {@code k} to - * a value {@code v} such that {@code key.equals(k)}, then this method - * returns {@code v}; otherwise it returns {@code null}. (There can be at - * most one such mapping.) - * - * @throws NullPointerException if the specified key is null - */ - @Override - public V get(Object key) { - int hash = hashOf(key); - return segmentFor(hash).get(key, hash); - } - - /** - * Tests if the specified object is a key in this table. - * - * @param key possible key - * @return true if and only if the specified object is a key in - * this table, as determined by the equals method; - * false otherwise. - * @throws NullPointerException if the specified key is null - */ - @Override - public boolean containsKey(Object key) { - int hash = hashOf(key); - return segmentFor(hash).containsKey(key, hash); - } - - /** - * Returns true if this map maps one or more keys to the specified - * value. Note: This method requires a full internal traversal of the hash - * table, and so is much slower than method containsKey. 
- * - * @param value value whose presence in this map is to be tested - * @return true if this map maps one or more keys to the specified - * value - * @throws NullPointerException if the specified value is null - */ - - @Override - public boolean containsValue(Object value) { - if (value == null) { - throw new NullPointerException(); - } - - // See explanation of modCount use above - - final Segment[] segments = this.segments; - int[] mc = new int[segments.length]; - - // Try a few times without locking - for (int k = 0; k < RETRIES_BEFORE_LOCK; ++ k) { - int mcsum = 0; - for (int i = 0; i < segments.length; ++ i) { - mcsum += mc[i] = segments[i].modCount; - if (segments[i].containsValue(value)) { - return true; - } - } - boolean cleanSweep = true; - if (mcsum != 0) { - for (int i = 0; i < segments.length; ++ i) { - if (mc[i] != segments[i].modCount) { - cleanSweep = false; - break; - } - } - } - if (cleanSweep) { - return false; - } - } - // Resort to locking all segments - for (int i = 0; i < segments.length; ++ i) { - segments[i].lock(); - } - boolean found = false; - try { - for (int i = 0; i < segments.length; ++ i) { - if (segments[i].containsValue(value)) { - found = true; - break; - } - } - } finally { - for (int i = 0; i < segments.length; ++ i) { - segments[i].unlock(); - } - } - return found; - } - - /** - * Legacy method testing if some key maps into the specified value in this - * table. This method is identical in functionality to - * {@link #containsValue}, and exists solely to ensure full compatibility - * with class {@link java.util.Hashtable}, which supported this method prior to - * introduction of the Java Collections framework. 
- * - * @param value a value to search for - * @return true if and only if some key maps to the value - * argument in this table as determined by the equals - * method; false otherwise - * @throws NullPointerException if the specified value is null - */ - public boolean contains(Object value) { - return containsValue(value); - } - - /** - * Maps the specified key to the specified value in this table. Neither the - * key nor the value can be null. - * - *

The value can be retrieved by calling the get method with a - * key that is equal to the original key. - * - * @param key key with which the specified value is to be associated - * @param value value to be associated with the specified key - * @return the previous value associated with key, or null - * if there was no mapping for key - * @throws NullPointerException if the specified key or value is null - */ - @Override - public V put(K key, V value) { - if (value == null) { - throw new NullPointerException(); - } - int hash = hashOf(key); - return segmentFor(hash).put(key, hash, value, false); - } - - /** - * {@inheritDoc} - * - * @return the previous value associated with the specified key, or - * null if there was no mapping for the key - * @throws NullPointerException if the specified key or value is null - */ - public V putIfAbsent(K key, V value) { - if (value == null) { - throw new NullPointerException(); - } - int hash = hashOf(key); - return segmentFor(hash).put(key, hash, value, true); - } - - /** - * Copies all of the mappings from the specified map to this one. These - * mappings replace any mappings that this map had for any of the keys - * currently in the specified map. - * - * @param m mappings to be stored in this map - */ - @Override - public void putAll(Map m) { - for (Entry e: m.entrySet()) { - put(e.getKey(), e.getValue()); - } - } - - /** - * Removes the key (and its corresponding value) from this map. This method - * does nothing if the key is not in the map. 
- * - * @param key the key that needs to be removed - * @return the previous value associated with key, or null - * if there was no mapping for key - * @throws NullPointerException if the specified key is null - */ - @Override - public V remove(Object key) { - int hash = hashOf(key); - return segmentFor(hash).remove(key, hash, null, false); - } - - /** - * {@inheritDoc} - * - * @throws NullPointerException if the specified key is null - */ - public boolean remove(Object key, Object value) { - int hash = hashOf(key); - if (value == null) { - return false; - } - return segmentFor(hash).remove(key, hash, value, false) != null; - } - - /** - * {@inheritDoc} - * - * @throws NullPointerException if any of the arguments are null - */ - public boolean replace(K key, V oldValue, V newValue) { - if (oldValue == null || newValue == null) { - throw new NullPointerException(); - } - int hash = hashOf(key); - return segmentFor(hash).replace(key, hash, oldValue, newValue); - } - - /** - * {@inheritDoc} - * - * @return the previous value associated with the specified key, or - * null if there was no mapping for the key - * @throws NullPointerException if the specified key or value is null - */ - public V replace(K key, V value) { - if (value == null) { - throw new NullPointerException(); - } - int hash = hashOf(key); - return segmentFor(hash).replace(key, hash, value); - } - - /** - * Removes all of the mappings from this map. - */ - @Override - public void clear() { - for (int i = 0; i < segments.length; ++ i) { - segments[i].clear(); - } - } - - /** - * Returns a {@link java.util.Set} view of the keys contained in this map. The set is - * backed by the map, so changes to the map are reflected in the set, and - * vice-versa. The set supports element removal, which removes the - * corresponding mapping from this map, via the Iterator.remove, - * Set.remove, removeAll, retainAll, and - * clear operations. It does not support the add or - * addAll operations. - * - *

The view's iterator is a "weakly consistent" iterator that - * will never throw {@link java.util.ConcurrentModificationException}, and guarantees - * to traverse elements as they existed upon construction of the iterator, - * and may (but is not guaranteed to) reflect any modifications subsequent - * to construction. - */ - @Override - public Set keySet() { - Set ks = keySet; - return ks != null? ks : (keySet = new KeySet()); - } - - /** - * Returns a {@link java.util.Collection} view of the values contained in this map. - * The collection is backed by the map, so changes to the map are reflected - * in the collection, and vice-versa. The collection supports element - * removal, which removes the corresponding mapping from this map, via the - * Iterator.remove, Collection.remove, removeAll, - * retainAll, and clear operations. It does not support - * the add or addAll operations. - * - *

The view's iterator is a "weakly consistent" iterator that - * will never throw {@link java.util.ConcurrentModificationException}, and guarantees - * to traverse elements as they existed upon construction of the iterator, - * and may (but is not guaranteed to) reflect any modifications subsequent - * to construction. - */ - @Override - public Collection values() { - Collection vs = values; - return vs != null? vs : (values = new Values()); - } - - /** - * Returns a {@link java.util.Set} view of the mappings contained in this map. - * The set is backed by the map, so changes to the map are reflected in the - * set, and vice-versa. The set supports element removal, which removes the - * corresponding mapping from the map, via the Iterator.remove, - * Set.remove, removeAll, retainAll, and - * clear operations. It does not support the add or - * addAll operations. - * - *

The view's iterator is a "weakly consistent" iterator that - * will never throw {@link java.util.ConcurrentModificationException}, and guarantees - * to traverse elements as they existed upon construction of the iterator, - * and may (but is not guaranteed to) reflect any modifications subsequent - * to construction. - */ - @Override - public Set> entrySet() { - Set> es = entrySet; - return es != null? es : (entrySet = new EntrySet()); - } - - /** - * Returns an enumeration of the keys in this table. - * - * @return an enumeration of the keys in this table - * @see #keySet() - */ - public Enumeration keys() { - return new KeyIterator(); - } - - /** - * Returns an enumeration of the values in this table. - * - * @return an enumeration of the values in this table - * @see #values() - */ - public Enumeration elements() { - return new ValueIterator(); - } - - /* ---------------- Iterator Support -------------- */ - - abstract class HashIterator { - int nextSegmentIndex; - int nextTableIndex; - HashEntry[] currentTable; - HashEntry nextEntry; - HashEntry lastReturned; - K currentKey; // Strong reference to weak key (prevents gc) - - HashIterator() { - nextSegmentIndex = segments.length - 1; - nextTableIndex = -1; - advance(); - } - - public void rewind() { - nextSegmentIndex = segments.length - 1; - nextTableIndex = -1; - currentTable = null; - nextEntry = null; - lastReturned = null; - currentKey = null; - advance(); - } - - public boolean hasMoreElements() { - return hasNext(); - } - - final void advance() { - if (nextEntry != null && (nextEntry = nextEntry.next) != null) { - return; - } - - while (nextTableIndex >= 0) { - if ((nextEntry = currentTable[nextTableIndex --]) != null) { - return; - } - } - - while (nextSegmentIndex >= 0) { - Segment seg = segments[nextSegmentIndex --]; - if (seg.count != 0) { - currentTable = seg.table; - for (int j = currentTable.length - 1; j >= 0; -- j) { - if ((nextEntry = currentTable[j]) != null) { - nextTableIndex = j - 1; - 
return; - } - } - } - } - } - - public boolean hasNext() { - while (nextEntry != null) { - if (nextEntry.key() != null) { - return true; - } - advance(); - } - - return false; - } - - HashEntry nextEntry() { - do { - if (nextEntry == null) { - throw new NoSuchElementException(); - } - - lastReturned = nextEntry; - currentKey = lastReturned.key(); - advance(); - } while (currentKey == null); // Skip GC'd keys - - return lastReturned; - } - - public void remove() { - if (lastReturned == null) { - throw new IllegalStateException(); - } - ConcurrentIdentityHashMap.this.remove(currentKey); - lastReturned = null; - } - } - - final class KeyIterator - extends HashIterator implements ReusableIterator, Enumeration { - - public K next() { - return super.nextEntry().key(); - } - - public K nextElement() { - return super.nextEntry().key(); - } - } - - final class ValueIterator - extends HashIterator implements ReusableIterator, Enumeration { - - public V next() { - return super.nextEntry().value(); - } - - public V nextElement() { - return super.nextEntry().value(); - } - } - - /* - * This class is needed for JDK5 compatibility. - */ - static class SimpleEntry implements Entry { - - private static final long serialVersionUID = -8144765946475398746L; - - private final K key; - - private V value; - - public SimpleEntry(K key, V value) { - this.key = key; - this.value = value; - - } - - public SimpleEntry(Entry entry) { - this.key = entry.getKey(); - this.value = entry.getValue(); - - } - - public K getKey() { - return key; - } - - public V getValue() { - return value; - } - - public V setValue(V value) { - V oldValue = this.value; - this.value = value; - return oldValue; - } - - @Override - public boolean equals(Object o) { - if (!(o instanceof Entry)) { - return false; - } - @SuppressWarnings("rawtypes") - Entry e = (Entry) o; - return eq(key, e.getKey()) && eq(value, e.getValue()); - } - - @Override - public int hashCode() { - return (key == null? 
0 : key.hashCode()) ^ (value == null? 0 : value.hashCode()); - } - - @Override - public String toString() { - return key + "=" + value; - } - - private static boolean eq(Object o1, Object o2) { - return o1 == null? o2 == null : o1.equals(o2); - } - } - - /** - * Custom Entry class used by EntryIterator.next(), that relays setValue - * changes to the underlying map. - */ - final class WriteThroughEntry extends SimpleEntry { - - WriteThroughEntry(K k, V v) { - super(k, v); - } - - /** - * Set our entry's value and write through to the map. The value to - * return is somewhat arbitrary here. Since a WriteThroughEntry does not - * necessarily track asynchronous changes, the most recent "previous" - * value could be different from what we return (or could even have been - * removed in which case the put will re-establish). We do not and can - * not guarantee more. - */ - @Override - public V setValue(V value) { - - if (value == null) { - throw new NullPointerException(); - } - V v = super.setValue(value); - ConcurrentIdentityHashMap.this.put(getKey(), value); - return v; - } - - } - - final class EntryIterator extends HashIterator implements - ReusableIterator> { - public Entry next() { - HashEntry e = super.nextEntry(); - return new WriteThroughEntry(e.key(), e.value()); - } - } - - final class KeySet extends AbstractSet { - @Override - public Iterator iterator() { - - return new KeyIterator(); - } - - @Override - public int size() { - return ConcurrentIdentityHashMap.this.size(); - } - - @Override - public boolean isEmpty() { - return ConcurrentIdentityHashMap.this.isEmpty(); - } - - @Override - public boolean contains(Object o) { - return ConcurrentIdentityHashMap.this.containsKey(o); - } - - @Override - public boolean remove(Object o) { - return ConcurrentIdentityHashMap.this.remove(o) != null; - - } - - @Override - public void clear() { - ConcurrentIdentityHashMap.this.clear(); - } - } - - final class Values extends AbstractCollection { - @Override - public 
Iterator iterator() { - return new ValueIterator(); - } - - @Override - public int size() { - return ConcurrentIdentityHashMap.this.size(); - } - - @Override - public boolean isEmpty() { - return ConcurrentIdentityHashMap.this.isEmpty(); - } - - @Override - public boolean contains(Object o) { - return ConcurrentIdentityHashMap.this.containsValue(o); - } - - @Override - public void clear() { - ConcurrentIdentityHashMap.this.clear(); - } - } - - final class EntrySet extends AbstractSet> { - @Override - public Iterator> iterator() { - return new EntryIterator(); - } - - @Override - public boolean contains(Object o) { - if (!(o instanceof Entry)) { - return false; - } - Entry e = (Entry) o; - V v = ConcurrentIdentityHashMap.this.get(e.getKey()); - return v != null && v.equals(e.getValue()); - } - - @Override - public boolean remove(Object o) { - if (!(o instanceof Entry)) { - return false; - } - Entry e = (Entry) o; - return ConcurrentIdentityHashMap.this.remove(e.getKey(), e.getValue()); - } - - @Override - public int size() { - return ConcurrentIdentityHashMap.this.size(); - } - - @Override - public boolean isEmpty() { - return ConcurrentIdentityHashMap.this.isEmpty(); - } - - @Override - public void clear() { - ConcurrentIdentityHashMap.this.clear(); - } - } -} diff --git a/akka-actor/src/main/java/akka/util/internal/HashedWheelTimer.java b/akka-actor/src/main/java/akka/util/internal/HashedWheelTimer.java index 73b3cf143d..c0c38887a3 100644 --- a/akka-actor/src/main/java/akka/util/internal/HashedWheelTimer.java +++ b/akka-actor/src/main/java/akka/util/internal/HashedWheelTimer.java @@ -20,10 +20,13 @@ import java.util.Collections; import java.util.HashSet; import java.util.List; import java.util.Set; +import java.util.Iterator; import java.util.concurrent.ThreadFactory; +import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.locks.ReadWriteLock; import java.util.concurrent.locks.ReentrantReadWriteLock; +import akka.dispatch.SystemMessage; import 
akka.util.Helpers; import scala.concurrent.duration.Duration; import scala.concurrent.duration.FiniteDuration; @@ -89,7 +92,6 @@ public class HashedWheelTimer implements Timer { boolean shutdown = false; final long tickDuration; final Set[] wheel; - final ReusableIterator[] iterators; final int mask; final ReadWriteLock lock = new ReentrantReadWriteLock(); volatile int wheelCursor; @@ -127,7 +129,6 @@ public class HashedWheelTimer implements Timer { // Normalize ticksPerWheel to power of two and initialize the wheel. wheel = createWheel(ticksPerWheel); - iterators = createIterators(wheel); mask = wheel.length - 1; // Convert to standardized tickDuration @@ -152,20 +153,11 @@ public class HashedWheelTimer implements Timer { final Set[] wheel = new Set[normalizeTicksPerWheel(ticksPerWheel)]; for (int i = 0; i < wheel.length; i ++) { - wheel[i] = Collections.newSetFromMap(new ConcurrentIdentityHashMap(16, 0.95f, 4)); + wheel[i] = Collections.newSetFromMap(new ConcurrentHashMap(16, 0.95f, 4)); } return wheel; } - @SuppressWarnings("unchecked") - private static ReusableIterator[] createIterators(Set[] wheel) { - ReusableIterator[] iterators = new ReusableIterator[wheel.length]; - for (int i = 0; i < wheel.length; i ++) { - iterators[i] = (ReusableIterator) wheel[i].iterator(); - } - return iterators; - } - private static int normalizeTicksPerWheel(int ticksPerWheel) { int normalizedTicksPerWheel = 1; while (normalizedTicksPerWheel < ticksPerWheel) { @@ -268,6 +260,8 @@ public class HashedWheelTimer implements Timer { // one tick early; that shouldn’t matter since we’re talking 270 years here if (relativeIndex < 0) relativeIndex = delay / tickDuration; if (relativeIndex == 0) relativeIndex = 1; + // if an integral number of wheel rotations, schedule one tick earlier + if ((relativeIndex & mask) == 0) relativeIndex--; final long remainingRounds = relativeIndex / wheel.length; // Add the timeout to the wheel. 
@@ -321,16 +315,16 @@ public class HashedWheelTimer implements Timer { lock.writeLock().lock(); try { final int newWheelCursor = wheelCursor = wheelCursor + 1 & mask; - return fetchExpiredTimeouts(iterators[newWheelCursor], deadline); + return fetchExpiredTimeouts(wheel[newWheelCursor], deadline); } finally { lock.writeLock().unlock(); } } - private ArrayList fetchExpiredTimeouts(final ReusableIterator i, final long deadline) { + private ArrayList fetchExpiredTimeouts(final Iterable it, final long deadline) { final ArrayList expiredTimeouts = new ArrayList(); List slipped = null; - i.rewind(); + Iterator i = it.iterator(); while (i.hasNext()) { HashedWheelTimeout timeout = i.next(); if (timeout.remainingRounds <= 0) { @@ -455,10 +449,11 @@ public class HashedWheelTimer implements Timer { return Unsafe.instance.compareAndSwapInt(this, _stateOffset, old, future); } - public void cancel() { + public boolean cancel() { if (updateState(ST_INIT, ST_CANCELLED)) { parent.wheel[stopIndex].remove(this); - } + return true; + } else return false; } public boolean isCancelled() { @@ -481,6 +476,14 @@ public class HashedWheelTimer implements Timer { } } + @Override public final int hashCode() { + return System.identityHashCode(this); + } + + @Override public final boolean equals(final Object that) { + return this == that; + } + @Override public String toString() { final long currentTime = System.nanoTime(); diff --git a/akka-actor/src/main/java/akka/util/internal/ReusableIterator.java b/akka-actor/src/main/java/akka/util/internal/ReusableIterator.java deleted file mode 100644 index 8c8e5e50e5..0000000000 --- a/akka-actor/src/main/java/akka/util/internal/ReusableIterator.java +++ /dev/null @@ -1,27 +0,0 @@ -/* - * Copyright 2009 Red Hat, Inc. - * - * Red Hat licenses this file to you under the Apache License, version 2.0 - * (the "License"); you may not use this file except in compliance with the - * License. 
You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - */ -package akka.util.internal; - -import java.util.Iterator; - -/** - * @author The Netty Project - * @author Trustin Lee - * @version $Rev: 2080 $, $Date: 2010-01-26 18:04:19 +0900 (Tue, 26 Jan 2010) $ - */ -public interface ReusableIterator extends Iterator { - void rewind(); -} diff --git a/akka-actor/src/main/java/akka/util/internal/Timeout.java b/akka-actor/src/main/java/akka/util/internal/Timeout.java index a03534bb8d..b417796bfc 100644 --- a/akka-actor/src/main/java/akka/util/internal/Timeout.java +++ b/akka-actor/src/main/java/akka/util/internal/Timeout.java @@ -51,6 +51,10 @@ public interface Timeout { * Cancels the {@link TimerTask} associated with this handle. It the * task has been executed or cancelled already, it will return with no * side effect. + * + * @return whether the caller was the one who actually cancelled this + * timeout (there can be at most one; never returns true if the Timeout + * expired) */ - void cancel(); + boolean cancel(); } diff --git a/akka-actor/src/main/resources/reference.conf b/akka-actor/src/main/resources/reference.conf index aeee89a65f..0039320631 100644 --- a/akka-actor/src/main/resources/reference.conf +++ b/akka-actor/src/main/resources/reference.conf @@ -364,7 +364,31 @@ akka { # ticks per wheel. # For more information see: http://www.jboss.org/netty/ tick-duration = 100ms + + # The timer uses a circular wheel of buckets to store the timer tasks. 
+ # This should be set such that the majority of scheduled timeouts (for high + # scheduling frequency) will be shorter than one rotation of the wheel + # (ticks-per-wheel * tick-duration) + # THIS MUST BE A POWER OF TWO! ticks-per-wheel = 512 + + # This setting selects the timer implementation which shall be loaded at + # system start-up. Built-in choices are: + # - akka.actor.DefaultScheduler (HWT) + # - akka.actor.LightArrayRevolverScheduler + # (to be benchmarked and evaluated) + # The class given here must implement the akka.actor.Scheduler interface + # and offer a constructor which takes three arguments: + # 1) com.typesafe.config.Config + # 2) akka.event.LoggingAdapter + # 3) java.util.concurrent.ThreadFactory + implementation = akka.actor.LightArrayRevolverScheduler + + # When shutting down the scheduler, there will typically be a thread which + # needs to be stopped, and this timeout determines how long to wait for + # that to happen. In case of timeout the shutdown of the actor system will + # proceed without running possibly still enqueued tasks. + shutdown-timeout = 5s } io { diff --git a/akka-actor/src/main/scala/akka/AkkaException.scala b/akka-actor/src/main/scala/akka/AkkaException.scala index ae245e3591..3cec55f1c1 100644 --- a/akka-actor/src/main/scala/akka/AkkaException.scala +++ b/akka-actor/src/main/scala/akka/AkkaException.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka @@ -14,10 +14,6 @@ package akka @SerialVersionUID(1L) class AkkaException(message: String, cause: Throwable) extends RuntimeException(message, cause) with Serializable { def this(msg: String) = this(msg, null) - - lazy val uuid: String = java.util.UUID.randomUUID().toString - - override def toString(): String = uuid + super.toString() } /** diff --git a/akka-actor/src/main/scala/akka/actor/Actor.scala b/akka-actor/src/main/scala/akka/actor/Actor.scala index 3706799f23..a665d1029b 100644 --- a/akka-actor/src/main/scala/akka/actor/Actor.scala +++ b/akka-actor/src/main/scala/akka/actor/Actor.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.actor diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index 51f11c044c..387775ab7b 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.actor @@ -15,6 +15,7 @@ import akka.dispatch.{ Watch, Unwatch, Terminate, SystemMessage, Suspend, Superv import akka.event.Logging.{ LogEvent, Debug, Error } import akka.japi.Procedure import akka.dispatch.NullMessage +import scala.concurrent.ExecutionContext /** * The actor context - the view of the actor cell from the actor. @@ -119,7 +120,7 @@ trait ActorContext extends ActorRefFactory { * Returns the dispatcher (MessageDispatcher) that is used for this Actor. * Importing this member will place a implicit MessageDispatcher in scope. */ - implicit def dispatcher: MessageDispatcher + implicit def dispatcher: ExecutionContext /** * The system that the actor belongs to. @@ -214,19 +215,19 @@ private[akka] trait Cell { */ def start(): this.type /** - * Recursively suspend this actor and all its children. Must not throw exceptions. 
+ * Recursively suspend this actor and all its children. Is only allowed to throw Fatal Throwables. */ def suspend(): Unit /** - * Recursively resume this actor and all its children. Must not throw exceptions. + * Recursively resume this actor and all its children. Is only allowed to throw Fatal Throwables. */ def resume(causedByFailure: Throwable): Unit /** - * Restart this actor (will recursively restart or stop all children). Must not throw exceptions. + * Restart this actor (will recursively restart or stop all children). Is only allowed to throw Fatal Throwables. */ def restart(cause: Throwable): Unit /** - * Recursively terminate this actor and all its children. Must not throw exceptions. + * Recursively terminate this actor and all its children. Is only allowed to throw Fatal Throwables. */ def stop(): Unit /** @@ -246,16 +247,26 @@ private[akka] trait Cell { * Get the stats for the named child, if that exists. */ def getChildByName(name: String): Option[ChildStats] + /** * Enqueue a message to be sent to the actor; may or may not actually * schedule the actor to run, depending on which type of cell it is. - * Must not throw exceptions. + * Is only allowed to throw Fatal Throwables. */ - def tell(message: Any, sender: ActorRef): Unit + def sendMessage(msg: Envelope): Unit + /** * Enqueue a message to be sent to the actor; may or may not actually * schedule the actor to run, depending on which type of cell it is. - * Must not throw exceptions. + * Is only allowed to throw Fatal Throwables. + */ + final def sendMessage(message: Any, sender: ActorRef): Unit = + sendMessage(Envelope(message, sender, system)) + + /** + * Enqueue a message to be sent to the actor; may or may not actually + * schedule the actor to run, depending on which type of cell it is. + * Is only allowed to throw Fatal Throwables. 
*/ def sendSystemMessage(msg: SystemMessage): Unit /** @@ -286,8 +297,8 @@ private[akka] object ActorCell { } final val emptyCancellable: Cancellable = new Cancellable { - def isCancelled = false - def cancel() {} + def isCancelled: Boolean = false + def cancel(): Boolean = false } final val emptyBehaviorStack: List[Actor.Receive] = Nil @@ -392,35 +403,22 @@ private[akka] class ActorCell( checkReceiveTimeout // Reschedule receive timeout } - def autoReceiveMessage(msg: Envelope): Unit = if (msg.message != NullMessage) { - if (system.settings.DebugAutoReceive) - publish(Debug(self.path.toString, clazz(actor), "received AutoReceiveMessage " + msg)) + def autoReceiveMessage(msg: Envelope): Unit = + if (msg.message != NullMessage) { + if (system.settings.DebugAutoReceive) + publish(Debug(self.path.toString, clazz(actor), "received AutoReceiveMessage " + msg)) - msg.message match { - case Failed(cause, uid) ⇒ handleFailure(sender, cause, uid) - case t: Terminated ⇒ - if (t.addressTerminated) removeChildWhenToAddressTerminated(t.actor) - watchedActorTerminated(t) - case AddressTerminated(address) ⇒ addressTerminated(address) - case Kill ⇒ throw new ActorKilledException("Kill") - case PoisonPill ⇒ self.stop() - case SelectParent(m) ⇒ parent.tell(m, msg.sender) - case SelectChildName(name, m) ⇒ getChildByName(name) match { case Some(c: ChildRestartStats) ⇒ c.child.tell(m, msg.sender); case _ ⇒ } - case SelectChildPattern(p, m) ⇒ for (c ← children if p.matcher(c.path.name).matches) c.tell(m, msg.sender) + msg.message match { + case Failed(cause, uid) ⇒ handleFailure(sender, cause, uid) + case t: Terminated ⇒ watchedActorTerminated(t) + case AddressTerminated(address) ⇒ addressTerminated(address) + case Kill ⇒ throw new ActorKilledException("Kill") + case PoisonPill ⇒ self.stop() + case SelectParent(m) ⇒ parent.tell(m, msg.sender) + case SelectChildName(name, m) ⇒ getChildByName(name) match { case Some(c: ChildRestartStats) ⇒ c.child.tell(m, msg.sender); case _ ⇒ } + case 
SelectChildPattern(p, m) ⇒ for (c ← children if p.matcher(c.path.name).matches) c.tell(m, msg.sender) + } } - } - - /** - * When a parent is watching a child and it terminates due to AddressTerminated, - * it should be removed to support immediate creation of child with same name. - * - * For remote deployed actors ChildTerminated should be sent to the supervisor - * to clean up child references of remote deployed actors when remote node - * goes down, i.e. triggered by AddressTerminated, but that is the responsibility - * of the ActorRefProvider to handle that scenario. - */ - private def removeChildWhenToAddressTerminated(child: ActorRef): Unit = - childrenRefs.getByRef(child) foreach { crs ⇒ removeChildAndGetStateChange(crs.child) } final def receiveMessage(msg: Any): Unit = behaviorStack.head.applyOrElse(msg, actor.unhandled) @@ -497,16 +495,17 @@ private[akka] class ActorCell( } } - private def supervise(child: ActorRef, async: Boolean, uid: Int): Unit = if (!isTerminating) { - // Supervise is the first thing we get from a new child, so store away the UID for later use in handleFailure() - initChild(child) match { - case Some(crs) ⇒ - crs.uid = uid - handleSupervise(child, async) - if (system.settings.DebugLifecycle) publish(Debug(self.path.toString, clazz(actor), "now supervising " + child)) - case None ⇒ publish(Error(self.path.toString, clazz(actor), "received Supervise from unregistered child " + child + ", this will not end well")) + private def supervise(child: ActorRef, async: Boolean, uid: Int): Unit = + if (!isTerminating) { + // Supervise is the first thing we get from a new child, so store away the UID for later use in handleFailure() + initChild(child) match { + case Some(crs) ⇒ + crs.uid = uid + handleSupervise(child, async) + if (system.settings.DebugLifecycle) publish(Debug(self.path.toString, clazz(actor), "now supervising " + child)) + case None ⇒ publish(Error(self.path.toString, clazz(actor), "received Supervise from unregistered child " + 
child + ", this will not end well")) + } } - } // future extension point protected def handleSupervise(child: ActorRef, async: Boolean): Unit = child match { diff --git a/akka-actor/src/main/scala/akka/actor/ActorDSL.scala b/akka-actor/src/main/scala/akka/actor/ActorDSL.scala index bee50ff78e..b229795736 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorDSL.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorDSL.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.actor @@ -23,7 +23,7 @@ import java.util.concurrent.TimeUnit * * {{{ * import ActorDSL._ - * import concurrent.util.duration._ + * import scala.concurrent.util.duration._ * * implicit val system: ActorSystem = ... * diff --git a/akka-actor/src/main/scala/akka/actor/ActorPath.scala b/akka-actor/src/main/scala/akka/actor/ActorPath.scala index 4cb61d2212..4597d87f9c 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorPath.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorPath.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.actor import scala.annotation.tailrec diff --git a/akka-actor/src/main/scala/akka/actor/ActorRef.scala b/akka-actor/src/main/scala/akka/actor/ActorRef.scala index a6685ae549..82bf2c34ff 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRef.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRef.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.actor @@ -350,7 +350,7 @@ private[akka] class LocalActorRef private[akka] ( override def sendSystemMessage(message: SystemMessage): Unit = actorCell.sendSystemMessage(message) - override def !(message: Any)(implicit sender: ActorRef = Actor.noSender): Unit = actorCell.tell(message, sender) + override def !(message: Any)(implicit sender: ActorRef = Actor.noSender): Unit = actorCell.sendMessage(message, sender) override def restart(cause: Throwable): Unit = actorCell.restart(cause) @@ -446,7 +446,10 @@ private[akka] class EmptyLocalActorRef(override val provider: ActorRefProvider, override def isTerminated(): Boolean = true - override def sendSystemMessage(message: SystemMessage): Unit = specialHandle(message) + override def sendSystemMessage(message: SystemMessage): Unit = { + if (Mailbox.debug) println(s"ELAR $path having enqueued $message") + specialHandle(message) + } override def !(message: Any)(implicit sender: ActorRef = Actor.noSender): Unit = message match { case d: DeadLetter ⇒ @@ -478,7 +481,8 @@ private[akka] class DeadLetterActorRef(_provider: ActorRefProvider, override def !(message: Any)(implicit sender: ActorRef = this): Unit = message match { case d: DeadLetter ⇒ if (!specialHandle(d.message)) eventStream.publish(d) - case _ ⇒ if (!specialHandle(message)) eventStream.publish(DeadLetter(message, sender, this)) + case _ ⇒ if (!specialHandle(message)) + eventStream.publish(DeadLetter(message, if (sender eq Actor.noSender) provider.deadLetters else sender, this)) } override protected def specialHandle(msg: Any): Boolean = msg match { diff --git a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala index 5a3bb7dac2..51a54abdfc 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.actor @@ -11,8 +11,8 @@ import akka.util.{ Switch, Helpers } import akka.japi.Util.immutableSeq import akka.util.Collections.EmptyImmutableSeq import scala.util.{ Success, Failure } -import scala.concurrent.{ Future, Promise } import java.util.concurrent.atomic.AtomicLong +import scala.concurrent.{ ExecutionContext, Future, Promise } /** * Interface for all ActorRef providers to implement. @@ -51,8 +51,8 @@ trait ActorRefProvider { */ def settings: ActorSystem.Settings - //FIXME WHY IS THIS HERE? - def dispatcher: MessageDispatcher + //FIXME Only here because of AskSupport, should be dealt with + def dispatcher: ExecutionContext /** * Initialization of an ActorRefProvider happens in two steps: first @@ -169,7 +169,7 @@ trait ActorRefFactory { /** * Returns the default MessageDispatcher associated with this ActorRefFactory */ - implicit def dispatcher: MessageDispatcher + implicit def dispatcher: ExecutionContext /** * Father of all children created by this interface. @@ -326,13 +326,15 @@ private[akka] object SystemGuardian { * * Depending on this class is not supported, only the [[ActorRefProvider]] interface is supported. 
*/ -class LocalActorRefProvider( +class LocalActorRefProvider private[akka] ( _systemName: String, override val settings: ActorSystem.Settings, val eventStream: EventStream, override val scheduler: Scheduler, val dynamicAccess: DynamicAccess, - override val deployer: Deployer) extends ActorRefProvider { + override val deployer: Deployer, + _deadLetters: Option[ActorPath ⇒ InternalActorRef]) + extends ActorRefProvider { // this is the constructor needed for reflectively instantiating the provider def this(_systemName: String, @@ -345,13 +347,15 @@ class LocalActorRefProvider( eventStream, scheduler, dynamicAccess, - new Deployer(settings, dynamicAccess)) + new Deployer(settings, dynamicAccess), + None) override val rootPath: ActorPath = RootActorPath(Address("akka", _systemName)) private[akka] val log: LoggingAdapter = Logging(eventStream, "LocalActorRefProvider(" + rootPath.address + ")") - override val deadLetters: InternalActorRef = new DeadLetterActorRef(this, rootPath / "deadLetters", eventStream) + override val deadLetters: InternalActorRef = + _deadLetters.getOrElse((p: ActorPath) ⇒ new DeadLetterActorRef(this, p, eventStream)).apply(rootPath / "deadLetters") /* * generate name for temporary actor refs @@ -382,7 +386,7 @@ class LocalActorRefProvider( override def isTerminated: Boolean = stopped.isOn override def !(message: Any)(implicit sender: ActorRef = Actor.noSender): Unit = stopped.ifOff(message match { - case Failed(ex, _) if sender ne null ⇒ causeOfTermination = Some(ex); sender.asInstanceOf[InternalActorRef].stop() + case Failed(ex, _) if sender ne null ⇒ { causeOfTermination = Some(ex); sender.asInstanceOf[InternalActorRef].stop() } case NullMessage ⇒ // do nothing case _ ⇒ log.error(this + " received unexpected message [" + message + "]") }) @@ -449,10 +453,11 @@ class LocalActorRefProvider( stopWhenAllTerminationHooksDone() } - def stopWhenAllTerminationHooksDone(): Unit = if (terminationHooks.isEmpty) { - eventStream.stopDefaultLoggers() - 
context.stop(self) - } + def stopWhenAllTerminationHooksDone(): Unit = + if (terminationHooks.isEmpty) { + eventStream.stopDefaultLoggers() + context.stop(self) + } // guardian MUST NOT lose its children during restart override def preRestart(cause: Throwable, msg: Option[Any]) {} @@ -468,7 +473,7 @@ class LocalActorRefProvider( @volatile private var system: ActorSystemImpl = _ - def dispatcher: MessageDispatcher = system.dispatcher + def dispatcher: ExecutionContext = system.dispatcher lazy val terminationPromise: Promise[Unit] = Promise[Unit]() @@ -549,6 +554,7 @@ class LocalActorRefProvider( def init(_system: ActorSystemImpl) { system = _system + rootGuardian.start() // chain death watchers so that killing guardian stops the application systemGuardian.sendSystemMessage(Watch(guardian, systemGuardian)) rootGuardian.sendSystemMessage(Watch(systemGuardian, rootGuardian)) diff --git a/akka-actor/src/main/scala/akka/actor/ActorSelection.scala b/akka-actor/src/main/scala/akka/actor/ActorSelection.scala index e329af556b..3aeee6c1d1 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorSelection.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorSelection.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.actor @@ -70,4 +70,4 @@ trait ScalaActorSelection { this: ActorSelection ⇒ def !(msg: Any)(implicit sender: ActorRef = Actor.noSender) = tell(msg, sender) -} \ No newline at end of file +} diff --git a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala index 45025f1887..d0139af91d 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala @@ -1,25 +1,24 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.actor +import java.io.Closeable +import java.util.concurrent.{ ConcurrentHashMap, ThreadFactory, CountDownLatch, TimeoutException, RejectedExecutionException } +import java.util.concurrent.TimeUnit.MILLISECONDS +import com.typesafe.config.{ Config, ConfigFactory } import akka.event._ import akka.dispatch._ import akka.japi.Util.immutableSeq -import com.typesafe.config.{ Config, ConfigFactory } +import akka.actor.dungeon.ChildrenContainer +import akka.util._ import scala.annotation.tailrec import scala.collection.immutable import scala.concurrent.duration.{ FiniteDuration, Duration } -import scala.concurrent.{ Await, Awaitable, CanAwait, Future } +import scala.concurrent.{ Await, Awaitable, CanAwait, Future, ExecutionContext } import scala.util.{ Failure, Success } -import scala.util.control.NonFatal -import akka.util._ -import java.io.Closeable -import akka.util.internal.{ HashedWheelTimer, ConcurrentIdentityHashMap } -import java.util.concurrent.{ ThreadFactory, CountDownLatch, TimeoutException, RejectedExecutionException } -import java.util.concurrent.TimeUnit.MILLISECONDS -import akka.actor.dungeon.ChildrenContainer +import scala.util.control.{ NonFatal, ControlThrowable } object ActorSystem { @@ -161,8 +160,7 @@ object ActorSystem { case x ⇒ Some(x) } - final val SchedulerTickDuration: FiniteDuration = Duration(getMilliseconds("akka.scheduler.tick-duration"), MILLISECONDS) - final val SchedulerTicksPerWheel: Int = getInt("akka.scheduler.ticks-per-wheel") + final val SchedulerClass: String = getString("akka.scheduler.implementation") final val Daemonicity: Boolean = getBoolean("akka.daemonic") final val JvmExitOnFatalError: Boolean = getBoolean("akka.jvm-exit-on-fatal-error") @@ -320,7 +318,7 @@ abstract class ActorSystem extends ActorRefFactory { * explicitly. * Importing this member will place the default MessageDispatcher in scope. 
*/ - implicit def dispatcher: MessageDispatcher + implicit def dispatcher: ExecutionContext /** * Register a block of code (callback) to run after ActorSystem.shutdown has been issued and @@ -465,7 +463,7 @@ private[akka] class ActorSystemImpl(val name: String, applicationConfig: Config, new Thread.UncaughtExceptionHandler() { def uncaughtException(thread: Thread, cause: Throwable): Unit = { cause match { - case NonFatal(_) | _: InterruptedException ⇒ log.error(cause, "Uncaught error from thread [{}]", thread.getName) + case NonFatal(_) | _: InterruptedException | _: NotImplementedError | _: ControlThrowable ⇒ log.error(cause, "Uncaught error from thread [{}]", thread.getName) case _ ⇒ if (settings.JvmExitOnFatalError) { try { @@ -566,7 +564,7 @@ private[akka] class ActorSystemImpl(val name: String, applicationConfig: Config, val dispatchers: Dispatchers = new Dispatchers(settings, DefaultDispatcherPrerequisites( threadFactory, eventStream, deadLetterMailbox, scheduler, dynamicAccess, settings)) - val dispatcher: MessageDispatcher = dispatchers.defaultGlobalDispatcher + val dispatcher: ExecutionContext = dispatchers.defaultGlobalDispatcher def terminationFuture: Future[Unit] = provider.terminationFuture def lookupRoot: InternalActorRef = provider.rootGuardian @@ -601,6 +599,7 @@ private[akka] class ActorSystemImpl(val name: String, applicationConfig: Config, def shutdown(): Unit = guardian.stop() + //#create-scheduler /** * Create the scheduler service. This one needs one special behavior: if * Closeable, it MUST execute all outstanding tasks upon .close() in order @@ -611,12 +610,11 @@ private[akka] class ActorSystemImpl(val name: String, applicationConfig: Config, * executed upon close(), the task may execute before its timeout. 
*/ protected def createScheduler(): Scheduler = - new DefaultScheduler( - new HashedWheelTimer(log, - threadFactory.withName(threadFactory.name + "-scheduler"), - settings.SchedulerTickDuration, - settings.SchedulerTicksPerWheel), - log) + dynamicAccess.createInstanceFor[Scheduler](settings.SchedulerClass, immutable.Seq( + classOf[Config] -> settings.config, + classOf[LoggingAdapter] -> log, + classOf[ThreadFactory] -> threadFactory.withName(threadFactory.name + "-scheduler"))).get + //#create-scheduler /* * This is called after the last actor has signaled its termination, i.e. @@ -628,15 +626,17 @@ private[akka] class ActorSystemImpl(val name: String, applicationConfig: Config, case _ ⇒ } - private val extensions = new ConcurrentIdentityHashMap[ExtensionId[_], AnyRef] + private val extensions = new ConcurrentHashMap[ExtensionId[_], AnyRef] /** * Returns any extension registered to the specified Extension or returns null if not registered */ @tailrec private def findExtension[T <: Extension](ext: ExtensionId[T]): T = extensions.get(ext) match { - case c: CountDownLatch ⇒ c.await(); findExtension(ext) //Registration in process, await completion and retry - case other ⇒ other.asInstanceOf[T] //could be a T or null, in which case we return the null as T + case c: CountDownLatch ⇒ + c.await(); findExtension(ext) //Registration in process, await completion and retry + case other ⇒ + other.asInstanceOf[T] //could be a T or null, in which case we return the null as T } @tailrec diff --git a/akka-actor/src/main/scala/akka/actor/Address.scala b/akka-actor/src/main/scala/akka/actor/Address.scala index d98bbcb208..72f0a0c38a 100644 --- a/akka-actor/src/main/scala/akka/actor/Address.scala +++ b/akka-actor/src/main/scala/akka/actor/Address.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
 */ package akka.actor import java.net.URI @@ -19,10 +19,27 @@ import scala.collection.immutable */ @SerialVersionUID(1L) final case class Address private (protocol: String, system: String, host: Option[String], port: Option[Int]) { + // Please note that local/non-local distinction must be preserved: + // host.isDefined == hasGlobalScope + // host.isEmpty == hasLocalScope + // hasLocalScope == !hasGlobalScope def this(protocol: String, system: String) = this(protocol, system, None, None) def this(protocol: String, system: String, host: String, port: Int) = this(protocol, system, Option(host), Some(port)) + /** + * Returns true if this Address is only defined locally. It is not safe to send locally scoped addresses to remote + * hosts. See also [[akka.actor.Address#hasGlobalScope]]. + */ + def hasLocalScope: Boolean = host.isEmpty + + /** + * Returns true if this Address is usable globally. Unlike locally defined addresses ([[akka.actor.Address#hasLocalScope]]) + * addresses of global scope are safe to send to other hosts, as they globally and uniquely identify an addressable + * entity. + */ + def hasGlobalScope: Boolean = host.isDefined + /** * Returns the canonical String representation of this Address formatted as: * @@ -130,4 +147,4 @@ object ActorPathExtractor extends PathUtils { } catch { case _: URISyntaxException ⇒ None } -} \ No newline at end of file +} diff --git a/akka-actor/src/main/scala/akka/actor/Deployer.scala b/akka-actor/src/main/scala/akka/actor/Deployer.scala index 8ed7dc754a..310032e06d 100644 --- a/akka-actor/src/main/scala/akka/actor/Deployer.scala +++ b/akka-actor/src/main/scala/akka/actor/Deployer.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.actor diff --git a/akka-actor/src/main/scala/akka/actor/DynamicAccess.scala b/akka-actor/src/main/scala/akka/actor/DynamicAccess.scala index af891bc483..9d865b7da9 100644 --- a/akka-actor/src/main/scala/akka/actor/DynamicAccess.scala +++ b/akka-actor/src/main/scala/akka/actor/DynamicAccess.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.actor @@ -101,4 +101,4 @@ class ReflectiveDynamicAccess(val classLoader: ClassLoader) extends DynamicAcces } recover { case i: InvocationTargetException if i.getTargetException ne null ⇒ throw i.getTargetException } } } -} \ No newline at end of file +} diff --git a/akka-actor/src/main/scala/akka/actor/Extension.scala b/akka-actor/src/main/scala/akka/actor/Extension.scala index 707c07982a..38b6a6d37c 100644 --- a/akka-actor/src/main/scala/akka/actor/Extension.scala +++ b/akka-actor/src/main/scala/akka/actor/Extension.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.actor @@ -52,6 +52,9 @@ trait ExtensionId[T <: Extension] { * internal use only. */ def createExtension(system: ExtendedActorSystem): T + + override final def hashCode: Int = System.identityHashCode(this) + override final def equals(other: Any): Boolean = this eq other.asInstanceOf[AnyRef] } /** diff --git a/akka-actor/src/main/scala/akka/actor/FSM.scala b/akka-actor/src/main/scala/akka/actor/FSM.scala index 28151a4a47..d6f6ac488f 100644 --- a/akka-actor/src/main/scala/akka/actor/FSM.scala +++ b/akka-actor/src/main/scala/akka/actor/FSM.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.actor @@ -97,10 +97,11 @@ object FSM { if (repeat) scheduler.schedule(timeout, timeout, actor, this) else scheduler.scheduleOnce(timeout, actor, this)) - def cancel(): Unit = if (ref.isDefined) { - ref.get.cancel() - ref = None - } + def cancel(): Unit = + if (ref.isDefined) { + ref.get.cancel() + ref = None + } } /** diff --git a/akka-actor/src/main/scala/akka/actor/FaultHandling.scala b/akka-actor/src/main/scala/akka/actor/FaultHandling.scala index 7f65c84d02..b04ee4f06e 100644 --- a/akka-actor/src/main/scala/akka/actor/FaultHandling.scala +++ b/akka-actor/src/main/scala/akka/actor/FaultHandling.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.actor diff --git a/akka-actor/src/main/scala/akka/actor/IO.scala b/akka-actor/src/main/scala/akka/actor/IO.scala index e1dedb3ba2..de598210b2 100644 --- a/akka-actor/src/main/scala/akka/actor/IO.scala +++ b/akka-actor/src/main/scala/akka/actor/IO.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.actor diff --git a/akka-actor/src/main/scala/akka/actor/Props.scala b/akka-actor/src/main/scala/akka/actor/Props.scala index 30e28834dd..5608e7c8aa 100644 --- a/akka-actor/src/main/scala/akka/actor/Props.scala +++ b/akka-actor/src/main/scala/akka/actor/Props.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.actor diff --git a/akka-actor/src/main/scala/akka/actor/RepointableActorRef.scala b/akka-actor/src/main/scala/akka/actor/RepointableActorRef.scala index 02aef18564..fec648be06 100644 --- a/akka-actor/src/main/scala/akka/actor/RepointableActorRef.scala +++ b/akka-actor/src/main/scala/akka/actor/RepointableActorRef.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.actor @@ -151,7 +151,7 @@ private[akka] class RepointableActorRef( } } else this - def !(message: Any)(implicit sender: ActorRef = Actor.noSender) = underlying.tell(message, sender) + def !(message: Any)(implicit sender: ActorRef = Actor.noSender) = underlying.sendMessage(message, sender) def sendSystemMessage(message: SystemMessage) = underlying.sendSystemMessage(message) @@ -181,7 +181,7 @@ private[akka] class UnstartedCell(val systemImpl: ActorSystemImpl, while (!queue.isEmpty) { queue.poll() match { case s: SystemMessage ⇒ cell.sendSystemMessage(s) - case e: Envelope ⇒ cell.tell(e.message, e.sender) + case e: Envelope ⇒ cell.sendMessage(e) } } } finally { @@ -203,21 +203,20 @@ private[akka] class UnstartedCell(val systemImpl: ActorSystemImpl, def childrenRefs: ChildrenContainer = ChildrenContainer.EmptyChildrenContainer def getChildByName(name: String): Option[ChildRestartStats] = None - def tell(message: Any, sender: ActorRef): Unit = { - val useSender = if (sender eq Actor.noSender) system.deadLetters else sender + def sendMessage(msg: Envelope): Unit = { if (lock.tryLock(timeout.length, timeout.unit)) { try { val cell = self.underlying if (cellIsReady(cell)) { - cell.tell(message, useSender) - } else if (!queue.offer(Envelope(message, useSender, system))) { - system.eventStream.publish(Warning(self.path.toString, getClass, "dropping message of type " + message.getClass + " due to enqueue failure")) - system.deadLetters ! DeadLetter(message, useSender, self) - } + cell.sendMessage(msg) + } else if (!queue.offer(msg)) { + system.eventStream.publish(Warning(self.path.toString, getClass, "dropping message of type " + msg.message.getClass + " due to enqueue failure")) + system.deadLetters ! 
DeadLetter(msg.message, msg.sender, self) + } else if (Mailbox.debug) println(s"$self temp queueing ${msg.message} from ${msg.sender}") } finally lock.unlock() } else { - system.eventStream.publish(Warning(self.path.toString, getClass, "dropping message of type" + message.getClass + " due to lock timeout")) - system.deadLetters ! DeadLetter(message, useSender, self) + system.eventStream.publish(Warning(self.path.toString, getClass, "dropping message of type" + msg.message.getClass + " due to lock timeout")) + system.deadLetters ! DeadLetter(msg.message, msg.sender, self) } } @@ -244,7 +243,7 @@ private[akka] class UnstartedCell(val systemImpl: ActorSystemImpl, if (!wasEnqueued) { system.eventStream.publish(Warning(self.path.toString, getClass, "dropping system message " + msg + " due to enqueue failure")) system.deadLetters ! DeadLetter(msg, self, self) - } + } else if (Mailbox.debug) println(s"$self temp queueing system $msg") } } finally lock.unlock() } else { diff --git a/akka-actor/src/main/scala/akka/actor/Scheduler.scala b/akka-actor/src/main/scala/akka/actor/Scheduler.scala index bbb830110d..e3be799fb2 100644 --- a/akka-actor/src/main/scala/akka/actor/Scheduler.scala +++ b/akka-actor/src/main/scala/akka/actor/Scheduler.scala @@ -1,19 +1,31 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.actor -import scala.concurrent.duration.Duration -import akka.util.internal.{ TimerTask, HashedWheelTimer, Timeout ⇒ HWTimeout, Timer } -import akka.event.LoggingAdapter -import akka.dispatch.MessageDispatcher import java.io.Closeable -import java.util.concurrent.atomic.{ AtomicReference, AtomicLong } +import java.util.concurrent.ThreadFactory +import java.util.concurrent.atomic.{ AtomicLong, AtomicReference, AtomicReferenceArray } + import scala.annotation.tailrec -import akka.util.internal._ -import concurrent.ExecutionContext -import scala.concurrent.duration.FiniteDuration +import scala.collection.immutable +import scala.concurrent.{ Await, ExecutionContext, Future, Promise } +import scala.concurrent.duration._ +import scala.util.control.{ NoStackTrace, NonFatal } + +import com.typesafe.config.Config + +import akka.event.LoggingAdapter +import akka.util.Helpers +import akka.util.Unsafe.{ instance ⇒ unsafe } +import akka.util.internal.{ HashedWheelTimer, Timeout ⇒ HWTimeout, Timer ⇒ HWTimer, TimerTask ⇒ HWTimerTask } + +/** + * This exception is thrown by Scheduler.schedule* when scheduling is not + * possible, e.g. after shutting down the Scheduler. + */ +private case class SchedulerException(msg: String) extends akka.AkkaException(msg) with NoStackTrace // The Scheduler trait is included in the documentation. KEEP THE LINES SHORT!!! //#scheduler @@ -25,6 +37,12 @@ import scala.concurrent.duration.FiniteDuration * Furthermore, this timer service MUST throw IllegalStateException if it * cannot schedule a task. Once scheduled, the task MUST be executed. If * executed upon close(), the task may execute before its timeout. 
+ * + * Scheduler implementation are loaded reflectively at ActorSystem start-up + * with the following constructor arguments: + * 1) the system’s com.typesafe.config.Config (from system.settings.config) + * 2) a akka.event.LoggingAdapter + * 3) a java.util.concurrent.ThreadFactory */ trait Scheduler { /** @@ -35,11 +53,19 @@ trait Scheduler { * * Java & Scala API */ - def schedule( + final def schedule( initialDelay: FiniteDuration, interval: FiniteDuration, receiver: ActorRef, - message: Any)(implicit executor: ExecutionContext, sender: ActorRef = Actor.noSender): Cancellable + message: Any)(implicit executor: ExecutionContext, + sender: ActorRef = Actor.noSender): Cancellable = + schedule(initialDelay, interval, new Runnable { + def run = { + receiver ! message + if (receiver.isTerminated) + throw new SchedulerException("timer active for terminated actor") + } + }) /** * Schedules a function to be run repeatedly with an initial delay and a @@ -49,10 +75,11 @@ trait Scheduler { * * Scala API */ - def schedule( + final def schedule( initialDelay: FiniteDuration, interval: FiniteDuration)(f: ⇒ Unit)( - implicit executor: ExecutionContext): Cancellable + implicit executor: ExecutionContext): Cancellable = + schedule(initialDelay, interval, new Runnable { override def run = f }) /** * Schedules a function to be run repeatedly with an initial delay and @@ -67,6 +94,31 @@ trait Scheduler { interval: FiniteDuration, runnable: Runnable)(implicit executor: ExecutionContext): Cancellable + /** + * Schedules a message to be sent once with a delay, i.e. a time period that has + * to pass before the message is sent. + * + * Java & Scala API + */ + final def scheduleOnce( + delay: FiniteDuration, + receiver: ActorRef, + message: Any)(implicit executor: ExecutionContext, + sender: ActorRef = Actor.noSender): Cancellable = + scheduleOnce(delay, new Runnable { + override def run = receiver ! message + }) + + /** + * Schedules a function to be run once with a delay, i.e. 
a time period that has + * to pass before the function is run. + * + * Scala API + */ + final def scheduleOnce(delay: FiniteDuration)(f: ⇒ Unit)( + implicit executor: ExecutionContext): Cancellable = + scheduleOnce(delay, new Runnable { override def run = f }) + /** * Schedules a Runnable to be run once with a delay, i.e. a time period that * has to pass before the runnable is executed. @@ -78,28 +130,17 @@ trait Scheduler { runnable: Runnable)(implicit executor: ExecutionContext): Cancellable /** - * Schedules a message to be sent once with a delay, i.e. a time period that has - * to pass before the message is sent. - * - * Java & Scala API + * The maximum supported task frequency of this scheduler, i.e. the inverse + * of the minimum time interval between executions of a recurring task, in Hz. */ - def scheduleOnce( - delay: FiniteDuration, - receiver: ActorRef, - message: Any)(implicit executor: ExecutionContext): Cancellable + def maxFrequency: Double - /** - * Schedules a function to be run once with a delay, i.e. a time period that has - * to pass before the function is run. - * - * Scala API - */ - def scheduleOnce( - delay: FiniteDuration)(f: ⇒ Unit)( - implicit executor: ExecutionContext): Cancellable } //#scheduler +// this one is just here so we can present a nice AbstractScheduler for Java +abstract class AbstractSchedulerBase extends Scheduler + //#cancellable /** * Signifies something that can be cancelled @@ -108,14 +149,16 @@ trait Scheduler { */ trait Cancellable { /** - * Cancels this Cancellable + * Cancels this Cancellable and returns true if that was successful. + * If this cancellable was (concurrently) cancelled already, then this method + * will return false although isCancelled will return true. 
* * Java & Scala API */ - def cancel(): Unit + def cancel(): Boolean /** - * Returns whether this Cancellable has been cancelled + * Returns true if and only if this Cancellable has been successfully cancelled * * Java & Scala API */ @@ -123,6 +166,317 @@ trait Cancellable { } //#cancellable +/** + * This scheduler implementation is based on a revolving wheel of buckets, + * like Netty’s HashedWheelTimer, which it advances at a fixed tick rate and + * dispatches tasks it finds in the current bucket to their respective + * ExecutionContexts. The tasks are held in TaskHolders, which upon + * cancellation null out their reference to the actual task, leaving only this + * shell to be cleaned up when the wheel reaches that bucket next time. This + * enables the use of a simple linked list to chain the TaskHolders off the + * wheel. + * + * Also noteworthy is that this scheduler does not obtain a current time stamp + * when scheduling single-shot tasks, instead it always rounds up the task + * delay to a full multiple of the TickDuration. This means that tasks are + * scheduled possibly one tick later than they could be (if checking that + * “now() + delay <= nextTick” were done). 
+ */ +class LightArrayRevolverScheduler(config: Config, + log: LoggingAdapter, + threadFactory: ThreadFactory) + extends { + val WheelShift = { + val ticks = config.getInt("akka.scheduler.ticks-per-wheel") + val shift = 31 - Integer.numberOfLeadingZeros(ticks) + if ((ticks & (ticks - 1)) != 0) throw new akka.ConfigurationException("ticks-per-wheel must be a power of 2") + shift + } + val TickDuration = Duration(config.getMilliseconds("akka.scheduler.tick-duration"), MILLISECONDS) + val ShutdownTimeout = Duration(config.getMilliseconds("akka.scheduler.shutdown-timeout"), MILLISECONDS) + } with AtomicReferenceArray[LightArrayRevolverScheduler.TaskHolder](1 << WheelShift) with Scheduler with Closeable { + + import LightArrayRevolverScheduler._ + + private val oneNs = Duration.fromNanos(1l) + private def roundUp(d: FiniteDuration): FiniteDuration = + try { + ((d + TickDuration - oneNs) / TickDuration).toLong * TickDuration + } catch { + case _: IllegalArgumentException ⇒ d // rouding up Long.MaxValue.nanos overflows + } + + /** + * Clock implementation is replaceable (for testing); the implementation must + * return a monotonically increasing series of Long nanoseconds. 
+ */ + protected def clock(): Long = System.nanoTime + + /** + * Overridable for tests + */ + protected def waitNanos(nanos: Long): Unit = { + // see http://www.javamex.com/tutorials/threads/sleep_issues.shtml + val sleepMs = if (Helpers.isWindows) (nanos + 4999999) / 10000000 * 10 else (nanos + 999999) / 1000000 + try Thread.sleep(sleepMs) catch { + case _: InterruptedException ⇒ Thread.currentThread.interrupt() // we got woken up + } + } + + override def schedule(initialDelay: FiniteDuration, + delay: FiniteDuration, + runnable: Runnable)(implicit executor: ExecutionContext): Cancellable = + try new AtomicReference[Cancellable] with Cancellable { self ⇒ + set(schedule( + new AtomicLong(clock() + initialDelay.toNanos) with Runnable { + override def run(): Unit = { + try { + runnable.run() + val driftNanos = clock() - getAndAdd(delay.toNanos) + if (self.get != null) + swap(schedule(this, Duration.fromNanos(Math.max(delay.toNanos - driftNanos, 1)))) + } catch { + case _: SchedulerException ⇒ // ignore failure to enqueue or terminated target actor + } + } + }, roundUp(initialDelay))) + + @tailrec private def swap(c: Cancellable): Unit = { + get match { + case null ⇒ if (c != null) c.cancel() + case old ⇒ if (!compareAndSet(old, c)) swap(c) + } + } + + @tailrec final def cancel(): Boolean = { + get match { + case null ⇒ false + case c ⇒ + if (c.cancel()) compareAndSet(c, null) + else compareAndSet(c, null) || cancel() + } + } + + override def isCancelled: Boolean = get == null + } catch { + case SchedulerException(msg) ⇒ throw new IllegalStateException(msg) + } + + override def scheduleOnce(delay: FiniteDuration, runnable: Runnable)(implicit executor: ExecutionContext): Cancellable = + try schedule(runnable, roundUp(delay)) + catch { + case SchedulerException(msg) ⇒ throw new IllegalStateException(msg) + } + + private def execDirectly(t: TimerTask): Unit = { + try t.run() catch { + case e: InterruptedException ⇒ throw e + case _: SchedulerException ⇒ // ignore 
terminated actors + case NonFatal(e) ⇒ log.error(e, "exception while executing timer task") + } + } + + override def close(): Unit = Await.result(stop(), ShutdownTimeout) foreach execDirectly + + override val maxFrequency: Double = 1.second / TickDuration + + /* + * BELOW IS THE ACTUAL TIMER IMPLEMENTATION + */ + + private val start = clock() + private val tickNanos = TickDuration.toNanos + private val wheelMask = length() - 1 + @volatile private var currentBucket = 0 + + private def schedule(r: Runnable, delay: FiniteDuration)(implicit ec: ExecutionContext): TimerTask = + if (delay <= Duration.Zero) { + if (stopped.get != null) throw new SchedulerException("cannot enqueue after timer shutdown") + ec.execute(r) + NotCancellable + } else { + val ticks = (delay.toNanos / tickNanos).toInt + val rounds = (ticks >> WheelShift).toInt + + /* + * works as follows: + * - ticks are calculated to be never “too early” + * - base off of currentBucket, even after that was moved in the meantime + * - timer thread will swap in Pause, increment currentBucket, swap in null + * - hence spin on Pause, else normal CAS + * - stopping will set all buckets to Pause (in clearAll), so we need only check there + */ + @tailrec + def rec(t: TaskHolder): TimerTask = { + val bucket = (currentBucket + ticks) & wheelMask + get(bucket) match { + case Pause ⇒ + if (stopped.get != null) throw new SchedulerException("cannot enqueue after timer shutdown") + rec(t) + case tail ⇒ + t.next = tail + if (compareAndSet(bucket, tail, t)) t + else rec(t) + } + } + + rec(new TaskHolder(r, null, rounds)) + } + + private val stopped = new AtomicReference[Promise[immutable.Seq[TimerTask]]] + def stop(): Future[immutable.Seq[TimerTask]] = + if (stopped.compareAndSet(null, Promise())) { + timerThread.interrupt() + stopped.get.future + } else Future.successful(Nil) + + private def clearAll(): immutable.Seq[TimerTask] = { + def collect(curr: TaskHolder, acc: Vector[TimerTask]): Vector[TimerTask] = { + curr match { + 
case null ⇒ acc + case x ⇒ collect(x.next, acc :+ x) + } + } + (0 until length()) flatMap (i ⇒ collect(getAndSet(i, Pause), Vector.empty)) + } + + @volatile private var timerThread: Thread = threadFactory.newThread(new Runnable { + var tick = 0 + override final def run = + try nextTick() + catch { + case t: Throwable ⇒ + log.error(t, "exception on LARS’ timer thread") + stopped.get match { + case null ⇒ + val thread = threadFactory.newThread(this) + log.info("starting new LARS thread") + try thread.start() + catch { + case e: Throwable ⇒ log.error(e, "LARS cannot start new thread, ship’s going down!") + } + timerThread = thread + case x ⇒ x success clearAll() + } + throw t + } + @tailrec final def nextTick(): Unit = { + val sleepTime = start + tick * tickNanos - clock() + + if (sleepTime > 0) { + waitNanos(sleepTime) + } else { + // first get the list of tasks out and turn the wheel + val bucket = currentBucket + val tasks = getAndSet(bucket, Pause) + val next = (bucket + 1) & wheelMask + currentBucket = next + set(bucket, if (tasks eq null) Empty else null) + + // then process the tasks and keep the non-ripe ones in a list + var last: TaskHolder = null // the last element of the putBack list + @tailrec def rec1(task: TaskHolder, nonRipe: TaskHolder): TaskHolder = { + if ((task eq null) || (task eq Empty)) nonRipe + else if (task.isCancelled) rec1(task.next, nonRipe) + else if (task.rounds > 0) { + task.rounds -= 1 + + val next = task.next + task.next = nonRipe + + if (last == null) last = task + rec1(next, task) + } else { + task.executeTask() + rec1(task.next, nonRipe) + } + } + val putBack = rec1(tasks, null) + + // finally put back the non-ripe ones, who had their rounds decremented + @tailrec def rec2() { + val tail = get(bucket) + last.next = tail + if (!compareAndSet(bucket, tail, putBack)) rec2() + } + if (last != null) rec2() + + // and off to the next tick + tick += 1 + } + stopped.get match { + case null ⇒ nextTick() + case x ⇒ x success clearAll() + } + 
} + }) + + timerThread.start() +} + +object LightArrayRevolverScheduler { + private val taskOffset = unsafe.objectFieldOffset(classOf[TaskHolder].getDeclaredField("task")) + + /** + * INTERNAL API + */ + protected[actor] trait TimerTask extends Runnable with Cancellable + + /** + * INTERNAL API + */ + protected[actor] class TaskHolder(@volatile var task: Runnable, + @volatile var next: TaskHolder, + @volatile var rounds: Int)( + implicit executionContext: ExecutionContext) extends TimerTask { + @tailrec + private final def extractTask(cancel: Boolean): Runnable = { + task match { + case null | CancelledTask ⇒ null // null means expired + case x ⇒ + if (unsafe.compareAndSwapObject(this, taskOffset, x, if (cancel) CancelledTask else null)) x + else extractTask(cancel) + } + } + + private[akka] final def executeTask(): Boolean = extractTask(cancel = false) match { + case null | CancelledTask ⇒ false + case other ⇒ + try { + executionContext execute other + true + } catch { + case _: InterruptedException ⇒ { Thread.currentThread.interrupt(); false } + case NonFatal(e) ⇒ { executionContext.reportFailure(e); false } + } + } + + /** + * utility method to directly run the task, e.g. as clean-up action + */ + def run(): Unit = extractTask(cancel = false) match { + case null ⇒ + case r ⇒ r.run() + } + + override def cancel(): Boolean = extractTask(cancel = true) != null + + override def isCancelled: Boolean = task eq CancelledTask + } + + private val CancelledTask = new Runnable { def run = () } + + private val NotCancellable = new TimerTask { + def cancel(): Boolean = false + def isCancelled: Boolean = false + def run(): Unit = () + } + // marker object during wheel movement + private val Pause = new TaskHolder(null, null, 0)(null) + // we need two empty tokens so wheel passing can be detected in schedule() + private val Empty = new TaskHolder(null, null, 0)(null) +} + /** * Scheduled tasks (Runnable and functions) are executed with the supplied dispatcher. 
* Note that dispatcher is by-name parameter, because dispatcher might not be initialized @@ -132,35 +486,19 @@ trait Cancellable { * if it does not enqueue a task. Once a task is queued, it MUST be executed or * returned from stop(). */ -class DefaultScheduler(hashedWheelTimer: HashedWheelTimer, log: LoggingAdapter) extends Scheduler with Closeable { - override def schedule(initialDelay: FiniteDuration, - delay: FiniteDuration, - receiver: ActorRef, - message: Any)(implicit executor: ExecutionContext, sender: ActorRef = Actor.noSender): Cancellable = { - val continuousCancellable = new ContinuousCancellable - continuousCancellable.init( - hashedWheelTimer.newTimeout( - new AtomicLong(System.nanoTime + initialDelay.toNanos) with TimerTask with ContinuousScheduling { - def run(timeout: HWTimeout) { - executor execute new Runnable { - override def run = { - receiver ! message - // Check if the receiver is still alive and kicking before reschedule the task - if (receiver.isTerminated) log.debug("Could not reschedule message to be sent because receiving actor {} has been terminated.", receiver) - else { - val driftNanos = System.nanoTime - getAndAdd(delay.toNanos) - scheduleNext(timeout, Duration.fromNanos(Math.max(delay.toNanos - driftNanos, 1)), continuousCancellable) - } - } - } - } - }, - initialDelay)) - } +class DefaultScheduler(config: Config, + log: LoggingAdapter, + threadFactory: ThreadFactory) extends Scheduler with Closeable { - override def schedule(initialDelay: FiniteDuration, - delay: FiniteDuration)(f: ⇒ Unit)(implicit executor: ExecutionContext): Cancellable = - schedule(initialDelay, delay, new Runnable { override def run = f }) + val TicksPerWheel = { + val ticks = config.getInt("akka.scheduler.ticks-per-wheel") + val shift = 31 - Integer.numberOfLeadingZeros(ticks) + if ((ticks & (ticks - 1)) != 0) throw new akka.ConfigurationException("ticks-per-wheel must be a power of 2") + ticks + } + val TickDuration = 
Duration(config.getMilliseconds("akka.scheduler.tick-duration"), MILLISECONDS) + + private val hashedWheelTimer = new HashedWheelTimer(log, threadFactory, TickDuration, TicksPerWheel) override def schedule(initialDelay: FiniteDuration, delay: FiniteDuration, @@ -168,14 +506,19 @@ class DefaultScheduler(hashedWheelTimer: HashedWheelTimer, log: LoggingAdapter) val continuousCancellable = new ContinuousCancellable continuousCancellable.init( hashedWheelTimer.newTimeout( - new AtomicLong(System.nanoTime + initialDelay.toNanos) with TimerTask with ContinuousScheduling { - override def run(timeout: HWTimeout): Unit = executor.execute(new Runnable { - override def run = { - runnable.run() - val driftNanos = System.nanoTime - getAndAdd(delay.toNanos) - scheduleNext(timeout, Duration.fromNanos(Math.max(delay.toNanos - driftNanos, 1)), continuousCancellable) - } - }) + new AtomicLong(System.nanoTime + initialDelay.toNanos) with HWTimerTask with ContinuousScheduling { + override def run(timeout: HWTimeout): Unit = + executor.execute(new Runnable { + override def run = { + try { + runnable.run() + val driftNanos = System.nanoTime - getAndAdd(delay.toNanos) + scheduleNext(timeout, Duration.fromNanos(Math.max(delay.toNanos - driftNanos, 1)), continuousCancellable) + } catch { + case _: SchedulerException ⇒ // actor target terminated + } + } + }) }, initialDelay)) } @@ -183,16 +526,10 @@ class DefaultScheduler(hashedWheelTimer: HashedWheelTimer, log: LoggingAdapter) override def scheduleOnce(delay: FiniteDuration, runnable: Runnable)(implicit executor: ExecutionContext): Cancellable = new DefaultCancellable( hashedWheelTimer.newTimeout( - new TimerTask() { def run(timeout: HWTimeout): Unit = executor.execute(runnable) }, + new HWTimerTask() { def run(timeout: HWTimeout): Unit = executor.execute(runnable) }, delay)) - override def scheduleOnce(delay: FiniteDuration, receiver: ActorRef, message: Any)(implicit executor: ExecutionContext): Cancellable = - scheduleOnce(delay, new 
Runnable { override def run = receiver ! message }) - - override def scheduleOnce(delay: FiniteDuration)(f: ⇒ Unit)(implicit executor: ExecutionContext): Cancellable = - scheduleOnce(delay, new Runnable { override def run = f }) - - private trait ContinuousScheduling { this: TimerTask ⇒ + private trait ContinuousScheduling { this: HWTimerTask ⇒ def scheduleNext(timeout: HWTimeout, delay: FiniteDuration, delegator: ContinuousCancellable) { try delegator.swap(timeout.getTimer.newTimeout(this, delay)) catch { case _: IllegalStateException ⇒ } // stop recurring if timer is stopped } @@ -209,23 +546,25 @@ class DefaultScheduler(hashedWheelTimer: HashedWheelTimer, log: LoggingAdapter) val i = hashedWheelTimer.stop().iterator() while (i.hasNext) execDirectly(i.next()) } + + override def maxFrequency: Double = 1.second / TickDuration } private[akka] object ContinuousCancellable { val initial: HWTimeout = new HWTimeout { - override def getTimer: Timer = null - override def getTask: TimerTask = null + override def getTimer: HWTimer = null + override def getTask: HWTimerTask = null override def isExpired: Boolean = false override def isCancelled: Boolean = false - override def cancel: Unit = () + override def cancel: Boolean = true } val cancelled: HWTimeout = new HWTimeout { - override def getTimer: Timer = null - override def getTask: TimerTask = null + override def getTimer: HWTimer = null + override def getTask: HWTimerTask = null override def isExpired: Boolean = false override def isCancelled: Boolean = true - override def cancel: Unit = () + override def cancel: Boolean = false } } /** @@ -245,10 +584,10 @@ private[akka] class ContinuousCancellable extends AtomicReference[HWTimeout](Con } def isCancelled(): Boolean = get().isCancelled() - def cancel(): Unit = getAndSet(ContinuousCancellable.cancelled).cancel() + def cancel(): Boolean = getAndSet(ContinuousCancellable.cancelled).cancel() } private[akka] class DefaultCancellable(timeout: HWTimeout) extends 
AtomicReference[HWTimeout](timeout) with Cancellable { - override def cancel(): Unit = getAndSet(ContinuousCancellable.cancelled).cancel() + override def cancel(): Boolean = getAndSet(ContinuousCancellable.cancelled).cancel() override def isCancelled: Boolean = get().isCancelled } diff --git a/akka-actor/src/main/scala/akka/actor/Stash.scala b/akka-actor/src/main/scala/akka/actor/Stash.scala index cdf4ef6d5b..34c6192016 100644 --- a/akka-actor/src/main/scala/akka/actor/Stash.scala +++ b/akka-actor/src/main/scala/akka/actor/Stash.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.actor diff --git a/akka-actor/src/main/scala/akka/actor/TypedActor.scala b/akka-actor/src/main/scala/akka/actor/TypedActor.scala index 7509cd758d..263a1c309d 100644 --- a/akka-actor/src/main/scala/akka/actor/TypedActor.scala +++ b/akka-actor/src/main/scala/akka/actor/TypedActor.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.actor @@ -674,7 +674,7 @@ class TypedActorExtension(val system: ExtendedActorSystem) extends TypedActorFac * INTERNAL USE ONLY */ private[akka] def invocationHandlerFor(@deprecatedName('typedActor_?) typedActor: AnyRef): TypedActorInvocationHandler = - if ((typedActor ne null) && Proxy.isProxyClass(typedActor.getClass)) typedActor match { + if ((typedActor ne null) && classOf[Proxy].isAssignableFrom(typedActor.getClass) && Proxy.isProxyClass(typedActor.getClass)) typedActor match { case null ⇒ null case other ⇒ Proxy.getInvocationHandler(other) match { case null ⇒ null diff --git a/akka-actor/src/main/scala/akka/actor/UntypedActor.scala b/akka-actor/src/main/scala/akka/actor/UntypedActor.scala index 47ddf4fc43..163d9468bc 100644 --- a/akka-actor/src/main/scala/akka/actor/UntypedActor.scala +++ b/akka-actor/src/main/scala/akka/actor/UntypedActor.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. 
+ * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.actor diff --git a/akka-actor/src/main/scala/akka/actor/UntypedActorWithStash.scala b/akka-actor/src/main/scala/akka/actor/UntypedActorWithStash.scala index 9b55e9397b..a41e03e8cf 100644 --- a/akka-actor/src/main/scala/akka/actor/UntypedActorWithStash.scala +++ b/akka-actor/src/main/scala/akka/actor/UntypedActorWithStash.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.actor diff --git a/akka-actor/src/main/scala/akka/actor/dsl/Creators.scala b/akka-actor/src/main/scala/akka/actor/dsl/Creators.scala index a9515f3000..bcbcd52cbd 100644 --- a/akka-actor/src/main/scala/akka/actor/dsl/Creators.scala +++ b/akka-actor/src/main/scala/akka/actor/dsl/Creators.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.actor.dsl diff --git a/akka-actor/src/main/scala/akka/actor/dsl/Inbox.scala b/akka-actor/src/main/scala/akka/actor/dsl/Inbox.scala index 418a035e53..46a4f53af5 100644 --- a/akka-actor/src/main/scala/akka/actor/dsl/Inbox.scala +++ b/akka-actor/src/main/scala/akka/actor/dsl/Inbox.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.actor.dsl diff --git a/akka-actor/src/main/scala/akka/actor/dungeon/Children.scala b/akka-actor/src/main/scala/akka/actor/dungeon/Children.scala index ba856206ea..f897535c91 100644 --- a/akka-actor/src/main/scala/akka/actor/dungeon/Children.scala +++ b/akka-actor/src/main/scala/akka/actor/dungeon/Children.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.actor.dungeon @@ -197,4 +197,4 @@ private[akka] trait Children { this: ActorCell ⇒ } } -} \ No newline at end of file +} diff --git a/akka-actor/src/main/scala/akka/actor/dungeon/ChildrenContainer.scala b/akka-actor/src/main/scala/akka/actor/dungeon/ChildrenContainer.scala index 1fccbf8078..13ab3b4c8d 100644 --- a/akka-actor/src/main/scala/akka/actor/dungeon/ChildrenContainer.scala +++ b/akka-actor/src/main/scala/akka/actor/dungeon/ChildrenContainer.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.actor.dungeon @@ -210,4 +210,4 @@ private[akka] object ChildrenContainer { else c.mkString("children (" + toDie.size + " terminating):\n ", "\n ", "\n") + toDie } -} \ No newline at end of file +} diff --git a/akka-actor/src/main/scala/akka/actor/dungeon/DeathWatch.scala b/akka-actor/src/main/scala/akka/actor/dungeon/DeathWatch.scala index 70f79f1d48..a0fd5f1632 100644 --- a/akka-actor/src/main/scala/akka/actor/dungeon/DeathWatch.scala +++ b/akka-actor/src/main/scala/akka/actor/dungeon/DeathWatch.scala @@ -1,11 +1,11 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.actor.dungeon -import akka.actor.{ Terminated, InternalActorRef, ActorRef, ActorCell, Actor, Address, AddressTerminated } -import akka.dispatch.{ Watch, Unwatch } +import akka.actor.{ Terminated, InternalActorRef, ActorRef, ActorRefScope, ActorCell, Actor, Address, AddressTerminated } +import akka.dispatch.{ ChildTerminated, Watch, Unwatch } import akka.event.Logging.{ Warning, Error, Debug } import scala.util.control.NonFatal @@ -40,23 +40,36 @@ private[akka] trait DeathWatch { this: ActorCell ⇒ * When this actor is watching the subject of [[akka.actor.Terminated]] message * it will be propagated to user's receive. 
*/ - protected def watchedActorTerminated(t: Terminated): Unit = if (watching.contains(t.actor)) { - maintainAddressTerminatedSubscription(t.actor) { - watching -= t.actor + protected def watchedActorTerminated(t: Terminated): Unit = + if (watching.contains(t.actor)) { + maintainAddressTerminatedSubscription(t.actor) { + watching -= t.actor + } + receiveMessage(t) } - receiveMessage(t) - } protected def tellWatchersWeDied(actor: Actor): Unit = { if (!watchedBy.isEmpty) { val terminated = Terminated(self)(existenceConfirmed = true, addressTerminated = false) try { - watchedBy foreach { - watcher ⇒ - try watcher.tell(terminated, self) catch { - case NonFatal(t) ⇒ publish(Error(t, self.path.toString, clazz(actor), "deathwatch")) - } - } + def sendTerminated(ifLocal: Boolean)(watcher: ActorRef): Unit = + if (watcher.asInstanceOf[ActorRefScope].isLocal == ifLocal) watcher.tell(terminated, self) + + /* + * It is important to notify the remote watchers first, otherwise RemoteDaemon might shut down, causing + * the remoting to shut down as well. At this point Terminated messages to remote watchers are no longer + * deliverable. + * + * The problematic case is: + * 1. Terminated is sent to RemoteDaemon + * 1a. RemoteDaemon is fast enough to notify the terminator actor in RemoteActorRefProvider + * 1b. The terminator is fast enough to enqueue the shutdown command in the remoting + * 2. Only at this point is the Terminated (to be sent remotely) enqueued in the mailbox of remoting + * + * If the remote watchers are notified first, then the mailbox of the Remoting will guarantee the correct order. 
+ */ + watchedBy foreach sendTerminated(ifLocal = false) + watchedBy foreach sendTerminated(ifLocal = true) } finally watchedBy = ActorCell.emptyActorRefSet } } @@ -65,9 +78,7 @@ private[akka] trait DeathWatch { this: ActorCell ⇒ if (!watching.isEmpty) { try { watching foreach { // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ - case watchee: InternalActorRef ⇒ try watchee.sendSystemMessage(Unwatch(watchee, self)) catch { - case NonFatal(t) ⇒ publish(Error(t, self.path.toString, clazz(actor), "deathwatch")) - } + case watchee: InternalActorRef ⇒ watchee.sendSystemMessage(Unwatch(watchee, self)) } } finally { watching = ActorCell.emptyActorRefSet @@ -117,7 +128,11 @@ private[akka] trait DeathWatch { this: ActorCell ⇒ // send Terminated to self for all matching subjects // existenceConfirmed = false because we could have been watching a // non-local ActorRef that had never resolved before the other node went down + // When a parent is watching a child and it terminates due to AddressTerminated + // it is removed by sending ChildTerminated to support immediate creation of child + // with same name. for (a ← watching; if a.path.address == address) { + childrenRefs.getByRef(a) foreach { _ ⇒ self.sendSystemMessage(ChildTerminated(a)) } self ! Terminated(a)(existenceConfirmed = false, addressTerminated = true) } } @@ -152,4 +167,4 @@ private[akka] trait DeathWatch { this: ActorCell ⇒ private def subscribeAddressTerminated(): Unit = system.eventStream.subscribe(self, classOf[AddressTerminated]) -} \ No newline at end of file +} diff --git a/akka-actor/src/main/scala/akka/actor/dungeon/Dispatch.scala b/akka-actor/src/main/scala/akka/actor/dungeon/Dispatch.scala index 469aac78c2..8d4a62d073 100644 --- a/akka-actor/src/main/scala/akka/actor/dungeon/Dispatch.scala +++ b/akka-actor/src/main/scala/akka/actor/dungeon/Dispatch.scala @@ -1,16 +1,19 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.actor.dungeon import scala.annotation.tailrec -import akka.actor.{ ActorRef, ActorCell } import akka.dispatch.{ Terminate, SystemMessage, Suspend, Resume, Recreate, MessageDispatcher, Mailbox, Envelope, Create } import akka.event.Logging.Error import akka.util.Unsafe -import scala.util.control.NonFatal import akka.dispatch.NullMessage +import akka.actor.{ NoSerializationVerificationNeeded, InvalidMessageException, ActorRef, ActorCell } +import akka.serialization.SerializationExtension +import scala.util.control.NonFatal +import scala.util.control.Exception.Catcher +import scala.concurrent.ExecutionContext private[akka] trait Dispatch { this: ActorCell ⇒ @@ -30,11 +33,6 @@ private[akka] trait Dispatch { this: ActorCell ⇒ val dispatcher: MessageDispatcher = system.dispatchers.lookup(props.dispatcher) - /** - * UntypedActorContext impl - */ - final def getDispatcher(): MessageDispatcher = dispatcher - final def isTerminated: Boolean = mailbox.isClosed /** @@ -70,50 +68,37 @@ private[akka] trait Dispatch { this: ActorCell ⇒ this } - // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ - final def suspend(): Unit = - try dispatcher.systemDispatch(this, Suspend()) - catch { - case e @ (_: InterruptedException | NonFatal(_)) ⇒ - system.eventStream.publish(Error(e, self.path.toString, clazz(actor), "swallowing exception during message send")) - } + private def handleException: Catcher[Unit] = { + case e: InterruptedException ⇒ + system.eventStream.publish(Error(e, self.path.toString, clazz(actor), "interrupted during message send")) + Thread.currentThread.interrupt() + case NonFatal(e) ⇒ + system.eventStream.publish(Error(e, self.path.toString, clazz(actor), "swallowing exception during message send")) + } // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ - final def resume(causedByFailure: Throwable): Unit = - try dispatcher.systemDispatch(this, Resume(causedByFailure)) - catch { - case e @ (_: InterruptedException | 
NonFatal(_)) ⇒ - system.eventStream.publish(Error(e, self.path.toString, clazz(actor), "swallowing exception during message send")) - } + final def suspend(): Unit = try dispatcher.systemDispatch(this, Suspend()) catch handleException // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ - final def restart(cause: Throwable): Unit = - try dispatcher.systemDispatch(this, Recreate(cause)) - catch { - case e @ (_: InterruptedException | NonFatal(_)) ⇒ - system.eventStream.publish(Error(e, self.path.toString, clazz(actor), "swallowing exception during message send")) - } + final def resume(causedByFailure: Throwable): Unit = try dispatcher.systemDispatch(this, Resume(causedByFailure)) catch handleException // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ - final def stop(): Unit = - try dispatcher.systemDispatch(this, Terminate()) - catch { - case e @ (_: InterruptedException | NonFatal(_)) ⇒ - system.eventStream.publish(Error(e, self.path.toString, clazz(actor), "swallowing exception during message send")) - } + final def restart(cause: Throwable): Unit = try dispatcher.systemDispatch(this, Recreate(cause)) catch handleException - def tell(message: Any, sender: ActorRef): Unit = - try dispatcher.dispatch(this, Envelope(message, if (sender eq null) system.deadLetters else sender, system)) - catch { - case e @ (_: InterruptedException | NonFatal(_)) ⇒ - system.eventStream.publish(Error(e, self.path.toString, clazz(actor), "swallowing exception during message send")) - } + // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ + final def stop(): Unit = try dispatcher.systemDispatch(this, Terminate()) catch handleException - override def sendSystemMessage(message: SystemMessage): Unit = - try dispatcher.systemDispatch(this, message) - catch { - case e @ (_: InterruptedException | NonFatal(_)) ⇒ - system.eventStream.publish(Error(e, self.path.toString, clazz(actor), "swallowing exception during message send")) - } + def 
sendMessage(msg: Envelope): Unit = + try { + val m = msg.message.asInstanceOf[AnyRef] + if (m eq null) throw new InvalidMessageException("Message is null") + if (system.settings.SerializeAllMessages && !m.isInstanceOf[NoSerializationVerificationNeeded]) { + val s = SerializationExtension(system) + s.deserialize(s.serialize(m).get, m.getClass).get + } + dispatcher.dispatch(this, msg) + } catch handleException -} \ No newline at end of file + override def sendSystemMessage(message: SystemMessage): Unit = try dispatcher.systemDispatch(this, message) catch handleException + +} diff --git a/akka-actor/src/main/scala/akka/actor/dungeon/FaultHandling.scala b/akka-actor/src/main/scala/akka/actor/dungeon/FaultHandling.scala index ac4f5b5c36..b97313d794 100644 --- a/akka-actor/src/main/scala/akka/actor/dungeon/FaultHandling.scala +++ b/akka-actor/src/main/scala/akka/actor/dungeon/FaultHandling.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.actor.dungeon @@ -141,6 +141,9 @@ private[akka] trait FaultHandling { this: ActorCell ⇒ setReceiveTimeout(Duration.Undefined) cancelReceiveTimeout + // prevent Deadletter(Terminated) messages + unwatchWatchedActors(actor) + // stop all children, which will turn childrenRefs into TerminatingChildrenContainer (if there are children) children foreach stop @@ -190,13 +193,19 @@ private[akka] trait FaultHandling { this: ActorCell ⇒ private def finishTerminate() { val a = actor - // The following order is crucial for things to work properly. Only chnage this if you're very confident and lucky. + /* The following order is crucial for things to work properly. Only change this if you're very confident and lucky. + * + * Please note that if a parent is also a watcher then ChildTerminated and Terminated must be processed in this + * specific order. 
+ */ try if (a ne null) a.postStop() - finally try dispatcher.detach(this) + catch { + case NonFatal(e) ⇒ publish(Error(e, self.path.toString, clazz(a), e.getMessage)) + } finally try dispatcher.detach(this) finally try parent.sendSystemMessage(ChildTerminated(self)) finally try parent ! NullMessage // read ScalaDoc of NullMessage to see why finally try tellWatchersWeDied(a) - finally try unwatchWatchedActors(a) + finally try unwatchWatchedActors(a) // stay here as we expect an emergency stop from handleInvokeFailure finally { if (system.settings.DebugLifecycle) publish(Debug(self.path.toString, clazz(a), "stopped")) diff --git a/akka-actor/src/main/scala/akka/actor/dungeon/ReceiveTimeout.scala b/akka-actor/src/main/scala/akka/actor/dungeon/ReceiveTimeout.scala index 5e1e4465eb..2f52f312fe 100644 --- a/akka-actor/src/main/scala/akka/actor/dungeon/ReceiveTimeout.scala +++ b/akka-actor/src/main/scala/akka/actor/dungeon/ReceiveTimeout.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.actor.dungeon @@ -46,4 +46,4 @@ private[akka] trait ReceiveTimeout { this: ActorCell ⇒ receiveTimeoutData = (receiveTimeoutData._1, emptyCancellable) } -} \ No newline at end of file +} diff --git a/akka-actor/src/main/scala/akka/actor/package.scala b/akka-actor/src/main/scala/akka/actor/package.scala index deb9f0e7a9..b6e45e74f6 100644 --- a/akka-actor/src/main/scala/akka/actor/package.scala +++ b/akka-actor/src/main/scala/akka/actor/package.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka diff --git a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala index f8fa5e1046..fff250c547 100644 --- a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.dispatch @@ -9,27 +9,19 @@ import akka.event.Logging.{ Error, LogEventException } import akka.actor._ import akka.event.EventStream import com.typesafe.config.Config -import akka.serialization.SerializationExtension import akka.util.{ Unsafe, Index } import scala.annotation.tailrec import scala.concurrent.forkjoin.{ ForkJoinTask, ForkJoinPool } import scala.concurrent.duration.Duration -import scala.concurrent.{ ExecutionContext, Await, Awaitable } -import scala.util.control.NonFatal +import scala.concurrent.ExecutionContext import scala.concurrent.duration.FiniteDuration +import scala.util.control.NonFatal final case class Envelope private (val message: Any, val sender: ActorRef) object Envelope { - def apply(message: Any, sender: ActorRef, system: ActorSystem): Envelope = { - val msg = message.asInstanceOf[AnyRef] - if (msg eq null) throw new InvalidMessageException("Message is null") - if (system.settings.SerializeAllMessages && !msg.isInstanceOf[NoSerializationVerificationNeeded]) { - val ser = SerializationExtension(system) - ser.deserialize(ser.serialize(msg).get, msg.getClass).get - } - new Envelope(message, sender) - } + def apply(message: Any, sender: ActorRef, system: ActorSystem): Envelope = + new Envelope(message, if (sender ne Actor.noSender) sender else system.deadLetters) } /** @@ -80,7 +72,7 @@ private[akka] object SystemMessage { * * ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ */ -private[akka] sealed trait SystemMessage extends PossiblyHarmful { +private[akka] sealed trait 
SystemMessage extends PossiblyHarmful with Serializable { @transient var next: SystemMessage = _ } @@ -88,42 +80,52 @@ private[akka] sealed trait SystemMessage extends PossiblyHarmful { /** * INTERNAL API */ +@SerialVersionUID(-4836972106317757555L) private[akka] case class Create(uid: Int) extends SystemMessage // send to self from Dispatcher.register /** * INTERNAL API */ +@SerialVersionUID(686735569005808256L) private[akka] case class Recreate(cause: Throwable) extends SystemMessage // sent to self from ActorCell.restart /** * INTERNAL API */ +@SerialVersionUID(7270271967867221401L) private[akka] case class Suspend() extends SystemMessage // sent to self from ActorCell.suspend /** * INTERNAL API */ +@SerialVersionUID(-2567504317093262591L) private[akka] case class Resume(causedByFailure: Throwable) extends SystemMessage // sent to self from ActorCell.resume /** * INTERNAL API */ +@SerialVersionUID(708873453777219599L) private[akka] case class Terminate() extends SystemMessage // sent to self from ActorCell.stop /** * INTERNAL API */ +@SerialVersionUID(3245747602115485675L) private[akka] case class Supervise(child: ActorRef, async: Boolean, uid: Int) extends SystemMessage // sent to supervisor ActorRef from ActorCell.start /** * INTERNAL API */ +@SerialVersionUID(5513569382760799668L) private[akka] case class ChildTerminated(child: ActorRef) extends SystemMessage // sent to supervisor from ActorCell.doTerminate /** * INTERNAL API */ +@SerialVersionUID(3323205435124174788L) private[akka] case class Watch(watchee: ActorRef, watcher: ActorRef) extends SystemMessage // sent to establish a DeathWatch /** * INTERNAL API */ +@SerialVersionUID(6363620903363658256L) private[akka] case class Unwatch(watchee: ActorRef, watcher: ActorRef) extends SystemMessage // sent to tear down a DeathWatch /** * INTERNAL API */ +@SerialVersionUID(-5475916034683997987L) private[akka] case object NoMessage extends SystemMessage // switched into the mailbox to signal termination final case 
class TaskInvocation(eventStream: EventStream, runnable: Runnable, cleanup: () ⇒ Unit) extends Batchable { @@ -158,25 +160,24 @@ private[akka] object MessageDispatcher { // since this is a compile-time constant, scalac will elide code behind if (MessageDispatcher.debug) (RK checked with 2.9.1) final val debug = false // Deliberately without type ascription to make it a compile-time constant lazy val actors = new Index[MessageDispatcher, ActorRef](16, _ compareTo _) - def printActors: Unit = if (debug) { - for { - d ← actors.keys - a ← { println(d + " inhabitants: " + d.inhabitants); actors.valueIterator(d) } - } { - val status = if (a.isTerminated) " (terminated)" else " (alive)" - val messages = a match { - case r: ActorRefWithCell ⇒ " " + r.underlying.numberOfMessages + " messages" - case _ ⇒ " " + a.getClass + def printActors: Unit = + if (debug) { + for { + d ← actors.keys + a ← { println(d + " inhabitants: " + d.inhabitants); actors.valueIterator(d) } + } { + val status = if (a.isTerminated) " (terminated)" else " (alive)" + val messages = a match { + case r: ActorRefWithCell ⇒ " " + r.underlying.numberOfMessages + " messages" + case _ ⇒ " " + a.getClass + } + val parent = a match { + case i: InternalActorRef ⇒ ", parent: " + i.getParent + case _ ⇒ "" + } + println(" -> " + a + status + messages + parent) } - val parent = a match { - case i: InternalActorRef ⇒ ", parent: " + i.getParent - case _ ⇒ "" - } - println(" -> " + a + status + messages + parent) } - } - - implicit def defaultDispatcher(implicit system: ActorSystem): MessageDispatcher = system.dispatcher } abstract class MessageDispatcher(val prerequisites: DispatcherPrerequisites) extends AbstractMessageDispatcher with BatchingExecutor with ExecutionContext { @@ -191,6 +192,13 @@ abstract class MessageDispatcher(val prerequisites: DispatcherPrerequisites) ext @tailrec private final def addInhabitants(add: Long): Long = { val c = inhabitants val r = c + add + if (r < 0) { + // We haven't succeeded in 
decreasing the inhabitants yet but the simple fact that we're trying to + // go below zero means that there is an imbalance and we might as well throw the exception + val e = new IllegalStateException("ACTOR SYSTEM CORRUPTED!!! A dispatcher can't have less than 0 inhabitants!") + reportFailure(e) + throw e + } if (Unsafe.instance.compareAndSwapLong(this, inhabitantsOffset, c, r)) r else addInhabitants(add) } @@ -243,18 +251,16 @@ abstract class MessageDispatcher(val prerequisites: DispatcherPrerequisites) ext } @tailrec - private final def ifSensibleToDoSoThenScheduleShutdown(): Unit = inhabitants match { - case 0 ⇒ - shutdownSchedule match { - case UNSCHEDULED ⇒ - if (updateShutdownSchedule(UNSCHEDULED, SCHEDULED)) scheduleShutdownAction() - else ifSensibleToDoSoThenScheduleShutdown() - case SCHEDULED ⇒ - if (updateShutdownSchedule(SCHEDULED, RESCHEDULED)) () - else ifSensibleToDoSoThenScheduleShutdown() - case RESCHEDULED ⇒ - } - case _ ⇒ + private final def ifSensibleToDoSoThenScheduleShutdown(): Unit = { + if (inhabitants <= 0) shutdownSchedule match { + case UNSCHEDULED ⇒ + if (updateShutdownSchedule(UNSCHEDULED, SCHEDULED)) scheduleShutdownAction() + else ifSensibleToDoSoThenScheduleShutdown() + case SCHEDULED ⇒ + if (updateShutdownSchedule(SCHEDULED, RESCHEDULED)) () + else ifSensibleToDoSoThenScheduleShutdown() + case RESCHEDULED ⇒ + } } private def scheduleShutdownAction(): Unit = { @@ -484,10 +490,8 @@ object ForkJoinExecutorConfigurator { threadFactory: ForkJoinPool.ForkJoinWorkerThreadFactory, unhandledExceptionHandler: Thread.UncaughtExceptionHandler) extends ForkJoinPool(parallelism, threadFactory, unhandledExceptionHandler, true) with LoadMetrics { - override def execute(r: Runnable): Unit = r match { - case m: Mailbox ⇒ super.execute(new MailboxExecutionTask(m)) - case other ⇒ super.execute(other) - } + override def execute(r: Runnable): Unit = + if (r eq null) throw new NullPointerException else super.execute(new AkkaForkJoinTask(r)) def 
atFullThrottle(): Boolean = this.getActiveThreadCount() >= this.getParallelism() } @@ -495,10 +499,11 @@ object ForkJoinExecutorConfigurator { /** * INTERNAL AKKA USAGE ONLY */ - final class MailboxExecutionTask(mailbox: Mailbox) extends ForkJoinTask[Unit] { - final override def setRawResult(u: Unit): Unit = () - final override def getRawResult(): Unit = () - final override def exec(): Boolean = try { mailbox.run; true } catch { + @SerialVersionUID(1L) + final class AkkaForkJoinTask(runnable: Runnable) extends ForkJoinTask[Unit] { + override def getRawResult(): Unit = () + override def setRawResult(unit: Unit): Unit = () + final override def exec(): Boolean = try { runnable.run(); true } catch { case anything: Throwable ⇒ val t = Thread.currentThread t.getUncaughtExceptionHandler match { diff --git a/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala index 6efb5771ef..895d48a0e7 100644 --- a/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.dispatch @@ -85,17 +85,18 @@ class BalancingDispatcher( if (!registerForExecution(receiver.mailbox, false, false)) teamWork() } - protected def teamWork(): Unit = if (attemptTeamWork) { - @tailrec def scheduleOne(i: Iterator[ActorCell] = team.iterator): Unit = - if (messageQueue.hasMessages - && i.hasNext - && (executorService.executor match { - case lm: LoadMetrics ⇒ lm.atFullThrottle == false - case other ⇒ true - }) - && !registerForExecution(i.next.mailbox, false, false)) - scheduleOne(i) + protected def teamWork(): Unit = + if (attemptTeamWork) { + @tailrec def scheduleOne(i: Iterator[ActorCell] = team.iterator): Unit = + if (messageQueue.hasMessages + && i.hasNext + && (executorService.executor match { + case lm: LoadMetrics ⇒ lm.atFullThrottle == false + case other ⇒ true + }) + && !registerForExecution(i.next.mailbox, false, false)) + scheduleOne(i) - scheduleOne() - } + scheduleOne() + } } diff --git a/akka-actor/src/main/scala/akka/dispatch/BatchingExecutor.scala b/akka-actor/src/main/scala/akka/dispatch/BatchingExecutor.scala index cde2034f64..87f7bc6770 100644 --- a/akka-actor/src/main/scala/akka/dispatch/BatchingExecutor.scala +++ b/akka-actor/src/main/scala/akka/dispatch/BatchingExecutor.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.dispatch diff --git a/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala index 6577e217a1..b1f4557815 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.dispatch diff --git a/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala b/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala index 910a5ceed5..3758bd6df0 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.dispatch diff --git a/akka-actor/src/main/scala/akka/dispatch/Future.scala b/akka-actor/src/main/scala/akka/dispatch/Future.scala index dbe6326e69..6e6c20225c 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Future.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Future.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.dispatch diff --git a/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala b/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala index d17ad5b7b6..e47f869a3e 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.dispatch diff --git a/akka-actor/src/main/scala/akka/dispatch/PinnedDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/PinnedDispatcher.scala index eb5b2686c3..1b645a64ea 100644 --- a/akka-actor/src/main/scala/akka/dispatch/PinnedDispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/PinnedDispatcher.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.dispatch @@ -26,7 +26,7 @@ class PinnedDispatcher( Int.MaxValue, Duration.Zero, _mailboxType, - _threadPoolConfig.copy(allowCorePoolTimeout = true, corePoolSize = 1, maxPoolSize = 1), + _threadPoolConfig.copy(corePoolSize = 1, maxPoolSize = 1), _shutdownTimeout) { @volatile diff --git a/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala b/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala index 8fb4d6dc4f..cb60867bb1 100644 --- a/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala +++ b/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.dispatch @@ -246,4 +246,4 @@ class SaneRejectedExecutionHandler extends RejectedExecutionHandler { if (threadPoolExecutor.isShutdown) throw new RejectedExecutionException("Shutdown") else runnable.run() } -} \ No newline at end of file +} diff --git a/akka-actor/src/main/scala/akka/event/EventBus.scala b/akka-actor/src/main/scala/akka/event/EventBus.scala index 6e3e25e42c..2499d0fcd9 100644 --- a/akka-actor/src/main/scala/akka/event/EventBus.scala +++ b/akka-actor/src/main/scala/akka/event/EventBus.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.event diff --git a/akka-actor/src/main/scala/akka/event/EventStream.scala b/akka-actor/src/main/scala/akka/event/EventStream.scala index 7fa6c8a5cf..c37ea4c925 100644 --- a/akka-actor/src/main/scala/akka/event/EventStream.scala +++ b/akka-actor/src/main/scala/akka/event/EventStream.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.event @@ -59,4 +59,4 @@ class EventStream(private val debug: Boolean = false) extends LoggingBus with Su if (debug) publish(Logging.Debug(simpleName(this), this.getClass, "unsubscribing " + subscriber + " from all channels")) } -} \ No newline at end of file +} diff --git a/akka-actor/src/main/scala/akka/event/Logging.scala b/akka-actor/src/main/scala/akka/event/Logging.scala index 14ba99bcaa..f054d787d1 100644 --- a/akka-actor/src/main/scala/akka/event/Logging.scala +++ b/akka-actor/src/main/scala/akka/event/Logging.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.event diff --git a/akka-actor/src/main/scala/akka/event/LoggingReceive.scala b/akka-actor/src/main/scala/akka/event/LoggingReceive.scala index 03739de894..07546e30cc 100644 --- a/akka-actor/src/main/scala/akka/event/LoggingReceive.scala +++ b/akka-actor/src/main/scala/akka/event/LoggingReceive.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.event @@ -44,4 +44,4 @@ class LoggingReceive(source: Option[AnyRef], r: Receive)(implicit context: Actor handled } def apply(o: Any): Unit = r(o) -} \ No newline at end of file +} diff --git a/akka-actor/src/main/scala/akka/japi/JavaAPI.scala b/akka-actor/src/main/scala/akka/japi/JavaAPI.scala index 87bb338b0f..b37cee26fe 100644 --- a/akka-actor/src/main/scala/akka/japi/JavaAPI.scala +++ b/akka-actor/src/main/scala/akka/japi/JavaAPI.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.japi diff --git a/akka-actor/src/main/scala/akka/pattern/AskSupport.scala b/akka-actor/src/main/scala/akka/pattern/AskSupport.scala index eef089a85c..18f88ba30e 100644 --- a/akka-actor/src/main/scala/akka/pattern/AskSupport.scala +++ b/akka-actor/src/main/scala/akka/pattern/AskSupport.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.pattern @@ -255,4 +255,4 @@ private[akka] object PromiseActorRef { result.future onComplete { _ ⇒ try a.stop() finally f.cancel() } a } -} \ No newline at end of file +} diff --git a/akka-actor/src/main/scala/akka/pattern/CircuitBreaker.scala b/akka-actor/src/main/scala/akka/pattern/CircuitBreaker.scala index 8a423c12b3..aa578baf38 100644 --- a/akka-actor/src/main/scala/akka/pattern/CircuitBreaker.scala +++ b/akka-actor/src/main/scala/akka/pattern/CircuitBreaker.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.pattern diff --git a/akka-actor/src/main/scala/akka/pattern/FutureTimeoutSupport.scala b/akka-actor/src/main/scala/akka/pattern/FutureTimeoutSupport.scala index 6820cf4bfa..7408f18355 100644 --- a/akka-actor/src/main/scala/akka/pattern/FutureTimeoutSupport.scala +++ b/akka-actor/src/main/scala/akka/pattern/FutureTimeoutSupport.scala @@ -1,7 +1,7 @@ package akka.pattern /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ import scala.concurrent.duration.Duration @@ -23,4 +23,4 @@ trait FutureTimeoutSupport { using.scheduleOnce(duration) { p completeWith { try value catch { case NonFatal(t) ⇒ Future.failed(t) } } } p.future } -} \ No newline at end of file +} diff --git a/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala b/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala index 9279707238..521175fe60 100644 --- a/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala +++ b/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.pattern @@ -58,4 +58,4 @@ trait GracefulStopSupport { case s ⇒ throw new IllegalArgumentException("Unknown ActorSystem implementation: '" + s + "'") } } -} \ No newline at end of file +} diff --git a/akka-actor/src/main/scala/akka/pattern/Patterns.scala b/akka-actor/src/main/scala/akka/pattern/Patterns.scala index 66e391c285..484181a71e 100644 --- a/akka-actor/src/main/scala/akka/pattern/Patterns.scala +++ b/akka-actor/src/main/scala/akka/pattern/Patterns.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.pattern diff --git a/akka-actor/src/main/scala/akka/pattern/PipeToSupport.scala b/akka-actor/src/main/scala/akka/pattern/PipeToSupport.scala index c0c3780319..49a019f03f 100644 --- a/akka-actor/src/main/scala/akka/pattern/PipeToSupport.scala +++ b/akka-actor/src/main/scala/akka/pattern/PipeToSupport.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.pattern @@ -41,4 +41,4 @@ trait PipeToSupport { * }}} */ implicit def pipe[T](future: Future[T])(implicit executionContext: ExecutionContext): PipeableFuture[T] = new PipeableFuture(future) -} \ No newline at end of file +} diff --git a/akka-actor/src/main/scala/akka/pattern/package.scala b/akka-actor/src/main/scala/akka/pattern/package.scala index b2f232de8f..f6dcbb9377 100644 --- a/akka-actor/src/main/scala/akka/pattern/package.scala +++ b/akka-actor/src/main/scala/akka/pattern/package.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka @@ -40,4 +40,4 @@ import akka.actor._ */ package object pattern extends PipeToSupport with AskSupport with GracefulStopSupport with FutureTimeoutSupport { -} \ No newline at end of file +} diff --git a/akka-actor/src/main/scala/akka/routing/ConsistentHash.scala b/akka-actor/src/main/scala/akka/routing/ConsistentHash.scala index 84100f0f21..c98fd7031c 100644 --- a/akka-actor/src/main/scala/akka/routing/ConsistentHash.scala +++ b/akka-actor/src/main/scala/akka/routing/ConsistentHash.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.routing diff --git a/akka-actor/src/main/scala/akka/routing/ConsistentHashingRouter.scala b/akka-actor/src/main/scala/akka/routing/ConsistentHashingRouter.scala index e88195f577..a7a5c43e2e 100644 --- a/akka-actor/src/main/scala/akka/routing/ConsistentHashingRouter.scala +++ b/akka-actor/src/main/scala/akka/routing/ConsistentHashingRouter.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.routing @@ -312,4 +312,4 @@ private[akka] case class ConsistentActorRef(actorRef: ActorRef, selfAddress: Add case a ⇒ actorRef.path.toString } } -} \ No newline at end of file +} diff --git a/akka-actor/src/main/scala/akka/routing/Listeners.scala b/akka-actor/src/main/scala/akka/routing/Listeners.scala index 346f994a2f..dede8940fe 100644 --- a/akka-actor/src/main/scala/akka/routing/Listeners.scala +++ b/akka-actor/src/main/scala/akka/routing/Listeners.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.routing diff --git a/akka-actor/src/main/scala/akka/routing/Routing.scala b/akka-actor/src/main/scala/akka/routing/Routing.scala index 8c2c81bac2..14d2de39a9 100644 --- a/akka-actor/src/main/scala/akka/routing/Routing.scala +++ b/akka-actor/src/main/scala/akka/routing/Routing.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.routing @@ -10,7 +10,7 @@ import scala.collection.immutable import scala.concurrent.duration._ import akka.actor._ import akka.ConfigurationException -import akka.dispatch.Dispatchers +import akka.dispatch.{ Envelope, Dispatchers } import akka.pattern.pipe import akka.japi.Util.immutableSeq import com.typesafe.config.Config @@ -118,25 +118,24 @@ private[akka] class RoutedActorCell(_system: ActorSystemImpl, _ref: InternalActo * resizer is invoked asynchronously, i.e. not necessarily before the * message has been sent. 
*/ - override def tell(message: Any, sender: ActorRef): Unit = { - val s = if (sender eq null) system.deadLetters else sender - val msg = message match { + override def sendMessage(msg: Envelope): Unit = { + val message = msg.message match { case wrapped: RouterEnvelope ⇒ wrapped.message case m ⇒ m } - applyRoute(s, message) foreach { - case Destination(snd, `self`) ⇒ - super.tell(msg, snd) - case Destination(snd, recipient) ⇒ + applyRoute(msg.sender, msg.message) foreach { + case Destination(sender, `self`) ⇒ + super.sendMessage(Envelope(message, sender, system)) + case Destination(sender, recipient) ⇒ resize() // only resize when the message target is one of the routees - recipient.tell(msg, snd) + recipient.tell(message, sender) } } def resize(): Unit = for (r ← routerConfig.resizer) { if (r.isTimeForResize(resizeCounter.getAndIncrement()) && resizeInProgress.compareAndSet(false, true)) - super.tell(Router.Resize, self) + super.sendMessage(Envelope(Router.Resize, self, system)) } } diff --git a/akka-actor/src/main/scala/akka/routing/package.scala b/akka-actor/src/main/scala/akka/routing/package.scala index 76dc2f3104..22074f7f10 100644 --- a/akka-actor/src/main/scala/akka/routing/package.scala +++ b/akka-actor/src/main/scala/akka/routing/package.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka diff --git a/akka-actor/src/main/scala/akka/serialization/Serialization.scala b/akka-actor/src/main/scala/akka/serialization/Serialization.scala index e0e52e8189..ef21355502 100644 --- a/akka-actor/src/main/scala/akka/serialization/Serialization.scala +++ b/akka-actor/src/main/scala/akka/serialization/Serialization.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.serialization diff --git a/akka-actor/src/main/scala/akka/serialization/SerializationExtension.scala b/akka-actor/src/main/scala/akka/serialization/SerializationExtension.scala index f96aa26e0c..d2b3844750 100644 --- a/akka-actor/src/main/scala/akka/serialization/SerializationExtension.scala +++ b/akka-actor/src/main/scala/akka/serialization/SerializationExtension.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.serialization @@ -13,4 +13,4 @@ object SerializationExtension extends ExtensionId[Serialization] with ExtensionI override def get(system: ActorSystem): Serialization = super.get(system) override def lookup = SerializationExtension override def createExtension(system: ExtendedActorSystem): Serialization = new Serialization(system) -} \ No newline at end of file +} diff --git a/akka-actor/src/main/scala/akka/serialization/Serializer.scala b/akka-actor/src/main/scala/akka/serialization/Serializer.scala index bdf8adbf85..736a4a6175 100644 --- a/akka-actor/src/main/scala/akka/serialization/Serializer.scala +++ b/akka-actor/src/main/scala/akka/serialization/Serializer.scala @@ -1,7 +1,7 @@ package akka.serialization /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ import java.io.{ ObjectOutputStream, ByteArrayOutputStream, ObjectInputStream, ByteArrayInputStream } diff --git a/akka-actor/src/main/scala/akka/util/BoundedBlockingQueue.scala b/akka-actor/src/main/scala/akka/util/BoundedBlockingQueue.scala index 613599fa8e..24fc20b824 100644 --- a/akka-actor/src/main/scala/akka/util/BoundedBlockingQueue.scala +++ b/akka-actor/src/main/scala/akka/util/BoundedBlockingQueue.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.util @@ -29,7 +29,7 @@ class BoundedBlockingQueue[E <: AnyRef]( require(maxCapacity > 0) } - protected val lock = new ReentrantLock(false) // TODO might want to switch to ReentrantReadWriteLock + protected val lock = new ReentrantLock(false) private val notEmpty = lock.newCondition() private val notFull = lock.newCondition() @@ -285,12 +285,13 @@ class BoundedBlockingQueue[E <: AnyRef]( last = -1 //To avoid 2 subsequent removes without a next in between lock.lock() try { - @tailrec def removeTarget(i: Iterator[E] = backing.iterator()): Unit = if (i.hasNext) { - if (i.next eq target) { - i.remove() - notFull.signal() - } else removeTarget(i) - } + @tailrec def removeTarget(i: Iterator[E] = backing.iterator()): Unit = + if (i.hasNext) { + if (i.next eq target) { + i.remove() + notFull.signal() + } else removeTarget(i) + } removeTarget() } finally { diff --git a/akka-actor/src/main/scala/akka/util/BoxedType.scala b/akka-actor/src/main/scala/akka/util/BoxedType.scala index f5f95096d9..29cbc79d01 100644 --- a/akka-actor/src/main/scala/akka/util/BoxedType.scala +++ b/akka-actor/src/main/scala/akka/util/BoxedType.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.util diff --git a/akka-actor/src/main/scala/akka/util/ByteIterator.scala b/akka-actor/src/main/scala/akka/util/ByteIterator.scala index ef4c0c49bc..ded49f63a6 100644 --- a/akka-actor/src/main/scala/akka/util/ByteIterator.scala +++ b/akka-actor/src/main/scala/akka/util/ByteIterator.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.util @@ -252,14 +252,15 @@ object ByteIterator { normalize() } - @tailrec final override def drop(n: Int): this.type = if ((n > 0) && !isEmpty) { - val nCurrent = math.min(n, current.len) - current.drop(n) - val rest = n - nCurrent - assert(current.isEmpty || (rest == 0)) - normalize() - drop(rest) - } else this + @tailrec final override def drop(n: Int): this.type = + if ((n > 0) && !isEmpty) { + val nCurrent = math.min(n, current.len) + current.drop(n) + val rest = n - nCurrent + assert(current.isEmpty || (rest == 0)) + normalize() + drop(rest) + } else this final override def takeWhile(p: Byte ⇒ Boolean): this.type = { var stop = false @@ -275,12 +276,13 @@ object ByteIterator { normalize() } - @tailrec final override def dropWhile(p: Byte ⇒ Boolean): this.type = if (!isEmpty) { - current.dropWhile(p) - val dropMore = current.isEmpty - normalize() - if (dropMore) dropWhile(p) else this - } else this + @tailrec final override def dropWhile(p: Byte ⇒ Boolean): this.type = + if (!isEmpty) { + current.dropWhile(p) + val dropMore = current.isEmpty + normalize() + if (dropMore) dropWhile(p) else this + } else this final override def copyToArray[B >: Byte](xs: Array[B], start: Int, len: Int): Unit = { var pos = start @@ -309,19 +311,20 @@ object ByteIterator { } } - @tailrec protected final def getToArray[A](xs: Array[A], offset: Int, n: Int, elemSize: Int)(getSingle: ⇒ A)(getMult: (Array[A], Int, Int) ⇒ Unit): this.type = if (n <= 0) this else { - if (isEmpty) Iterator.empty.next - val nDone = if (current.len >= elemSize) { - val nCurrent = math.min(n, current.len / elemSize) - getMult(xs, offset, nCurrent) - nCurrent - } else { - xs(offset) = getSingle - 1 + @tailrec protected final def getToArray[A](xs: Array[A], offset: Int, n: Int, elemSize: Int)(getSingle: ⇒ A)(getMult: (Array[A], Int, Int) ⇒ Unit): this.type = + if (n <= 0) this else { + if (isEmpty) Iterator.empty.next + val nDone = if (current.len >= elemSize) { + val nCurrent = math.min(n, 
current.len / elemSize) + getMult(xs, offset, nCurrent) + nCurrent + } else { + xs(offset) = getSingle + 1 + } + normalize() + getToArray(xs, offset + nDone, n - nDone, elemSize)(getSingle)(getMult) } - normalize() - getToArray(xs, offset + nDone, n - nDone, elemSize)(getSingle)(getMult) - } def getBytes(xs: Array[Byte], offset: Int, n: Int): this.type = getToArray(xs, offset, n, 1) { getByte } { current.getBytes(_, _, _) } @@ -359,16 +362,17 @@ object ByteIterator { } override def skip(n: Long): Long = { - @tailrec def skipImpl(n: Long, skipped: Long): Long = if (n > 0) { - if (!isEmpty) { - val m = current.asInputStream.skip(n) - normalize() - val newN = n - m - val newSkipped = skipped + m - if (newN > 0) skipImpl(newN, newSkipped) - else newSkipped + @tailrec def skipImpl(n: Long, skipped: Long): Long = + if (n > 0) { + if (!isEmpty) { + val m = current.asInputStream.skip(n) + normalize() + val newN = n - m + val newSkipped = skipped + m + if (newN > 0) skipImpl(newN, newSkipped) + else newSkipped + } else 0 } else 0 - } else 0 skipImpl(n, 0) } diff --git a/akka-actor/src/main/scala/akka/util/ByteString.scala b/akka-actor/src/main/scala/akka/util/ByteString.scala index 70f929c11a..7905a15e42 100644 --- a/akka-actor/src/main/scala/akka/util/ByteString.scala +++ b/akka-actor/src/main/scala/akka/util/ByteString.scala @@ -1,13 +1,15 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.util import java.nio.{ ByteBuffer, ByteOrder } +import java.lang.{ Iterable ⇒ JIterable } import scala.collection.IndexedSeqOptimized import scala.collection.mutable.{ Builder, WrappedArray } +import scala.collection.immutable import scala.collection.immutable.{ IndexedSeq, VectorBuilder } import scala.collection.generic.CanBuildFrom import scala.reflect.ClassTag @@ -94,10 +96,12 @@ object ByteString { private[akka] def toByteString1: ByteString1 = ByteString1(bytes) - def asByteBuffer: ByteBuffer = - toByteString1.asByteBuffer + def asByteBuffer: ByteBuffer = toByteString1.asByteBuffer - def decodeString(charset: String): String = new String(bytes, charset) + def asByteBuffers: scala.collection.immutable.Iterable[ByteBuffer] = List(asByteBuffer) + + def decodeString(charset: String): String = + if (isEmpty) "" else new String(bytes, charset) def ++(that: ByteString): ByteString = if (that.isEmpty) this @@ -110,9 +114,10 @@ object ByteString { } private[akka] object ByteString1 { - def apply(bytes: Array[Byte]): ByteString1 = new ByteString1(bytes) + val empty: ByteString1 = new ByteString1(Array.empty[Byte]) + def apply(bytes: Array[Byte]): ByteString1 = ByteString1(bytes, 0, bytes.length) def apply(bytes: Array[Byte], startIndex: Int, length: Int): ByteString1 = - new ByteString1(bytes, startIndex, length) + if (length == 0) empty else new ByteString1(bytes, startIndex, length) } /** @@ -145,6 +150,8 @@ object ByteString { else buffer } + def asByteBuffers: scala.collection.immutable.Iterable[ByteBuffer] = List(asByteBuffer) + def decodeString(charset: String): String = new String(if (length == bytes.length) bytes else toArray, charset) @@ -250,6 +257,8 @@ object ByteString { def asByteBuffer: ByteBuffer = compact.asByteBuffer + def asByteBuffers: scala.collection.immutable.Iterable[ByteBuffer] = bytestrings map { _.asByteBuffer } + def decodeString(charset: String): String = compact.decodeString(charset) } @@ -345,6 +354,25 @@ sealed abstract 
class ByteString extends IndexedSeq[Byte] with IndexedSeqOptimiz */ def asByteBuffer: ByteBuffer + /** + * Returns an immutable Iterable of read-only ByteBuffers that directly wraps this ByteStrings + * all fragments. Will always have at least one entry. + * + * Scala API + */ + def asByteBuffers: immutable.Iterable[ByteBuffer] + + /** + * Returns an Iterable of read-only ByteBuffers that directly wraps this ByteStrings + * all fragments. Will always have at least one entry. + * + * Java API + */ + def getByteBuffers(): JIterable[ByteBuffer] = { + import scala.collection.JavaConverters.asJavaIterableConverter + asByteBuffers.asJava + } + /** * Creates a new ByteBuffer with a copy of all bytes contained in this * ByteString. diff --git a/akka-actor/src/main/scala/akka/util/ClassLoaderObjectInputStream.scala b/akka-actor/src/main/scala/akka/util/ClassLoaderObjectInputStream.scala index ab2514861e..bc71e54006 100644 --- a/akka-actor/src/main/scala/akka/util/ClassLoaderObjectInputStream.scala +++ b/akka-actor/src/main/scala/akka/util/ClassLoaderObjectInputStream.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.util diff --git a/akka-actor/src/main/scala/akka/util/Collections.scala b/akka-actor/src/main/scala/akka/util/Collections.scala index 0ccbcd408c..293556861b 100644 --- a/akka-actor/src/main/scala/akka/util/Collections.scala +++ b/akka-actor/src/main/scala/akka/util/Collections.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.util @@ -38,12 +38,13 @@ private[akka] object Collections { } else hasNext //Attempt to find the next } else _hasNext // Return if we found one - override final def next(): To = if (hasNext) { - val ret = _next - _next = null.asInstanceOf[To] // Mark as consumed (nice to the GC, don't leak the last returned value) - _hasNext = false // Mark as consumed (we need to look for the next value) - ret - } else throw new java.util.NoSuchElementException("next") + override final def next(): To = + if (hasNext) { + val ret = _next + _next = null.asInstanceOf[To] // Mark as consumed (nice to the GC, don't leak the last returned value) + _hasNext = false // Mark as consumed (we need to look for the next value) + ret + } else throw new java.util.NoSuchElementException("next") } } @@ -51,4 +52,4 @@ private[akka] object Collections { override def foreach[C](f: To ⇒ C) = iterator foreach f } -} \ No newline at end of file +} diff --git a/akka-actor/src/main/scala/akka/util/Crypt.scala b/akka-actor/src/main/scala/akka/util/Crypt.scala index 280cd90768..86b98a2cfd 100644 --- a/akka-actor/src/main/scala/akka/util/Crypt.scala +++ b/akka-actor/src/main/scala/akka/util/Crypt.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.util diff --git a/akka-actor/src/main/scala/akka/util/HashCode.scala b/akka-actor/src/main/scala/akka/util/HashCode.scala index 9e19c5e7c6..486434f577 100644 --- a/akka-actor/src/main/scala/akka/util/HashCode.scala +++ b/akka-actor/src/main/scala/akka/util/HashCode.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.util diff --git a/akka-actor/src/main/scala/akka/util/Helpers.scala b/akka-actor/src/main/scala/akka/util/Helpers.scala index 430d4582ae..c0963b30fa 100644 --- a/akka-actor/src/main/scala/akka/util/Helpers.scala +++ b/akka-actor/src/main/scala/akka/util/Helpers.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.util @@ -46,4 +46,48 @@ object Helpers { if (next == 0) sb.toString else base64(next, sb) } + + /** + * Implicit class providing `requiring` methods. This class is based on + * `Predef.ensuring` in the Scala standard library. The difference is that + * this class's methods throw `IllegalArgumentException`s rather than + * `AssertionError`s. + * + * An example adapted from `Predef`'s documentation: + * {{{ + * import akka.util.Helpers.Requiring + * + * def addNaturals(nats: List[Int]): Int = { + * require(nats forall (_ >= 0), "List contains negative numbers") + * nats.foldLeft(0)(_ + _) + * } requiring(_ >= 0) + * }}} + * + * @param value The value to check. + */ + @inline final implicit class Requiring[A](val value: A) extends AnyVal { + /** + * Check that a condition is true. If true, return `value`, otherwise throw + * an `IllegalArgumentException` with the given message. + * + * @param cond The condition to check. + * @param msg The message to report if the condition isn't met. + */ + @inline def requiring(cond: Boolean, msg: ⇒ Any): A = { + require(cond, msg) + value + } + + /** + * Check that a condition is true for the `value`. If true, return `value`, + * otherwise throw an `IllegalArgumentException` with the given message. + * + * @param cond The function used to check the `value`. + * @param msg The message to report if the condition isn't met. 
+ */ + @inline def requiring(cond: A ⇒ Boolean, msg: ⇒ Any): A = { + require(cond(value), msg) + value + } + } } diff --git a/akka-actor/src/main/scala/akka/util/Index.scala b/akka-actor/src/main/scala/akka/util/Index.scala index 83d8a40885..5022db9865 100644 --- a/akka-actor/src/main/scala/akka/util/Index.scala +++ b/akka-actor/src/main/scala/akka/util/Index.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.util diff --git a/akka-actor/src/main/scala/akka/util/LockUtil.scala b/akka-actor/src/main/scala/akka/util/LockUtil.scala index 8279fcc7df..4647e2e9b2 100644 --- a/akka-actor/src/main/scala/akka/util/LockUtil.scala +++ b/akka-actor/src/main/scala/akka/util/LockUtil.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.util diff --git a/akka-actor/src/main/scala/akka/util/Reflect.scala b/akka-actor/src/main/scala/akka/util/Reflect.scala index f62ea5fb8c..5761fe5dd3 100644 --- a/akka-actor/src/main/scala/akka/util/Reflect.scala +++ b/akka-actor/src/main/scala/akka/util/Reflect.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.util import scala.util.control.NonFatal @@ -50,4 +50,4 @@ private[akka] object Reflect { * @return a function which when applied will create a new instance from the default constructor of the given class */ private[akka] def instantiator[T](clazz: Class[T]): () ⇒ T = () ⇒ instantiate(clazz) -} \ No newline at end of file +} diff --git a/akka-actor/src/main/scala/akka/util/SerializedSuspendableExecutionContext.scala b/akka-actor/src/main/scala/akka/util/SerializedSuspendableExecutionContext.scala new file mode 100644 index 0000000000..fa52d20c5c --- /dev/null +++ b/akka-actor/src/main/scala/akka/util/SerializedSuspendableExecutionContext.scala @@ -0,0 +1,81 @@ +package akka.util + +import java.util.concurrent.ConcurrentLinkedQueue +import java.util.concurrent.atomic.AtomicInteger +import scala.concurrent.ExecutionContext +import scala.util.control.NonFatal +import scala.annotation.{ tailrec, switch } + +private[akka] object SerializedSuspendableExecutionContext { + final val Off = 0 + final val On = 1 + final val Suspended = 2 + + def apply(throughput: Int)(implicit context: ExecutionContext): SerializedSuspendableExecutionContext = + new SerializedSuspendableExecutionContext(throughput)(context match { + case s: SerializedSuspendableExecutionContext ⇒ s.context + case other ⇒ other + }) +} + +/** + * This `ExecutionContext` allows to wrap an underlying `ExecutionContext` and provide guaranteed serial execution + * of tasks submitted to it. On top of that it also allows for *suspending* and *resuming* processing of tasks. + * + * WARNING: This type must never leak into User code as anything but `ExecutionContext` + * + * @param throughput maximum number of tasks to be executed in serial before relinquishing the executing thread. 
+ * @param context the underlying context which will be used to actually execute the submitted tasks + */ +private[akka] final class SerializedSuspendableExecutionContext(throughput: Int)(val context: ExecutionContext) + extends ConcurrentLinkedQueue[Runnable] with Runnable with ExecutionContext { + import SerializedSuspendableExecutionContext._ + require(throughput > 0, s"SerializedSuspendableExecutionContext.throughput must be greater than 0 but was $throughput") + + private final val state = new AtomicInteger(Off) + @tailrec private final def addState(newState: Int): Boolean = { + val c = state.get + state.compareAndSet(c, c | newState) || addState(newState) + } + @tailrec private final def remState(oldState: Int) { + val c = state.get + if (state.compareAndSet(c, c & ~oldState)) attach() else remState(oldState) + } + + /** + * Resumes execution of tasks until `suspend` is called, + * if it isn't currently suspended, it is a no-op. + * This operation is idempotent. + */ + final def resume(): Unit = remState(Suspended) + + /** + * Suspends execution of tasks until `resume` is called, + * this operation is idempotent. 
+ */ + final def suspend(): Unit = addState(Suspended) + + final def run(): Unit = { + @tailrec def run(done: Int): Unit = + if (done < throughput && state.get == On) { + poll() match { + case null ⇒ () + case some ⇒ + try some.run() catch { case NonFatal(t) ⇒ context reportFailure t } + run(done + 1) + } + } + try run(0) finally remState(On) + } + + final def attach(): Unit = if (!isEmpty && state.compareAndSet(Off, On)) context execute this + override final def execute(task: Runnable): Unit = try add(task) finally attach() + override final def reportFailure(t: Throwable): Unit = context reportFailure t + + override final def toString: String = (state.get: @switch) match { + case 0 ⇒ "Off" + case 1 ⇒ "On" + case 2 ⇒ "Off & Suspended" + case 3 ⇒ "On & Suspended" + } +} \ No newline at end of file diff --git a/akka-actor/src/main/scala/akka/util/SubclassifiedIndex.scala b/akka-actor/src/main/scala/akka/util/SubclassifiedIndex.scala index 236f645864..206082c464 100644 --- a/akka-actor/src/main/scala/akka/util/SubclassifiedIndex.scala +++ b/akka-actor/src/main/scala/akka/util/SubclassifiedIndex.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.util diff --git a/akka-actor/src/main/scala/akka/util/Timeout.scala b/akka-actor/src/main/scala/akka/util/Timeout.scala index 7062eabd35..4e758a6373 100644 --- a/akka-actor/src/main/scala/akka/util/Timeout.scala +++ b/akka-actor/src/main/scala/akka/util/Timeout.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.util diff --git a/akka-actor/src/main/scala/akka/util/Unsafe.java b/akka-actor/src/main/scala/akka/util/Unsafe.java index 005d1b3441..b43e1704ca 100644 --- a/akka-actor/src/main/scala/akka/util/Unsafe.java +++ b/akka-actor/src/main/scala/akka/util/Unsafe.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ diff --git a/akka-actor/src/main/scala/akka/util/WildcardTree.scala b/akka-actor/src/main/scala/akka/util/WildcardTree.scala index 519e1a73e5..76c5b2124c 100644 --- a/akka-actor/src/main/scala/akka/util/WildcardTree.scala +++ b/akka-actor/src/main/scala/akka/util/WildcardTree.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.util diff --git a/akka-agent/src/main/scala/akka/agent/Agent.scala b/akka-agent/src/main/scala/akka/agent/Agent.scala index 215de37c28..fae81d1dc9 100644 --- a/akka-agent/src/main/scala/akka/agent/Agent.scala +++ b/akka-agent/src/main/scala/akka/agent/Agent.scala @@ -1,29 +1,18 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.agent -import akka.actor._ -import akka.japi.{ Function ⇒ JFunc, Procedure ⇒ JProc } -import akka.pattern.ask -import akka.util.Timeout import scala.concurrent.stm._ -import scala.concurrent.{ ExecutionContext, Future, Promise, Await } -import scala.concurrent.duration.{ FiniteDuration, Duration } - -/** - * Used internally to send functions. - */ -private[akka] case class Update[T](function: T ⇒ T) -private[akka] case class Alter[T](function: T ⇒ T) -private[akka] case object Get +import scala.concurrent.{ ExecutionContext, Future, Promise } +import akka.util.{ SerializedSuspendableExecutionContext } /** * Factory method for creating an Agent. 
*/ object Agent { - def apply[T](initialValue: T)(implicit system: ActorSystem) = new Agent(initialValue, system, system) + def apply[T](initialValue: T)(implicit context: ExecutionContext) = new Agent(initialValue, context) } /** @@ -96,14 +85,13 @@ object Agent { * agent4.close * }}} */ -class Agent[T](initialValue: T, refFactory: ActorRefFactory, system: ActorSystem) { +class Agent[T](initialValue: T, context: ExecutionContext) { private val ref = Ref(initialValue) - private val updater = refFactory.actorOf(Props(new AgentUpdater(this, ref))).asInstanceOf[InternalActorRef] //TODO can we avoid this somehow? - - def this(initialValue: T, system: ActorSystem) = this(initialValue, system, system) + private val updater = SerializedSuspendableExecutionContext(10)(context) /** * Read the internal state of the agent. + * Java API */ def get(): T = ref.single.get @@ -113,187 +101,104 @@ class Agent[T](initialValue: T, refFactory: ActorRefFactory, system: ActorSystem def apply(): T = get /** - * Dispatch a function to update the internal state. + * Dispatch a new value for the internal state. Behaves the same + * as sending a function (x => newValue). */ - def send(f: T ⇒ T): Unit = { - def dispatch = updater ! Update(f) + def send(newValue: T): Unit = withinTransaction(new Runnable { def run = ref.single.update(newValue) }) + + /** + * Dispatch a function to update the internal state. + * In Java, pass in an instance of `akka.dispatch.Mapper`. + */ + def send(f: T ⇒ T): Unit = withinTransaction(new Runnable { def run = ref.single.transform(f) }) + + /** + * Dispatch a function to update the internal state but on its own thread. + * This does not use the reactive thread pool and can be used for long-running + * or blocking operations. Dispatches using either `sendOff` or `send` will + * still be executed in order. + * In Java, pass in an instance of `akka.dispatch.Mapper`. 
+ */ + def sendOff(f: T ⇒ T)(implicit ec: ExecutionContext): Unit = withinTransaction( + new Runnable { + def run = + try updater.suspend() finally ec.execute(new Runnable { def run = try ref.single.transform(f) finally updater.resume() }) + }) + + /** + * Dispatch an update to the internal state, and return a Future where + * that new state can be obtained. + * In Java, pass in an instance of `akka.dispatch.Mapper`. + */ + def alter(newValue: T): Future[T] = doAlter({ ref.single.update(newValue); newValue }) + + /** + * Dispatch a function to update the internal state, and return a Future where + * that new state can be obtained. + * In Java, pass in an instance of `akka.dispatch.Mapper`. + */ + def alter(f: T ⇒ T): Future[T] = doAlter(ref.single.transformAndGet(f)) + + /** + * Dispatch a function to update the internal state but on its own thread, + * and return a Future where that new state can be obtained. + * This does not use the reactive thread pool and can be used for long-running + * or blocking operations. Dispatches using either `alterOff` or `alter` will + * still be executed in order. + * In Java, pass in an instance of `akka.dispatch.Mapper`. + */ + def alterOff(f: T ⇒ T)(implicit ec: ExecutionContext): Future[T] = { + val result = Promise[T]() + withinTransaction(new Runnable { + def run = { + updater.suspend() + result completeWith Future(try ref.single.transformAndGet(f) finally updater.resume()) + } + }) + result.future + } + + private final def withinTransaction(run: Runnable): Unit = { + def dispatch = updater.execute(run) Txn.findCurrent match { case Some(txn) ⇒ Txn.afterCommit(status ⇒ dispatch)(txn) case _ ⇒ dispatch } } - /** - * Dispatch a function to update the internal state, and return a Future where - * that new state can be obtained within the given timeout. 
- */ - def alter(f: T ⇒ T)(implicit timeout: Timeout): Future[T] = { - def dispatch = ask(updater, Alter(f)).asInstanceOf[Future[T]] + private final def doAlter(f: ⇒ T): Future[T] = { Txn.findCurrent match { case Some(txn) ⇒ val result = Promise[T]() - Txn.afterCommit(status ⇒ result completeWith dispatch)(txn) + Txn.afterCommit(status ⇒ result completeWith Future(f)(updater))(txn) result.future - case _ ⇒ dispatch + case _ ⇒ Future(f)(updater) } } - /** - * Dispatch a new value for the internal state. Behaves the same - * as sending a function (x => newValue). - */ - def send(newValue: T): Unit = send(_ ⇒ newValue) - - /** - * Dispatch a new value for the internal state. Behaves the same - * as sending a function (x => newValue). - */ - def update(newValue: T): Unit = send(newValue) - - /** - * Dispatch a function to update the internal state but on its own thread. - * This does not use the reactive thread pool and can be used for long-running - * or blocking operations. Dispatches using either `sendOff` or `send` will - * still be executed in order. - */ - def sendOff(f: T ⇒ T)(implicit ec: ExecutionContext): Unit = { - send((value: T) ⇒ { - suspend() - Future(ref.single.transformAndGet(f)).andThen({ case _ ⇒ resume() }) - value - }) - } - - /** - * Dispatch a function to update the internal state but on its own thread, - * and return a Future where that new state can be obtained within the given timeout. - * This does not use the reactive thread pool and can be used for long-running - * or blocking operations. Dispatches using either `alterOff` or `alter` will - * still be executed in order. 
- */ - def alterOff(f: T ⇒ T)(implicit timeout: Timeout, ec: ExecutionContext): Future[T] = { - val result = Promise[T]() - send((value: T) ⇒ { - suspend() - result completeWith Future(ref.single.transformAndGet(f)).andThen({ case _ ⇒ resume() }) - value - }) - result.future - } - /** * A future to the current value that will be completed after any currently * queued updates. */ - def future(implicit timeout: Timeout): Future[T] = (updater ? Get).asInstanceOf[Future[T]] //Known to be safe - - /** - * Gets this agent's value after all currently queued updates have completed. - */ - def await(implicit timeout: Timeout): T = Await.result(future, timeout.duration) + def future(): Future[T] = Future(ref.single.get)(updater) /** * Map this agent to a new agent, applying the function to the internal state. * Does not change the value of this agent. + * In Java, pass in an instance of `akka.dispatch.Mapper`. */ - def map[B](f: T ⇒ B): Agent[B] = Agent(f(get))(system) + def map[B](f: T ⇒ B): Agent[B] = Agent(f(get))(updater) /** * Flatmap this agent to a new agent, applying the function to the internal state. * Does not change the value of this agent. + * In Java, pass in an instance of `akka.dispatch.Mapper`. */ def flatMap[B](f: T ⇒ Agent[B]): Agent[B] = f(get) /** * Applies the function to the internal state. Does not change the value of this agent. + * In Java, pass in an instance of `akka.dispatch.Foreach`. */ def foreach[U](f: T ⇒ U): Unit = f(get) - - /** - * Suspends processing of `send` actions for the agent. - */ - def suspend(): Unit = updater.suspend() - - /** - * Resumes processing of `send` actions for the agent. - */ - def resume(): Unit = updater.resume(causedByFailure = null) - - /** - * Closes the agents and makes it eligible for garbage collection. - * A closed agent cannot accept any `send` actions. 
- */ - def close(): Unit = updater.stop() - - // --------------------------------------------- - // Support for Java API Functions and Procedures - // --------------------------------------------- - - /** - * Java API: - * Dispatch a function to update the internal state. - */ - def send(f: JFunc[T, T]): Unit = send(x ⇒ f(x)) - - /** - * Java API - * Dispatch a function to update the internal state, and return a Future where that new state can be obtained - * within the given timeout - */ - def alter(f: JFunc[T, T], timeout: FiniteDuration): Future[T] = alter(x ⇒ f(x))(timeout) - - /** - * Java API: - * Dispatch a function to update the internal state but on its own thread. - * This does not use the reactive thread pool and can be used for long-running - * or blocking operations. Dispatches using either `sendOff` or `send` will - * still be executed in order. - */ - def sendOff(f: JFunc[T, T], ec: ExecutionContext): Unit = sendOff(x ⇒ f(x))(ec) - - /** - * Java API: - * Dispatch a function to update the internal state but on its own thread, - * and return a Future where that new state can be obtained within the given timeout. - * This does not use the reactive thread pool and can be used for long-running - * or blocking operations. Dispatches using either `alterOff` or `alter` will - * still be executed in order. - */ - def alterOff(f: JFunc[T, T], timeout: FiniteDuration, ec: ExecutionContext): Unit = alterOff(x ⇒ f(x))(Timeout(timeout), ec) - - /** - * Java API: - * Map this agent to a new agent, applying the function to the internal state. - * Does not change the value of this agent. - */ - def map[B](f: JFunc[T, B]): Agent[B] = Agent(f(get))(system) - - /** - * Java API: - * Flatmap this agent to a new agent, applying the function to the internal state. - * Does not change the value of this agent. - */ - def flatMap[B](f: JFunc[T, Agent[B]]): Agent[B] = f(get) - - /** - * Java API: - * Applies the function to the internal state. 
Does not change the value of this agent. - */ - def foreach(f: JProc[T]): Unit = f(get) -} - -/** - * Agent updater actor. Used internally for `send` actions. - * - * INTERNAL API - */ -private[akka] class AgentUpdater[T](agent: Agent[T], ref: Ref[T]) extends Actor { - def receive = { - case u: Update[_] ⇒ update(u.function.asInstanceOf[T ⇒ T]) - case a: Alter[_] ⇒ sender ! update(a.function.asInstanceOf[T ⇒ T]) - case Get ⇒ sender ! agent.get - case _ ⇒ - } - - def update(function: T ⇒ T): T = ref.single.transformAndGet(function) -} +} \ No newline at end of file diff --git a/akka-agent/src/test/scala/akka/agent/AgentSpec.scala b/akka-agent/src/test/scala/akka/agent/AgentSpec.scala index e6fb305151..926720600c 100644 --- a/akka-agent/src/test/scala/akka/agent/AgentSpec.scala +++ b/akka-agent/src/test/scala/akka/agent/AgentSpec.scala @@ -18,7 +18,7 @@ class CountDownFunction[A](num: Int = 1) extends Function1[A, A] { class AgentSpec extends AkkaSpec { implicit val timeout = Timeout(5.seconds.dilated) - + import system.dispatcher "Agent" must { "update with send dispatches in order sent" in { val countDown = new CountDownFunction[String] @@ -31,36 +31,29 @@ class AgentSpec extends AkkaSpec { countDown.await(5 seconds) agent() must be("abcd") - - agent.close() } "maintain order between send and sendOff" in { val countDown = new CountDownFunction[String] - val l1, l2 = new CountDownLatch(1) - import system.dispatcher - + val l1, l2 = new TestLatch(1) val agent = Agent("a") agent send (_ + "b") - agent.sendOff((s: String) ⇒ { l1.countDown; l2.await(5, TimeUnit.SECONDS); s + "c" }) - l1.await(5, TimeUnit.SECONDS) + agent.sendOff((s: String) ⇒ { l1.countDown; Await.ready(l2, timeout.duration); s + "c" }) + Await.ready(l1, timeout.duration) agent send (_ + "d") agent send countDown l2.countDown countDown.await(5 seconds) agent() must be("abcd") - - agent.close() } "maintain order between alter and alterOff" in { - import system.dispatcher - val l1, l2 = new 
CountDownLatch(1) + val l1, l2 = new TestLatch(1) val agent = Agent("a") val r1 = agent.alter(_ + "b") - val r2 = agent.alterOff((s: String) ⇒ { l1.countDown; l2.await(5, TimeUnit.SECONDS); s + "c" }) - l1.await(5, TimeUnit.SECONDS) + val r2 = agent.alterOff(s ⇒ { l1.countDown; Await.ready(l2, timeout.duration); s + "c" }) + Await.ready(l1, timeout.duration) val r3 = agent.alter(_ + "d") val result = Future.sequence(Seq(r1, r2, r3)).map(_.mkString(":")) l2.countDown @@ -68,18 +61,16 @@ class AgentSpec extends AkkaSpec { Await.result(result, 5 seconds) must be === "ab:abc:abcd" agent() must be("abcd") - - agent.close() } "be immediately readable" in { val countDown = new CountDownFunction[Int] - val readLatch = new CountDownLatch(1) + val readLatch = new TestLatch(1) val readTimeout = 5 seconds val agent = Agent(5) val f1 = (i: Int) ⇒ { - readLatch.await(readTimeout.length, readTimeout.unit) + Await.ready(readLatch, readTimeout) i + 5 } agent send f1 @@ -90,15 +81,12 @@ class AgentSpec extends AkkaSpec { countDown.await(5 seconds) read must be(5) agent() must be(10) - - agent.close() } "be readable within a transaction" in { val agent = Agent(5) val value = atomic { t ⇒ agent() } value must be(5) - agent.close() } "dispatch sends in successful transactions" in { @@ -112,8 +100,6 @@ class AgentSpec extends AkkaSpec { countDown.await(5 seconds) agent() must be(10) - - agent.close() } "not dispatch sends in aborted transactions" in { @@ -132,8 +118,6 @@ class AgentSpec extends AkkaSpec { countDown.await(5 seconds) agent() must be(5) - - agent.close() } "be able to return a 'queued' future" in { @@ -142,8 +126,6 @@ class AgentSpec extends AkkaSpec { agent send (_ + "c") Await.result(agent.future, timeout.duration) must be("abc") - - agent.close() } "be able to await the value after updates have completed" in { @@ -151,9 +133,7 @@ class AgentSpec extends AkkaSpec { agent send (_ + "b") agent send (_ + "c") - agent.await must be("abc") - - agent.close() + 
Await.result(agent.future, timeout.duration) must be("abc") } "be able to be mapped" in { @@ -162,9 +142,6 @@ class AgentSpec extends AkkaSpec { agent1() must be(5) agent2() must be(10) - - agent1.close() - agent2.close() } "be able to be used in a 'foreach' for comprehension" in { @@ -176,8 +153,6 @@ class AgentSpec extends AkkaSpec { } result must be(3) - - agent.close() } "be able to be used in a 'map' for comprehension" in { @@ -186,9 +161,6 @@ class AgentSpec extends AkkaSpec { agent1() must be(5) agent2() must be(10) - - agent1.close() - agent2.close() } "be able to be used in a 'flatMap' for comprehension" in { @@ -203,10 +175,6 @@ class AgentSpec extends AkkaSpec { agent1() must be(1) agent2() must be(2) agent3() must be(3) - - agent1.close() - agent2.close() - agent3.close() } } } diff --git a/akka-camel/src/main/scala/akka/camel/Activation.scala b/akka-camel/src/main/scala/akka/camel/Activation.scala index b035cbd267..c4f1097a18 100644 --- a/akka-camel/src/main/scala/akka/camel/Activation.scala +++ b/akka-camel/src/main/scala/akka/camel/Activation.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.camel @@ -8,9 +8,8 @@ import akka.camel.internal._ import akka.util.Timeout import akka.actor.{ ActorSystem, Props, ActorRef } import akka.pattern._ -import concurrent.{ ExecutionContext, Future } -import scala.concurrent.duration.Duration -import scala.concurrent.duration.FiniteDuration +import scala.concurrent.{ ExecutionContext, Future } +import scala.concurrent.duration.{ Duration, FiniteDuration } /** * Activation trait that can be used to wait on activation or de-activation of Camel endpoints. 
diff --git a/akka-camel/src/main/scala/akka/camel/ActorRouteDefinition.scala b/akka-camel/src/main/scala/akka/camel/ActorRouteDefinition.scala index e8b1be8550..c75d45933a 100644 --- a/akka-camel/src/main/scala/akka/camel/ActorRouteDefinition.scala +++ b/akka-camel/src/main/scala/akka/camel/ActorRouteDefinition.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.camel diff --git a/akka-camel/src/main/scala/akka/camel/Camel.scala b/akka-camel/src/main/scala/akka/camel/Camel.scala index c72193becb..1273f525f1 100644 --- a/akka-camel/src/main/scala/akka/camel/Camel.scala +++ b/akka-camel/src/main/scala/akka/camel/Camel.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.camel diff --git a/akka-camel/src/main/scala/akka/camel/CamelMessage.scala b/akka-camel/src/main/scala/akka/camel/CamelMessage.scala index c9dc32e597..f6b5073534 100644 --- a/akka-camel/src/main/scala/akka/camel/CamelMessage.scala +++ b/akka-camel/src/main/scala/akka/camel/CamelMessage.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.camel diff --git a/akka-camel/src/main/scala/akka/camel/Consumer.scala b/akka-camel/src/main/scala/akka/camel/Consumer.scala index 19ddc85b59..dfbe3ac840 100644 --- a/akka-camel/src/main/scala/akka/camel/Consumer.scala +++ b/akka-camel/src/main/scala/akka/camel/Consumer.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.camel diff --git a/akka-camel/src/main/scala/akka/camel/Producer.scala b/akka-camel/src/main/scala/akka/camel/Producer.scala index ca05f7a45d..64ab0f61f8 100644 --- a/akka-camel/src/main/scala/akka/camel/Producer.scala +++ b/akka-camel/src/main/scala/akka/camel/Producer.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. 
+ * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.camel diff --git a/akka-camel/src/main/scala/akka/camel/internal/ActivationMessage.scala b/akka-camel/src/main/scala/akka/camel/internal/ActivationMessage.scala index 7b27dbc789..2208cad046 100644 --- a/akka-camel/src/main/scala/akka/camel/internal/ActivationMessage.scala +++ b/akka-camel/src/main/scala/akka/camel/internal/ActivationMessage.scala @@ -1,7 +1,7 @@ package akka.camel.internal /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ import akka.actor.ActorRef @@ -66,4 +66,4 @@ private[camel] object ActivationProtocol { */ @SerialVersionUID(1L) final case class EndpointFailedToDeActivate(actorRef: ActorRef, cause: Throwable) extends ActivationMessage(actorRef) -} \ No newline at end of file +} diff --git a/akka-camel/src/main/scala/akka/camel/internal/ActivationTracker.scala b/akka-camel/src/main/scala/akka/camel/internal/ActivationTracker.scala index 9beb6a8894..17ee254bd5 100644 --- a/akka-camel/src/main/scala/akka/camel/internal/ActivationTracker.scala +++ b/akka-camel/src/main/scala/akka/camel/internal/ActivationTracker.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.camel.internal diff --git a/akka-camel/src/main/scala/akka/camel/internal/ConsumerActorRouteBuilder.scala b/akka-camel/src/main/scala/akka/camel/internal/ConsumerActorRouteBuilder.scala index a27c23ec2f..27f25af1ab 100644 --- a/akka-camel/src/main/scala/akka/camel/internal/ConsumerActorRouteBuilder.scala +++ b/akka-camel/src/main/scala/akka/camel/internal/ConsumerActorRouteBuilder.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.camel.internal diff --git a/akka-camel/src/main/scala/akka/camel/internal/DefaultCamel.scala b/akka-camel/src/main/scala/akka/camel/internal/DefaultCamel.scala index e876a36e2a..f79da6b568 100644 --- a/akka-camel/src/main/scala/akka/camel/internal/DefaultCamel.scala +++ b/akka-camel/src/main/scala/akka/camel/internal/DefaultCamel.scala @@ -9,7 +9,7 @@ import akka.camel.internal.ActivationProtocol._ import scala.util.control.NonFatal import scala.concurrent.duration._ import org.apache.camel.ProducerTemplate -import concurrent.{ Future, ExecutionContext } +import scala.concurrent.{ Future, ExecutionContext } import akka.util.Timeout import akka.pattern.ask import java.io.InputStream diff --git a/akka-camel/src/main/scala/akka/camel/internal/component/ActorComponent.scala b/akka-camel/src/main/scala/akka/camel/internal/component/ActorComponent.scala index 2585b970c9..a42ef51abc 100644 --- a/akka-camel/src/main/scala/akka/camel/internal/component/ActorComponent.scala +++ b/akka-camel/src/main/scala/akka/camel/internal/component/ActorComponent.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.camel.internal.component diff --git a/akka-camel/src/main/scala/akka/camel/javaapi/UntypedConsumerActor.scala b/akka-camel/src/main/scala/akka/camel/javaapi/UntypedConsumerActor.scala index 77526dab08..7bfa26ce20 100644 --- a/akka-camel/src/main/scala/akka/camel/javaapi/UntypedConsumerActor.scala +++ b/akka-camel/src/main/scala/akka/camel/javaapi/UntypedConsumerActor.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.camel.javaapi diff --git a/akka-camel/src/main/scala/akka/camel/javaapi/UntypedProducerActor.scala b/akka-camel/src/main/scala/akka/camel/javaapi/UntypedProducerActor.scala index 7688df5130..aa7e7cb8df 100644 --- a/akka-camel/src/main/scala/akka/camel/javaapi/UntypedProducerActor.scala +++ b/akka-camel/src/main/scala/akka/camel/javaapi/UntypedProducerActor.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.camel.javaapi diff --git a/akka-camel/src/main/scala/akka/package.scala b/akka-camel/src/main/scala/akka/package.scala index 9347580e76..c36d83322f 100644 --- a/akka-camel/src/main/scala/akka/package.scala +++ b/akka-camel/src/main/scala/akka/package.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka diff --git a/akka-camel/src/test/java/akka/camel/ConsumerJavaTestBase.java b/akka-camel/src/test/java/akka/camel/ConsumerJavaTestBase.java index d8aec8a761..0ed6f85520 100644 --- a/akka-camel/src/test/java/akka/camel/ConsumerJavaTestBase.java +++ b/akka-camel/src/test/java/akka/camel/ConsumerJavaTestBase.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.camel; diff --git a/akka-camel/src/test/java/akka/camel/MessageJavaTestBase.java b/akka-camel/src/test/java/akka/camel/MessageJavaTestBase.java index d805a8b2c1..3eeb8c4186 100644 --- a/akka-camel/src/test/java/akka/camel/MessageJavaTestBase.java +++ b/akka-camel/src/test/java/akka/camel/MessageJavaTestBase.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.camel; diff --git a/akka-camel/src/test/java/akka/camel/SampleErrorHandlingConsumer.java b/akka-camel/src/test/java/akka/camel/SampleErrorHandlingConsumer.java index 92fb124a11..4e6efdde5b 100644 --- a/akka-camel/src/test/java/akka/camel/SampleErrorHandlingConsumer.java +++ b/akka-camel/src/test/java/akka/camel/SampleErrorHandlingConsumer.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.camel; diff --git a/akka-camel/src/test/java/akka/camel/SampleUntypedConsumer.java b/akka-camel/src/test/java/akka/camel/SampleUntypedConsumer.java index 030c951cc9..39e7759508 100644 --- a/akka-camel/src/test/java/akka/camel/SampleUntypedConsumer.java +++ b/akka-camel/src/test/java/akka/camel/SampleUntypedConsumer.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.camel; diff --git a/akka-camel/src/test/java/akka/camel/SampleUntypedForwardingProducer.java b/akka-camel/src/test/java/akka/camel/SampleUntypedForwardingProducer.java index b99a7ecc31..83bf56affc 100644 --- a/akka-camel/src/test/java/akka/camel/SampleUntypedForwardingProducer.java +++ b/akka-camel/src/test/java/akka/camel/SampleUntypedForwardingProducer.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.camel; diff --git a/akka-camel/src/test/java/akka/camel/SampleUntypedReplyingProducer.java b/akka-camel/src/test/java/akka/camel/SampleUntypedReplyingProducer.java index c47187d1da..adfabe3097 100644 --- a/akka-camel/src/test/java/akka/camel/SampleUntypedReplyingProducer.java +++ b/akka-camel/src/test/java/akka/camel/SampleUntypedReplyingProducer.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.camel; diff --git a/akka-camel/src/test/scala/akka/camel/ActivationIntegrationTest.scala b/akka-camel/src/test/scala/akka/camel/ActivationIntegrationTest.scala index a945e3a63e..408139fc18 100644 --- a/akka-camel/src/test/scala/akka/camel/ActivationIntegrationTest.scala +++ b/akka-camel/src/test/scala/akka/camel/ActivationIntegrationTest.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.camel @@ -13,7 +13,7 @@ import akka.actor._ import TestSupport._ import org.scalatest.WordSpec import akka.testkit.TestLatch -import concurrent.Await +import scala.concurrent.Await import java.util.concurrent.TimeoutException import akka.util.Timeout @@ -79,4 +79,4 @@ class ActivationIntegrationTest extends WordSpec with MustMatchers with SharedCa } } -} \ No newline at end of file +} diff --git a/akka-camel/src/test/scala/akka/camel/CamelConfigSpec.scala b/akka-camel/src/test/scala/akka/camel/CamelConfigSpec.scala index ca7b4ba3cc..7de26bb1cd 100644 --- a/akka-camel/src/test/scala/akka/camel/CamelConfigSpec.scala +++ b/akka-camel/src/test/scala/akka/camel/CamelConfigSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.camel diff --git a/akka-camel/src/test/scala/akka/camel/CamelExchangeAdapterTest.scala b/akka-camel/src/test/scala/akka/camel/CamelExchangeAdapterTest.scala index 29863b146a..0fc1cd7f38 100644 --- a/akka-camel/src/test/scala/akka/camel/CamelExchangeAdapterTest.scala +++ b/akka-camel/src/test/scala/akka/camel/CamelExchangeAdapterTest.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.camel diff --git a/akka-camel/src/test/scala/akka/camel/CamelMessageTest.scala b/akka-camel/src/test/scala/akka/camel/CamelMessageTest.scala index 0fd0a73fb6..d2ac74324e 100644 --- a/akka-camel/src/test/scala/akka/camel/CamelMessageTest.scala +++ b/akka-camel/src/test/scala/akka/camel/CamelMessageTest.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.camel diff --git a/akka-camel/src/test/scala/akka/camel/ConcurrentActivationTest.scala b/akka-camel/src/test/scala/akka/camel/ConcurrentActivationTest.scala index ff5524ad6c..9e51164819 100644 --- a/akka-camel/src/test/scala/akka/camel/ConcurrentActivationTest.scala +++ b/akka-camel/src/test/scala/akka/camel/ConcurrentActivationTest.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.camel diff --git a/akka-camel/src/test/scala/akka/camel/ConsumerIntegrationTest.scala b/akka-camel/src/test/scala/akka/camel/ConsumerIntegrationTest.scala index 6462e0b191..4d2c3e7b48 100644 --- a/akka-camel/src/test/scala/akka/camel/ConsumerIntegrationTest.scala +++ b/akka-camel/src/test/scala/akka/camel/ConsumerIntegrationTest.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.camel @@ -18,7 +18,7 @@ import org.apache.camel.{ FailedToCreateRouteException, CamelExecutionException import java.util.concurrent.{ ExecutionException, TimeUnit, TimeoutException } import akka.actor.Status.Failure import scala.concurrent.duration._ -import concurrent.{ ExecutionContext, Await } +import scala.concurrent.{ ExecutionContext, Await } import akka.testkit._ import akka.util.Timeout diff --git a/akka-camel/src/test/scala/akka/camel/ConsumerJavaTest.scala b/akka-camel/src/test/scala/akka/camel/ConsumerJavaTest.scala index 5685db2cea..e0685bdf06 100644 --- a/akka-camel/src/test/scala/akka/camel/ConsumerJavaTest.scala +++ b/akka-camel/src/test/scala/akka/camel/ConsumerJavaTest.scala @@ -1,9 +1,9 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.camel import org.scalatest.junit.JUnitSuite -class ConsumerJavaTest extends ConsumerJavaTestBase with JUnitSuite \ No newline at end of file +class ConsumerJavaTest extends ConsumerJavaTestBase with JUnitSuite diff --git a/akka-camel/src/test/scala/akka/camel/MessageJavaTest.scala b/akka-camel/src/test/scala/akka/camel/MessageJavaTest.scala index 9d56642b9f..eb8ba6e194 100644 --- a/akka-camel/src/test/scala/akka/camel/MessageJavaTest.scala +++ b/akka-camel/src/test/scala/akka/camel/MessageJavaTest.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.camel diff --git a/akka-camel/src/test/scala/akka/camel/MessageScalaTest.scala b/akka-camel/src/test/scala/akka/camel/MessageScalaTest.scala index cbf0190e91..9e2a901ec5 100644 --- a/akka-camel/src/test/scala/akka/camel/MessageScalaTest.scala +++ b/akka-camel/src/test/scala/akka/camel/MessageScalaTest.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.camel diff --git a/akka-camel/src/test/scala/akka/camel/ProducerFeatureTest.scala b/akka-camel/src/test/scala/akka/camel/ProducerFeatureTest.scala index 58cd0713d6..8463c185ae 100644 --- a/akka-camel/src/test/scala/akka/camel/ProducerFeatureTest.scala +++ b/akka-camel/src/test/scala/akka/camel/ProducerFeatureTest.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.camel diff --git a/akka-camel/src/test/scala/akka/camel/TestSupport.scala b/akka-camel/src/test/scala/akka/camel/TestSupport.scala index 4ff7155666..55e6b13d53 100644 --- a/akka-camel/src/test/scala/akka/camel/TestSupport.scala +++ b/akka-camel/src/test/scala/akka/camel/TestSupport.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.camel @@ -13,7 +13,7 @@ import org.scalatest.{ BeforeAndAfterEach, BeforeAndAfterAll, Suite } import org.scalatest.matchers.{ BePropertyMatcher, BePropertyMatchResult } import scala.reflect.ClassTag import akka.actor.{ ActorRef, Props, ActorSystem, Actor } -import concurrent.Await +import scala.concurrent.Await import akka.util.Timeout import akka.testkit.AkkaSpec diff --git a/akka-camel/src/test/scala/akka/camel/UntypedProducerTest.scala b/akka-camel/src/test/scala/akka/camel/UntypedProducerTest.scala index e89a568b42..7964d78b71 100644 --- a/akka-camel/src/test/scala/akka/camel/UntypedProducerTest.scala +++ b/akka-camel/src/test/scala/akka/camel/UntypedProducerTest.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.camel diff --git a/akka-camel/src/test/scala/akka/camel/internal/component/ActorComponentConfigurationTest.scala b/akka-camel/src/test/scala/akka/camel/internal/component/ActorComponentConfigurationTest.scala index 1be5295225..3f5a8f33cf 100644 --- a/akka-camel/src/test/scala/akka/camel/internal/component/ActorComponentConfigurationTest.scala +++ b/akka-camel/src/test/scala/akka/camel/internal/component/ActorComponentConfigurationTest.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.camel.internal.component @@ -26,4 +26,4 @@ class ActorComponentConfigurationTest extends WordSpec with MustMatchers with Sh 'replyTimeout(987000000 nanos)) } -} \ No newline at end of file +} diff --git a/akka-camel/src/test/scala/akka/camel/internal/component/ActorProducerTest.scala b/akka-camel/src/test/scala/akka/camel/internal/component/ActorProducerTest.scala index 57d4ee02c6..f353669bd3 100644 --- a/akka-camel/src/test/scala/akka/camel/internal/component/ActorProducerTest.scala +++ b/akka-camel/src/test/scala/akka/camel/internal/component/ActorProducerTest.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.camel.internal.component @@ -25,7 +25,7 @@ import akka.actor.ActorSystem.Settings import akka.event.LoggingAdapter import akka.testkit.{ TestLatch, TimingTest, TestKit, TestProbe } import org.apache.camel.impl.DefaultCamelContext -import concurrent.{ Await, Promise, Future } +import scala.concurrent.{ Await, Promise, Future } import akka.util.Timeout import akka.actor._ import akka.testkit._ diff --git a/akka-camel/src/test/scala/akka/camel/internal/component/DurationConverterTest.scala b/akka-camel/src/test/scala/akka/camel/internal/component/DurationConverterTest.scala index 06c5d5aa5e..83bf137af3 100644 --- a/akka-camel/src/test/scala/akka/camel/internal/component/DurationConverterTest.scala +++ b/akka-camel/src/test/scala/akka/camel/internal/component/DurationConverterTest.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.camel.internal.component diff --git a/akka-cluster/src/main/resources/reference.conf b/akka-cluster/src/main/resources/reference.conf index f8d18a516b..6a1f7ac3c3 100644 --- a/akka-cluster/src/main/resources/reference.conf +++ b/akka-cluster/src/main/resources/reference.conf @@ -55,10 +55,6 @@ akka { # A value of 0 s can be used to always publish the stats, when it happens. publish-stats-interval = 10s - # A joining node stops sending heartbeats to the node to join if it hasn't - # become member of the cluster within this deadline. - join-timeout = 60s - # The id of the dispatcher to use for cluster actors. If not specified # default dispatcher is used. # If specified you need to define the settings of the actual dispatcher. @@ -109,7 +105,29 @@ akka { # network drop. acceptable-heartbeat-pause = 3s + # Number of samples to use for calculation of mean and standard deviation of + # inter-arrival times. 
max-sample-size = 1000 + + # When a node stops sending heartbeats to another node it will end that + # with this number of EndHeartbeat messages, which will remove the + # monitoring from the failure detector. + nr-of-end-heartbeats = 8 + + # When no expected heartbeat message has been received an explicit + # heartbeat request is sent to the node that should emit heartbeats. + heartbeat-request { + # Grace period until an explicit heartbeat request is sent + grace-period = 10 s + + # After the heartbeat request has been sent the first failure detection + # will start after this period, even though no heartbeat mesage has + # been received. + expected-response-after = 3 s + + # Cleanup of obsolete heartbeat requests + time-to-live = 60 s + } } metrics { @@ -149,13 +167,6 @@ akka { ticks-per-wheel = 512 } - # Netty blocks when sending to broken connections, and this circuit breaker - # is used to reduce connect attempts to broken connections. - send-circuit-breaker { - max-failures = 3 - call-timeout = 2 s - reset-timeout = 30 s - } } # Default configuration for routers diff --git a/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala b/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala index feb950a9a8..92c876b22d 100644 --- a/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala +++ b/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.cluster @@ -109,18 +109,19 @@ class AccrualFailureDetector( private val state = new AtomicReference[State](State()) - /** - * Returns true if the connection is considered to be up and healthy - * and returns false otherwise. 
- */ - def isAvailable(connection: Address): Boolean = phi(connection) < threshold + override def isAvailable(connection: Address): Boolean = phi(connection) < threshold + + override def isMonitoring(connection: Address): Boolean = state.get.timestamps.get(connection).nonEmpty /** * Records a heartbeat for a connection. */ @tailrec final def heartbeat(connection: Address) { - log.debug("Heartbeat from connection [{}] ", connection) + if (isMonitoring(connection)) + log.debug("Heartbeat from connection [{}] ", connection) + else + log.info("First heartbeat from connection [{}] ", connection) val timestamp = clock() val oldState = state.get @@ -197,7 +198,9 @@ class AccrualFailureDetector( */ @tailrec final def remove(connection: Address): Unit = { - log.debug("Remove connection [{}] ", connection) + if (isMonitoring(connection)) + log.info("Remove heartbeat connection [{}] ", connection) + val oldState = state.get if (oldState.history.contains(connection)) { diff --git a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index 54d3e6bfa3..e8bd2e21fc 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.cluster @@ -22,7 +22,8 @@ import java.io.Closeable import java.util.concurrent.atomic.AtomicBoolean import java.util.concurrent.atomic.AtomicReference import akka.util.internal.HashedWheelTimer -import concurrent.{ ExecutionContext, Await } +import scala.concurrent.{ ExecutionContext, Await } +import com.typesafe.config.ConfigFactory /** * Cluster Extension Id and factory for creating Cluster extension. 
@@ -88,31 +89,26 @@ class Cluster(val system: ExtendedActorSystem) extends Extension { * INTERNAL API */ private[cluster] val scheduler: Scheduler with Closeable = { - if (system.settings.SchedulerTickDuration > SchedulerTickDuration) { + if (system.scheduler.maxFrequency < 1.second / SchedulerTickDuration) { + import scala.collection.JavaConverters._ log.info("Using a dedicated scheduler for cluster. Default scheduler can be used if configured " + "with 'akka.scheduler.tick-duration' [{} ms] <= 'akka.cluster.scheduler.tick-duration' [{} ms].", - system.settings.SchedulerTickDuration.toMillis, SchedulerTickDuration.toMillis) + (1000 / system.scheduler.maxFrequency).toInt, SchedulerTickDuration.toMillis) new DefaultScheduler( - new HashedWheelTimer(log, - system.threadFactory match { - case tf: MonitorableThreadFactory ⇒ tf.withName(tf.name + "-cluster-scheduler") - case tf ⇒ tf - }, - SchedulerTickDuration, - SchedulerTicksPerWheel), - log) + ConfigFactory.parseString(s"akka.scheduler.tick-duration=${SchedulerTickDuration.toMillis}ms").withFallback( + system.settings.config), + log, + system.threadFactory match { + case tf: MonitorableThreadFactory ⇒ tf.withName(tf.name + "-cluster-scheduler") + case tf ⇒ tf + }) } else { // delegate to system.scheduler, but don't close over system val systemScheduler = system.scheduler new Scheduler with Closeable { override def close(): Unit = () // we are using system.scheduler, which we are not responsible for closing - override def schedule(initialDelay: FiniteDuration, interval: FiniteDuration, - receiver: ActorRef, message: Any)(implicit executor: ExecutionContext, sender: ActorRef = Actor.noSender): Cancellable = - systemScheduler.schedule(initialDelay, interval, receiver, message) - - override def schedule(initialDelay: FiniteDuration, interval: FiniteDuration)(f: ⇒ Unit)(implicit executor: ExecutionContext): Cancellable = - systemScheduler.schedule(initialDelay, interval)(f) + override def maxFrequency: Double = 
systemScheduler.maxFrequency override def schedule(initialDelay: FiniteDuration, interval: FiniteDuration, runnable: Runnable)(implicit executor: ExecutionContext): Cancellable = @@ -121,13 +117,6 @@ class Cluster(val system: ExtendedActorSystem) extends Extension { override def scheduleOnce(delay: FiniteDuration, runnable: Runnable)(implicit executor: ExecutionContext): Cancellable = systemScheduler.scheduleOnce(delay, runnable) - - override def scheduleOnce(delay: FiniteDuration, receiver: ActorRef, - message: Any)(implicit executor: ExecutionContext): Cancellable = - systemScheduler.scheduleOnce(delay, receiver, message) - - override def scheduleOnce(delay: FiniteDuration)(f: ⇒ Unit)(implicit executor: ExecutionContext): Cancellable = - systemScheduler.scheduleOnce(delay)(f) } } } @@ -176,8 +165,13 @@ class Cluster(val system: ExtendedActorSystem) extends Extension { /** * Subscribe to cluster domain events. * The `to` Class can be [[akka.cluster.ClusterEvent.ClusterDomainEvent]] - * or subclass. A snapshot of [[akka.cluster.ClusterEvent.CurrentClusterState]] - * will also be sent to the subscriber. + * or subclass. + * + * A snapshot of [[akka.cluster.ClusterEvent.CurrentClusterState]] + * will be sent to the subscriber as the first event. When + * `to` Class is a [[akka.cluster.ClusterEvent.InstantMemberEvent]] + * (or subclass) the snapshot event will instead be a + * [[akka.cluster.ClusterEvent.InstantClusterState]]. */ def subscribe(subscriber: ActorRef, to: Class[_]): Unit = clusterCore ! InternalClusterAction.Subscribe(subscriber, to) diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterActorRefProvider.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterActorRefProvider.scala index 5adb57615a..b1ed2e7c38 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterActorRefProvider.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterActorRefProvider.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. 
+ * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.cluster @@ -94,7 +94,7 @@ private[akka] class RemoteDeploymentWatcher extends Actor { case t @ Terminated(a) if supervisors isDefinedAt a ⇒ // send extra ChildTerminated to the supervisor so that it will remove the child - if (t.addressTerminated) supervisors(a).sendSystemMessage(ChildTerminated(a)) + supervisors(a).sendSystemMessage(ChildTerminated(a)) supervisors -= a case _: Terminated ⇒ diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala index 50644e431c..106de783ae 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.cluster @@ -99,8 +99,6 @@ private[cluster] object InternalClusterAction { case object PublishStatsTick extends Tick - case class SendClusterMessage(to: Address, msg: ClusterMessage) - case class SendGossipTo(address: Address) case object GetClusterCoreRef @@ -181,7 +179,6 @@ private[cluster] final class ClusterDaemon(settings: ClusterSettings) extends Ac private[cluster] final class ClusterCoreDaemon(publisher: ActorRef) extends Actor with ActorLogging { import ClusterLeaderAction._ import InternalClusterAction._ - import ClusterHeartbeatSender.JoinInProgress val cluster = Cluster(context.system) import cluster.{ selfAddress, scheduler, failureDetector } @@ -191,12 +188,16 @@ private[cluster] final class ClusterCoreDaemon(publisher: ActorRef) extends Acto // note that self is not initially member, // and the Gossip is not versioned for this 'Node' yet - var latestGossip: Gossip = Gossip() + var latestGossip: Gossip = Gossip.empty var stats = ClusterStats() - val coreSender = context.actorOf(Props[ClusterCoreSender]. 
- withDispatcher(UseDispatcher), name = "coreSender") + /** + * Looks up and returns the remote cluster command connection for the specific address. + */ + private def clusterCore(address: Address): ActorRef = + context.actorFor(RootActorPath(address) / "system" / "cluster" / "core") + val heartbeatSender = context.actorOf(Props[ClusterHeartbeatSender]. withDispatcher(UseDispatcher), name = "heartbeatSender") @@ -281,22 +282,27 @@ private[cluster] final class ClusterCoreDaemon(publisher: ActorRef) extends Acto * A 'Join(thisNodeAddress)' command is sent to the node to join. */ def join(address: Address): Unit = { - if (!latestGossip.members.exists(_.address == address)) { + if (address.protocol != selfAddress.protocol) + log.info("Member with wrong protocol tried to join, but was ignored, expected [{}] but was [{}]", + selfAddress.protocol, address.protocol) + else if (address.system != selfAddress.system) + log.info("Member with wrong ActorSystem name tried to join, but was ignored, expected [{}] but was [{}]", + selfAddress.system, address.system) + else if (!latestGossip.members.exists(_.address == address)) { // wipe our state since a node that joins a cluster must be empty - latestGossip = Gossip() + latestGossip = Gossip.empty // wipe the failure detector since we are starting fresh and shouldn't care about the past failureDetector.reset() // wipe the publisher since we are starting fresh publisher ! PublishStart publish(latestGossip) - heartbeatSender ! JoinInProgress(address, Deadline.now + JoinTimeout) context.become(initialized) if (address == selfAddress) joining(address) else - coreSender ! SendClusterMessage(address, ClusterUserAction.Join(selfAddress)) + clusterCore(address) ! 
ClusterUserAction.Join(selfAddress) } } @@ -331,7 +337,6 @@ private[cluster] final class ClusterCoreDaemon(publisher: ActorRef) extends Acto log.debug("Cluster Node [{}] - Node [{}] is JOINING", selfAddress, node) // treat join as initial heartbeat, so that it becomes unavailable if nothing more happens if (node != selfAddress) { - failureDetector heartbeat node gossipTo(node) } @@ -377,7 +382,7 @@ private[cluster] final class ClusterCoreDaemon(publisher: ActorRef) extends Acto def removing(address: Address): Unit = { log.info("Cluster Node [{}] - Node has been REMOVED by the leader - shutting down...", selfAddress) // just cleaning up the gossip state - latestGossip = Gossip() + latestGossip = Gossip.empty publish(latestGossip) context.become(removed) // make sure the final (removed) state is published @@ -494,7 +499,7 @@ private[cluster] final class ClusterCoreDaemon(publisher: ActorRef) extends Acto val rate = mergeRate(stats.mergeConflictCount) if (rate <= MaxGossipMergeRate) - coreSender ! SendClusterMessage(to = localGossip.leader.get, msg = GossipMergeConflict(GossipEnvelope(selfAddress, localGossip), envelope)) + localGossip.leader foreach { clusterCore(_) ! GossipMergeConflict(GossipEnvelope(selfAddress, localGossip), envelope) } else log.debug("Skipping gossip merge conflict due to rate [{}] / s ", rate) @@ -709,18 +714,14 @@ private[cluster] final class ClusterCoreDaemon(publisher: ActorRef) extends Acto removedMembers foreach { member ⇒ val address = member.address log.info("Cluster Node [{}] - Leader is moving node [{}] from EXITING to REMOVED - and removing node from node ring", selfAddress, address) - coreSender ! SendClusterMessage( - to = address, - msg = ClusterLeaderAction.Remove(address)) + clusterCore(address) ! 
ClusterLeaderAction.Remove(address) } // tell all exiting members to exit exitingMembers foreach { member ⇒ val address = member.address log.info("Cluster Node [{}] - Leader is moving node [{}] from LEAVING to EXITING", selfAddress, address) - coreSender ! SendClusterMessage( - to = address, - msg = ClusterLeaderAction.Exit(address)) // FIXME should use ? to await completion of handoff? + clusterCore(address) ! ClusterLeaderAction.Exit(address) // FIXME should use ? to await completion of handoff? } // log the auto-downing of the unreachable nodes @@ -800,8 +801,8 @@ private[cluster] final class ClusterCoreDaemon(publisher: ActorRef) extends Acto def oneWayGossipTo(address: Address): Unit = gossipTo(address, GossipEnvelope(selfAddress, latestGossip, conversation = false)) - def gossipTo(address: Address, gossipMsg: GossipEnvelope): Unit = if (address != selfAddress) - coreSender ! SendClusterMessage(address, gossipMsg) + def gossipTo(address: Address, gossipMsg: GossipEnvelope): Unit = + if (address != selfAddress) clusterCore(address) ! gossipMsg def publish(newGossip: Gossip): Unit = { publisher ! PublishChanges(newGossip) @@ -869,27 +870,6 @@ private[cluster] final class JoinSeedNodeProcess(seedNodes: immutable.IndexedSeq } } -/** - * INTERNAL API. - */ -private[cluster] final class ClusterCoreSender extends Actor with ActorLogging { - import InternalClusterAction._ - - val selfAddress = Cluster(context.system).selfAddress - - /** - * Looks up and returns the remote cluster command connection for the specific address. - */ - private def clusterCoreConnectionFor(address: Address): ActorRef = - context.actorFor(RootActorPath(address) / "system" / "cluster" / "core") - - def receive = { - case SendClusterMessage(to, msg) ⇒ - log.debug("Cluster Node [{}] - Trying to send [{}] to [{}]", selfAddress, msg.getClass.getSimpleName, to) - clusterCoreConnectionFor(to) ! 
msg - } -} - /** * INTERNAL API * diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala index c896e721cc..509536b3d8 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterEvent.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.cluster @@ -68,54 +68,140 @@ object ClusterEvent { } /** - * Marker interface for member related events. + * Marker interface for membership events. + * Only published after convergence, when all members have seen current + * state. */ sealed trait MemberEvent extends ClusterDomainEvent { def member: Member } /** - * A new member joined the cluster. Only published after convergence. + * A new member joined the cluster. + * Only published after convergence, when all members have seen current + * state. */ case class MemberJoined(member: Member) extends MemberEvent { if (member.status != Joining) throw new IllegalArgumentException("Expected Joining status, got: " + member) } /** - * Member status changed to Up. Only published after convergence. + * Member status changed to Up. + * Only published after convergence, when all members have seen current + * state. */ case class MemberUp(member: Member) extends MemberEvent { if (member.status != Up) throw new IllegalArgumentException("Expected Up status, got: " + member) } /** - * Member status changed to Leaving. Only published after convergence. + * Member status changed to Leaving. + * Only published after convergence, when all members have seen current + * state. */ case class MemberLeft(member: Member) extends MemberEvent { if (member.status != Leaving) throw new IllegalArgumentException("Expected Leaving status, got: " + member) } /** - * Member status changed to Exiting. Only published after convergence. + * Member status changed to Exiting. 
+ * Only published after convergence, when all members have seen current + * state. */ case class MemberExited(member: Member) extends MemberEvent { if (member.status != Exiting) throw new IllegalArgumentException("Expected Exiting status, got: " + member) } /** - * Member status changed to Down. Only published after convergence. + * Member status changed to Down. + * Only published after convergence, when all members have seen current + * state. */ case class MemberDowned(member: Member) extends MemberEvent { if (member.status != Down) throw new IllegalArgumentException("Expected Down status, got: " + member) } /** - * Member completely removed from the cluster. Only published after convergence. + * Member completely removed from the cluster. Only published after convergence, + * when all other members have seen the state. */ case class MemberRemoved(member: Member) extends MemberEvent { if (member.status != Removed) throw new IllegalArgumentException("Expected Removed status, got: " + member) } + /** + * Current snapshot state of the cluster. Sent to new subscriber of + * [akka.cluster.ClusterEvent.InstantMemberEvent]. + */ + case class InstantClusterState(members: immutable.SortedSet[Member] = immutable.SortedSet.empty) + extends ClusterDomainEvent { + + /** + * Java API + * Read only + */ + def getMembers: java.lang.Iterable[Member] = { + import scala.collection.JavaConverters._ + members.asJava + } + } + + /** + * Marker interface for membership events published immediately when + * it happened. All other members might not have seen the state. + */ + sealed trait InstantMemberEvent extends ClusterDomainEvent { + def member: Member + } + + /** + * A new member joined the cluster. Published immediately when it happened. + * All other members might not have seen the state. 
+ */ + case class InstantMemberJoined(member: Member) extends InstantMemberEvent { + if (member.status != Joining) throw new IllegalArgumentException("Expected Joining status, got: " + member) + } + + /** + * Member status changed to Up. Published immediately when it happened. + * All other members might not have seen the state. + */ + case class InstantMemberUp(member: Member) extends InstantMemberEvent { + if (member.status != Up) throw new IllegalArgumentException("Expected Up status, got: " + member) + } + + /** + * Member status changed to Leaving. Published immediately when it happened. + * All other members might not have seen the state. + */ + case class InstantMemberLeft(member: Member) extends InstantMemberEvent { + if (member.status != Leaving) throw new IllegalArgumentException("Expected Leaving status, got: " + member) + } + + /** + * Member status changed to Exiting. Published immediately when it happened. + * All other members might not have seen the state. + */ + case class InstantMemberExited(member: Member) extends InstantMemberEvent { + if (member.status != Exiting) throw new IllegalArgumentException("Expected Exiting status, got: " + member) + } + + /** + * Member status changed to Down. Published immediately when it happened. + * All other members might not have seen the state. + */ + case class InstantMemberDowned(member: Member) extends InstantMemberEvent { + if (member.status != Down) throw new IllegalArgumentException("Expected Down status, got: " + member) + } + + /** + * Member completely removed from the cluster. Published immediately when it happened. + * All other members might not have seen the state. + */ + case class InstantMemberRemoved(member: Member) extends InstantMemberEvent { + if (member.status != Removed) throw new IllegalArgumentException("Expected Removed status, got: " + member) + } + /** * Leader of the cluster members changed. Only published after convergence. 
*/ @@ -209,6 +295,19 @@ object ClusterEvent { ++= removedEvents).result() } + /** + * INTERNAL API + */ + private[cluster] def convertToInstantMemberEvents(memberEvents: immutable.Seq[MemberEvent]): immutable.Seq[InstantMemberEvent] = + memberEvents map { + case MemberJoined(m) ⇒ InstantMemberJoined(m) + case MemberUp(m) ⇒ InstantMemberUp(m) + case MemberDowned(m) ⇒ InstantMemberDowned(m) + case MemberLeft(m) ⇒ InstantMemberLeft(m) + case MemberExited(m) ⇒ InstantMemberExited(m) + case MemberRemoved(m) ⇒ InstantMemberRemoved(m) + } + /** * INTERNAL API */ @@ -238,8 +337,8 @@ object ClusterEvent { private[cluster] final class ClusterDomainEventPublisher extends Actor with ActorLogging { import InternalClusterAction._ - var latestGossip: Gossip = Gossip() - var latestConvergedGossip: Gossip = Gossip() + var latestGossip: Gossip = Gossip.empty + var latestConvergedGossip: Gossip = Gossip.empty var memberEvents: immutable.Seq[MemberEvent] = immutable.Seq.empty def receive = { @@ -255,9 +354,11 @@ private[cluster] final class ClusterDomainEventPublisher extends Actor with Acto def eventStream: EventStream = context.system.eventStream + /** + * The current snapshot state that is a mix of converged and latest gossip + * to mimic what you would have seen if you where listening to the events. + */ def publishCurrentClusterState(receiver: Option[ActorRef]): Unit = { - // The state is a mix of converged and latest gossip to mimic what you - // would have seen if you where listening to the events. val state = CurrentClusterState( members = latestConvergedGossip.members, unreachable = latestGossip.overview.unreachable, @@ -269,8 +370,20 @@ private[cluster] final class ClusterDomainEventPublisher extends Actor with Acto } } + /** + * Publish the snapshot state that is based on latest gossip to mimic what you + * would have seen if you where listening to the InstantMemberEvent stream. + */ + def publishInstantClusterState(receiver: ActorRef): Unit = + receiver ! 
InstantClusterState(members = latestGossip.members) + def subscribe(subscriber: ActorRef, to: Class[_]): Unit = { - publishCurrentClusterState(Some(subscriber)) + val isInstantMemberEvent = classOf[InstantMemberEvent].isAssignableFrom(to) + if (classOf[ClusterDomainEvent] == to || isInstantMemberEvent) + publishInstantClusterState(subscriber) + if (!isInstantMemberEvent) + publishCurrentClusterState(Some(subscriber)) + eventStream.subscribe(subscriber, to) } @@ -284,18 +397,26 @@ private[cluster] final class ClusterDomainEventPublisher extends Actor with Acto // keep the latestGossip to be sent to new subscribers latestGossip = newGossip // first publish the diffUnreachable between the last two gossips - diffUnreachable(oldGossip, newGossip) foreach { event ⇒ - publish(event) - // notify DeathWatch about unreachable node - publish(AddressTerminated(event.member.address)) - } + diffUnreachable(oldGossip, newGossip) foreach publish + val newMemberEvents = diffMemberEvents(oldGossip, newGossip) + convertToInstantMemberEvents(newMemberEvents) foreach publish // buffer up the MemberEvents waiting for convergence - memberEvents ++= diffMemberEvents(oldGossip, newGossip) + memberEvents ++= newMemberEvents // if we have convergence then publish the MemberEvents and possibly a LeaderChanged if (newGossip.convergence) { val previousConvergedGossip = latestConvergedGossip latestConvergedGossip = newGossip - memberEvents foreach publish + memberEvents foreach { event ⇒ + event match { + case m @ (MemberDowned(_) | MemberRemoved(_)) ⇒ + // TODO MemberDowned match should probably be covered by MemberRemoved, see ticket #2788 + // but right now we don't change Downed to Removed + publish(event) + // notify DeathWatch about downed node + publish(AddressTerminated(m.member.address)) + case _ ⇒ publish(event) + } + } memberEvents = immutable.Seq.empty diffLeader(previousConvergedGossip, latestConvergedGossip) foreach publish } @@ -307,7 +428,11 @@ private[cluster] final class 
ClusterDomainEventPublisher extends Actor with Acto def publish(event: AnyRef): Unit = eventStream publish event - def publishStart(): Unit = clearState() + def publishStart(): Unit = + if ((latestGossip ne Gossip.empty) || (latestConvergedGossip ne Gossip.empty)) { + clearState() + publishCurrentClusterState(None) + } def publishDone(receiver: ActorRef): Unit = { clearState() @@ -315,7 +440,7 @@ private[cluster] final class ClusterDomainEventPublisher extends Actor with Acto } def clearState(): Unit = { - latestGossip = Gossip() - latestConvergedGossip = Gossip() + latestGossip = Gossip.empty + latestConvergedGossip = Gossip.empty } } diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterHeartbeat.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterHeartbeat.scala index 3ada580bb2..309b0d039a 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterHeartbeat.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterHeartbeat.scala @@ -1,18 +1,15 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.cluster import language.postfixOps import scala.collection.immutable -import scala.annotation.tailrec import scala.concurrent.duration._ -import java.net.URLEncoder -import akka.actor.{ ActorLogging, ActorRef, Address, Actor, RootActorPath, PoisonPill, Props } -import akka.pattern.{ CircuitBreaker, CircuitBreakerOpenException } +import akka.actor.{ ActorLogging, ActorRef, Address, Actor, RootActorPath, Props } import akka.cluster.ClusterEvent._ -import akka.routing.ConsistentHash +import akka.routing.MurmurHash /** * INTERNAL API @@ -55,13 +52,26 @@ private[cluster] final class ClusterHeartbeatReceiver extends Actor with ActorLo */ private[cluster] object ClusterHeartbeatSender { /** - * Tell [akka.cluster.ClusterHeartbeatSender]] that this node has started joining of - * another node and heartbeats should be sent unconditionally until it becomes - * member or deadline is overdue. 
This is done to be able to detect immediate death - * of the joining node. + * Request heartbeats from another node. Sent from the node that is + * expecting heartbeats from a specific sender, but has not received any. + */ + case class HeartbeatRequest(from: Address) extends ClusterMessage + + /** + * Delayed sending of a HeartbeatRequest. The actual request is + * only sent if no expected heartbeat message has been received. * Local only, no need to serialize. */ - case class JoinInProgress(address: Address, deadline: Deadline) + case class SendHeartbeatRequest(to: Address) + + /** + * Trigger a fake heartbeat message to trigger start of failure detection + * of a node that this node is expecting heartbeats from. HeartbeatRequest + * has been sent to the node so it should have started sending heartbeat + * messages. + * Local only, no need to serialize. + */ + case class ExpectedFirstHeartbeat(from: Address) } /* @@ -69,15 +79,9 @@ private[cluster] object ClusterHeartbeatSender { * * This actor is responsible for sending the heartbeat messages to * a few other nodes that will monitor this node. - * - * Netty blocks when sending to broken connections. This actor - * isolates sending to different nodes by using child actors for each target - * address and thereby reduce the risk of irregular heartbeats to healty - * nodes due to broken connections to other nodes. 
*/ private[cluster] final class ClusterHeartbeatSender extends Actor with ActorLogging { import ClusterHeartbeatSender._ - import ClusterHeartbeatSenderConnection._ import ClusterHeartbeatReceiver._ import InternalClusterAction.HeartbeatTick @@ -88,16 +92,16 @@ private[cluster] final class ClusterHeartbeatSender extends Actor with ActorLogg val selfHeartbeat = Heartbeat(selfAddress) val selfEndHeartbeat = EndHeartbeat(selfAddress) + val selfHeartbeatRequest = HeartbeatRequest(selfAddress) - var state = ClusterHeartbeatSenderState.empty(ConsistentHash(Seq.empty[Address], HeartbeatConsistentHashingVirtualNodesFactor), - selfAddress.toString, MonitoredByNrOfMembers) + var state = ClusterHeartbeatSenderState.empty(selfAddress, MonitoredByNrOfMembers) // start periodic heartbeat to other nodes in cluster val heartbeatTask = scheduler.schedule(PeriodicTasksInitialDelay max HeartbeatInterval, HeartbeatInterval, self, HeartbeatTick) override def preStart(): Unit = { - cluster.subscribe(self, classOf[MemberEvent]) + cluster.subscribe(self, classOf[InstantMemberEvent]) cluster.subscribe(self, classOf[UnreachableMember]) } @@ -109,57 +113,77 @@ private[cluster] final class ClusterHeartbeatSender extends Actor with ActorLogg /** * Looks up and returns the remote cluster heartbeat connection for the specific address. */ - def clusterHeartbeatConnectionFor(address: Address): ActorRef = + def heartbeatReceiver(address: Address): ActorRef = context.actorFor(RootActorPath(address) / "system" / "cluster" / "heartbeatReceiver") + /** + * Looks up and returns the remote cluster heartbeat sender for the specific address. 
+ */ + def heartbeatSender(address: Address): ActorRef = context.actorFor(self.path.toStringWithAddress(address)) + def receive = { - case HeartbeatTick ⇒ heartbeat() - case s: CurrentClusterState ⇒ reset(s) - case UnreachableMember(m) ⇒ removeMember(m) - case MemberRemoved(m) ⇒ removeMember(m) - case e: MemberEvent ⇒ addMember(e.member) - case JoinInProgress(a, d) ⇒ addJoinInProgress(a, d) + case HeartbeatTick ⇒ heartbeat() + case InstantMemberUp(m) ⇒ addMember(m) + case UnreachableMember(m) ⇒ removeMember(m) + case InstantMemberDowned(m) ⇒ removeMember(m) + case InstantMemberRemoved(m) ⇒ removeMember(m) + case s: InstantClusterState ⇒ reset(s) + case _: CurrentClusterState ⇒ // enough with InstantClusterState + case _: InstantMemberEvent ⇒ // not interested in other types of InstantMemberEvent + case HeartbeatRequest(from) ⇒ addHeartbeatRequest(from) + case SendHeartbeatRequest(to) ⇒ sendHeartbeatRequest(to) + case ExpectedFirstHeartbeat(from) ⇒ triggerFirstHeartbeat(from) } - def reset(snapshot: CurrentClusterState): Unit = - state = state.reset(snapshot.members.collect { case m if m.address != selfAddress ⇒ m.address }) + def reset(snapshot: InstantClusterState): Unit = state = state.reset(snapshot.members.map(_.address)) - def addMember(m: Member): Unit = if (m.address != selfAddress) - state = state addMember m.address + def addMember(m: Member): Unit = if (m.address != selfAddress) state = state addMember m.address - def removeMember(m: Member): Unit = if (m.address != selfAddress) - state = state removeMember m.address + def removeMember(m: Member): Unit = if (m.address != selfAddress) state = state removeMember m.address - def addJoinInProgress(address: Address, deadline: Deadline): Unit = if (address != selfAddress) - state = state.addJoinInProgress(address, deadline) + def addHeartbeatRequest(address: Address): Unit = + if (address != selfAddress) state = state.addHeartbeatRequest(address, Deadline.now + HeartbeatRequestTimeToLive) - def heartbeat(): 
Unit = { - state = state.removeOverdueJoinInProgress() - - def connection(to: Address): ActorRef = { - // URL encoded target address as child actor name - val connectionName = URLEncoder.encode(to.toString, "UTF-8") - context.actorFor(connectionName) match { - case notFound if notFound.isTerminated ⇒ - context.actorOf(Props(new ClusterHeartbeatSenderConnection(clusterHeartbeatConnectionFor(to))), connectionName) - case child ⇒ child - } + def sendHeartbeatRequest(address: Address): Unit = + if (!cluster.failureDetector.isMonitoring(address) && state.ring.mySenders.contains(address)) { + heartbeatSender(address) ! selfHeartbeatRequest + // schedule the expected heartbeat for later, which will give the + // sender a chance to start heartbeating, and also trigger some resends of + // the heartbeat request + scheduler.scheduleOnce(HeartbeatExpectedResponseAfter, self, ExpectedFirstHeartbeat(address)) } - val deadline = Deadline.now + HeartbeatInterval - state.active foreach { to ⇒ connection(to) ! SendHeartbeat(selfHeartbeat, to, deadline) } + def triggerFirstHeartbeat(address: Address): Unit = + if (!cluster.failureDetector.isMonitoring(address)) { + log.info("Trigger extra expected heartbeat from [{}]", address) + cluster.failureDetector.heartbeat(address) + } + + def heartbeat(): Unit = { + state = state.removeOverdueHeartbeatRequest() + + state.active foreach { to ⇒ + log.debug("Cluster Node [{}] - Heartbeat to [{}]", cluster.selfAddress, to) + heartbeatReceiver(to) ! selfHeartbeat + } // When sending heartbeats to a node is stopped a few `EndHeartbeat` messages is // sent to notify it that no more heartbeats will be sent. for ((to, count) ← state.ending) { - val c = connection(to) - c ! SendEndHeartbeat(selfEndHeartbeat, to) - if (count == NumberOfEndHeartbeats) { + log.debug("Cluster Node [{}] - EndHeartbeat to [{}]", cluster.selfAddress, to) + heartbeatReceiver(to) ! selfEndHeartbeat + if (count == NumberOfEndHeartbeats) state = state.removeEnding(to) - c ! 
PoisonPill - } else + else state = state.increaseEndingCount(to) } + + // request heartbeats from expected sender node if no heartbeat messages has been received + state.ring.mySenders foreach { address ⇒ + if (!cluster.failureDetector.isMonitoring(address)) + scheduler.scheduleOnce(HeartbeatRequestDelay, self, SendHeartbeatRequest(address)) + } + } } @@ -171,9 +195,8 @@ private[cluster] object ClusterHeartbeatSenderState { /** * Initial, empty state */ - def empty(consistentHash: ConsistentHash[Address], selfAddressStr: String, - monitoredByNrOfMembers: Int): ClusterHeartbeatSenderState = - ClusterHeartbeatSenderState(consistentHash, selfAddressStr, monitoredByNrOfMembers) + def empty(selfAddress: Address, monitoredByNrOfMembers: Int): ClusterHeartbeatSenderState = + ClusterHeartbeatSenderState(HeartbeatNodeRing(selfAddress, Set(selfAddress), monitoredByNrOfMembers)) /** * Create a new state based on previous state, and @@ -181,33 +204,13 @@ private[cluster] object ClusterHeartbeatSenderState { */ private def apply( old: ClusterHeartbeatSenderState, - consistentHash: ConsistentHash[Address], - all: Set[Address]): ClusterHeartbeatSenderState = { + ring: HeartbeatNodeRing): ClusterHeartbeatSenderState = { - /** - * Select a few peers that heartbeats will be sent to, i.e. that will - * monitor this node. Try to send heartbeats to same nodes as much - * as possible, but re-balance with consistent hashing algorithm when - * new members are added or removed. 
- */ - def selectPeers: Set[Address] = { - val allSize = all.size - val nrOfPeers = math.min(allSize, old.monitoredByNrOfMembers) - // try more if consistentHash results in same node as already selected - val attemptLimit = nrOfPeers * 2 - @tailrec def select(acc: Set[Address], n: Int): Set[Address] = { - if (acc.size == nrOfPeers || n == attemptLimit) acc - else select(acc + consistentHash.nodeFor(old.selfAddressStr + n), n + 1) - } - if (nrOfPeers >= allSize) all - else select(Set.empty[Address], 0) - } - - val curr = selectPeers + val curr = ring.myReceivers // start ending process for nodes not selected any more // abort ending process for nodes that have been selected again val end = old.ending ++ (old.current -- curr).map(_ -> 0) -- curr - old.copy(consistentHash = consistentHash, all = all, current = curr, ending = end) + old.copy(ring = ring, current = curr, ending = end, heartbeatRequest = old.heartbeatRequest -- curr) } } @@ -222,13 +225,10 @@ private[cluster] object ClusterHeartbeatSenderState { * i.e. the methods return new instances. 
*/ private[cluster] case class ClusterHeartbeatSenderState private ( - consistentHash: ConsistentHash[Address], - selfAddressStr: String, - monitoredByNrOfMembers: Int, - all: Set[Address] = Set.empty, + ring: HeartbeatNodeRing, current: Set[Address] = Set.empty, ending: Map[Address, Int] = Map.empty, - joinInProgress: Map[Address, Deadline] = Map.empty) { + heartbeatRequest: Map[Address, Deadline] = Map.empty) { // FIXME can be disabled as optimization assertInvariants @@ -236,50 +236,53 @@ private[cluster] case class ClusterHeartbeatSenderState private ( private def assertInvariants: Unit = { val currentAndEnding = current.intersect(ending.keySet) require(currentAndEnding.isEmpty, - "Same nodes in current and ending not allowed, got [%s]" format currentAndEnding) - val joinInProgressAndAll = joinInProgress.keySet.intersect(all) - require(joinInProgressAndAll.isEmpty, - "Same nodes in joinInProgress and all not allowed, got [%s]" format joinInProgressAndAll) - val currentNotInAll = current -- all - require(currentNotInAll.isEmpty, - "Nodes in current but not in all not allowed, got [%s]" format currentNotInAll) - require(all.isEmpty == consistentHash.isEmpty, "ConsistentHash doesn't correspond to all nodes [%s]" - format all) + s"Same nodes in current and ending not allowed, got [${currentAndEnding}]") + + val currentAndHeartbeatRequest = current.intersect(heartbeatRequest.keySet) + require(currentAndHeartbeatRequest.isEmpty, + s"Same nodes in current and heartbeatRequest not allowed, got [${currentAndHeartbeatRequest}]") + + val currentNotInAll = current -- ring.nodes + require(current.isEmpty || currentNotInAll.isEmpty, + s"Nodes in current but not in ring nodes not allowed, got [${currentNotInAll}]") + + require(!current.contains(ring.selfAddress), + s"Self in current not allowed, got [${ring.selfAddress}]") + require(!heartbeatRequest.contains(ring.selfAddress), + s"Self in heartbeatRequest not allowed, got [${ring.selfAddress}]") } - val active: Set[Address] 
= current ++ joinInProgress.keySet + val active: Set[Address] = current ++ heartbeatRequest.keySet - def reset(nodes: Set[Address]): ClusterHeartbeatSenderState = - ClusterHeartbeatSenderState(nodes.foldLeft(this) { _ removeJoinInProgress _ }, - consistentHash = ConsistentHash(nodes, consistentHash.virtualNodesFactor), - all = nodes) + def reset(nodes: Set[Address]): ClusterHeartbeatSenderState = { + ClusterHeartbeatSenderState(nodes.foldLeft(this) { _ removeHeartbeatRequest _ }, ring.copy(nodes = nodes + ring.selfAddress)) + } def addMember(a: Address): ClusterHeartbeatSenderState = - ClusterHeartbeatSenderState(removeJoinInProgress(a), all = all + a, consistentHash = consistentHash :+ a) + ClusterHeartbeatSenderState(removeHeartbeatRequest(a), ring :+ a) def removeMember(a: Address): ClusterHeartbeatSenderState = - ClusterHeartbeatSenderState(removeJoinInProgress(a), all = all - a, consistentHash = consistentHash :- a) + ClusterHeartbeatSenderState(removeHeartbeatRequest(a), ring :- a) - private def removeJoinInProgress(address: Address): ClusterHeartbeatSenderState = { - if (joinInProgress contains address) - copy(joinInProgress = joinInProgress - address, ending = ending + (address -> 0)) + private def removeHeartbeatRequest(address: Address): ClusterHeartbeatSenderState = { + if (heartbeatRequest contains address) + copy(heartbeatRequest = heartbeatRequest - address, ending = ending + (address -> 0)) else this } - def addJoinInProgress(address: Address, deadline: Deadline): ClusterHeartbeatSenderState = { - if (all contains address) this - else copy(joinInProgress = joinInProgress + (address -> deadline), ending = ending - address) + def addHeartbeatRequest(address: Address, deadline: Deadline): ClusterHeartbeatSenderState = { + if (current.contains(address)) this + else copy(heartbeatRequest = heartbeatRequest + (address -> deadline), ending = ending - address) } /** - * Cleanup overdue joinInProgress, in case a joining node never - * became member, for some 
reason. + * Cleanup overdue heartbeatRequest */ - def removeOverdueJoinInProgress(): ClusterHeartbeatSenderState = { - val overdue = joinInProgress collect { case (address, deadline) if deadline.isOverdue ⇒ address } + def removeOverdueHeartbeatRequest(): ClusterHeartbeatSenderState = { + val overdue = heartbeatRequest collect { case (address, deadline) if deadline.isOverdue ⇒ address } if (overdue.isEmpty) this else - copy(ending = ending ++ overdue.map(_ -> 0), joinInProgress = joinInProgress -- overdue) + copy(ending = ending ++ overdue.map(_ -> 0), heartbeatRequest = heartbeatRequest -- overdue) } def removeEnding(a: Address): ClusterHeartbeatSenderState = copy(ending = ending - a) @@ -290,62 +293,72 @@ private[cluster] case class ClusterHeartbeatSenderState private ( /** * INTERNAL API - */ -private[cluster] object ClusterHeartbeatSenderConnection { - import ClusterHeartbeatReceiver._ - - /** - * Command to [akka.cluster.ClusterHeartbeatSenderConnection]], which will send - * [[akka.cluster.ClusterHeartbeatReceiver.Heartbeat]] to the other node. - * Local only, no need to serialize. - */ - case class SendHeartbeat(heartbeatMsg: Heartbeat, to: Address, deadline: Deadline) - - /** - * Command to [akka.cluster.ClusterHeartbeatSenderConnection]], which will send - * [[akka.cluster.ClusterHeartbeatReceiver.EndHeartbeat]] to the other node. - * Local only, no need to serialize. - */ - case class SendEndHeartbeat(endHeartbeatMsg: EndHeartbeat, to: Address) -} - -/** - * Responsible for sending [[akka.cluster.ClusterHeartbeatReceiver.Heartbeat]] - * and [[akka.cluster.ClusterHeartbeatReceiver.EndHeartbeat]] to one specific address. * - * This actor exists only because Netty blocks when sending to broken connections, - * and this actor uses a configurable circuit breaker to reduce connect attempts to broken - * connections. + * Data structure for picking heartbeat receivers and keep track of what nodes + * that are expected to send heartbeat messages to a node. 
The node ring is + * shuffled by deterministic hashing to avoid picking physically co-located + * neighbors. * - * @see akka.cluster.ClusterHeartbeatSender + * It is immutable, i.e. the methods return new instances. */ -private[cluster] final class ClusterHeartbeatSenderConnection(toRef: ActorRef) - extends Actor with ActorLogging { +private[cluster] case class HeartbeatNodeRing(selfAddress: Address, nodes: Set[Address], monitoredByNrOfMembers: Int) { - import ClusterHeartbeatSenderConnection._ + require(nodes contains selfAddress, s"nodes [${nodes.mkString(", ")}] must contain selfAddress [${selfAddress}]") - val breaker = { - val cbSettings = Cluster(context.system).settings.SendCircuitBreakerSettings - CircuitBreaker(context.system.scheduler, - cbSettings.maxFailures, cbSettings.callTimeout, cbSettings.resetTimeout). - onHalfOpen(log.debug("CircuitBreaker Half-Open for: [{}]", toRef)). - onOpen(log.debug("CircuitBreaker Open for [{}]", toRef)). - onClose(log.debug("CircuitBreaker Closed for [{}]", toRef)) + private val nodeRing: immutable.SortedSet[Address] = { + implicit val ringOrdering: Ordering[Address] = Ordering.fromLessThan[Address] { (a, b) ⇒ + val ha = hashFor(a) + val hb = hashFor(b) + ha < hb || (ha == hb && Member.addressOrdering.compare(a, b) < 0) + } + + immutable.SortedSet() ++ nodes } - def receive = { - case SendHeartbeat(heartbeatMsg, _, deadline) ⇒ - if (!deadline.isOverdue) { - log.debug("Cluster Node [{}] - Heartbeat to [{}]", heartbeatMsg.from, toRef) - // Netty blocks when sending to broken connections, the CircuitBreaker will - // measure elapsed time and open if too many long calls - try breaker.withSyncCircuitBreaker { - toRef ! 
heartbeatMsg - } catch { case e: CircuitBreakerOpenException ⇒ /* skip sending heartbeat to broken connection */ } - } - if (deadline.isOverdue) log.debug("Sending heartbeat to [{}] took longer than expected", toRef) - case SendEndHeartbeat(endHeartbeatMsg, _) ⇒ - log.debug("Cluster Node [{}] - EndHeartbeat to [{}]", endHeartbeatMsg.from, toRef) - toRef ! endHeartbeatMsg + private def hashFor(node: Address): Int = node match { + // cluster node identifier is the host and port of the address; protocol and system is assumed to be the same + case Address(_, _, Some(host), Some(port)) ⇒ MurmurHash.stringHash(s"${host}:${port}") + case _ ⇒ 0 } + + /** + * Receivers for `selfAddress`. Cached for subsequent access. + */ + lazy val myReceivers: immutable.Set[Address] = receivers(selfAddress) + /** + * Senders for `selfAddress`. Cached for subsequent access. + */ + lazy val mySenders: immutable.Set[Address] = senders(selfAddress) + + private val useAllAsReceivers = monitoredByNrOfMembers >= (nodeRing.size - 1) + + /** + * The receivers to use from a specified sender. + */ + def receivers(sender: Address): immutable.Set[Address] = + if (useAllAsReceivers) + nodeRing - sender + else { + val slice = nodeRing.from(sender).tail.take(monitoredByNrOfMembers) + if (slice.size < monitoredByNrOfMembers) + (slice ++ nodeRing.take(monitoredByNrOfMembers - slice.size)) + else slice + } + + /** + * The expected senders for a specific receiver. + */ + def senders(receiver: Address): Set[Address] = + nodes filter { sender ⇒ receivers(sender) contains receiver } + + /** + * Add a node to the ring. + */ + def :+(node: Address): HeartbeatNodeRing = if (nodes contains node) this else copy(nodes = nodes + node) + + /** + * Remove a node from the ring. 
+ */ + def :-(node: Address): HeartbeatNodeRing = if (nodes contains node) copy(nodes = nodes - node) else this + } diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterJmx.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterJmx.scala index ae023263c8..2e735b89d1 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterJmx.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterJmx.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.cluster @@ -140,7 +140,6 @@ private[akka] class ClusterJmx(cluster: Cluster, log: LoggingAdapter) { * Unregisters the cluster JMX MBean from MBean server. */ def unregisterMBean(): Unit = { - clusterView.close() try { mBeanServer.unregisterMBean(clusterMBeanName) } catch { diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterMetricsCollector.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterMetricsCollector.scala index 2a7951a667..4c3f70108a 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterMetricsCollector.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterMetricsCollector.scala @@ -1,5 +1,5 @@ /* - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.cluster @@ -76,7 +76,7 @@ private[cluster] class ClusterMetricsCollector(publisher: ActorRef) extends Acto MetricsInterval, self, MetricsTick) override def preStart(): Unit = { - cluster.subscribe(self, classOf[MemberEvent]) + cluster.subscribe(self, classOf[InstantMemberEvent]) cluster.subscribe(self, classOf[UnreachableMember]) log.info("Metrics collection has started successfully on node [{}]", selfAddress) } @@ -84,11 +84,15 @@ private[cluster] class ClusterMetricsCollector(publisher: ActorRef) extends Acto def receive = { case GossipTick ⇒ gossip() case MetricsTick ⇒ collect() - case state: CurrentClusterState ⇒ receiveState(state) - case MemberUp(m) ⇒ addMember(m) - case e: MemberEvent ⇒ removeMember(e.member) - case UnreachableMember(m) ⇒ removeMember(m) case msg: MetricsGossipEnvelope ⇒ receiveGossip(msg) + case state: InstantClusterState ⇒ receiveState(state) + case state: CurrentClusterState ⇒ // enough with InstantClusterState + case InstantMemberUp(m) ⇒ addMember(m) + case InstantMemberDowned(m) ⇒ removeMember(m) + case InstantMemberRemoved(m) ⇒ removeMember(m) + case UnreachableMember(m) ⇒ removeMember(m) + case _: InstantMemberEvent ⇒ // not interested in other types of InstantMemberEvent + } override def postStop: Unit = { @@ -115,7 +119,7 @@ private[cluster] class ClusterMetricsCollector(publisher: ActorRef) extends Acto /** * Updates the initial node ring for those nodes that are [[akka.cluster.MemberStatus.Up]]. */ - def receiveState(state: CurrentClusterState): Unit = + def receiveState(state: InstantClusterState): Unit = nodes = state.members collect { case m if m.status == Up ⇒ m.address } /** @@ -303,12 +307,13 @@ case class Metric private (name: String, value: Number, private val average: Opt * If defined ( [[akka.cluster.MetricNumericConverter.defined()]] ), updates the new * data point, and if defined, updates the data stream. Returns the updated metric. 
*/ - def :+(latest: Metric): Metric = if (this sameAs latest) average match { - case Some(avg) ⇒ copy(value = latest.value, average = Some(avg :+ latest.value.doubleValue)) - case None if latest.average.isDefined ⇒ copy(value = latest.value, average = latest.average) - case _ ⇒ copy(value = latest.value) - } - else this + def :+(latest: Metric): Metric = + if (this sameAs latest) average match { + case Some(avg) ⇒ copy(value = latest.value, average = Some(avg :+ latest.value.doubleValue)) + case None if latest.average.isDefined ⇒ copy(value = latest.value, average = latest.average) + case _ ⇒ copy(value = latest.value) + } + else this /** * The numerical value of the average, if defined, otherwise the latest value diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterReadView.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterReadView.scala index 831d72f9c8..af69f977e0 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterReadView.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterReadView.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.cluster @@ -59,10 +59,11 @@ private[akka] class ClusterReadView(cluster: Cluster) extends Closeable { case event: MemberEvent ⇒ // replace current member with new member (might have different status, only address is used in equals) state = state.copy(members = state.members - event.member + event.member) - case LeaderChanged(leader) ⇒ state = state.copy(leader = leader) - case s: CurrentClusterState ⇒ state = s - case CurrentInternalStats(stats) ⇒ _latestStats = stats - case ClusterMetricsChanged(nodes) ⇒ _clusterMetrics = nodes + case LeaderChanged(leader) ⇒ state = state.copy(leader = leader) + case s: CurrentClusterState ⇒ state = s + case CurrentInternalStats(stats) ⇒ _latestStats = stats + case ClusterMetricsChanged(nodes) ⇒ _clusterMetrics = nodes + case _: InstantClusterState | _: InstantMemberEvent ⇒ // not used here } } }).withDispatcher(cluster.settings.UseDispatcher), name = "clusterEventBusListener") @@ -142,8 +143,8 @@ private[akka] class ClusterReadView(cluster: Cluster) extends Closeable { /** * Unsubscribe to cluster events. */ - def close(): Unit = if (!eventBusListener.isTerminated) { - eventBusListener ! PoisonPill - } + def close(): Unit = + if (!eventBusListener.isTerminated) + eventBusListener ! PoisonPill } diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala index 5b5c26ae33..2020c67993 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.cluster @@ -11,6 +11,7 @@ import akka.ConfigurationException import akka.actor.Address import akka.actor.AddressFromURIString import akka.dispatch.Dispatchers +import akka.util.Helpers.Requiring import scala.concurrent.duration.FiniteDuration import akka.japi.Util.immutableSeq @@ -18,33 +19,36 @@ class ClusterSettings(val config: Config, val systemName: String) { import config._ final val FailureDetectorThreshold: Double = { - val x = getDouble("akka.cluster.failure-detector.threshold") - require(x > 0.0, "failure-detector.threshold must be > 0") - x - } + getDouble("akka.cluster.failure-detector.threshold") + } requiring (_ > 0.0, "failure-detector.threshold must be > 0") final val FailureDetectorMaxSampleSize: Int = { - val n = getInt("akka.cluster.failure-detector.max-sample-size") - require(n > 0, "failure-detector.max-sample-size must be > 0"); n - } + getInt("akka.cluster.failure-detector.max-sample-size") + } requiring (_ > 0, "failure-detector.max-sample-size must be > 0") final val FailureDetectorImplementationClass: String = getString("akka.cluster.failure-detector.implementation-class") final val FailureDetectorMinStdDeviation: FiniteDuration = { - val d = Duration(getMilliseconds("akka.cluster.failure-detector.min-std-deviation"), MILLISECONDS) - require(d > Duration.Zero, "failure-detector.min-std-deviation must be > 0"); d - } + Duration(getMilliseconds("akka.cluster.failure-detector.min-std-deviation"), MILLISECONDS) + } requiring (_ > Duration.Zero, "failure-detector.min-std-deviation must be > 0") final val FailureDetectorAcceptableHeartbeatPause: FiniteDuration = { - val d = Duration(getMilliseconds("akka.cluster.failure-detector.acceptable-heartbeat-pause"), MILLISECONDS) - require(d >= Duration.Zero, "failure-detector.acceptable-heartbeat-pause must be >= 0"); d - } + Duration(getMilliseconds("akka.cluster.failure-detector.acceptable-heartbeat-pause"), MILLISECONDS) + } requiring (_ >= Duration.Zero, 
"failure-detector.acceptable-heartbeat-pause must be >= 0") final val HeartbeatInterval: FiniteDuration = { - val d = Duration(getMilliseconds("akka.cluster.failure-detector.heartbeat-interval"), MILLISECONDS) - require(d > Duration.Zero, "failure-detector.heartbeat-interval must be > 0"); d - } - final val HeartbeatConsistentHashingVirtualNodesFactor = 10 // no need for configuration - final val NumberOfEndHeartbeats: Int = (FailureDetectorAcceptableHeartbeatPause / HeartbeatInterval + 1).toInt + Duration(getMilliseconds("akka.cluster.failure-detector.heartbeat-interval"), MILLISECONDS) + } requiring (_ > Duration.Zero, "failure-detector.heartbeat-interval must be > 0") + final val HeartbeatRequestDelay: FiniteDuration = { + Duration(getMilliseconds("akka.cluster.failure-detector.heartbeat-request.grace-period"), MILLISECONDS) + } requiring (_ > Duration.Zero, "failure-detector.heartbeat-request.grace-period must be > 0") + final val HeartbeatExpectedResponseAfter: FiniteDuration = { + Duration(getMilliseconds("akka.cluster.failure-detector.heartbeat-request.expected-response-after"), MILLISECONDS) + } requiring (_ > Duration.Zero, "failure-detector.heartbeat-request.expected-response-after > 0") + final val HeartbeatRequestTimeToLive: FiniteDuration = { + Duration(getMilliseconds("akka.cluster.failure-detector.heartbeat-request.time-to-live"), MILLISECONDS) + } requiring (_ > Duration.Zero, "failure-detector.heartbeat-request.time-to-live > 0") + final val NumberOfEndHeartbeats: Int = { + getInt("akka.cluster.failure-detector.nr-of-end-heartbeats") + } requiring (_ > 0, "failure-detector.nr-of-end-heartbeats must be > 0") final val MonitoredByNrOfMembers: Int = { - val n = getInt("akka.cluster.failure-detector.monitored-by-nr-of-members") - require(n > 0, "failure-detector.monitored-by-nr-of-members must be > 0"); n - } + getInt("akka.cluster.failure-detector.monitored-by-nr-of-members") + } requiring (_ > 0, "failure-detector.monitored-by-nr-of-members must be > 
0") final val SeedNodes: immutable.IndexedSeq[Address] = immutableSeq(getStringList("akka.cluster.seed-nodes")).map { case AddressFromURIString(addr) ⇒ addr }.toVector @@ -57,11 +61,9 @@ class ClusterSettings(val config: Config, val systemName: String) { final val AutoJoin: Boolean = getBoolean("akka.cluster.auto-join") final val AutoDown: Boolean = getBoolean("akka.cluster.auto-down") final val MinNrOfMembers: Int = { - val n = getInt("akka.cluster.min-nr-of-members") - require(n > 0, "min-nr-of-members must be > 0"); n - } + getInt("akka.cluster.min-nr-of-members") + } requiring (_ > 0, "min-nr-of-members must be > 0") final val JmxEnabled: Boolean = getBoolean("akka.cluster.jmx.enabled") - final val JoinTimeout: FiniteDuration = Duration(getMilliseconds("akka.cluster.join-timeout"), MILLISECONDS) final val UseDispatcher: String = getString("akka.cluster.use-dispatcher") match { case "" ⇒ Dispatchers.DefaultDispatcherId case id ⇒ id @@ -70,21 +72,14 @@ class ClusterSettings(val config: Config, val systemName: String) { final val MaxGossipMergeRate: Double = getDouble("akka.cluster.max-gossip-merge-rate") final val SchedulerTickDuration: FiniteDuration = Duration(getMilliseconds("akka.cluster.scheduler.tick-duration"), MILLISECONDS) final val SchedulerTicksPerWheel: Int = getInt("akka.cluster.scheduler.ticks-per-wheel") - final val SendCircuitBreakerSettings: CircuitBreakerSettings = CircuitBreakerSettings( - maxFailures = getInt("akka.cluster.send-circuit-breaker.max-failures"), - callTimeout = Duration(getMilliseconds("akka.cluster.send-circuit-breaker.call-timeout"), MILLISECONDS), - resetTimeout = Duration(getMilliseconds("akka.cluster.send-circuit-breaker.reset-timeout"), MILLISECONDS)) final val MetricsEnabled: Boolean = getBoolean("akka.cluster.metrics.enabled") final val MetricsCollectorClass: String = getString("akka.cluster.metrics.collector-class") final val MetricsInterval: FiniteDuration = { - val d = 
Duration(getMilliseconds("akka.cluster.metrics.collect-interval"), MILLISECONDS) - require(d > Duration.Zero, "metrics.collect-interval must be > 0"); d - } + Duration(getMilliseconds("akka.cluster.metrics.collect-interval"), MILLISECONDS) + } requiring (_ > Duration.Zero, "metrics.collect-interval must be > 0") final val MetricsGossipInterval: FiniteDuration = Duration(getMilliseconds("akka.cluster.metrics.gossip-interval"), MILLISECONDS) final val MetricsMovingAverageHalfLife: FiniteDuration = { - val d = Duration(getMilliseconds("akka.cluster.metrics.moving-average-half-life"), MILLISECONDS) - require(d > Duration.Zero, "metrics.moving-average-half-life must be > 0"); d - } + Duration(getMilliseconds("akka.cluster.metrics.moving-average-half-life"), MILLISECONDS) + } requiring (_ > Duration.Zero, "metrics.moving-average-half-life must be > 0") } -case class CircuitBreakerSettings(maxFailures: Int, callTimeout: FiniteDuration, resetTimeout: FiniteDuration) diff --git a/akka-cluster/src/main/scala/akka/cluster/FailureDetector.scala b/akka-cluster/src/main/scala/akka/cluster/FailureDetector.scala index 1aa926c5e5..dbb17ac80a 100644 --- a/akka-cluster/src/main/scala/akka/cluster/FailureDetector.scala +++ b/akka-cluster/src/main/scala/akka/cluster/FailureDetector.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.cluster @@ -16,6 +16,12 @@ trait FailureDetector { */ def isAvailable(connection: Address): Boolean + /** + * Returns true if the failure detector has received any heartbeats and started monitoring + * of the resource. + */ + def isMonitoring(connection: Address): Boolean + /** * Records a heartbeat for a connection. 
*/ diff --git a/akka-cluster/src/main/scala/akka/cluster/Gossip.scala b/akka-cluster/src/main/scala/akka/cluster/Gossip.scala index 1f96434995..876ca93aae 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Gossip.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Gossip.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.cluster @@ -13,6 +13,10 @@ import MemberStatus._ */ private[cluster] object Gossip { val emptyMembers: immutable.SortedSet[Member] = immutable.SortedSet.empty + val empty: Gossip = new Gossip(Gossip.emptyMembers) + + def apply(members: immutable.SortedSet[Member]) = + if (members.isEmpty) empty else empty.copy(members = members) } /** @@ -49,8 +53,8 @@ private[cluster] object Gossip { * removed node telling it to shut itself down. */ private[cluster] case class Gossip( + members: immutable.SortedSet[Member], // sorted set of members with their status, sorted by address overview: GossipOverview = GossipOverview(), - members: immutable.SortedSet[Member] = Gossip.emptyMembers, // sorted set of members with their status, sorted by address version: VectorClock = VectorClock()) // vector clock version extends ClusterMessage // is a serializable cluster message with Versioned[Gossip] { @@ -128,7 +132,7 @@ private[cluster] case class Gossip( // 4. fresh seen table val mergedSeen = Map.empty[Address, VectorClock] - Gossip(GossipOverview(mergedSeen, mergedUnreachable), mergedMembers, mergedVClock) + Gossip(mergedMembers, GossipOverview(mergedSeen, mergedUnreachable), mergedVClock) } /** diff --git a/akka-cluster/src/main/scala/akka/cluster/Member.scala b/akka-cluster/src/main/scala/akka/cluster/Member.scala index 1ee4aae804..141018834e 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Member.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Member.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.cluster @@ -34,6 +34,7 @@ object Member { * `Address` ordering type class, sorts addresses by host and port. */ implicit val addressOrdering: Ordering[Address] = Ordering.fromLessThan[Address] { (a, b) ⇒ + // cluster node identifier is the host and port of the address; protocol and system is assumed to be the same if (a.host != b.host) a.host.getOrElse("").compareTo(b.host.getOrElse("")) < 0 else if (a.port != b.port) a.port.getOrElse(0) < b.port.getOrElse(0) else false @@ -126,4 +127,4 @@ object MemberStatus { * JAVA API */ def removed: MemberStatus = Removed -} \ No newline at end of file +} diff --git a/akka-cluster/src/main/scala/akka/cluster/VectorClock.scala b/akka-cluster/src/main/scala/akka/cluster/VectorClock.scala index baef66f26c..45fa26aa5c 100644 --- a/akka-cluster/src/main/scala/akka/cluster/VectorClock.scala +++ b/akka-cluster/src/main/scala/akka/cluster/VectorClock.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.cluster diff --git a/akka-cluster/src/main/scala/akka/cluster/routing/AdaptiveLoadBalancingRouter.scala b/akka-cluster/src/main/scala/akka/cluster/routing/AdaptiveLoadBalancingRouter.scala index 60a9c5b6a7..ab86177275 100644 --- a/akka-cluster/src/main/scala/akka/cluster/routing/AdaptiveLoadBalancingRouter.scala +++ b/akka-cluster/src/main/scala/akka/cluster/routing/AdaptiveLoadBalancingRouter.scala @@ -1,5 +1,5 @@ /* - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.cluster.routing @@ -431,4 +431,4 @@ private[cluster] class WeightedRoutees(refs: immutable.IndexedSeq[ActorRef], sel else j } } -} \ No newline at end of file +} diff --git a/akka-cluster/src/main/scala/akka/cluster/routing/ClusterRouterConfig.scala b/akka-cluster/src/main/scala/akka/cluster/routing/ClusterRouterConfig.scala index 59c88c9fee..4b6c3b5c30 100644 --- a/akka-cluster/src/main/scala/akka/cluster/routing/ClusterRouterConfig.scala +++ b/akka-cluster/src/main/scala/akka/cluster/routing/ClusterRouterConfig.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.cluster.routing diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala index bf1009b472..79f5a72c4b 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUnreachableSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.cluster @@ -52,7 +52,7 @@ abstract class ClientDowningNodeThatIsUnreachableSpec(multiNodeConfig: ClientDow cluster.down(thirdAddress) enterBarrier("down-third-node") - awaitUpConvergence(numberOfMembers = 3, canNotBePartOfMemberRing = List(thirdAddress)) + awaitUpConvergence(numberOfMembers = 3, canNotBePartOfMemberRing = Set(thirdAddress)) clusterView.members.exists(_.address == thirdAddress) must be(false) } @@ -63,7 +63,7 @@ abstract class ClientDowningNodeThatIsUnreachableSpec(multiNodeConfig: ClientDow runOn(second, fourth) { enterBarrier("down-third-node") - awaitUpConvergence(numberOfMembers = 3, canNotBePartOfMemberRing = List(thirdAddress)) + awaitUpConvergence(numberOfMembers = 3, canNotBePartOfMemberRing = Set(thirdAddress)) } enterBarrier("await-completion") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala index 2a0af15997..92622dd13a 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClientDowningNodeThatIsUpSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.cluster @@ -50,7 +50,7 @@ abstract class ClientDowningNodeThatIsUpSpec(multiNodeConfig: ClientDowningNodeT markNodeAsUnavailable(thirdAddress) - awaitUpConvergence(numberOfMembers = 3, canNotBePartOfMemberRing = List(thirdAddress)) + awaitUpConvergence(numberOfMembers = 3, canNotBePartOfMemberRing = Set(thirdAddress)) clusterView.members.exists(_.address == thirdAddress) must be(false) } @@ -61,7 +61,7 @@ abstract class ClientDowningNodeThatIsUpSpec(multiNodeConfig: ClientDowningNodeT runOn(second, fourth) { enterBarrier("down-third-node") - awaitUpConvergence(numberOfMembers = 3, canNotBePartOfMemberRing = List(thirdAddress)) + awaitUpConvergence(numberOfMembers = 3, canNotBePartOfMemberRing = Set(thirdAddress)) } enterBarrier("await-completion") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterAccrualFailureDetectorSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterAccrualFailureDetectorSpec.scala index 552f90bd49..3ef3beddcc 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterAccrualFailureDetectorSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterAccrualFailureDetectorSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.cluster diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterDeathWatchSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterDeathWatchSpec.scala index d711aec55f..8a712dba0e 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterDeathWatchSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterDeathWatchSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.cluster @@ -106,22 +106,6 @@ abstract class ClusterDeathWatchSpec } - "receive Terminated when watched node is unknown host" taggedAs LongRunningTest in { - runOn(first) { - val path = RootActorPath(Address("akka", system.name, "unknownhost", 2552)) / "user" / "subject" - system.actorOf(Props(new Actor { - context.watch(context.actorFor(path)) - def receive = { - case t: Terminated ⇒ testActor ! t.actor.path - } - }), name = "observer2") - - expectMsg(path) - } - - enterBarrier("after-2") - } - "receive Terminated when watched path doesn't exist" taggedAs LongRunningTest in { runOn(first) { val path = RootActorPath(second) / "user" / "non-existing" @@ -135,7 +119,7 @@ abstract class ClusterDeathWatchSpec expectMsg(path) } - enterBarrier("after-3") + enterBarrier("after-2") } "be able to shutdown system when using remote deployed actor on node that crash" taggedAs LongRunningTest in within(20 seconds) { @@ -172,7 +156,7 @@ abstract class ClusterDeathWatchSpec testConductor.removeNode(fourth) } - enterBarrier("after-4") + enterBarrier("after-3") } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterMetricsDisabledSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterMetricsDisabledSpec.scala index 0352a2c084..0bfc0a5975 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterMetricsDisabledSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterMetricsDisabledSpec.scala @@ -1,5 +1,5 @@ /* - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.cluster diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterMetricsSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterMetricsSpec.scala index 6712502312..dc16f8dfe7 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterMetricsSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterMetricsSpec.scala @@ -1,5 +1,5 @@ /* - * Copyright (C) 2009-2012 Typesafe Inc. 
+ * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.cluster @@ -55,4 +55,4 @@ abstract class ClusterMetricsSpec extends MultiNodeSpec(ClusterMetricsMultiJvmSp enterBarrier("finished") } } -} \ No newline at end of file +} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala index ae5dea869e..763d2623c2 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.cluster diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/InitialHeartbeatSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/InitialHeartbeatSpec.scala new file mode 100644 index 0000000000..4e6bae48d5 --- /dev/null +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/InitialHeartbeatSpec.scala @@ -0,0 +1,87 @@ +/** + * Copyright (C) 2009-2013 Typesafe Inc. + */ +package akka.cluster + +import language.postfixOps +import scala.concurrent.duration._ +import com.typesafe.config.ConfigFactory +import akka.actor.Actor +import akka.actor.Props +import akka.cluster.ClusterEvent.InstantClusterState +import akka.cluster.ClusterEvent.InstantMemberJoined +import akka.remote.testkit.MultiNodeConfig +import akka.remote.testkit.MultiNodeSpec +import akka.remote.transport.ThrottlerTransportAdapter.Direction +import akka.testkit._ + +object InitialHeartbeatMultiJvmSpec extends MultiNodeConfig { + val controller = role("controller") + val first = role("first") + val second = role("second") + + commonConfig(debugConfig(on = false). + withFallback(ConfigFactory.parseString(""" + akka.cluster.failure-detector.heartbeat-request.grace-period = 3 s + akka.cluster.failure-detector.threshold = 4""")). 
+ withFallback(MultiNodeClusterSpec.clusterConfig)) + + testTransport(on = true) +} + +class InitialHeartbeatMultiJvmNode1 extends InitialHeartbeatSpec +class InitialHeartbeatMultiJvmNode2 extends InitialHeartbeatSpec +class InitialHeartbeatMultiJvmNode3 extends InitialHeartbeatSpec + +abstract class InitialHeartbeatSpec + extends MultiNodeSpec(InitialHeartbeatMultiJvmSpec) + with MultiNodeClusterSpec { + + import InitialHeartbeatMultiJvmSpec._ + + muteMarkingAsUnreachable() + + "A member" must { + + "detect failure even though no heartbeats have been received" taggedAs LongRunningTest in { + val secondAddress = address(second) + awaitClusterUp(first) + + runOn(first) { + val joinLatch = TestLatch() + cluster.subscribe(system.actorOf(Props(new Actor { + def receive = { + case state: InstantClusterState ⇒ + if (state.members.exists(_.address == secondAddress)) + joinLatch.countDown() + case InstantMemberJoined(m) ⇒ + if (m.address == secondAddress) + joinLatch.countDown() + } + })), classOf[InstantMemberJoined]) + + within(10 seconds) { + joinLatch.await + } + } + runOn(second) { + cluster.join(first) + } + enterBarrier("second-joined") + + runOn(controller) { + // it is likely that first has not started sending heartbeats to second yet + // Direction must be Receive because the gossip from first to second must pass through + testConductor.blackhole(first, second, Direction.Receive).await + } + + runOn(second) { + within(15 seconds) { + awaitCond(!cluster.failureDetector.isAvailable(first)) + } + } + + enterBarrier("after-1") + } + } +} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinInProgressSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinInProgressSpec.scala index f59db3f21e..ed0a667a8b 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinInProgressSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinInProgressSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. 
+ * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.cluster diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinSeedNodeSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinSeedNodeSpec.scala index 464b627944..577ea213ec 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinSeedNodeSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinSeedNodeSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.cluster diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala index efa9e3f25d..46ec2780b8 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinTwoClustersSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.cluster diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LargeClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LargeClusterSpec.scala deleted file mode 100644 index 97711b30de..0000000000 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LargeClusterSpec.scala +++ /dev/null @@ -1,330 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. 
- */ -package akka.cluster - -import com.typesafe.config.ConfigFactory -import akka.remote.testkit.MultiNodeConfig -import akka.remote.testkit.MultiNodeSpec -import akka.testkit._ -import akka.testkit.TestEvent._ -import scala.concurrent.duration._ -import akka.actor.ActorSystem -import java.util.concurrent.TimeoutException -import scala.collection.immutable.SortedSet -import scala.concurrent.Await -import java.util.concurrent.TimeUnit -import akka.remote.testconductor.RoleName -import akka.actor.Props -import akka.actor.Actor -import akka.cluster.MemberStatus._ - -object LargeClusterMultiJvmSpec extends MultiNodeConfig { - // each jvm simulates a datacenter with many nodes - val firstDatacenter = role("first-datacenter") - val secondDatacenter = role("second-datacenter") - val thirdDatacenter = role("third-datacenter") - val fourthDatacenter = role("fourth-datacenter") - val fifthDatacenter = role("fifth-datacenter") - - // Note that this test uses default configuration, - // not MultiNodeClusterSpec.clusterConfig - commonConfig(ConfigFactory.parseString(""" - # Number of ActorSystems in each jvm, can be specified as - # system property when running real tests. Many nodes - # will take long time and consume many threads. - # 10 => 50 nodes is possible to run on one machine. 
- akka.test.large-cluster-spec.nodes-per-datacenter = 2 - akka.cluster { - gossip-interval = 500 ms - auto-join = off - auto-down = on - failure-detector.acceptable-heartbeat-pause = 5s - publish-stats-interval = 0 s # always, when it happens - } - akka.event-handlers = ["akka.testkit.TestEventListener"] - akka.loglevel = INFO - akka.actor.provider = akka.cluster.ClusterActorRefProvider - akka.actor.default-dispatcher.fork-join-executor { - # when using nodes-per-datacenter=10 we need some extra - # threads to keep up with netty connect blocking - parallelism-min = 13 - parallelism-max = 13 - } - akka.scheduler.tick-duration = 33 ms - akka.remote.log-remote-lifecycle-events = off - akka.remote.netty.execution-pool-size = 4 - #akka.remote.netty.reconnection-time-window = 10s - akka.remote.netty.write-timeout = 5s - akka.remote.netty.backoff-timeout = 500ms - akka.remote.netty.connection-timeout = 500ms - - # Using a separate dispatcher for netty io doesn't reduce number - # of needed threads - # akka.remote.netty.use-dispatcher-for-io=akka.test.io-dispatcher - # akka.test.io-dispatcher.fork-join-executor { - # parallelism-min = 100 - # parallelism-max = 100 - # } - """)) -} - -class LargeClusterMultiJvmNode1 extends LargeClusterSpec -class LargeClusterMultiJvmNode2 extends LargeClusterSpec -class LargeClusterMultiJvmNode3 extends LargeClusterSpec -class LargeClusterMultiJvmNode4 extends LargeClusterSpec -class LargeClusterMultiJvmNode5 extends LargeClusterSpec - -abstract class LargeClusterSpec - extends MultiNodeSpec(LargeClusterMultiJvmSpec) - with MultiNodeClusterSpec { - - import LargeClusterMultiJvmSpec._ - import ClusterEvent._ - - override def muteLog(sys: ActorSystem = system): Unit = { - super.muteLog(sys) - muteMarkingAsUnreachable(sys) - muteDeadLetters(sys) - } - - var systems: IndexedSeq[ActorSystem] = IndexedSeq(system) - val nodesPerDatacenter = system.settings.config.getInt( - "akka.test.large-cluster-spec.nodes-per-datacenter") - - /** - * Since we 
start some ActorSystems/Clusters outside of the - * MultiNodeClusterSpec control we can't use use the mechanism - * defined in MultiNodeClusterSpec to inject failure detector etc. - * Use ordinary Cluster extension with default AccrualFailureDetector. - */ - override def cluster: Cluster = Cluster(system) - - override def atTermination(): Unit = { - systems foreach { _.shutdown } - val shutdownTimeout = 20.seconds - val deadline = Deadline.now + shutdownTimeout - systems.foreach { sys ⇒ - if (sys.isTerminated) - () // already done - else if (deadline.isOverdue) - sys.log.warning("Failed to shutdown [{}] within [{}]", sys.name, shutdownTimeout) - else { - try sys.awaitTermination(deadline.timeLeft) catch { - case _: TimeoutException ⇒ sys.log.warning("Failed to shutdown [{}] within [{}]", sys.name, shutdownTimeout) - } - } - } - } - - def startupSystems(): Unit = { - // one system is already started by the multi-node test - for (n ← 2 to nodesPerDatacenter) { - val sys = ActorSystem(myself.name + "-" + n, system.settings.config) - muteLog(sys) - systems :+= sys - } - - // Initialize the Cluster extensions, i.e. 
startup the clusters - systems foreach { Cluster(_) } - } - - def expectedMaxDuration(totalNodes: Int): FiniteDuration = 5.seconds + 2.seconds * totalNodes - - def joinAll(from: RoleName, to: RoleName, totalNodes: Int, runOnRoles: RoleName*): Unit = { - val joiningClusters = systems.map(Cluster(_)).toSet - join(joiningClusters, from, to, totalNodes, runOnRoles: _*) - } - - def join(joiningClusterNodes: Set[Cluster], from: RoleName, to: RoleName, totalNodes: Int, runOnRoles: RoleName*): Unit = { - runOnRoles must contain(from) - runOnRoles must contain(to) - - runOn(runOnRoles: _*) { - systems.size must be(nodesPerDatacenter) // make sure it is initialized - - val clusterNodes = if(isNode(from)) joiningClusterNodes else systems.map(Cluster(_)).toSet - val startGossipCounts = Map.empty[Cluster, Long] ++ - clusterNodes.map(c ⇒ (c -> c.readView.latestStats.receivedGossipCount)) - def gossipCount(c: Cluster): Long = { - c.readView.latestStats.receivedGossipCount - startGossipCounts(c) - } - val startTime = System.nanoTime - def tookMillis: String = TimeUnit.NANOSECONDS.toMillis(System.nanoTime - startTime) + " ms" - - val latch = TestLatch(clusterNodes.size) - clusterNodes foreach { c ⇒ - c.subscribe(system.actorOf(Props(new Actor { - var upCount = 0 - def receive = { - case state: CurrentClusterState ⇒ - upCount = state.members.count(_.status == Up) - case MemberUp(_) if !latch.isOpen ⇒ - upCount += 1 - if (upCount == totalNodes) { - log.debug("All [{}] nodes Up in [{}], it took [{}], received [{}] gossip messages", - totalNodes, c.selfAddress, tookMillis, gossipCount(c)) - latch.countDown() - } - case _ ⇒ // ignore - } - })), classOf[MemberEvent]) - } - - runOn(from) { - clusterNodes foreach { _ join to } - } - - Await.ready(latch, remaining) - - val counts = clusterNodes.map(gossipCount(_)) - val formattedStats = "mean=%s min=%s max=%s".format(counts.sum / clusterNodes.size, counts.min, counts.max) - log.info("Convergence of [{}] nodes reached, it took [{}], received 
[{}] gossip messages per node", - totalNodes, tookMillis, formattedStats) - - } - } - - "A large cluster" must { - - "join all nodes in first-datacenter to first-datacenter" taggedAs LongRunningTest in { - runOn(firstDatacenter) { - startupSystems() - startClusterNode() - } - enterBarrier("first-datacenter-started") - - val totalNodes = nodesPerDatacenter - within(expectedMaxDuration(totalNodes)) { - joinAll(from = firstDatacenter, to = firstDatacenter, totalNodes, runOnRoles = firstDatacenter) - enterBarrier("first-datacenter-joined") - } - } - - "join all nodes in second-datacenter to first-datacenter" taggedAs LongRunningTest in { - runOn(secondDatacenter) { - startupSystems() - } - enterBarrier("second-datacenter-started") - - val totalNodes = nodesPerDatacenter * 2 - within(expectedMaxDuration(totalNodes)) { - joinAll(from = secondDatacenter, to = firstDatacenter, totalNodes, runOnRoles = firstDatacenter, secondDatacenter) - enterBarrier("second-datacenter-joined") - } - } - - "join all nodes in third-datacenter to first-datacenter" taggedAs LongRunningTest in { - runOn(thirdDatacenter) { - startupSystems() - } - enterBarrier("third-datacenter-started") - - val totalNodes = nodesPerDatacenter * 3 - within(expectedMaxDuration(totalNodes)) { - joinAll(from = thirdDatacenter, to = firstDatacenter, totalNodes, - runOnRoles = firstDatacenter, secondDatacenter, thirdDatacenter) - enterBarrier("third-datacenter-joined") - } - } - - "join all nodes in fourth-datacenter to first-datacenter" taggedAs LongRunningTest in { - runOn(fourthDatacenter) { - startupSystems() - } - enterBarrier("fourth-datacenter-started") - - val totalNodes = nodesPerDatacenter * 4 - within(expectedMaxDuration(totalNodes)) { - joinAll(from = fourthDatacenter, to = firstDatacenter, totalNodes, - runOnRoles = firstDatacenter, secondDatacenter, thirdDatacenter, fourthDatacenter) - enterBarrier("fourth-datacenter-joined") - } - } - - "join nodes one by one from fifth-datacenter to first-datacenter" 
taggedAs LongRunningTest in { - runOn(fifthDatacenter) { - startupSystems() - } - enterBarrier("fifth-datacenter-started") - - // enough to join a few one-by-one (takes too long time otherwise) - val (bulk, oneByOne) = systems.splitAt(systems.size - 3) - - if (bulk.nonEmpty) { - val totalNodes = nodesPerDatacenter * 4 + bulk.size - within(expectedMaxDuration(totalNodes)) { - val joiningClusters = if(isNode(fifthDatacenter)) bulk.map(Cluster(_)).toSet else Set.empty[Cluster] - join(joiningClusters, from = fifthDatacenter, to = firstDatacenter, totalNodes, - runOnRoles = firstDatacenter, secondDatacenter, thirdDatacenter, fourthDatacenter, fifthDatacenter) - enterBarrier("fifth-datacenter-joined-" + bulk.size) - } - } - - for (i ← 0 until oneByOne.size) { - val totalNodes = nodesPerDatacenter * 4 + bulk.size + i + 1 - within(expectedMaxDuration(totalNodes)) { - val joiningClusters = if(isNode(fifthDatacenter)) Set(Cluster(oneByOne(i))) else Set.empty[Cluster] - join(joiningClusters, from = fifthDatacenter, to = firstDatacenter, totalNodes, - runOnRoles = firstDatacenter, secondDatacenter, thirdDatacenter, fourthDatacenter, fifthDatacenter) - enterBarrier("fifth-datacenter-joined-" + (bulk.size + i)) - } - } - } - - "detect failure and auto-down crashed nodes in second-datacenter" taggedAs LongRunningTest in { - val downedNodes = nodesPerDatacenter - val liveNodes = nodesPerDatacenter * 4 - - within(30.seconds + 3.seconds * liveNodes) { - val startGossipCounts = Map.empty[Cluster, Long] ++ - systems.map(sys ⇒ (Cluster(sys) -> Cluster(sys).readView.latestStats.receivedGossipCount)) - def gossipCount(c: Cluster): Long = { - c.readView.latestStats.receivedGossipCount - startGossipCounts(c) - } - val startTime = System.nanoTime - def tookMillis: String = TimeUnit.NANOSECONDS.toMillis(System.nanoTime - startTime) + " ms" - - val latch = TestLatch(nodesPerDatacenter) - systems foreach { sys ⇒ - Cluster(sys).subscribe(sys.actorOf(Props(new Actor { - var gotDowned = 
Set.empty[Member] - def receive = { - case state: CurrentClusterState ⇒ - gotDowned = gotDowned ++ state.unreachable.filter(_.status == Down) - checkDone() - case MemberDowned(m) if !latch.isOpen ⇒ - gotDowned = gotDowned + m - checkDone() - case _ ⇒ // not interesting - } - def checkDone(): Unit = if (gotDowned.size == downedNodes) { - log.info("Detected [{}] downed nodes in [{}], it took [{}], received [{}] gossip messages", - downedNodes, Cluster(sys).selfAddress, tookMillis, gossipCount(Cluster(sys))) - latch.countDown() - } - })), classOf[ClusterDomainEvent]) - } - - runOn(firstDatacenter) { - testConductor.shutdown(secondDatacenter, 0).await - } - - enterBarrier("second-datacenter-shutdown") - - runOn(firstDatacenter, thirdDatacenter, fourthDatacenter, fifthDatacenter) { - Await.ready(latch, remaining) - val mergeCount = systems.map(sys ⇒ Cluster(sys).readView.latestStats.mergeCount).sum - val counts = systems.map(sys ⇒ gossipCount(Cluster(sys))) - val formattedStats = "mean=%s min=%s max=%s".format(counts.sum / nodesPerDatacenter, counts.min, counts.max) - log.info("Convergence of [{}] nodes reached after failure, it took [{}], received [{}] gossip messages per node, merged [{}] times", - liveNodes, tookMillis, formattedStats, mergeCount) - } - - enterBarrier("after-6") - } - - } - - } -} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala index 279e32ab66..1203ac2740 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.cluster @@ -60,7 +60,7 @@ abstract class LeaderDowningNodeThatIsUnreachableSpec(multiNodeConfig: LeaderDow // --- HERE THE LEADER SHOULD DETECT FAILURE AND AUTO-DOWN THE UNREACHABLE NODE --- - awaitUpConvergence(numberOfMembers = 3, canNotBePartOfMemberRing = List(fourthAddress), 30.seconds) + awaitUpConvergence(numberOfMembers = 3, canNotBePartOfMemberRing = Set(fourthAddress), 30.seconds) } runOn(fourth) { @@ -70,7 +70,7 @@ abstract class LeaderDowningNodeThatIsUnreachableSpec(multiNodeConfig: LeaderDow runOn(second, third) { enterBarrier("down-fourth-node") - awaitUpConvergence(numberOfMembers = 3, canNotBePartOfMemberRing = List(fourthAddress), 30.seconds) + awaitUpConvergence(numberOfMembers = 3, canNotBePartOfMemberRing = Set(fourthAddress), 30.seconds) } enterBarrier("await-completion-1") @@ -90,7 +90,7 @@ abstract class LeaderDowningNodeThatIsUnreachableSpec(multiNodeConfig: LeaderDow // --- HERE THE LEADER SHOULD DETECT FAILURE AND AUTO-DOWN THE UNREACHABLE NODE --- - awaitUpConvergence(numberOfMembers = 2, canNotBePartOfMemberRing = List(secondAddress), 30.seconds) + awaitUpConvergence(numberOfMembers = 2, canNotBePartOfMemberRing = Set(secondAddress), 30.seconds) } runOn(second) { @@ -100,7 +100,7 @@ abstract class LeaderDowningNodeThatIsUnreachableSpec(multiNodeConfig: LeaderDow runOn(third) { enterBarrier("down-second-node") - awaitUpConvergence(numberOfMembers = 2, canNotBePartOfMemberRing = List(secondAddress), 30 seconds) + awaitUpConvergence(numberOfMembers = 2, canNotBePartOfMemberRing = Set(secondAddress), 30 seconds) } enterBarrier("await-completion-2") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala index dfe1553369..39adca3ac5 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderElectionSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright 
(C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.cluster @@ -110,12 +110,12 @@ abstract class LeaderElectionSpec(multiNodeConfig: LeaderElectionMultiNodeConfig } } - "be able to 're-elect' a single leader after leader has left" taggedAs LongRunningTest in within(20 seconds) { + "be able to 're-elect' a single leader after leader has left" taggedAs LongRunningTest in within(30 seconds) { shutdownLeaderAndVerifyNewLeader(alreadyShutdown = 0) enterBarrier("after-2") } - "be able to 're-elect' a single leader after leader has left (again)" taggedAs LongRunningTest in within(20 seconds) { + "be able to 're-elect' a single leader after leader has left (again)" taggedAs LongRunningTest in within(30 seconds) { shutdownLeaderAndVerifyNewLeader(alreadyShutdown = 1) enterBarrier("after-3") } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderLeavingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderLeavingSpec.scala index acaf909d57..bcd8b48c61 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderLeavingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderLeavingSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.cluster diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MBeanSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MBeanSpec.scala index e6d83f881e..15bd8d75e4 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MBeanSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MBeanSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.cluster @@ -115,7 +115,7 @@ abstract class MBeanSpec enterBarrier("fourth-down") runOn(first, second, third) { - awaitUpConvergence(3, canNotBePartOfMemberRing = List(fourthAddress)) + awaitUpConvergence(3, canNotBePartOfMemberRing = Set(fourthAddress)) assertMembers(clusterView.members, first, second, third) } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala index b36ffccf7c..6b3bac9341 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.cluster diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinSpec.scala index effff75438..381780c810 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.cluster diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerLeavingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerLeavingSpec.scala index 835c3d722e..281ef451c1 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerLeavingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerLeavingSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.cluster diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala index f4faf4234b..2fbdc298df 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerUpSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.cluster diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MinMembersBeforeUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MinMembersBeforeUpSpec.scala index 46891bbc49..4cad603747 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MinMembersBeforeUpSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MinMembersBeforeUpSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.cluster diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala index fd2714005b..1027a14279 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.cluster @@ -69,8 +69,7 @@ trait MultiNodeClusterSpec extends Suite with STMultiNodeSpec { self: MultiNodeS ".*Cluster Node.* - is starting up.*", ".*Shutting down cluster Node.*", ".*Cluster node successfully shut down.*", - ".*Using a dedicated scheduler for cluster.*", - ".*Phi value.* for connection.*") foreach { s ⇒ + ".*Using a dedicated scheduler for cluster.*") foreach { s ⇒ sys.eventStream.publish(Mute(EventFilter.info(pattern = s))) } @@ -82,13 +81,13 @@ trait MultiNodeClusterSpec extends Suite with STMultiNodeSpec { self: MultiNodeS } } - def muteMarkingAsUnreachable(sys: ActorSystem = system): Unit = if (!sys.log.isDebugEnabled) { - sys.eventStream.publish(Mute(EventFilter.error(pattern = ".*Marking.* as UNREACHABLE.*"))) - } + def muteMarkingAsUnreachable(sys: ActorSystem = system): Unit = + if (!sys.log.isDebugEnabled) + sys.eventStream.publish(Mute(EventFilter.error(pattern = ".*Marking.* as UNREACHABLE.*"))) - def muteDeadLetters(sys: ActorSystem = system): Unit = if (!sys.log.isDebugEnabled) { - sys.eventStream.publish(Mute(EventFilter.warning(pattern = ".*received dead letter from.*"))) - } + def muteDeadLetters(sys: ActorSystem = system): Unit = + if (!sys.log.isDebugEnabled) + sys.eventStream.publish(Mute(EventFilter.warning(pattern = ".*received dead letter from.*"))) override def afterAll(): Unit = { if (!log.isDebugEnabled) { @@ -202,15 +201,16 @@ trait MultiNodeClusterSpec extends Suite with STMultiNodeSpec { self: MultiNodeS * out of all nodes in the cluster. First * member in the cluster ring is expected leader. 
*/ - def assertLeaderIn(nodesInCluster: immutable.Seq[RoleName]): Unit = if (nodesInCluster.contains(myself)) { - nodesInCluster.length must not be (0) - val expectedLeader = roleOfLeader(nodesInCluster) - val leader = clusterView.leader - val isLeader = leader == Some(clusterView.selfAddress) - assert(isLeader == isNode(expectedLeader), - "expectedLeader [%s], got leader [%s], members [%s]".format(expectedLeader, leader, clusterView.members)) - clusterView.status must (be(MemberStatus.Up) or be(MemberStatus.Leaving)) - } + def assertLeaderIn(nodesInCluster: immutable.Seq[RoleName]): Unit = + if (nodesInCluster.contains(myself)) { + nodesInCluster.length must not be (0) + val expectedLeader = roleOfLeader(nodesInCluster) + val leader = clusterView.leader + val isLeader = leader == Some(clusterView.selfAddress) + assert(isLeader == isNode(expectedLeader), + "expectedLeader [%s], got leader [%s], members [%s]".format(expectedLeader, leader, clusterView.members)) + clusterView.status must (be(MemberStatus.Up) or be(MemberStatus.Leaving)) + } /** * Wait until the expected number of members has status Up and convergence has been reached. 
@@ -218,17 +218,17 @@ trait MultiNodeClusterSpec extends Suite with STMultiNodeSpec { self: MultiNodeS */ def awaitUpConvergence( numberOfMembers: Int, - canNotBePartOfMemberRing: immutable.Seq[Address] = Nil, + canNotBePartOfMemberRing: Set[Address] = Set.empty, timeout: FiniteDuration = 20.seconds): Unit = { within(timeout) { + if (!canNotBePartOfMemberRing.isEmpty) // don't run this on an empty set + awaitCond( + canNotBePartOfMemberRing forall (address ⇒ !(clusterView.members exists (_.address == address)))) awaitCond(clusterView.members.size == numberOfMembers) awaitCond(clusterView.members.forall(_.status == MemberStatus.Up)) // clusterView.leader is updated by LeaderChanged, await that to be updated also val expectedLeader = clusterView.members.headOption.map(_.address) awaitCond(clusterView.leader == expectedLeader) - if (!canNotBePartOfMemberRing.isEmpty) // don't run this on an empty set - awaitCond( - canNotBePartOfMemberRing forall (address ⇒ !(clusterView.members exists (_.address == address)))) } } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala index 2dfddc330f..f6d132df42 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.cluster diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala index e1051e4161..d38cc06e1f 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.cluster diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala index 336acc2769..da11b5b7d0 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeMembershipSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.cluster diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala index 0a82b74563..43ca4fae3e 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.cluster diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/SingletonClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/SingletonClusterSpec.scala index 33ce67ecb5..5910f48ac3 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/SingletonClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/SingletonClusterSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.cluster @@ -66,7 +66,7 @@ abstract class SingletonClusterSpec(multiNodeConfig: SingletonClusterMultiNodeCo markNodeAsUnavailable(secondAddress) - awaitUpConvergence(numberOfMembers = 1, canNotBePartOfMemberRing = List(secondAddress), 30.seconds) + awaitUpConvergence(numberOfMembers = 1, canNotBePartOfMemberRing = Set(secondAddress), 30.seconds) clusterView.isSingletonCluster must be(true) awaitCond(clusterView.isLeader) } diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/SplitBrainSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/SplitBrainSpec.scala index 4fe1f551aa..36b727cc1c 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/SplitBrainSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/SplitBrainSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.cluster @@ -24,7 +24,7 @@ case class SplitBrainMultiNodeConfig(failureDetectorPuppet: Boolean) extends Mul commonConfig(debugConfig(on = false). 
withFallback(ConfigFactory.parseString(""" - akka.remoting.retry-latch-closed-for = 3 s + akka.remote.retry-latch-closed-for = 3 s akka.cluster { auto-down = on failure-detector.threshold = 4 @@ -59,7 +59,7 @@ abstract class SplitBrainSpec(multiNodeConfig: SplitBrainMultiNodeConfig) val side1 = Vector(first, second) val side2 = Vector(third, fourth, fifth) - "A cluster of 5 members" ignore { + "A cluster of 5 members" must { "reach initial convergence" taggedAs LongRunningTest in { awaitClusterUp(first, second, third, fourth, fifth) @@ -87,10 +87,10 @@ abstract class SplitBrainSpec(multiNodeConfig: SplitBrainMultiNodeConfig) } runOn(side1: _*) { - awaitCond(clusterView.unreachableMembers.map(_.address) == (side2.toSet map address), 20 seconds) + awaitCond(clusterView.unreachableMembers.map(_.address) == (side2.toSet map address), 25 seconds) } runOn(side2: _*) { - awaitCond(clusterView.unreachableMembers.map(_.address) == (side1.toSet map address), 20 seconds) + awaitCond(clusterView.unreachableMembers.map(_.address) == (side1.toSet map address), 25 seconds) } enterBarrier("after-2") @@ -102,7 +102,7 @@ abstract class SplitBrainSpec(multiNodeConfig: SplitBrainMultiNodeConfig) // auto-down = on awaitCond(clusterView.unreachableMembers.forall(m ⇒ m.status == MemberStatus.Down), 15 seconds) clusterView.unreachableMembers.map(_.address) must be(side2.toSet map address) - awaitUpConvergence(side1.size, side2 map address) + awaitUpConvergence(side1.size, side2.toSet map address) assertLeader(side1: _*) } @@ -110,7 +110,7 @@ abstract class SplitBrainSpec(multiNodeConfig: SplitBrainMultiNodeConfig) // auto-down = on awaitCond(clusterView.unreachableMembers.forall(m ⇒ m.status == MemberStatus.Down), 15 seconds) clusterView.unreachableMembers.map(_.address) must be(side1.toSet map address) - awaitUpConvergence(side2.size, side1 map address) + awaitUpConvergence(side2.size, side1.toSet map address) assertLeader(side2: _*) } diff --git 
a/akka-cluster/src/multi-jvm/scala/akka/cluster/StressSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/StressSpec.scala new file mode 100644 index 0000000000..16a163c026 --- /dev/null +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/StressSpec.scala @@ -0,0 +1,1083 @@ +/** + * Copyright (C) 2009-2013 Typesafe Inc. + */ +package akka.cluster + +import language.postfixOps +import scala.annotation.tailrec +import scala.collection.immutable +import scala.concurrent.duration._ +import scala.concurrent.forkjoin.ThreadLocalRandom +import org.scalatest.BeforeAndAfterEach +import com.typesafe.config.Config +import com.typesafe.config.ConfigFactory +import akka.actor.Actor +import akka.actor.ActorLogging +import akka.actor.ActorRef +import akka.actor.ActorSystem +import akka.actor.Address +import akka.actor.Deploy +import akka.actor.OneForOneStrategy +import akka.actor.Props +import akka.actor.RootActorPath +import akka.actor.SupervisorStrategy._ +import akka.actor.Terminated +import akka.cluster.ClusterEvent.ClusterMetricsChanged +import akka.cluster.ClusterEvent.CurrentClusterState +import akka.cluster.ClusterEvent.MemberEvent +import akka.cluster.StandardMetrics.Cpu +import akka.cluster.StandardMetrics.HeapMemory +import akka.remote.RemoteScope +import akka.remote.testkit.MultiNodeConfig +import akka.remote.testkit.MultiNodeSpec +import akka.routing.FromConfig +import akka.testkit._ +import akka.testkit.TestEvent._ + +/** + * This test is intended to be used as long running stress test + * of cluster related features. Number of nodes and duration of + * the test steps can be configured. The test scenario is organized as + * follows: + * 1. join nodes in various ways up to the configured total number of nodes + * 2 while nodes are joining a few cluster aware routers are also working + * 3. exercise concurrent joining and shutdown of nodes repeatedly + * 4. exercise cluster aware routers, including high throughput + * 5. 
exercise many actors in a tree structure + * 6. exercise remote supervision + * 7. leave and shutdown nodes in various ways + * 8. while nodes are removed remote death watch is also exercised + * 9. while nodes are removed a few cluster aware routers are also working + */ +object StressMultiJvmSpec extends MultiNodeConfig { + + // Note that this test uses default configuration, + // not MultiNodeClusterSpec.clusterConfig + commonConfig(ConfigFactory.parseString(""" + akka.test.cluster-stress-spec { + # scale the nr-of-nodes* settings with this factor + nr-of-nodes-factor = 1 + nr-of-nodes = 13 + # not scaled + nr-of-seed-nodes = 3 + nr-of-nodes-joining-to-seed-initally = 2 + nr-of-nodes-joining-one-by-one-small = 2 + nr-of-nodes-joining-one-by-one-large = 2 + nr-of-nodes-joining-to-one = 2 + nr-of-nodes-joining-to-seed = 2 + nr-of-nodes-leaving-one-by-one-small = 1 + nr-of-nodes-leaving-one-by-one-large = 2 + nr-of-nodes-leaving = 2 + nr-of-nodes-shutdown-one-by-one-small = 1 + nr-of-nodes-shutdown-one-by-one-large = 2 + nr-of-nodes-shutdown = 2 + nr-of-nodes-join-remove = 2 + # not scaled + join-remove-duration = 90s + work-batch-size = 100 + work-batch-interval = 2s + payload-size = 1000 + # scale the *-duration settings with this factor + duration-factor = 1 + normal-throughput-duration = 30s + high-throughput-duration = 10s + supervision-duration = 10s + # actors are created in a tree structure defined + # by tree-width (number of children for each actor) and + # tree-levels, total number of actors can be calculated by + # (width * math.pow(width, levels) - 1) / (width - 1) + tree-width = 5 + tree-levels = 4 + report-metrics-interval = 10s + # scale convergence within timeouts with this factor + convergence-within-factor = 1.0 + } + + akka.actor.provider = akka.cluster.ClusterActorRefProvider + akka.cluster { + auto-join = off + auto-down = on + publish-stats-interval = 0 s # always, when it happens + } + akka.event-handlers = ["akka.testkit.TestEventListener"] 
+ akka.loglevel = INFO + akka.remote.log-remote-lifecycle-events = off + + akka.actor.deployment { + /master-node-1/workers { + router = round-robin + nr-of-instances = 100 + cluster { + enabled = on + max-nr-of-instances-per-node = 1 + allow-local-routees = off + } + } + /master-node-2/workers { + router = round-robin + nr-of-instances = 100 + cluster { + enabled = on + routees-path = "/user/worker" + allow-local-routees = off + } + } + /master-node-3/workers = { + router = adaptive + nr-of-instances = 100 + cluster { + enabled = on + max-nr-of-instances-per-node = 1 + allow-local-routees = off + } + } + } + """)) + + class Settings(conf: Config) { + private val testConfig = conf.getConfig("akka.test.cluster-stress-spec") + import testConfig._ + + private def getDuration(name: String): FiniteDuration = Duration(getMilliseconds(name), MILLISECONDS) + + val nFactor = getInt("nr-of-nodes-factor") + val totalNumberOfNodes = getInt("nr-of-nodes") * nFactor ensuring ( + _ >= 10, "nr-of-nodes must be >= 10") + val numberOfSeedNodes = getInt("nr-of-seed-nodes") // not scaled by nodes factor + val numberOfNodesJoiningToSeedNodesInitially = getInt("nr-of-nodes-joining-to-seed-initally") * nFactor + val numberOfNodesJoiningOneByOneSmall = getInt("nr-of-nodes-joining-one-by-one-small") * nFactor + val numberOfNodesJoiningOneByOneLarge = getInt("nr-of-nodes-joining-one-by-one-large") * nFactor + val numberOfNodesJoiningToOneNode = getInt("nr-of-nodes-joining-to-one") * nFactor + val numberOfNodesJoiningToSeedNodes = getInt("nr-of-nodes-joining-to-seed") * nFactor + val numberOfNodesLeavingOneByOneSmall = getInt("nr-of-nodes-leaving-one-by-one-small") * nFactor + val numberOfNodesLeavingOneByOneLarge = getInt("nr-of-nodes-leaving-one-by-one-large") * nFactor + val numberOfNodesLeaving = getInt("nr-of-nodes-leaving") * nFactor + val numberOfNodesShutdownOneByOneSmall = getInt("nr-of-nodes-shutdown-one-by-one-small") * nFactor + val numberOfNodesShutdownOneByOneLarge = 
getInt("nr-of-nodes-shutdown-one-by-one-large") * nFactor + val numberOfNodesShutdown = getInt("nr-of-nodes-shutdown") * nFactor + val numberOfNodesJoinRemove = getInt("nr-of-nodes-join-remove") // not scaled by nodes factor + + val workBatchSize = getInt("work-batch-size") + val workBatchInterval = Duration(getMilliseconds("work-batch-interval"), MILLISECONDS) + val payloadSize = getInt("payload-size") + val dFactor = getInt("duration-factor") + val joinRemoveDuration = getDuration("join-remove-duration") * dFactor + val normalThroughputDuration = getDuration("normal-throughput-duration") * dFactor + val highThroughputDuration = getDuration("high-throughput-duration") * dFactor + val supervisionDuration = getDuration("supervision-duration") * dFactor + val treeWidth = getInt("tree-width") + val treeLevels = getInt("tree-levels") + val reportMetricsInterval = getDuration("report-metrics-interval") + val convergenceWithinFactor = getDouble("convergence-within-factor") + + require(numberOfSeedNodes + numberOfNodesJoiningToSeedNodesInitially + numberOfNodesJoiningOneByOneSmall + + numberOfNodesJoiningOneByOneLarge + numberOfNodesJoiningToOneNode + numberOfNodesJoiningToSeedNodes <= totalNumberOfNodes, + s"specified number of joining nodes <= ${totalNumberOfNodes}") + + // don't shutdown the 3 nodes hosting the master actors + require(numberOfNodesLeavingOneByOneSmall + numberOfNodesLeavingOneByOneLarge + numberOfNodesLeaving + + numberOfNodesShutdownOneByOneSmall + numberOfNodesShutdownOneByOneLarge + numberOfNodesShutdown <= totalNumberOfNodes - 3, + s"specified number of leaving/shutdown nodes <= ${totalNumberOfNodes - 3}") + + require(numberOfNodesJoinRemove <= totalNumberOfNodes, s"nr-of-nodes-join-remove must be <= ${totalNumberOfNodes}") + } + + // FIXME configurable number of nodes + for (n ← 1 to 13) role("node-" + n) + + implicit class FormattedDouble(val d: Double) extends AnyVal { + def form: String = d.formatted("%.2f") + } + + case class ClusterResult( + 
address: Address, + duration: Duration, + clusterStats: ClusterStats) + + case class AggregatedClusterResult(title: String, duration: Duration, clusterStats: ClusterStats) + + /** + * Central aggregator of cluster statistics and metrics. + * Reports the result via log periodically and when all + * expected results has been collected. It shuts down + * itself when expected results has been collected. + */ + class ClusterResultAggregator(title: String, expectedResults: Int, reportMetricsInterval: FiniteDuration) extends Actor with ActorLogging { + val cluster = Cluster(context.system) + var reportTo: Option[ActorRef] = None + var results = Vector.empty[ClusterResult] + var nodeMetrics = Set.empty[NodeMetrics] + var phiValuesObservedByNode = { + import akka.cluster.Member.addressOrdering + immutable.SortedMap.empty[Address, Set[PhiValue]] + } + + import context.dispatcher + val reportMetricsTask = context.system.scheduler.schedule( + reportMetricsInterval, reportMetricsInterval, self, ReportTick) + + // subscribe to ClusterMetricsChanged, re-subscribe when restart + override def preStart(): Unit = cluster.subscribe(self, classOf[ClusterMetricsChanged]) + override def postStop(): Unit = { + cluster.unsubscribe(self) + reportMetricsTask.cancel() + super.postStop() + } + + def receive = { + case ClusterMetricsChanged(clusterMetrics) ⇒ nodeMetrics = clusterMetrics + case PhiResult(from, phiValues) ⇒ phiValuesObservedByNode += from -> phiValues + case ReportTick ⇒ + log.info(s"[${title}] in progress\n${formatMetrics}\n\n${formatPhi}") + case r: ClusterResult ⇒ + results :+= r + if (results.size == expectedResults) { + val aggregated = AggregatedClusterResult(title, maxDuration, totalClusterStats) + log.info(s"[${title}] completed in [${aggregated.duration.toMillis}] ms\n${aggregated.clusterStats}\n${formatMetrics}\n\n${formatPhi}") + reportTo foreach { _ ! 
aggregated } + context stop self + } + case _: CurrentClusterState ⇒ + case ReportTo(ref) ⇒ reportTo = ref + } + + def maxDuration = results.map(_.duration).max + + def totalClusterStats = results.map(_.clusterStats).foldLeft(ClusterStats()) { (acc, s) ⇒ + ClusterStats( + receivedGossipCount = acc.receivedGossipCount + s.receivedGossipCount, + mergeConflictCount = acc.mergeConflictCount + s.mergeConflictCount, + mergeCount = acc.mergeCount + s.mergeCount, + mergeDetectedCount = acc.mergeDetectedCount + s.mergeDetectedCount) + } + + def formatMetrics: String = { + import akka.cluster.Member.addressOrdering + (formatMetricsHeader +: (nodeMetrics.toSeq.sortBy(_.address) map formatMetricsLine)).mkString("\n") + } + + def formatMetricsHeader: String = "Node\tHeap (MB)\tCPU (%)\tLoad" + + def formatMetricsLine(nodeMetrics: NodeMetrics): String = { + val heap = nodeMetrics match { + case HeapMemory(address, timestamp, used, committed, max) ⇒ + (used.doubleValue / 1024 / 1024).form + case _ ⇒ "" + } + val cpuAndLoad = nodeMetrics match { + case Cpu(address, timestamp, loadOption, cpuOption, processors) ⇒ + format(cpuOption) + "\t" + format(loadOption) + case _ ⇒ "N/A\tN/A" + } + s"${nodeMetrics.address}\t${heap}\t${cpuAndLoad}" + } + + def format(opt: Option[Double]) = opt match { + case None ⇒ "N/A" + case Some(x) ⇒ x.form + } + + def formatPhi: String = { + if (phiValuesObservedByNode.isEmpty) "" + else { + import akka.cluster.Member.addressOrdering + val lines = + for { + (monitor, phiValues) ← phiValuesObservedByNode.toSeq + phi ← phiValues.toSeq.sortBy(_.address) + } yield formatPhiLine(monitor, phi.address, phi) + + (formatPhiHeader +: lines).mkString("\n") + } + } + + def formatPhiHeader: String = "Monitor\tSubject\tcount\tcount phi > 1.0\tmax phi" + + def formatPhiLine(monitor: Address, subject: Address, phi: PhiValue): String = + s"${monitor}\t${subject}\t${phi.count}\t${phi.countAboveOne}\t${phi.max.form}" + + } + + /** + * Keeps cluster statistics and metrics 
reported by + * ClusterResultAggregator. Logs the list of historical + * results when a new AggregatedClusterResult is received. + */ + class ClusterResultHistory extends Actor with ActorLogging { + var history = Vector.empty[AggregatedClusterResult] + + def receive = { + case result: AggregatedClusterResult ⇒ + history :+= result + log.info("Cluster result history\n" + formatHistory) + } + + def formatHistory: String = + (formatHistoryHeader +: (history map formatHistoryLine)).mkString("\n") + + def formatHistoryHeader: String = "title\tduration (ms)\tgossip count\tmerge count" + + def formatHistoryLine(result: AggregatedClusterResult): String = + s"${result.title}\t${result.duration.toMillis}\t${result.clusterStats.receivedGossipCount}\t${result.clusterStats.mergeCount}" + + } + + /** + * Collect phi values of the failure detector and report to the + * central ClusterResultAggregator. + */ + class PhiObserver extends Actor with ActorLogging { + val cluster = Cluster(context.system) + val fd = cluster.failureDetector.asInstanceOf[AccrualFailureDetector] + var reportTo: Option[ActorRef] = None + val emptyPhiByNode = Map.empty[Address, PhiValue].withDefault(address ⇒ PhiValue(address, 0, 0, 0.0)) + var phiByNode = emptyPhiByNode + var nodes = Set.empty[Address] + + import context.dispatcher + val checkPhiTask = context.system.scheduler.schedule( + 1.second, 1.second, self, PhiTick) + + // subscribe to MemberEvent, re-subscribe when restart + override def preStart(): Unit = cluster.subscribe(self, classOf[MemberEvent]) + override def postStop(): Unit = { + cluster.unsubscribe(self) + checkPhiTask.cancel() + super.postStop() + } + + def receive = { + case PhiTick ⇒ + nodes foreach { node ⇒ + val previous = phiByNode(node) + val φ = fd.phi(node) + if (φ > 0 || fd.isMonitoring(node)) { + val aboveOne = if (!φ.isInfinite && φ > 1.0) 1 else 0 + phiByNode += node -> PhiValue(node, previous.countAboveOne + aboveOne, previous.count + 1, + math.max(previous.max, φ)) + } + } + 
reportTo foreach { _ ! PhiResult(cluster.selfAddress, phiByNode.values.toSet) } + case state: CurrentClusterState ⇒ nodes = state.members.map(_.address) + case memberEvent: MemberEvent ⇒ nodes += memberEvent.member.address + case ReportTo(ref) ⇒ reportTo = ref + case Reset ⇒ + phiByNode = emptyPhiByNode + nodes = Set.empty[Address] + cluster.unsubscribe(self) + cluster.subscribe(self, classOf[MemberEvent]) + + } + } + + /** + * Master of routers + * + * Flow control, to not flood the consumers, is handled by scheduling a + * batch of messages to be sent to the router when half of the number + * of outstanding messages remains. + * + * It uses a simple message retry mechanism. If an ack of a sent message + * is not received within a timeout, that message will be resent to the router, + * infinite number of times. + * + * When it receives the `End` command it will stop sending messages to the router, + * resends continuous, until all outstanding acks have been received, and then + * finally it replies with `WorkResult` to the sender of the `End` command, and stops + * itself. + */ + class Master(settings: StressMultiJvmSpec.Settings, batchInterval: FiniteDuration, tree: Boolean) extends Actor { + val workers = context.actorOf(Props[Worker].withRouter(FromConfig), "workers") + val payload = Array.fill(settings.payloadSize)(ThreadLocalRandom.current.nextInt(127).toByte) + val retryTimeout = 5.seconds.dilated(context.system) + val idCounter = Iterator from 0 + var sendCounter = 0L + var ackCounter = 0L + var outstanding = Map.empty[JobId, JobState] + var startTime = 0L + + import context.dispatcher + val resendTask = context.system.scheduler.schedule(3.seconds, 3.seconds, self, RetryTick) + + override def postStop(): Unit = { + resendTask.cancel() + super.postStop() + } + + def receive = { + case Begin ⇒ + startTime = System.nanoTime + self ! 
SendBatch + context.become(working) + case RetryTick ⇒ + } + + def working: Receive = { + case Ack(id) ⇒ + outstanding -= id + ackCounter += 1 + if (outstanding.size == settings.workBatchSize / 2) + if (batchInterval == Duration.Zero) self ! SendBatch + else context.system.scheduler.scheduleOnce(batchInterval, self, SendBatch) + case SendBatch ⇒ sendJobs() + case RetryTick ⇒ resend() + case End ⇒ + done(sender) + context.become(ending(sender)) + } + + def ending(replyTo: ActorRef): Receive = { + case Ack(id) ⇒ + outstanding -= id + ackCounter += 1 + done(replyTo) + case SendBatch ⇒ + case RetryTick ⇒ resend() + } + + def done(replyTo: ActorRef): Unit = + if (outstanding.isEmpty) { + val duration = (System.nanoTime - startTime).nanos + replyTo ! WorkResult(duration, sendCounter, ackCounter) + context stop self + } + + def sendJobs(): Unit = { + 0 until settings.workBatchSize foreach { _ ⇒ + send(createJob()) + } + } + + def createJob(): Job = { + if (tree) TreeJob(idCounter.next(), payload, ThreadLocalRandom.current.nextInt(settings.treeWidth), + settings.treeLevels, settings.treeWidth) + else SimpleJob(idCounter.next(), payload) + } + + def resend(): Unit = { + outstanding.values foreach { jobState ⇒ + if (jobState.deadline.isOverdue) + send(jobState.job) + } + } + + def send(job: Job): Unit = { + outstanding += job.id -> JobState(Deadline.now + retryTimeout, job) + sendCounter += 1 + workers ! job + } + } + + /** + * Used by Master as routee + */ + class Worker extends Actor with ActorLogging { + def receive = { + case SimpleJob(id, payload) ⇒ sender ! 
Ack(id) + case TreeJob(id, payload, idx, levels, width) ⇒ + // create the actors when first TreeJob message is received + val totalActors = ((width * math.pow(width, levels) - 1) / (width - 1)).toInt + log.info("Creating [{}] actors in a tree structure of [{}] levels and each actor has [{}] children", + totalActors, levels, width) + val tree = context.actorOf(Props(new TreeNode(levels, width)), "tree") + tree forward (idx, SimpleJob(id, payload)) + context.become(treeWorker(tree)) + } + + def treeWorker(tree: ActorRef): Receive = { + case SimpleJob(id, payload) ⇒ sender ! Ack(id) + case TreeJob(id, payload, idx, _, _) ⇒ + tree forward (idx, SimpleJob(id, payload)) + } + } + + class TreeNode(level: Int, width: Int) extends Actor { + require(level >= 1) + def createChild(): Actor = if (level == 1) new Leaf else new TreeNode(level - 1, width) + val indexedChildren = + 0 until width map { i ⇒ context.actorOf(Props(createChild()), name = i.toString) } toVector + + def receive = { + case (idx: Int, job: SimpleJob) if idx < width ⇒ indexedChildren(idx) forward (idx, job) + } + } + + class Leaf extends Actor { + def receive = { + case (_: Int, job: SimpleJob) ⇒ sender ! Ack(job.id) + } + } + + /** + * Used for remote death watch testing + */ + class Watchee extends Actor { + def receive = Actor.emptyBehavior + } + + /** + * Used for remote supervision testing + */ + class Supervisor extends Actor { + + var restartCount = 0 + + override val supervisorStrategy = + OneForOneStrategy(maxNrOfRetries = 5, withinTimeRange = 1 minute) { + case _: Exception ⇒ + restartCount += 1 + Restart + } + + def receive = { + case props: Props ⇒ context.actorOf(props) + case e: Exception ⇒ context.children foreach { _ ! e } + case GetChildrenCount ⇒ sender ! 
ChildrenCount(context.children.size, restartCount) + case Reset ⇒ + require(context.children.isEmpty, + s"ResetChildrenCount not allowed when children exists, [${context.children.size}]") + restartCount = 0 + } + } + + /** + * Child of Supervisor for remote supervision testing + */ + class RemoteChild extends Actor { + def receive = { + case e: Exception ⇒ throw e + } + } + + case object Begin + case object End + case object RetryTick + case object ReportTick + case object PhiTick + case class PhiResult(from: Address, phiValues: Set[PhiValue]) + case class PhiValue(address: Address, countAboveOne: Int, count: Int, max: Double) + case class ReportTo(ref: Option[ActorRef]) + + type JobId = Int + trait Job { def id: JobId } + case class SimpleJob(id: JobId, payload: Any) extends Job + case class TreeJob(id: JobId, payload: Any, idx: Int, levels: Int, width: Int) extends Job + case class Ack(id: JobId) + case class JobState(deadline: Deadline, job: Job) + case class WorkResult(duration: Duration, sendCount: Long, ackCount: Long) { + def retryCount: Long = sendCount - ackCount + def jobsPerSecond: Double = ackCount * 1000.0 / duration.toMillis + } + case object SendBatch + case class CreateTree(levels: Int, width: Int) + + case object GetChildrenCount + case class ChildrenCount(numberOfChildren: Int, numberOfChildRestarts: Int) + case object Reset + +} + +class StressMultiJvmNode1 extends StressSpec +class StressMultiJvmNode2 extends StressSpec +class StressMultiJvmNode3 extends StressSpec +class StressMultiJvmNode4 extends StressSpec +class StressMultiJvmNode5 extends StressSpec +class StressMultiJvmNode6 extends StressSpec +class StressMultiJvmNode7 extends StressSpec +class StressMultiJvmNode8 extends StressSpec +class StressMultiJvmNode9 extends StressSpec +class StressMultiJvmNode10 extends StressSpec +class StressMultiJvmNode11 extends StressSpec +class StressMultiJvmNode12 extends StressSpec +class StressMultiJvmNode13 extends StressSpec + +abstract class 
StressSpec + extends MultiNodeSpec(StressMultiJvmSpec) + with MultiNodeClusterSpec with BeforeAndAfterEach with ImplicitSender { + + import StressMultiJvmSpec._ + import ClusterEvent._ + + val settings = new Settings(system.settings.config) + import settings._ + + var step = 0 + var nbrUsedRoles = 0 + + override def beforeEach(): Unit = { step += 1 } + + override def muteLog(sys: ActorSystem = system): Unit = { + super.muteLog(sys) + sys.eventStream.publish(Mute(EventFilter[RuntimeException](pattern = ".*Simulated exception.*"))) + sys.eventStream.publish(Mute(EventFilter.warning(pattern = ".*PhiResult.*"))) + sys.eventStream.publish(Mute(EventFilter.warning(pattern = ".*SendBatch.*"))) + } + + val seedNodes = roles.take(numberOfSeedNodes) + + override def cluster: Cluster = { + createWorker + super.cluster + } + + // always create one worker when the cluster is started + lazy val createWorker: Unit = + system.actorOf(Props[Worker], "worker") + + def createResultAggregator(title: String, expectedResults: Int, includeInHistory: Boolean): Unit = { + runOn(roles.head) { + val aggregator = system.actorOf(Props(new ClusterResultAggregator(title, expectedResults, reportMetricsInterval)), + name = "result" + step) + if (includeInHistory) aggregator ! ReportTo(Some(clusterResultHistory)) + else aggregator ! ReportTo(None) + } + enterBarrier("result-aggregator-created-" + step) + runOn(roles.take(nbrUsedRoles): _*) { + phiObserver ! 
ReportTo(Some(clusterResultAggregator)) + } + } + + def clusterResultAggregator: ActorRef = system.actorFor(node(roles.head) / "user" / ("result" + step)) + + lazy val clusterResultHistory = system.actorOf(Props[ClusterResultHistory], "resultHistory") + + lazy val phiObserver = system.actorOf(Props[PhiObserver], "phiObserver") + + def awaitClusterResult: Unit = { + runOn(roles.head) { + val r = clusterResultAggregator + watch(r) + expectMsgPF(remaining) { case Terminated(`r`) ⇒ true } + } + enterBarrier("cluster-result-done-" + step) + } + + def joinOneByOne(numberOfNodes: Int): Unit = { + 0 until numberOfNodes foreach { _ ⇒ + joinOne() + nbrUsedRoles += 1 + step += 1 + } + } + + def convergenceWithin(base: FiniteDuration, nodes: Int): FiniteDuration = + (base.toMillis * convergenceWithinFactor * nodes).millis + + def joinOne(): Unit = within(5.seconds + convergenceWithin(2.seconds, nbrUsedRoles + 1)) { + val currentRoles = roles.take(nbrUsedRoles + 1) + val title = s"join one to ${nbrUsedRoles} nodes cluster" + createResultAggregator(title, expectedResults = currentRoles.size, includeInHistory = true) + runOn(currentRoles: _*) { + reportResult { + runOn(currentRoles.last) { + cluster.join(roles.head) + } + awaitUpConvergence(currentRoles.size, timeout = remaining) + } + + } + awaitClusterResult + enterBarrier("join-one-" + step) + } + + def joinSeveral(numberOfNodes: Int, toSeedNodes: Boolean): Unit = + within(10.seconds + convergenceWithin(3.seconds, nbrUsedRoles + numberOfNodes)) { + val currentRoles = roles.take(nbrUsedRoles + numberOfNodes) + val joiningRoles = currentRoles.takeRight(numberOfNodes) + val title = s"join ${numberOfNodes} to ${if (toSeedNodes) "seed nodes" else "one node"}, in ${nbrUsedRoles} nodes cluster" + createResultAggregator(title, expectedResults = currentRoles.size, includeInHistory = true) + runOn(currentRoles: _*) { + reportResult { + runOn(joiningRoles: _*) { + if (toSeedNodes) cluster.joinSeedNodes(seedNodes.toIndexedSeq map address) 
+ else cluster.join(roles.head) + } + awaitUpConvergence(currentRoles.size, timeout = remaining) + } + + } + awaitClusterResult + enterBarrier("join-several-" + step) + } + + def removeOneByOne(numberOfNodes: Int, shutdown: Boolean): Unit = { + 0 until numberOfNodes foreach { _ ⇒ + removeOne(shutdown) + nbrUsedRoles -= 1 + step += 1 + } + } + + def removeOne(shutdown: Boolean): Unit = within(10.seconds + convergenceWithin(3.seconds, nbrUsedRoles - 1)) { + val currentRoles = roles.take(nbrUsedRoles - 1) + val title = s"${if (shutdown) "shutdown" else "remove"} one from ${nbrUsedRoles} nodes cluster" + createResultAggregator(title, expectedResults = currentRoles.size, includeInHistory = true) + val removeRole = roles(nbrUsedRoles - 1) + val removeAddress = address(removeRole) + runOn(removeRole) { + system.actorOf(Props[Watchee], "watchee") + if (!shutdown) cluster.leave(myself) + } + enterBarrier("watchee-created-" + step) + runOn(roles.head) { + watch(system.actorFor(node(removeRole) / "user" / "watchee")) + } + enterBarrier("watch-estabilished-" + step) + + runOn(currentRoles: _*) { + reportResult { + runOn(roles.head) { + if (shutdown) { + log.info("Shutting down [{}]", removeAddress) + testConductor.shutdown(removeRole, 0).await + } + } + awaitUpConvergence(currentRoles.size, timeout = remaining) + } + } + + runOn(roles.head) { + val expectedRef = system.actorFor(RootActorPath(removeAddress) / "user" / "watchee") + expectMsgPF(remaining) { + case Terminated(`expectedRef`) ⇒ true + } + } + enterBarrier("watch-verified-" + step) + + awaitClusterResult + enterBarrier("remove-one-" + step) + } + + def removeSeveral(numberOfNodes: Int, shutdown: Boolean): Unit = + within(10.seconds + convergenceWithin(5.seconds, nbrUsedRoles - numberOfNodes)) { + val currentRoles = roles.take(nbrUsedRoles - numberOfNodes) + val removeRoles = roles.slice(currentRoles.size, nbrUsedRoles) + val title = s"${if (shutdown) "shutdown" else "leave"} ${numberOfNodes} in ${nbrUsedRoles} nodes 
cluster" + createResultAggregator(title, expectedResults = currentRoles.size, includeInHistory = true) + runOn(removeRoles: _*) { + if (!shutdown) cluster.leave(myself) + } + runOn(currentRoles: _*) { + reportResult { + runOn(roles.head) { + if (shutdown) removeRoles.foreach { r ⇒ + log.info("Shutting down [{}]", address(r)) + testConductor.shutdown(r, 0).await + } + } + awaitUpConvergence(currentRoles.size, timeout = remaining) + } + } + awaitClusterResult + enterBarrier("remove-several-" + step) + } + + def reportResult[T](thunk: ⇒ T): T = { + val startTime = System.nanoTime + val startStats = clusterView.latestStats + + val returnValue = thunk + + val duration = (System.nanoTime - startTime).nanos + val latestStats = clusterView.latestStats + val clusterStats = ClusterStats( + receivedGossipCount = latestStats.receivedGossipCount - startStats.receivedGossipCount, + mergeConflictCount = latestStats.mergeConflictCount - startStats.mergeConflictCount, + mergeCount = latestStats.mergeCount - startStats.mergeCount, + mergeDetectedCount = latestStats.mergeDetectedCount - startStats.mergeDetectedCount) + clusterResultAggregator ! ClusterResult(cluster.selfAddress, duration, clusterStats) + returnValue + } + + def exerciseJoinRemove(title: String, duration: FiniteDuration): Unit = { + val activeRoles = roles.take(numberOfNodesJoinRemove) + val loopDuration = 10.seconds + convergenceWithin(4.seconds, nbrUsedRoles + activeRoles.size) + val deadline = Deadline.now + duration - loopDuration + val usedRoles = roles.take(nbrUsedRoles) + val usedAddresses = usedRoles.map(address(_)).toSet + + @tailrec def loop(counter: Int, previousAS: Option[ActorSystem], allPreviousAddresses: Set[Address]): Option[ActorSystem] = { + if (deadline.isOverdue) previousAS + else { + val t = title + " round " + counter + runOn(usedRoles: _*) { + phiObserver ! 
Reset + } + createResultAggregator(t, expectedResults = nbrUsedRoles, includeInHistory = true) + val (nextAS, nextAddresses) = within(loopDuration) { + reportResult { + val nextAS = + if (activeRoles contains myself) { + previousAS foreach { _.shutdown() } + val sys = ActorSystem(system.name, system.settings.config) + muteLog(sys) + Cluster(sys).joinSeedNodes(seedNodes.toIndexedSeq map address) + Some(sys) + } else previousAS + runOn(usedRoles: _*) { + awaitUpConvergence( + nbrUsedRoles + activeRoles.size, + canNotBePartOfMemberRing = allPreviousAddresses, + timeout = remaining) + } + val nextAddresses = clusterView.members.map(_.address) -- usedAddresses + runOn(usedRoles: _*) { + nextAddresses.size must be(numberOfNodesJoinRemove) + } + + enterBarrier("join-remove-" + step) + (nextAS, nextAddresses) + } + } + awaitClusterResult + + step += 1 + loop(counter + 1, nextAS, nextAddresses) + } + } + + loop(1, None, Set.empty) foreach { _.shutdown } + within(loopDuration) { + runOn(usedRoles: _*) { + awaitUpConvergence(nbrUsedRoles, timeout = remaining) + phiObserver ! Reset + } + } + enterBarrier("join-remove-shutdown-" + step) + + } + + def master: ActorRef = system.actorFor("/user/master-" + myself.name) + + def exerciseRouters(title: String, duration: FiniteDuration, batchInterval: FiniteDuration, + expectDroppedMessages: Boolean, tree: Boolean): Unit = + within(duration + 10.seconds) { + createResultAggregator(title, expectedResults = nbrUsedRoles, includeInHistory = false) + + val (masterRoles, otherRoles) = roles.take(nbrUsedRoles).splitAt(3) + runOn(masterRoles: _*) { + reportResult { + val m = system.actorOf(Props(new Master(settings, batchInterval, tree)), + name = "master-" + myself.name) + m ! 
Begin + import system.dispatcher + system.scheduler.scheduleOnce(duration) { + m.tell(End, testActor) + } + val workResult = awaitWorkResult + workResult.sendCount must be > (0L) + workResult.ackCount must be > (0L) + if (!expectDroppedMessages) + workResult.retryCount must be(0) + + enterBarrier("routers-done-" + step) + } + } + runOn(otherRoles: _*) { + reportResult { + enterBarrier("routers-done-" + step) + } + } + + awaitClusterResult + } + + def awaitWorkResult: WorkResult = { + val m = master + val workResult = expectMsgType[WorkResult] + log.info("{} result, [{}] jobs/s, retried [{}] of [{}] msg", m.path.name, + workResult.jobsPerSecond.form, + workResult.retryCount, workResult.sendCount) + watch(m) + expectMsgPF(remaining) { case Terminated(`m`) ⇒ true } + workResult + } + + def exerciseSupervision(title: String, duration: FiniteDuration): Unit = + within(duration + 10.seconds) { + val supervisor = system.actorOf(Props[Supervisor], "supervisor") + while (remaining > 10.seconds) { + createResultAggregator(title, expectedResults = nbrUsedRoles, includeInHistory = false) + + reportResult { + roles.take(nbrUsedRoles) foreach { r ⇒ + supervisor ! Props[RemoteChild].withDeploy(Deploy(scope = RemoteScope(address(r)))) + } + supervisor ! GetChildrenCount + expectMsgType[ChildrenCount] must be(ChildrenCount(nbrUsedRoles, 0)) + + 1 to 5 foreach { _ ⇒ supervisor ! new RuntimeException("Simulated exception") } + awaitCond { + supervisor ! GetChildrenCount + val c = expectMsgType[ChildrenCount] + c == ChildrenCount(nbrUsedRoles, 5 * nbrUsedRoles) + } + + // after 5 restart attempts the children should be stopped + supervisor ! new RuntimeException("Simulated exception") + awaitCond { + supervisor ! GetChildrenCount + val c = expectMsgType[ChildrenCount] + // zero children + c == ChildrenCount(0, 6 * nbrUsedRoles) + } + supervisor ! 
Reset + + } + + awaitClusterResult + step += 1 + } + } + + "A cluster under stress" must { + + "join seed nodes" taggedAs LongRunningTest in within(20 seconds) { + + val otherNodesJoiningSeedNodes = roles.slice(numberOfSeedNodes, numberOfSeedNodes + numberOfNodesJoiningToSeedNodesInitially) + val size = seedNodes.size + otherNodesJoiningSeedNodes.size + + createResultAggregator("join seed nodes", expectedResults = size, includeInHistory = true) + + runOn((seedNodes ++ otherNodesJoiningSeedNodes): _*) { + reportResult { + cluster.joinSeedNodes(seedNodes.toIndexedSeq map address) + awaitUpConvergence(size) + } + } + + awaitClusterResult + + nbrUsedRoles += size + enterBarrier("after-" + step) + } + + "start routers that are running while nodes are joining" taggedAs LongRunningTest in { + runOn(roles.take(3): _*) { + system.actorOf(Props(new Master(settings, settings.workBatchInterval, tree = false)), + name = "master-" + myself.name) ! Begin + } + enterBarrier("after-" + step) + } + + "join nodes one-by-one to small cluster" taggedAs LongRunningTest in { + joinOneByOne(numberOfNodesJoiningOneByOneSmall) + enterBarrier("after-" + step) + } + + "join several nodes to one node" taggedAs LongRunningTest in { + joinSeveral(numberOfNodesJoiningToOneNode, toSeedNodes = false) + nbrUsedRoles += numberOfNodesJoiningToOneNode + enterBarrier("after-" + step) + } + + "join several nodes to seed nodes" taggedAs LongRunningTest in { + joinSeveral(numberOfNodesJoiningToOneNode, toSeedNodes = true) + nbrUsedRoles += numberOfNodesJoiningToSeedNodes + enterBarrier("after-" + step) + } + + "join nodes one-by-one to large cluster" taggedAs LongRunningTest in { + joinOneByOne(numberOfNodesJoiningOneByOneLarge) + enterBarrier("after-" + step) + } + + "end routers that are running while nodes are joining" taggedAs LongRunningTest in within(30.seconds) { + runOn(roles.take(3): _*) { + val m = master + m.tell(End, testActor) + val workResult = awaitWorkResult + workResult.retryCount must 
be(0) + workResult.sendCount must be > (0L) + workResult.ackCount must be > (0L) + } + enterBarrier("after-" + step) + } + + "use routers with normal throughput" taggedAs LongRunningTest in { + exerciseRouters("use routers with normal throughput", normalThroughputDuration, + batchInterval = workBatchInterval, expectDroppedMessages = false, tree = false) + enterBarrier("after-" + step) + } + + "use routers with high throughput" taggedAs LongRunningTest in { + exerciseRouters("use routers with high throughput", highThroughputDuration, + batchInterval = Duration.Zero, expectDroppedMessages = false, tree = false) + enterBarrier("after-" + step) + } + + "use many actors with normal throughput" taggedAs LongRunningTest in { + exerciseRouters("use many actors with normal throughput", normalThroughputDuration, + batchInterval = workBatchInterval, expectDroppedMessages = false, tree = true) + enterBarrier("after-" + step) + } + + "use many actors with high throughput" taggedAs LongRunningTest in { + exerciseRouters("use many actors with high throughput", highThroughputDuration, + batchInterval = Duration.Zero, expectDroppedMessages = false, tree = true) + enterBarrier("after-" + step) + } + + "excercise join/remove/join/remove" taggedAs LongRunningTest in { + exerciseJoinRemove("excercise join/remove", joinRemoveDuration) + enterBarrier("after-" + step) + } + + "excercise supervision" taggedAs LongRunningTest in { + exerciseSupervision("excercise supervision", supervisionDuration) + enterBarrier("after-" + step) + } + + "start routers that are running while nodes are removed" taggedAs LongRunningTest in { + runOn(roles.take(3): _*) { + system.actorOf(Props(new Master(settings, settings.workBatchInterval, tree = false)), + name = "master-" + myself.name) ! 
Begin + } + enterBarrier("after-" + step) + } + + "leave nodes one-by-one from large cluster" taggedAs LongRunningTest in { + removeOneByOne(numberOfNodesLeavingOneByOneLarge, shutdown = false) + enterBarrier("after-" + step) + } + + "shutdown nodes one-by-one from large cluster" taggedAs LongRunningTest in { + removeOneByOne(numberOfNodesShutdownOneByOneLarge, shutdown = true) + enterBarrier("after-" + step) + } + + "leave several nodes" taggedAs LongRunningTest in { + removeSeveral(numberOfNodesLeaving, shutdown = false) + nbrUsedRoles -= numberOfNodesLeaving + enterBarrier("after-" + step) + } + + "shutdown several nodes" taggedAs LongRunningTest in { + removeSeveral(numberOfNodesShutdown, shutdown = true) + nbrUsedRoles -= numberOfNodesShutdown + enterBarrier("after-" + step) + } + + "leave nodes one-by-one from small cluster" taggedAs LongRunningTest in { + removeOneByOne(numberOfNodesLeavingOneByOneSmall, shutdown = false) + enterBarrier("after-" + step) + } + + "shutdown nodes one-by-one from small cluster" taggedAs LongRunningTest in { + removeOneByOne(numberOfNodesShutdownOneByOneSmall, shutdown = true) + enterBarrier("after-" + step) + } + + "end routers that are running while nodes are removed" taggedAs LongRunningTest in within(30.seconds) { + runOn(roles.take(3): _*) { + val m = master + m.tell(End, testActor) + val workResult = awaitWorkResult + workResult.sendCount must be > (0L) + workResult.ackCount must be > (0L) + } + enterBarrier("after-" + step) + } + + } +} diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala index 581eca3978..2f0325c51a 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.cluster diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/TransitionSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/TransitionSpec.scala index 4c9054d3d1..c7c6bf685e 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/TransitionSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/TransitionSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.cluster diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/UnreachableNodeRejoinsClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/UnreachableNodeRejoinsClusterSpec.scala index 45760a3bcd..c7666ba395 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/UnreachableNodeRejoinsClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/UnreachableNodeRejoinsClusterSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.cluster @@ -25,7 +25,7 @@ case class UnreachableNodeRejoinsClusterMultiNodeConfig(failureDetectorPuppet: B commonConfig(ConfigFactory.parseString( """ - akka.remoting.log-remote-lifecycle-events = off + akka.remote.log-remote-lifecycle-events = off akka.cluster.publish-stats-interval = 0s akka.loglevel = INFO """).withFallback(debugConfig(on = false).withFallback(MultiNodeClusterSpec.clusterConfig))) @@ -67,7 +67,7 @@ abstract class UnreachableNodeRejoinsClusterSpec(multiNodeConfig: UnreachableNod enterBarrier("after_" + endBarrierNumber) } - "A cluster of " + roles.size + " members" ignore { + "A cluster of " + roles.size + " members" must { "reach initial convergence" taggedAs LongRunningTest in { awaitClusterUp(roles: _*) @@ -130,7 +130,7 @@ abstract class UnreachableNodeRejoinsClusterSpec(multiNodeConfig: UnreachableNod } runOn(allBut(victim): _*) { - awaitUpConvergence(roles.size - 1, List(victim)) + awaitUpConvergence(roles.size - 1, Set(victim)) } endBarrier diff 
--git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/AdaptiveLoadBalancingRouterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/AdaptiveLoadBalancingRouterSpec.scala index 723ef6b8ec..c98413c21a 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/AdaptiveLoadBalancingRouterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/AdaptiveLoadBalancingRouterSpec.scala @@ -1,5 +1,5 @@ /* - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.cluster.routing diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterConsistentHashingRouterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterConsistentHashingRouterSpec.scala index daf4e81038..7f6cdba39a 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterConsistentHashingRouterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterConsistentHashingRouterSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.cluster.routing diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterRoundRobinRoutedActorSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterRoundRobinRoutedActorSpec.scala index 0098da695b..0caf029de2 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterRoundRobinRoutedActorSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/routing/ClusterRoundRobinRoutedActorSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.cluster.routing @@ -110,6 +110,9 @@ abstract class ClusterRoundRobinRoutedActorSpec extends MultiNodeSpec(ClusterRou case a ⇒ a } + def currentRoutees(router: ActorRef) = + Await.result(router ? 
CurrentRoutees, remaining).asInstanceOf[RouterRoutees].routees + "A cluster router with a RoundRobin router" must { "start cluster with 2 nodes" taggedAs LongRunningTest in { awaitClusterUp(first, second) @@ -121,6 +124,9 @@ abstract class ClusterRoundRobinRoutedActorSpec extends MultiNodeSpec(ClusterRou runOn(first) { router1.isInstanceOf[RoutedActorRef] must be(true) + // max-nr-of-instances-per-node=2 times 2 nodes + awaitCond(currentRoutees(router1).size == 4) + val iterationCount = 10 for (i ← 0 until iterationCount) { router1 ! "hit" @@ -146,6 +152,9 @@ abstract class ClusterRoundRobinRoutedActorSpec extends MultiNodeSpec(ClusterRou enterBarrier("myservice-started") runOn(first) { + // 2 nodes, 1 routee on each node + awaitCond(currentRoutees(router4).size == 2) + val iterationCount = 10 for (i ← 0 until iterationCount) { router4 ! "hit" @@ -169,6 +178,9 @@ abstract class ClusterRoundRobinRoutedActorSpec extends MultiNodeSpec(ClusterRou awaitClusterUp(first, second, third, fourth) runOn(first) { + // max-nr-of-instances-per-node=2 times 4 nodes + awaitCond(currentRoutees(router1).size == 8) + val iterationCount = 10 for (i ← 0 until iterationCount) { router1 ! "hit" @@ -188,6 +200,9 @@ abstract class ClusterRoundRobinRoutedActorSpec extends MultiNodeSpec(ClusterRou // cluster consists of first, second, third and fourth runOn(first) { + // 4 nodes, 1 routee on each node + awaitCond(currentRoutees(router4).size == 4) + val iterationCount = 10 for (i ← 0 until iterationCount) { router4 ! "hit" @@ -205,6 +220,9 @@ abstract class ClusterRoundRobinRoutedActorSpec extends MultiNodeSpec(ClusterRou "deploy routees to only remote nodes when allow-local-routees = off" taggedAs LongRunningTest in { runOn(first) { + // max-nr-of-instances-per-node=1 times 3 nodes + awaitCond(currentRoutees(router3).size == 3) + val iterationCount = 10 for (i ← 0 until iterationCount) { router3 ! 
"hit" @@ -227,6 +245,9 @@ abstract class ClusterRoundRobinRoutedActorSpec extends MultiNodeSpec(ClusterRou runOn(first) { router2.isInstanceOf[RoutedActorRef] must be(true) + // totalInstances = 3, maxInstancesPerNode = 1 + awaitCond(currentRoutees(router2).size == 3) + val iterationCount = 10 for (i ← 0 until iterationCount) { router2 ! "hit" @@ -235,8 +256,8 @@ abstract class ClusterRoundRobinRoutedActorSpec extends MultiNodeSpec(ClusterRou val replies = receiveReplies(DeployRoutee, iterationCount) // note that router2 has totalInstances = 3, maxInstancesPerNode = 1 - val currentRoutees = Await.result(router2 ? CurrentRoutees, 5 seconds).asInstanceOf[RouterRoutees].routees - val routeeAddresses = currentRoutees map fullAddress + val routees = currentRoutees(router2) + val routeeAddresses = routees map fullAddress routeeAddresses.size must be(3) replies.values.sum must be(iterationCount) @@ -249,8 +270,8 @@ abstract class ClusterRoundRobinRoutedActorSpec extends MultiNodeSpec(ClusterRou muteMarkingAsUnreachable() runOn(first) { - def currentRoutees = Await.result(router2 ? CurrentRoutees, 5 seconds).asInstanceOf[RouterRoutees].routees - def routeeAddresses = (currentRoutees map fullAddress).toSet + def routees = currentRoutees(router2) + def routeeAddresses = (routees map fullAddress).toSet val notUsedAddress = ((roles map address).toSet -- routeeAddresses).head diff --git a/akka-cluster/src/test/scala/akka/cluster/AccrualFailureDetectorSpec.scala b/akka-cluster/src/test/scala/akka/cluster/AccrualFailureDetectorSpec.scala index 9af8eacc7e..7f0b5bea14 100644 --- a/akka-cluster/src/test/scala/akka/cluster/AccrualFailureDetectorSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/AccrualFailureDetectorSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.cluster @@ -24,8 +24,8 @@ class AccrualFailureDetectorSpec extends AkkaSpec(""" } "An AccrualFailureDetector" must { - val conn = Address("tcp.akka", "", "localhost", 2552) - val conn2 = Address("tcp.akka", "", "localhost", 2553) + val conn = Address("akka.tcp", "", "localhost", 2552) + val conn2 = Address("akka.tcp", "", "localhost", 2553) def fakeTimeGenerator(timeIntervals: immutable.Seq[Long]): () ⇒ Long = { var times = timeIntervals.tail.foldLeft(List[Long](timeIntervals.head))((acc, c) ⇒ acc ::: List[Long](acc.last + c)) diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala index ce7f7a4a70..4479520833 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.cluster @@ -28,12 +28,14 @@ class ClusterConfigSpec extends AkkaSpec { PeriodicTasksInitialDelay must be(1 seconds) GossipInterval must be(1 second) HeartbeatInterval must be(1 second) - NumberOfEndHeartbeats must be(4) + NumberOfEndHeartbeats must be(8) MonitoredByNrOfMembers must be(5) + HeartbeatRequestDelay must be(10 seconds) + HeartbeatExpectedResponseAfter must be(3 seconds) + HeartbeatRequestTimeToLive must be(1 minute) LeaderActionsInterval must be(1 second) UnreachableNodesReaperInterval must be(1 second) PublishStatsInterval must be(10 second) - JoinTimeout must be(60 seconds) AutoJoin must be(true) AutoDown must be(false) MinNrOfMembers must be(1) @@ -43,10 +45,6 @@ class ClusterConfigSpec extends AkkaSpec { MaxGossipMergeRate must be(5.0 plusOrMinus 0.0001) SchedulerTickDuration must be(33 millis) SchedulerTicksPerWheel must be(512) - SendCircuitBreakerSettings must be(CircuitBreakerSettings( - maxFailures = 3, - callTimeout = 2 seconds, - resetTimeout = 30 seconds)) 
MetricsEnabled must be(true) MetricsCollectorClass must be(classOf[SigarMetricsCollector].getName) MetricsInterval must be(3 seconds) diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterDeployerSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterDeployerSpec.scala index 6432b991ab..7aa7b81047 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterDeployerSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterDeployerSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.cluster @@ -29,7 +29,7 @@ object ClusterDeployerSpec { cluster.routees-path = "/user/myservice" } } - akka.remote.netty.port = 0 + akka.remote.netty.tcp.port = 0 """, ConfigParseOptions.defaults) class RecipeActor extends Actor { @@ -73,4 +73,4 @@ class ClusterDeployerSpec extends AkkaSpec(ClusterDeployerSpec.deployerConf) { } -} \ No newline at end of file +} diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventPublisherSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventPublisherSpec.scala index 6c13c79f23..fe6fbdb8f0 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventPublisherSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventPublisherSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.cluster @@ -23,11 +23,11 @@ class ClusterDomainEventPublisherSpec extends AkkaSpec with BeforeAndAfterEach with ImplicitSender { var publisher: ActorRef = _ - val a1 = Member(Address("tcp.akka", "sys", "a", 2552), Up) - val b1 = Member(Address("tcp.akka", "sys", "b", 2552), Up) - val c1 = Member(Address("tcp.akka", "sys", "c", 2552), Joining) - val c2 = Member(Address("tcp.akka", "sys", "c", 2552), Up) - val d1 = Member(Address("tcp.akka", "sys", "a", 2551), Up) + val a1 = Member(Address("akka.tcp", "sys", "a", 2552), Up) + val b1 = Member(Address("akka.tcp", "sys", "b", 2552), Up) + val c1 = Member(Address("akka.tcp", "sys", "c", 2552), Joining) + val c2 = Member(Address("akka.tcp", "sys", "c", 2552), Up) + val d1 = Member(Address("akka.tcp", "sys", "a", 2551), Up) val g0 = Gossip(members = SortedSet(a1)).seen(a1.address) val g1 = Gossip(members = SortedSet(a1, b1, c1)).seen(a1.address).seen(b1.address).seen(c1.address) @@ -36,16 +36,18 @@ class ClusterDomainEventPublisherSpec extends AkkaSpec val g4 = Gossip(members = SortedSet(d1, a1, b1, c2)).seen(a1.address) val g5 = Gossip(members = SortedSet(d1, a1, b1, c2)).seen(a1.address).seen(b1.address).seen(c2.address).seen(d1.address) - override def atStartup(): Unit = { - system.eventStream.subscribe(testActor, classOf[ClusterDomainEvent]) - } + // created in beforeEach + var memberSubscriber: TestProbe = _ override def beforeEach(): Unit = { + memberSubscriber = TestProbe() + system.eventStream.subscribe(memberSubscriber.ref, classOf[MemberEvent]) + system.eventStream.subscribe(memberSubscriber.ref, classOf[LeaderChanged]) + publisher = system.actorOf(Props[ClusterDomainEventPublisher]) publisher ! 
PublishChanges(g0) - expectMsg(MemberUp(a1)) - expectMsg(LeaderChanged(Some(a1.address))) - expectMsgType[SeenChanged] + memberSubscriber.expectMsg(MemberUp(a1)) + memberSubscriber.expectMsg(LeaderChanged(Some(a1.address))) } override def afterEach(): Unit = { @@ -56,83 +58,115 @@ class ClusterDomainEventPublisherSpec extends AkkaSpec "not publish MemberUp when there is no convergence" in { publisher ! PublishChanges(g2) - expectMsgType[SeenChanged] } "publish MemberEvents when there is convergence" in { publisher ! PublishChanges(g2) - expectMsgType[SeenChanged] publisher ! PublishChanges(g3) - expectMsg(MemberUp(b1)) - expectMsg(MemberUp(c2)) - expectMsgType[SeenChanged] + memberSubscriber.expectMsg(MemberUp(b1)) + memberSubscriber.expectMsg(MemberUp(c2)) } "publish leader changed when new leader after convergence" in { publisher ! PublishChanges(g4) - expectMsgType[SeenChanged] - expectNoMsg(1 second) + memberSubscriber.expectNoMsg(1 second) publisher ! PublishChanges(g5) - expectMsg(MemberUp(d1)) - expectMsg(MemberUp(b1)) - expectMsg(MemberUp(c2)) - expectMsg(LeaderChanged(Some(d1.address))) - expectMsgType[SeenChanged] + memberSubscriber.expectMsg(MemberUp(d1)) + memberSubscriber.expectMsg(MemberUp(b1)) + memberSubscriber.expectMsg(MemberUp(c2)) + memberSubscriber.expectMsg(LeaderChanged(Some(d1.address))) } "publish leader changed when new leader and convergence both before and after" in { // convergence both before and after publisher ! PublishChanges(g3) - expectMsg(MemberUp(b1)) - expectMsg(MemberUp(c2)) - expectMsgType[SeenChanged] + memberSubscriber.expectMsg(MemberUp(b1)) + memberSubscriber.expectMsg(MemberUp(c2)) publisher ! PublishChanges(g5) - expectMsg(MemberUp(d1)) - expectMsg(LeaderChanged(Some(d1.address))) - expectMsgType[SeenChanged] + memberSubscriber.expectMsg(MemberUp(d1)) + memberSubscriber.expectMsg(LeaderChanged(Some(d1.address))) } "not publish leader changed when not convergence" in { publisher ! 
PublishChanges(g4) - expectMsgType[SeenChanged] - expectNoMsg(1 second) + memberSubscriber.expectNoMsg(1 second) } "not publish leader changed when changed convergence but still same leader" in { publisher ! PublishChanges(g5) - expectMsg(MemberUp(d1)) - expectMsg(MemberUp(b1)) - expectMsg(MemberUp(c2)) - expectMsg(LeaderChanged(Some(d1.address))) - expectMsgType[SeenChanged] + memberSubscriber.expectMsg(MemberUp(d1)) + memberSubscriber.expectMsg(MemberUp(b1)) + memberSubscriber.expectMsg(MemberUp(c2)) + memberSubscriber.expectMsg(LeaderChanged(Some(d1.address))) publisher ! PublishChanges(g4) - expectMsgType[SeenChanged] + memberSubscriber.expectNoMsg(1 second) publisher ! PublishChanges(g5) - expectMsgType[SeenChanged] + memberSubscriber.expectNoMsg(1 second) } "send CurrentClusterState when subscribe" in { val subscriber = TestProbe() publisher ! Subscribe(subscriber.ref, classOf[ClusterDomainEvent]) + subscriber.expectMsgType[InstantClusterState] subscriber.expectMsgType[CurrentClusterState] // but only to the new subscriber - expectNoMsg(1 second) + memberSubscriber.expectNoMsg(1 second) } "support unsubscribe" in { val subscriber = TestProbe() - publisher ! Subscribe(subscriber.ref, classOf[ClusterDomainEvent]) + publisher ! Subscribe(subscriber.ref, classOf[MemberEvent]) subscriber.expectMsgType[CurrentClusterState] - publisher ! Unsubscribe(subscriber.ref, Some(classOf[ClusterDomainEvent])) + publisher ! Unsubscribe(subscriber.ref, Some(classOf[MemberEvent])) publisher ! PublishChanges(g3) subscriber.expectNoMsg(1 second) - // but testActor is still subscriber - expectMsg(MemberUp(b1)) - expectMsg(MemberUp(c2)) - expectMsgType[SeenChanged] + // but memberSubscriber is still subscriber + memberSubscriber.expectMsg(MemberUp(b1)) + memberSubscriber.expectMsg(MemberUp(c2)) } + + "publish clean state when PublishStart" in { + val subscriber = TestProbe() + publisher ! 
Subscribe(subscriber.ref, classOf[ClusterDomainEvent]) + subscriber.expectMsgType[InstantClusterState] + subscriber.expectMsgType[CurrentClusterState] + publisher ! PublishChanges(g3) + subscriber.expectMsg(InstantMemberUp(b1)) + subscriber.expectMsg(InstantMemberUp(c2)) + subscriber.expectMsg(MemberUp(b1)) + subscriber.expectMsg(MemberUp(c2)) + subscriber.expectMsgType[SeenChanged] + + publisher ! PublishStart + subscriber.expectMsgType[CurrentClusterState] must be(CurrentClusterState()) + } + + "publish immediately when subscribing to InstantMemberEvent" in { + val subscriber = TestProbe() + publisher ! Subscribe(subscriber.ref, classOf[InstantMemberEvent]) + subscriber.expectMsgType[InstantClusterState] + publisher ! PublishChanges(g2) + subscriber.expectMsg(InstantMemberUp(b1)) + subscriber.expectMsg(InstantMemberUp(c2)) + subscriber.expectNoMsg(1 second) + publisher ! PublishChanges(g3) + subscriber.expectNoMsg(1 second) + } + + "publish SeenChanged" in { + val subscriber = TestProbe() + publisher ! Subscribe(subscriber.ref, classOf[SeenChanged]) + subscriber.expectMsgType[CurrentClusterState] + publisher ! PublishChanges(g2) + subscriber.expectMsgType[SeenChanged] + subscriber.expectNoMsg(1 second) + publisher ! PublishChanges(g3) + subscriber.expectMsgType[SeenChanged] + subscriber.expectNoMsg(1 second) + } + } } diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventSpec.scala index 284f6676f0..76f0838a96 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterDomainEventSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.cluster @@ -15,19 +15,19 @@ class ClusterDomainEventSpec extends WordSpec with MustMatchers { import MemberStatus._ import ClusterEvent._ - val a1 = Member(Address("tcp.akka", "sys", "a", 2552), Up) - val a2 = Member(Address("tcp.akka", "sys", "a", 2552), Joining) - val a3 = Member(Address("tcp.akka", "sys", "a", 2552), Removed) - val b1 = Member(Address("tcp.akka", "sys", "b", 2552), Up) - val b2 = Member(Address("tcp.akka", "sys", "b", 2552), Removed) - val b3 = Member(Address("tcp.akka", "sys", "b", 2552), Down) - val c1 = Member(Address("tcp.akka", "sys", "c", 2552), Leaving) - val c2 = Member(Address("tcp.akka", "sys", "c", 2552), Up) - val d1 = Member(Address("tcp.akka", "sys", "d", 2552), Leaving) - val d2 = Member(Address("tcp.akka", "sys", "d", 2552), Removed) - val e1 = Member(Address("tcp.akka", "sys", "e", 2552), Joining) - val e2 = Member(Address("tcp.akka", "sys", "e", 2552), Up) - val e3 = Member(Address("tcp.akka", "sys", "e", 2552), Down) + val a1 = Member(Address("akka.tcp", "sys", "a", 2552), Up) + val a2 = Member(Address("akka.tcp", "sys", "a", 2552), Joining) + val a3 = Member(Address("akka.tcp", "sys", "a", 2552), Removed) + val b1 = Member(Address("akka.tcp", "sys", "b", 2552), Up) + val b2 = Member(Address("akka.tcp", "sys", "b", 2552), Removed) + val b3 = Member(Address("akka.tcp", "sys", "b", 2552), Down) + val c1 = Member(Address("akka.tcp", "sys", "c", 2552), Leaving) + val c2 = Member(Address("akka.tcp", "sys", "c", 2552), Up) + val d1 = Member(Address("akka.tcp", "sys", "d", 2552), Leaving) + val d2 = Member(Address("akka.tcp", "sys", "d", 2552), Removed) + val e1 = Member(Address("akka.tcp", "sys", "e", 2552), Joining) + val e2 = Member(Address("akka.tcp", "sys", "e", 2552), Up) + val e3 = Member(Address("akka.tcp", "sys", "e", 2552), Down) def converge(gossip: Gossip): (Gossip, Set[Address]) = ((gossip, Set.empty[Address]) /: gossip.members) { (gs, m) ⇒ (gs._1.seen(m.address), gs._2 + m.address) } diff --git 
a/akka-cluster/src/test/scala/akka/cluster/ClusterHeartbeatSenderStateSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterHeartbeatSenderStateSpec.scala index b9e6df49dc..d3a12d165d 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterHeartbeatSenderStateSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterHeartbeatSenderStateSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.cluster @@ -9,19 +9,19 @@ import org.scalatest.matchers.MustMatchers import akka.actor.Address import akka.routing.ConsistentHash import scala.concurrent.duration._ +import scala.collection.immutable @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) class ClusterHeartbeatSenderStateSpec extends WordSpec with MustMatchers { - val selfAddress = Address("tcp.akka", "sys", "myself", 2552) - val aa = Address("tcp.akka", "sys", "aa", 2552) - val bb = Address("tcp.akka", "sys", "bb", 2552) - val cc = Address("tcp.akka", "sys", "cc", 2552) - val dd = Address("tcp.akka", "sys", "dd", 2552) - val ee = Address("tcp.akka", "sys", "ee", 2552) + val selfAddress = Address("akka.tcp", "sys", "myself", 2552) + val aa = Address("akka.tcp", "sys", "aa", 2552) + val bb = Address("akka.tcp", "sys", "bb", 2552) + val cc = Address("akka.tcp", "sys", "cc", 2552) + val dd = Address("akka.tcp", "sys", "dd", 2552) + val ee = Address("akka.tcp", "sys", "ee", 2552) - val emptyState = ClusterHeartbeatSenderState.empty(ConsistentHash(Seq.empty[Address], 10), - selfAddress.toString, 3) + val emptyState = ClusterHeartbeatSenderState.empty(selfAddress, 3) "A ClusterHeartbeatSenderState" must { @@ -29,47 +29,46 @@ class ClusterHeartbeatSenderStateSpec extends WordSpec with MustMatchers { emptyState.active.isEmpty must be(true) } - "include joinInProgress in active set" in { - val s = emptyState.addJoinInProgress(aa, Deadline.now + 30.seconds) - s.joinInProgress.keySet must be(Set(aa)) + "include 
heartbeatRequest in active set" in { + val s = emptyState.addHeartbeatRequest(aa, Deadline.now + 30.seconds) + s.heartbeatRequest.keySet must be(Set(aa)) s.active must be(Set(aa)) } - "remove joinInProgress from active set after removeOverdueJoinInProgress" in { - val s = emptyState.addJoinInProgress(aa, Deadline.now - 30.seconds).removeOverdueJoinInProgress() - s.joinInProgress must be(Map.empty) + "remove heartbeatRequest from active set after removeOverdueHeartbeatRequest" in { + val s = emptyState.addHeartbeatRequest(aa, Deadline.now - 30.seconds).removeOverdueHeartbeatRequest() + s.heartbeatRequest must be(Map.empty) s.active must be(Set.empty) s.ending must be(Map(aa -> 0)) } - "remove joinInProgress after reset" in { - val s = emptyState.addJoinInProgress(aa, Deadline.now + 30.seconds).reset(Set(aa, bb)) - s.joinInProgress must be(Map.empty) + "remove heartbeatRequest after reset" in { + val s = emptyState.addHeartbeatRequest(aa, Deadline.now + 30.seconds).reset(Set(aa, bb)) + s.heartbeatRequest must be(Map.empty) } - "remove joinInProgress after addMember" in { - val s = emptyState.addJoinInProgress(aa, Deadline.now + 30.seconds).addMember(aa) - s.joinInProgress must be(Map.empty) + "remove heartbeatRequest after addMember" in { + val s = emptyState.addHeartbeatRequest(aa, Deadline.now + 30.seconds).addMember(aa) + s.heartbeatRequest must be(Map.empty) } - "remove joinInProgress after removeMember" in { - val s = emptyState.addJoinInProgress(aa, Deadline.now + 30.seconds).reset(Set(aa, bb)).removeMember(aa) - s.joinInProgress must be(Map.empty) + "remove heartbeatRequest after removeMember" in { + val s = emptyState.addHeartbeatRequest(aa, Deadline.now + 30.seconds).reset(Set(aa, bb)).removeMember(aa) + s.heartbeatRequest must be(Map.empty) s.ending must be(Map(aa -> 0)) } - "remove from ending after addJoinInProgress" in { + "remove from ending after addHeartbeatRequest" in { val s = emptyState.reset(Set(aa, bb)).removeMember(aa) s.ending must be(Map(aa -> 
0)) - val s2 = s.addJoinInProgress(aa, Deadline.now + 30.seconds) - s2.joinInProgress.keySet must be(Set(aa)) + val s2 = s.addHeartbeatRequest(aa, Deadline.now + 30.seconds) + s2.heartbeatRequest.keySet must be(Set(aa)) s2.ending must be(Map.empty) } "include nodes from reset in active set" in { val nodes = Set(aa, bb, cc) val s = emptyState.reset(nodes) - s.all must be(nodes) s.current must be(nodes) s.ending must be(Map.empty) s.active must be(nodes) @@ -78,7 +77,6 @@ class ClusterHeartbeatSenderStateSpec extends WordSpec with MustMatchers { "limit current nodes to monitoredByNrOfMembers when adding members" in { val nodes = Set(aa, bb, cc, dd) val s = nodes.foldLeft(emptyState) { _ addMember _ } - s.all must be(nodes) s.current.size must be(3) s.addMember(ee).current.size must be(3) } diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala index 008c98d4b7..242775fb40 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.cluster @@ -27,7 +27,7 @@ object ClusterSpec { } akka.actor.provider = "akka.cluster.ClusterActorRefProvider" akka.remote.log-remote-lifecycle-events = off - akka.remote.netty.port = 0 + akka.remote.netty.tcp.port = 0 # akka.loglevel = DEBUG """ @@ -76,10 +76,11 @@ class ClusterSpec extends AkkaSpec(ClusterSpec.config) with ImplicitSender { try { cluster.subscribe(testActor, classOf[ClusterEvent.ClusterDomainEvent]) // first, is in response to the subscription - expectMsgClass(classOf[ClusterEvent.ClusterDomainEvent]) + expectMsgClass(classOf[ClusterEvent.InstantClusterState]) + expectMsgClass(classOf[ClusterEvent.CurrentClusterState]) cluster.publishCurrentClusterState() - expectMsgClass(classOf[ClusterEvent.ClusterDomainEvent]) + expectMsgClass(classOf[ClusterEvent.CurrentClusterState]) } finally { cluster.unsubscribe(testActor) } @@ -87,7 +88,7 @@ class ClusterSpec extends AkkaSpec(ClusterSpec.config) with ImplicitSender { "send CurrentClusterState to one receiver when requested" in { cluster.sendCurrentClusterState(testActor) - expectMsgClass(classOf[ClusterEvent.ClusterDomainEvent]) + expectMsgClass(classOf[ClusterEvent.CurrentClusterState]) } } diff --git a/akka-cluster/src/test/scala/akka/cluster/EWMASpec.scala b/akka-cluster/src/test/scala/akka/cluster/EWMASpec.scala index ed954b7bb6..519c07bc20 100644 --- a/akka-cluster/src/test/scala/akka/cluster/EWMASpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/EWMASpec.scala @@ -1,5 +1,5 @@ /* - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.cluster diff --git a/akka-cluster/src/test/scala/akka/cluster/FailureDetectorPuppet.scala b/akka-cluster/src/test/scala/akka/cluster/FailureDetectorPuppet.scala index 869df97f84..e212da2f74 100644 --- a/akka-cluster/src/test/scala/akka/cluster/FailureDetectorPuppet.scala +++ b/akka-cluster/src/test/scala/akka/cluster/FailureDetectorPuppet.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. 
+ * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.cluster @@ -50,6 +50,8 @@ class FailureDetectorPuppet(system: ActorSystem, settings: ClusterSettings) exte false } + override def isMonitoring(connection: Address): Boolean = connections.contains(connection) + def heartbeat(connection: Address): Unit = log.debug("Heart beat from cluster node[{}]", connection) def remove(connection: Address): Unit = { diff --git a/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala b/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala index 704b12dac0..ca20858743 100644 --- a/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/GossipSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.cluster @@ -14,22 +14,22 @@ class GossipSpec extends WordSpec with MustMatchers { import MemberStatus._ - val a1 = Member(Address("tcp.akka", "sys", "a", 2552), Up) - val a2 = Member(Address("tcp.akka", "sys", "a", 2552), Joining) - val b1 = Member(Address("tcp.akka", "sys", "b", 2552), Up) - val b2 = Member(Address("tcp.akka", "sys", "b", 2552), Removed) - val c1 = Member(Address("tcp.akka", "sys", "c", 2552), Leaving) - val c2 = Member(Address("tcp.akka", "sys", "c", 2552), Up) - val c3 = Member(Address("tcp.akka", "sys", "c", 2552), Exiting) - val d1 = Member(Address("tcp.akka", "sys", "d", 2552), Leaving) - val d2 = Member(Address("tcp.akka", "sys", "d", 2552), Removed) - val e1 = Member(Address("tcp.akka", "sys", "e", 2552), Joining) - val e2 = Member(Address("tcp.akka", "sys", "e", 2552), Up) + val a1 = Member(Address("akka.tcp", "sys", "a", 2552), Up) + val a2 = Member(Address("akka.tcp", "sys", "a", 2552), Joining) + val b1 = Member(Address("akka.tcp", "sys", "b", 2552), Up) + val b2 = Member(Address("akka.tcp", "sys", "b", 2552), Removed) + val c1 = Member(Address("akka.tcp", "sys", "c", 2552), Leaving) + val c2 = Member(Address("akka.tcp", 
"sys", "c", 2552), Up) + val c3 = Member(Address("akka.tcp", "sys", "c", 2552), Exiting) + val d1 = Member(Address("akka.tcp", "sys", "d", 2552), Leaving) + val d2 = Member(Address("akka.tcp", "sys", "d", 2552), Removed) + val e1 = Member(Address("akka.tcp", "sys", "e", 2552), Joining) + val e2 = Member(Address("akka.tcp", "sys", "e", 2552), Up) "A Gossip" must { "reach convergence when it's empty" in { - Gossip().convergence must be(true) + Gossip.empty.convergence must be(true) } "merge members by status priority" in { diff --git a/akka-cluster/src/test/scala/akka/cluster/HeartbeatNodeRingSpec.scala b/akka-cluster/src/test/scala/akka/cluster/HeartbeatNodeRingSpec.scala new file mode 100644 index 0000000000..619ae0a630 --- /dev/null +++ b/akka-cluster/src/test/scala/akka/cluster/HeartbeatNodeRingSpec.scala @@ -0,0 +1,61 @@ +/** + * Copyright (C) 2009-2013 Typesafe Inc. + */ + +package akka.cluster + +import org.scalatest.WordSpec +import org.scalatest.matchers.MustMatchers +import akka.actor.Address +import akka.routing.ConsistentHash +import scala.concurrent.duration._ +import scala.collection.immutable + +@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) +class HeartbeatNodeRingSpec extends WordSpec with MustMatchers { + + val aa = Address("akka.tcp", "sys", "aa", 2552) + val bb = Address("akka.tcp", "sys", "bb", 2552) + val cc = Address("akka.tcp", "sys", "cc", 2552) + val dd = Address("akka.tcp", "sys", "dd", 2552) + val ee = Address("akka.tcp", "sys", "ee", 2552) + + val nodes = Set(aa, bb, cc, dd, ee) + + "A HashedNodeRing" must { + + "pick specified number of nodes as receivers" in { + val ring = HeartbeatNodeRing(cc, nodes, 3) + ring.myReceivers must be(ring.receivers(cc)) + + nodes foreach { n ⇒ + val receivers = ring.receivers(n) + receivers.size must be(3) + receivers must not contain (n) + } + } + + "pick all except own as receivers when less than total number of nodes" in { + val expected = Set(aa, bb, dd, ee) + HeartbeatNodeRing(cc, 
nodes, 4).myReceivers must be(expected) + HeartbeatNodeRing(cc, nodes, 5).myReceivers must be(expected) + HeartbeatNodeRing(cc, nodes, 6).myReceivers must be(expected) + } + + "have matching senders and receivers" in { + val ring = HeartbeatNodeRing(cc, nodes, 3) + ring.mySenders must be(ring.senders(cc)) + + for (sender ← nodes; receiver ← ring.receivers(sender)) { + ring.senders(receiver) must contain(sender) + } + } + + "pick none when alone" in { + val ring = HeartbeatNodeRing(cc, Set(cc), 3) + ring.myReceivers must be(Set()) + ring.mySenders must be(Set()) + } + + } +} diff --git a/akka-cluster/src/test/scala/akka/cluster/MemberOrderingSpec.scala b/akka-cluster/src/test/scala/akka/cluster/MemberOrderingSpec.scala index 6f93f9827a..2442dd0e46 100644 --- a/akka-cluster/src/test/scala/akka/cluster/MemberOrderingSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/MemberOrderingSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.cluster @@ -35,8 +35,8 @@ class MemberOrderingSpec extends WordSpec with MustMatchers { "be sorted by address correctly" in { import Member.ordering // sorting should be done on host and port, only - val m1 = Member(Address("tcp.akka", "sys1", "host1", 9000), MemberStatus.Up) - val m2 = Member(Address("tcp.akka", "sys1", "host1", 10000), MemberStatus.Up) + val m1 = Member(Address("akka.tcp", "sys1", "host1", 9000), MemberStatus.Up) + val m2 = Member(Address("akka.tcp", "sys1", "host1", 10000), MemberStatus.Up) val m3 = Member(Address("cluster", "sys2", "host2", 8000), MemberStatus.Up) val m4 = Member(Address("cluster", "sys2", "host2", 9000), MemberStatus.Up) val m5 = Member(Address("cluster", "sys1", "host2", 10000), MemberStatus.Up) @@ -48,9 +48,9 @@ class MemberOrderingSpec extends WordSpec with MustMatchers { } "have stable equals and hashCode" in { - val m1 = Member(Address("tcp.akka", "sys1", "host1", 9000), MemberStatus.Joining) - val m2 = Member(Address("tcp.akka", "sys1", "host1", 9000), MemberStatus.Up) - val m3 = Member(Address("tcp.akka", "sys1", "host1", 10000), MemberStatus.Up) + val m1 = Member(Address("akka.tcp", "sys1", "host1", 9000), MemberStatus.Joining) + val m2 = Member(Address("akka.tcp", "sys1", "host1", 9000), MemberStatus.Up) + val m3 = Member(Address("akka.tcp", "sys1", "host1", 10000), MemberStatus.Up) m1 must be(m2) m1.hashCode must be(m2.hashCode) @@ -60,8 +60,8 @@ class MemberOrderingSpec extends WordSpec with MustMatchers { } "have consistent ordering and equals" in { - val address1 = Address("tcp.akka", "sys1", "host1", 9001) - val address2 = Address("tcp.akka", "sys1", "host1", 9002) + val address1 = Address("akka.tcp", "sys1", "host1", 9001) + val address2 = Address("akka.tcp", "sys1", "host1", 9002) val x = Member(address1, Exiting) val y = Member(address1, Removed) @@ -71,9 +71,9 @@ class MemberOrderingSpec extends WordSpec with MustMatchers { } "work with SortedSet" in { - val address1 = Address("tcp.akka", 
"sys1", "host1", 9001) - val address2 = Address("tcp.akka", "sys1", "host1", 9002) - val address3 = Address("tcp.akka", "sys1", "host1", 9003) + val address1 = Address("akka.tcp", "sys1", "host1", 9001) + val address2 = Address("akka.tcp", "sys1", "host1", 9002) + val address3 = Address("akka.tcp", "sys1", "host1", 9003) (SortedSet(Member(address1, MemberStatus.Joining)) - Member(address1, MemberStatus.Up)) must be(SortedSet.empty[Member]) (SortedSet(Member(address1, MemberStatus.Exiting)) - Member(address1, MemberStatus.Removed)) must be(SortedSet.empty[Member]) diff --git a/akka-cluster/src/test/scala/akka/cluster/MetricNumericConverterSpec.scala b/akka-cluster/src/test/scala/akka/cluster/MetricNumericConverterSpec.scala index f572b13233..20ad0e62bd 100644 --- a/akka-cluster/src/test/scala/akka/cluster/MetricNumericConverterSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/MetricNumericConverterSpec.scala @@ -1,5 +1,5 @@ /* - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.cluster @@ -46,4 +46,4 @@ class MetricNumericConverterSpec extends WordSpec with MustMatchers with MetricN defined(Double.NaN) must be(false) } } -} \ No newline at end of file +} diff --git a/akka-cluster/src/test/scala/akka/cluster/MetricValuesSpec.scala b/akka-cluster/src/test/scala/akka/cluster/MetricValuesSpec.scala index 2b3b23e8c5..9358ad0e91 100644 --- a/akka-cluster/src/test/scala/akka/cluster/MetricValuesSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/MetricValuesSpec.scala @@ -1,5 +1,5 @@ /* - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.cluster @@ -14,8 +14,8 @@ class MetricValuesSpec extends AkkaSpec(MetricsEnabledSpec.config) with MetricsC val collector = createMetricsCollector - val node1 = NodeMetrics(Address("tcp.akka", "sys", "a", 2554), 1, collector.sample.metrics) - val node2 = NodeMetrics(Address("tcp.akka", "sys", "a", 2555), 1, collector.sample.metrics) + val node1 = NodeMetrics(Address("akka.tcp", "sys", "a", 2554), 1, collector.sample.metrics) + val node2 = NodeMetrics(Address("akka.tcp", "sys", "a", 2555), 1, collector.sample.metrics) val nodes: Seq[NodeMetrics] = { (1 to 100).foldLeft(List(node1, node2)) { (nodes, _) ⇒ @@ -63,4 +63,4 @@ class MetricValuesSpec extends AkkaSpec(MetricsEnabledSpec.config) with MetricsC } } -} \ No newline at end of file +} diff --git a/akka-cluster/src/test/scala/akka/cluster/MetricsCollectorSpec.scala b/akka-cluster/src/test/scala/akka/cluster/MetricsCollectorSpec.scala index 2ce3892645..b92ec1eb71 100644 --- a/akka-cluster/src/test/scala/akka/cluster/MetricsCollectorSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/MetricsCollectorSpec.scala @@ -1,6 +1,6 @@ /* - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.cluster diff --git a/akka-cluster/src/test/scala/akka/cluster/MetricsGossipSpec.scala b/akka-cluster/src/test/scala/akka/cluster/MetricsGossipSpec.scala index c6b8d64eb1..a0d9733479 100644 --- a/akka-cluster/src/test/scala/akka/cluster/MetricsGossipSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/MetricsGossipSpec.scala @@ -1,5 +1,5 @@ /* - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.cluster @@ -18,8 +18,8 @@ class MetricsGossipSpec extends AkkaSpec(MetricsEnabledSpec.config) with Implici "A MetricsGossip" must { "add new NodeMetrics" in { - val m1 = NodeMetrics(Address("tcp.akka", "sys", "a", 2554), newTimestamp, collector.sample.metrics) - val m2 = NodeMetrics(Address("tcp.akka", "sys", "a", 2555), newTimestamp, collector.sample.metrics) + val m1 = NodeMetrics(Address("akka.tcp", "sys", "a", 2554), newTimestamp, collector.sample.metrics) + val m2 = NodeMetrics(Address("akka.tcp", "sys", "a", 2555), newTimestamp, collector.sample.metrics) m1.metrics.size must be > (3) m2.metrics.size must be > (3) @@ -35,8 +35,8 @@ class MetricsGossipSpec extends AkkaSpec(MetricsEnabledSpec.config) with Implici } "merge peer metrics" in { - val m1 = NodeMetrics(Address("tcp.akka", "sys", "a", 2554), newTimestamp, collector.sample.metrics) - val m2 = NodeMetrics(Address("tcp.akka", "sys", "a", 2555), newTimestamp, collector.sample.metrics) + val m1 = NodeMetrics(Address("akka.tcp", "sys", "a", 2554), newTimestamp, collector.sample.metrics) + val m2 = NodeMetrics(Address("akka.tcp", "sys", "a", 2555), newTimestamp, collector.sample.metrics) val g1 = MetricsGossip.empty :+ m1 :+ m2 g1.nodes.size must be(2) @@ -51,9 +51,9 @@ class MetricsGossipSpec extends AkkaSpec(MetricsEnabledSpec.config) with Implici } "merge an existing metric set for a node and update node ring" in { - val m1 = NodeMetrics(Address("tcp.akka", "sys", "a", 2554), newTimestamp, collector.sample.metrics) - val m2 = NodeMetrics(Address("tcp.akka", "sys", "a", 2555), newTimestamp, collector.sample.metrics) - val m3 = NodeMetrics(Address("tcp.akka", "sys", "a", 2556), newTimestamp, collector.sample.metrics) + val m1 = NodeMetrics(Address("akka.tcp", "sys", "a", 2554), newTimestamp, collector.sample.metrics) + val m2 = NodeMetrics(Address("akka.tcp", "sys", "a", 2555), newTimestamp, collector.sample.metrics) + val m3 = NodeMetrics(Address("akka.tcp", "sys", "a", 2556), newTimestamp, 
collector.sample.metrics) val m2Updated = m2 copy (metrics = collector.sample.metrics, timestamp = m2.timestamp + 1000) val g1 = MetricsGossip.empty :+ m1 :+ m2 @@ -72,14 +72,14 @@ class MetricsGossipSpec extends AkkaSpec(MetricsEnabledSpec.config) with Implici } "get the current NodeMetrics if it exists in the local nodes" in { - val m1 = NodeMetrics(Address("tcp.akka", "sys", "a", 2554), newTimestamp, collector.sample.metrics) + val m1 = NodeMetrics(Address("akka.tcp", "sys", "a", 2554), newTimestamp, collector.sample.metrics) val g1 = MetricsGossip.empty :+ m1 g1.nodeMetricsFor(m1.address).map(_.metrics) must be(Some(m1.metrics)) } "remove a node if it is no longer Up" in { - val m1 = NodeMetrics(Address("tcp.akka", "sys", "a", 2554), newTimestamp, collector.sample.metrics) - val m2 = NodeMetrics(Address("tcp.akka", "sys", "a", 2555), newTimestamp, collector.sample.metrics) + val m1 = NodeMetrics(Address("akka.tcp", "sys", "a", 2554), newTimestamp, collector.sample.metrics) + val m2 = NodeMetrics(Address("akka.tcp", "sys", "a", 2555), newTimestamp, collector.sample.metrics) val g1 = MetricsGossip.empty :+ m1 :+ m2 g1.nodes.size must be(2) @@ -91,8 +91,8 @@ class MetricsGossipSpec extends AkkaSpec(MetricsEnabledSpec.config) with Implici } "filter nodes" in { - val m1 = NodeMetrics(Address("tcp.akka", "sys", "a", 2554), newTimestamp, collector.sample.metrics) - val m2 = NodeMetrics(Address("tcp.akka", "sys", "a", 2555), newTimestamp, collector.sample.metrics) + val m1 = NodeMetrics(Address("akka.tcp", "sys", "a", 2554), newTimestamp, collector.sample.metrics) + val m2 = NodeMetrics(Address("akka.tcp", "sys", "a", 2555), newTimestamp, collector.sample.metrics) val g1 = MetricsGossip.empty :+ m1 :+ m2 g1.nodes.size must be(2) diff --git a/akka-cluster/src/test/scala/akka/cluster/NodeMetricsSpec.scala b/akka-cluster/src/test/scala/akka/cluster/NodeMetricsSpec.scala index 8ddd0af7ce..43b1aaf503 100644 --- 
a/akka-cluster/src/test/scala/akka/cluster/NodeMetricsSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/NodeMetricsSpec.scala @@ -1,5 +1,5 @@ /* - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.cluster @@ -11,8 +11,8 @@ import akka.actor.Address @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) class NodeMetricsSpec extends WordSpec with MustMatchers { - val node1 = Address("tcp.akka", "sys", "a", 2554) - val node2 = Address("tcp.akka", "sys", "a", 2555) + val node1 = Address("akka.tcp", "sys", "a", 2554) + val node2 = Address("akka.tcp", "sys", "a", 2555) "NodeMetrics must" must { diff --git a/akka-cluster/src/test/scala/akka/cluster/VectorClockSpec.scala b/akka-cluster/src/test/scala/akka/cluster/VectorClockSpec.scala index 19ad9410c4..fad1665e5f 100644 --- a/akka-cluster/src/test/scala/akka/cluster/VectorClockSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/VectorClockSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.cluster diff --git a/akka-cluster/src/test/scala/akka/cluster/routing/MetricsSelectorSpec.scala b/akka-cluster/src/test/scala/akka/cluster/routing/MetricsSelectorSpec.scala index 659f1a7cf6..f0443d4f61 100644 --- a/akka-cluster/src/test/scala/akka/cluster/routing/MetricsSelectorSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/routing/MetricsSelectorSpec.scala @@ -1,5 +1,5 @@ /* - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.cluster.routing @@ -19,10 +19,10 @@ class MetricsSelectorSpec extends WordSpec with MustMatchers { override def capacity(nodeMetrics: Set[NodeMetrics]): Map[Address, Double] = Map.empty } - val a1 = Address("tcp.akka", "sys", "a1", 2551) - val b1 = Address("tcp.akka", "sys", "b1", 2551) - val c1 = Address("tcp.akka", "sys", "c1", 2551) - val d1 = Address("tcp.akka", "sys", "d1", 2551) + val a1 = Address("akka.tcp", "sys", "a1", 2551) + val b1 = Address("akka.tcp", "sys", "b1", 2551) + val c1 = Address("akka.tcp", "sys", "c1", 2551) + val d1 = Address("akka.tcp", "sys", "d1", 2551) val decayFactor = Some(0.18) diff --git a/akka-cluster/src/test/scala/akka/cluster/routing/WeightedRouteesSpec.scala b/akka-cluster/src/test/scala/akka/cluster/routing/WeightedRouteesSpec.scala index 76a00fa6ae..9a7023ee6c 100644 --- a/akka-cluster/src/test/scala/akka/cluster/routing/WeightedRouteesSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/routing/WeightedRouteesSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.cluster.routing @@ -13,13 +13,13 @@ import akka.testkit.AkkaSpec @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) class WeightedRouteesSpec extends AkkaSpec(ConfigFactory.parseString(""" akka.actor.provider = "akka.cluster.ClusterActorRefProvider" - akka.remote.netty.port = 0 + akka.remote.netty.tcp.port = 0 """)) { - val a1 = Address("tcp.akka", "sys", "a1", 2551) - val b1 = Address("tcp.akka", "sys", "b1", 2551) - val c1 = Address("tcp.akka", "sys", "c1", 2551) - val d1 = Address("tcp.akka", "sys", "d1", 2551) + val a1 = Address("akka.tcp", "sys", "a1", 2551) + val b1 = Address("akka.tcp", "sys", "b1", 2551) + val c1 = Address("akka.tcp", "sys", "c1", 2551) + val d1 = Address("akka.tcp", "sys", "d1", 2551) val refA = system.actorFor(RootActorPath(a1) / "user" / "a") val refB = system.actorFor(RootActorPath(b1) / "user" / "b") diff --git a/akka-contrib/docs/cluster-singleton.rst b/akka-contrib/docs/cluster-singleton.rst new file mode 100644 index 0000000000..47a372115b --- /dev/null +++ b/akka-contrib/docs/cluster-singleton.rst @@ -0,0 +1,92 @@ +.. _cluster-singleton: + +Cluster Singleton Pattern +========================= + +For some use cases it is convenient and sometimes also mandatory to ensure that +you have exactly one actor of a certain type running somewhere in the cluster. + +Some examples: + +* single point of responsibility for certain cluster-wide consistent decisions, or + coordination of actions across the cluster system +* single entry point to an external system +* single master, many workers +* centralized naming service, or routing logic + +Using a singleton should not be the first design choice. It has several drawbacks, +such as single-point of bottleneck. Single-point of failure is also a relevant concern, +but for some cases this feature takes care of that by making sure that another singleton +instance will eventually be started. 
+ +The cluster singleton pattern is implemented by ``akka.contrib.pattern.ClusterSingletonManager``, +which is an actor that is supposed to be started on all nodes in the cluster. +The actual singleton actor is started by the ``ClusterSingletonManager`` on the +leader node of the cluster by creating a child actor from supplied ``Props``. +``ClusterSingletonManager`` makes sure that at most one singleton instance is +running at any point in time. + +The singleton actor is always running on the leader member, which is nothing more than +the address currently sorted first in the member ring. This can change when adding +or removing members. A graceful hand over can normally be performed when joining a new +node that becomes leader or removing current leader node. Be aware that there is a short +time period when there is no active singleton during the hand over process. + +The cluster failure detector will notice when a leader node becomes unreachable due to +things like JVM crash, hard shut down, or network failure. Then a new leader node will +take over and a new singleton actor is created. For these failure scenarios there will +not be a graceful hand-over, but more than one active singleton is prevented by all +reasonable means. Some corner cases are eventually resolved by configurable timeouts. + +You access the singleton actor with ``actorFor`` using the names you have specified when +creating the ClusterSingletonManager. You can subscribe to cluster ``LeaderChanged`` events +to keep track of which node it is supposed to be running on. Alternatively the singleton +actor may broadcast its existence when it is started. + +An Example +---------- + +Assume that we need one single entry point to an external system. An actor that +receives messages from a JMS queue with the strict requirement that only one +JMS consumer must exist to make sure that the messages are processed in order.
+That is perhaps not how one would like to design things, but a typical real-world +scenario when integrating with external systems. + +On each node in the cluster you need to start the ``ClusterSingletonManager`` and +supply the ``Props`` of the singleton actor, in this case the JMS queue consumer. + +.. includecode:: @contribSrc@/src/multi-jvm/scala/akka/contrib/pattern/ClusterSingletonManagerSpec.scala#create-singleton-manager + +The corresponding Java API for the ``singletonProps`` function is ``akka.contrib.pattern.ClusterSingletonPropsFactory``. + +Here we use an application specific ``terminationMessage`` to be able to close the +resources before actually stopping the singleton actor. Note that ``PoisonPill`` is a +perfectly fine ``terminationMessage`` if you only need to stop the actor. + +Here is how the singleton actor handles the ``terminationMessage`` in this example. + +.. includecode:: @contribSrc@/src/multi-jvm/scala/akka/contrib/pattern/ClusterSingletonManagerSpec.scala#consumer-end + +Note that you can send back current state to the ``ClusterSingletonManager`` before terminating. +This message will be sent over to the ``ClusterSingletonManager`` at the new leader node and it +will be passed to the ``singletonProps`` factory when creating the new singleton instance. + +With the names given above the path of singleton actor can be constructed by subscribing to +``LeaderChanged`` cluster event and the actor reference is then looked up using ``actorFor``: + +.. includecode:: @contribSrc@/src/multi-jvm/scala/akka/contrib/pattern/ClusterSingletonManagerSpec.scala#singleton-proxy + +Note that the hand-over might still be in progress and the singleton actor might not be started yet +when you receive the ``LeaderChanged`` event. + +To test scenarios where the cluster leader node is removed or shut down you can use :ref:`multi-node-testing` and +utilize the fact that the leader is supposed to be the first member when sorted by member address. + +..
includecode:: @contribSrc@/src/multi-jvm/scala/akka/contrib/pattern/ClusterSingletonManagerSpec.scala#sort-cluster-roles + +.. includecode:: @contribSrc@/src/multi-jvm/scala/akka/contrib/pattern/ClusterSingletonManagerSpec.scala#test-leave + +Also, make sure that you don't shut down the first role, which is running the test conductor controller. +Use a dedicated role for the controller, which is not a cluster member. + +.. note:: The singleton pattern will be simplified, perhaps provided out-of-the-box, when the cluster handles automatic actor partitioning. diff --git a/akka-contrib/docs/index.rst b/akka-contrib/docs/index.rst index f0d3245187..e6701d5f35 100644 --- a/akka-contrib/docs/index.rst +++ b/akka-contrib/docs/index.rst @@ -32,6 +32,7 @@ The Current List of Modules throttle jul peek-mailbox + cluster-singleton Suggested Way of Using these Contributions ------------------------------------------ diff --git a/akka-contrib/src/main/scala/akka/contrib/jul/JulEventHandler.scala b/akka-contrib/src/main/scala/akka/contrib/jul/JulEventHandler.scala index 68ce0ed973..6e971ddfcb 100644 --- a/akka-contrib/src/main/scala/akka/contrib/jul/JulEventHandler.scala +++ b/akka-contrib/src/main/scala/akka/contrib/jul/JulEventHandler.scala @@ -4,7 +4,7 @@ import akka.event.Logging._ import akka.actor._ import akka.event.LoggingAdapter import java.util.logging -import concurrent.{ ExecutionContext, Future } +import scala.concurrent.{ ExecutionContext, Future } /** * Makes the Akka `Logging` API available as the `log` diff --git a/akka-contrib/src/main/scala/akka/contrib/pattern/ClusterSingletonManager.scala b/akka-contrib/src/main/scala/akka/contrib/pattern/ClusterSingletonManager.scala new file mode 100644 index 0000000000..c641524644 --- /dev/null +++ b/akka-contrib/src/main/scala/akka/contrib/pattern/ClusterSingletonManager.scala @@ -0,0 +1,596 @@ +/** + * Copyright (C) 2009-2013 Typesafe Inc. 
+ */ + +package akka.contrib.pattern + +import scala.concurrent.duration._ +import akka.actor.Actor +import akka.actor.Actor.Receive +import akka.actor.ActorLogging +import akka.actor.ActorRef +import akka.actor.Address +import akka.actor.FSM +import akka.actor.Props +import akka.actor.Terminated +import akka.cluster.Cluster +import akka.cluster.ClusterEvent._ +import akka.AkkaException + +object ClusterSingletonManager { + + /** + * Internal API + * public due to the `with FSM` type parameters + */ + sealed trait State + /** + * Internal API + * public due to the `with FSM` type parameters + */ + sealed trait Data + + /** + * Internal API + */ + private object Internal { + /** + * Sent from new leader to previous leader to initiate the + * hand-over process. `HandOverInProgress` and `HandOverDone` + * are expected replies. + */ + case object HandOverToMe + /** + * Confirmation by the previous leader that the hand + * over process, shut down of the singleton actor, has + * started. + */ + case object HandOverInProgress + /** + * Confirmation by the previous leader that the singleton + * actor has been terminated and the hand-over process is + * completed. The `handOverData` holds the message, if any, + * sent from the singleton actor to its parent ClusterSingletonManager + * when shutting down. It is passed to the `singletonProps` + * factory on the new leader node. + */ + case class HandOverDone(handOverData: Option[Any]) + /** + * Sent from previous leader to new leader to + * initiate the normal hand-over process. + * Especially useful when new node joins and becomes + * leader immediately, without knowing who was previous + * leader.
+ */ + case object TakeOverFromMe + + case class HandOverRetry(count: Int) + case class TakeOverRetry(leaderPeer: ActorRef, count: Int) + case object Cleanup + case object StartLeaderChangedBuffer + + case object Start extends State + case object Leader extends State + case object NonLeader extends State + case object BecomingLeader extends State + case object WasLeader extends State + case object HandingOver extends State + case object TakeOver extends State + + case object Uninitialized extends Data + case class NonLeaderData(leaderOption: Option[Address]) extends Data + case class BecomingLeaderData(previousLeaderOption: Option[Address]) extends Data + case class LeaderData(singleton: ActorRef, singletonTerminated: Boolean = false, + handOverData: Option[Any] = None) extends Data + case class WasLeaderData(singleton: ActorRef, singletonTerminated: Boolean, handOverData: Option[Any], + newLeader: Address) extends Data + case class HandingOverData(singleton: ActorRef, handOverTo: Option[ActorRef], handOverData: Option[Any]) extends Data + + val HandOverRetryTimer = "hand-over-retry" + val TakeOverRetryTimer = "take-over-retry" + val CleanupTimer = "cleanup" + + object LeaderChangedBuffer { + /** + * Request to deliver one more event. + */ + case object GetNext + /** + * The first event, corresponding to CurrentClusterState. + */ + case class InitialLeaderState(leader: Option[Address], memberCount: Int) + } + + /** + * Notifications of [[akka.cluster.ClusterEvent.LeaderChanged]] is tunneled + * via this actor (child of ClusterSingletonManager) to be able to deliver + * one change at a time. Avoiding simultaneous leader changes simplifies + * the process in ClusterSingletonManager. ClusterSingletonManager requests + * next event with `GetNext` when it is ready for it. Only one outstanding + * `GetNext` request is allowed. Incoming events are buffered and delivered + * upon `GetNext` request. 
+ */ + class LeaderChangedBuffer extends Actor { + import LeaderChangedBuffer._ + import context.dispatcher + + val cluster = Cluster(context.system) + var changes = Vector.empty[AnyRef] + var memberCount = 0 + + // subscribe to LeaderChanged, re-subscribe when restart + override def preStart(): Unit = cluster.subscribe(self, classOf[LeaderChanged]) + override def postStop(): Unit = cluster.unsubscribe(self) + + def receive = { + case state: CurrentClusterState ⇒ + changes :+= InitialLeaderState(state.leader, state.members.size) + case event: LeaderChanged ⇒ + changes :+= event + case GetNext if changes.isEmpty ⇒ + context.become(deliverNext, discardOld = false) + case GetNext ⇒ + val event = changes.head + changes = changes.tail + context.parent ! event + } + + // the buffer was empty when GetNext was received, deliver next event immediately + def deliverNext: Actor.Receive = { + case state: CurrentClusterState ⇒ + context.parent ! InitialLeaderState(state.leader, state.members.size) + context.unbecome() + case event: LeaderChanged ⇒ + context.parent ! event + context.unbecome() + } + + } + + } +} + +/** + * Java API. Factory for the [[akka.actor.Props]] of the singleton + * actor instance. Used in constructor of + * [[akka.contrib.pattern.ClusterSingletonManager]] + */ +@SerialVersionUID(1L) +trait ClusterSingletonPropsFactory extends Serializable { + /** + * Create the `Props` from the `handOverData` sent from + * previous singleton. `handOverData` might be null + * when no hand-over took place, or when the there is no need + * for sending data to the new singleton. + */ + def create(handOverData: Any): Props +} + +/** + * Thrown when a consistent state can't be determined within the + * defined retry limits. Eventually it will reach a stable state and + * can continue, and that is simplified by starting over with a clean + * state. Parent supervisor should typically restart the actor, i.e. + * default decision. 
+ */ +class ClusterSingletonManagerIsStuck(message: String) extends AkkaException(message, null) + +/** + * Manages a cluster wide singleton actor instance, i.e. + * at most one singleton instance is running at any point in time. + * The ClusterSingletonManager is supposed to be started on all + * nodes in the cluster with `actorOf`. The actual singleton is + * started on the leader node of the cluster by creating a child + * actor from the supplied `singletonProps`. + * + * The singleton actor is always running on the leader member, which is + * nothing more than the address currently sorted first in the member + * ring. This can change when adding or removing members. A graceful hand + * over can normally be performed when joining a new node that becomes + * leader or removing current leader node. Be aware that there is a + * short time period when there is no active singleton during the + * hand-over process. + * + * The singleton actor can at any time send a message to its parent + * ClusterSingletonManager and this message will be passed to the + * `singletonProps` factory on the new leader node when a graceful + * hand-over is performed. + * + * The cluster failure detector will notice when a leader node + * becomes unreachable due to things like JVM crash, hard shut down, + * or network failure. Then a new leader node will take over and a + * new singleton actor is created. For these failure scenarios there + * will not be a graceful hand-over, but more than one active singletons + * is prevented by all reasonable means. Some corner cases are eventually + * resolved by configurable timeouts. + * + * You access the singleton actor with `actorFor` using the names you have + * specified when creating the ClusterSingletonManager. You can subscribe to + * [[akka.cluster.ClusterEvent.LeaderChanged]] to keep track of which node + * it is supposed to be running on. Alternatively the singleton actor may + * broadcast its existence when it is started. 
+ * + * ==Arguments== + * + * '''''singletonProps''''' Factory for [[akka.actor.Props]] of the + * singleton actor instance. The `Option` parameter is the + * `handOverData` sent from previous singleton. `handOverData` + * might be None when no hand-over took place, or when there + * is no need for sending data to the new singleton. The `handOverData` + * is typically passed as parameter to the constructor of the + * singleton actor. + * + * '''''singletonName''''' The actor name of the child singleton actor. + * + * '''''terminationMessage''''' When handing over to a new leader node + * this `terminationMessage` is sent to the singleton actor to tell + * it to finish its work, close resources, and stop. It can send + * a message back to the parent ClusterSingletonManager, which will be + * passed to the `singletonProps` factory on the new leader node. + * The hand-over to the new leader node is completed when the + * singleton actor is terminated. + * Note that [[akka.actor.PoisonPill]] is a perfectly fine + * `terminationMessage` if you only need to stop the actor. + * + * '''''maxHandOverRetries''''' When a node is becoming leader it sends + * hand-over request to previous leader. This is retried with the + * `retryInterval` until the previous leader confirms that the hand + * over has started, or this `maxHandOverRetries` limit has been + * reached. If the retry limit is reached it takes the decision to be + * the new leader if previous leader is unknown (typically removed or + * downed), otherwise it initiates a new round by throwing + * [[akka.contrib.pattern.ClusterSingletonManagerIsStuck]] and expecting + * restart with fresh state. For a cluster with many members you might + * need to increase this retry limit because it takes longer time to + * propagate changes across all nodes.
+ * + * '''''maxTakeOverRetries''''' When a leader node is not leader any more + * it sends take over request to the new leader to initiate the normal + * hand-over process. This is especially useful when new node joins and becomes + * leader immediately, without knowing who was previous leader. This is retried + * with the `retryInterval` until this retry limit has been reached. If the retry + * limit is reached it initiates a new round by throwing + * [[akka.contrib.pattern.ClusterSingletonManagerIsStuck]] and expecting + * restart with fresh state. This will also cause the singleton actor to be + * stopped. `maxTakeOverRetries` must be less than `maxHandOverRetries` to + * ensure that new leader doesn't start singleton actor before previous is + * stopped for certain corner cases. + * + * '''''loggingEnabled''''' Logging of what is going on at info log level. + */ +class ClusterSingletonManager( + singletonProps: Option[Any] ⇒ Props, + singletonName: String, + terminationMessage: Any, + maxHandOverRetries: Int = 20, + maxTakeOverRetries: Int = 15, + retryInterval: FiniteDuration = 1.second, + loggingEnabled: Boolean = true) + extends Actor with FSM[ClusterSingletonManager.State, ClusterSingletonManager.Data] { + + // to ensure that new leader doesn't start singleton actor before previous is stopped for certain corner cases + require(maxTakeOverRetries < maxHandOverRetries, + s"maxTakeOverRetries [${maxTakeOverRetries}]must be < maxHandOverRetries [${maxHandOverRetries}]") + + /** + * Full Java API constructor. + */ + def this( + singletonName: String, + terminationMessage: Any, + maxHandOverRetries: Int, + maxTakeOverRetries: Int, + retryInterval: FiniteDuration, + loggingEnabled: Boolean, + singletonPropsFactory: ClusterSingletonPropsFactory) = + this(handOverData ⇒ singletonPropsFactory.create(handOverData.orNull), singletonName, terminationMessage, + maxHandOverRetries, maxTakeOverRetries, retryInterval) + + /** + * Java API constructor with default values. 
+ */ + def this( + singletonName: String, + terminationMessage: Any, + singletonPropsFactory: ClusterSingletonPropsFactory) = + this(handOverData ⇒ singletonPropsFactory.create(handOverData.orNull), singletonName, terminationMessage) + + import ClusterSingletonManager._ + import ClusterSingletonManager.Internal._ + import ClusterSingletonManager.Internal.LeaderChangedBuffer._ + + val cluster = Cluster(context.system) + val selfAddressOption = Some(cluster.selfAddress) + // started when when self member is Up + var leaderChangedBuffer: ActorRef = _ + // Previous GetNext request delivered event and new GetNext is to be sent + var leaderChangedReceived = true + + // keep track of previously downed members + var downed = Map.empty[Address, Deadline] + // keep track of previously removed members + var removed = Map.empty[Address, Deadline] + + def addDowned(address: Address): Unit = + downed += address -> (Deadline.now + 15.minutes) + + def addRemoved(address: Address): Unit = + removed += address -> (Deadline.now + 15.minutes) + + def cleanupOverdueNotMemberAnyMore(): Unit = { + downed = downed filter { case (address, deadline) ⇒ deadline.hasTimeLeft } + removed = removed filter { case (address, deadline) ⇒ deadline.hasTimeLeft } + } + + def logInfo(message: String): Unit = + if (loggingEnabled) log.info(message) + + def logInfo(template: String, arg1: Any): Unit = + if (loggingEnabled) log.info(template, arg1) + + def logInfo(template: String, arg1: Any, arg2: Any): Unit = + if (loggingEnabled) log.info(template, arg1, arg2) + + override def preStart(): Unit = { + super.preStart() + require(!cluster.isTerminated, "Cluster node must not be terminated") + + // subscribe to cluster changes, re-subscribe when restart + cluster.subscribe(self, classOf[MemberDowned]) + cluster.subscribe(self, classOf[MemberRemoved]) + + setTimer(CleanupTimer, Cleanup, 1.minute, repeat = true) + + // defer subscription to LeaderChanged to avoid some jitter when + // starting/joining several 
nodes at the same time + cluster.registerOnMemberUp(self ! StartLeaderChangedBuffer) + } + + override def postStop(): Unit = { + cancelTimer(CleanupTimer) + cluster.unsubscribe(self) + super.postStop() + } + + def peer(at: Address): ActorRef = context.actorFor(self.path.toStringWithAddress(at)) + + def getNextLeaderChanged(): Unit = + if (leaderChangedReceived) { + leaderChangedReceived = false + leaderChangedBuffer ! GetNext + } + + startWith(Start, Uninitialized) + + when(Start) { + case Event(StartLeaderChangedBuffer, _) ⇒ + leaderChangedBuffer = context.actorOf(Props[LeaderChangedBuffer].withDispatcher(context.props.dispatcher)) + getNextLeaderChanged() + stay + + case Event(InitialLeaderState(leaderOption, memberCount), _) ⇒ + leaderChangedReceived = true + if (leaderOption == selfAddressOption && memberCount == 1) + // alone, leader immediately + gotoLeader(None) + else if (leaderOption == selfAddressOption) + goto(BecomingLeader) using BecomingLeaderData(None) + else + goto(NonLeader) using NonLeaderData(leaderOption) + } + + when(NonLeader) { + case Event(LeaderChanged(leaderOption), NonLeaderData(previousLeaderOption)) ⇒ + leaderChangedReceived = true + if (leaderOption == selfAddressOption) { + logInfo("NonLeader observed LeaderChanged: [{} -> myself]", previousLeaderOption) + previousLeaderOption match { + case None ⇒ gotoLeader(None) + case Some(prev) if downed.contains(prev) ⇒ gotoLeader(None) + case Some(prev) ⇒ + peer(prev) ! 
HandOverToMe + goto(BecomingLeader) using BecomingLeaderData(previousLeaderOption) + } + } else { + logInfo("NonLeader observed LeaderChanged: [{} -> {}]", previousLeaderOption, leaderOption) + getNextLeaderChanged() + stay using NonLeaderData(leaderOption) + } + + case Event(MemberDowned(m), NonLeaderData(Some(previousLeader))) if m.address == previousLeader ⇒ + logInfo("Previous leader downed [{}]", m.address) + addDowned(m.address) + // transition when LeaderChanged + stay using NonLeaderData(None) + + case Event(MemberRemoved(m), _) if m.address == cluster.selfAddress ⇒ + logInfo("Self removed, stopping ClusterSingletonManager") + stop() + + } + + when(BecomingLeader) { + + case Event(HandOverInProgress, _) ⇒ + // confirmation that the hand-over process has started + logInfo("Hand-over in progress at [{}]", sender.path.address) + cancelTimer(HandOverRetryTimer) + stay + + case Event(HandOverDone(handOverData), BecomingLeaderData(Some(previousLeader))) ⇒ + if (sender.path.address == previousLeader) + gotoLeader(handOverData) + else { + logInfo("Ignoring HandOverDone in BecomingLeader from [{}]. Expected previous leader [{}]", + sender.path.address, previousLeader) + stay + } + + case Event(MemberDowned(m), BecomingLeaderData(Some(previousLeader))) if m.address == previousLeader ⇒ + logInfo("Previous leader [{}] downed", previousLeader) + addDowned(m.address) + gotoLeader(None) + + case Event(TakeOverFromMe, BecomingLeaderData(None)) ⇒ + sender ! HandOverToMe + stay using BecomingLeaderData(Some(sender.path.address)) + + case Event(TakeOverFromMe, BecomingLeaderData(Some(previousLeader))) ⇒ + if (previousLeader == sender.path.address) sender ! HandOverToMe + else logInfo("Ignoring TakeOver request in BecomingLeader from [{}]. 
Expected previous leader [{}]", + sender.path.address, previousLeader) + stay + + case Event(HandOverRetry(count), BecomingLeaderData(previousLeaderOption)) ⇒ + if (count <= maxHandOverRetries) { + logInfo("Retry [{}], sending HandOverToMe to [{}]", count, previousLeaderOption) + previousLeaderOption foreach { peer(_) ! HandOverToMe } + setTimer(HandOverRetryTimer, HandOverRetry(count + 1), retryInterval, repeat = false) + } else if (previousLeaderOption.isEmpty) { + // can't send HandOverToMe, previousLeader unknown for new node (or restart) + // previous leader might be down or removed, so no TakeOverFromMe message is received + logInfo("Timeout in BecomingLeader. Previous leader unknown and no TakeOver request.") + gotoLeader(None) + } else + throw new ClusterSingletonManagerIsStuck( + s"Becoming singleton leader was stuck because previous leader [${previousLeaderOption}] is unresponsive") + + } + + def gotoLeader(handOverData: Option[Any]): State = { + logInfo("Singleton manager [{}] starting singleton actor", cluster.selfAddress) + val singleton = context watch context.actorOf(singletonProps(handOverData), singletonName) + goto(Leader) using LeaderData(singleton) + } + + when(Leader) { + case Event(LeaderChanged(leaderOption), LeaderData(singleton, singletonTerminated, handOverData)) ⇒ + leaderChangedReceived = true + logInfo("Leader observed LeaderChanged: [{} -> {}]", cluster.selfAddress, leaderOption) + leaderOption match { + case Some(a) if a == cluster.selfAddress ⇒ + // already leader + stay + case Some(a) if downed.contains(a) || removed.contains(a) ⇒ + gotoHandingOver(singleton, singletonTerminated, handOverData, None) + case Some(a) ⇒ + // send TakeOver request in case the new leader doesn't know previous leader + val leaderPeer = peer(a) + leaderPeer ! 
TakeOverFromMe + setTimer(TakeOverRetryTimer, TakeOverRetry(leaderPeer, 1), retryInterval, repeat = false) + goto(WasLeader) using WasLeaderData(singleton, singletonTerminated, handOverData, newLeader = a) + case _ ⇒ + // new leader will initiate the hand-over + stay + } + + case Event(HandOverToMe, LeaderData(singleton, singletonTerminated, handOverData)) ⇒ + gotoHandingOver(singleton, singletonTerminated, handOverData, Some(sender)) + + case Event(singletonHandOverMessage, d @ LeaderData(singleton, _, _)) if sender == singleton ⇒ + stay using d.copy(handOverData = Some(singletonHandOverMessage)) + + case Event(Terminated(ref), d @ LeaderData(singleton, _, _)) if ref == singleton ⇒ + stay using d.copy(singletonTerminated = true) + } + + when(WasLeader) { + case Event(TakeOverRetry(leaderPeer, count), _) ⇒ + val newLeader = leaderPeer.path.address + if (count <= maxTakeOverRetries) { + logInfo("Retry [{}], sending TakeOverFromMe to [{}]", count, newLeader) + leaderPeer ! TakeOverFromMe + setTimer(TakeOverRetryTimer, TakeOverRetry(leaderPeer, count + 1), retryInterval, repeat = false) + stay + } else + throw new ClusterSingletonManagerIsStuck(s"Expected hand-over to [${newLeader}] never occured") + + case Event(HandOverToMe, WasLeaderData(singleton, singletonTerminated, handOverData, _)) ⇒ + gotoHandingOver(singleton, singletonTerminated, handOverData, Some(sender)) + + case Event(MemberDowned(m), WasLeaderData(singleton, singletonTerminated, handOverData, newLeader)) if m.address == newLeader ⇒ + addDowned(m.address) + gotoHandingOver(singleton, singletonTerminated, handOverData, None) + + case Event(singletonHandOverMessage, d @ WasLeaderData(singleton, _, _, _)) if sender == singleton ⇒ + stay using d.copy(handOverData = Some(singletonHandOverMessage)) + + case Event(Terminated(ref), d @ WasLeaderData(singleton, _, _, _)) if ref == singleton ⇒ + stay using d.copy(singletonTerminated = true) + + } + + def gotoHandingOver(singleton: ActorRef, singletonTerminated: 
Boolean, handOverData: Option[Any], handOverTo: Option[ActorRef]): State = { + if (singletonTerminated) { + handOverDone(handOverTo, handOverData) + } else { + handOverTo foreach { _ ! HandOverInProgress } + singleton ! terminationMessage + goto(HandingOver) using HandingOverData(singleton, handOverTo, handOverData) + } + } + + when(HandingOver) { + case (Event(Terminated(ref), HandingOverData(singleton, handOverTo, handOverData))) if ref == singleton ⇒ + handOverDone(handOverTo, handOverData) + + case Event(HandOverToMe, d @ HandingOverData(singleton, handOverTo, _)) if handOverTo == Some(sender) ⇒ + // retry + sender ! HandOverInProgress + stay + + case Event(singletonHandOverMessage, d @ HandingOverData(singleton, _, _)) if sender == singleton ⇒ + stay using d.copy(handOverData = Some(singletonHandOverMessage)) + + } + + def handOverDone(handOverTo: Option[ActorRef], handOverData: Option[Any]): State = { + val newLeader = handOverTo.map(_.path.address) + logInfo("Singleton terminated, hand-over done [{} -> {}]", cluster.selfAddress, newLeader) + handOverTo foreach { _ ! 
HandOverDone(handOverData) } + goto(NonLeader) using NonLeaderData(newLeader) + } + + whenUnhandled { + case Event(_: CurrentClusterState, _) ⇒ stay + case Event(MemberRemoved(m), _) ⇒ + logInfo("Member removed [{}]", m.address) + // if self removed, it will be stopped onTranstion to NonLeader + addRemoved(m.address) + stay + case Event(MemberDowned(m), _) ⇒ + logInfo("Member downed [{}]", m.address) + addDowned(m.address) + stay + case Event(TakeOverFromMe, _) ⇒ + logInfo("Ignoring TakeOver request in [{}] from [{}].", stateName, sender.path.address) + stay + case Event(Cleanup, _) ⇒ + cleanupOverdueNotMemberAnyMore() + stay + } + + onTransition { + case from -> to ⇒ logInfo("ClusterSingletonManager state change [{} -> {}]", from, to) + } + + onTransition { + case _ -> BecomingLeader ⇒ setTimer(HandOverRetryTimer, HandOverRetry(1), retryInterval, repeat = false) + } + + onTransition { + case BecomingLeader -> _ ⇒ cancelTimer(HandOverRetryTimer) + case WasLeader -> _ ⇒ cancelTimer(TakeOverRetryTimer) + } + + onTransition { + case _ -> (NonLeader | Leader) ⇒ getNextLeaderChanged() + } + + onTransition { + case _ -> NonLeader if removed.contains(cluster.selfAddress) || downed.contains(cluster.selfAddress) ⇒ + logInfo("Self removed, stopping ClusterSingletonManager") + stop() + } + +} \ No newline at end of file diff --git a/akka-contrib/src/main/scala/akka/contrib/pattern/ReliableProxy.scala b/akka-contrib/src/main/scala/akka/contrib/pattern/ReliableProxy.scala index 9d4b9ecd7b..b3e24e2361 100644 --- a/akka-contrib/src/main/scala/akka/contrib/pattern/ReliableProxy.scala +++ b/akka-contrib/src/main/scala/akka/contrib/pattern/ReliableProxy.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.contrib.pattern diff --git a/akka-contrib/src/main/scala/akka/contrib/throttle/TimerBasedThrottler.scala b/akka-contrib/src/main/scala/akka/contrib/throttle/TimerBasedThrottler.scala index de614619f7..3e1d2c6908 100644 --- a/akka-contrib/src/main/scala/akka/contrib/throttle/TimerBasedThrottler.scala +++ b/akka-contrib/src/main/scala/akka/contrib/throttle/TimerBasedThrottler.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.contrib.throttle @@ -293,4 +293,4 @@ class TimerBasedThrottler(var rate: Rate) extends Actor with Throttler with FSM[ data.copy(queue = queue.drop(nrOfMsgToSend), callsLeftInThisPeriod = data.callsLeftInThisPeriod - nrOfMsgToSend) } -} \ No newline at end of file +} diff --git a/akka-contrib/src/multi-jvm/scala/akka/contrib/pattern/ClusterSingletonManagerSpec.scala b/akka-contrib/src/multi-jvm/scala/akka/contrib/pattern/ClusterSingletonManagerSpec.scala new file mode 100644 index 0000000000..672ca7037d --- /dev/null +++ b/akka-contrib/src/multi-jvm/scala/akka/contrib/pattern/ClusterSingletonManagerSpec.scala @@ -0,0 +1,345 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. 
+ */ + +package akka.contrib.pattern + +import language.postfixOps +import scala.collection.immutable +import scala.concurrent.duration._ +import com.typesafe.config.ConfigFactory +import akka.actor.Actor +import akka.actor.ActorLogging +import akka.actor.ActorRef +import akka.actor.Address +import akka.actor.Props +import akka.actor.RootActorPath +import akka.cluster.Cluster +import akka.cluster.ClusterEvent._ +import akka.cluster.Member +import akka.remote.testconductor.RoleName +import akka.remote.testkit.MultiNodeConfig +import akka.remote.testkit.MultiNodeSpec +import akka.remote.testkit.STMultiNodeSpec +import akka.testkit._ +import akka.testkit.ImplicitSender +import akka.testkit.TestEvent._ +import akka.actor.Terminated + +object ClusterSingletonManagerSpec extends MultiNodeConfig { + val controller = role("controller") + val first = role("first") + val second = role("second") + val third = role("third") + val fourth = role("fourth") + val fifth = role("fifth") + val sixth = role("sixth") + + commonConfig(ConfigFactory.parseString(""" + akka.loglevel = INFO + akka.actor.provider = "akka.cluster.ClusterActorRefProvider" + akka.remote.log-remote-lifecycle-events = off + akka.cluster.auto-join = off + akka.cluster.auto-down = on + """)) + + testTransport(on = true) + + object PointToPointChannel { + case object RegisterConsumer + case object UnregisterConsumer + case object RegistrationOk + case object UnexpectedRegistration + case object UnregistrationOk + case object UnexpectedUnregistration + case object Reset + case object ResetOk + } + + /** + * This channel is extremly strict with regards to + * registration and unregistration of consumer to + * be able to detect misbehaviour (e.g. two active + * singleton instances). + */ + class PointToPointChannel extends Actor with ActorLogging { + import PointToPointChannel._ + + def receive = idle + + def idle: Receive = { + case RegisterConsumer ⇒ + log.info("RegisterConsumer: [{}]", sender.path) + sender ! 
RegistrationOk + context.become(active(sender)) + case UnregisterConsumer ⇒ + log.info("UnexpectedUnregistration: [{}]", sender.path) + sender ! UnexpectedUnregistration + context stop self + case Reset ⇒ sender ! ResetOk + case msg ⇒ // no consumer, drop + } + + def active(consumer: ActorRef): Receive = { + case UnregisterConsumer if sender == consumer ⇒ + log.info("UnregistrationOk: [{}]", sender.path) + sender ! UnregistrationOk + context.become(idle) + case UnregisterConsumer ⇒ + log.info("UnexpectedUnregistration: [{}], expected [{}]", sender.path, consumer.path) + sender ! UnexpectedUnregistration + context stop self + case RegisterConsumer ⇒ + log.info("Unexpected RegisterConsumer [{}], active consumer [{}]", sender.path, consumer.path) + sender ! UnexpectedRegistration + context stop self + case Reset ⇒ + context.become(idle) + sender ! ResetOk + case msg ⇒ consumer ! msg + } + } + + object Consumer { + case object End + case object GetCurrent + } + + /** + * The Singleton actor + */ + class Consumer(handOverData: Option[Any], queue: ActorRef, delegateTo: ActorRef) extends Actor { + import Consumer._ + import PointToPointChannel._ + + var current: Int = handOverData match { + case Some(x: Int) ⇒ x + case Some(x) ⇒ throw new IllegalArgumentException(s"handOverData must be an Int, got [${x}]") + case None ⇒ 0 + } + + override def preStart(): Unit = queue ! RegisterConsumer + + def receive = { + case n: Int if n <= current ⇒ + context.stop(self) + case n: Int ⇒ + current = n + delegateTo ! n + case x @ (RegistrationOk | UnexpectedRegistration) ⇒ + delegateTo ! x + case GetCurrent ⇒ + sender ! current + //#consumer-end + case End ⇒ + queue ! UnregisterConsumer + case UnregistrationOk ⇒ + // reply to ClusterSingletonManager with hand over data, + // which will be passed as parameter to new leader consumer + context.parent ! 
current + context stop self + //#consumer-end + } + } + + // documentation of how to keep track of the leader address in user land + //#singleton-proxy + class ConsumerProxy extends Actor { + // subscribe to LeaderChanged, re-subscribe when restart + override def preStart(): Unit = + Cluster(context.system).subscribe(self, classOf[LeaderChanged]) + override def postStop(): Unit = + Cluster(context.system).unsubscribe(self) + + var leaderAddress: Option[Address] = None + + def receive = { + case state: CurrentClusterState ⇒ leaderAddress = state.leader + case LeaderChanged(leader) ⇒ leaderAddress = leader + case other => consumer foreach { _ forward other } + } + + def consumer: Option[ActorRef] = + leaderAddress map (a => context.actorFor(RootActorPath(a) / + "user" / "singleton" / "consumer")) + } + //#singleton-proxy + +} + +class ClusterSingletonManagerMultiJvmNode1 extends ClusterSingletonManagerSpec +class ClusterSingletonManagerMultiJvmNode2 extends ClusterSingletonManagerSpec +class ClusterSingletonManagerMultiJvmNode3 extends ClusterSingletonManagerSpec +class ClusterSingletonManagerMultiJvmNode4 extends ClusterSingletonManagerSpec +class ClusterSingletonManagerMultiJvmNode5 extends ClusterSingletonManagerSpec +class ClusterSingletonManagerMultiJvmNode6 extends ClusterSingletonManagerSpec +class ClusterSingletonManagerMultiJvmNode7 extends ClusterSingletonManagerSpec + +class ClusterSingletonManagerSpec extends MultiNodeSpec(ClusterSingletonManagerSpec) with STMultiNodeSpec with ImplicitSender { + import ClusterSingletonManagerSpec._ + import ClusterSingletonManagerSpec.PointToPointChannel._ + import ClusterSingletonManagerSpec.Consumer._ + + override def initialParticipants = roles.size + + //#sort-cluster-roles + // Sort the roles in the order used by the cluster. 
+ lazy val sortedClusterRoles: immutable.IndexedSeq[RoleName] = { + implicit val clusterOrdering: Ordering[RoleName] = new Ordering[RoleName] { + import Member.addressOrdering + def compare(x: RoleName, y: RoleName) = + addressOrdering.compare(node(x).address, node(y).address) + } + roles.filterNot(_ == controller).toVector.sorted + } + //#sort-cluster-roles + + def queue: ActorRef = system.actorFor(node(controller) / "user" / "queue") + + def createSingleton(): ActorRef = { + //#create-singleton-manager + system.actorOf(Props(new ClusterSingletonManager( + singletonProps = handOverData ⇒ + Props(new Consumer(handOverData, queue, testActor)), + singletonName = "consumer", + terminationMessage = End)), + name = "singleton") + //#create-singleton-manager + } + + def consumer(leader: RoleName): ActorRef = + system.actorFor(RootActorPath(node(leader).address) / "user" / "singleton" / "consumer") + + def verify(leader: RoleName, msg: Int, expectedCurrent: Int): Unit = { + enterBarrier("before-" + leader.name + "-verified") + runOn(leader) { + expectMsg(RegistrationOk) + consumer(leader) ! GetCurrent + expectMsg(expectedCurrent) + } + enterBarrier(leader.name + "-active") + + runOn(controller) { + queue ! msg + // make sure it's not terminated, which would be wrong + expectNoMsg(1 second) + } + runOn(leader) { + expectMsg(msg) + } + runOn(sortedClusterRoles.filterNot(_ == leader): _*) { + expectNoMsg(1 second) + } + enterBarrier(leader.name + "-verified") + } + + def crash(roles: RoleName*): Unit = { + runOn(controller) { + queue ! 
Reset + expectMsg(ResetOk) + roles foreach { r ⇒ + log.info("Shutdown [{}]", node(r).address) + testConductor.shutdown(r, 0).await + } + } + } + + "A ClusterSingletonManager" must { + + "startup in single member cluster" in within(10 seconds) { + log.info("Sorted cluster nodes [{}]", sortedClusterRoles.map(node(_).address).mkString(", ")) + + runOn(controller) { + // watch that it is not terminated, which would indicate misbehaviour + watch(system.actorOf(Props[PointToPointChannel], "queue")) + } + enterBarrier("queue-started") + + runOn(sortedClusterRoles(5)) { + Cluster(system) join node(sortedClusterRoles(5)).address + createSingleton() + } + + verify(sortedClusterRoles.last, msg = 1, expectedCurrent = 0) + } + + "hand over when new leader joins to 1 node cluster" in within(15 seconds) { + val newLeaderRole = sortedClusterRoles(4) + runOn(newLeaderRole) { + Cluster(system) join node(sortedClusterRoles.last).address + createSingleton() + } + + verify(newLeaderRole, msg = 2, expectedCurrent = 1) + } + + "hand over when new leader joins to 2 nodes cluster" in within(15 seconds) { + val newLeaderRole = sortedClusterRoles(3) + runOn(newLeaderRole) { + Cluster(system) join node(sortedClusterRoles.last).address + createSingleton() + } + + verify(newLeaderRole, msg = 3, expectedCurrent = 2) + } + + "hand over when adding three new potential leaders to 3 nodes cluster" in within(30 seconds) { + runOn(sortedClusterRoles(2)) { + Cluster(system) join node(sortedClusterRoles(3)).address + createSingleton() + } + runOn(sortedClusterRoles(1)) { + Cluster(system) join node(sortedClusterRoles(4)).address + createSingleton() + } + runOn(sortedClusterRoles(0)) { + Cluster(system) join node(sortedClusterRoles(5)).address + createSingleton() + } + + verify(sortedClusterRoles(0), msg = 4, expectedCurrent = 3) + } + + "hand over when leader leaves in 6 nodes cluster " in within(20 seconds) { + //#test-leave + val leaveRole = sortedClusterRoles(0) + val newLeaderRole = 
sortedClusterRoles(1) + + runOn(leaveRole) { + Cluster(system) leave node(leaveRole).address + } + //#test-leave + + verify(newLeaderRole, msg = 5, expectedCurrent = 4) + + runOn(leaveRole) { + val singleton = system.actorFor("/user/singleton") + watch(singleton) + expectMsgType[Terminated].actor must be(singleton) + } + + enterBarrier("after-leave") + } + + "take over when leader crashes in 5 nodes cluster" in within(35 seconds) { + system.eventStream.publish(Mute(EventFilter.warning(pattern = ".*received dead letter from.*"))) + system.eventStream.publish(Mute(EventFilter.error(pattern = ".*Disassociated.*"))) + system.eventStream.publish(Mute(EventFilter.error(pattern = ".*Association failed.*"))) + enterBarrier("logs-muted") + + crash(sortedClusterRoles(1)) + verify(sortedClusterRoles(2), msg = 6, expectedCurrent = 0) + } + + "take over when two leaders crash in 3 nodes cluster" in within(45 seconds) { + crash(sortedClusterRoles(2), sortedClusterRoles(3)) + verify(sortedClusterRoles(4), msg = 7, expectedCurrent = 0) + } + + "take over when leader crashes in 2 nodes cluster" in within(25 seconds) { + crash(sortedClusterRoles(4)) + verify(sortedClusterRoles(5), msg = 6, expectedCurrent = 0) + } + + } +} diff --git a/akka-contrib/src/multi-jvm/scala/akka/contrib/pattern/ReliableProxySpec.scala b/akka-contrib/src/multi-jvm/scala/akka/contrib/pattern/ReliableProxySpec.scala index deef1871c2..9ddf5f4cde 100644 --- a/akka-contrib/src/multi-jvm/scala/akka/contrib/pattern/ReliableProxySpec.scala +++ b/akka-contrib/src/multi-jvm/scala/akka/contrib/pattern/ReliableProxySpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.contrib.pattern @@ -33,12 +33,13 @@ class ReliableProxySpec extends MultiNodeSpec(ReliableProxySpec) with STMultiNod import ReliableProxySpec._ import ReliableProxy._ - override def initialParticipants = 2 + override def initialParticipants = roles.size override def afterEach { runOn(local) { - testConductor.throttle(local, remote, Direction.Both, -1).await + testConductor.passThrough(local, remote, Direction.Both).await } + enterBarrier("after-each") } @volatile var target: ActorRef = system.deadLetters @@ -46,8 +47,8 @@ class ReliableProxySpec extends MultiNodeSpec(ReliableProxySpec) with STMultiNod def expectState(s: State) = expectMsg(FSM.CurrentState(proxy, s)) def expectTransition(s1: State, s2: State) = expectMsg(FSM.Transition(proxy, s1, s2)) - - def sendN(n: Int) = (1 to n) foreach (proxy ! _) + + def sendN(n: Int) = (1 to n) foreach (proxy ! _) def expectN(n: Int) = (1 to n) foreach { n ⇒ expectMsg(n); lastSender must be === target } "A ReliableProxy" must { @@ -82,6 +83,8 @@ class ReliableProxySpec extends MultiNodeSpec(ReliableProxySpec) with STMultiNod runOn(remote) { expectMsg("hello") } + + enterBarrier("initialize-done") } "forward messages in sequence" in { @@ -95,9 +98,9 @@ class ReliableProxySpec extends MultiNodeSpec(ReliableProxySpec) with STMultiNod expectN(100) } } - + enterBarrier("test1a") - + runOn(local) { sendN(100) expectTransition(Idle, Active) @@ -108,7 +111,7 @@ class ReliableProxySpec extends MultiNodeSpec(ReliableProxySpec) with STMultiNod expectN(100) } } - + enterBarrier("test1b") } @@ -121,17 +124,17 @@ class ReliableProxySpec extends MultiNodeSpec(ReliableProxySpec) with STMultiNod expectNoMsg } } - + enterBarrier("test2a") - + runOn(remote) { expectNoMsg(0 seconds) } - + enterBarrier("test2b") - + runOn(local) { - testConductor.throttle(local, remote, Direction.Send, -1).await + testConductor.passThrough(local, remote, Direction.Send).await within(5 seconds) { expectTransition(Active, Idle) } } runOn(remote) { 
@@ -139,7 +142,7 @@ class ReliableProxySpec extends MultiNodeSpec(ReliableProxySpec) with STMultiNod expectN(100) } } - + enterBarrier("test2c") } @@ -157,23 +160,29 @@ class ReliableProxySpec extends MultiNodeSpec(ReliableProxySpec) with STMultiNod expectN(100) } } - + enterBarrier("test3a") - + runOn(local) { - testConductor.throttle(local, remote, Direction.Receive, -1).await + testConductor.passThrough(local, remote, Direction.Receive).await within(5 seconds) { expectTransition(Active, Idle) } } - + enterBarrier("test3b") } - "resend across a slow link" in { + "resend across a slow outbound link" in { runOn(local) { - testConductor.throttle(local, remote, Direction.Send, rateMBit = 0.1).await + // the rateMBit value is derived from empirical studies so that it will trigger resends, + // the exact value is not important, but it must not be too large + testConductor.throttle(local, remote, Direction.Send, rateMBit = 0.02).await sendN(50) within(5 seconds) { expectTransition(Idle, Active) + // use the slow link for a while, which will trigger resends + Thread.sleep(2000) + // full speed, and it will catch up outstanding messages + testConductor.passThrough(local, remote, Direction.Send).await expectTransition(Active, Idle) } } @@ -181,16 +190,25 @@ class ReliableProxySpec extends MultiNodeSpec(ReliableProxySpec) with STMultiNod within(5 seconds) { expectN(50) } + expectNoMsg(1 second) } - - enterBarrier("test4a") - + + enterBarrier("test4") + } + + "resend across a slow inbound link" in { runOn(local) { - testConductor.throttle(local, remote, Direction.Send, rateMBit = -1).await - testConductor.throttle(local, remote, Direction.Receive, rateMBit = 0.1).await + testConductor.passThrough(local, remote, Direction.Send).await + // the rateMBit value is derived from empirical studies so that it will trigger resends, + // the exact value is not important, but it must not be too large + testConductor.throttle(local, remote, Direction.Receive, rateMBit = 0.02).await 
sendN(50) within(5 seconds) { expectTransition(Idle, Active) + // use the slow link for a while, which will trigger resends + Thread.sleep(2000) + // full speed, and it will catch up outstanding messages + testConductor.passThrough(local, remote, Direction.Receive).await expectTransition(Active, Idle) } } @@ -198,9 +216,10 @@ class ReliableProxySpec extends MultiNodeSpec(ReliableProxySpec) with STMultiNod within(1 second) { expectN(50) } + expectNoMsg(2 seconds) } - - enterBarrier("test4a") + + enterBarrier("test5") } } diff --git a/akka-contrib/src/test/java/akka/contrib/pattern/ReliableProxyTest.java b/akka-contrib/src/test/java/akka/contrib/pattern/ReliableProxyTest.java index 4ae2c20b1f..8a293bc201 100644 --- a/akka-contrib/src/test/java/akka/contrib/pattern/ReliableProxyTest.java +++ b/akka-contrib/src/test/java/akka/contrib/pattern/ReliableProxyTest.java @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.contrib.pattern; diff --git a/akka-contrib/src/test/scala/akka/contrib/pattern/ReliableProxyDocSpec.scala b/akka-contrib/src/test/scala/akka/contrib/pattern/ReliableProxyDocSpec.scala index 07c2d7af74..7e3809123b 100644 --- a/akka-contrib/src/test/scala/akka/contrib/pattern/ReliableProxyDocSpec.scala +++ b/akka-contrib/src/test/scala/akka/contrib/pattern/ReliableProxyDocSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.contrib.pattern diff --git a/akka-contrib/src/test/scala/akka/contrib/throttle/TimerBasedThrottlerSpec.scala b/akka-contrib/src/test/scala/akka/contrib/throttle/TimerBasedThrottlerSpec.scala index 7304df1448..b6963ff093 100644 --- a/akka-contrib/src/test/scala/akka/contrib/throttle/TimerBasedThrottlerSpec.scala +++ b/akka-contrib/src/test/scala/akka/contrib/throttle/TimerBasedThrottlerSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. 
*/ package akka.contrib.throttle @@ -203,4 +203,4 @@ class TimerBasedThrottlerSpec extends TestKit(ActorSystem("TimerBasedThrottlerSp } } } -} \ No newline at end of file +} diff --git a/akka-dataflow/src/main/scala/akka/dataflow/package.scala b/akka-dataflow/src/main/scala/akka/dataflow/package.scala index 31248958d1..4537668f6b 100644 --- a/akka-dataflow/src/main/scala/akka/dataflow/package.scala +++ b/akka-dataflow/src/main/scala/akka/dataflow/package.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka @@ -99,4 +99,4 @@ package object dataflow { */ final def apply()(implicit ec: ExecutionContext): T @cps[Future[Any]] = shift(future flatMap (_: T ⇒ Future[Any])) } -} \ No newline at end of file +} diff --git a/akka-dataflow/src/test/scala/akka/dataflow/DataflowSpec.scala b/akka-dataflow/src/test/scala/akka/dataflow/DataflowSpec.scala index 0543b557c3..a9999b7083 100644 --- a/akka-dataflow/src/test/scala/akka/dataflow/DataflowSpec.scala +++ b/akka-dataflow/src/test/scala/akka/dataflow/DataflowSpec.scala @@ -1,5 +1,5 @@ /** - * Copyright (C) 2009-2012 Typesafe Inc. + * Copyright (C) 2009-2013 Typesafe Inc. */ package akka.dataflow diff --git a/akka-docs/_sphinx/themes/akka/layout.html b/akka-docs/_sphinx/themes/akka/layout.html index 1e0f13bdc2..d60fe7d599 100644 --- a/akka-docs/_sphinx/themes/akka/layout.html +++ b/akka-docs/_sphinx/themes/akka/layout.html @@ -141,7 +141,7 @@