diff --git a/akka-actor-tests/src/test/java/akka/actor/JavaAPI.java b/akka-actor-tests/src/test/java/akka/actor/JavaAPI.java index 5125611498..80563b679f 100644 --- a/akka-actor-tests/src/test/java/akka/actor/JavaAPI.java +++ b/akka-actor-tests/src/test/java/akka/actor/JavaAPI.java @@ -1,3 +1,7 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ + package akka.actor; import akka.actor.ActorSystem; diff --git a/akka-actor-tests/src/test/java/akka/dispatch/JavaFutureTests.java b/akka-actor-tests/src/test/java/akka/dispatch/JavaFutureTests.java index e9ab0fccf1..c092ccceb2 100644 --- a/akka-actor-tests/src/test/java/akka/dispatch/JavaFutureTests.java +++ b/akka-actor-tests/src/test/java/akka/dispatch/JavaFutureTests.java @@ -4,7 +4,10 @@ import akka.util.Timeout; import akka.actor.ActorSystem; import akka.japi.*; -import akka.util.Duration; +import scala.concurrent.Await; +import scala.concurrent.Future; +import scala.concurrent.Promise; +import scala.concurrent.util.Duration; import akka.testkit.TestKitExtension; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -17,7 +20,7 @@ import java.util.LinkedList; import java.lang.Iterable; import java.util.concurrent.CountDownLatch; import java.util.concurrent.TimeUnit; -import static akka.japi.Util.manifest; +import static akka.japi.Util.classTag; import akka.testkit.AkkaSpec; @@ -53,7 +56,7 @@ public class JavaFutureTests { public String apply(String s) { return s + " World"; } - }); + }, system.dispatcher()); assertEquals("Hello World", Await.result(f2, timeout)); } @@ -61,14 +64,14 @@ public class JavaFutureTests { @Test public void mustBeAbleToExecuteAnOnResultCallback() throws Throwable { final CountDownLatch latch = new CountDownLatch(1); - Promise cf = Futures.promise(system.dispatcher()); - Future f = cf; + Promise cf = Futures.promise(); + Future f = cf.future(); f.onSuccess(new OnSuccess() { public void onSuccess(String result) { if (result.equals("foo")) latch.countDown(); } - }); + }, system.dispatcher()); cf.success("foo"); assertTrue(latch.await(5000, TimeUnit.MILLISECONDS)); @@ -78,14 +81,14 @@ public class JavaFutureTests { @Test public void mustBeAbleToExecuteAnOnExceptionCallback() throws Throwable { final CountDownLatch latch = new CountDownLatch(1); - Promise cf = Futures.promise(system.dispatcher()); - Future f = cf; + Promise cf = Futures.promise(); + Future f = cf.future(); f.onFailure(new OnFailure() { public void onFailure(Throwable t) { if (t instanceof NullPointerException) latch.countDown(); } - }); + }, system.dispatcher()); Throwable exception = new NullPointerException(); cf.failure(exception); @@ -96,13 +99,13 @@ public class JavaFutureTests { @Test public void mustBeAbleToExecuteAnOnCompleteCallback() throws Throwable { final CountDownLatch latch = new CountDownLatch(1); - Promise cf = Futures.promise(system.dispatcher()); - Future f = cf; + Promise cf = Futures.promise(); + Future f = cf.future(); f.onComplete(new OnComplete() { public void onComplete(Throwable t, String r) { latch.countDown(); } - }); + }, system.dispatcher()); cf.success("foo"); assertTrue(latch.await(5000, TimeUnit.MILLISECONDS)); @@ -112,13 +115,13 @@ public class JavaFutureTests { @Test public void mustBeAbleToForeachAFuture() throws Throwable { final CountDownLatch latch = new CountDownLatch(1); - Promise cf = Futures.promise(system.dispatcher()); - Future f = cf; + Promise cf = Futures.promise(); + Future f = cf.future(); f.foreach(new Foreach() { public void each(String future) { latch.countDown(); } - }); + 
},system.dispatcher()); cf.success("foo"); assertTrue(latch.await(5000, TimeUnit.MILLISECONDS)); @@ -128,18 +131,18 @@ public class JavaFutureTests { @Test public void mustBeAbleToFlatMapAFuture() throws Throwable { final CountDownLatch latch = new CountDownLatch(1); - Promise cf = Futures.promise(system.dispatcher()); + Promise cf = Futures.promise(); cf.success("1000"); - Future f = cf; + Future f = cf.future(); Future r = f.flatMap(new Mapper>() { public Future checkedApply(String r) throws Throwable { if (false) throw new IOException("Just here to make sure this compiles."); latch.countDown(); - Promise cf = Futures.promise(system.dispatcher()); + Promise cf = Futures.promise(); cf.success(Integer.parseInt(r)); - return cf; + return cf.future(); } - }); + }, system.dispatcher()); assertEquals(Await.result(f, timeout), "1000"); assertEquals(Await.result(r, timeout).intValue(), 1000); @@ -149,14 +152,14 @@ public class JavaFutureTests { @Test public void mustBeAbleToFilterAFuture() throws Throwable { final CountDownLatch latch = new CountDownLatch(1); - Promise cf = Futures.promise(system.dispatcher()); - Future f = cf; + Promise cf = Futures.promise(); + Future f = cf.future(); Future r = f.filter(Filter.filterOf(new Function() { public Boolean apply(String r) { latch.countDown(); return r.equals("foo"); } - })); + }), system.dispatcher()); cf.success("foo"); assertTrue(latch.await(5000, TimeUnit.MILLISECONDS)); @@ -277,27 +280,27 @@ public class JavaFutureTests { @Test public void blockMustBeCallable() throws Exception { - Promise p = Futures.promise(system.dispatcher()); + Promise p = Futures.promise(); Duration d = Duration.create(1, TimeUnit.SECONDS); p.success("foo"); - Await.ready(p, d); - assertEquals(Await.result(p, d), "foo"); + Await.ready(p.future(), d); + assertEquals(Await.result(p.future(), d), "foo"); } @Test public void mapToMustBeCallable() throws Exception { - Promise p = Futures.promise(system.dispatcher()); - Future f = p.future().mapTo(manifest(String.class)); + Promise p = Futures.promise(); + Future f = p.future().mapTo(classTag(String.class)); Duration d = Duration.create(1, TimeUnit.SECONDS); p.success("foo"); - Await.ready(p, d); - assertEquals(Await.result(p, d), "foo"); + Await.ready(p.future(), d); + assertEquals(Await.result(p.future(), d), "foo"); } @Test public void recoverToMustBeCallable() throws Exception { final IllegalStateException fail = new IllegalStateException("OHNOES"); - Promise p = Futures.promise(system.dispatcher()); + Promise p = Futures.promise(); Future f = p.future().recover(new Recover() { public Object recover(Throwable t) throws Throwable { if (t == fail) @@ -305,7 +308,7 @@ public class JavaFutureTests { else throw t; } - }); + }, system.dispatcher()); Duration d = Duration.create(1, TimeUnit.SECONDS); p.failure(fail); assertEquals(Await.result(f, d), "foo"); @@ -314,15 +317,15 @@ public class JavaFutureTests { @Test public void recoverWithToMustBeCallable() throws Exception{ final IllegalStateException fail = new IllegalStateException("OHNOES"); - Promise p = Futures.promise(system.dispatcher()); + Promise p = Futures.promise(); Future f = p.future().recoverWith(new Recover>() { public Future recover(Throwable t) throws Throwable { if (t == fail) - return Futures. 
successful("foo", system.dispatcher()).future(); + return Futures.successful("foo"); else throw t; } - }); + }, system.dispatcher()); Duration d = Duration.create(1, TimeUnit.SECONDS); p.failure(fail); assertEquals(Await.result(f, d), "foo"); diff --git a/akka-actor-tests/src/test/java/akka/util/JavaDuration.java b/akka-actor-tests/src/test/java/akka/util/JavaDuration.java index 835a0301c7..0cbcea80d4 100644 --- a/akka-actor-tests/src/test/java/akka/util/JavaDuration.java +++ b/akka-actor-tests/src/test/java/akka/util/JavaDuration.java @@ -4,6 +4,7 @@ package akka.util; import org.junit.Test; +import scala.concurrent.util.Duration; public class JavaDuration { diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorConfigurationVerificationSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorConfigurationVerificationSpec.scala index 5752bd7806..6532b5e5cd 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ActorConfigurationVerificationSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ActorConfigurationVerificationSpec.scala @@ -3,10 +3,12 @@ */ package akka.actor +import language.postfixOps + import akka.testkit._ import akka.testkit.DefaultTimeout import akka.testkit.TestEvent._ -import akka.util.duration._ +import scala.concurrent.util.duration._ import akka.routing._ import org.scalatest.BeforeAndAfterEach import akka.ConfigurationException diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorFireForgetRequestReplySpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorFireForgetRequestReplySpec.scala index 69cf463276..42018823bc 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ActorFireForgetRequestReplySpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ActorFireForgetRequestReplySpec.scala @@ -6,8 +6,8 @@ package akka.actor import akka.testkit._ import org.scalatest.BeforeAndAfterEach -import akka.util.duration._ -import akka.dispatch.Await +import scala.concurrent.util.duration._ +import scala.concurrent.Await import akka.pattern.ask object ActorFireForgetRequestReplySpec { @@ -87,7 +87,7 @@ class ActorFireForgetRequestReplySpec extends AkkaSpec with BeforeAndAfterEach w actor.isTerminated must be(false) actor ! "Die" state.finished.await - 1.second.dilated.sleep() + Thread.sleep(1.second.dilated.toMillis) actor.isTerminated must be(true) system.stop(supervisor) } diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorLifeCycleSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorLifeCycleSpec.scala index d87aaaaee6..40907e74a0 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ActorLifeCycleSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ActorLifeCycleSpec.scala @@ -4,14 +4,16 @@ package akka.actor +import language.postfixOps + import org.scalatest.BeforeAndAfterEach import org.scalatest.matchers.MustMatchers import akka.actor.Actor._ import akka.testkit._ -import akka.util.duration._ +import scala.concurrent.util.duration._ import java.util.concurrent.atomic._ -import akka.dispatch.Await +import scala.concurrent.Await import akka.pattern.ask import java.util.UUID.{ randomUUID ⇒ newUuid } @@ -116,6 +118,30 @@ class ActorLifeCycleSpec extends AkkaSpec with BeforeAndAfterEach with ImplicitS expectNoMsg(1 seconds) system.stop(supervisor) } + + "clear the behavior stack upon restart" in { + case class Become(recv: ActorContext ⇒ Receive) + val a = system.actorOf(Props(new Actor { + def receive = { + case Become(beh) ⇒ context.become(beh(context), discardOld = false); sender ! "ok" + case x ⇒ sender ! 
42 + } + })) + a ! "hello" + expectMsg(42) + a ! Become(ctx ⇒ { + case "fail" ⇒ throw new RuntimeException("buh") + case x ⇒ ctx.sender ! 43 + }) + expectMsg("ok") + a ! "hello" + expectMsg(43) + EventFilter[RuntimeException]("buh", occurrences = 1) intercept { + a ! "fail" + } + a ! "hello" + expectMsg(42) + } } } diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorLookupSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorLookupSpec.scala index 299cc16679..2d49ba884d 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ActorLookupSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ActorLookupSpec.scala @@ -3,9 +3,11 @@ */ package akka.actor +import language.postfixOps + import akka.testkit._ -import akka.util.duration._ -import akka.dispatch.Await +import scala.concurrent.util.duration._ +import scala.concurrent.Await import akka.pattern.ask import java.net.MalformedURLException diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorRefSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorRefSpec.scala index a003d25757..0f8dc392a3 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ActorRefSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ActorRefSpec.scala @@ -4,15 +4,17 @@ package akka.actor +import language.postfixOps + import org.scalatest.WordSpec import org.scalatest.matchers.MustMatchers import akka.testkit._ import akka.util.Timeout -import akka.util.duration._ +import scala.concurrent.util.duration._ +import scala.concurrent.Await import java.lang.IllegalStateException -import java.util.concurrent.{ CountDownLatch, TimeUnit } -import akka.dispatch.{ Await, DefaultPromise, Promise, Future } +import scala.concurrent.Promise import akka.pattern.ask import akka.serialization.JavaSerializer @@ -52,7 +54,7 @@ object ActorRefSpec { } private def work { - 1.second.dilated.sleep + Thread.sleep(1.second.dilated.toMillis) } } @@ -121,7 +123,7 @@ class ActorRefSpec extends AkkaSpec with DefaultTimeout { to.success(r) r } catch { - case e ⇒ + case e: Throwable ⇒ to.failure(e) throw e } @@ -129,7 +131,7 @@ class ActorRefSpec extends AkkaSpec with DefaultTimeout { def wrap[T](f: Promise[Actor] ⇒ T): T = { val result = Promise[Actor]() val r = f(result) - Await.result(result, 1 minute) + Await.result(result.future, 1 minute) r } diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorSystemSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorSystemSpec.scala index e659fbb455..68485fc57f 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ActorSystemSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ActorSystemSpec.scala @@ -3,16 +3,18 @@ */ package akka.actor +import language.postfixOps + import akka.testkit._ import org.scalatest.junit.JUnitSuite import com.typesafe.config.ConfigFactory -import akka.dispatch.Await -import akka.util.duration._ +import scala.concurrent.Await +import scala.concurrent.util.duration._ import scala.collection.JavaConverters import java.util.concurrent.{ TimeUnit, RejectedExecutionException, CountDownLatch, ConcurrentLinkedQueue } import akka.pattern.ask import akka.util.Timeout -import akka.dispatch.Future +import scala.concurrent.Future class JavaExtensionSpec extends JavaExtension with JUnitSuite @@ -103,7 +105,7 @@ class ActorSystemSpec extends AkkaSpec("""akka.extensions = ["akka.actor.TestExt for (i ← 1 to count) { system2.registerOnTermination { - (i % 3).millis.dilated.sleep() + Thread.sleep((i % 3).millis.dilated.toMillis) result add i latch.countDown() } @@ -125,7 +127,7 @@ 
class ActorSystemSpec extends AkkaSpec("""akka.extensions = ["akka.actor.TestExt var callbackWasRun = false system2.registerOnTermination { - 50.millis.dilated.sleep() + Thread.sleep(50.millis.dilated.toMillis) callbackWasRun = true } diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorTimeoutSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorTimeoutSpec.scala index 61f71ad994..6017606fa0 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ActorTimeoutSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ActorTimeoutSpec.scala @@ -3,9 +3,9 @@ */ package akka.actor -import akka.util.duration._ +import scala.concurrent.util.duration._ import akka.testkit._ -import akka.dispatch.Await +import scala.concurrent.Await import akka.util.Timeout import akka.pattern.{ ask, AskTimeoutException } diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorWithBoundedStashSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorWithBoundedStashSpec.scala index 33283b18cf..a836572ad2 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ActorWithBoundedStashSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ActorWithBoundedStashSpec.scala @@ -3,12 +3,15 @@ */ package akka.actor +import language.postfixOps + import akka.testkit._ import akka.testkit.DefaultTimeout import akka.testkit.TestEvent._ -import akka.dispatch.{ Await, BoundedDequeBasedMailbox } +import akka.dispatch.BoundedDequeBasedMailbox import akka.pattern.ask -import akka.util.duration._ +import scala.concurrent.Await +import scala.concurrent.util.duration._ import akka.actor.ActorSystem.Settings import com.typesafe.config.{ Config, ConfigFactory } import org.scalatest.BeforeAndAfterEach diff --git a/akka-actor-tests/src/test/scala/akka/actor/ActorWithStashSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ActorWithStashSpec.scala index 524913b01d..5913000215 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ActorWithStashSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ActorWithStashSpec.scala @@ -3,12 +3,14 @@ */ package akka.actor +import language.postfixOps + import akka.testkit._ import akka.testkit.DefaultTimeout import akka.testkit.TestEvent._ -import akka.dispatch.Await +import scala.concurrent.Await import akka.pattern.ask -import akka.util.duration._ +import scala.concurrent.util.duration._ import com.typesafe.config.{ Config, ConfigFactory } import org.scalatest.BeforeAndAfterEach import org.scalatest.junit.JUnitSuite diff --git a/akka-actor-tests/src/test/scala/akka/actor/ConsistencySpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ConsistencySpec.scala index 6f8639f4a4..8dbd9f6b4f 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ConsistencySpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ConsistencySpec.scala @@ -1,8 +1,10 @@ package akka.actor +import language.postfixOps + import akka.testkit.AkkaSpec import akka.dispatch.UnboundedMailbox -import akka.util.duration._ +import scala.concurrent.util.duration._ object ConsistencySpec { val config = """ diff --git a/akka-actor-tests/src/test/scala/akka/actor/DeathWatchSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/DeathWatchSpec.scala index 8a21f5f070..38b51a4ad4 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/DeathWatchSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/DeathWatchSpec.scala @@ -4,10 +4,12 @@ package akka.actor +import language.postfixOps + import akka.testkit._ -import akka.util.duration._ +import scala.concurrent.util.duration._ import 
java.util.concurrent.atomic._ -import akka.dispatch.Await +import scala.concurrent.Await import akka.pattern.ask @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) diff --git a/akka-actor-tests/src/test/scala/akka/actor/DeployerSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/DeployerSpec.scala index 9b1c0b0bef..1aef438627 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/DeployerSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/DeployerSpec.scala @@ -4,11 +4,13 @@ package akka.actor +import language.postfixOps + import akka.testkit.AkkaSpec import com.typesafe.config.ConfigFactory import com.typesafe.config.ConfigParseOptions import akka.routing._ -import akka.util.duration._ +import scala.concurrent.util.duration._ object DeployerSpec { val deployerConf = ConfigFactory.parseString(""" diff --git a/akka-actor-tests/src/test/scala/akka/actor/FSMActorSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/FSMActorSpec.scala index cc98a23f1f..c0c6bf0f06 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/FSMActorSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/FSMActorSpec.scala @@ -4,14 +4,17 @@ package akka.actor +import language.postfixOps + import org.scalatest.{ BeforeAndAfterAll, BeforeAndAfterEach } import akka.testkit._ import TestEvent.Mute -import akka.util.duration._ +import scala.concurrent.util.duration._ import akka.event._ import com.typesafe.config.ConfigFactory -import akka.dispatch.Await -import akka.util.{ Timeout, Duration } +import scala.concurrent.Await +import akka.util.Timeout +import scala.concurrent.util.Duration object FSMActorSpec { val timeout = Timeout(2 seconds) diff --git a/akka-actor-tests/src/test/scala/akka/actor/FSMTimingSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/FSMTimingSpec.scala index 76d8df1e92..1fa9a1ff03 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/FSMTimingSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/FSMTimingSpec.scala @@ -4,9 +4,11 @@ package akka.actor +import language.postfixOps + import akka.testkit._ -import akka.util.Duration -import akka.util.duration._ +import scala.concurrent.util.Duration +import scala.concurrent.util.duration._ import akka.event.Logging @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) @@ -145,7 +147,7 @@ object FSMTimingSpec { } def resume(actorRef: ActorRef): Unit = actorRef match { - case l: ActorRefWithCell ⇒ l.resume() + case l: ActorRefWithCell ⇒ l.resume(inResponseToFailure = false) case _ ⇒ } diff --git a/akka-actor-tests/src/test/scala/akka/actor/FSMTransitionSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/FSMTransitionSpec.scala index 691be63a0b..13fb4a1238 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/FSMTransitionSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/FSMTransitionSpec.scala @@ -3,9 +3,11 @@ */ package akka.actor +import language.postfixOps + import akka.testkit._ -import akka.util.duration._ -import akka.util.Duration +import scala.concurrent.util.duration._ +import scala.concurrent.util.Duration object FSMTransitionSpec { diff --git a/akka-actor-tests/src/test/scala/akka/actor/ForwardActorSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ForwardActorSpec.scala index 2566e5a955..9e662b5535 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ForwardActorSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ForwardActorSpec.scala @@ -4,12 +4,14 @@ package akka.actor +import language.postfixOps + import akka.testkit._ -import akka.util.duration._ 
-import Actor._ -import akka.util.Duration -import akka.dispatch.Await -import akka.pattern.ask +import scala.concurrent.util.duration._ +import akka.actor.Actor._ +import scala.concurrent.util.Duration +import scala.concurrent.Await +import akka.pattern.{ ask, pipe } object ForwardActorSpec { val ExpectedMessage = "FOO" @@ -31,12 +33,10 @@ object ForwardActorSpec { @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) class ForwardActorSpec extends AkkaSpec { import ForwardActorSpec._ - + implicit val ec = system.dispatcher "A Forward Actor" must { "forward actor reference when invoking forward on tell" in { - val latch = new TestLatch(1) - val replyTo = system.actorOf(Props(new Actor { def receive = { case ExpectedMessage ⇒ testActor ! ExpectedMessage } })) val chain = createForwardingChain(system) @@ -47,7 +47,7 @@ class ForwardActorSpec extends AkkaSpec { "forward actor reference when invoking forward on ask" in { val chain = createForwardingChain(system) - chain.ask(ExpectedMessage)(5 seconds) onSuccess { case ExpectedMessage ⇒ testActor ! ExpectedMessage } + chain.ask(ExpectedMessage)(5 seconds) pipeTo testActor expectMsg(5 seconds, ExpectedMessage) } } diff --git a/akka-actor-tests/src/test/scala/akka/actor/IOActor.scala b/akka-actor-tests/src/test/scala/akka/actor/IOActor.scala index b337455ac2..9d2af4ec7f 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/IOActor.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/IOActor.scala @@ -4,11 +4,15 @@ package akka.actor -import akka.util.{ ByteString, Duration, Deadline } -import akka.util.duration._ +import language.postfixOps + +import akka.util.ByteString +import scala.concurrent.{ ExecutionContext, Await, Future, Promise } +import scala.concurrent.util.{ Duration, Deadline } +import scala.concurrent.util.duration._ import scala.util.continuations._ import akka.testkit._ -import akka.dispatch.{ Await, Future, Promise, ExecutionContext, MessageDispatcher } +import akka.dispatch.MessageDispatcher import java.net.{ SocketAddress } import akka.pattern.ask @@ -242,7 +246,7 @@ class IOActorSpec extends AkkaSpec with DefaultTimeout { */ def retry[T](count: Option[Int] = None, timeout: Option[Duration] = None, delay: Option[Duration] = Some(100 millis), filter: Option[Throwable ⇒ Boolean] = None)(future: ⇒ Future[T])(implicit executor: ExecutionContext): Future[T] = { - val promise = Promise[T]()(executor) + val promise = Promise[T]() val timer: Option[Deadline] = timeout match { case Some(duration) ⇒ Some(duration fromNow) @@ -267,15 +271,16 @@ class IOActorSpec extends AkkaSpec with DefaultTimeout { run(0) - promise + promise.future } "an IO Actor" must { + implicit val ec = system.dispatcher "run echo server" in { filterException[java.net.ConnectException] { val addressPromise = Promise[SocketAddress]() val server = system.actorOf(Props(new SimpleEchoServer(addressPromise))) - val address = Await.result(addressPromise, TestLatch.DefaultTimeout) + val address = Await.result(addressPromise.future, TestLatch.DefaultTimeout) val client = system.actorOf(Props(new SimpleEchoClient(address))) val f1 = retry() { client ? ByteString("Hello World!1") } val f2 = retry() { client ? 
ByteString("Hello World!2") } @@ -292,7 +297,7 @@ class IOActorSpec extends AkkaSpec with DefaultTimeout { filterException[java.net.ConnectException] { val addressPromise = Promise[SocketAddress]() val server = system.actorOf(Props(new SimpleEchoServer(addressPromise))) - val address = Await.result(addressPromise, TestLatch.DefaultTimeout) + val address = Await.result(addressPromise.future, TestLatch.DefaultTimeout) val client = system.actorOf(Props(new SimpleEchoClient(address))) val list = List.range(0, 100) val f = Future.traverse(list)(i ⇒ retry() { client ? ByteString(i.toString) }) @@ -306,7 +311,7 @@ class IOActorSpec extends AkkaSpec with DefaultTimeout { filterException[java.net.ConnectException] { val addressPromise = Promise[SocketAddress]() val server = system.actorOf(Props(new KVStore(addressPromise))) - val address = Await.result(addressPromise, TestLatch.DefaultTimeout) + val address = Await.result(addressPromise.future, TestLatch.DefaultTimeout) val client1 = system.actorOf(Props(new KVClient(address))) val client2 = system.actorOf(Props(new KVClient(address))) val f1 = retry() { client1 ? KVSet("hello", "World") } diff --git a/akka-actor-tests/src/test/scala/akka/actor/LocalActorRefProviderSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/LocalActorRefProviderSpec.scala index 5ebd8ff565..426f940b5d 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/LocalActorRefProviderSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/LocalActorRefProviderSpec.scala @@ -4,10 +4,13 @@ package akka.actor +import language.postfixOps + import akka.testkit._ -import akka.util.duration._ +import scala.concurrent.Await +import scala.concurrent.util.duration._ import akka.util.Timeout -import akka.dispatch.{ Await, Future } +import scala.concurrent.Future object LocalActorRefProviderSpec { val config = """ @@ -38,7 +41,7 @@ class LocalActorRefProviderSpec extends AkkaSpec(LocalActorRefProviderSpec.confi } "An ActorRefFactory" must { - + implicit val ec = system.dispatcher "only create one instance of an actor with a specific address in a concurrent environment" in { val impl = system.asInstanceOf[ActorSystemImpl] val provider = impl.provider diff --git a/akka-actor-tests/src/test/scala/akka/actor/ReceiveTimeoutSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/ReceiveTimeoutSpec.scala index 09fe9c103f..dc5229ae41 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/ReceiveTimeoutSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/ReceiveTimeoutSpec.scala @@ -4,11 +4,13 @@ package akka.actor +import language.postfixOps + import akka.testkit._ -import akka.util.duration._ +import scala.concurrent.util.duration._ import java.util.concurrent.atomic.AtomicInteger -import akka.dispatch.Await +import scala.concurrent.Await import java.util.concurrent.TimeoutException @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) diff --git a/akka-actor-tests/src/test/scala/akka/actor/RestartStrategySpec.scala b/akka-actor-tests/src/test/scala/akka/actor/RestartStrategySpec.scala index 8d114bc396..55e87b75da 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/RestartStrategySpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/RestartStrategySpec.scala @@ -4,17 +4,19 @@ package akka.actor +import language.postfixOps + import java.lang.Thread.sleep import org.scalatest.BeforeAndAfterAll -import akka.dispatch.Await +import scala.concurrent.Await import akka.testkit.TestEvent._ import akka.testkit.EventFilter import java.util.concurrent.{ TimeUnit, 
CountDownLatch } import akka.testkit.AkkaSpec import akka.testkit.DefaultTimeout import akka.testkit.TestLatch -import akka.util.duration._ -import akka.util.Duration +import scala.concurrent.util.duration._ +import scala.concurrent.util.Duration import akka.pattern.ask @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) diff --git a/akka-actor-tests/src/test/scala/akka/actor/SchedulerSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/SchedulerSpec.scala index c67bcb44af..7cbbeb4164 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/SchedulerSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/SchedulerSpec.scala @@ -1,10 +1,12 @@ package akka.actor +import language.postfixOps + import org.scalatest.BeforeAndAfterEach -import akka.util.duration._ +import scala.concurrent.util.duration._ import java.util.concurrent.{ CountDownLatch, ConcurrentLinkedQueue, TimeUnit } import akka.testkit._ -import akka.dispatch.Await +import scala.concurrent.Await import akka.pattern.ask import java.util.concurrent.atomic.AtomicInteger @@ -113,9 +115,9 @@ class SchedulerSpec extends AkkaSpec with BeforeAndAfterEach with DefaultTimeout val timeout = collectCancellable(system.scheduler.schedule(initialDelay, delay) { ticks.incrementAndGet() }) - 10.milliseconds.dilated.sleep() + Thread.sleep(10.milliseconds.dilated.toMillis) timeout.cancel() - (initialDelay + 100.milliseconds.dilated).sleep() + Thread.sleep((initialDelay + 100.milliseconds.dilated).toMillis) ticks.get must be(0) } @@ -128,9 +130,9 @@ class SchedulerSpec extends AkkaSpec with BeforeAndAfterEach with DefaultTimeout val timeout = collectCancellable(system.scheduler.schedule(initialDelay, delay) { ticks.incrementAndGet() }) - (initialDelay + 100.milliseconds.dilated).sleep() + Thread.sleep((initialDelay + 100.milliseconds.dilated).toMillis) timeout.cancel() - (delay + 100.milliseconds.dilated).sleep() + Thread.sleep((delay + 100.milliseconds.dilated).toMillis) ticks.get must be(1) } diff --git a/akka-actor-tests/src/test/scala/akka/actor/SupervisorHierarchySpec.scala b/akka-actor-tests/src/test/scala/akka/actor/SupervisorHierarchySpec.scala index 62752d8052..7a73d6acce 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/SupervisorHierarchySpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/SupervisorHierarchySpec.scala @@ -4,12 +4,26 @@ package akka.actor -import akka.testkit._ +import language.postfixOps + import java.util.concurrent.{ TimeUnit, CountDownLatch } -import akka.dispatch.Await + +import scala.concurrent.Await +import scala.concurrent.util.Duration +import scala.concurrent.util.duration.intToDurationInt +import scala.math.BigInt.int2bigInt +import scala.util.Random +import scala.util.control.NoStackTrace + +import com.typesafe.config.{ ConfigFactory, Config } + +import SupervisorStrategy.{ Resume, Restart, Directive } +import akka.actor.SupervisorStrategy.seqThrowable2Decider +import akka.dispatch.{ MessageDispatcher, DispatcherPrerequisites, DispatcherConfigurator, Dispatcher } import akka.pattern.ask -import akka.util.Duration -import akka.util.duration._ +import akka.testkit.{ ImplicitSender, EventFilter, DefaultTimeout, AkkaSpec } +import akka.testkit.{ filterException, duration2TestDuration, TestLatch } +import akka.testkit.TestEvent.Mute object SupervisorHierarchySpec { class FireWorkerException(msg: String) extends Exception(msg) @@ -29,10 +43,361 @@ object SupervisorHierarchySpec { countDown.countDown() } } + + class Resumer extends Actor { + override def supervisorStrategy = 
OneForOneStrategy() { case _ ⇒ SupervisorStrategy.Resume } + def receive = { + case "spawn" ⇒ sender ! context.actorOf(Props[Resumer]) + case "fail" ⇒ throw new Exception("expected") + case "ping" ⇒ sender ! "pong" + } + } + + case class Event(msg: Any) { val time: Long = System.nanoTime } + case class ErrorLog(msg: String, log: Vector[Event]) + case class Failure(directive: Directive, log: Vector[Event]) extends RuntimeException with NoStackTrace { + override def toString = "Failure(" + directive + ")" + } + val strategy = OneForOneStrategy() { case Failure(directive, _) ⇒ directive } + + val config = ConfigFactory.parseString(""" + hierarchy { + type = "akka.actor.SupervisorHierarchySpec$MyDispatcherConfigurator" + } + akka.loglevel = INFO + akka.actor.debug.fsm = on + """) + + class MyDispatcherConfigurator(config: Config, prerequisites: DispatcherPrerequisites) + extends DispatcherConfigurator(config, prerequisites) { + + private val instance: MessageDispatcher = + new Dispatcher(prerequisites, + config.getString("id"), + config.getInt("throughput"), + Duration(config.getNanoseconds("throughput-deadline-time"), TimeUnit.NANOSECONDS), + mailboxType, + configureExecutor(), + Duration(config.getMilliseconds("shutdown-timeout"), TimeUnit.MILLISECONDS)) { + + override def suspend(cell: ActorCell): Unit = { + val a = cell.actor.asInstanceOf[Hierarchy] + a.log :+= Event("suspended") + super.suspend(cell) + } + + override def resume(cell: ActorCell): Unit = { + val a = cell.actor.asInstanceOf[Hierarchy] + a.log :+= Event("resumed") + super.resume(cell) + } + + } + + override def dispatcher(): MessageDispatcher = instance + } + + class Hierarchy(depth: Int, breadth: Int, listener: ActorRef) extends Actor { + + override def preStart { + if (depth > 1) + for (_ ← 1 to breadth) + context.watch(context.actorOf(Props(new Hierarchy(depth - 1, breadth, listener)).withDispatcher("hierarchy"))) + listener ! self + } + override def postRestart(cause: Throwable) { + cause match { + case Failure(_, l) ⇒ log = l + } + log :+= Event("restarted") + } + + override def supervisorStrategy = strategy + override def preRestart(cause: Throwable, msg: Option[Any]): Unit = { + // do not scrap children + } + + override def postStop { + if (failed || suspended) { + listener ! ErrorLog("not resumed (" + failed + ", " + suspended + ")", log) + } + } + + var failed = false + var suspended = false + var log = Vector.empty[Event] + def check(msg: Any) = { + suspended = false + log :+= Event(msg) + if (failed) { + listener ! ErrorLog("processing message while failed", log) + failed = false + context stop self + } + } + + def receive = new Receive { + val handler: Receive = { + case f @ Failure(Resume, _) ⇒ suspended = true; throw f.copy(log = log) + case f: Failure ⇒ failed = true; throw f.copy(log = log) + case "ping" ⇒ Thread.sleep((Random.nextFloat * 1.03).toLong); sender ! "pong" + case Terminated(_) ⇒ listener ! 
ErrorLog("terminating", log); context stop self + } + override def isDefinedAt(msg: Any) = handler.isDefinedAt(msg) + override def apply(msg: Any) = { check(msg); handler(msg) } + } + } + + case class Work(n: Int) + + sealed trait Action + case class Ping(ref: ActorRef) extends Action + case class Fail(ref: ActorRef, directive: Directive) extends Action + + sealed trait State + case object Idle extends State + case object Init extends State + case object Stress extends State + case object Finishing extends State + case object LastPing extends State + case object Stopping extends State + case object Failed extends State + + /* + * This stress test will construct a supervision hierarchy of configurable + * depth and breadth and then randomly fail and check its actors. The actors + * perform certain checks internally (verifying that they do not run when + * suspended, for example), and they are checked for health by the test + * procedure. + * + * Execution happens in phases (which is the reason for FSM): + * + * Idle: + * - upon reception of Init message, construct hierary and go to Init state + * + * Init: + * - receive refs of all contained actors + * + * Stress: + * - deal out actions (Fail or "ping"), keeping the hierarchy busy + * - whenever all actors are in the "pinged" list (i.e. have not yet + * answered with a "pong"), delay processing of the next Work() by + * 100 millis + * - when receiving a Work() while all actors are "pinged", stop the + * hierarchy and go to the Stopping state + * + * Finishing: + * - after dealing out the last action, wait for the outstanding "pong" + * messages + * - when last "pong" is received, goto LastPing state + * - upon state timeout, stop the hierarchy and go to the Failed state + * + * LastPing: + * - upon entering this state, send a "ping" to all actors + * - when last "pong" is received, goto Stopping state + * - upon state timeout, stop the hierarchy and go to the Failed state + * + * Stopping: + * - upon entering this state, stop the hierarchy + * - upon termination of the hierarchy send back successful result + * + * Whenever an ErrorLog is received, goto Failed state + * + * Failed: + * - accumulate ErrorLog messages + * - upon termination of the hierarchy send back failed result and print + * the logs, merged and in chronological order. + * + * TODO RK: also test Stop directive, and keep a complete list of all + * actors ever created, then verify after stop()ping the hierarchy that + * all are terminated, transfer them to a WeakHashMap and verify that + * they are indeed GCed + * + * TODO RK: make hierarchy construction stochastic so that it includes + * different breadth (including the degenerate breadth-1 case). 
+ * + * TODO RK: also test Escalate by adding an exception with a `var depth` + * which gets decremented within the supervisor and gets handled when zero + * is reached (Restart resolution) + * + * TODO RK: also test exceptions during recreate + * + * TODO RK: also test recreate including terminating children + * + * TODO RK: also verify that preRestart is not called more than once per instance + */ + + class StressTest(testActor: ActorRef, depth: Int, breadth: Int) extends Actor with LoggingFSM[State, Null] { + import context.system + + override def supervisorStrategy = strategy + + var children = Vector.empty[ActorRef] + var idleChildren = Vector.empty[ActorRef] + var pingChildren = Set.empty[ActorRef] + + val nextJob = Iterator.continually(Random.nextFloat match { + case x if x >= 0.5 ⇒ + // ping one child + val pick = ((x - 0.5) * 2 * idleChildren.size).toInt + val ref = idleChildren(pick) + idleChildren = idleChildren.take(pick) ++ idleChildren.drop(pick + 1) + pingChildren += ref + Ping(ref) + case x ⇒ + // fail one child + val pick = ((if (x >= 0.25) x - 0.25 else x) * 4 * children.size).toInt + Fail(children(pick), if (x > 0.25) Restart else Resume) + }) + + val familySize = ((1 - BigInt(breadth).pow(depth)) / (1 - breadth)).toInt + var hierarchy: ActorRef = _ + + override def preRestart(cause: Throwable, msg: Option[Any]) { + throw new ActorKilledException("I want to DIE") + } + + override def postRestart(cause: Throwable) { + throw new ActorKilledException("I said I wanted to DIE, dammit!") + } + + override def postStop { + testActor ! "stressTestStopped" + } + + startWith(Idle, null) + + when(Idle) { + case Event(Init, _) ⇒ + hierarchy = context.watch(context.actorOf(Props(new Hierarchy(depth, breadth, self)).withDispatcher("hierarchy"))) + setTimer("phase", StateTimeout, 5 seconds, false) + goto(Init) + } + + when(Init) { + case Event(ref: ActorRef, _) ⇒ + if (idleChildren.nonEmpty || pingChildren.nonEmpty) + throw new IllegalStateException("received unexpected child " + children.size) + children :+= ref + if (children.size == familySize) { + idleChildren = children + goto(Stress) + } else stay + case Event(StateTimeout, _) ⇒ + testActor ! "only got %d out of %d refs".format(children.size, familySize) + stop() + } + + onTransition { + case Init -> Stress ⇒ + self ! Work(familySize * 1000) + // set timeout for completion of the whole test (i.e. including Finishing and Stopping) + setTimer("phase", StateTimeout, 60 seconds, false) + } + + val workSchedule = 250.millis + + when(Stress) { + case Event(w: Work, _) if idleChildren.isEmpty ⇒ + context stop hierarchy + goto(Failed) + case Event(Work(x), _) if x > 0 ⇒ + nextJob.next match { + case Ping(ref) ⇒ ref ! "ping" + case Fail(ref, dir) ⇒ ref ! Failure(dir, Vector.empty) + } + if (idleChildren.nonEmpty) self ! Work(x - 1) + else context.system.scheduler.scheduleOnce(workSchedule, self, Work(x - 1)) + stay + case Event(Work(_), _) ⇒ if (pingChildren.isEmpty) goto(LastPing) else goto(Finishing) + case Event("pong", _) ⇒ + pingChildren -= sender + idleChildren :+= sender + stay + } + + when(Finishing) { + case Event("pong", _) ⇒ + pingChildren -= sender + idleChildren :+= sender + if (pingChildren.isEmpty) goto(LastPing) else stay + } + + onTransition { + case _ -> LastPing ⇒ + idleChildren foreach (_ ! 
"ping") + pingChildren ++= idleChildren + idleChildren = Vector.empty + } + + when(LastPing) { + case Event("pong", _) ⇒ + pingChildren -= sender + idleChildren :+= sender + if (pingChildren.isEmpty) goto(Stopping) else stay + } + + onTransition { + case _ -> Stopping ⇒ context stop hierarchy + } + + when(Stopping, stateTimeout = 5 seconds) { + case Event(Terminated(r), _) if r == hierarchy ⇒ + testActor ! "stressTestSuccessful" + stop + case Event(StateTimeout, _) ⇒ + testActor ! "timeout in Stopping" + stop + } + + var errors = Vector.empty[(ActorRef, ErrorLog)] + + when(Failed, stateTimeout = 5 seconds) { + case Event(e: ErrorLog, _) ⇒ + errors :+= sender -> e + stay + case Event(Terminated(r), _) if r == hierarchy ⇒ + printErrors() + testActor ! "stressTestFailed" + stop + case Event(StateTimeout, _) ⇒ + printErrors() + testActor ! "timeout in Failed" + stop + case Event("pong", _) ⇒ stay // don’t care? + } + + def printErrors(): Unit = { + val merged = errors flatMap { + case (ref, ErrorLog(msg, log)) ⇒ + println(ref + " " + msg) + log map (l ⇒ (l.time, ref, l.msg.toString)) + } + merged.sorted foreach println + } + + whenUnhandled { + case Event(e: ErrorLog, _) ⇒ + errors :+= sender -> e + // don’t stop the hierarchy, that is going to happen all by itself and in the right order + goto(Failed) + case Event(StateTimeout, _) ⇒ + println("pingChildren:\n" + pingChildren.mkString("\n")) + context stop hierarchy + goto(Failed) + case Event(msg, _) ⇒ + testActor ! ("received unexpected msg: " + msg) + stop + } + + initialize + + } + } @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) -class SupervisorHierarchySpec extends AkkaSpec with DefaultTimeout { +class SupervisorHierarchySpec extends AkkaSpec(SupervisorHierarchySpec.config) with DefaultTimeout with ImplicitSender { import SupervisorHierarchySpec._ "A Supervisor Hierarchy" must { @@ -81,6 +446,67 @@ class SupervisorHierarchySpec extends AkkaSpec with DefaultTimeout { assert(countDownMax.await(2, TimeUnit.SECONDS)) } } + + "resume children after Resume" in { + val boss = system.actorOf(Props[Resumer], "resumer") + boss ! "spawn" + val middle = expectMsgType[ActorRef] + middle ! "spawn" + val worker = expectMsgType[ActorRef] + worker ! "ping" + expectMsg("pong") + EventFilter[Exception]("expected", occurrences = 1) intercept { + middle ! "fail" + } + middle ! "ping" + expectMsg("pong") + worker ! "ping" + expectMsg("pong") + } + + "suspend children while failing" in { + val latch = TestLatch() + val slowResumer = system.actorOf(Props(new Actor { + override def supervisorStrategy = OneForOneStrategy() { case _ ⇒ Await.ready(latch, 4.seconds.dilated); SupervisorStrategy.Resume } + def receive = { + case "spawn" ⇒ sender ! context.actorOf(Props[Resumer]) + } + }), "slowResumer") + slowResumer ! "spawn" + val boss = expectMsgType[ActorRef] + boss ! "spawn" + val middle = expectMsgType[ActorRef] + middle ! "spawn" + val worker = expectMsgType[ActorRef] + worker ! "ping" + expectMsg("pong") + EventFilter[Exception]("expected", occurrences = 1) intercept { + boss ! "fail" + } + worker ! "ping" + expectNoMsg(2 seconds) + latch.countDown() + expectMsg("pong") + } + + "survive being stressed" in { + system.eventStream.publish(Mute(EventFilter[Failure]())) + system.eventStream.publish(Mute(EventFilter.warning(start = "received dead letter"))) + + val fsm = system.actorOf(Props(new StressTest(testActor, 6, 3)), "stressTest") + + fsm ! 
FSM.SubscribeTransitionCallBack(system.actorOf(Props(new Actor { + def receive = { + case s: FSM.CurrentState[_] ⇒ log.info("{}", s) + case t: FSM.Transition[_] ⇒ log.info("{}", t) + } + }))) + + fsm ! Init + + expectMsg(70 seconds, "stressTestSuccessful") + expectMsg("stressTestStopped") + } } } diff --git a/akka-actor-tests/src/test/scala/akka/actor/SupervisorMiscSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/SupervisorMiscSpec.scala index ec0c51e9ae..b13457338c 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/SupervisorMiscSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/SupervisorMiscSpec.scala @@ -3,14 +3,17 @@ */ package akka.actor +import language.postfixOps + import akka.testkit.{ filterEvents, EventFilter } -import akka.dispatch.{ PinnedDispatcher, Dispatchers, Await } +import scala.concurrent.Await +import akka.dispatch.{ PinnedDispatcher, Dispatchers } import java.util.concurrent.{ TimeUnit, CountDownLatch } import akka.testkit.AkkaSpec import akka.testkit.DefaultTimeout import akka.pattern.ask -import akka.util.duration._ -import akka.util.NonFatal +import scala.concurrent.util.duration._ +import scala.util.control.NonFatal object SupervisorMiscSpec { val config = """ @@ -134,6 +137,9 @@ class SupervisorMiscSpec extends AkkaSpec(SupervisorMiscSpec.config) with Defaul })) parent ! "engage" expectMsg("green") + EventFilter[IllegalStateException]("handleChildTerminated failed", occurrences = 1) intercept { + system.stop(parent) + } } } diff --git a/akka-actor-tests/src/test/scala/akka/actor/SupervisorSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/SupervisorSpec.scala index 2f4863711f..5362ad4153 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/SupervisorSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/SupervisorSpec.scala @@ -4,13 +4,15 @@ package akka.actor +import language.postfixOps + import org.scalatest.BeforeAndAfterEach -import akka.util.duration._ +import scala.concurrent.util.duration._ import akka.{ Die, Ping } import akka.testkit.TestEvent._ import akka.testkit._ import java.util.concurrent.atomic.AtomicInteger -import akka.dispatch.Await +import scala.concurrent.Await import akka.pattern.ask object SupervisorSpec { @@ -339,7 +341,12 @@ class SupervisorSpec extends AkkaSpec with BeforeAndAfterEach with ImplicitSende OneForOneStrategy(maxNrOfRetries = 3, withinTimeRange = 10 seconds)(classOf[Exception] :: Nil)))) val dyingProps = Props(new Actor { - if (inits.incrementAndGet % 2 == 0) throw new IllegalStateException("Don't wanna!") + val init = inits.getAndIncrement() + if (init % 3 == 1) throw new IllegalStateException("Don't wanna!") + + override def preRestart(cause: Throwable, msg: Option[Any]) { + if (init % 3 == 0) throw new IllegalStateException("Don't wanna!") + } def receive = { case Ping ⇒ sender ! PongMessage @@ -349,16 +356,20 @@ class SupervisorSpec extends AkkaSpec with BeforeAndAfterEach with ImplicitSende throw e } }) - val dyingActor = Await.result((supervisor ? dyingProps).mapTo[ActorRef], timeout.duration) + supervisor ! 
dyingProps + val dyingActor = expectMsgType[ActorRef] - filterEvents(EventFilter[RuntimeException]("Expected", occurrences = 1), - EventFilter[IllegalStateException]("error while creating actor", occurrences = 1)) { + filterEvents( + EventFilter[RuntimeException]("Expected", occurrences = 1), + EventFilter[PreRestartException]("Don't wanna!", occurrences = 1), + EventFilter[PostRestartException]("Don't wanna!", occurrences = 1)) { intercept[RuntimeException] { Await.result(dyingActor.?(DieReply)(DilatedTimeout), DilatedTimeout) } } - Await.result(dyingActor.?(Ping)(DilatedTimeout), DilatedTimeout) must be === PongMessage + dyingActor ! Ping + expectMsg(PongMessage) inits.get must be(3) diff --git a/akka-actor-tests/src/test/scala/akka/actor/SupervisorTreeSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/SupervisorTreeSpec.scala index b84cce002c..4213b548d9 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/SupervisorTreeSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/SupervisorTreeSpec.scala @@ -3,15 +3,15 @@ */ package akka.actor +import language.postfixOps + import org.scalatest.WordSpec import org.scalatest.matchers.MustMatchers -import akka.util.duration._ +import scala.concurrent.Await +import scala.concurrent.util.duration._ import akka.actor.Actor._ -import akka.testkit.{ TestKit, EventFilter, filterEvents, filterException } -import akka.testkit.AkkaSpec -import akka.testkit.ImplicitSender -import akka.testkit.DefaultTimeout -import akka.dispatch.{ Await, Dispatchers } +import akka.testkit.{ TestKit, EventFilter, filterEvents, filterException, AkkaSpec, ImplicitSender, DefaultTimeout } +import akka.dispatch.Dispatchers import akka.pattern.ask @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) diff --git a/akka-actor-tests/src/test/scala/akka/actor/Ticket669Spec.scala b/akka-actor-tests/src/test/scala/akka/actor/Ticket669Spec.scala index 285b63c2c7..e6ac2a13f3 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/Ticket669Spec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/Ticket669Spec.scala @@ -3,6 +3,8 @@ */ package akka.actor +import language.postfixOps + import java.util.concurrent.{ CountDownLatch, TimeUnit } import akka.actor._ import org.scalatest.BeforeAndAfterAll @@ -10,9 +12,9 @@ import akka.testkit.{ TestKit, filterEvents, EventFilter } import akka.testkit.AkkaSpec import akka.testkit.ImplicitSender import akka.testkit.DefaultTimeout -import akka.dispatch.Await +import scala.concurrent.Await import akka.pattern.ask -import akka.util.duration._ +import scala.concurrent.util.duration._ @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) class Ticket669Spec extends AkkaSpec with BeforeAndAfterAll with ImplicitSender with DefaultTimeout { diff --git a/akka-actor-tests/src/test/scala/akka/actor/TypedActorSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/TypedActorSpec.scala index b7a5a8f64b..61a1e84f7e 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/TypedActorSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/TypedActorSpec.scala @@ -1,20 +1,21 @@ -package akka.actor - /** * Copyright (C) 2009-2012 Typesafe Inc. 
*/ +package akka.actor + +import language.postfixOps import org.scalatest.{ BeforeAndAfterAll, BeforeAndAfterEach } -import akka.util.Duration import akka.util.Timeout -import akka.util.duration._ +import scala.concurrent.{ Await, Future, Promise } +import scala.concurrent.util.Duration +import scala.concurrent.util.duration._ import java.util.concurrent.atomic.AtomicReference import annotation.tailrec import akka.testkit.{ EventFilter, filterEvents, AkkaSpec } -import akka.serialization.SerializationExtension -import akka.japi.{ Creator, Option ⇒ JOption } +import akka.japi.{ Option ⇒ JOption } import akka.testkit.DefaultTimeout -import akka.dispatch.{ Await, Dispatchers, Future, Promise } +import akka.dispatch.{ Dispatchers } import akka.pattern.ask import akka.serialization.JavaSerializer import akka.actor.TypedActor._ @@ -108,7 +109,7 @@ object TypedActorSpec { def pigdog = "Pigdog" - def futurePigdog(): Future[String] = Promise.successful(pigdog) + def futurePigdog(): Future[String] = Promise.successful(pigdog).future def futurePigdog(delay: Long): Future[String] = { Thread.sleep(delay) @@ -117,7 +118,7 @@ object TypedActorSpec { def futurePigdog(delay: Long, numbered: Int): Future[String] = { Thread.sleep(delay) - Promise.successful(pigdog + numbered) + Promise.successful(pigdog + numbered).future } def futureComposePigdogFrom(foo: Foo): Future[String] = { diff --git a/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala index da789d9dce..b5d284d7af 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/dispatch/ActorModelSpec.scala @@ -3,6 +3,8 @@ */ package akka.actor.dispatch +import language.postfixOps + import java.rmi.RemoteException import java.util.concurrent.{ TimeUnit, CountDownLatch, ConcurrentHashMap } import java.util.concurrent.atomic.{ AtomicLong, AtomicInteger } @@ -18,8 +20,11 @@ import akka.dispatch._ import akka.event.Logging.Error import akka.pattern.ask import akka.testkit._ -import akka.util.{ Timeout, Switch, Duration } -import akka.util.duration._ +import akka.util.{ Timeout, Switch } +import scala.concurrent.util.duration._ +import scala.concurrent.util.Duration +import scala.concurrent.{ Await, Future, Promise } +import scala.annotation.tailrec object ActorModelSpec { @@ -154,7 +159,7 @@ object ActorModelSpec { try { await(deadline)(stops == dispatcher.stops.get) } catch { - case e ⇒ + case e: Throwable ⇒ system.eventStream.publish(Error(e, dispatcher.toString, dispatcher.getClass, "actual: stops=" + dispatcher.stops.get + " required: stops=" + stops)) throw e @@ -211,7 +216,7 @@ object ActorModelSpec { await(deadline)(stats.msgsProcessed.get() == msgsProcessed) await(deadline)(stats.restarts.get() == restarts) } catch { - case e ⇒ + case e: Throwable ⇒ system.eventStream.publish(Error(e, Option(dispatcher).toString, (Option(dispatcher) getOrElse this).getClass, @@ -222,16 +227,16 @@ object ActorModelSpec { } } - def await(until: Long)(condition: ⇒ Boolean): Unit = try { - while (System.currentTimeMillis() <= until) { - try { - if (condition) return else Thread.sleep(25) - } catch { - case e: InterruptedException ⇒ - } + @tailrec def await(until: Long)(condition: ⇒ Boolean): Unit = if (System.currentTimeMillis() <= until) { + var done = false + try { + done = condition + if (!done) Thread.sleep(25) + } catch { + case e: InterruptedException ⇒ } - throw new AssertionError("await failed") 
- } + if (!done) await(until)(condition) + } else throw new AssertionError("await failed") } abstract class ActorModelSpec(config: String) extends AkkaSpec(config) with DefaultTimeout { @@ -343,7 +348,7 @@ abstract class ActorModelSpec(config: String) extends AkkaSpec(config) with Defa assertNoCountDown(done, 1000, "Should not process messages while suspended") assertRefDefaultZero(a)(registers = 1, msgsReceived = 1, suspensions = 1) - a.resume + a.resume(inResponseToFailure = false) assertCountDown(done, 3.seconds.dilated.toMillis, "Should resume processing of messages when resumed") assertRefDefaultZero(a)(registers = 1, msgsReceived = 1, msgsProcessed = 1, suspensions = 1, resumes = 1) @@ -408,9 +413,9 @@ abstract class ActorModelSpec(config: String) extends AkkaSpec(config) with Defa val a = newTestActor(dispatcher.id) val f1 = a ? Reply("foo") val f2 = a ? Reply("bar") - val f3 = try { a ? Interrupt } catch { case ie: InterruptedException ⇒ Promise.failed(new ActorInterruptedException(ie)) } + val f3 = try { a ? Interrupt } catch { case ie: InterruptedException ⇒ Promise.failed(new ActorInterruptedException(ie)).future } val f4 = a ? Reply("foo2") - val f5 = try { a ? Interrupt } catch { case ie: InterruptedException ⇒ Promise.failed(new ActorInterruptedException(ie)) } + val f5 = try { a ? Interrupt } catch { case ie: InterruptedException ⇒ Promise.failed(new ActorInterruptedException(ie)).future } val f6 = a ? Reply("bar2") assert(Await.result(f1, timeout.duration) === "foo") diff --git a/akka-actor-tests/src/test/scala/akka/actor/dispatch/DispatcherActorSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/dispatch/DispatcherActorSpec.scala index 4b3dd4a5b3..c0b14896f8 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/dispatch/DispatcherActorSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/dispatch/DispatcherActorSpec.scala @@ -1,13 +1,16 @@ package akka.actor.dispatch +import language.postfixOps + import java.util.concurrent.{ CountDownLatch, TimeUnit } import java.util.concurrent.atomic.{ AtomicBoolean, AtomicInteger } import akka.testkit.{ filterEvents, EventFilter, AkkaSpec } import akka.actor.{ Props, Actor } -import akka.util.Duration -import akka.util.duration._ +import scala.concurrent.Await +import scala.concurrent.util.Duration +import scala.concurrent.util.duration._ import akka.testkit.DefaultTimeout -import akka.dispatch.{ Await, PinnedDispatcher, Dispatchers, Dispatcher } +import akka.dispatch.{ PinnedDispatcher, Dispatchers, Dispatcher } import akka.pattern.ask object DispatcherActorSpec { diff --git a/akka-actor-tests/src/test/scala/akka/actor/dispatch/DispatchersSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/dispatch/DispatchersSpec.scala index 03c5f21711..bab56cf9df 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/dispatch/DispatchersSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/dispatch/DispatchersSpec.scala @@ -3,6 +3,8 @@ */ package akka.actor.dispatch +import language.postfixOps + import java.util.concurrent.{ CountDownLatch, TimeUnit } import scala.reflect.{ Manifest } import akka.dispatch._ @@ -12,7 +14,7 @@ import scala.collection.JavaConverters._ import com.typesafe.config.ConfigFactory import akka.actor.Actor import akka.actor.Props -import akka.util.duration._ +import scala.concurrent.util.duration._ object DispatchersSpec { val config = """ diff --git a/akka-actor-tests/src/test/scala/akka/actor/dispatch/PinnedActorSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/dispatch/PinnedActorSpec.scala 
index cf8dd5eab5..6dfbf2aedd 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/dispatch/PinnedActorSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/dispatch/PinnedActorSpec.scala @@ -6,7 +6,8 @@ import akka.testkit._ import akka.actor.{ Props, Actor } import akka.testkit.AkkaSpec import org.scalatest.BeforeAndAfterEach -import akka.dispatch.{ Await, PinnedDispatcher, Dispatchers } +import akka.dispatch.{ PinnedDispatcher, Dispatchers } +import scala.concurrent.Await import akka.pattern.ask object PinnedActorSpec { diff --git a/akka-actor-tests/src/test/scala/akka/actor/routing/ListenerSpec.scala b/akka-actor-tests/src/test/scala/akka/actor/routing/ListenerSpec.scala index b9fc484957..22f96adc63 100644 --- a/akka-actor-tests/src/test/scala/akka/actor/routing/ListenerSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/actor/routing/ListenerSpec.scala @@ -5,7 +5,7 @@ import akka.actor._ import akka.actor.Actor._ import akka.routing._ import java.util.concurrent.atomic.AtomicInteger -import akka.dispatch.Await +import scala.concurrent.Await @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) class ListenerSpec extends AkkaSpec { diff --git a/akka-actor-tests/src/test/scala/akka/config/ConfigSpec.scala b/akka-actor-tests/src/test/scala/akka/config/ConfigSpec.scala index 0d9951e7b7..6d58c22718 100644 --- a/akka-actor-tests/src/test/scala/akka/config/ConfigSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/config/ConfigSpec.scala @@ -4,11 +4,13 @@ package akka.config +import language.postfixOps + import akka.testkit.AkkaSpec import com.typesafe.config.ConfigFactory import scala.collection.JavaConverters._ -import akka.util.duration._ -import akka.util.Duration +import scala.concurrent.util.duration._ +import scala.concurrent.util.Duration import akka.actor.ActorSystem @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) diff --git a/akka-actor-tests/src/test/scala/akka/dataflow/Future2Actor.scala b/akka-actor-tests/src/test/scala/akka/dataflow/Future2Actor.scala index a850bae739..97f5e0d236 100644 --- a/akka-actor-tests/src/test/scala/akka/dataflow/Future2Actor.scala +++ b/akka-actor-tests/src/test/scala/akka/dataflow/Future2Actor.scala @@ -3,15 +3,18 @@ */ package akka.dataflow +import language.postfixOps + import akka.actor.{ Actor, Props } -import akka.dispatch.{ Future, Await } -import akka.util.duration._ -import akka.testkit.AkkaSpec -import akka.testkit.DefaultTimeout +import scala.concurrent.Future +import scala.concurrent.Await +import scala.concurrent.util.duration._ +import akka.testkit.{ AkkaSpec, DefaultTimeout } import akka.pattern.{ ask, pipe } +import scala.concurrent.ExecutionException class Future2ActorSpec extends AkkaSpec with DefaultTimeout { - + implicit val ec = system.dispatcher "The Future2Actor bridge" must { "support convenient sending to multiple destinations" in { @@ -41,9 +44,9 @@ class Future2ActorSpec extends AkkaSpec with DefaultTimeout { } })) Await.result(actor ? "do", timeout.duration) must be(31) - intercept[AssertionError] { + intercept[ExecutionException] { Await.result(actor ? 
"ex", timeout.duration) - } + }.getCause.isInstanceOf[AssertionError] must be(true) } } } diff --git a/akka-actor-tests/src/test/scala/akka/dispatch/ExecutionContextSpec.scala b/akka-actor-tests/src/test/scala/akka/dispatch/ExecutionContextSpec.scala index 1403c10bbe..ab24192c5a 100644 --- a/akka-actor-tests/src/test/scala/akka/dispatch/ExecutionContextSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/dispatch/ExecutionContextSpec.scala @@ -3,6 +3,7 @@ package akka.dispatch import akka.testkit.AkkaSpec import akka.testkit.DefaultTimeout import java.util.concurrent.{ ExecutorService, Executor, Executors } +import scala.concurrent.ExecutionContext @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) class ExecutionContextSpec extends AkkaSpec with DefaultTimeout { @@ -18,12 +19,12 @@ class ExecutionContextSpec extends AkkaSpec with DefaultTimeout { val executorService: ExecutorService with ExecutionContext = ExecutionContext.fromExecutorService(es) executorService must not be (null) - val jExecutor: ExecutionContextExecutor = ExecutionContexts.fromExecutor(es) + /*val jExecutor: ExecutionContextExecutor = ExecutionContext.fromExecutor(es) jExecutor must not be (null) val jExecutorService: ExecutionContextExecutorService = ExecutionContexts.fromExecutorService(es) jExecutorService must not be (null) - + */ } finally { es.shutdown } diff --git a/akka-actor-tests/src/test/scala/akka/dispatch/FutureSpec.scala b/akka-actor-tests/src/test/scala/akka/dispatch/FutureSpec.scala index bed288f812..d5e6f6f955 100644 --- a/akka-actor-tests/src/test/scala/akka/dispatch/FutureSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/dispatch/FutureSpec.scala @@ -1,5 +1,7 @@ package akka.dispatch +import language.postfixOps + import org.scalatest.BeforeAndAfterAll import org.scalatest.prop.Checkers import org.scalacheck._ @@ -7,18 +9,26 @@ import org.scalacheck.Arbitrary._ import org.scalacheck.Prop._ import org.scalacheck.Gen._ import akka.actor._ -import akka.testkit.{ EventFilter, filterEvents, filterException } -import akka.util.duration._ -import akka.testkit.AkkaSpec +import akka.testkit.{ EventFilter, filterEvents, filterException, AkkaSpec, DefaultTimeout, TestLatch } +import scala.concurrent.{ Await, Awaitable, Future, Promise, ExecutionContext } +import scala.util.control.NonFatal +import scala.concurrent.util.duration._ +import scala.concurrent.util.Duration +import scala.concurrent.ExecutionContext import org.scalatest.junit.JUnitSuite -import akka.testkit.DefaultTimeout -import akka.testkit.TestLatch import scala.runtime.NonLocalReturnControl import akka.pattern.ask import java.lang.{ IllegalStateException, ArithmeticException } import java.util.concurrent._ object FutureSpec { + + def ready[T](awaitable: Awaitable[T], atMost: Duration): awaitable.type = + try Await.ready(awaitable, atMost) catch { + case t: TimeoutException ⇒ throw t + case e if NonFatal(e) ⇒ awaitable //swallow + } + class TestActor extends Actor { def receive = { case "Hello" ⇒ sender ! "World" @@ -30,10 +40,10 @@ object FutureSpec { class TestDelayActor(await: TestLatch) extends Actor { def receive = { - case "Hello" ⇒ Await.ready(await, TestLatch.DefaultTimeout); sender ! "World" - case "NoReply" ⇒ Await.ready(await, TestLatch.DefaultTimeout) + case "Hello" ⇒ FutureSpec.ready(await, TestLatch.DefaultTimeout); sender ! 
"World" + case "NoReply" ⇒ FutureSpec.ready(await, TestLatch.DefaultTimeout) case "Failure" ⇒ - Await.ready(await, TestLatch.DefaultTimeout) + FutureSpec.ready(await, TestLatch.DefaultTimeout) sender ! Status.Failure(new RuntimeException("Expected exception; to test fault-tolerance")) } } @@ -44,15 +54,15 @@ class JavaFutureSpec extends JavaFutureTests with JUnitSuite @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) class FutureSpec extends AkkaSpec with Checkers with BeforeAndAfterAll with DefaultTimeout { import FutureSpec._ - + implicit val ec: ExecutionContext = system.dispatcher "A Promise" when { "never completed" must { - behave like emptyFuture(_(Promise())) + behave like emptyFuture(_(Promise().future)) "return supplied value on timeout" in { - val failure = Promise.failed[String](new RuntimeException("br0ken")) - val otherFailure = Promise.failed[String](new RuntimeException("last")) - val empty = Promise[String]() - val timedOut = Promise.successful[String]("Timedout") + val failure = Promise.failed[String](new RuntimeException("br0ken")).future + val otherFailure = Promise.failed[String](new RuntimeException("last")).future + val empty = Promise[String]().future + val timedOut = Promise.successful[String]("Timedout").future Await.result(failure fallbackTo timedOut, timeout.duration) must be("Timedout") Await.result(timedOut fallbackTo empty, timeout.duration) must be("Timedout") @@ -64,47 +74,49 @@ class FutureSpec extends AkkaSpec with Checkers with BeforeAndAfterAll with Defa } "completed with a result" must { val result = "test value" - val future = Promise[String]().complete(Right(result)) + val future = Promise[String]().complete(Right(result)).future behave like futureWithResult(_(future, result)) } "completed with an exception" must { val message = "Expected Exception" - val future = Promise[String]().complete(Left(new RuntimeException(message))) + val future = Promise[String]().complete(Left(new RuntimeException(message))).future behave like futureWithException[RuntimeException](_(future, message)) } "completed with an InterruptedException" must { val message = "Boxed InterruptedException" - val future = Promise[String]().complete(Left(new InterruptedException(message))) + val future = Promise[String]().complete(Left(new InterruptedException(message))).future behave like futureWithException[RuntimeException](_(future, message)) } "completed with a NonLocalReturnControl" must { val result = "test value" - val future = Promise[String]().complete(Left(new NonLocalReturnControl[String]("test", result))) + val future = Promise[String]().complete(Left(new NonLocalReturnControl[String]("test", result))).future behave like futureWithResult(_(future, result)) } "have different ECs" in { - def namedCtx(n: String) = ExecutionContexts.fromExecutorService( - Executors.newSingleThreadExecutor(new ThreadFactory { - def newThread(r: Runnable) = new Thread(r, n) - })) + def namedCtx(n: String) = + ExecutionContext.fromExecutorService( + Executors.newSingleThreadExecutor(new ThreadFactory { def newThread(r: Runnable) = new Thread(r, n) })) val A = namedCtx("A") val B = namedCtx("B") // create a promise with ctx A - val p = Promise[String]()(A) + val p = Promise[String]() // I would expect that any callback from p // is executed in the context of p - val result = p map { _ + Thread.currentThread().getName() } + val result = { + implicit val ec = A + p.future map { _ + Thread.currentThread().getName() } + } p.completeWith(Future { "Hi " }(B)) try { Await.result(result, 
timeout.duration) must be === "Hi A" } finally { - A.asInstanceOf[ExecutorService].shutdown() - B.asInstanceOf[ExecutorService].shutdown() + A.shutdown() + B.shutdown() } } } @@ -117,12 +129,12 @@ class FutureSpec extends AkkaSpec with Checkers with BeforeAndAfterAll with Defa val latch = new TestLatch val result = "test value" val future = Future { - Await.ready(latch, TestLatch.DefaultTimeout) + FutureSpec.ready(latch, TestLatch.DefaultTimeout) result } test(future) latch.open() - Await.ready(future, timeout.duration) + FutureSpec.ready(future, timeout.duration) } } "is completed" must { @@ -130,11 +142,11 @@ class FutureSpec extends AkkaSpec with Checkers with BeforeAndAfterAll with Defa val latch = new TestLatch val result = "test value" val future = Future { - Await.ready(latch, TestLatch.DefaultTimeout) + FutureSpec.ready(latch, TestLatch.DefaultTimeout) result } latch.open() - Await.ready(future, timeout.duration) + FutureSpec.ready(future, timeout.duration) test(future, result) } } @@ -142,13 +154,13 @@ class FutureSpec extends AkkaSpec with Checkers with BeforeAndAfterAll with Defa "pass checks" in { filterException[ArithmeticException] { check({ (future: Future[Int], actions: List[FutureAction]) ⇒ + def wrap[T](f: Future[T]): Either[Throwable, T] = FutureSpec.ready(f, timeout.duration).value.get val result = (future /: actions)(_ /: _) - val expected = (Await.ready(future, timeout.duration).value.get /: actions)(_ /: _) - ((Await.ready(result, timeout.duration).value.get, expected) match { - case (Right(a), Right(b)) ⇒ a == b + val expected = (wrap(future) /: actions)(_ /: _) + ((wrap(result), expected) match { + case (Right(a), Right(b)) ⇒ a == b case (Left(a), Left(b)) if a.toString == b.toString ⇒ true - case (Left(a), Left(b)) if a.getStackTrace.isEmpty || b.getStackTrace.isEmpty ⇒ - a.getClass.toString == b.getClass.toString + case (Left(a), Left(b)) if a.getStackTrace.isEmpty || b.getStackTrace.isEmpty ⇒ a.getClass.toString == b.getClass.toString case _ ⇒ false }) :| result.value.get.toString + " is expected to be " + expected.toString }, minSuccessful(10000), workers(4)) @@ -162,7 +174,7 @@ class FutureSpec extends AkkaSpec with Checkers with BeforeAndAfterAll with Defa behave like futureWithResult { test ⇒ val actor = system.actorOf(Props[TestActor]) val future = actor ? "Hello" - Await.ready(future, timeout.duration) + FutureSpec.ready(future, timeout.duration) test(future, "World") system.stop(actor) } @@ -172,7 +184,7 @@ class FutureSpec extends AkkaSpec with Checkers with BeforeAndAfterAll with Defa filterException[RuntimeException] { val actor = system.actorOf(Props[TestActor]) val future = actor ? "Failure" - Await.ready(future, timeout.duration) + FutureSpec.ready(future, timeout.duration) test(future, "Expected exception; to test fault-tolerance") system.stop(actor) } @@ -186,7 +198,7 @@ class FutureSpec extends AkkaSpec with Checkers with BeforeAndAfterAll with Defa val actor1 = system.actorOf(Props[TestActor]) val actor2 = system.actorOf(Props(new Actor { def receive = { case s: String ⇒ sender ! s.toUpperCase } })) val future = actor1 ? "Hello" flatMap { case s: String ⇒ actor2 ? s } - Await.ready(future, timeout.duration) + FutureSpec.ready(future, timeout.duration) test(future, "WORLD") system.stop(actor1) system.stop(actor2) @@ -198,20 +210,20 @@ class FutureSpec extends AkkaSpec with Checkers with BeforeAndAfterAll with Defa val actor1 = system.actorOf(Props[TestActor]) val actor2 = system.actorOf(Props(new Actor { def receive = { case s: String ⇒ sender ! 
Status.Failure(new ArithmeticException("/ by zero")) } })) val future = actor1 ? "Hello" flatMap { case s: String ⇒ actor2 ? s } - Await.ready(future, timeout.duration) + FutureSpec.ready(future, timeout.duration) test(future, "/ by zero") system.stop(actor1) system.stop(actor2) } } } - "will throw a MatchError when matching wrong type" must { - behave like futureWithException[MatchError] { test ⇒ - filterException[MatchError] { + "will throw a NoSuchElementException when matching wrong type" must { + behave like futureWithException[NoSuchElementException] { test ⇒ + filterException[NoSuchElementException] { val actor1 = system.actorOf(Props[TestActor]) val actor2 = system.actorOf(Props(new Actor { def receive = { case s: String ⇒ sender ! s.toUpperCase } })) val future = actor1 ? "Hello" flatMap { case i: Int ⇒ actor2 ? i } - Await.ready(future, timeout.duration) + FutureSpec.ready(future, timeout.duration) test(future, "World (of class java.lang.String)") system.stop(actor1) system.stop(actor2) @@ -253,7 +265,7 @@ class FutureSpec extends AkkaSpec with Checkers with BeforeAndAfterAll with Defa } "support pattern matching within a for-comprehension" in { - filterException[MatchError] { + filterException[NoSuchElementException] { case class Req[T](req: T) case class Res[T](res: T) val actor = system.actorOf(Props(new Actor { @@ -276,7 +288,7 @@ class FutureSpec extends AkkaSpec with Checkers with BeforeAndAfterAll with Defa } yield b + "-" + c Await.result(future1, timeout.duration) must be("10-14") - intercept[MatchError] { Await.result(future2, timeout.duration) } + intercept[NoSuchElementException] { Await.result(future2, timeout.duration) } system.stop(actor) } } @@ -331,15 +343,16 @@ class FutureSpec extends AkkaSpec with Checkers with BeforeAndAfterAll with Defa "recoverWith from exceptions" in { val o = new IllegalStateException("original") val r = new IllegalStateException("recovered") + val yay = Promise.successful("yay!").future intercept[IllegalStateException] { - Await.result(Promise.failed[String](o) recoverWith { case _ if false == true ⇒ Promise.successful("yay!") }, timeout.duration) + Await.result(Promise.failed[String](o).future recoverWith { case _ if false == true ⇒ yay }, timeout.duration) } must be(o) - Await.result(Promise.failed[String](o) recoverWith { case _ ⇒ Promise.successful("yay!") }, timeout.duration) must equal("yay!") + Await.result(Promise.failed[String](o).future recoverWith { case _ ⇒ yay }, timeout.duration) must equal("yay!") intercept[IllegalStateException] { - Await.result(Promise.failed[String](o) recoverWith { case _ ⇒ Promise.failed[String](r) }, timeout.duration) + Await.result(Promise.failed[String](o).future recoverWith { case _ ⇒ Promise.failed[String](r).future }, timeout.duration) } must be(r) } @@ -355,7 +368,7 @@ class FutureSpec extends AkkaSpec with Checkers with BeforeAndAfterAll with Defa } "firstCompletedOf" in { - val futures = Vector.fill[Future[Int]](10)(Promise[Int]()) :+ Promise.successful[Int](5) + val futures = Vector.fill[Future[Int]](10)(Promise[Int]().future) :+ Promise.successful[Int](5).future Await.result(Future.firstCompletedOf(futures), timeout.duration) must be(5) } @@ -383,18 +396,18 @@ class FutureSpec extends AkkaSpec with Checkers with BeforeAndAfterAll with Defa val timeout = 10000 millis val f = new IllegalStateException("test") intercept[IllegalStateException] { - Await.result(Promise.failed[String](f) zip Promise.successful("foo"), timeout) + Await.result(Promise.failed[String](f).future zip 
Promise.successful("foo").future, timeout) } must be(f) intercept[IllegalStateException] { - Await.result(Promise.successful("foo") zip Promise.failed[String](f), timeout) + Await.result(Promise.successful("foo").future zip Promise.failed[String](f).future, timeout) } must be(f) intercept[IllegalStateException] { - Await.result(Promise.failed[String](f) zip Promise.failed[String](f), timeout) + Await.result(Promise.failed[String](f).future zip Promise.failed[String](f).future, timeout) } must be(f) - Await.result(Promise.successful("foo") zip Promise.successful("foo"), timeout) must be(("foo", "foo")) + Await.result(Promise.successful("foo").future zip Promise.successful("foo").future, timeout) must be(("foo", "foo")) } "fold by composing" in { @@ -484,7 +497,7 @@ class FutureSpec extends AkkaSpec with Checkers with BeforeAndAfterAll with Defa val latch = new TestLatch val actor = system.actorOf(Props[TestActor]) actor ? "Hello" onSuccess { case "World" ⇒ latch.open() } - Await.ready(latch, 5 seconds) + FutureSpec.ready(latch, 5 seconds) system.stop(actor) } @@ -515,7 +528,7 @@ class FutureSpec extends AkkaSpec with Checkers with BeforeAndAfterAll with Defa intercept[ThrowableTest] { Await.result(f1, timeout.duration) } val latch = new TestLatch - val f2 = Future { Await.ready(latch, 5 seconds); "success" } + val f2 = Future { FutureSpec.ready(latch, 5 seconds); "success" } f2 foreach (_ ⇒ throw new ThrowableTest("dispatcher foreach")) f2 onSuccess { case _ ⇒ throw new ThrowableTest("dispatcher receive") } val f3 = f2 map (s ⇒ s.toUpperCase) @@ -530,18 +543,19 @@ class FutureSpec extends AkkaSpec with Checkers with BeforeAndAfterAll with Defa "shouldBlockUntilResult" in { val latch = new TestLatch - val f = Future { Await.ready(latch, 5 seconds); 5 } + val f = Future { FutureSpec.ready(latch, 5 seconds); 5 } val f2 = Future { Await.result(f, timeout.duration) + 5 } - intercept[TimeoutException](Await.ready(f2, 100 millis)) + intercept[TimeoutException](FutureSpec.ready(f2, 100 millis)) latch.open() assert(Await.result(f2, timeout.duration) === 10) val f3 = Future { Thread.sleep(100); 5 } - filterException[TimeoutException] { intercept[TimeoutException] { Await.ready(f3, 0 millis) } } + filterException[TimeoutException] { intercept[TimeoutException] { FutureSpec.ready(f3, 0 millis) } } } - "futureComposingWithContinuations" in { + //FIXME DATAFLOW + /*"futureComposingWithContinuations" in { import Future.flow val actor = system.actorOf(Props[TestActor]) @@ -614,14 +628,14 @@ class FutureSpec extends AkkaSpec with Checkers with BeforeAndAfterAll with Defa z() + y() } - Await.ready(ly, 100 milliseconds) - intercept[TimeoutException] { Await.ready(lz, 100 milliseconds) } + FutureSpec.ready(ly, 100 milliseconds) + intercept[TimeoutException] { FutureSpec.ready(lz, 100 milliseconds) } flow { x << 5 } assert(Await.result(y, timeout.duration) === 5) assert(Await.result(z, timeout.duration) === 5) - Await.ready(lz, timeout.duration) + FutureSpec.ready(lz, timeout.duration) assert(Await.result(result, timeout.duration) === 10) val a, b, c = Promise[Int]() @@ -651,14 +665,14 @@ class FutureSpec extends AkkaSpec with Checkers with BeforeAndAfterAll with Defa flow { one << 1 } - Await.ready(one, 1 minute) + FutureSpec.ready(one, 1 minute) assert(one.isCompleted) assert(List(two, simpleResult).forall(_.isCompleted == false)) flow { two << 9 } - Await.ready(two, 1 minute) + FutureSpec.ready(two, 1 minute) assert(List(one, two).forall(_.isCompleted == true)) assert(Await.result(simpleResult, 
timeout.duration) === 10) @@ -677,20 +691,20 @@ class FutureSpec extends AkkaSpec with Checkers with BeforeAndAfterAll with Defa lz.open() x1() + x2() } - Await.ready(lx, 2 seconds) + FutureSpec.ready(lx, 2 seconds) assert(!ly.isOpen) assert(!lz.isOpen) assert(List(x1, x2, y1, y2).forall(_.isCompleted == false)) flow { y1 << 1 } // When this is set, it should cascade down the line - Await.ready(ly, 2 seconds) + FutureSpec.ready(ly, 2 seconds) assert(Await.result(x1, 1 minute) === 1) assert(!lz.isOpen) flow { y2 << 9 } // When this is set, it should cascade down the line - Await.ready(lz, 2 seconds) + FutureSpec.ready(lz, 2 seconds) assert(Await.result(x2, 1 minute) === 9) assert(List(x1, x2, y1, y2).forall(_.isCompleted)) @@ -703,16 +717,16 @@ class FutureSpec extends AkkaSpec with Checkers with BeforeAndAfterAll with Defa val i1, i2, s1, s2 = new TestLatch - val callService1 = Future { i1.open(); Await.ready(s1, TestLatch.DefaultTimeout); 1 } - val callService2 = Future { i2.open(); Await.ready(s2, TestLatch.DefaultTimeout); 9 } + val callService1 = Future { i1.open(); FutureSpec.ready(s1, TestLatch.DefaultTimeout); 1 } + val callService2 = Future { i2.open(); FutureSpec.ready(s2, TestLatch.DefaultTimeout); 9 } val result = flow { callService1() + callService2() } assert(!s1.isOpen) assert(!s2.isOpen) assert(!result.isCompleted) - Await.ready(i1, 2 seconds) - Await.ready(i2, 2 seconds) + FutureSpec.ready(i1, 2 seconds) + FutureSpec.ready(i2, 2 seconds) s1.open() s2.open() assert(Await.result(result, timeout.duration) === 10) @@ -733,8 +747,8 @@ class FutureSpec extends AkkaSpec with Checkers with BeforeAndAfterAll with Defa lz.open() z() + y() + oops } - intercept[TimeoutException] { Await.ready(ly, 100 milliseconds) } - intercept[TimeoutException] { Await.ready(lz, 100 milliseconds) } + intercept[TimeoutException] { FutureSpec.ready(ly, 100 milliseconds) } + intercept[TimeoutException] { FutureSpec.ready(lz, 100 milliseconds) } flow { x << 5 } assert(Await.result(y, timeout.duration) === 5) @@ -749,7 +763,7 @@ class FutureSpec extends AkkaSpec with Checkers with BeforeAndAfterAll with Defa val latch = new TestLatch val future = Future { - Await.ready(latch, TestLatch.DefaultTimeout) + FutureSpec.ready(latch, TestLatch.DefaultTimeout) "Hello" } @@ -800,41 +814,41 @@ class FutureSpec extends AkkaSpec with Checkers with BeforeAndAfterAll with Defa flow { y << 2 } assert(Await.result(z, timeout.duration) === 42) - } + }*/ "run callbacks async" in { val latch = Vector.fill(10)(new TestLatch) - val f1 = Future { latch(0).open(); Await.ready(latch(1), TestLatch.DefaultTimeout); "Hello" } - val f2 = f1 map { s ⇒ latch(2).open(); Await.ready(latch(3), TestLatch.DefaultTimeout); s.length } + val f1 = Future { latch(0).open(); FutureSpec.ready(latch(1), TestLatch.DefaultTimeout); "Hello" } + val f2 = f1 map { s ⇒ latch(2).open(); FutureSpec.ready(latch(3), TestLatch.DefaultTimeout); s.length } f2 foreach (_ ⇒ latch(4).open()) - Await.ready(latch(0), TestLatch.DefaultTimeout) + FutureSpec.ready(latch(0), TestLatch.DefaultTimeout) f1 must not be ('completed) f2 must not be ('completed) latch(1).open() - Await.ready(latch(2), TestLatch.DefaultTimeout) + FutureSpec.ready(latch(2), TestLatch.DefaultTimeout) f1 must be('completed) f2 must not be ('completed) - val f3 = f1 map { s ⇒ latch(5).open(); Await.ready(latch(6), TestLatch.DefaultTimeout); s.length * 2 } + val f3 = f1 map { s ⇒ latch(5).open(); FutureSpec.ready(latch(6), TestLatch.DefaultTimeout); s.length * 2 } f3 foreach (_ ⇒ latch(3).open()) - 
Await.ready(latch(5), TestLatch.DefaultTimeout) + FutureSpec.ready(latch(5), TestLatch.DefaultTimeout) f3 must not be ('completed) latch(6).open() - Await.ready(latch(4), TestLatch.DefaultTimeout) + FutureSpec.ready(latch(4), TestLatch.DefaultTimeout) f2 must be('completed) f3 must be('completed) val p1 = Promise[String]() - val f4 = p1 map { s ⇒ latch(7).open(); Await.ready(latch(8), TestLatch.DefaultTimeout); s.length } + val f4 = p1.future map { s ⇒ latch(7).open(); FutureSpec.ready(latch(8), TestLatch.DefaultTimeout); s.length } f4 foreach (_ ⇒ latch(9).open()) p1 must not be ('completed) @@ -842,38 +856,62 @@ class FutureSpec extends AkkaSpec with Checkers with BeforeAndAfterAll with Defa p1 complete Right("Hello") - Await.ready(latch(7), TestLatch.DefaultTimeout) + FutureSpec.ready(latch(7), TestLatch.DefaultTimeout) p1 must be('completed) f4 must not be ('completed) latch(8).open() - Await.ready(latch(9), TestLatch.DefaultTimeout) + FutureSpec.ready(latch(9), TestLatch.DefaultTimeout) - Await.ready(f4, timeout.duration) must be('completed) + FutureSpec.ready(f4, timeout.duration) must be('completed) } "should not deadlock with nested await (ticket 1313)" in { val simple = Future() map (_ ⇒ Await.result((Future(()) map (_ ⇒ ())), timeout.duration)) - Await.ready(simple, timeout.duration) must be('completed) + FutureSpec.ready(simple, timeout.duration) must be('completed) val l1, l2 = new TestLatch val complex = Future() map { _ ⇒ - Future.blocking() + //FIXME implement _taskStack for Futures val nested = Future(()) nested foreach (_ ⇒ l1.open()) - Await.ready(l1, TestLatch.DefaultTimeout) // make sure nested is completed + FutureSpec.ready(l1, TestLatch.DefaultTimeout) // make sure nested is completed nested foreach (_ ⇒ l2.open()) - Await.ready(l2, TestLatch.DefaultTimeout) + FutureSpec.ready(l2, TestLatch.DefaultTimeout) } - Await.ready(complex, timeout.duration) must be('completed) + FutureSpec.ready(complex, timeout.duration) must be('completed) } - "should capture first exception with dataflow" in { + "re-use the same thread for nested futures with batching ExecutionContext" in { + val failCount = new java.util.concurrent.atomic.AtomicInteger + val f = Future() flatMap { _ ⇒ + val originalThread = Thread.currentThread + // run some nested futures + val nested = + for (i ← 1 to 100) + yield Future.successful("abc") flatMap { _ ⇒ + if (Thread.currentThread ne originalThread) + failCount.incrementAndGet + // another level of nesting + Future.successful("xyz") map { _ ⇒ + if (Thread.currentThread ne originalThread) + failCount.incrementAndGet + } + } + Future.sequence(nested) + } + Await.ready(f, timeout.duration) + // TODO re-enable once we're using the batching dispatcher + // failCount.get must be(0) + } + + //FIXME DATAFLOW + /*"should capture first exception with dataflow" in { import Future.flow val f1 = flow { 40 / 0 } intercept[java.lang.ArithmeticException](Await result (f1, TestLatch.DefaultTimeout)) - } + }*/ } } @@ -888,17 +926,17 @@ class FutureSpec extends AkkaSpec with Checkers with BeforeAndAfterAll with Defa "contain a value" in { f((future, result) ⇒ future.value must be(Some(Right(result)))) } "return result with 'get'" in { f((future, result) ⇒ Await.result(future, timeout.duration) must be(result)) } "return result with 'Await.result'" in { f((future, result) ⇒ Await.result(future, timeout.duration) must be(result)) } - "not timeout" in { f((future, _) ⇒ Await.ready(future, 0 millis)) } + "not timeout" in { f((future, _) ⇒ FutureSpec.ready(future, 0 millis)) } 
"filter result" in { f { (future, result) ⇒ Await.result((future filter (_ ⇒ true)), timeout.duration) must be(result) - (evaluating { Await.result((future filter (_ ⇒ false)), timeout.duration) } must produce[MatchError]).getMessage must startWith(result.toString) + (evaluating { Await.result((future filter (_ ⇒ false)), timeout.duration) } must produce[java.util.NoSuchElementException]).getMessage must endWith(result.toString) } } "transform result with map" in { f((future, result) ⇒ Await.result((future map (_.toString.length)), timeout.duration) must be(result.toString.length)) } "compose result with flatMap" in { f { (future, result) ⇒ - val r = for (r ← future; p ← Promise.successful("foo")) yield r.toString + p + val r = for (r ← future; p ← Promise.successful("foo").future) yield r.toString + p Await.result(r, timeout.duration) must be(result.toString + "foo") } } @@ -906,13 +944,13 @@ class FutureSpec extends AkkaSpec with Checkers with BeforeAndAfterAll with Defa f { (future, result) ⇒ val p = Promise[Any]() future foreach p.success - Await.result(p, timeout.duration) must be(result) + Await.result(p.future, timeout.duration) must be(result) } } "zip properly" in { f { (future, result) ⇒ - Await.result(future zip Promise.successful("foo"), timeout.duration) must be((result, "foo")) - (evaluating { Await.result(future zip Promise.failed(new RuntimeException("ohnoes")), timeout.duration) } must produce[RuntimeException]).getMessage must be("ohnoes") + Await.result(future zip Promise.successful("foo").future, timeout.duration) must be((result, "foo")) + (evaluating { Await.result(future zip Promise.failed(new RuntimeException("ohnoes")).future, timeout.duration) } must produce[RuntimeException]).getMessage must be("ohnoes") } } "not recover from exception" in { f((future, result) ⇒ Await.result(future.recover({ case _ ⇒ "pigdog" }), timeout.duration) must be(result)) } @@ -920,10 +958,10 @@ class FutureSpec extends AkkaSpec with Checkers with BeforeAndAfterAll with Defa f { (future, result) ⇒ val p = Promise[Any]() future.onSuccess { case x ⇒ p.success(x) } - Await.result(p, timeout.duration) must be(result) + Await.result(p.future, timeout.duration) must be(result) } } - "not project a failure" in { f((future, result) ⇒ (evaluating { Await.result(future.failed, timeout.duration) } must produce[NoSuchElementException]).getMessage must be("Future.failed not completed with a throwable. 
Instead completed with: " + result)) } + "not project a failure" in { f((future, result) ⇒ (evaluating { Await.result(future.failed, timeout.duration) } must produce[NoSuchElementException]).getMessage must be("Future.failed not completed with a throwable.")) } "not perform action on exception" is pending "cast using mapTo" in { f((future, result) ⇒ Await.result(future.mapTo[Boolean].recover({ case _: ClassCastException ⇒ false }), timeout.duration) must be(false)) } } @@ -937,20 +975,20 @@ class FutureSpec extends AkkaSpec with Checkers with BeforeAndAfterAll with Defa future.value.get.left.get.getMessage must be(message) }) } - "throw exception with 'get'" in { f((future, message) ⇒ (evaluating { Await.result(future, timeout.duration) } must produce[E]).getMessage must be(message)) } - "throw exception with 'Await.result'" in { f((future, message) ⇒ (evaluating { Await.result(future, timeout.duration) } must produce[E]).getMessage must be(message)) } + "throw exception with 'get'" in { f((future, message) ⇒ (evaluating { Await.result(future, timeout.duration) } must produce[java.lang.Exception]).getMessage must be(message)) } + "throw exception with 'Await.result'" in { f((future, message) ⇒ (evaluating { Await.result(future, timeout.duration) } must produce[java.lang.Exception]).getMessage must be(message)) } "retain exception with filter" in { f { (future, message) ⇒ - (evaluating { Await.result(future filter (_ ⇒ true), timeout.duration) } must produce[E]).getMessage must be(message) - (evaluating { Await.result(future filter (_ ⇒ false), timeout.duration) } must produce[E]).getMessage must be(message) + (evaluating { Await.result(future filter (_ ⇒ true), timeout.duration) } must produce[java.lang.Exception]).getMessage must be(message) + (evaluating { Await.result(future filter (_ ⇒ false), timeout.duration) } must produce[java.lang.Exception]).getMessage must be(message) } } - "retain exception with map" in { f((future, message) ⇒ (evaluating { Await.result(future map (_.toString.length), timeout.duration) } must produce[E]).getMessage must be(message)) } - "retain exception with flatMap" in { f((future, message) ⇒ (evaluating { Await.result(future flatMap (_ ⇒ Promise.successful[Any]("foo")), timeout.duration) } must produce[E]).getMessage must be(message)) } + "retain exception with map" in { f((future, message) ⇒ (evaluating { Await.result(future map (_.toString.length), timeout.duration) } must produce[java.lang.Exception]).getMessage must be(message)) } + "retain exception with flatMap" in { f((future, message) ⇒ (evaluating { Await.result(future flatMap (_ ⇒ Promise.successful[Any]("foo").future), timeout.duration) } must produce[java.lang.Exception]).getMessage must be(message)) } "not perform action with foreach" is pending "zip properly" in { - f { (future, message) ⇒ (evaluating { Await.result(future zip Promise.successful("foo"), timeout.duration) } must produce[E]).getMessage must be(message) } + f { (future, message) ⇒ (evaluating { Await.result(future zip Promise.successful("foo").future, timeout.duration) } must produce[java.lang.Exception]).getMessage must be(message) } } "recover from exception" in { f((future, message) ⇒ Await.result(future.recover({ case e if e.getMessage == message ⇒ "pigdog" }), timeout.duration) must be("pigdog")) } "not perform action on result" is pending @@ -959,10 +997,10 @@ class FutureSpec extends AkkaSpec with Checkers with BeforeAndAfterAll with Defa f { (future, message) ⇒ val p = Promise[Any]() future.onFailure { case _ ⇒ 
p.success(message) } - Await.result(p, timeout.duration) must be(message) + Await.result(p.future, timeout.duration) must be(message) } } - "always cast successfully using mapTo" in { f((future, message) ⇒ (evaluating { Await.result(future.mapTo[java.lang.Thread], timeout.duration) } must produce[E]).getMessage must be(message)) } + "always cast successfully using mapTo" in { f((future, message) ⇒ (evaluating { Await.result(future.mapTo[java.lang.Thread], timeout.duration) } must produce[java.lang.Exception]).getMessage must be(message)) } } sealed trait IntAction { def apply(that: Int): Int } @@ -979,17 +1017,17 @@ class FutureSpec extends AkkaSpec with Checkers with BeforeAndAfterAll with Defa case class MapAction(action: IntAction) extends FutureAction { def /:(that: Either[Throwable, Int]): Either[Throwable, Int] = that match { case Left(e) ⇒ that - case Right(r) ⇒ try { Right(action(r)) } catch { case e: RuntimeException ⇒ Left(e) } + case Right(r) ⇒ try { Right(action(r)) } catch { case e if NonFatal(e) ⇒ Left(e) } } - def /:(that: Future[Int]): Future[Int] = that map (action(_)) + def /:(that: Future[Int]): Future[Int] = that map action.apply } case class FlatMapAction(action: IntAction) extends FutureAction { def /:(that: Either[Throwable, Int]): Either[Throwable, Int] = that match { case Left(e) ⇒ that - case Right(r) ⇒ try { Right(action(r)) } catch { case e: RuntimeException ⇒ Left(e) } + case Right(r) ⇒ try { Right(action(r)) } catch { case e if NonFatal(e) ⇒ Left(e) } } - def /:(that: Future[Int]): Future[Int] = that flatMap (n ⇒ Future(action(n))) + def /:(that: Future[Int]): Future[Int] = that flatMap (n ⇒ Future.successful(action(n))) } implicit def arbFuture: Arbitrary[Future[Int]] = Arbitrary(for (n ← arbitrary[Int]) yield Future(n)) diff --git a/akka-actor-tests/src/test/scala/akka/dispatch/MailboxConfigSpec.scala b/akka-actor-tests/src/test/scala/akka/dispatch/MailboxConfigSpec.scala index ba025ffe3c..ed93362b6f 100644 --- a/akka-actor-tests/src/test/scala/akka/dispatch/MailboxConfigSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/dispatch/MailboxConfigSpec.scala @@ -3,15 +3,15 @@ */ package akka.dispatch +import language.postfixOps + import java.util.concurrent.{ ConcurrentLinkedQueue, BlockingQueue } - import org.scalatest.{ BeforeAndAfterEach, BeforeAndAfterAll } - import com.typesafe.config.Config - import akka.actor.{ RepointableRef, Props, DeadLetter, ActorSystem, ActorRefWithCell, ActorRef, ActorCell } import akka.testkit.AkkaSpec -import akka.util.duration.intToDurationInt +import scala.concurrent.{ Future, Promise, Await } +import scala.concurrent.util.duration.intToDurationInt @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) abstract class MailboxSpec extends AkkaSpec with BeforeAndAfterAll with BeforeAndAfterEach { @@ -76,7 +76,7 @@ abstract class MailboxSpec extends AkkaSpec with BeforeAndAfterAll with BeforeAn } }) t.start - result + result.future } def createMessageInvocation(msg: Any): Envelope = Envelope(msg, system.deadLetters, system) diff --git a/akka-actor-tests/src/test/scala/akka/dispatch/PriorityDispatcherSpec.scala b/akka-actor-tests/src/test/scala/akka/dispatch/PriorityDispatcherSpec.scala index 11f8760320..1d0f3ec416 100644 --- a/akka-actor-tests/src/test/scala/akka/dispatch/PriorityDispatcherSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/dispatch/PriorityDispatcherSpec.scala @@ -1,14 +1,16 @@ package akka.dispatch +import language.postfixOps + import org.junit.runner.RunWith import org.scalatest.junit.JUnitRunner 
- import com.typesafe.config.Config import akka.actor.{ Props, InternalActorRef, ActorSystem, Actor } import akka.pattern.ask import akka.testkit.{ DefaultTimeout, AkkaSpec } -import akka.util.duration.intToDurationInt +import scala.concurrent.Await +import scala.concurrent.util.duration.intToDurationInt object PriorityDispatcherSpec { val config = """ @@ -63,7 +65,7 @@ class PriorityDispatcherSpec extends AkkaSpec(PriorityDispatcherSpec.config) wit val msgs = (1 to 100).toList for (m ← msgs) actor ! m - actor.resume //Signal the actor to start treating it's message backlog + actor.resume(inResponseToFailure = false) //Signal the actor to start treating it's message backlog Await.result(actor.?('Result).mapTo[List[Int]], timeout.duration) must be === msgs.reverse } diff --git a/akka-actor-tests/src/test/scala/akka/event/EventBusSpec.scala b/akka-actor-tests/src/test/scala/akka/event/EventBusSpec.scala index b1cc485982..2703727f07 100644 --- a/akka-actor-tests/src/test/scala/akka/event/EventBusSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/event/EventBusSpec.scala @@ -4,9 +4,11 @@ package akka.event +import language.postfixOps + import org.scalatest.BeforeAndAfterEach import akka.testkit._ -import akka.util.duration._ +import scala.concurrent.util.duration._ import java.util.concurrent.atomic._ import akka.actor.{ Props, Actor, ActorRef, ActorSystem } import java.util.Comparator diff --git a/akka-actor-tests/src/test/scala/akka/event/EventStreamSpec.scala b/akka-actor-tests/src/test/scala/akka/event/EventStreamSpec.scala index d1846860f3..c027d805d1 100644 --- a/akka-actor-tests/src/test/scala/akka/event/EventStreamSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/event/EventStreamSpec.scala @@ -3,8 +3,10 @@ */ package akka.event +import language.postfixOps + import akka.testkit.AkkaSpec -import akka.util.duration._ +import scala.concurrent.util.duration._ import akka.actor.{ Actor, ActorRef, ActorSystemImpl, ActorSystem, Props, UnhandledMessage } import com.typesafe.config.ConfigFactory import scala.collection.JavaConverters._ diff --git a/akka-actor-tests/src/test/scala/akka/event/LoggingReceiveSpec.scala b/akka-actor-tests/src/test/scala/akka/event/LoggingReceiveSpec.scala index 787f29f93a..4bb99ec555 100644 --- a/akka-actor-tests/src/test/scala/akka/event/LoggingReceiveSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/event/LoggingReceiveSpec.scala @@ -3,11 +3,13 @@ */ package akka.event +import language.postfixOps + import org.scalatest.{ BeforeAndAfterAll, BeforeAndAfterEach } -import akka.util.duration._ +import scala.concurrent.util.duration._ import akka.testkit._ import org.scalatest.WordSpec -import akka.util.Duration +import scala.concurrent.util.Duration import com.typesafe.config.ConfigFactory import scala.collection.JavaConverters._ import java.util.Properties diff --git a/akka-actor-tests/src/test/scala/akka/pattern/AskSpec.scala b/akka-actor-tests/src/test/scala/akka/pattern/AskSpec.scala index f3c36665e8..cbb098defc 100644 --- a/akka-actor-tests/src/test/scala/akka/pattern/AskSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/pattern/AskSpec.scala @@ -3,15 +3,21 @@ */ package akka.pattern -import akka.testkit.AkkaSpec -import akka.util.duration._ -import akka.testkit.DefaultTimeout +import language.postfixOps -class AskSpec extends AkkaSpec with DefaultTimeout { +import akka.testkit.AkkaSpec +import scala.concurrent.util.duration._ +import scala.concurrent.Await +import akka.testkit.DefaultTimeout +import akka.actor.{ Props, ActorRef } +import 
akka.util.Timeout + +class AskSpec extends AkkaSpec { "The “ask” pattern" must { "return broken promises on DeadLetters" in { + implicit val timeout = Timeout(5 seconds) val dead = system.actorFor("/system/deadLetters") val f = dead.ask(42)(1 second) f.isCompleted must be(true) @@ -22,6 +28,7 @@ class AskSpec extends AkkaSpec with DefaultTimeout { } "return broken promises on EmptyLocalActorRefs" in { + implicit val timeout = Timeout(5 seconds) val empty = system.actorFor("unknown") val f = empty ? 3.14 f.isCompleted must be(true) @@ -31,6 +38,45 @@ class AskSpec extends AkkaSpec with DefaultTimeout { } } + "return broken promises on unsupported ActorRefs" in { + implicit val timeout = Timeout(5 seconds) + val f = ask(null: ActorRef, 3.14) + f.isCompleted must be(true) + intercept[IllegalArgumentException] { + Await.result(f, remaining) + }.getMessage must be === "Unsupported type of ActorRef for the recipient. Question not sent to [null]" + } + + "return broken promises on 0 timeout" in { + implicit val timeout = Timeout(0 seconds) + val echo = system.actorOf(Props(ctx ⇒ { case x ⇒ ctx.sender ! x })) + val f = echo ? "foo" + val expectedMsg = "Timeout length for an `ask` must be greater or equal to 1. Question not sent to [%s]" format echo + intercept[IllegalArgumentException] { + Await.result(f, remaining) + }.getMessage must be === expectedMsg + } + + "return broken promises on < 0 timeout" in { + implicit val timeout = Timeout(-1000 seconds) + val echo = system.actorOf(Props(ctx ⇒ { case x ⇒ ctx.sender ! x })) + val f = echo ? "foo" + val expectedMsg = "Timeout length for an `ask` must be greater or equal to 1. Question not sent to [%s]" format echo + intercept[IllegalArgumentException] { + Await.result(f, remaining) + }.getMessage must be === expectedMsg + } + + "return broken promises on infinite timeout" in { + implicit val timeout = Timeout.never + val echo = system.actorOf(Props(ctx ⇒ { case x ⇒ ctx.sender ! x })) + val f = echo ? "foo" + val expectedMsg = "Timeouts to `ask` must be finite. 
Question not sent to [%s]" format echo + intercept[IllegalArgumentException] { + Await.result(f, remaining) + }.getMessage must be === expectedMsg + } + } } \ No newline at end of file diff --git a/akka-actor-tests/src/test/scala/akka/pattern/CircuitBreakerMTSpec.scala b/akka-actor-tests/src/test/scala/akka/pattern/CircuitBreakerMTSpec.scala index c43531e493..7bf5afa6f9 100644 --- a/akka-actor-tests/src/test/scala/akka/pattern/CircuitBreakerMTSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/pattern/CircuitBreakerMTSpec.scala @@ -4,12 +4,11 @@ package akka.pattern import akka.testkit._ -import akka.util.duration._ -import akka.dispatch.{ Promise, Await, Future } -import akka.actor.ActorSystem +import scala.concurrent.util.duration._ +import scala.concurrent.{ Promise, Future, Await } class CircuitBreakerMTSpec extends AkkaSpec { - + implicit val ec = system.dispatcher "A circuit breaker being called by many threads" must { val breaker = new CircuitBreaker(system.scheduler, 5, 100.millis.dilated, 500.millis.dilated) @@ -33,7 +32,7 @@ class CircuitBreakerMTSpec extends AkkaSpec { val futures = for (i ← 1 to 100) yield breaker.withCircuitBreaker(Future { Thread.sleep(10); "success" }) recoverWith { - case _: CircuitBreakerOpenException ⇒ Promise.successful("CBO") + case _: CircuitBreakerOpenException ⇒ Promise.successful("CBO").future } val result = Await.result(Future.sequence(futures), 5.second.dilated) @@ -53,7 +52,7 @@ class CircuitBreakerMTSpec extends AkkaSpec { val futures = for (i ← 1 to 100) yield breaker.withCircuitBreaker(Future { Thread.sleep(10); "succeed" }) recoverWith { - case _: CircuitBreakerOpenException ⇒ Promise.successful("CBO") + case _: CircuitBreakerOpenException ⇒ Promise.successful("CBO").future } val result = Await.result(Future.sequence(futures), 5.second.dilated) @@ -72,7 +71,7 @@ class CircuitBreakerMTSpec extends AkkaSpec { val futures = (1 to 100) map { i ⇒ breaker.withCircuitBreaker(Future { Thread.sleep(10); "succeed" }) recoverWith { - case _: CircuitBreakerOpenException ⇒ Promise.successful("CBO") + case _: CircuitBreakerOpenException ⇒ Promise.successful("CBO").future } } diff --git a/akka-actor-tests/src/test/scala/akka/pattern/CircuitBreakerSpec.scala b/akka-actor-tests/src/test/scala/akka/pattern/CircuitBreakerSpec.scala index 2c2a07ee3f..0e108d1a3b 100644 --- a/akka-actor-tests/src/test/scala/akka/pattern/CircuitBreakerSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/pattern/CircuitBreakerSpec.scala @@ -1,70 +1,54 @@ - /** * Copyright (C) 2009-2012 Typesafe Inc. 
*/ package akka.pattern -import akka.util.duration._ +import language.postfixOps + +import scala.concurrent.util.duration._ import akka.testkit._ import org.scalatest.BeforeAndAfter -import akka.dispatch.Future -import akka.dispatch.Await +import akka.actor.{ ActorSystem, Scheduler } +import concurrent.{ ExecutionContext, Future, Await } object CircuitBreakerSpec { class TestException extends RuntimeException + class Breaker(val instance: CircuitBreaker)(implicit system: ActorSystem) { + val halfOpenLatch = new TestLatch(1) + val openLatch = new TestLatch(1) + val closedLatch = new TestLatch(1) + def apply(): CircuitBreaker = instance + instance.onClose(closedLatch.countDown()).onHalfOpen(halfOpenLatch.countDown()).onOpen(openLatch.countDown()) + } + + def shortCallTimeoutCb()(implicit system: ActorSystem, ec: ExecutionContext): Breaker = + new Breaker(new CircuitBreaker(system.scheduler, 1, 50.millis.dilated, 500.millis.dilated)) + + def shortResetTimeoutCb()(implicit system: ActorSystem, ec: ExecutionContext): Breaker = + new Breaker(new CircuitBreaker(system.scheduler, 1, 1000.millis.dilated, 50.millis.dilated)) + + def longCallTimeoutCb()(implicit system: ActorSystem, ec: ExecutionContext): Breaker = + new Breaker(new CircuitBreaker(system.scheduler, 1, 5 seconds, 500.millis.dilated)) + + def longResetTimeoutCb()(implicit system: ActorSystem, ec: ExecutionContext): Breaker = + new Breaker(new CircuitBreaker(system.scheduler, 1, 100.millis.dilated, 5 seconds)) + + def multiFailureCb()(implicit system: ActorSystem, ec: ExecutionContext): Breaker = + new Breaker(new CircuitBreaker(system.scheduler, 5, 200.millis.dilated, 500.millis.dilated)) } @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) class CircuitBreakerSpec extends AkkaSpec with BeforeAndAfter { - import CircuitBreakerSpec.TestException + implicit def ec = system.dispatcher + implicit def s = system val awaitTimeout = 2.seconds.dilated - @volatile - var breakers: TestCircuitBreakers = null - - class TestCircuitBreakers { - val halfOpenLatch = new TestLatch(1) - val openLatch = new TestLatch(1) - val closedLatch = new TestLatch(1) - - val shortCallTimeoutCb = new CircuitBreaker(system.scheduler, 1, 50.millis.dilated, 500.millis.dilated) - .onClose(closedLatch.countDown()) - .onHalfOpen(halfOpenLatch.countDown()) - .onOpen(openLatch.countDown()) - - val shortResetTimeoutCb = new CircuitBreaker(system.scheduler, 1, 1000.millis.dilated, 50.millis.dilated) - .onClose(closedLatch.countDown()) - .onHalfOpen(halfOpenLatch.countDown()) - .onOpen(openLatch.countDown()) - - val longCallTimeoutCb = new CircuitBreaker(system.scheduler, 1, 5 seconds, 500.millis.dilated) - .onClose(closedLatch.countDown()) - .onHalfOpen(halfOpenLatch.countDown()) - .onOpen(openLatch.countDown()) - - val longResetTimeoutCb = new CircuitBreaker(system.scheduler, 1, 100.millis.dilated, 5 seconds) - .onClose(closedLatch.countDown()) - .onHalfOpen(halfOpenLatch.countDown()) - .onOpen(openLatch.countDown()) - - val multiFailureCb = new CircuitBreaker(system.scheduler, 5, 200.millis.dilated, 500.millis.dilated) - .onClose(closedLatch.countDown()) - .onHalfOpen(halfOpenLatch.countDown()) - .onOpen(openLatch.countDown()) - } - - before { - breakers = new TestCircuitBreakers - } - - def checkLatch(latch: TestLatch) { - Await.ready(latch, awaitTimeout) - } + def checkLatch(latch: TestLatch): Unit = Await.ready(latch, awaitTimeout) def throwException = throw new TestException @@ -72,171 +56,151 @@ class CircuitBreakerSpec extends AkkaSpec with 
BeforeAndAfter { "A synchronous circuit breaker that is open" must { "throw exceptions when called before reset timeout" in { + val breaker = CircuitBreakerSpec.longResetTimeoutCb() - intercept[TestException] { - breakers.longResetTimeoutCb.withSyncCircuitBreaker(throwException) - } - checkLatch(breakers.openLatch) + intercept[TestException] { breaker().withSyncCircuitBreaker(throwException) } - intercept[CircuitBreakerOpenException] { - breakers.longResetTimeoutCb.withSyncCircuitBreaker(sayHi) - } + checkLatch(breaker.openLatch) + + intercept[CircuitBreakerOpenException] { breaker().withSyncCircuitBreaker(sayHi) } } "transition to half-open on reset timeout" in { - intercept[TestException] { - breakers.shortResetTimeoutCb.withSyncCircuitBreaker(throwException) - } - checkLatch(breakers.halfOpenLatch) + val breaker = CircuitBreakerSpec.shortResetTimeoutCb() + intercept[TestException] { breaker().withSyncCircuitBreaker(throwException) } + checkLatch(breaker.halfOpenLatch) } } "A synchronous circuit breaker that is half-open" must { "pass through next call and close on success" in { - intercept[TestException] { - breakers.shortResetTimeoutCb.withSyncCircuitBreaker(throwException) - } - checkLatch(breakers.halfOpenLatch) - assert("hi" == breakers.shortResetTimeoutCb.withSyncCircuitBreaker(sayHi)) - checkLatch(breakers.closedLatch) + val breaker = CircuitBreakerSpec.shortResetTimeoutCb() + intercept[TestException] { breaker().withSyncCircuitBreaker(throwException) } + checkLatch(breaker.halfOpenLatch) + assert("hi" == breaker().withSyncCircuitBreaker(sayHi)) + checkLatch(breaker.closedLatch) } "open on exception in call" in { - intercept[TestException] { - breakers.shortResetTimeoutCb.withSyncCircuitBreaker(throwException) - } - checkLatch(breakers.halfOpenLatch) - intercept[TestException] { - breakers.shortResetTimeoutCb.withSyncCircuitBreaker(throwException) - } - checkLatch(breakers.openLatch) + val breaker = CircuitBreakerSpec.shortResetTimeoutCb() + intercept[TestException] { breaker().withSyncCircuitBreaker(throwException) } + checkLatch(breaker.halfOpenLatch) + intercept[TestException] { breaker().withSyncCircuitBreaker(throwException) } + checkLatch(breaker.openLatch) } } "A synchronous circuit breaker that is closed" must { "allow calls through" in { - breakers.longCallTimeoutCb.withSyncCircuitBreaker(sayHi) must be("hi") + val breaker = CircuitBreakerSpec.longCallTimeoutCb() + breaker().withSyncCircuitBreaker(sayHi) must be("hi") } "increment failure count on failure" in { - intercept[TestException] { - breakers.longCallTimeoutCb.withSyncCircuitBreaker(throwException) - } - checkLatch(breakers.openLatch) - breakers.longCallTimeoutCb.currentFailureCount must be(1) + val breaker = CircuitBreakerSpec.longCallTimeoutCb() + breaker().currentFailureCount must be(0) + intercept[TestException] { breaker().withSyncCircuitBreaker(throwException) } + checkLatch(breaker.openLatch) + breaker().currentFailureCount must be(1) } "reset failure count after success" in { + val breaker = CircuitBreakerSpec.multiFailureCb() + breaker().currentFailureCount must be(0) intercept[TestException] { - breakers.multiFailureCb.withSyncCircuitBreaker(throwException) + val ct = Thread.currentThread() // Ensure that the thunk is executed in the tests thread + breaker().withSyncCircuitBreaker({ if (Thread.currentThread() eq ct) throwException else "fail" }) } - - breakers.multiFailureCb.currentFailureCount must be(1) - breakers.multiFailureCb.withSyncCircuitBreaker(sayHi) - 
breakers.multiFailureCb.currentFailureCount must be(0) + breaker().currentFailureCount must be === 1 + breaker().withSyncCircuitBreaker(sayHi) + breaker().currentFailureCount must be === 0 } "increment failure count on callTimeout" in { - breakers.shortCallTimeoutCb.withSyncCircuitBreaker({ - 100.millis.dilated.sleep() - }) - breakers.shortCallTimeoutCb.currentFailureCount must be(1) + val breaker = CircuitBreakerSpec.shortCallTimeoutCb() + breaker().withSyncCircuitBreaker(Thread.sleep(100.millis.dilated.toMillis)) + awaitCond(breaker().currentFailureCount == 1, remaining) } } "An asynchronous circuit breaker that is open" must { "throw exceptions when called before reset timeout" in { - breakers.longResetTimeoutCb.withCircuitBreaker(Future(throwException)) + val breaker = CircuitBreakerSpec.longResetTimeoutCb() + breaker().withCircuitBreaker(Future(throwException)) - checkLatch(breakers.openLatch) + checkLatch(breaker.openLatch) - intercept[CircuitBreakerOpenException] { - Await.result( - breakers.longResetTimeoutCb.withCircuitBreaker(Future(sayHi)), - awaitTimeout) - } + intercept[CircuitBreakerOpenException] { Await.result(breaker().withCircuitBreaker(Future(sayHi)), awaitTimeout) } } "transition to half-open on reset timeout" in { - breakers.shortResetTimeoutCb.withCircuitBreaker(Future(throwException)) - checkLatch(breakers.halfOpenLatch) + val breaker = CircuitBreakerSpec.shortResetTimeoutCb() + breaker().withCircuitBreaker(Future(throwException)) + checkLatch(breaker.halfOpenLatch) } } "An asynchronous circuit breaker that is half-open" must { "pass through next call and close on success" in { - breakers.shortResetTimeoutCb.withCircuitBreaker(Future(throwException)) - checkLatch(breakers.halfOpenLatch) - - Await.result( - breakers.shortResetTimeoutCb.withCircuitBreaker(Future(sayHi)), - awaitTimeout) must be("hi") - checkLatch(breakers.closedLatch) + val breaker = CircuitBreakerSpec.shortResetTimeoutCb() + breaker().withCircuitBreaker(Future(throwException)) + checkLatch(breaker.halfOpenLatch) + Await.result(breaker().withCircuitBreaker(Future(sayHi)), awaitTimeout) must be("hi") + checkLatch(breaker.closedLatch) } "re-open on exception in call" in { - breakers.shortResetTimeoutCb.withCircuitBreaker(Future(throwException)) - checkLatch(breakers.halfOpenLatch) - - intercept[TestException] { - Await.result( - breakers.shortResetTimeoutCb.withCircuitBreaker(Future(throwException)), - awaitTimeout) - } - checkLatch(breakers.openLatch) + val breaker = CircuitBreakerSpec.shortResetTimeoutCb() + breaker().withCircuitBreaker(Future(throwException)) + checkLatch(breaker.halfOpenLatch) + intercept[TestException] { Await.result(breaker().withCircuitBreaker(Future(throwException)), awaitTimeout) } + checkLatch(breaker.openLatch) } "re-open on async failure" in { - breakers.shortResetTimeoutCb.withCircuitBreaker(Future(throwException)) - checkLatch(breakers.halfOpenLatch) + val breaker = CircuitBreakerSpec.shortResetTimeoutCb() + breaker().withCircuitBreaker(Future(throwException)) + checkLatch(breaker.halfOpenLatch) - breakers.shortResetTimeoutCb.withCircuitBreaker(Future(throwException)) - checkLatch(breakers.openLatch) + breaker().withCircuitBreaker(Future(throwException)) + checkLatch(breaker.openLatch) } } "An asynchronous circuit breaker that is closed" must { "allow calls through" in { - Await.result( - breakers.longCallTimeoutCb.withCircuitBreaker(Future(sayHi)), - awaitTimeout) must be("hi") + val breaker = CircuitBreakerSpec.longCallTimeoutCb() + 
Await.result(breaker().withCircuitBreaker(Future(sayHi)), awaitTimeout) must be("hi") } "increment failure count on exception" in { - intercept[TestException] { - Await.result( - breakers.longCallTimeoutCb.withCircuitBreaker(Future(throwException)), - awaitTimeout) - } - checkLatch(breakers.openLatch) - breakers.longCallTimeoutCb.currentFailureCount must be(1) + val breaker = CircuitBreakerSpec.longCallTimeoutCb() + intercept[TestException] { Await.result(breaker().withCircuitBreaker(Future(throwException)), awaitTimeout) } + checkLatch(breaker.openLatch) + breaker().currentFailureCount must be(1) } "increment failure count on async failure" in { - breakers.longCallTimeoutCb.withCircuitBreaker(Future(throwException)) - checkLatch(breakers.openLatch) - breakers.longCallTimeoutCb.currentFailureCount must be(1) + val breaker = CircuitBreakerSpec.longCallTimeoutCb() + breaker().withCircuitBreaker(Future(throwException)) + checkLatch(breaker.openLatch) + breaker().currentFailureCount must be(1) } "reset failure count after success" in { - breakers.multiFailureCb.withCircuitBreaker(Future(sayHi)) - val latch = TestLatch(4) - for (n ← 1 to 4) breakers.multiFailureCb.withCircuitBreaker(Future(throwException)) - awaitCond(breakers.multiFailureCb.currentFailureCount == 4, awaitTimeout) - breakers.multiFailureCb.withCircuitBreaker(Future(sayHi)) - awaitCond(breakers.multiFailureCb.currentFailureCount == 0, awaitTimeout) + val breaker = CircuitBreakerSpec.multiFailureCb() + breaker().withCircuitBreaker(Future(sayHi)) + for (n ← 1 to 4) breaker().withCircuitBreaker(Future(throwException)) + awaitCond(breaker().currentFailureCount == 4, awaitTimeout) + breaker().withCircuitBreaker(Future(sayHi)) + awaitCond(breaker().currentFailureCount == 0, awaitTimeout) } "increment failure count on callTimeout" in { - breakers.shortCallTimeoutCb.withCircuitBreaker { - Future { - 100.millis.dilated.sleep() - sayHi - } - } - - checkLatch(breakers.openLatch) - breakers.shortCallTimeoutCb.currentFailureCount must be(1) + val breaker = CircuitBreakerSpec.shortCallTimeoutCb() + breaker().withCircuitBreaker(Future { Thread.sleep(100.millis.dilated.toMillis); sayHi }) + checkLatch(breaker.openLatch) + breaker().currentFailureCount must be(1) } } diff --git a/akka-actor-tests/src/test/scala/akka/pattern/PatternSpec.scala b/akka-actor-tests/src/test/scala/akka/pattern/PatternSpec.scala index 68e6d40824..1c41364d05 100644 --- a/akka-actor-tests/src/test/scala/akka/pattern/PatternSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/pattern/PatternSpec.scala @@ -4,25 +4,26 @@ package akka.pattern +import language.postfixOps + import akka.testkit.AkkaSpec -import akka.actor.Props -import akka.actor.Actor -import akka.util.Duration -import akka.util.duration._ -import akka.dispatch.{ Future, Promise, Await } +import akka.actor.{ Props, Actor } +import scala.concurrent.{ Future, Promise, Await } +import scala.concurrent.util.Duration +import scala.concurrent.util.duration._ object PatternSpec { case class Work(duration: Duration) class TargetActor extends Actor { def receive = { - case Work(duration) ⇒ duration.sleep() + case Work(duration) ⇒ Thread.sleep(duration.toMillis) } } } @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) class PatternSpec extends AkkaSpec { - + implicit val ec = system.dispatcher import PatternSpec._ "pattern.gracefulStop" must { @@ -48,16 +49,16 @@ class PatternSpec extends AkkaSpec { "pattern.after" must { "be completed successfully eventually" in { - val f = after(1 second, using = 
system.scheduler)(Promise.successful(5)) + val f = after(1 second, using = system.scheduler)(Promise.successful(5).future) - val r = Future.firstCompletedOf(Seq(Promise[Int](), f)) + val r = Future.firstCompletedOf(Seq(Promise[Int]().future, f)) Await.result(r, remaining) must be(5) } "be completed abnormally eventually" in { - val f = after(1 second, using = system.scheduler)(Promise.failed(new IllegalStateException("Mexico"))) + val f = after(1 second, using = system.scheduler)(Promise.failed(new IllegalStateException("Mexico")).future) - val r = Future.firstCompletedOf(Seq(Promise[Int](), f)) + val r = Future.firstCompletedOf(Seq(Promise[Int]().future, f)) intercept[IllegalStateException] { Await.result(r, remaining) }.getMessage must be("Mexico") } } diff --git a/akka-actor-tests/src/test/scala/akka/performance/microbench/TellThroughputComputationPerformanceSpec.scala b/akka-actor-tests/src/test/scala/akka/performance/microbench/TellThroughputComputationPerformanceSpec.scala index 4bee0c8655..dccd0b243a 100644 --- a/akka-actor-tests/src/test/scala/akka/performance/microbench/TellThroughputComputationPerformanceSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/performance/microbench/TellThroughputComputationPerformanceSpec.scala @@ -4,8 +4,8 @@ import akka.performance.workbench.PerformanceSpec import akka.actor._ import java.util.concurrent.{ ThreadPoolExecutor, CountDownLatch, TimeUnit } import akka.dispatch._ -import akka.util.Duration -import akka.util.duration._ +import scala.concurrent.util.Duration +import scala.concurrent.util.duration._ // -server -Xms512M -Xmx1024M -XX:+UseParallelGC -Dbenchmark=true -Dbenchmark.repeatFactor=500 @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) diff --git a/akka-actor-tests/src/test/scala/akka/performance/microbench/TellThroughputPerformanceSpec.scala b/akka-actor-tests/src/test/scala/akka/performance/microbench/TellThroughputPerformanceSpec.scala index f028fec6b0..f9a2ae2df8 100644 --- a/akka-actor-tests/src/test/scala/akka/performance/microbench/TellThroughputPerformanceSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/performance/microbench/TellThroughputPerformanceSpec.scala @@ -4,8 +4,8 @@ import akka.performance.workbench.PerformanceSpec import akka.actor._ import java.util.concurrent.{ ThreadPoolExecutor, CountDownLatch, TimeUnit } import akka.dispatch._ -import akka.util.Duration -import akka.util.duration._ +import scala.concurrent.util.Duration +import scala.concurrent.util.duration._ // -server -Xms512M -Xmx1024M -XX:+UseParallelGC -Dbenchmark=true -Dbenchmark.repeatFactor=500 @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) diff --git a/akka-actor-tests/src/test/scala/akka/performance/trading/system/TradingSystem.scala b/akka-actor-tests/src/test/scala/akka/performance/trading/system/TradingSystem.scala index 1adb2ecbc7..be0cd105a0 100644 --- a/akka-actor-tests/src/test/scala/akka/performance/trading/system/TradingSystem.scala +++ b/akka-actor-tests/src/test/scala/akka/performance/trading/system/TradingSystem.scala @@ -1,5 +1,7 @@ package akka.performance.trading.system +import language.postfixOps + import akka.performance.trading.domain.Orderbook import akka.performance.trading.domain.OrderbookRepository import akka.actor.Actor._ @@ -44,7 +46,7 @@ class AkkaTradingSystem(val system: ActorSystem) extends TradingSystem { def matchingEngineDispatcher: Option[String] = Some("benchmark.trading-dispatcher") override val orderbooksGroupedByMatchingEngine: List[List[Orderbook]] = - for 
(groupOfSymbols: List[String] ← OrderbookRepository.orderbookSymbolsGroupedByMatchingEngine) + for (groupOfSymbols ← OrderbookRepository.orderbookSymbolsGroupedByMatchingEngine) yield groupOfSymbols map (s ⇒ Orderbook(s, false, system)) var matchingEngineForOrderbook: Map[String, ActorRef] = Map() diff --git a/akka-actor-tests/src/test/scala/akka/performance/workbench/BenchResultRepository.scala b/akka-actor-tests/src/test/scala/akka/performance/workbench/BenchResultRepository.scala index e0e6b0e1e2..1cccd19417 100644 --- a/akka-actor-tests/src/test/scala/akka/performance/workbench/BenchResultRepository.scala +++ b/akka-actor-tests/src/test/scala/akka/performance/workbench/BenchResultRepository.scala @@ -102,20 +102,21 @@ class FileBenchResultRepository extends BenchResultRepository { private def save(stats: Stats) { new File(serDir).mkdirs - if (!serDirExists) return - val timestamp = new SimpleDateFormat("yyyyMMddHHmmss").format(new Date(stats.timestamp)) - val name = stats.name + "--" + timestamp + "--" + stats.load + ".ser" - val f = new File(serDir, name) - var out: ObjectOutputStream = null - try { - out = new ObjectOutputStream(new BufferedOutputStream(new FileOutputStream(f))) - out.writeObject(stats) - } catch { - case e: Exception ⇒ - val errMsg = "Failed to save [%s] to [%s], due to [%s]".format(stats, f.getAbsolutePath, e.getMessage) - throw new RuntimeException(errMsg) - } finally { - if (out ne null) try { out.close() } catch { case ignore: Exception ⇒ } + if (serDirExists) { + val timestamp = new SimpleDateFormat("yyyyMMddHHmmss").format(new Date(stats.timestamp)) + val name = stats.name + "--" + timestamp + "--" + stats.load + ".ser" + val f = new File(serDir, name) + var out: ObjectOutputStream = null + try { + out = new ObjectOutputStream(new BufferedOutputStream(new FileOutputStream(f))) + out.writeObject(stats) + } catch { + case e: Exception ⇒ + val errMsg = "Failed to save [%s] to [%s], due to [%s]".format(stats, f.getAbsolutePath, e.getMessage) + throw new RuntimeException(errMsg) + } finally { + if (out ne null) try { out.close() } catch { case ignore: Exception ⇒ } + } } } @@ -142,19 +143,20 @@ class FileBenchResultRepository extends BenchResultRepository { def saveHtmlReport(content: String, fileName: String) { new File(htmlDir).mkdirs - if (!htmlDirExists) return - val f = new File(htmlDir, fileName) - var writer: PrintWriter = null - try { - writer = new PrintWriter(new FileWriter(f)) - writer.print(content) - writer.flush() - } catch { - case e: Exception ⇒ - val errMsg = "Failed to save report to [%s], due to [%s]".format(f.getAbsolutePath, e.getMessage) - throw new RuntimeException(errMsg) - } finally { - if (writer ne null) try { writer.close() } catch { case ignore: Exception ⇒ } + if (htmlDirExists) { + val f = new File(htmlDir, fileName) + var writer: PrintWriter = null + try { + writer = new PrintWriter(new FileWriter(f)) + writer.print(content) + writer.flush() + } catch { + case e: Exception ⇒ + val errMsg = "Failed to save report to [%s], due to [%s]".format(f.getAbsolutePath, e.getMessage) + throw new RuntimeException(errMsg) + } finally { + if (writer ne null) try { writer.close() } catch { case ignore: Exception ⇒ } + } } } diff --git a/akka-actor-tests/src/test/scala/akka/performance/workbench/GoogleChartBuilder.scala b/akka-actor-tests/src/test/scala/akka/performance/workbench/GoogleChartBuilder.scala index c513200310..52b30ceee7 100644 --- a/akka-actor-tests/src/test/scala/akka/performance/workbench/GoogleChartBuilder.scala +++ 
b/akka-actor-tests/src/test/scala/akka/performance/workbench/GoogleChartBuilder.scala @@ -17,131 +17,133 @@ object GoogleChartBuilder { * Builds a bar chart for tps in the statistics. */ def tpsChartUrl(statsByTimestamp: TreeMap[Long, Seq[Stats]], title: String, legend: Stats ⇒ String): String = { - if (statsByTimestamp.isEmpty) return "" + if (statsByTimestamp.isEmpty) "" + else { + val loads = statsByTimestamp.values.head.map(_.load) + val allStats = statsByTimestamp.values.flatten - val loads = statsByTimestamp.values.head.map(_.load) - val allStats = statsByTimestamp.values.flatten + val sb = new StringBuilder + sb.append(BaseUrl) + // bar chart + sb.append("cht=bvg") + sb.append("&") + // size + sb.append("chs=").append(ChartWidth).append("x").append(ChartHeight) + sb.append("&") + // title + sb.append("chtt=").append(urlEncode(title)) + sb.append("&") + // axis locations + sb.append("chxt=y,x") + sb.append("&") + // labels + sb.append("chxl=1:|") + sb.append(loads.mkString("|")) + sb.append("&") - val sb = new StringBuilder - sb.append(BaseUrl) - // bar chart - sb.append("cht=bvg") - sb.append("&") - // size - sb.append("chs=").append(ChartWidth).append("x").append(ChartHeight) - sb.append("&") - // title - sb.append("chtt=").append(urlEncode(title)) - sb.append("&") - // axis locations - sb.append("chxt=y,x") - sb.append("&") - // labels - sb.append("chxl=1:|") - sb.append(loads.mkString("|")) - sb.append("&") + // label color and font + //sb.append("chxs=2,D65D82,11.5,0,lt,D65D82") + //sb.append("&") - // label color and font - //sb.append("chxs=2,D65D82,11.5,0,lt,D65D82") - //sb.append("&") + // legend + val legendStats = statsByTimestamp.values.map(_.head).toSeq + appendLegend(legendStats, sb, legend) + sb.append("&") + // bar spacing + sb.append("chbh=a,4,20") + sb.append("&") + // bar colors + barColors(statsByTimestamp.size, sb) + sb.append("&") - // legend - val legendStats = statsByTimestamp.values.map(_.head).toSeq - appendLegend(legendStats, sb, legend) - sb.append("&") - // bar spacing - sb.append("chbh=a,4,20") - sb.append("&") - // bar colors - barColors(statsByTimestamp.size, sb) - sb.append("&") + // data series + val loadStr = loads.mkString(",") + sb.append("chd=t:") + val maxValue = allStats.map(_.tps).max + val tpsSeries: Iterable[String] = + for (statsSeq ← statsByTimestamp.values) yield { + statsSeq.map(_.tps).mkString(",") + } + sb.append(tpsSeries.mkString("|")) - // data series - val loadStr = loads.mkString(",") - sb.append("chd=t:") - val maxValue = allStats.map(_.tps).max - val tpsSeries: Iterable[String] = - for (statsSeq ← statsByTimestamp.values) yield { - statsSeq.map(_.tps).mkString(",") - } - sb.append(tpsSeries.mkString("|")) + // y range + sb.append("&") + sb.append("chxr=0,0,").append(maxValue) + sb.append("&") + sb.append("chds=0,").append(maxValue) + sb.append("&") - // y range - sb.append("&") - sb.append("chxr=0,0,").append(maxValue) - sb.append("&") - sb.append("chds=0,").append(maxValue) - sb.append("&") + // grid lines + appendGridSpacing(maxValue.toLong, sb) - // grid lines - appendGridSpacing(maxValue.toLong, sb) - - return sb.toString + sb.toString + } } /** * Builds a bar chart for all percentiles and the mean in the statistics. 
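Note: the BenchResultRepository and GoogleChartBuilder hunks above replace early `return` statements with a single `if`/`else` expression whose last expression is the result. A minimal sketch of that refactoring pattern follows; the object, URL, and parameter names are illustrative only, not the real chart code.

```scala
// Sketch only: same shape as the tpsChartUrl/save refactor above, with made-up names.
object EarlyReturnRefactorSketch {
  // Before: `if (values.isEmpty) return ""` followed by unconditional building code.
  // After: one expression; the final expression of the `else` branch is the return value.
  def chartUrl(values: Seq[Double], title: String): String =
    if (values.isEmpty) ""
    else {
      val sb = new StringBuilder("http://chart.example/?")
      sb.append("chtt=").append(title)
      sb.append("&chd=t:").append(values.mkString(","))
      sb.toString
    }
}
```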
*/ def percentilesAndMeanChartUrl(statistics: Seq[Stats], title: String, legend: Stats ⇒ String): String = { - if (statistics.isEmpty) return "" + if (statistics.isEmpty) "" + else { + val current = statistics.last - val current = statistics.last + val sb = new StringBuilder + sb.append(BaseUrl) + // bar chart + sb.append("cht=bvg") + sb.append("&") + // size + sb.append("chs=").append(ChartWidth).append("x").append(ChartHeight) + sb.append("&") + // title + sb.append("chtt=").append(urlEncode(title)) + sb.append("&") + // axis locations + sb.append("chxt=y,x,y") + sb.append("&") + // labels + percentileLabels(current.percentiles, sb) + sb.append("|mean") + sb.append("|2:|min|mean|median") + sb.append("&") + // label positions + sb.append("chxp=2,").append(current.min).append(",").append(current.mean).append(",") + .append(current.median) + sb.append("&") + // label color and font + sb.append("chxs=2,D65D82,11.5,0,lt,D65D82") + sb.append("&") + // lines for min, mean, median + sb.append("chxtc=2,-1000") + sb.append("&") + // legend + appendLegend(statistics, sb, legend) + sb.append("&") + // bar spacing + sb.append("chbh=a,4,20") + sb.append("&") + // bar colors + barColors(statistics.size, sb) + sb.append("&") - val sb = new StringBuilder - sb.append(BaseUrl) - // bar chart - sb.append("cht=bvg") - sb.append("&") - // size - sb.append("chs=").append(ChartWidth).append("x").append(ChartHeight) - sb.append("&") - // title - sb.append("chtt=").append(urlEncode(title)) - sb.append("&") - // axis locations - sb.append("chxt=y,x,y") - sb.append("&") - // labels - percentileLabels(current.percentiles, sb) - sb.append("|mean") - sb.append("|2:|min|mean|median") - sb.append("&") - // label positions - sb.append("chxp=2,").append(current.min).append(",").append(current.mean).append(",") - .append(current.median) - sb.append("&") - // label color and font - sb.append("chxs=2,D65D82,11.5,0,lt,D65D82") - sb.append("&") - // lines for min, mean, median - sb.append("chxtc=2,-1000") - sb.append("&") - // legend - appendLegend(statistics, sb, legend) - sb.append("&") - // bar spacing - sb.append("chbh=a,4,20") - sb.append("&") - // bar colors - barColors(statistics.size, sb) - sb.append("&") + // data series + val maxValue = statistics.map(_.percentiles.last._2).max + sb.append("chd=t:") + dataSeries(statistics.map(_.percentiles), statistics.map(_.mean), sb) - // data series - val maxValue = statistics.map(_.percentiles.last._2).max - sb.append("chd=t:") - dataSeries(statistics.map(_.percentiles), statistics.map(_.mean), sb) + // y range + sb.append("&") + sb.append("chxr=0,0,").append(maxValue).append("|2,0,").append(maxValue) + sb.append("&") + sb.append("chds=0,").append(maxValue) + sb.append("&") - // y range - sb.append("&") - sb.append("chxr=0,0,").append(maxValue).append("|2,0,").append(maxValue) - sb.append("&") - sb.append("chds=0,").append(maxValue) - sb.append("&") + // grid lines + appendGridSpacing(maxValue, sb) - // grid lines - appendGridSpacing(maxValue, sb) - - return sb.toString + sb.toString + } } private def percentileLabels(percentiles: TreeMap[Int, Long], sb: StringBuilder) { @@ -197,108 +199,109 @@ object GoogleChartBuilder { } def latencyAndThroughputChartUrl(statistics: Seq[Stats], title: String): String = { - if (statistics.isEmpty) return "" + if (statistics.isEmpty) "" + else { + val sb = new StringBuilder + sb.append(BaseUrl) + // line chart + sb.append("cht=lxy") + sb.append("&") + // size + sb.append("chs=").append(ChartWidth).append("x").append(ChartHeight) + sb.append("&") 
+ // title + sb.append("chtt=").append(urlEncode(title)) + sb.append("&") + // axis locations + sb.append("chxt=x,y,r,x,y,r") + sb.append("&") + // labels + sb.append("chxl=3:|clients|4:|Latency+(us)|5:|Throughput+(tps)") + sb.append("&") + // label color and font + sb.append("chxs=0,676767,11.5,0,lt,676767|1,676767,11.5,0,lt,676767|2,676767,11.5,0,lt,676767") + sb.append("&") + sb.append("chco=") + val seriesColors = List("25B33B", "3072F3", "FF0000", "37F0ED", "FF9900") + sb.append(seriesColors.mkString(",")) + sb.append("&") + // legend + sb.append("chdl=5th%20Percentile|Median|95th%20Percentile|Mean|Throughput") + sb.append("&") - val sb = new StringBuilder - sb.append(BaseUrl) - // line chart - sb.append("cht=lxy") - sb.append("&") - // size - sb.append("chs=").append(ChartWidth).append("x").append(ChartHeight) - sb.append("&") - // title - sb.append("chtt=").append(urlEncode(title)) - sb.append("&") - // axis locations - sb.append("chxt=x,y,r,x,y,r") - sb.append("&") - // labels - sb.append("chxl=3:|clients|4:|Latency+(us)|5:|Throughput+(tps)") - sb.append("&") - // label color and font - sb.append("chxs=0,676767,11.5,0,lt,676767|1,676767,11.5,0,lt,676767|2,676767,11.5,0,lt,676767") - sb.append("&") - sb.append("chco=") - val seriesColors = List("25B33B", "3072F3", "FF0000", "37F0ED", "FF9900") - sb.append(seriesColors.mkString(",")) - sb.append("&") - // legend - sb.append("chdl=5th%20Percentile|Median|95th%20Percentile|Mean|Throughput") - sb.append("&") + sb.append("chdlp=b") + sb.append("&") - sb.append("chdlp=b") - sb.append("&") + sb.append("chls=1|1|1") + sb.append("&") - sb.append("chls=1|1|1") - sb.append("&") + sb.append("chls=1|1|1") + sb.append("&") - sb.append("chls=1|1|1") - sb.append("&") + // margins + sb.append("chma=5,5,5,25") + sb.append("&") - // margins - sb.append("chma=5,5,5,25") - sb.append("&") + // data points + sb.append("chm=") + val chmStr = seriesColors.zipWithIndex.map(each ⇒ "o," + each._1 + "," + each._2 + ",-1,7").mkString("|") + sb.append(chmStr) + sb.append("&") - // data points - sb.append("chm=") - val chmStr = seriesColors.zipWithIndex.map(each ⇒ "o," + each._1 + "," + each._2 + ",-1,7").mkString("|") - sb.append(chmStr) - sb.append("&") + // data series + val loadStr = statistics.map(_.load).mkString(",") + sb.append("chd=t:") + val maxP = 95 + val percentiles = List(5, 50, maxP) + val maxValue = statistics.map(_.percentiles(maxP)).max + val percentileSeries: List[String] = + for (p ← percentiles) yield { + loadStr + "|" + statistics.map(_.percentiles(p)).mkString(",") + } + sb.append(percentileSeries.mkString("|")) - // data series - val loadStr = statistics.map(_.load).mkString(",") - sb.append("chd=t:") - val maxP = 95 - val percentiles = List(5, 50, maxP) - val maxValue = statistics.map(_.percentiles(maxP)).max - val percentileSeries: List[String] = - for (p ← percentiles) yield { - loadStr + "|" + statistics.map(_.percentiles(p)).mkString(",") + sb.append("|") + sb.append(loadStr).append("|") + val meanSeries = statistics.map(s ⇒ formatDouble(s.mean)).mkString(",") + sb.append(meanSeries) + + sb.append("|") + val maxTps: Double = statistics.map(_.tps).max + sb.append(loadStr).append("|") + val tpsSeries = statistics.map(s ⇒ formatDouble(s.tps)).mkString(",") + sb.append(tpsSeries) + + val minLoad = statistics.head.load + val maxLoad = statistics.last.load + + // y range + sb.append("&") + sb.append("chxr=0,").append(minLoad).append(",").append(maxLoad).append(",4").append("|1,0,").append(maxValue).append("|2,0,") + 
.append(formatDouble(maxTps)) + sb.append("&") + + sb.append("chds=") + for (p ← percentiles) { + sb.append(minLoad).append(",").append(maxLoad) + sb.append(",0,").append(maxValue) + sb.append(",") } - sb.append(percentileSeries.mkString("|")) - - sb.append("|") - sb.append(loadStr).append("|") - val meanSeries = statistics.map(s ⇒ formatDouble(s.mean)).mkString(",") - sb.append(meanSeries) - - sb.append("|") - val maxTps: Double = statistics.map(_.tps).max - sb.append(loadStr).append("|") - val tpsSeries = statistics.map(s ⇒ formatDouble(s.tps)).mkString(",") - sb.append(tpsSeries) - - val minLoad = statistics.head.load - val maxLoad = statistics.last.load - - // y range - sb.append("&") - sb.append("chxr=0,").append(minLoad).append(",").append(maxLoad).append(",4").append("|1,0,").append(maxValue).append("|2,0,") - .append(formatDouble(maxTps)) - sb.append("&") - - sb.append("chds=") - for (p ← percentiles) { sb.append(minLoad).append(",").append(maxLoad) - sb.append(",0,").append(maxValue) + sb.append(",0,").append(formatDouble(maxValue)) sb.append(",") + sb.append(minLoad).append(",").append(maxLoad) + sb.append(",0,").append(formatDouble(maxTps)) + sb.append("&") + + // label positions + sb.append("chxp=3,").append("50").append("|4,").append("100").append("|5,").append("100") + sb.append("&") + + // grid lines + appendGridSpacing(maxValue, sb) + + sb.toString } - sb.append(minLoad).append(",").append(maxLoad) - sb.append(",0,").append(formatDouble(maxValue)) - sb.append(",") - sb.append(minLoad).append(",").append(maxLoad) - sb.append(",0,").append(formatDouble(maxTps)) - sb.append("&") - - // label positions - sb.append("chxp=3,").append("50").append("|4,").append("100").append("|5,").append("100") - sb.append("&") - - // grid lines - appendGridSpacing(maxValue, sb) - - return sb.toString } def formatDouble(value: Double): String = { diff --git a/akka-actor-tests/src/test/scala/akka/performance/workbench/PerformanceSpec.scala b/akka-actor-tests/src/test/scala/akka/performance/workbench/PerformanceSpec.scala index ca23dd5a33..796a9f5835 100644 --- a/akka-actor-tests/src/test/scala/akka/performance/workbench/PerformanceSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/performance/workbench/PerformanceSpec.scala @@ -4,7 +4,7 @@ import scala.collection.immutable.TreeMap import org.apache.commons.math.stat.descriptive.DescriptiveStatistics import org.scalatest.BeforeAndAfterEach import akka.testkit.AkkaSpec -import akka.util.Duration +import scala.concurrent.util.Duration import com.typesafe.config.Config import java.util.concurrent.TimeUnit import akka.event.Logging diff --git a/akka-actor-tests/src/test/scala/akka/routing/ConfiguredLocalRoutingSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/ConfiguredLocalRoutingSpec.scala index 77ac5daf49..cde57e7607 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/ConfiguredLocalRoutingSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/ConfiguredLocalRoutingSpec.scala @@ -3,14 +3,16 @@ */ package akka.routing +import language.postfixOps + import java.util.concurrent.atomic.AtomicInteger import org.junit.runner.RunWith import akka.actor.{ Props, Deploy, Actor, ActorRef } import akka.ConfigurationException -import akka.dispatch.Await +import scala.concurrent.Await import akka.pattern.{ ask, gracefulStop } import akka.testkit.{ TestLatch, ImplicitSender, DefaultTimeout, AkkaSpec } -import akka.util.duration.intToDurationInt +import scala.concurrent.util.duration.intToDurationInt import akka.actor.UnstartedCell object 
ConfiguredLocalRoutingSpec { diff --git a/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala index 0a87273d61..0de9c9d1c8 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/ResizerSpec.scala @@ -3,15 +3,17 @@ */ package akka.routing +import language.postfixOps + import akka.actor.Actor import akka.testkit._ import akka.actor.Props -import akka.dispatch.Await -import akka.util.duration._ +import scala.concurrent.Await +import scala.concurrent.util.duration._ import akka.actor.ActorRef import java.util.concurrent.atomic.AtomicInteger import akka.pattern.ask -import akka.util.Duration +import scala.concurrent.util.Duration import java.util.concurrent.TimeoutException object ResizerSpec { @@ -172,7 +174,7 @@ class ResizerSpec extends AkkaSpec(ResizerSpec.config) with DefaultTimeout with val router = system.actorOf(Props(new Actor { def receive = { - case d: Duration ⇒ d.dilated.sleep; sender ! "done" + case d: Duration ⇒ Thread.sleep(d.dilated.toMillis); sender ! "done" case "echo" ⇒ sender ! "reply" } }).withRouter(RoundRobinRouter(resizer = Some(resizer)))) @@ -219,26 +221,25 @@ class ResizerSpec extends AkkaSpec(ResizerSpec.config) with DefaultTimeout with val router = system.actorOf(Props(new Actor { def receive = { - case n: Int ⇒ - (n millis).dilated.sleep + case n: Int ⇒ Thread.sleep((n millis).dilated.toMillis) } }).withRouter(RoundRobinRouter(resizer = Some(resizer)))) // put some pressure on the router for (m ← 0 to 5) { router ! 100 - (5 millis).dilated.sleep + Thread.sleep((5 millis).dilated.toMillis) } val z = Await.result(router ? CurrentRoutees, 5 seconds).asInstanceOf[RouterRoutees].routees.size z must be >= (2) - (300 millis).dilated.sleep + Thread.sleep((300 millis).dilated.toMillis) // let it cool down for (m ← 0 to 5) { router ! 
1 - (500 millis).dilated.sleep + Thread.sleep((500 millis).dilated.toMillis) } awaitCond( diff --git a/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala b/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala index 2598f26233..8b20189dcb 100644 --- a/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/routing/RoutingSpec.scala @@ -3,13 +3,15 @@ */ package akka.routing +import language.postfixOps + import java.util.concurrent.atomic.AtomicInteger import akka.actor._ import scala.collection.mutable.LinkedList import akka.testkit._ -import akka.util.duration._ -import akka.dispatch.Await -import akka.util.Duration +import scala.concurrent.util.duration._ +import scala.concurrent.Await +import scala.concurrent.util.Duration import akka.ConfigurationException import com.typesafe.config.ConfigFactory import akka.pattern.{ ask, pipe } @@ -65,7 +67,7 @@ object RoutingSpec { @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) class RoutingSpec extends AkkaSpec(RoutingSpec.config) with DefaultTimeout with ImplicitSender { - + implicit val ec = system.dispatcher import akka.routing.RoutingSpec._ "routers in general" must { diff --git a/akka-actor-tests/src/test/scala/akka/serialization/SerializeSpec.scala b/akka-actor-tests/src/test/scala/akka/serialization/SerializeSpec.scala index 82cd6e0e06..0c57b61f8c 100644 --- a/akka-actor-tests/src/test/scala/akka/serialization/SerializeSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/serialization/SerializeSpec.scala @@ -4,12 +4,14 @@ package akka.serialization +import language.postfixOps + import akka.testkit.{ AkkaSpec, EventFilter } import akka.actor._ import java.io._ -import akka.dispatch.Await +import scala.concurrent.Await import akka.util.Timeout -import akka.util.duration._ +import scala.concurrent.util.duration._ import scala.reflect.BeanInfo import com.google.protobuf.Message import akka.pattern.ask diff --git a/akka-actor-tests/src/test/scala/akka/util/DurationSpec.scala b/akka-actor-tests/src/test/scala/akka/util/DurationSpec.scala index b6638a63fa..ef300afbe5 100644 --- a/akka-actor-tests/src/test/scala/akka/util/DurationSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/util/DurationSpec.scala @@ -3,9 +3,13 @@ */ package akka.util +import language.postfixOps + import org.scalatest.WordSpec import org.scalatest.matchers.MustMatchers -import duration._ +import scala.concurrent.util.Duration +import scala.concurrent.util.duration._ + import java.util.concurrent.TimeUnit._ class DurationSpec extends WordSpec with MustMatchers { @@ -13,8 +17,8 @@ class DurationSpec extends WordSpec with MustMatchers { "Duration" must { "form a one-dimensional vector field" in { - val zero = 0.seconds - val one = 1.second + val zero = 0 seconds + val one = 1 second val two = one + one val three = 3 * one (0 * one) must be(zero) @@ -51,7 +55,7 @@ class DurationSpec extends WordSpec with MustMatchers { assert(minf != one) } - "check its range" in { + /*"check its range" in { for (unit ← Seq(DAYS, HOURS, MINUTES, SECONDS, MILLISECONDS, MICROSECONDS, NANOSECONDS)) { val x = unit.convert(Long.MaxValue, NANOSECONDS) val dur = Duration(x, unit) @@ -78,7 +82,7 @@ class DurationSpec extends WordSpec with MustMatchers { intercept[IllegalArgumentException] { Duration("%.0f".format(x + 10000000d) + unit.toString.toLowerCase) } intercept[IllegalArgumentException] { Duration("-%.0f".format(x + 10000000d) + unit.toString.toLowerCase) } } - } + }*/ "support fromNow" in { val dead = 
2.seconds.fromNow @@ -86,7 +90,7 @@ class DurationSpec extends WordSpec with MustMatchers { // view bounds vs. very local type inference vs. operator precedence: sigh dead.timeLeft must be > (1 second: Duration) dead2.timeLeft must be > (1 second: Duration) - 1.second.sleep + Thread.sleep(1.second.toMillis) dead.timeLeft must be < (1 second: Duration) dead2.timeLeft must be < (1 second: Duration) } diff --git a/akka-actor-tests/src/test/scala/akka/util/IndexSpec.scala b/akka-actor-tests/src/test/scala/akka/util/IndexSpec.scala index 4abe8c508d..d2d87b4c14 100644 --- a/akka-actor-tests/src/test/scala/akka/util/IndexSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/util/IndexSpec.scala @@ -4,13 +4,14 @@ package akka.util import org.scalatest.matchers.MustMatchers -import akka.dispatch.{ Future, Await } +import scala.concurrent.Future import akka.testkit.AkkaSpec +import scala.concurrent.Await import scala.util.Random import akka.testkit.DefaultTimeout class IndexSpec extends AkkaSpec with MustMatchers with DefaultTimeout { - + implicit val ec = system.dispatcher private def emptyIndex = new Index[String, Int](100, _ compareTo _) private def indexWithValues = { diff --git a/akka-actor-tests/src/test/scala/akka/util/NonFatalSpec.scala b/akka-actor-tests/src/test/scala/akka/util/NonFatalSpec.scala index 0c4bc295fb..4c33c80b68 100644 --- a/akka-actor-tests/src/test/scala/akka/util/NonFatalSpec.scala +++ b/akka-actor-tests/src/test/scala/akka/util/NonFatalSpec.scala @@ -1,7 +1,7 @@ /** * Copyright (C) 2009-2012 Typesafe Inc. */ -package akka.util +/*package akka.util import org.scalatest.matchers.MustMatchers import akka.testkit.AkkaSpec @@ -59,4 +59,4 @@ class NonFatalSpec extends AkkaSpec with MustMatchers { } -} \ No newline at end of file +}*/ \ No newline at end of file diff --git a/akka-actor/src/main/java/akka/actor/AbstractActorCell.java b/akka-actor/src/main/java/akka/actor/cell/AbstractActorCell.java similarity index 63% rename from akka-actor/src/main/java/akka/actor/AbstractActorCell.java rename to akka-actor/src/main/java/akka/actor/cell/AbstractActorCell.java index 95fb7368bc..2d8c4fbc1e 100644 --- a/akka-actor/src/main/java/akka/actor/AbstractActorCell.java +++ b/akka-actor/src/main/java/akka/actor/cell/AbstractActorCell.java @@ -2,8 +2,9 @@ * Copyright (C) 2009-2012 Typesafe Inc. 
*/ -package akka.actor; +package akka.actor.cell; +import akka.actor.ActorCell; import akka.util.Unsafe; final class AbstractActorCell { @@ -13,9 +14,9 @@ final class AbstractActorCell { static { try { - mailboxOffset = Unsafe.instance.objectFieldOffset(ActorCell.class.getDeclaredField("_mailboxDoNotCallMeDirectly")); - childrenOffset = Unsafe.instance.objectFieldOffset(ActorCell.class.getDeclaredField("_childrenRefsDoNotCallMeDirectly")); - nextNameOffset = Unsafe.instance.objectFieldOffset(ActorCell.class.getDeclaredField("_nextNameDoNotCallMeDirectly")); + mailboxOffset = Unsafe.instance.objectFieldOffset(ActorCell.class.getDeclaredField("akka$actor$cell$Dispatch$$_mailboxDoNotCallMeDirectly")); + childrenOffset = Unsafe.instance.objectFieldOffset(ActorCell.class.getDeclaredField("akka$actor$cell$Children$$_childrenRefsDoNotCallMeDirectly")); + nextNameOffset = Unsafe.instance.objectFieldOffset(ActorCell.class.getDeclaredField("akka$actor$cell$Children$$_nextNameDoNotCallMeDirectly")); } catch(Throwable t){ throw new ExceptionInInitializerError(t); } diff --git a/akka-actor/src/main/java/akka/dispatch/AbstractPromise.java b/akka-actor/src/main/java/akka/dispatch/AbstractPromise.java deleted file mode 100644 index db11e84483..0000000000 --- a/akka-actor/src/main/java/akka/dispatch/AbstractPromise.java +++ /dev/null @@ -1,29 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ - -package akka.dispatch; - -import akka.util.Unsafe; - -abstract class AbstractPromise { - private volatile Object _ref = DefaultPromise.EmptyPending(); - - final static long _refOffset; // Memory offset to _ref field - - static { - try { - _refOffset = Unsafe.instance.objectFieldOffset(AbstractPromise.class.getDeclaredField("_ref")); - } catch(Throwable t){ - throw new ExceptionInInitializerError(t); - } - } - - protected final boolean updateState(Object oldState, Object newState) { - return Unsafe.instance.compareAndSwapObject(this, _refOffset, oldState, newState); - } - - protected final Object getState() { - return _ref; - } -} diff --git a/akka-actor/src/main/java/akka/jsr166y/ForkJoinPool.java b/akka-actor/src/main/java/akka/jsr166y/ForkJoinPool.java deleted file mode 100644 index 4b1dd0fa1f..0000000000 --- a/akka-actor/src/main/java/akka/jsr166y/ForkJoinPool.java +++ /dev/null @@ -1,2858 +0,0 @@ -/* - * Written by Doug Lea with assistance from members of JCP JSR-166 - * Expert Group and released to the public domain, as explained at - * http://creativecommons.org/publicdomain/zero/1.0/ - */ - -package akka.jsr166y; -import java.util.ArrayList; -import java.util.Arrays; -import java.util.Collection; -import java.util.Collections; -import java.util.List; -import java.util.Random; -import java.util.concurrent.AbstractExecutorService; -import java.util.concurrent.Callable; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Future; -import java.util.concurrent.RejectedExecutionException; -import java.util.concurrent.RunnableFuture; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.concurrent.atomic.AtomicLong; -import java.util.concurrent.locks.AbstractQueuedSynchronizer; -import java.util.concurrent.locks.Condition; -import akka.util.Unsafe; - -/** - * An {@link ExecutorService} for running {@link ForkJoinTask}s. - * A {@code ForkJoinPool} provides the entry point for submissions - * from non-{@code ForkJoinTask} clients, as well as management and - * monitoring operations. - * - *
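Note on the AbstractPromise.java deletion above: it kept its state in a single `_ref` field and advanced it with a compare-and-swap through `akka.util.Unsafe`. A hedged sketch of the same state-transition idea, written against `java.util.concurrent.atomic.AtomicReference` for portability rather than the removed field-offset/Unsafe code; the class and the initial-state placeholder are illustrative.

```scala
// Sketch only: mirrors updateState/getState from the removed AbstractPromise,
// but uses AtomicReference instead of raw field offsets and Unsafe CAS.
import java.util.concurrent.atomic.AtomicReference

final class PromiseStateSketch {
  private[this] val ref = new AtomicReference[AnyRef]("EmptyPending") // placeholder initial state

  /** Atomically move from oldState to newState; false means another thread won the race. */
  def updateState(oldState: AnyRef, newState: AnyRef): Boolean =
    ref.compareAndSet(oldState, newState)

  def getState: AnyRef = ref.get
}
```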

- * A {@code ForkJoinPool} differs from other kinds of {@link
- * ExecutorService} mainly by virtue of employing
- * work-stealing: all threads in the pool attempt to find and
- * execute tasks submitted to the pool and/or created by other active
- * tasks (eventually blocking waiting for work if none exist). This
- * enables efficient processing when most tasks spawn other subtasks
- * (as do most {@code ForkJoinTask}s), as well as when many small
- * tasks are submitted to the pool from external clients. Especially
- * when setting asyncMode to true in constructors, {@code
- * ForkJoinPool}s may also be appropriate for use with event-style
- * tasks that are never joined.
- *
- * A {@code ForkJoinPool} is constructed with a given target
- * parallelism level; by default, equal to the number of available
- * processors. The pool attempts to maintain enough active (or
- * available) threads by dynamically adding, suspending, or resuming
- * internal worker threads, even if some tasks are stalled waiting to
- * join others. However, no such adjustments are guaranteed in the
- * face of blocked IO or other unmanaged synchronization. The nested
- * {@link ManagedBlocker} interface enables extension of the kinds of
- * synchronization accommodated.
- *
- * In addition to execution and lifecycle control methods, this
- * class provides status check methods (for example
- * {@link #getStealCount}) that are intended to aid in developing,
- * tuning, and monitoring fork/join applications. Also, method
- * {@link #toString} returns indications of pool state in a
- * convenient form for informal monitoring.
- *
- * As is the case with other ExecutorServices, there are three
- * main task execution methods summarized in the following table.
- * These are designed to be used primarily by clients not already
- * engaged in fork/join computations in the current pool. The main
- * forms of these methods accept instances of {@code ForkJoinTask},
- * but overloaded forms also allow mixed execution of plain {@code
- * Runnable}- or {@code Callable}- based activities as well. However,
- * tasks that are already executing in a pool should normally instead
- * use the within-computation forms listed in the table unless using
- * async event-style tasks that are not usually joined, in which case
- * there is little difference among choice of methods.
- *
- *                                 | Call from non-fork/join clients | Call from within fork/join computations
- * Arrange async execution         | {@link #execute(ForkJoinTask)}  | {@link ForkJoinTask#fork}
- * Await and obtain result         | {@link #invoke(ForkJoinTask)}   | {@link ForkJoinTask#invoke}
- * Arrange exec and obtain Future  | {@link #submit(ForkJoinTask)}   | {@link ForkJoinTask#fork} (ForkJoinTasks are Futures)
- *
- * Sample Usage. Normally a single {@code ForkJoinPool} is
- * used for all parallel task execution in a program or subsystem.
- * Otherwise, use would not usually outweigh the construction and
- * bookkeeping overhead of creating a large set of threads. For
- * example, a common pool could be used for the {@code SortTasks}
- * illustrated in {@link RecursiveAction}. Because {@code
- * ForkJoinPool} uses threads in {@linkplain java.lang.Thread#isDaemon
- * daemon} mode, there is typically no need to explicitly {@link
- * #shutdown} such a pool upon program exit.
- *
- * {@code
- * static final ForkJoinPool mainPool = new ForkJoinPool();
- * ...
- * public void sort(long[] array) {
- *   mainPool.invoke(new SortTask(array, 0, array.length));
- * }}
- *
- * Implementation notes: This implementation restricts the
- * maximum number of running threads to 32767. Attempts to create
- * pools with greater than the maximum number result in
- * {@code IllegalArgumentException}.
- *
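Note: the removed class comment above documents the execute/invoke/submit entry points and a SortTask sample. A hedged Scala sketch of the same submission pattern follows, written against the JDK's java.util.concurrent fork/join classes (the akka.jsr166y copy is what this diff deletes); SumTask, the threshold, and the array size are illustrative.

```scala
// Sketch only: invoke a recursive task on a pool, as the removed javadoc describes.
import java.util.concurrent.{ ForkJoinPool, RecursiveTask }

class SumTask(xs: Array[Long], lo: Int, hi: Int) extends RecursiveTask[Long] {
  override def compute(): Long =
    if (hi - lo <= 1024) xs.slice(lo, hi).sum          // small enough: compute directly
    else {
      val mid = (lo + hi) / 2
      val left = new SumTask(xs, lo, mid)
      left.fork()                                       // run the left half asynchronously
      new SumTask(xs, mid, hi).compute() + left.join()  // compute right half, then join left
    }
}

object ForkJoinSketch extends App {
  val pool = new ForkJoinPool()                         // parallelism defaults to available processors
  println(pool.invoke(new SumTask(Array.fill(1 << 16)(1L), 0, 1 << 16))) // prints 65536
}
```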

This implementation rejects submitted tasks (that is, by throwing - * {@link RejectedExecutionException}) only when the pool is shut down - * or internal resources have been exhausted. - * - * @since 1.7 - * @author Doug Lea - */ -public class ForkJoinPool extends AbstractExecutorService { - - /* - * Implementation Overview - * - * This class and its nested classes provide the main - * functionality and control for a set of worker threads: - * Submissions from non-FJ threads enter into submission queues. - * Workers take these tasks and typically split them into subtasks - * that may be stolen by other workers. Preference rules give - * first priority to processing tasks from their own queues (LIFO - * or FIFO, depending on mode), then to randomized FIFO steals of - * tasks in other queues. - * - * WorkQueues - * ========== - * - * Most operations occur within work-stealing queues (in nested - * class WorkQueue). These are special forms of Deques that - * support only three of the four possible end-operations -- push, - * pop, and poll (aka steal), under the further constraints that - * push and pop are called only from the owning thread (or, as - * extended here, under a lock), while poll may be called from - * other threads. (If you are unfamiliar with them, you probably - * want to read Herlihy and Shavit's book "The Art of - * Multiprocessor programming", chapter 16 describing these in - * more detail before proceeding.) The main work-stealing queue - * design is roughly similar to those in the papers "Dynamic - * Circular Work-Stealing Deque" by Chase and Lev, SPAA 2005 - * (http://research.sun.com/scalable/pubs/index.html) and - * "Idempotent work stealing" by Michael, Saraswat, and Vechev, - * PPoPP 2009 (http://portal.acm.org/citation.cfm?id=1504186). - * The main differences ultimately stem from GC requirements that - * we null out taken slots as soon as we can, to maintain as small - * a footprint as possible even in programs generating huge - * numbers of tasks. To accomplish this, we shift the CAS - * arbitrating pop vs poll (steal) from being on the indices - * ("base" and "top") to the slots themselves. So, both a - * successful pop and poll mainly entail a CAS of a slot from - * non-null to null. Because we rely on CASes of references, we - * do not need tag bits on base or top. They are simple ints as - * used in any circular array-based queue (see for example - * ArrayDeque). Updates to the indices must still be ordered in a - * way that guarantees that top == base means the queue is empty, - * but otherwise may err on the side of possibly making the queue - * appear nonempty when a push, pop, or poll have not fully - * committed. Note that this means that the poll operation, - * considered individually, is not wait-free. One thief cannot - * successfully continue until another in-progress one (or, if - * previously empty, a push) completes. However, in the - * aggregate, we ensure at least probabilistic non-blockingness. - * If an attempted steal fails, a thief always chooses a different - * random victim target to try next. So, in order for one thief to - * progress, it suffices for any in-progress poll or new push on - * any empty queue to complete. (This is why we normally use - * method pollAt and its variants that try once at the apparent - * base index, else consider alternative actions, rather than - * method poll.) 
- * - * This approach also enables support of a user mode in which local - * task processing is in FIFO, not LIFO order, simply by using - * poll rather than pop. This can be useful in message-passing - * frameworks in which tasks are never joined. However neither - * mode considers affinities, loads, cache localities, etc, so - * rarely provide the best possible performance on a given - * machine, but portably provide good throughput by averaging over - * these factors. (Further, even if we did try to use such - * information, we do not usually have a basis for exploiting it. - * For example, some sets of tasks profit from cache affinities, - * but others are harmed by cache pollution effects.) - * - * WorkQueues are also used in a similar way for tasks submitted - * to the pool. We cannot mix these tasks in the same queues used - * for work-stealing (this would contaminate lifo/fifo - * processing). Instead, we loosely associate submission queues - * with submitting threads, using a form of hashing. The - * ThreadLocal Submitter class contains a value initially used as - * a hash code for choosing existing queues, but may be randomly - * repositioned upon contention with other submitters. In - * essence, submitters act like workers except that they never - * take tasks, and they are multiplexed on to a finite number of - * shared work queues. However, classes are set up so that future - * extensions could allow submitters to optionally help perform - * tasks as well. Insertion of tasks in shared mode requires a - * lock (mainly to protect in the case of resizing) but we use - * only a simple spinlock (using bits in field runState), because - * submitters encountering a busy queue move on to try or create - * other queues -- they block only when creating and registering - * new queues. - * - * Management - * ========== - * - * The main throughput advantages of work-stealing stem from - * decentralized control -- workers mostly take tasks from - * themselves or each other. We cannot negate this in the - * implementation of other management responsibilities. The main - * tactic for avoiding bottlenecks is packing nearly all - * essentially atomic control state into two volatile variables - * that are by far most often read (not written) as status and - * consistency checks. - * - * Field "ctl" contains 64 bits holding all the information needed - * to atomically decide to add, inactivate, enqueue (on an event - * queue), dequeue, and/or re-activate workers. To enable this - * packing, we restrict maximum parallelism to (1<<15)-1 (which is - * far in excess of normal operating range) to allow ids, counts, - * and their negations (used for thresholding) to fit into 16bit - * fields. - * - * Field "runState" contains 32 bits needed to register and - * deregister WorkQueues, as well as to enable shutdown. It is - * only modified under a lock (normally briefly held, but - * occasionally protecting allocations and resizings) but even - * when locked remains available to check consistency. - * - * Recording WorkQueues. WorkQueues are recorded in the - * "workQueues" array that is created upon pool construction and - * expanded if necessary. Updates to the array while recording - * new workers and unrecording terminated ones are protected from - * each other by a lock but the array is otherwise concurrently - * readable, and accessed directly. To simplify index-based - * operations, the array size is always a power of two, and all - * readers must tolerate null slots. 
Shared (submission) queues - * are at even indices, worker queues at odd indices. Grouping - * them together in this way simplifies and speeds up task - * scanning. - * - * All worker thread creation is on-demand, triggered by task - * submissions, replacement of terminated workers, and/or - * compensation for blocked workers. However, all other support - * code is set up to work with other policies. To ensure that we - * do not hold on to worker references that would prevent GC, ALL - * accesses to workQueues are via indices into the workQueues - * array (which is one source of some of the messy code - * constructions here). In essence, the workQueues array serves as - * a weak reference mechanism. Thus for example the wait queue - * field of ctl stores indices, not references. Access to the - * workQueues in associated methods (for example signalWork) must - * both index-check and null-check the IDs. All such accesses - * ignore bad IDs by returning out early from what they are doing, - * since this can only be associated with termination, in which - * case it is OK to give up. All uses of the workQueues array - * also check that it is non-null (even if previously - * non-null). This allows nulling during termination, which is - * currently not necessary, but remains an option for - * resource-revocation-based shutdown schemes. It also helps - * reduce JIT issuance of uncommon-trap code, which tends to - * unnecessarily complicate control flow in some methods. - * - * Event Queuing. Unlike HPC work-stealing frameworks, we cannot - * let workers spin indefinitely scanning for tasks when none can - * be found immediately, and we cannot start/resume workers unless - * there appear to be tasks available. On the other hand, we must - * quickly prod them into action when new tasks are submitted or - * generated. In many usages, ramp-up time to activate workers is - * the main limiting factor in overall performance (this is - * compounded at program start-up by JIT compilation and - * allocation). So we try to streamline this as much as possible. - * We park/unpark workers after placing in an event wait queue - * when they cannot find work. This "queue" is actually a simple - * Treiber stack, headed by the "id" field of ctl, plus a 15bit - * counter value (that reflects the number of times a worker has - * been inactivated) to avoid ABA effects (we need only as many - * version numbers as worker threads). Successors are held in - * field WorkQueue.nextWait. Queuing deals with several intrinsic - * races, mainly that a task-producing thread can miss seeing (and - * signalling) another thread that gave up looking for work but - * has not yet entered the wait queue. We solve this by requiring - * a full sweep of all workers (via repeated calls to method - * scan()) both before and after a newly waiting worker is added - * to the wait queue. During a rescan, the worker might release - * some other queued worker rather than itself, which has the same - * net effect. Because enqueued workers may actually be rescanning - * rather than waiting, we set and clear the "parker" field of - * WorkQueues to reduce unnecessary calls to unpark. (This - * requires a secondary recheck to avoid missed signals.) 
Note - * the unusual conventions about Thread.interrupts surrounding - * parking and other blocking: Because interrupts are used solely - * to alert threads to check termination, which is checked anyway - * upon blocking, we clear status (using Thread.interrupted) - * before any call to park, so that park does not immediately - * return due to status being set via some other unrelated call to - * interrupt in user code. - * - * Signalling. We create or wake up workers only when there - * appears to be at least one task they might be able to find and - * execute. When a submission is added or another worker adds a - * task to a queue that previously had fewer than two tasks, they - * signal waiting workers (or trigger creation of new ones if - * fewer than the given parallelism level -- see signalWork). - * These primary signals are buttressed by signals during rescans; - * together these cover the signals needed in cases when more - * tasks are pushed but untaken, and improve performance compared - * to having one thread wake up all workers. - * - * Trimming workers. To release resources after periods of lack of - * use, a worker starting to wait when the pool is quiescent will - * time out and terminate if the pool has remained quiescent for - * SHRINK_RATE nanosecs. This will slowly propagate, eventually - * terminating all workers after long periods of non-use. - * - * Shutdown and Termination. A call to shutdownNow atomically sets - * a runState bit and then (non-atomically) sets each worker's - * runState status, cancels all unprocessed tasks, and wakes up - * all waiting workers. Detecting whether termination should - * commence after a non-abrupt shutdown() call requires more work - * and bookkeeping. We need consensus about quiescence (i.e., that - * there is no more work). The active count provides a primary - * indication but non-abrupt shutdown still requires a rechecking - * scan for any workers that are inactive but not queued. - * - * Joining Tasks - * ============= - * - * Any of several actions may be taken when one worker is waiting - * to join a task stolen (or always held) by another. Because we - * are multiplexing many tasks on to a pool of workers, we can't - * just let them block (as in Thread.join). We also cannot just - * reassign the joiner's run-time stack with another and replace - * it later, which would be a form of "continuation", that even if - * possible is not necessarily a good idea since we sometimes need - * both an unblocked task and its continuation to progress. - * Instead we combine two tactics: - * - * Helping: Arranging for the joiner to execute some task that it - * would be running if the steal had not occurred. - * - * Compensating: Unless there are already enough live threads, - * method tryCompensate() may create or re-activate a spare - * thread to compensate for blocked joiners until they unblock. - * - * A third form (implemented in tryRemoveAndExec and - * tryPollForAndExec) amounts to helping a hypothetical - * compensator: If we can readily tell that a possible action of a - * compensator is to steal and execute the task being joined, the - * joining thread can do so directly, without the need for a - * compensation thread (although at the expense of larger run-time - * stacks, but the tradeoff is typically worthwhile). - * - * The ManagedBlocker extension API can't use helping so relies - * only on compensation in method awaitBlocker. 
- * - * The algorithm in tryHelpStealer entails a form of "linear" - * helping: Each worker records (in field currentSteal) the most - * recent task it stole from some other worker. Plus, it records - * (in field currentJoin) the task it is currently actively - * joining. Method tryHelpStealer uses these markers to try to - * find a worker to help (i.e., steal back a task from and execute - * it) that could hasten completion of the actively joined task. - * In essence, the joiner executes a task that would be on its own - * local deque had the to-be-joined task not been stolen. This may - * be seen as a conservative variant of the approach in Wagner & - * Calder "Leapfrogging: a portable technique for implementing - * efficient futures" SIGPLAN Notices, 1993 - * (http://portal.acm.org/citation.cfm?id=155354). It differs in - * that: (1) We only maintain dependency links across workers upon - * steals, rather than use per-task bookkeeping. This sometimes - * requires a linear scan of workQueues array to locate stealers, - * but often doesn't because stealers leave hints (that may become - * stale/wrong) of where to locate them. A stealHint is only a - * hint because a worker might have had multiple steals and the - * hint records only one of them (usually the most current). - * Hinting isolates cost to when it is needed, rather than adding - * to per-task overhead. (2) It is "shallow", ignoring nesting - * and potentially cyclic mutual steals. (3) It is intentionally - * racy: field currentJoin is updated only while actively joining, - * which means that we miss links in the chain during long-lived - * tasks, GC stalls etc (which is OK since blocking in such cases - * is usually a good idea). (4) We bound the number of attempts - * to find work (see MAX_HELP) and fall back to suspending the - * worker and if necessary replacing it with another. - * - * It is impossible to keep exactly the target parallelism number - * of threads running at any given time. Determining the - * existence of conservatively safe helping targets, the - * availability of already-created spares, and the apparent need - * to create new spares are all racy, so we rely on multiple - * retries of each. Compensation in the apparent absence of - * helping opportunities is challenging to control on JVMs, where - * GC and other activities can stall progress of tasks that in - * turn stall out many other dependent tasks, without us being - * able to determine whether they will ever require compensation. - * Even though work-stealing otherwise encounters little - * degradation in the presence of more threads than cores, - * aggressively adding new threads in such cases entails risk of - * unwanted positive feedback control loops in which more threads - * cause more dependent stalls (as well as delayed progress of - * unblocked threads to the point that we know they are available) - * leading to more situations requiring more threads, and so - * on. This aspect of control can be seen as an (analytically - * intractable) game with an opponent that may choose the worst - * (for us) active thread to stall at any time. We take several - * precautions to bound losses (and thus bound gains), mainly in - * methods tryCompensate and awaitJoin: (1) We only try - * compensation after attempting enough helping steps (measured - * via counting and timing) that we have already consumed the - * estimated cost of creating and activating a new thread. 
(2) We - * allow up to 50% of threads to be blocked before initially - * adding any others, and unless completely saturated, check that - * some work is available for a new worker before adding. Also, we - * create up to only 50% more threads until entering a mode that - * only adds a thread if all others are possibly blocked. All - * together, this means that we might be half as fast to react, - * and create half as many threads as possible in the ideal case, - * but present vastly fewer anomalies in all other cases compared - * to both more aggressive and more conservative alternatives. - * - * Style notes: There is a lot of representation-level coupling - * among classes ForkJoinPool, ForkJoinWorkerThread, and - * ForkJoinTask. The fields of WorkQueue maintain data structures - * managed by ForkJoinPool, so are directly accessed. There is - * little point trying to reduce this, since any associated future - * changes in representations will need to be accompanied by - * algorithmic changes anyway. Several methods intrinsically - * sprawl because they must accumulate sets of consistent reads of - * volatiles held in local variables. Methods signalWork() and - * scan() are the main bottlenecks, so are especially heavily - * micro-optimized/mangled. There are lots of inline assignments - * (of form "while ((local = field) != 0)") which are usually the - * simplest way to ensure the required read orderings (which are - * sometimes critical). This leads to a "C"-like style of listing - * declarations of these locals at the heads of methods or blocks. - * There are several occurrences of the unusual "do {} while - * (!cas...)" which is the simplest way to force an update of a - * CAS'ed variable. There are also other coding oddities that help - * some methods perform reasonably even when interpreted (not - * compiled). - * - * The order of declarations in this file is: - * (1) Static utility functions - * (2) Nested (static) classes - * (3) Static fields - * (4) Fields, along with constants used when unpacking some of them - * (5) Internal control methods - * (6) Callbacks and other support for ForkJoinTask methods - * (7) Exported methods - * (8) Static block initializing statics in minimally dependent order - */ - - // Static utilities - - /** - * If there is a security manager, makes sure caller has - * permission to modify threads. - */ - private static void checkPermission() { - SecurityManager security = System.getSecurityManager(); - if (security != null) - security.checkPermission(modifyThreadPermission); - } - - // Nested classes - - /** - * Factory for creating new {@link ForkJoinWorkerThread}s. - * A {@code ForkJoinWorkerThreadFactory} must be defined and used - * for {@code ForkJoinWorkerThread} subclasses that extend base - * functionality or initialize threads with different contexts. - */ - public static interface ForkJoinWorkerThreadFactory { - /** - * Returns a new worker thread operating in the given pool. - * - * @param pool the pool this thread works in - * @throws NullPointerException if the pool is null - */ - public ForkJoinWorkerThread newThread(ForkJoinPool pool); - } - - /** - * Default ForkJoinWorkerThreadFactory implementation; creates a - * new ForkJoinWorkerThread. 
- */ - static class DefaultForkJoinWorkerThreadFactory - implements ForkJoinWorkerThreadFactory { - public ForkJoinWorkerThread newThread(ForkJoinPool pool) { - return new ForkJoinWorkerThread(pool); - } - } - - /** - * A simple non-reentrant lock used for exclusion when managing - * queues and workers. We use a custom lock so that we can readily - * probe lock state in constructions that check among alternative - * actions. The lock is normally only very briefly held, and - * sometimes treated as a spinlock, but other usages block to - * reduce overall contention in those cases where locked code - * bodies perform allocation/resizing. - */ - static final class Mutex extends AbstractQueuedSynchronizer { - public final boolean tryAcquire(int ignore) { - return compareAndSetState(0, 1); - } - public final boolean tryRelease(int ignore) { - setState(0); - return true; - } - public final void lock() { acquire(0); } - public final void unlock() { release(0); } - public final boolean isHeldExclusively() { return getState() == 1; } - public final Condition newCondition() { return new ConditionObject(); } - } - - /** - * Class for artificial tasks that are used to replace the target - * of local joins if they are removed from an interior queue slot - * in WorkQueue.tryRemoveAndExec. We don't need the proxy to - * actually do anything beyond having a unique identity. - */ - static final class EmptyTask extends ForkJoinTask { - EmptyTask() { status = ForkJoinTask.NORMAL; } // force done - public final Void getRawResult() { return null; } - public final void setRawResult(Void x) {} - public final boolean exec() { return true; } - } - - /** - * Queues supporting work-stealing as well as external task - * submission. See above for main rationale and algorithms. - * Implementation relies heavily on "Unsafe" intrinsics - * and selective use of "volatile": - * - * Field "base" is the index (mod array.length) of the least valid - * queue slot, which is always the next position to steal (poll) - * from if nonempty. Reads and writes require volatile orderings - * but not CAS, because updates are only performed after slot - * CASes. - * - * Field "top" is the index (mod array.length) of the next queue - * slot to push to or pop from. It is written only by owner thread - * for push, or under lock for trySharedPush, and accessed by - * other threads only after reading (volatile) base. Both top and - * base are allowed to wrap around on overflow, but (top - base) - * (or more commonly -(base - top) to force volatile read of base - * before top) still estimates size. - * - * The array slots are read and written using the emulation of - * volatiles/atomics provided by Unsafe. Insertions must in - * general use putOrderedObject as a form of releasing store to - * ensure that all writes to the task object are ordered before - * its publication in the queue. (Although we can avoid one case - * of this when locked in trySharedPush.) All removals entail a - * CAS to null. The array is always a power of two. To ensure - * safety of Unsafe array operations, all accesses perform - * explicit null checks and implicit bounds checks via - * power-of-two masking. - * - * In addition to basic queuing support, this class contains - * fields described elsewhere to control execution. It turns out - * to work better memory-layout-wise to include them in this - * class rather than a separate class. 
- * - * Performance on most platforms is very sensitive to placement of - * instances of both WorkQueues and their arrays -- we absolutely - * do not want multiple WorkQueue instances or multiple queue - * arrays sharing cache lines. (It would be best for queue objects - * and their arrays to share, but there is nothing available to - * help arrange that). Unfortunately, because they are recorded - * in a common array, WorkQueue instances are often moved to be - * adjacent by garbage collectors. To reduce impact, we use field - * padding that works OK on common platforms; this effectively - * trades off slightly slower average field access for the sake of - * avoiding really bad worst-case access. (Until better JVM - * support is in place, this padding is dependent on transient - * properties of JVM field layout rules.) We also take care in - * allocating, sizing and resizing the array. Non-shared queue - * arrays are initialized (via method growArray) by workers before - * use. Others are allocated on first use. - */ - static final class WorkQueue { - /** - * Capacity of work-stealing queue array upon initialization. - * Must be a power of two; at least 4, but should be larger to - * reduce or eliminate cacheline sharing among queues. - * Currently, it is much larger, as a partial workaround for - * the fact that JVMs often place arrays in locations that - * share GC bookkeeping (especially cardmarks) such that - * per-write accesses encounter serious memory contention. - */ - static final int INITIAL_QUEUE_CAPACITY = 1 << 13; - - /** - * Maximum size for queue arrays. Must be a power of two less - * than or equal to 1 << (31 - width of array entry) to ensure - * lack of wraparound of index calculations, but defined to a - * value a bit less than this to help users trap runaway - * programs before saturating systems. - */ - static final int MAXIMUM_QUEUE_CAPACITY = 1 << 26; // 64M - - volatile long totalSteals; // cumulative number of steals - int seed; // for random scanning; initialize nonzero - volatile int eventCount; // encoded inactivation count; < 0 if inactive - int nextWait; // encoded record of next event waiter - int rescans; // remaining scans until block - int nsteals; // top-level task executions since last idle - final int mode; // lifo, fifo, or shared - int poolIndex; // index of this queue in pool (or 0) - int stealHint; // index of most recent known stealer - volatile int runState; // 1: locked, -1: terminate; else 0 - volatile int base; // index of next slot for poll - int top; // index of next slot for push - ForkJoinTask[] array; // the elements (initially unallocated) - final ForkJoinPool pool; // the containing pool (may be null) - final ForkJoinWorkerThread owner; // owning thread or null if shared - volatile Thread parker; // == owner during call to park; else null - ForkJoinTask currentJoin; // task being joined in awaitJoin - ForkJoinTask currentSteal; // current non-local task being executed - // Heuristic padding to ameliorate unfortunate memory placements - Object p00, p01, p02, p03, p04, p05, p06, p07; - Object p08, p09, p0a, p0b, p0c, p0d, p0e; - - WorkQueue(ForkJoinPool pool, ForkJoinWorkerThread owner, int mode) { - this.mode = mode; - this.pool = pool; - this.owner = owner; - // Place indices in the center of array (that is not yet allocated) - base = top = INITIAL_QUEUE_CAPACITY >>> 1; - } - - /** - * Returns the approximate number of tasks in the queue. 
- */ - final int queueSize() { - int n = base - top; // non-owner callers must read base first - return (n >= 0) ? 0 : -n; // ignore transient negative - } - - /** - * Provides a more accurate estimate of whether this queue has - * any tasks than does queueSize, by checking whether a - * near-empty queue has at least one unclaimed task. - */ - final boolean isEmpty() { - ForkJoinTask[] a; int m, s; - int n = base - (s = top); - return (n >= 0 || - (n == -1 && - ((a = array) == null || - (m = a.length - 1) < 0 || - U.getObjectVolatile - (a, ((m & (s - 1)) << ASHIFT) + ABASE) == null))); - } - - /** - * Pushes a task. Call only by owner in unshared queues. - * - * @param task the task. Caller must ensure non-null. - * @throw RejectedExecutionException if array cannot be resized - */ - final void push(ForkJoinTask task) { - ForkJoinTask[] a; ForkJoinPool p; - int s = top, m, n; - if ((a = array) != null) { // ignore if queue removed - U.putOrderedObject - (a, (((m = a.length - 1) & s) << ASHIFT) + ABASE, task); - if ((n = (top = s + 1) - base) <= 2) { - if ((p = pool) != null) - p.signalWork(); - } - else if (n >= m) - growArray(true); - } - } - - /** - * Pushes a task if lock is free and array is either big - * enough or can be resized to be big enough. - * - * @param task the task. Caller must ensure non-null. - * @return true if submitted - */ - final boolean trySharedPush(ForkJoinTask task) { - boolean submitted = false; - if (runState == 0 && U.compareAndSwapInt(this, RUNSTATE, 0, 1)) { - ForkJoinTask[] a = array; - int s = top; - try { - if ((a != null && a.length > s + 1 - base) || - (a = growArray(false)) != null) { // must presize - int j = (((a.length - 1) & s) << ASHIFT) + ABASE; - U.putObject(a, (long)j, task); // don't need "ordered" - top = s + 1; - submitted = true; - } - } finally { - runState = 0; // unlock - } - } - return submitted; - } - - /** - * Takes next task, if one exists, in LIFO order. Call only - * by owner in unshared queues. (We do not have a shared - * version of this method because it is never needed.) - */ - final ForkJoinTask pop() { - ForkJoinTask[] a; ForkJoinTask t; int m; - if ((a = array) != null && (m = a.length - 1) >= 0) { - for (int s; (s = top - 1) - base >= 0;) { - long j = ((m & s) << ASHIFT) + ABASE; - if ((t = (ForkJoinTask)U.getObject(a, j)) == null) - break; - if (U.compareAndSwapObject(a, j, t, null)) { - top = s; - return t; - } - } - } - return null; - } - - /** - * Takes a task in FIFO order if b is base of queue and a task - * can be claimed without contention. Specialized versions - * appear in ForkJoinPool methods scan and tryHelpStealer. - */ - final ForkJoinTask pollAt(int b) { - ForkJoinTask t; ForkJoinTask[] a; - if ((a = array) != null) { - int j = (((a.length - 1) & b) << ASHIFT) + ABASE; - if ((t = (ForkJoinTask)U.getObjectVolatile(a, j)) != null && - base == b && - U.compareAndSwapObject(a, j, t, null)) { - base = b + 1; - return t; - } - } - return null; - } - - /** - * Takes next task, if one exists, in FIFO order. 
- */ - final ForkJoinTask poll() { - ForkJoinTask[] a; int b; ForkJoinTask t; - while ((b = base) - top < 0 && (a = array) != null) { - int j = (((a.length - 1) & b) << ASHIFT) + ABASE; - t = (ForkJoinTask)U.getObjectVolatile(a, j); - if (t != null) { - if (base == b && - U.compareAndSwapObject(a, j, t, null)) { - base = b + 1; - return t; - } - } - else if (base == b) { - if (b + 1 == top) - break; - Thread.yield(); // wait for lagging update - } - } - return null; - } - - /** - * Takes next task, if one exists, in order specified by mode. - */ - final ForkJoinTask nextLocalTask() { - return mode == 0 ? pop() : poll(); - } - - /** - * Returns next task, if one exists, in order specified by mode. - */ - final ForkJoinTask peek() { - ForkJoinTask[] a = array; int m; - if (a == null || (m = a.length - 1) < 0) - return null; - int i = mode == 0 ? top - 1 : base; - int j = ((i & m) << ASHIFT) + ABASE; - return (ForkJoinTask)U.getObjectVolatile(a, j); - } - - /** - * Pops the given task only if it is at the current top. - */ - final boolean tryUnpush(ForkJoinTask t) { - ForkJoinTask[] a; int s; - if ((a = array) != null && (s = top) != base && - U.compareAndSwapObject - (a, (((a.length - 1) & --s) << ASHIFT) + ABASE, t, null)) { - top = s; - return true; - } - return false; - } - - /** - * Polls the given task only if it is at the current base. - */ - final boolean pollFor(ForkJoinTask task) { - ForkJoinTask[] a; int b; - if ((b = base) - top < 0 && (a = array) != null) { - int j = (((a.length - 1) & b) << ASHIFT) + ABASE; - if (U.getObjectVolatile(a, j) == task && base == b && - U.compareAndSwapObject(a, j, task, null)) { - base = b + 1; - return true; - } - } - return false; - } - - /** - * Initializes or doubles the capacity of array. Call either - * by owner or with lock held -- it is OK for base, but not - * top, to move while resizings are in progress. - * - * @param rejectOnFailure if true, throw exception if capacity - * exceeded (relayed ultimately to user); else return null. - */ - final ForkJoinTask[] growArray(boolean rejectOnFailure) { - ForkJoinTask[] oldA = array; - int size = oldA != null ? oldA.length << 1 : INITIAL_QUEUE_CAPACITY; - if (size <= MAXIMUM_QUEUE_CAPACITY) { - int oldMask, t, b; - ForkJoinTask[] a = array = new ForkJoinTask[size]; - if (oldA != null && (oldMask = oldA.length - 1) >= 0 && - (t = top) - (b = base) > 0) { - int mask = size - 1; - do { - ForkJoinTask x; - int oldj = ((b & oldMask) << ASHIFT) + ABASE; - int j = ((b & mask) << ASHIFT) + ABASE; - x = (ForkJoinTask)U.getObjectVolatile(oldA, oldj); - if (x != null && - U.compareAndSwapObject(oldA, oldj, x, null)) - U.putObjectVolatile(a, j, x); - } while (++b != t); - } - return a; - } - else if (!rejectOnFailure) - return null; - else - throw new RejectedExecutionException("Queue capacity exceeded"); - } - - /** - * Removes and cancels all known tasks, ignoring any exceptions. - */ - final void cancelAll() { - ForkJoinTask.cancelIgnoringExceptions(currentJoin); - ForkJoinTask.cancelIgnoringExceptions(currentSteal); - for (ForkJoinTask t; (t = poll()) != null; ) - ForkJoinTask.cancelIgnoringExceptions(t); - } - - /** - * Computes next value for random probes. Scans don't require - * a very high quality generator, but also not a crummy one. - * Marsaglia xor-shift is cheap and works well enough. Note: - * This is manually inlined in its usages in ForkJoinPool to - * avoid writes inside busy scan loops. 
- */ - final int nextSeed() { - int r = seed; - r ^= r << 13; - r ^= r >>> 17; - return seed = r ^= r << 5; - } - - // Execution methods - - /** - * Pops and runs tasks until empty. - */ - private void popAndExecAll() { - // A bit faster than repeated pop calls - ForkJoinTask[] a; int m, s; long j; ForkJoinTask t; - while ((a = array) != null && (m = a.length - 1) >= 0 && - (s = top - 1) - base >= 0 && - (t = ((ForkJoinTask) - U.getObject(a, j = ((m & s) << ASHIFT) + ABASE))) - != null) { - if (U.compareAndSwapObject(a, j, t, null)) { - top = s; - t.doExec(); - } - } - } - - /** - * Polls and runs tasks until empty. - */ - private void pollAndExecAll() { - for (ForkJoinTask t; (t = poll()) != null;) - t.doExec(); - } - - /** - * If present, removes from queue and executes the given task, or - * any other cancelled task. Returns (true) immediately on any CAS - * or consistency check failure so caller can retry. - * - * @return false if no progress can be made - */ - final boolean tryRemoveAndExec(ForkJoinTask task) { - boolean removed = false, empty = true, progress = true; - ForkJoinTask[] a; int m, s, b, n; - if ((a = array) != null && (m = a.length - 1) >= 0 && - (n = (s = top) - (b = base)) > 0) { - for (ForkJoinTask t;;) { // traverse from s to b - int j = ((--s & m) << ASHIFT) + ABASE; - t = (ForkJoinTask)U.getObjectVolatile(a, j); - if (t == null) // inconsistent length - break; - else if (t == task) { - if (s + 1 == top) { // pop - if (!U.compareAndSwapObject(a, j, task, null)) - break; - top = s; - removed = true; - } - else if (base == b) // replace with proxy - removed = U.compareAndSwapObject(a, j, task, - new EmptyTask()); - break; - } - else if (t.status >= 0) - empty = false; - else if (s + 1 == top) { // pop and throw away - if (U.compareAndSwapObject(a, j, t, null)) - top = s; - break; - } - if (--n == 0) { - if (!empty && base == b) - progress = false; - break; - } - } - } - if (removed) - task.doExec(); - return progress; - } - - /** - * Executes a top-level task and any local tasks remaining - * after execution. - */ - final void runTask(ForkJoinTask t) { - if (t != null) { - currentSteal = t; - t.doExec(); - if (top != base) { // process remaining local tasks - if (mode == 0) - popAndExecAll(); - else - pollAndExecAll(); - } - ++nsteals; - currentSteal = null; - } - } - - /** - * Executes a non-top-level (stolen) task. - */ - final void runSubtask(ForkJoinTask t) { - if (t != null) { - ForkJoinTask ps = currentSteal; - currentSteal = t; - t.doExec(); - currentSteal = ps; - } - } - - /** - * Returns true if owned and not known to be blocked. - */ - final boolean isApparentlyUnblocked() { - Thread wt; Thread.State s; - return (eventCount >= 0 && - (wt = owner) != null && - (s = wt.getState()) != Thread.State.BLOCKED && - s != Thread.State.WAITING && - s != Thread.State.TIMED_WAITING); - } - - /** - * If this owned and is not already interrupted, try to - * interrupt and/or unpark, ignoring exceptions. 
- */ - final void interruptOwner() { - Thread wt, p; - if ((wt = owner) != null && !wt.isInterrupted()) { - try { - wt.interrupt(); - } catch (SecurityException ignore) { - } - } - if ((p = parker) != null) - U.unpark(p); - } - - // Unsafe mechanics - private static final sun.misc.Unsafe U; - private static final long RUNSTATE; - private static final int ABASE; - private static final int ASHIFT; - static { - int s; - try { - U = getUnsafe(); - Class k = WorkQueue.class; - Class ak = ForkJoinTask[].class; - RUNSTATE = U.objectFieldOffset - (k.getDeclaredField("runState")); - ABASE = U.arrayBaseOffset(ak); - s = U.arrayIndexScale(ak); - } catch (Exception e) { - throw new Error(e); - } - if ((s & (s-1)) != 0) - throw new Error("data type scale not a power of two"); - ASHIFT = 31 - Integer.numberOfLeadingZeros(s); - } - } - - /** - * Per-thread records for threads that submit to pools. Currently - * holds only pseudo-random seed / index that is used to choose - * submission queues in method doSubmit. In the future, this may - * also incorporate a means to implement different task rejection - * and resubmission policies. - * - * Seeds for submitters and workers/workQueues work in basically - * the same way but are initialized and updated using slightly - * different mechanics. Both are initialized using the same - * approach as in class ThreadLocal, where successive values are - * unlikely to collide with previous values. This is done during - * registration for workers, but requires a separate AtomicInteger - * for submitters. Seeds are then randomly modified upon - * collisions using xorshifts, which requires a non-zero seed. - */ - static final class Submitter { - int seed; - Submitter() { - int s = nextSubmitterSeed.getAndAdd(SEED_INCREMENT); - seed = (s == 0) ? 1 : s; // ensure non-zero - } - } - - /** ThreadLocal class for Submitters */ - static final class ThreadSubmitter extends ThreadLocal { - public Submitter initialValue() { return new Submitter(); } - } - - // static fields (initialized in static initializer below) - - /** - * Creates a new ForkJoinWorkerThread. This factory is used unless - * overridden in ForkJoinPool constructors. - */ - public static final ForkJoinWorkerThreadFactory - defaultForkJoinWorkerThreadFactory; - - /** - * Generator for assigning sequence numbers as pool names. - */ - private static final AtomicInteger poolNumberGenerator; - - /** - * Generator for initial hashes/seeds for submitters. Accessed by - * Submitter class constructor. - */ - static final AtomicInteger nextSubmitterSeed; - - /** - * Permission required for callers of methods that may start or - * kill threads. - */ - private static final RuntimePermission modifyThreadPermission; - - /** - * Per-thread submission bookkeeping. Shared across all pools - * to reduce ThreadLocal pollution and because random motion - * to avoid contention in one pool is likely to hold for others. - */ - private static final ThreadSubmitter submitters; - - // static constants - - /** - * The wakeup interval (in nanoseconds) for a worker waiting for a - * task when the pool is quiescent to instead try to shrink the - * number of workers. The exact value does not matter too - * much. It must be short enough to release resources during - * sustained periods of idleness, but not so short that threads - * are continually re-created.
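The Submitter seeding described above follows the same approach as class ThreadLocal: each registration advances a shared counter by SEED_INCREMENT (0x61c88647, defined just below), and because that increment is odd, the resulting seeds index a power-of-two table without colliding until the table is full. A minimal standalone sketch (class name invented for illustration):

import java.util.HashSet;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;

// Standalone sketch (class name invented): the ThreadLocal-style additive
// seed sequence. Because SEED_INCREMENT is odd, the first `slots` seeds map
// to distinct slots of any power-of-two table, so fresh registrations do not
// collide. Submitter() additionally maps a seed of 0 to 1, since the later
// xorshift updates need a non-zero starting value.
public final class SeedIncrementDemo {
    static final int SEED_INCREMENT = 0x61c88647;   // same constant as the pool
    static final AtomicInteger nextSubmitterSeed = new AtomicInteger();

    public static void main(String[] args) {
        int slots = 64;                             // hypothetical power-of-two table size
        Set<Integer> taken = new HashSet<Integer>();
        for (int i = 0; i < slots; i++) {
            int seed = nextSubmitterSeed.getAndAdd(SEED_INCREMENT);
            taken.add(seed & (slots - 1));          // index as a table probe would
        }
        System.out.println("distinct slots: " + taken.size() + " of " + slots); // 64 of 64
    }
}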
- */ - private static final long SHRINK_RATE = - 4L * 1000L * 1000L * 1000L; // 4 seconds - - /** - * The timeout value for attempted shrinkage, includes - * some slop to cope with system timer imprecision. - */ - private static final long SHRINK_TIMEOUT = SHRINK_RATE - (SHRINK_RATE / 10); - - /** - * The maximum stolen->joining link depth allowed in method - * tryHelpStealer. Must be a power of two. This value also - * controls the maximum number of times to try to help join a task - * without any apparent progress or change in pool state before - * giving up and blocking (see awaitJoin). Depths for legitimate - * chains are unbounded, but we use a fixed constant to avoid - * (otherwise unchecked) cycles and to bound staleness of - * traversal parameters at the expense of sometimes blocking when - * we could be helping. - */ - private static final int MAX_HELP = 32; - - /** - * Secondary time-based bound (in nanosecs) for helping attempts - * before trying compensated blocking in awaitJoin. Used in - * conjunction with MAX_HELP to reduce variance due to different - * polling rates associated with different helping options. The - * value should roughly approximate the time required to create - * and/or activate a worker thread. - */ - private static final long COMPENSATION_DELAY = 100L * 1000L; // 0.1 millisec - - /** - * Increment for seed generators. See class ThreadLocal for - * explanation. - */ - private static final int SEED_INCREMENT = 0x61c88647; - - /** - * Bits and masks for control variables - * - * Field ctl is a long packed with: - * AC: Number of active running workers minus target parallelism (16 bits) - * TC: Number of total workers minus target parallelism (16 bits) - * ST: true if pool is terminating (1 bit) - * EC: the wait count of top waiting thread (15 bits) - * ID: poolIndex of top of Treiber stack of waiters (16 bits) - * - * When convenient, we can extract the upper 32 bits of counts and - * the lower 32 bits of queue state, u = (int)(ctl >>> 32) and e = - * (int)ctl. The ec field is never accessed alone, but always - * together with id and st. The offsets of counts by the target - * parallelism and the positionings of fields makes it possible to - * perform the most common checks via sign tests of fields: When - * ac is negative, there are not enough active workers, when tc is - * negative, there are not enough total workers, and when e is - * negative, the pool is terminating. To deal with these possibly - * negative fields, we use casts in and out of "short" and/or - * signed shifts to maintain signedness. - * - * When a thread is queued (inactivated), its eventCount field is - * set negative, which is the only way to tell if a worker is - * prevented from executing tasks, even though it must continue to - * scan for them to avoid queuing races. Note however that - * eventCount updates lag releases so usage requires care. - * - * Field runState is an int packed with: - * SHUTDOWN: true if shutdown is enabled (1 bit) - * SEQ: a sequence number updated upon (de)registering workers (30 bits) - * INIT: set true after workQueues array construction (1 bit) - * - * The sequence number enables simple consistency checks: - * Staleness of read-only operations on the workQueues array can - * be checked by comparing runState before vs after the reads. 
- */ - - // bit positions/shifts for fields - private static final int AC_SHIFT = 48; - private static final int TC_SHIFT = 32; - private static final int ST_SHIFT = 31; - private static final int EC_SHIFT = 16; - - // bounds - private static final int SMASK = 0xffff; // short bits - private static final int MAX_CAP = 0x7fff; // max #workers - 1 - private static final int SQMASK = 0xfffe; // even short bits - private static final int SHORT_SIGN = 1 << 15; - private static final int INT_SIGN = 1 << 31; - - // masks - private static final long STOP_BIT = 0x0001L << ST_SHIFT; - private static final long AC_MASK = ((long)SMASK) << AC_SHIFT; - private static final long TC_MASK = ((long)SMASK) << TC_SHIFT; - - // units for incrementing and decrementing - private static final long TC_UNIT = 1L << TC_SHIFT; - private static final long AC_UNIT = 1L << AC_SHIFT; - - // masks and units for dealing with u = (int)(ctl >>> 32) - private static final int UAC_SHIFT = AC_SHIFT - 32; - private static final int UTC_SHIFT = TC_SHIFT - 32; - private static final int UAC_MASK = SMASK << UAC_SHIFT; - private static final int UTC_MASK = SMASK << UTC_SHIFT; - private static final int UAC_UNIT = 1 << UAC_SHIFT; - private static final int UTC_UNIT = 1 << UTC_SHIFT; - - // masks and units for dealing with e = (int)ctl - private static final int E_MASK = 0x7fffffff; // no STOP_BIT - private static final int E_SEQ = 1 << EC_SHIFT; - - // runState bits - private static final int SHUTDOWN = 1 << 31; - - // access mode for WorkQueue - static final int LIFO_QUEUE = 0; - static final int FIFO_QUEUE = 1; - static final int SHARED_QUEUE = -1; - - // Instance fields - - /* - * Field layout order in this class tends to matter more than one - * would like. Runtime layout order is only loosely related to - * declaration order and may differ across JVMs, but the following - * empirically works OK on current JVMs. - */ - - volatile long ctl; // main pool control - final int parallelism; // parallelism level - final int localMode; // per-worker scheduling mode - final int submitMask; // submit queue index bound - int nextSeed; // for initializing worker seeds - volatile int runState; // shutdown status and seq - WorkQueue[] workQueues; // main registry - final Mutex lock; // for registration - final Condition termination; // for awaitTermination - final ForkJoinWorkerThreadFactory factory; // factory for new workers - final Thread.UncaughtExceptionHandler ueh; // per-worker UEH - final AtomicLong stealCount; // collect counts when terminated - final AtomicInteger nextWorkerNumber; // to create worker name string - final String workerNamePrefix; // to create worker name string - - // Creating, registering, and deregistering workers - - /** - * Tries to create and start a worker - */ - private void addWorker() { - Throwable ex = null; - ForkJoinWorkerThread wt = null; - try { - if ((wt = factory.newThread(this)) != null) { - wt.start(); - return; - } - } catch (Throwable e) { - ex = e; - } - deregisterWorker(wt, ex); // adjust counts etc on failure - } - - /** - * Callback from ForkJoinWorkerThread constructor to assign a - * public name. This must be separate from registerWorker because - * it is called during the "super" constructor call in - * ForkJoinWorkerThread. - */ - final String nextWorkerName() { - return workerNamePrefix.concat - (Integer.toString(nextWorkerNumber.addAndGet(1))); - } - - /** - * Callback from ForkJoinWorkerThread constructor to establish its - * poolIndex and record its WorkQueue. 
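The packed ctl layout documented above, together with the shift and mask constants just defined, lets the pool answer "are there too few active or total workers?" with a single sign test after a shift. A minimal standalone sketch of the encoding and decoding (class and method names invented for illustration):

// Standalone sketch (class and method names invented): the packed ctl word.
// AC = active workers minus parallelism (high 16 bits), TC = total workers
// minus parallelism (next 16 bits); both are stored offset so that a plain
// sign test answers "too few active?" / "too few total?".
public final class CtlPackingDemo {
    static final int  AC_SHIFT = 48, TC_SHIFT = 32; // same shifts as the pool
    static final int  SMASK    = 0xffff;
    static final long AC_MASK  = ((long) SMASK) << AC_SHIFT;
    static final long TC_MASK  = ((long) SMASK) << TC_SHIFT;
    static final long AC_UNIT  = 1L << AC_SHIFT;
    static final long TC_UNIT  = 1L << TC_SHIFT;

    static void report(String when, long c, int parallelism) {
        int ac = (int) (c >> AC_SHIFT);             // sign-extended active delta
        int tc = (short) (c >>> TC_SHIFT);          // sign-extended total delta
        System.out.println(when + ": active=" + (ac + parallelism)
            + " total=" + (tc + parallelism)
            + " needMoreActive=" + (ac < 0) + " needMoreTotal=" + (tc < 0));
    }

    public static void main(String[] args) {
        int parallelism = 4;
        long np = (long) (-parallelism);            // same initialization as the constructor
        long ctl = ((np << AC_SHIFT) & AC_MASK) | ((np << TC_SHIFT) & TC_MASK);
        report("empty pool", ctl, parallelism);     // 0 active, 0 total; both flags true
        ctl += AC_UNIT + TC_UNIT;                   // one worker registered and running
        report("one worker", ctl, parallelism);     // 1 active, 1 total; still below target
    }
}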
To avoid scanning bias due - * to packing entries in front of the workQueues array, we treat - * the array as a simple power-of-two hash table using per-thread - * seed as hash, expanding as needed. - * - * @param w the worker's queue - */ - final void registerWorker(WorkQueue w) { - Mutex lock = this.lock; - lock.lock(); - try { - WorkQueue[] ws = workQueues; - if (w != null && ws != null) { // skip on shutdown/failure - int rs, n = ws.length, m = n - 1; - int s = nextSeed += SEED_INCREMENT; // rarely-colliding sequence - w.seed = (s == 0) ? 1 : s; // ensure non-zero seed - int r = (s << 1) | 1; // use odd-numbered indices - if (ws[r &= m] != null) { // collision - int probes = 0; // step by approx half size - int step = (n <= 4) ? 2 : ((n >>> 1) & SQMASK) + 2; - while (ws[r = (r + step) & m] != null) { - if (++probes >= n) { - workQueues = ws = Arrays.copyOf(ws, n <<= 1); - m = n - 1; - probes = 0; - } - } - } - w.eventCount = w.poolIndex = r; // establish before recording - ws[r] = w; // also update seq - runState = ((rs = runState) & SHUTDOWN) | ((rs + 2) & ~SHUTDOWN); - } - } finally { - lock.unlock(); - } - } - - /** - * Final callback from terminating worker, as well as upon failure - * to construct or start a worker in addWorker. Removes record of - * worker from array, and adjusts counts. If pool is shutting - * down, tries to complete termination. - * - * @param wt the worker thread or null if addWorker failed - * @param ex the exception causing failure, or null if none - */ - final void deregisterWorker(ForkJoinWorkerThread wt, Throwable ex) { - Mutex lock = this.lock; - WorkQueue w = null; - if (wt != null && (w = wt.workQueue) != null) { - w.runState = -1; // ensure runState is set - stealCount.getAndAdd(w.totalSteals + w.nsteals); - int idx = w.poolIndex; - lock.lock(); - try { // remove record from array - WorkQueue[] ws = workQueues; - if (ws != null && idx >= 0 && idx < ws.length && ws[idx] == w) - ws[idx] = null; - } finally { - lock.unlock(); - } - } - - long c; // adjust ctl counts - do {} while (!U.compareAndSwapLong - (this, CTL, c = ctl, (((c - AC_UNIT) & AC_MASK) | - ((c - TC_UNIT) & TC_MASK) | - (c & ~(AC_MASK|TC_MASK))))); - - if (!tryTerminate(false, false) && w != null) { - w.cancelAll(); // cancel remaining tasks - if (w.array != null) // suppress signal if never ran - signalWork(); // wake up or create replacement - if (ex == null) // help clean refs on way out - ForkJoinTask.helpExpungeStaleExceptions(); - } - - if (ex != null) // rethrow - U.throwException(ex); - } - - - // Submissions - - /** - * Unless shutting down, adds the given task to a submission queue - * at submitter's current queue index (modulo submission - * range). If no queue exists at the index, one is created. If - * the queue is busy, another index is randomly chosen. The - * submitMask bounds the effective number of queues to the - * (nearest power of two for) parallelism level. - * - * @param task the task. Caller must ensure non-null. 
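registerWorker above hashes workers into odd slots of the workQueues array, while doSubmit, described above and defined just below, masks the submitter's seed with SQMASK so external submissions always use even slots; the two kinds of queue therefore never compete for the same index. A minimal standalone sketch of the index arithmetic (names and seed values invented for illustration):

// Standalone sketch (names and seed values invented): the even/odd slot
// convention in the workQueues array. Worker registration forces an odd
// index; external submission masks with SQMASK, forcing an even index, so
// the two kinds of queue never land on the same slot.
public final class SlotParityDemo {
    static final int SQMASK = 0xfffe;               // even short bits, as in the pool

    public static void main(String[] args) {
        int n = 16, m = n - 1;                      // hypothetical workQueues length (power of two)

        int workerSeed = 0x61c88647;                // arbitrary registration seed
        int workerSlot = ((workerSeed << 1) | 1) & m;
        System.out.println("worker slot     = " + workerSlot + " (odd)");

        int submitterSeed = 0x1b873593;             // arbitrary per-thread submitter seed
        int submitSlot = submitterSeed & m & SQMASK;
        System.out.println("submission slot = " + submitSlot + " (even)");
    }
}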
- */ - private void doSubmit(ForkJoinTask task) { - Submitter s = submitters.get(); - for (int r = s.seed, m = submitMask;;) { - WorkQueue[] ws; WorkQueue q; - int k = r & m & SQMASK; // use only even indices - if (runState < 0 || (ws = workQueues) == null || ws.length <= k) - throw new RejectedExecutionException(); // shutting down - else if ((q = ws[k]) == null) { // create new queue - WorkQueue nq = new WorkQueue(this, null, SHARED_QUEUE); - Mutex lock = this.lock; // construct outside lock - lock.lock(); - try { // recheck under lock - int rs = runState; // to update seq - if (ws == workQueues && ws[k] == null) { - ws[k] = nq; - runState = ((rs & SHUTDOWN) | ((rs + 2) & ~SHUTDOWN)); - } - } finally { - lock.unlock(); - } - } - else if (q.trySharedPush(task)) { - signalWork(); - return; - } - else if (m > 1) { // move to a different index - r ^= r << 13; // same xorshift as WorkQueues - r ^= r >>> 17; - s.seed = r ^= r << 5; - } - else - Thread.yield(); // yield if no alternatives - } - } - - // Maintaining ctl counts - - /** - * Increments active count; mainly called upon return from blocking. - */ - final void incrementActiveCount() { - long c; - do {} while (!U.compareAndSwapLong(this, CTL, c = ctl, c + AC_UNIT)); - } - - /** - * Tries to activate or create a worker if too few are active. - */ - final void signalWork() { - long c; int u; - while ((u = (int)((c = ctl) >>> 32)) < 0) { // too few active - WorkQueue[] ws = workQueues; int e, i; WorkQueue w; Thread p; - if ((e = (int)c) > 0) { // at least one waiting - if (ws != null && (i = e & SMASK) < ws.length && - (w = ws[i]) != null && w.eventCount == (e | INT_SIGN)) { - long nc = (((long)(w.nextWait & E_MASK)) | - ((long)(u + UAC_UNIT) << 32)); - if (U.compareAndSwapLong(this, CTL, c, nc)) { - w.eventCount = (e + E_SEQ) & E_MASK; - if ((p = w.parker) != null) - U.unpark(p); // activate and release - break; - } - } - else - break; - } - else if (e == 0 && (u & SHORT_SIGN) != 0) { // too few total - long nc = (long)(((u + UTC_UNIT) & UTC_MASK) | - ((u + UAC_UNIT) & UAC_MASK)) << 32; - if (U.compareAndSwapLong(this, CTL, c, nc)) { - addWorker(); - break; - } - } - else - break; - } - } - - - // Scanning for tasks - - /** - * Top-level runloop for workers, called by ForkJoinWorkerThread.run. - */ - final void runWorker(WorkQueue w) { - w.growArray(false); // initialize queue array in this thread - do { w.runTask(scan(w)); } while (w.runState >= 0); - } - - /** - * Scans for and, if found, returns one task, else possibly - * inactivates the worker. This method operates on single reads of - * volatile state and is designed to be re-invoked continuously, - * in part because it returns upon detecting inconsistencies, - * contention, or state changes that indicate possible success on - * re-invocation. - * - * The scan searches for tasks across a random permutation of - * queues (starting at a random index and stepping by a random - * relative prime, checking each at least once). The scan - * terminates upon either finding a non-empty queue, or completing - * the sweep. If the worker is not inactivated, it takes and - * returns a task from this queue. On failure to find a task, we - * take one of the following actions, after which the caller will - * retry calling this method unless terminated. - * - * * If pool is terminating, terminate the worker. - * - * * If not a complete sweep, try to release a waiting worker. 
If - * the scan terminated because the worker is inactivated, then the - * released worker will often be the calling worker, and it can - * succeed obtaining a task on the next call. Or maybe it is - * another worker, but with same net effect. Releasing in other - * cases as well ensures that we have enough workers running. - * - * * If not already enqueued, try to inactivate and enqueue the - * worker on wait queue. Or, if inactivating has caused the pool - * to be quiescent, relay to idleAwaitWork to check for - * termination and possibly shrink pool. - * - * * If already inactive, and the caller has run a task since the - * last empty scan, return (to allow rescan) unless others are - * also inactivated. Field WorkQueue.rescans counts down on each - * scan to ensure eventual inactivation and blocking. - * - * * If already enqueued and none of the above apply, park - * awaiting signal. - * - * @param w the worker (via its WorkQueue) - * @return a task or null if none found - */ - private final ForkJoinTask scan(WorkQueue w) { - WorkQueue[] ws; // first update random seed - int r = w.seed; r ^= r << 13; r ^= r >>> 17; w.seed = r ^= r << 5; - int rs = runState, m; // volatile read order matters - if ((ws = workQueues) != null && (m = ws.length - 1) > 0) { - int ec = w.eventCount; // ec is negative if inactive - int step = (r >>> 16) | 1; // relative prime - for (int j = (m + 1) << 2; ; r += step) { - WorkQueue q; ForkJoinTask t; ForkJoinTask[] a; int b; - if ((q = ws[r & m]) != null && (b = q.base) - q.top < 0 && - (a = q.array) != null) { // probably nonempty - int i = (((a.length - 1) & b) << ASHIFT) + ABASE; - t = (ForkJoinTask)U.getObjectVolatile(a, i); - if (q.base == b && ec >= 0 && t != null && - U.compareAndSwapObject(a, i, t, null)) { - q.base = b + 1; // specialization of pollAt - return t; - } - else if (ec < 0 || j <= m) { - rs = 0; // mark scan as incomplete - break; // caller can retry after release - } - } - if (--j < 0) - break; - } - long c = ctl; int e = (int)c, a = (int)(c >> AC_SHIFT), nr, ns; - if (e < 0) // decode ctl on empty scan - w.runState = -1; // pool is terminating - else if (rs == 0 || rs != runState) { // incomplete scan - WorkQueue v; Thread p; // try to release a waiter - if (e > 0 && a < 0 && w.eventCount == ec && - (v = ws[e & m]) != null && v.eventCount == (e | INT_SIGN)) { - long nc = ((long)(v.nextWait & E_MASK) | - ((c + AC_UNIT) & (AC_MASK|TC_MASK))); - if (ctl == c && U.compareAndSwapLong(this, CTL, c, nc)) { - v.eventCount = (e + E_SEQ) & E_MASK; - if ((p = v.parker) != null) - U.unpark(p); - } - } - } - else if (ec >= 0) { // try to enqueue/inactivate - long nc = (long)ec | ((c - AC_UNIT) & (AC_MASK|TC_MASK)); - w.nextWait = e; - w.eventCount = ec | INT_SIGN; // mark as inactive - if (ctl != c || !U.compareAndSwapLong(this, CTL, c, nc)) - w.eventCount = ec; // unmark on CAS failure - else { - if ((ns = w.nsteals) != 0) { - w.nsteals = 0; // set rescans if ran task - w.rescans = (a > 0) ? 0 : a + parallelism; - w.totalSteals += ns; - } - if (a == 1 - parallelism) // quiescent - idleAwaitWork(w, nc, c); - } - } - else if (w.eventCount < 0) { // already queued - if ((nr = w.rescans) > 0) { // continue rescanning - int ac = a + parallelism; - if (((w.rescans = (ac < nr) ?
ac : nr - 1) & 3) == 0) - Thread.yield(); // yield before block - } - else { - Thread.interrupted(); // clear status - Thread wt = Thread.currentThread(); - U.putObject(wt, PARKBLOCKER, this); - w.parker = wt; // emulate LockSupport.park - if (w.eventCount < 0) // recheck - U.park(false, 0L); - w.parker = null; - U.putObject(wt, PARKBLOCKER, null); - } - } - } - return null; - } - - /** - * If inactivating worker w has caused the pool to become - * quiescent, checks for pool termination, and, so long as this is - * not the only worker, waits for event for up to SHRINK_RATE - * nanosecs. On timeout, if ctl has not changed, terminates the - * worker, which will in turn wake up another worker to possibly - * repeat this process. - * - * @param w the calling worker - * @param currentCtl the ctl value triggering possible quiescence - * @param prevCtl the ctl value to restore if thread is terminated - */ - private void idleAwaitWork(WorkQueue w, long currentCtl, long prevCtl) { - if (w.eventCount < 0 && !tryTerminate(false, false) && - (int)prevCtl != 0 && !hasQueuedSubmissions() && ctl == currentCtl) { - Thread wt = Thread.currentThread(); - Thread.yield(); // yield before block - while (ctl == currentCtl) { - long startTime = System.nanoTime(); - Thread.interrupted(); // timed variant of version in scan() - U.putObject(wt, PARKBLOCKER, this); - w.parker = wt; - if (ctl == currentCtl) - U.park(false, SHRINK_RATE); - w.parker = null; - U.putObject(wt, PARKBLOCKER, null); - if (ctl != currentCtl) - break; - if (System.nanoTime() - startTime >= SHRINK_TIMEOUT && - U.compareAndSwapLong(this, CTL, currentCtl, prevCtl)) { - w.eventCount = (w.eventCount + E_SEQ) | E_MASK; - w.runState = -1; // shrink - break; - } - } - } - } - - /** - * Tries to locate and execute tasks for a stealer of the given - * task, or in turn one of its stealers. Traces currentSteal -> - * currentJoin links looking for a thread working on a descendant - * of the given task and with a non-empty queue to steal back and - * execute tasks from. The first call to this method upon a - * waiting join will often entail scanning/search (which is OK - * because the joiner has nothing better to do), but this method - * leaves hints in workers to speed up subsequent calls. The - * implementation is very branchy to cope with potential - * inconsistencies or loops encountering chains that are stale, - * unknown, or so long that they are likely cyclic. All of these - * cases are dealt with by just retrying by caller.
- * - * @param joiner the joining worker - * @param task the task to join - * @return true if found or ran a task (and so is immediately retryable) - */ - private boolean tryHelpStealer(WorkQueue joiner, ForkJoinTask task) { - WorkQueue[] ws; - int m, depth = MAX_HELP; // remaining chain depth - boolean progress = false; - if ((ws = workQueues) != null && (m = ws.length - 1) > 0 && - task.status >= 0) { - ForkJoinTask subtask = task; // current target - outer: for (WorkQueue j = joiner;;) { - WorkQueue stealer = null; // find stealer of subtask - WorkQueue v = ws[j.stealHint & m]; // try hint - if (v != null && v.currentSteal == subtask) - stealer = v; - else { // scan - for (int i = 1; i <= m; i += 2) { - if ((v = ws[i]) != null && v.currentSteal == subtask && - v != joiner) { - stealer = v; - j.stealHint = i; // save hint - break; - } - } - if (stealer == null) - break; - } - - for (WorkQueue q = stealer;;) { // try to help stealer - ForkJoinTask[] a; ForkJoinTask t; int b; - if (task.status < 0) - break outer; - if ((b = q.base) - q.top < 0 && (a = q.array) != null) { - progress = true; - int i = (((a.length - 1) & b) << ASHIFT) + ABASE; - t = (ForkJoinTask)U.getObjectVolatile(a, i); - if (subtask.status < 0) // must recheck before taking - break outer; - if (t != null && - q.base == b && - U.compareAndSwapObject(a, i, t, null)) { - q.base = b + 1; - joiner.runSubtask(t); - } - else if (q.base == b) - break outer; // possibly stalled - } - else { // descend - ForkJoinTask next = stealer.currentJoin; - if (--depth <= 0 || subtask.status < 0 || - next == null || next == subtask) - break outer; // stale, dead-end, or cyclic - subtask = next; - j = stealer; - break; - } - } - } - } - return progress; - } - - /** - * If task is at base of some steal queue, steals and executes it. - * - * @param joiner the joining worker - * @param task the task - */ - private void tryPollForAndExec(WorkQueue joiner, ForkJoinTask task) { - WorkQueue[] ws; - if ((ws = workQueues) != null) { - for (int j = 1; j < ws.length && task.status >= 0; j += 2) { - WorkQueue q = ws[j]; - if (q != null && q.pollFor(task)) { - joiner.runSubtask(task); - break; - } - } - } - } - - /** - * Tries to decrement active count (sometimes implicitly) and - * possibly release or create a compensating worker in preparation - * for blocking. Fails on contention or termination. Otherwise, - * adds a new thread if no idle workers are available and either - * pool would become completely starved or: (at least half - * starved, and fewer than 50% spares exist, and there is at least - * one task apparently available). Even though the availability - * check requires a full scan, it is worthwhile in reducing false - * alarms. 
- * - * @param task if non-null, a task being waited for - * @param blocker if non-null, a blocker being waited for - * @return true if the caller can block, else should recheck and retry - */ - final boolean tryCompensate(ForkJoinTask task, ManagedBlocker blocker) { - int pc = parallelism, e; - long c = ctl; - WorkQueue[] ws = workQueues; - if ((e = (int)c) >= 0 && ws != null) { - int u, a, ac, hc; - int tc = (short)((u = (int)(c >>> 32)) >>> UTC_SHIFT) + pc; - boolean replace = false; - if ((a = u >> UAC_SHIFT) <= 0) { - if ((ac = a + pc) <= 1) - replace = true; - else if ((e > 0 || (task != null && - ac <= (hc = pc >>> 1) && tc < pc + hc))) { - WorkQueue w; - for (int j = 0; j < ws.length; ++j) { - if ((w = ws[j]) != null && !w.isEmpty()) { - replace = true; - break; // in compensation range and tasks available - } - } - } - } - if ((task == null || task.status >= 0) && // recheck need to block - (blocker == null || !blocker.isReleasable()) && ctl == c) { - if (!replace) { // no compensation - long nc = ((c - AC_UNIT) & AC_MASK) | (c & ~AC_MASK); - if (U.compareAndSwapLong(this, CTL, c, nc)) - return true; - } - else if (e != 0) { // release an idle worker - WorkQueue w; Thread p; int i; - if ((i = e & SMASK) < ws.length && (w = ws[i]) != null) { - long nc = ((long)(w.nextWait & E_MASK) | - (c & (AC_MASK|TC_MASK))); - if (w.eventCount == (e | INT_SIGN) && - U.compareAndSwapLong(this, CTL, c, nc)) { - w.eventCount = (e + E_SEQ) & E_MASK; - if ((p = w.parker) != null) - U.unpark(p); - return true; - } - } - } - else if (tc < MAX_CAP) { // create replacement - long nc = ((c + TC_UNIT) & TC_MASK) | (c & ~TC_MASK); - if (U.compareAndSwapLong(this, CTL, c, nc)) { - addWorker(); - return true; - } - } - } - } - return false; - } - - /** - * Helps and/or blocks until the given task is done. - * - * @param joiner the joining worker - * @param task the task - * @return task status on exit - */ - final int awaitJoin(WorkQueue joiner, ForkJoinTask task) { - int s; - ForkJoinTask prevJoin = joiner.currentJoin; - if ((s = task.status) >= 0) { - joiner.currentJoin = task; - long startTime = 0L; - for (int k = 0;;) { - if ((joiner.isEmpty() ? // try to help - !tryHelpStealer(joiner, task) : - !joiner.tryRemoveAndExec(task))) { - if (k == 0) { - startTime = System.nanoTime(); - tryPollForAndExec(joiner, task); // check uncommon case - } - else if ((k & (MAX_HELP - 1)) == 0 && - System.nanoTime() - startTime >= - COMPENSATION_DELAY && - tryCompensate(task, null)) { - if (task.trySetSignal() && task.status >= 0) { - synchronized (task) { - if (task.status >= 0) { - try { // see ForkJoinTask - task.wait(); // for explanation - } catch (InterruptedException ie) { - } - } - else - task.notifyAll(); - } - } - long c; // re-activate - do {} while (!U.compareAndSwapLong - (this, CTL, c = ctl, c + AC_UNIT)); - } - } - if ((s = task.status) < 0) { - joiner.currentJoin = prevJoin; - break; - } - else if ((k++ & (MAX_HELP - 1)) == MAX_HELP >>> 1) - Thread.yield(); // for politeness - } - } - return s; - } - - /** - * Stripped-down variant of awaitJoin used by timed joins. Tries - * to help join only while there is continuous progress. (Caller - * will then enter a timed wait.) - * - * @param joiner the joining worker - * @param task the task - * @return task status on exit - */ - final int helpJoinOnce(WorkQueue joiner, ForkJoinTask task) { - int s; - while ((s = task.status) >= 0 && - (joiner.isEmpty() ? 
- tryHelpStealer(joiner, task) : - joiner.tryRemoveAndExec(task))) - ; - return s; - } - - /** - * Returns a (probably) non-empty steal queue, if one is found - * during a random, then cyclic scan, else null. This method must - * be retried by caller if, by the time it tries to use the queue, - * it is empty. - */ - private WorkQueue findNonEmptyStealQueue(WorkQueue w) { - // Similar to loop in scan(), but ignoring submissions - int r = w.seed; r ^= r << 13; r ^= r >>> 17; w.seed = r ^= r << 5; - int step = (r >>> 16) | 1; - for (WorkQueue[] ws;;) { - int rs = runState, m; - if ((ws = workQueues) == null || (m = ws.length - 1) < 1) - return null; - for (int j = (m + 1) << 2; ; r += step) { - WorkQueue q = ws[((r << 1) | 1) & m]; - if (q != null && !q.isEmpty()) - return q; - else if (--j < 0) { - if (runState == rs) - return null; - break; - } - } - } - } - - /** - * Runs tasks until {@code isQuiescent()}. We piggyback on - * active count ctl maintenance, but rather than blocking - * when tasks cannot be found, we rescan until all others cannot - * find tasks either. - */ - final void helpQuiescePool(WorkQueue w) { - for (boolean active = true;;) { - ForkJoinTask localTask; // exhaust local queue - while ((localTask = w.nextLocalTask()) != null) - localTask.doExec(); - WorkQueue q = findNonEmptyStealQueue(w); - if (q != null) { - ForkJoinTask t; int b; - if (!active) { // re-establish active count - long c; - active = true; - do {} while (!U.compareAndSwapLong - (this, CTL, c = ctl, c + AC_UNIT)); - } - if ((b = q.base) - q.top < 0 && (t = q.pollAt(b)) != null) - w.runSubtask(t); - } - else { - long c; - if (active) { // decrement active count without queuing - active = false; - do {} while (!U.compareAndSwapLong - (this, CTL, c = ctl, c -= AC_UNIT)); - } - else - c = ctl; // re-increment on exit - if ((int)(c >> AC_SHIFT) + parallelism == 0) { - do {} while (!U.compareAndSwapLong - (this, CTL, c = ctl, c + AC_UNIT)); - break; - } - } - } - } - - /** - * Gets and removes a local or stolen task for the given worker. - * - * @return a task, if available - */ - final ForkJoinTask nextTaskFor(WorkQueue w) { - for (ForkJoinTask t;;) { - WorkQueue q; int b; - if ((t = w.nextLocalTask()) != null) - return t; - if ((q = findNonEmptyStealQueue(w)) == null) - return null; - if ((b = q.base) - q.top < 0 && (t = q.pollAt(b)) != null) - return t; - } - } - - /** - * Returns the approximate (non-atomic) number of idle threads per - * active thread to offset steal queue size for method - * ForkJoinTask.getSurplusQueuedTaskCount(). - */ - final int idlePerActive() { - // Approximate at powers of two for small values, saturate past 4 - int p = parallelism; - int a = p + (int)(ctl >> AC_SHIFT); - return (a > (p >>>= 1) ? 0 : - a > (p >>>= 1) ? 1 : - a > (p >>>= 1) ? 2 : - a > (p >>>= 1) ? 4 : - 8); - } - - // Termination - - /** - * Possibly initiates and/or completes termination. The caller - * triggering termination runs three passes through workQueues: - * (0) Setting termination status, followed by wakeups of queued - * workers; (1) cancelling all tasks; (2) interrupting lagging - * threads (likely in external tasks, but possibly also blocked in - * joins). Each pass repeats previous steps because of potential - * lagging thread creation. 
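The compensation machinery above (tryCompensate and awaitJoin) is what backs the pool's managed-blocking support: wrapping a blocking call in a ManagedBlocker lets the pool release or create a spare worker while the caller waits, so parallelism is preserved. A minimal usage sketch, written against the JDK's java.util.concurrent classes (which expose the same managedBlock/ManagedBlocker protocol) so that it is self-contained; helper and class names are invented for illustration:

import java.util.concurrent.Callable;
import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.ForkJoinPool.ManagedBlocker;
import java.util.concurrent.ForkJoinTask;
import java.util.concurrent.SynchronousQueue;

// Standalone usage sketch (class and helper names invented): a blocking
// queue take() wrapped in a ManagedBlocker, allowing the pool to activate
// or create a spare worker while this one waits.
public final class ManagedBlockerDemo {

    /** Blocks for a queue element while cooperating with the pool's compensation logic. */
    static String take(final SynchronousQueue<String> queue) throws InterruptedException {
        final String[] result = new String[1];
        ForkJoinPool.managedBlock(new ManagedBlocker() {
            public boolean block() throws InterruptedException {
                if (result[0] == null)
                    result[0] = queue.take();       // the actual blocking call
                return true;                        // no further blocking needed
            }
            public boolean isReleasable() {
                return result[0] != null || (result[0] = queue.poll()) != null;
            }
        });
        return result[0];
    }

    public static void main(String[] args) throws Exception {
        final SynchronousQueue<String> queue = new SynchronousQueue<String>();
        ForkJoinPool pool = new ForkJoinPool();
        ForkJoinTask<String> task = pool.submit(new Callable<String>() {
            public String call() throws Exception { return take(queue); }
        });
        queue.put("hello");                         // releases the blocked worker
        System.out.println(task.get());             // prints "hello"
        pool.shutdown();
    }
}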
- * - * @param now if true, unconditionally terminate, else only - * if no work and no active workers - * @param enable if true, enable shutdown when next possible - * @return true if now terminating or terminated - */ - private boolean tryTerminate(boolean now, boolean enable) { - Mutex lock = this.lock; - for (long c;;) { - if (((c = ctl) & STOP_BIT) != 0) { // already terminating - if ((short)(c >>> TC_SHIFT) == -parallelism) { - lock.lock(); // don't need try/finally - termination.signalAll(); // signal when 0 workers - lock.unlock(); - } - return true; - } - if (runState >= 0) { // not yet enabled - if (!enable) - return false; - lock.lock(); - runState |= SHUTDOWN; - lock.unlock(); - } - if (!now) { // check if idle & no tasks - if ((int)(c >> AC_SHIFT) != -parallelism || - hasQueuedSubmissions()) - return false; - // Check for unqueued inactive workers. One pass suffices. - WorkQueue[] ws = workQueues; WorkQueue w; - if (ws != null) { - for (int i = 1; i < ws.length; i += 2) { - if ((w = ws[i]) != null && w.eventCount >= 0) - return false; - } - } - } - if (U.compareAndSwapLong(this, CTL, c, c | STOP_BIT)) { - for (int pass = 0; pass < 3; ++pass) { - WorkQueue[] ws = workQueues; - if (ws != null) { - WorkQueue w; - int n = ws.length; - for (int i = 0; i < n; ++i) { - if ((w = ws[i]) != null) { - w.runState = -1; - if (pass > 0) { - w.cancelAll(); - if (pass > 1) - w.interruptOwner(); - } - } - } - // Wake up workers parked on event queue - int i, e; long cc; Thread p; - while ((e = (int)(cc = ctl) & E_MASK) != 0 && - (i = e & SMASK) < n && - (w = ws[i]) != null) { - long nc = ((long)(w.nextWait & E_MASK) | - ((cc + AC_UNIT) & AC_MASK) | - (cc & (TC_MASK|STOP_BIT))); - if (w.eventCount == (e | INT_SIGN) && - U.compareAndSwapLong(this, CTL, cc, nc)) { - w.eventCount = (e + E_SEQ) & E_MASK; - w.runState = -1; - if ((p = w.parker) != null) - U.unpark(p); - } - } - } - } - } - } - } - - // Exported methods - - // Constructors - - /** - * Creates a {@code ForkJoinPool} with parallelism equal to {@link - * java.lang.Runtime#availableProcessors}, using the {@linkplain - * #defaultForkJoinWorkerThreadFactory default thread factory}, - * no UncaughtExceptionHandler, and non-async LIFO processing mode. - * - * @throws SecurityException if a security manager exists and - * the caller is not permitted to modify threads - * because it does not hold {@link - * java.lang.RuntimePermission}{@code ("modifyThread")} - */ - public ForkJoinPool() { - this(Runtime.getRuntime().availableProcessors(), - defaultForkJoinWorkerThreadFactory, null, false); - } - - /** - * Creates a {@code ForkJoinPool} with the indicated parallelism - * level, the {@linkplain - * #defaultForkJoinWorkerThreadFactory default thread factory}, - * no UncaughtExceptionHandler, and non-async LIFO processing mode. - * - * @param parallelism the parallelism level - * @throws IllegalArgumentException if parallelism less than or - * equal to zero, or greater than implementation limit - * @throws SecurityException if a security manager exists and - * the caller is not permitted to modify threads - * because it does not hold {@link - * java.lang.RuntimePermission}{@code ("modifyThread")} - */ - public ForkJoinPool(int parallelism) { - this(parallelism, defaultForkJoinWorkerThreadFactory, null, false); - } - - /** - * Creates a {@code ForkJoinPool} with the given parameters. - * - * @param parallelism the parallelism level. For default value, - * use {@link java.lang.Runtime#availableProcessors}. 
- * @param factory the factory for creating new threads. For default value, - * use {@link #defaultForkJoinWorkerThreadFactory}. - * @param handler the handler for internal worker threads that - * terminate due to unrecoverable errors encountered while executing - * tasks. For default value, use {@code null}. - * @param asyncMode if true, - * establishes local first-in-first-out scheduling mode for forked - * tasks that are never joined. This mode may be more appropriate - * than default locally stack-based mode in applications in which - * worker threads only process event-style asynchronous tasks. - * For default value, use {@code false}. - * @throws IllegalArgumentException if parallelism less than or - * equal to zero, or greater than implementation limit - * @throws NullPointerException if the factory is null - * @throws SecurityException if a security manager exists and - * the caller is not permitted to modify threads - * because it does not hold {@link - * java.lang.RuntimePermission}{@code ("modifyThread")} - */ - public ForkJoinPool(int parallelism, - ForkJoinWorkerThreadFactory factory, - Thread.UncaughtExceptionHandler handler, - boolean asyncMode) { - checkPermission(); - if (factory == null) - throw new NullPointerException(); - if (parallelism <= 0 || parallelism > MAX_CAP) - throw new IllegalArgumentException(); - this.parallelism = parallelism; - this.factory = factory; - this.ueh = handler; - this.localMode = asyncMode ? FIFO_QUEUE : LIFO_QUEUE; - long np = (long)(-parallelism); // offset ctl counts - this.ctl = ((np << AC_SHIFT) & AC_MASK) | ((np << TC_SHIFT) & TC_MASK); - // Use nearest power 2 for workQueues size. See Hackers Delight sec 3.2. - int n = parallelism - 1; - n |= n >>> 1; n |= n >>> 2; n |= n >>> 4; n |= n >>> 8; n |= n >>> 16; - int size = (n + 1) << 1; // #slots = 2*#workers - this.submitMask = size - 1; // room for max # of submit queues - this.workQueues = new WorkQueue[size]; - this.termination = (this.lock = new Mutex()).newCondition(); - this.stealCount = new AtomicLong(); - this.nextWorkerNumber = new AtomicInteger(); - int pn = poolNumberGenerator.incrementAndGet(); - StringBuilder sb = new StringBuilder("ForkJoinPool-"); - sb.append(Integer.toString(pn)); - sb.append("-worker-"); - this.workerNamePrefix = sb.toString(); - lock.lock(); - this.runState = 1; // set init flag - lock.unlock(); - } - - // Execution methods - - /** - * Performs the given task, returning its result upon completion. - * If the computation encounters an unchecked Exception or Error, - * it is rethrown as the outcome of this invocation. Rethrown - * exceptions behave in the same way as regular exceptions, but, - * when possible, contain stack traces (as displayed for example - * using {@code ex.printStackTrace()}) of both the current thread - * as well as the thread actually encountering the exception; - * minimally only the latter. - * - * @param task the task - * @return the task's result - * @throws NullPointerException if the task is null - * @throws RejectedExecutionException if the task cannot be - * scheduled for execution - */ - public T invoke(ForkJoinTask task) { - if (task == null) - throw new NullPointerException(); - doSubmit(task); - return task.join(); - } - - /** - * Arranges for (asynchronous) execution of the given task. 
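The constructor above rounds the parallelism level up to the next power of two with the bit-smearing idiom it cites (Hackers Delight sec 3.2) and doubles it, so the workQueues array has one even (submission) and one odd (worker) slot per potential worker. A minimal standalone sketch of that sizing computation (class and method names invented for illustration):

// Standalone sketch (names invented): the bit-smearing idiom cited in the
// constructor above. It rounds the parallelism level up to the next power
// of two and doubles it to size the workQueues array.
public final class TableSizingDemo {
    static int workQueueSlots(int parallelism) {
        int n = parallelism - 1;
        n |= n >>> 1; n |= n >>> 2; n |= n >>> 4; n |= n >>> 8; n |= n >>> 16;
        return (n + 1) << 1;                        // #slots = 2 * next power of two
    }

    public static void main(String[] args) {
        int[] levels = {1, 2, 3, 4, 7, 8, 13};
        for (int p : levels)
            System.out.println("parallelism " + p + " -> " + workQueueSlots(p) + " slots");
        // e.g. parallelism 3 -> 8 slots, parallelism 13 -> 32 slots
    }
}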
- * - * @param task the task - * @throws NullPointerException if the task is null - * @throws RejectedExecutionException if the task cannot be - * scheduled for execution - */ - public void execute(ForkJoinTask task) { - if (task == null) - throw new NullPointerException(); - doSubmit(task); - } - - // AbstractExecutorService methods - - /** - * @throws NullPointerException if the task is null - * @throws RejectedExecutionException if the task cannot be - * scheduled for execution - */ - public void execute(Runnable task) { - if (task == null) - throw new NullPointerException(); - ForkJoinTask job; - if (task instanceof ForkJoinTask) // avoid re-wrap - job = (ForkJoinTask) task; - else - job = new ForkJoinTask.AdaptedRunnableAction(task); - doSubmit(job); - } - - /** - * Submits a ForkJoinTask for execution. - * - * @param task the task to submit - * @return the task - * @throws NullPointerException if the task is null - * @throws RejectedExecutionException if the task cannot be - * scheduled for execution - */ - public ForkJoinTask submit(ForkJoinTask task) { - if (task == null) - throw new NullPointerException(); - doSubmit(task); - return task; - } - - /** - * @throws NullPointerException if the task is null - * @throws RejectedExecutionException if the task cannot be - * scheduled for execution - */ - public ForkJoinTask submit(Callable task) { - ForkJoinTask job = new ForkJoinTask.AdaptedCallable(task); - doSubmit(job); - return job; - } - - /** - * @throws NullPointerException if the task is null - * @throws RejectedExecutionException if the task cannot be - * scheduled for execution - */ - public ForkJoinTask submit(Runnable task, T result) { - ForkJoinTask job = new ForkJoinTask.AdaptedRunnable(task, result); - doSubmit(job); - return job; - } - - /** - * @throws NullPointerException if the task is null - * @throws RejectedExecutionException if the task cannot be - * scheduled for execution - */ - public ForkJoinTask submit(Runnable task) { - if (task == null) - throw new NullPointerException(); - ForkJoinTask job; - if (task instanceof ForkJoinTask) // avoid re-wrap - job = (ForkJoinTask) task; - else - job = new ForkJoinTask.AdaptedRunnableAction(task); - doSubmit(job); - return job; - } - - /** - * @throws NullPointerException {@inheritDoc} - * @throws RejectedExecutionException {@inheritDoc} - */ - public List> invokeAll(Collection> tasks) { - // In previous versions of this class, this method constructed - // a task to run ForkJoinTask.invokeAll, but now external - // invocation of multiple tasks is at least as efficient. - List> fs = new ArrayList>(tasks.size()); - // Workaround needed because method wasn't declared with - // wildcards in return type but should have been. - @SuppressWarnings({"unchecked", "rawtypes"}) - List> futures = (List>) (List) fs; - - boolean done = false; - try { - for (Callable t : tasks) { - ForkJoinTask f = new ForkJoinTask.AdaptedCallable(t); - doSubmit(f); - fs.add(f); - } - for (ForkJoinTask f : fs) - f.quietlyJoin(); - done = true; - return futures; - } finally { - if (!done) - for (ForkJoinTask f : fs) - f.cancel(false); - } - } - - /** - * Returns the factory used for constructing new workers. - * - * @return the factory used for constructing new workers - */ - public ForkJoinWorkerThreadFactory getFactory() { - return factory; - } - - /** - * Returns the handler for internal worker threads that terminate - * due to unrecoverable errors encountered while executing tasks. 
- * - * @return the handler, or {@code null} if none - */ - public Thread.UncaughtExceptionHandler getUncaughtExceptionHandler() { - return ueh; - } - - /** - * Returns the targeted parallelism level of this pool. - * - * @return the targeted parallelism level of this pool - */ - public int getParallelism() { - return parallelism; - } - - /** - * Returns the number of worker threads that have started but not - * yet terminated. The result returned by this method may differ - * from {@link #getParallelism} when threads are created to - * maintain parallelism when others are cooperatively blocked. - * - * @return the number of worker threads - */ - public int getPoolSize() { - return parallelism + (short)(ctl >>> TC_SHIFT); - } - - /** - * Returns {@code true} if this pool uses local first-in-first-out - * scheduling mode for forked tasks that are never joined. - * - * @return {@code true} if this pool uses async mode - */ - public boolean getAsyncMode() { - return localMode != 0; - } - - /** - * Returns an estimate of the number of worker threads that are - * not blocked waiting to join tasks or for other managed - * synchronization. This method may overestimate the - * number of running threads. - * - * @return the number of worker threads - */ - public int getRunningThreadCount() { - int rc = 0; - WorkQueue[] ws; WorkQueue w; - if ((ws = workQueues) != null) { - for (int i = 1; i < ws.length; i += 2) { - if ((w = ws[i]) != null && w.isApparentlyUnblocked()) - ++rc; - } - } - return rc; - } - - /** - * Returns an estimate of the number of threads that are currently - * stealing or executing tasks. This method may overestimate the - * number of active threads. - * - * @return the number of active threads - */ - public int getActiveThreadCount() { - int r = parallelism + (int)(ctl >> AC_SHIFT); - return (r <= 0) ? 0 : r; // suppress momentarily negative values - } - - /** - * Returns {@code true} if all worker threads are currently idle. - * An idle worker is one that cannot obtain a task to execute - * because none are available to steal from other threads, and - * there are no pending submissions to the pool. This method is - * conservative; it might not return {@code true} immediately upon - * idleness of all threads, but will eventually become true if - * threads remain inactive. - * - * @return {@code true} if all threads are currently idle - */ - public boolean isQuiescent() { - return (int)(ctl >> AC_SHIFT) + parallelism == 0; - } - - /** - * Returns an estimate of the total number of tasks stolen from - * one thread's work queue by another. The reported value - * underestimates the actual total number of steals when the pool - * is not quiescent. This value may be useful for monitoring and - * tuning fork/join programs: in general, steal counts should be - * high enough to keep threads busy, but low enough to avoid - * overhead and contention across threads. - * - * @return the number of steals - */ - public long getStealCount() { - long count = stealCount.get(); - WorkQueue[] ws; WorkQueue w; - if ((ws = workQueues) != null) { - for (int i = 1; i < ws.length; i += 2) { - if ((w = ws[i]) != null) - count += w.totalSteals; - } - } - return count; - } - - /** - * Returns an estimate of the total number of tasks currently held - * in queues by worker threads (but not including tasks submitted - * to the pool that have not begun executing). This value is only - * an approximation, obtained by iterating across all threads in - * the pool. 
This method may be useful for tuning task - * granularities. - * - * @return the number of queued tasks - */ - public long getQueuedTaskCount() { - long count = 0; - WorkQueue[] ws; WorkQueue w; - if ((ws = workQueues) != null) { - for (int i = 1; i < ws.length; i += 2) { - if ((w = ws[i]) != null) - count += w.queueSize(); - } - } - return count; - } - - /** - * Returns an estimate of the number of tasks submitted to this - * pool that have not yet begun executing. This method may take - * time proportional to the number of submissions. - * - * @return the number of queued submissions - */ - public int getQueuedSubmissionCount() { - int count = 0; - WorkQueue[] ws; WorkQueue w; - if ((ws = workQueues) != null) { - for (int i = 0; i < ws.length; i += 2) { - if ((w = ws[i]) != null) - count += w.queueSize(); - } - } - return count; - } - - /** - * Returns {@code true} if there are any tasks submitted to this - * pool that have not yet begun executing. - * - * @return {@code true} if there are any queued submissions - */ - public boolean hasQueuedSubmissions() { - WorkQueue[] ws; WorkQueue w; - if ((ws = workQueues) != null) { - for (int i = 0; i < ws.length; i += 2) { - if ((w = ws[i]) != null && !w.isEmpty()) - return true; - } - } - return false; - } - - /** - * Removes and returns the next unexecuted submission if one is - * available. This method may be useful in extensions to this - * class that re-assign work in systems with multiple pools. - * - * @return the next submission, or {@code null} if none - */ - protected ForkJoinTask pollSubmission() { - WorkQueue[] ws; WorkQueue w; ForkJoinTask t; - if ((ws = workQueues) != null) { - for (int i = 0; i < ws.length; i += 2) { - if ((w = ws[i]) != null && (t = w.poll()) != null) - return t; - } - } - return null; - } - - /** - * Removes all available unexecuted submitted and forked tasks - * from scheduling queues and adds them to the given collection, - * without altering their execution status. These may include - * artificially generated or wrapped tasks. This method is - * designed to be invoked only when the pool is known to be - * quiescent. Invocations at other times may not remove all - * tasks. A failure encountered while attempting to add elements - * to collection {@code c} may result in elements being in - * neither, either or both collections when the associated - * exception is thrown. The behavior of this operation is - * undefined if the specified collection is modified while the - * operation is in progress. - * - * @param c the collection to transfer elements into - * @return the number of elements transferred - */ - protected int drainTasksTo(Collection> c) { - int count = 0; - WorkQueue[] ws; WorkQueue w; ForkJoinTask t; - if ((ws = workQueues) != null) { - for (int i = 0; i < ws.length; ++i) { - if ((w = ws[i]) != null) { - while ((t = w.poll()) != null) { - c.add(t); - ++count; - } - } - } - } - return count; - } - - /** - * Returns a string identifying this pool, as well as its state, - * including indications of run state, parallelism level, and - * worker and task counts. 
- * - * @return a string identifying this pool, as well as its state - */ - public String toString() { - // Use a single pass through workQueues to collect counts - long qt = 0L, qs = 0L; int rc = 0; - long st = stealCount.get(); - long c = ctl; - WorkQueue[] ws; WorkQueue w; - if ((ws = workQueues) != null) { - for (int i = 0; i < ws.length; ++i) { - if ((w = ws[i]) != null) { - int size = w.queueSize(); - if ((i & 1) == 0) - qs += size; - else { - qt += size; - st += w.totalSteals; - if (w.isApparentlyUnblocked()) - ++rc; - } - } - } - } - int pc = parallelism; - int tc = pc + (short)(c >>> TC_SHIFT); - int ac = pc + (int)(c >> AC_SHIFT); - if (ac < 0) // ignore transient negative - ac = 0; - String level; - if ((c & STOP_BIT) != 0) - level = (tc == 0) ? "Terminated" : "Terminating"; - else - level = runState < 0 ? "Shutting down" : "Running"; - return super.toString() + - "[" + level + - ", parallelism = " + pc + - ", size = " + tc + - ", active = " + ac + - ", running = " + rc + - ", steals = " + st + - ", tasks = " + qt + - ", submissions = " + qs + - "]"; - } - - /** - * Initiates an orderly shutdown in which previously submitted - * tasks are executed, but no new tasks will be accepted. - * Invocation has no additional effect if already shut down. - * Tasks that are in the process of being submitted concurrently - * during the course of this method may or may not be rejected. - * - * @throws SecurityException if a security manager exists and - * the caller is not permitted to modify threads - * because it does not hold {@link - * java.lang.RuntimePermission}{@code ("modifyThread")} - */ - public void shutdown() { - checkPermission(); - tryTerminate(false, true); - } - - /** - * Attempts to cancel and/or stop all tasks, and reject all - * subsequently submitted tasks. Tasks that are in the process of - * being submitted or executed concurrently during the course of - * this method may or may not be rejected. This method cancels - * both existing and unexecuted tasks, in order to permit - * termination in the presence of task dependencies. So the method - * always returns an empty list (unlike the case for some other - * Executors). - * - * @return an empty list - * @throws SecurityException if a security manager exists and - * the caller is not permitted to modify threads - * because it does not hold {@link - * java.lang.RuntimePermission}{@code ("modifyThread")} - */ - public List shutdownNow() { - checkPermission(); - tryTerminate(true, true); - return Collections.emptyList(); - } - - /** - * Returns {@code true} if all tasks have completed following shut down. - * - * @return {@code true} if all tasks have completed following shut down - */ - public boolean isTerminated() { - long c = ctl; - return ((c & STOP_BIT) != 0L && - (short)(c >>> TC_SHIFT) == -parallelism); - } - - /** - * Returns {@code true} if the process of termination has - * commenced but not yet completed. This method may be useful for - * debugging. A return of {@code true} reported a sufficient - * period after shutdown may indicate that submitted tasks have - * ignored or suppressed interruption, or are waiting for IO, - * causing this executor not to properly terminate. (See the - * advisory notes for class {@link ForkJoinTask} stating that - * tasks should not normally entail blocking operations. But if - * they do, they must abort them on interrupt.) 
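A small lifecycle sketch for the shutdown methods above, again assuming the java.util.concurrent equivalents of this class; the timeout value is arbitrary:

    import java.util.concurrent.ForkJoinPool;
    import java.util.concurrent.TimeUnit;

    public class ShutdownSketch {
        public static void main(String[] args) throws InterruptedException {
            ForkJoinPool pool = new ForkJoinPool();
            pool.execute(new Runnable() { public void run() { /* some work */ } });
            pool.shutdown();                                 // previously submitted tasks still run
            if (!pool.awaitTermination(10, TimeUnit.SECONDS))
                pool.shutdownNow();                          // cancel queued and running tasks
            System.out.println("terminated = " + pool.isTerminated());
        }
    }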
- * - * @return {@code true} if terminating but not yet terminated - */ - public boolean isTerminating() { - long c = ctl; - return ((c & STOP_BIT) != 0L && - (short)(c >>> TC_SHIFT) != -parallelism); - } - - /** - * Returns {@code true} if this pool has been shut down. - * - * @return {@code true} if this pool has been shut down - */ - public boolean isShutdown() { - return runState < 0; - } - - /** - * Blocks until all tasks have completed execution after a shutdown - * request, or the timeout occurs, or the current thread is - * interrupted, whichever happens first. - * - * @param timeout the maximum time to wait - * @param unit the time unit of the timeout argument - * @return {@code true} if this executor terminated and - * {@code false} if the timeout elapsed before termination - * @throws InterruptedException if interrupted while waiting - */ - public boolean awaitTermination(long timeout, TimeUnit unit) - throws InterruptedException { - long nanos = unit.toNanos(timeout); - final Mutex lock = this.lock; - lock.lock(); - try { - for (;;) { - if (isTerminated()) - return true; - if (nanos <= 0) - return false; - nanos = termination.awaitNanos(nanos); - } - } finally { - lock.unlock(); - } - } - - /** - * Interface for extending managed parallelism for tasks running - * in {@link ForkJoinPool}s. - * - *

A {@code ManagedBlocker} provides two methods. Method - * {@code isReleasable} must return {@code true} if blocking is - * not necessary. Method {@code block} blocks the current thread - * if necessary (perhaps internally invoking {@code isReleasable} - * before actually blocking). These actions are performed by any - * thread invoking {@link ForkJoinPool#managedBlock}. The - * unusual methods in this API accommodate synchronizers that may, - * but don't usually, block for long periods. Similarly, they - * allow more efficient internal handling of cases in which - * additional workers may be, but usually are not, needed to - * ensure sufficient parallelism. Toward this end, - * implementations of method {@code isReleasable} must be amenable - * to repeated invocation. - * - *

For example, here is a ManagedBlocker based on a - * ReentrantLock: - *

-     * {@code
-     * class ManagedLocker implements ManagedBlocker {
-     *   final ReentrantLock lock;
-     *   boolean hasLock = false;
-     *   ManagedLocker(ReentrantLock lock) { this.lock = lock; }
-     *   public boolean block() {
-     *     if (!hasLock)
-     *       lock.lock();
-     *     return true;
-     *   }
-     *   public boolean isReleasable() {
-     *     return hasLock || (hasLock = lock.tryLock());
-     *   }
-     * }}
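A usage sketch for the ManagedLocker above; it assumes that class is available and shows only the call into managedBlock:

    import java.util.concurrent.ForkJoinPool;
    import java.util.concurrent.locks.ReentrantLock;

    class ManagedLockerUsage {
        // Assumes the ManagedLocker class from the example above.
        static void lockInPool(ReentrantLock lock) throws InterruptedException {
            ForkJoinPool.managedBlock(new ManagedLocker(lock)); // pool may add a spare worker while blocked
            try {
                // the lock is held here; perform the guarded work
            } finally {
                lock.unlock();
            }
        }
    }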
- * - *

Here is a class that possibly blocks waiting for an - * item on a given queue: - *

-     * {@code
-     * class QueueTaker implements ManagedBlocker {
-     *   final BlockingQueue queue;
-     *   volatile E item = null;
-     *   QueueTaker(BlockingQueue q) { this.queue = q; }
-     *   public boolean block() throws InterruptedException {
-     *     if (item == null)
-     *       item = queue.take();
-     *     return true;
-     *   }
-     *   public boolean isReleasable() {
-     *     return item != null || (item = queue.poll()) != null;
-     *   }
-     *   public E getItem() { // call after pool.managedBlock completes
-     *     return item;
-     *   }
-     * }}
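A matching usage sketch for the QueueTaker above, assuming that class is available:

    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.ForkJoinPool;

    class QueueTakerUsage {
        // Assumes the QueueTaker class from the example above.
        static <E> E takeInPool(BlockingQueue<E> queue) throws InterruptedException {
            QueueTaker<E> taker = new QueueTaker<E>(queue);
            ForkJoinPool.managedBlock(taker);   // blocks cooperatively with the pool
            return taker.getItem();             // safe once managedBlock has returned
        }
    }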
- */ - public static interface ManagedBlocker { - /** - * Possibly blocks the current thread, for example waiting for - * a lock or condition. - * - * @return {@code true} if no additional blocking is necessary - * (i.e., if isReleasable would return true) - * @throws InterruptedException if interrupted while waiting - * (the method is not required to do so, but is allowed to) - */ - boolean block() throws InterruptedException; - - /** - * Returns {@code true} if blocking is unnecessary. - */ - boolean isReleasable(); - } - - /** - * Blocks in accord with the given blocker. If the current thread - * is a {@link ForkJoinWorkerThread}, this method possibly - * arranges for a spare thread to be activated if necessary to - * ensure sufficient parallelism while the current thread is blocked. - * - *

If the caller is not a {@link ForkJoinTask}, this method is - * behaviorally equivalent to - *

-     * {@code
-     * while (!blocker.isReleasable())
-     *   if (blocker.block())
-     *     return;
-     * }
- * - * If the caller is a {@code ForkJoinTask}, then the pool may - * first be expanded to ensure parallelism, and later adjusted. - * - * @param blocker the blocker - * @throws InterruptedException if blocker.block did so - */ - public static void managedBlock(ManagedBlocker blocker) - throws InterruptedException { - Thread t = Thread.currentThread(); - ForkJoinPool p = ((t instanceof ForkJoinWorkerThread) ? - ((ForkJoinWorkerThread)t).pool : null); - while (!blocker.isReleasable()) { - if (p == null || p.tryCompensate(null, blocker)) { - try { - do {} while (!blocker.isReleasable() && !blocker.block()); - } finally { - if (p != null) - p.incrementActiveCount(); - } - break; - } - } - } - - // AbstractExecutorService overrides. These rely on undocumented - // fact that ForkJoinTask.adapt returns ForkJoinTasks that also - // implement RunnableFuture. - - protected RunnableFuture newTaskFor(Runnable runnable, T value) { - return new ForkJoinTask.AdaptedRunnable(runnable, value); - } - - protected RunnableFuture newTaskFor(Callable callable) { - return new ForkJoinTask.AdaptedCallable(callable); - } - - // Unsafe mechanics - private static final sun.misc.Unsafe U; - private static final long CTL; - private static final long PARKBLOCKER; - private static final int ABASE; - private static final int ASHIFT; - - static { - poolNumberGenerator = new AtomicInteger(); - nextSubmitterSeed = new AtomicInteger(0x55555555); - modifyThreadPermission = new RuntimePermission("modifyThread"); - defaultForkJoinWorkerThreadFactory = - new DefaultForkJoinWorkerThreadFactory(); - submitters = new ThreadSubmitter(); - int s; - try { - U = getUnsafe(); - Class k = ForkJoinPool.class; - Class ak = ForkJoinTask[].class; - CTL = U.objectFieldOffset - (k.getDeclaredField("ctl")); - Class tk = Thread.class; - PARKBLOCKER = U.objectFieldOffset - (tk.getDeclaredField("parkBlocker")); - ABASE = U.arrayBaseOffset(ak); - s = U.arrayIndexScale(ak); - } catch (Exception e) { - throw new Error(e); - } - if ((s & (s-1)) != 0) - throw new Error("data type scale not a power of two"); - ASHIFT = 31 - Integer.numberOfLeadingZeros(s); - } - - /** - * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package. - * Replace with a simple call to Unsafe.getUnsafe when integrating - * into a jdk. 
- * - * @return a sun.misc.Unsafe - */ - private static sun.misc.Unsafe getUnsafe() { - return Unsafe.instance; - } - -} diff --git a/akka-actor/src/main/java/akka/jsr166y/ForkJoinTask.java b/akka-actor/src/main/java/akka/jsr166y/ForkJoinTask.java deleted file mode 100644 index fe12152c3a..0000000000 --- a/akka-actor/src/main/java/akka/jsr166y/ForkJoinTask.java +++ /dev/null @@ -1,1506 +0,0 @@ -/* - * Written by Doug Lea with assistance from members of JCP JSR-166 - * Expert Group and released to the public domain, as explained at - * http://creativecommons.org/publicdomain/zero/1.0/ - */ - -package akka.jsr166y; -import java.io.Serializable; -import java.util.Collection; -import java.util.List; -import java.util.RandomAccess; -import java.lang.ref.WeakReference; -import java.lang.ref.ReferenceQueue; -import java.util.concurrent.Callable; -import java.util.concurrent.CancellationException; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.Future; -import java.util.concurrent.RejectedExecutionException; -import java.util.concurrent.RunnableFuture; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; -import java.util.concurrent.locks.ReentrantLock; -import java.lang.reflect.Constructor; -import akka.util.Unsafe; - -/** - * Abstract base class for tasks that run within a {@link ForkJoinPool}. - * A {@code ForkJoinTask} is a thread-like entity that is much - * lighter weight than a normal thread. Huge numbers of tasks and - * subtasks may be hosted by a small number of actual threads in a - * ForkJoinPool, at the price of some usage limitations. - * - *

A "main" {@code ForkJoinTask} begins execution when submitted - * to a {@link ForkJoinPool}. Once started, it will usually in turn - * start other subtasks. As indicated by the name of this class, - * many programs using {@code ForkJoinTask} employ only methods - * {@link #fork} and {@link #join}, or derivatives such as {@link - * #invokeAll(ForkJoinTask...) invokeAll}. However, this class also - * provides a number of other methods that can come into play in - * advanced usages, as well as extension mechanics that allow - * support of new forms of fork/join processing. - * - *

A {@code ForkJoinTask} is a lightweight form of {@link Future}. - * The efficiency of {@code ForkJoinTask}s stems from a set of - * restrictions (that are only partially statically enforceable) - * reflecting their main use as computational tasks calculating pure - * functions or operating on purely isolated objects. The primary - * coordination mechanisms are {@link #fork}, that arranges - * asynchronous execution, and {@link #join}, that doesn't proceed - * until the task's result has been computed. Computations should - * ideally avoid {@code synchronized} methods or blocks, and should - * minimize other blocking synchronization apart from joining other - * tasks or using synchronizers such as Phasers that are advertised to - * cooperate with fork/join scheduling. Subdividable tasks should also - * not perform blocking IO, and should ideally access variables that - * are completely independent of those accessed by other running - * tasks. These guidelines are loosely enforced by not permitting - * checked exceptions such as {@code IOExceptions} to be - * thrown. However, computations may still encounter unchecked - * exceptions, that are rethrown to callers attempting to join - * them. These exceptions may additionally include {@link - * RejectedExecutionException} stemming from internal resource - * exhaustion, such as failure to allocate internal task - * queues. Rethrown exceptions behave in the same way as regular - * exceptions, but, when possible, contain stack traces (as displayed - * for example using {@code ex.printStackTrace()}) of both the thread - * that initiated the computation as well as the thread actually - * encountering the exception; minimally only the latter. - * - *
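How the two reporting conventions differ in practice, as a small sketch (java.util.concurrent classes assumed; the exception and message are illustrative):

    import java.util.concurrent.ExecutionException;
    import java.util.concurrent.ForkJoinPool;
    import java.util.concurrent.ForkJoinTask;

    class ExceptionSketch {
        public static void main(String[] args) throws InterruptedException {
            ForkJoinTask<?> failing = new ForkJoinPool().submit(new Runnable() {
                public void run() { throw new IllegalStateException("boom"); }
            });
            try {
                failing.get();                   // Future convention: wraps the cause
            } catch (ExecutionException e) {
                System.out.println(e.getCause());
            }
            try {
                failing.join();                  // ForkJoinTask convention: rethrows unchecked
            } catch (IllegalStateException e) {
                System.out.println(e);
            }
        }
    }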

It is possible to define and use ForkJoinTasks that may block, - * but doing so requires three further considerations: (1) Completion - * of few if any other tasks should be dependent on a task - * that blocks on external synchronization or IO. Event-style async - * tasks that are never joined often fall into this category. (2) To - * minimize resource impact, tasks should be small; ideally performing - * only the (possibly) blocking action. (3) Unless the {@link - * ForkJoinPool.ManagedBlocker} API is used, or the number of possibly - * blocked tasks is known to be less than the pool's {@link - * ForkJoinPool#getParallelism} level, the pool cannot guarantee that - * enough threads will be available to ensure progress or good - * performance. - * - *

The primary method for awaiting completion and extracting - * results of a task is {@link #join}, but there are several variants: - * The {@link Future#get} methods support interruptible and/or timed - * waits for completion and report results using {@code Future} - * conventions. Method {@link #invoke} is semantically - * equivalent to {@code fork(); join()} but always attempts to begin - * execution in the current thread. The "quiet" forms of - * these methods do not extract results or report exceptions. These - * may be useful when a set of tasks are being executed, and you need - * to delay processing of results or exceptions until all complete. - * Method {@code invokeAll} (available in multiple versions) - * performs the most common form of parallel invocation: forking a set - * of tasks and joining them all. - * - *
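A sketch contrasting invoke with invokeAll (standard classes assumed; the task names are illustrative):

    import java.util.concurrent.ForkJoinPool;
    import java.util.concurrent.RecursiveAction;

    class PrintTask extends RecursiveAction {
        final String name;
        PrintTask(String name) { this.name = name; }
        protected void compute() { System.out.println(name); }
    }

    class InvokeSketch {
        public static void main(String[] args) {
            ForkJoinPool pool = new ForkJoinPool();
            // invoke() is equivalent to fork(); join(), but begins execution in the calling thread
            pool.invoke(new RecursiveAction() {
                protected void compute() {
                    // forks both tasks and waits for both; any exception is rethrown
                    invokeAll(new PrintTask("left"), new PrintTask("right"));
                }
            });
            pool.shutdown();
        }
    }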

In the most typical usages, a fork-join pair act like a call - * (fork) and return (join) from a parallel recursive function. As is - * the case with other forms of recursive calls, returns (joins) - * should be performed innermost-first. For example, {@code a.fork(); - * b.fork(); b.join(); a.join();} is likely to be substantially more - * efficient than joining {@code a} before {@code b}. - * - *
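One way to read the recommendation: the task forked last is the one most likely to still be at the top of the worker's local queue, so joining it first often lets the joining thread run it directly. A minimal sketch of that ordering:

    import java.util.concurrent.RecursiveAction;

    class Ordered extends RecursiveAction {
        final RecursiveAction a, b;
        Ordered(RecursiveAction a, RecursiveAction b) { this.a = a; this.b = b; }
        protected void compute() {
            a.fork();
            b.fork();
            b.join();   // innermost-first: join b before a
            a.join();
        }
    }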

The execution status of tasks may be queried at several levels - * of detail: {@link #isDone} is true if a task completed in any way - * (including the case where a task was cancelled without executing); - * {@link #isCompletedNormally} is true if a task completed without - * cancellation or encountering an exception; {@link #isCancelled} is - * true if the task was cancelled (in which case {@link #getException} - * returns a {@link java.util.concurrent.CancellationException}); and - * {@link #isCompletedAbnormally} is true if a task was either - * cancelled or encountered an exception, in which case {@link - * #getException} will return either the encountered exception or - * {@link java.util.concurrent.CancellationException}. - * - *
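A sketch of querying completion status after a quiet join (standard classes assumed):

    import java.util.concurrent.Callable;
    import java.util.concurrent.ForkJoinPool;
    import java.util.concurrent.ForkJoinTask;

    class StatusSketch {
        public static void main(String[] args) {
            ForkJoinTask<String> task = new ForkJoinPool().submit(new Callable<String>() {
                public String call() { return "done"; }
            });
            task.quietlyJoin();                  // wait without extracting a result or exception
            if (task.isCompletedNormally())
                System.out.println(task.join()); // safe: completed without cancellation or exception
            else if (task.isCancelled())
                System.out.println("cancelled");
            else
                System.out.println(task.getException());
        }
    }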

The ForkJoinTask class is not usually directly subclassed. - * Instead, you subclass one of the abstract classes that support a - * particular style of fork/join processing, typically {@link - * RecursiveAction} for computations that do not return results, or - * {@link RecursiveTask} for those that do. Normally, a concrete - * ForkJoinTask subclass declares fields comprising its parameters, - * established in a constructor, and then defines a {@code compute} - * method that somehow uses the control methods supplied by this base - * class. While these methods have {@code public} access (to allow - * instances of different task subclasses to call each other's - * methods), some of them may only be called from within other - * ForkJoinTasks (as may be determined using method {@link - * #inForkJoinPool}). Attempts to invoke them in other contexts - * result in exceptions or errors, possibly including - * {@code ClassCastException}. - * - *
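A typical concrete subclass in that style, sketched with an arbitrary threshold:

    import java.util.concurrent.ForkJoinPool;
    import java.util.concurrent.RecursiveAction;

    // Parameters live in fields set by the constructor; compute() works sequentially
    // below a threshold and splits recursively above it.
    class IncrementAll extends RecursiveAction {
        static final int THRESHOLD = 1000;
        final long[] array; final int lo, hi;
        IncrementAll(long[] array, int lo, int hi) { this.array = array; this.lo = lo; this.hi = hi; }
        protected void compute() {
            if (hi - lo <= THRESHOLD) {
                for (int i = lo; i < hi; i++) array[i]++;
            } else {
                int mid = (lo + hi) >>> 1;
                invokeAll(new IncrementAll(array, lo, mid), new IncrementAll(array, mid, hi));
            }
        }
        public static void main(String[] args) {
            long[] data = new long[1 << 20];
            new ForkJoinPool().invoke(new IncrementAll(data, 0, data.length));
            System.out.println(data[0]);   // prints 1
        }
    }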

Method {@link #join} and its variants are appropriate for use - * only when completion dependencies are acyclic; that is, the - * parallel computation can be described as a directed acyclic graph - * (DAG). Otherwise, executions may encounter a form of deadlock as - * tasks cyclically wait for each other. However, this framework - * supports other methods and techniques (for example the use of - * {@link Phaser}, {@link #helpQuiesce}, and {@link #complete}) that - * may be of use in constructing custom subclasses for problems that - * are not statically structured as DAGs. To support such usages a - * ForkJoinTask may be atomically marked using {@link - * #markForkJoinTask} and checked for marking using {@link - * #isMarkedForkJoinTask}. The ForkJoinTask implementation does not - * use these {@code protected} methods or marks for any purpose, but - * they may be of use in the construction of specialized subclasses. - * For example, parallel graph traversals can use the supplied methods - * to avoid revisiting nodes/tasks that have already been processed. - * Also, completion based designs can use them to record that one - * subtask has completed. (Method names for marking are bulky in part - * to encourage definition of methods that reflect their usage - * patterns.) - * - *
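A rough sketch of the graph-traversal idea using the mark methods declared in this file (these methods are specific to this jsr166y copy; the Node/VisitTask wiring and the RecursiveAction base class from this package are assumptions made for illustration):

    class Node {
        Node[] neighbors = new Node[0];
        VisitTask task = new VisitTask(this);
    }

    class VisitTask extends RecursiveAction {
        final Node node;
        VisitTask(Node node) { this.node = node; }
        protected void compute() {
            // process this node, then fork each neighbor at most once:
            for (Node n : node.neighbors) {
                // markForkJoinTask() returns true only for the first caller,
                // so an already-visited node is never forked again
                if (n.task.markForkJoinTask())
                    n.task.fork();
            }
        }
    }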

Most base support methods are {@code final}, to prevent - * overriding of implementations that are intrinsically tied to the - * underlying lightweight task scheduling framework. Developers - * creating new basic styles of fork/join processing should minimally - * implement {@code protected} methods {@link #exec}, {@link - * #setRawResult}, and {@link #getRawResult}, while also introducing - * an abstract computational method that can be implemented in its - * subclasses, possibly relying on other {@code protected} methods - * provided by this class. - * - *
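A minimal extension in that style: a task whose result is supplied externally via complete(), sketched against the standard API (the class name is illustrative):

    import java.util.concurrent.ForkJoinPool;
    import java.util.concurrent.ForkJoinTask;

    class SettableTask<V> extends ForkJoinTask<V> {
        private volatile V result;
        public V getRawResult() { return result; }
        protected void setRawResult(V value) { result = value; }
        protected boolean exec() { return false; }   // not done until complete(v) is called

        public static void main(String[] args) {
            SettableTask<String> t = new SettableTask<String>();
            new ForkJoinPool().execute(t);
            t.complete("hello");                     // makes join() return "hello"
            System.out.println(t.join());
        }
    }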

ForkJoinTasks should perform relatively small amounts of - * computation. Large tasks should be split into smaller subtasks, - * usually via recursive decomposition. As a very rough rule of thumb, - * a task should perform more than 100 and less than 10000 basic - * computational steps, and should avoid indefinite looping. If tasks - * are too big, then parallelism cannot improve throughput. If too - * small, then memory and internal task maintenance overhead may - * overwhelm processing. - * - *

This class provides {@code adapt} methods for {@link Runnable} - * and {@link Callable}, that may be of use when mixing execution of - * {@code ForkJoinTasks} with other kinds of tasks. When all tasks are - * of this form, consider using a pool constructed in asyncMode. - * - *
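A sketch of mixing a plain Callable into fork/join execution via adapt (standard classes assumed):

    import java.util.concurrent.Callable;
    import java.util.concurrent.ForkJoinPool;
    import java.util.concurrent.ForkJoinTask;

    class AdaptSketch {
        public static void main(String[] args) {
            // adapt() wraps plain Runnables/Callables so they can be forked, joined, or invoked
            ForkJoinTask<Integer> wrapped = ForkJoinTask.adapt(new Callable<Integer>() {
                public Integer call() { return 6 * 7; }
            });
            System.out.println(new ForkJoinPool().invoke(wrapped));   // prints 42
        }
    }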

ForkJoinTasks are {@code Serializable}, which enables them to be - * used in extensions such as remote execution frameworks. It is - * sensible to serialize tasks only before or after, but not during, - * execution. Serialization is not relied on during execution itself. - * - * @since 1.7 - * @author Doug Lea - */ -public abstract class ForkJoinTask implements Future, Serializable { - - /* - * See the internal documentation of class ForkJoinPool for a - * general implementation overview. ForkJoinTasks are mainly - * responsible for maintaining their "status" field amidst relays - * to methods in ForkJoinWorkerThread and ForkJoinPool. - * - * The methods of this class are more-or-less layered into - * (1) basic status maintenance - * (2) execution and awaiting completion - * (3) user-level methods that additionally report results. - * This is sometimes hard to see because this file orders exported - * methods in a way that flows well in javadocs. - */ - - /* - * The status field holds run control status bits packed into a - * single int to minimize footprint and to ensure atomicity (via - * CAS). Status is initially zero, and takes on nonnegative - * values until completed, upon which status (anded with - * DONE_MASK) holds value NORMAL, CANCELLED, or EXCEPTIONAL. Tasks - * undergoing blocking waits by other threads have the SIGNAL bit - * set. Completion of a stolen task with SIGNAL set awakens any - * waiters via notifyAll. Even though suboptimal for some - * purposes, we use basic builtin wait/notify to take advantage of - * "monitor inflation" in JVMs that we would otherwise need to - * emulate to avoid adding further per-task bookkeeping overhead. - * We want these monitors to be "fat", i.e., not use biasing or - * thin-lock techniques, so use some odd coding idioms that tend - * to avoid them, mainly by arranging that every synchronized - * block performs a wait, notifyAll or both. - */ - - /** The run status of this task */ - volatile int status; // accessed directly by pool and workers - static final int DONE_MASK = 0xf0000000; // mask out non-completion bits - static final int NORMAL = 0xf0000000; // must be negative - static final int CANCELLED = 0xc0000000; // must be < NORMAL - static final int EXCEPTIONAL = 0x80000000; // must be < CANCELLED - static final int SIGNAL = 0x00000001; - static final int MARKED = 0x00000002; - - /** - * Marks completion and wakes up threads waiting to join this - * task. A specialization for NORMAL completion is in method - * doExec. - * - * @param completion one of NORMAL, CANCELLED, EXCEPTIONAL - * @return completion status on exit - */ - private int setCompletion(int completion) { - for (int s;;) { - if ((s = status) < 0) - return s; - if (U.compareAndSwapInt(this, STATUS, s, s | completion)) { - if ((s & SIGNAL) != 0) - synchronized (this) { notifyAll(); } - return completion; - } - } - } - - /** - * Primary execution method for stolen tasks. Unless done, calls - * exec and records status if completed, but doesn't wait for - * completion otherwise. - * - * @return status on exit from this method - */ - final int doExec() { - int s; boolean completed; - if ((s = status) >= 0) { - try { - completed = exec(); - } catch (Throwable rex) { - return setExceptionalCompletion(rex); - } - while ((s = status) >= 0 && completed) { - if (U.compareAndSwapInt(this, STATUS, s, s | NORMAL)) { - if ((s & SIGNAL) != 0) - synchronized (this) { notifyAll(); } - return NORMAL; - } - } - } - return s; - } - - /** - * Tries to set SIGNAL status. Used by ForkJoinPool. 
Other - * variants are directly incorporated into externalAwaitDone etc. - * - * @return true if successful - */ - final boolean trySetSignal() { - int s; - return U.compareAndSwapInt(this, STATUS, s = status, s | SIGNAL); - } - - /** - * Blocks a non-worker-thread until completion. - * @return status upon completion - */ - private int externalAwaitDone() { - boolean interrupted = false; - int s; - while ((s = status) >= 0) { - if (U.compareAndSwapInt(this, STATUS, s, s | SIGNAL)) { - synchronized (this) { - if (status >= 0) { - try { - wait(); - } catch (InterruptedException ie) { - interrupted = true; - } - } - else - notifyAll(); - } - } - } - if (interrupted) - Thread.currentThread().interrupt(); - return s; - } - - /** - * Blocks a non-worker-thread until completion or interruption. - */ - private int externalInterruptibleAwaitDone() throws InterruptedException { - int s; - if (Thread.interrupted()) - throw new InterruptedException(); - while ((s = status) >= 0) { - if (U.compareAndSwapInt(this, STATUS, s, s | SIGNAL)) { - synchronized (this) { - if (status >= 0) - wait(); - else - notifyAll(); - } - } - } - return s; - } - - - /** - * Implementation for join, get, quietlyJoin. Directly handles - * only cases of already-completed, external wait, and - * unfork+exec. Others are relayed to ForkJoinPool.awaitJoin. - * - * @return status upon completion - */ - private int doJoin() { - int s; Thread t; ForkJoinWorkerThread wt; ForkJoinPool.WorkQueue w; - if ((s = status) >= 0) { - if (((t = Thread.currentThread()) instanceof ForkJoinWorkerThread)) { - if (!(w = (wt = (ForkJoinWorkerThread)t).workQueue). - tryUnpush(this) || (s = doExec()) >= 0) - s = wt.pool.awaitJoin(w, this); - } - else - s = externalAwaitDone(); - } - return s; - } - - /** - * Implementation for invoke, quietlyInvoke. - * - * @return status upon completion - */ - private int doInvoke() { - int s; Thread t; ForkJoinWorkerThread wt; - if ((s = doExec()) >= 0) { - if ((t = Thread.currentThread()) instanceof ForkJoinWorkerThread) - s = (wt = (ForkJoinWorkerThread)t).pool.awaitJoin(wt.workQueue, - this); - else - s = externalAwaitDone(); - } - return s; - } - - // Exception table support - - /** - * Table of exceptions thrown by tasks, to enable reporting by - * callers. Because exceptions are rare, we don't directly keep - * them with task objects, but instead use a weak ref table. Note - * that cancellation exceptions don't appear in the table, but are - * instead recorded as status values. - * - * Note: These statics are initialized below in static block. - */ - private static final ExceptionNode[] exceptionTable; - private static final ReentrantLock exceptionTableLock; - private static final ReferenceQueue exceptionTableRefQueue; - - /** - * Fixed capacity for exceptionTable. - */ - private static final int EXCEPTION_MAP_CAPACITY = 32; - - /** - * Key-value nodes for exception table. The chained hash table - * uses identity comparisons, full locking, and weak references - * for keys. The table has a fixed capacity because it only - * maintains task exceptions long enough for joiners to access - * them, so should never become very large for sustained - * periods. However, since we do not know when the last joiner - * completes, we must use weak references and expunge them. We do - * so on each operation (hence full locking). Also, some thread in - * any ForkJoinPool will call helpExpungeStaleExceptions when its - * pool becomes isQuiescent. 
- */ - static final class ExceptionNode extends WeakReference> { - final Throwable ex; - ExceptionNode next; - final long thrower; // use id not ref to avoid weak cycles - ExceptionNode(ForkJoinTask task, Throwable ex, ExceptionNode next) { - super(task, exceptionTableRefQueue); - this.ex = ex; - this.next = next; - this.thrower = Thread.currentThread().getId(); - } - } - - /** - * Records exception and sets exceptional completion. - * - * @return status on exit - */ - private int setExceptionalCompletion(Throwable ex) { - int h = System.identityHashCode(this); - final ReentrantLock lock = exceptionTableLock; - lock.lock(); - try { - expungeStaleExceptions(); - ExceptionNode[] t = exceptionTable; - int i = h & (t.length - 1); - for (ExceptionNode e = t[i]; ; e = e.next) { - if (e == null) { - t[i] = new ExceptionNode(this, ex, t[i]); - break; - } - if (e.get() == this) // already present - break; - } - } finally { - lock.unlock(); - } - return setCompletion(EXCEPTIONAL); - } - - /** - * Cancels, ignoring any exceptions thrown by cancel. Used during - * worker and pool shutdown. Cancel is spec'ed not to throw any - * exceptions, but if it does anyway, we have no recourse during - * shutdown, so guard against this case. - */ - static final void cancelIgnoringExceptions(ForkJoinTask t) { - if (t != null && t.status >= 0) { - try { - t.cancel(false); - } catch (Throwable ignore) { - } - } - } - - /** - * Removes exception node and clears status - */ - private void clearExceptionalCompletion() { - int h = System.identityHashCode(this); - final ReentrantLock lock = exceptionTableLock; - lock.lock(); - try { - ExceptionNode[] t = exceptionTable; - int i = h & (t.length - 1); - ExceptionNode e = t[i]; - ExceptionNode pred = null; - while (e != null) { - ExceptionNode next = e.next; - if (e.get() == this) { - if (pred == null) - t[i] = next; - else - pred.next = next; - break; - } - pred = e; - e = next; - } - expungeStaleExceptions(); - status = 0; - } finally { - lock.unlock(); - } - } - - /** - * Returns a rethrowable exception for the given task, if - * available. To provide accurate stack traces, if the exception - * was not thrown by the current thread, we try to create a new - * exception of the same type as the one thrown, but with the - * recorded exception as its cause. If there is no such - * constructor, we instead try to use a no-arg constructor, - * followed by initCause, to the same effect. If none of these - * apply, or any fail due to other exceptions, we return the - * recorded exception, which is still correct, although it may - * contain a misleading stack trace. 
- * - * @return the exception, or null if none - */ - private Throwable getThrowableException() { - if ((status & DONE_MASK) != EXCEPTIONAL) - return null; - int h = System.identityHashCode(this); - ExceptionNode e; - final ReentrantLock lock = exceptionTableLock; - lock.lock(); - try { - expungeStaleExceptions(); - ExceptionNode[] t = exceptionTable; - e = t[h & (t.length - 1)]; - while (e != null && e.get() != this) - e = e.next; - } finally { - lock.unlock(); - } - Throwable ex; - if (e == null || (ex = e.ex) == null) - return null; - if (e.thrower != Thread.currentThread().getId()) { - Class ec = ex.getClass(); - try { - Constructor noArgCtor = null; - Constructor[] cs = ec.getConstructors();// public ctors only - for (int i = 0; i < cs.length; ++i) { - Constructor c = cs[i]; - Class[] ps = c.getParameterTypes(); - if (ps.length == 0) - noArgCtor = c; - else if (ps.length == 1 && ps[0] == Throwable.class) - return (Throwable)(c.newInstance(ex)); - } - if (noArgCtor != null) { - Throwable wx = (Throwable)(noArgCtor.newInstance()); - wx.initCause(ex); - return wx; - } - } catch (Exception ignore) { - } - } - return ex; - } - - /** - * Poll stale refs and remove them. Call only while holding lock. - */ - private static void expungeStaleExceptions() { - for (Object x; (x = exceptionTableRefQueue.poll()) != null;) { - if (x instanceof ExceptionNode) { - ForkJoinTask key = ((ExceptionNode)x).get(); - ExceptionNode[] t = exceptionTable; - int i = System.identityHashCode(key) & (t.length - 1); - ExceptionNode e = t[i]; - ExceptionNode pred = null; - while (e != null) { - ExceptionNode next = e.next; - if (e == x) { - if (pred == null) - t[i] = next; - else - pred.next = next; - break; - } - pred = e; - e = next; - } - } - } - } - - /** - * If lock is available, poll stale refs and remove them. - * Called from ForkJoinPool when pools become quiescent. - */ - static final void helpExpungeStaleExceptions() { - final ReentrantLock lock = exceptionTableLock; - if (lock.tryLock()) { - try { - expungeStaleExceptions(); - } finally { - lock.unlock(); - } - } - } - - /** - * Throws exception, if any, associated with the given status. - */ - private void reportException(int s) { - Throwable ex = ((s == CANCELLED) ? new CancellationException() : - (s == EXCEPTIONAL) ? getThrowableException() : - null); - if (ex != null) - U.throwException(ex); - } - - // public methods - - /** - * Arranges to asynchronously execute this task. While it is not - * necessarily enforced, it is a usage error to fork a task more - * than once unless it has completed and been reinitialized. - * Subsequent modifications to the state of this task or any data - * it operates on are not necessarily consistently observable by - * any thread other than the one executing it unless preceded by a - * call to {@link #join} or related methods, or a call to {@link - * #isDone} returning {@code true}. - * - *

This method may be invoked only from within {@code - * ForkJoinPool} computations (as may be determined using method - * {@link #inForkJoinPool}). Attempts to invoke in other contexts - * result in exceptions or errors, possibly including {@code - * ClassCastException}. - * - * @return {@code this}, to simplify usage - */ - public final ForkJoinTask fork() { - ((ForkJoinWorkerThread)Thread.currentThread()).workQueue.push(this); - return this; - } - - /** - * Returns the result of the computation when it {@link #isDone is - * done}. This method differs from {@link #get()} in that - * abnormal completion results in {@code RuntimeException} or - * {@code Error}, not {@code ExecutionException}, and that - * interrupts of the calling thread do not cause the - * method to abruptly return by throwing {@code - * InterruptedException}. - * - * @return the computed result - */ - public final V join() { - int s; - if ((s = doJoin() & DONE_MASK) != NORMAL) - reportException(s); - return getRawResult(); - } - - /** - * Commences performing this task, awaits its completion if - * necessary, and returns its result, or throws an (unchecked) - * {@code RuntimeException} or {@code Error} if the underlying - * computation did so. - * - * @return the computed result - */ - public final V invoke() { - int s; - if ((s = doInvoke() & DONE_MASK) != NORMAL) - reportException(s); - return getRawResult(); - } - - /** - * Forks the given tasks, returning when {@code isDone} holds for - * each task or an (unchecked) exception is encountered, in which - * case the exception is rethrown. If more than one task - * encounters an exception, then this method throws any one of - * these exceptions. If any task encounters an exception, the - * other may be cancelled. However, the execution status of - * individual tasks is not guaranteed upon exceptional return. The - * status of each task may be obtained using {@link - * #getException()} and related methods to check if they have been - * cancelled, completed normally or exceptionally, or left - * unprocessed. - * - *

This method may be invoked only from within {@code - * ForkJoinPool} computations (as may be determined using method - * {@link #inForkJoinPool}). Attempts to invoke in other contexts - * result in exceptions or errors, possibly including {@code - * ClassCastException}. - * - * @param t1 the first task - * @param t2 the second task - * @throws NullPointerException if any task is null - */ - public static void invokeAll(ForkJoinTask t1, ForkJoinTask t2) { - int s1, s2; - t2.fork(); - if ((s1 = t1.doInvoke() & DONE_MASK) != NORMAL) - t1.reportException(s1); - if ((s2 = t2.doJoin() & DONE_MASK) != NORMAL) - t2.reportException(s2); - } - - /** - * Forks the given tasks, returning when {@code isDone} holds for - * each task or an (unchecked) exception is encountered, in which - * case the exception is rethrown. If more than one task - * encounters an exception, then this method throws any one of - * these exceptions. If any task encounters an exception, others - * may be cancelled. However, the execution status of individual - * tasks is not guaranteed upon exceptional return. The status of - * each task may be obtained using {@link #getException()} and - * related methods to check if they have been cancelled, completed - * normally or exceptionally, or left unprocessed. - * - *

This method may be invoked only from within {@code - * ForkJoinPool} computations (as may be determined using method - * {@link #inForkJoinPool}). Attempts to invoke in other contexts - * result in exceptions or errors, possibly including {@code - * ClassCastException}. - * - * @param tasks the tasks - * @throws NullPointerException if any task is null - */ - public static void invokeAll(ForkJoinTask... tasks) { - Throwable ex = null; - int last = tasks.length - 1; - for (int i = last; i >= 0; --i) { - ForkJoinTask t = tasks[i]; - if (t == null) { - if (ex == null) - ex = new NullPointerException(); - } - else if (i != 0) - t.fork(); - else if (t.doInvoke() < NORMAL && ex == null) - ex = t.getException(); - } - for (int i = 1; i <= last; ++i) { - ForkJoinTask t = tasks[i]; - if (t != null) { - if (ex != null) - t.cancel(false); - else if (t.doJoin() < NORMAL) - ex = t.getException(); - } - } - if (ex != null) - U.throwException(ex); - } - - /** - * Forks all tasks in the specified collection, returning when - * {@code isDone} holds for each task or an (unchecked) exception - * is encountered, in which case the exception is rethrown. If - * more than one task encounters an exception, then this method - * throws any one of these exceptions. If any task encounters an - * exception, others may be cancelled. However, the execution - * status of individual tasks is not guaranteed upon exceptional - * return. The status of each task may be obtained using {@link - * #getException()} and related methods to check if they have been - * cancelled, completed normally or exceptionally, or left - * unprocessed. - * - *

This method may be invoked only from within {@code - * ForkJoinPool} computations (as may be determined using method - * {@link #inForkJoinPool}). Attempts to invoke in other contexts - * result in exceptions or errors, possibly including {@code - * ClassCastException}. - * - * @param tasks the collection of tasks - * @return the tasks argument, to simplify usage - * @throws NullPointerException if tasks or any element are null - */ - public static > Collection invokeAll(Collection tasks) { - if (!(tasks instanceof RandomAccess) || !(tasks instanceof List)) { - invokeAll(tasks.toArray(new ForkJoinTask[tasks.size()])); - return tasks; - } - @SuppressWarnings("unchecked") - List> ts = - (List>) tasks; - Throwable ex = null; - int last = ts.size() - 1; - for (int i = last; i >= 0; --i) { - ForkJoinTask t = ts.get(i); - if (t == null) { - if (ex == null) - ex = new NullPointerException(); - } - else if (i != 0) - t.fork(); - else if (t.doInvoke() < NORMAL && ex == null) - ex = t.getException(); - } - for (int i = 1; i <= last; ++i) { - ForkJoinTask t = ts.get(i); - if (t != null) { - if (ex != null) - t.cancel(false); - else if (t.doJoin() < NORMAL) - ex = t.getException(); - } - } - if (ex != null) - U.throwException(ex); - return tasks; - } - - /** - * Attempts to cancel execution of this task. This attempt will - * fail if the task has already completed or could not be - * cancelled for some other reason. If successful, and this task - * has not started when {@code cancel} is called, execution of - * this task is suppressed. After this method returns - * successfully, unless there is an intervening call to {@link - * #reinitialize}, subsequent calls to {@link #isCancelled}, - * {@link #isDone}, and {@code cancel} will return {@code true} - * and calls to {@link #join} and related methods will result in - * {@code CancellationException}. - * - *

This method may be overridden in subclasses, but if so, must - * still ensure that these properties hold. In particular, the - * {@code cancel} method itself must not throw exceptions. - * - *

This method is designed to be invoked by other - * tasks. To terminate the current task, you can just return or - * throw an unchecked exception from its computation method, or - * invoke {@link #completeExceptionally}. - * - * @param mayInterruptIfRunning this value has no effect in the - * default implementation because interrupts are not used to - * control cancellation. - * - * @return {@code true} if this task is now cancelled - */ - public boolean cancel(boolean mayInterruptIfRunning) { - return (setCompletion(CANCELLED) & DONE_MASK) == CANCELLED; - } - - public final boolean isDone() { - return status < 0; - } - - public final boolean isCancelled() { - return (status & DONE_MASK) == CANCELLED; - } - - /** - * Returns {@code true} if this task threw an exception or was cancelled. - * - * @return {@code true} if this task threw an exception or was cancelled - */ - public final boolean isCompletedAbnormally() { - return status < NORMAL; - } - - /** - * Returns {@code true} if this task completed without throwing an - * exception and was not cancelled. - * - * @return {@code true} if this task completed without throwing an - * exception and was not cancelled - */ - public final boolean isCompletedNormally() { - return (status & DONE_MASK) == NORMAL; - } - - /** - * Returns the exception thrown by the base computation, or a - * {@code CancellationException} if cancelled, or {@code null} if - * none or if the method has not yet completed. - * - * @return the exception, or {@code null} if none - */ - public final Throwable getException() { - int s = status & DONE_MASK; - return ((s >= NORMAL) ? null : - (s == CANCELLED) ? new CancellationException() : - getThrowableException()); - } - - /** - * Completes this task abnormally, and if not already aborted or - * cancelled, causes it to throw the given exception upon - * {@code join} and related operations. This method may be used - * to induce exceptions in asynchronous tasks, or to force - * completion of tasks that would not otherwise complete. Its use - * in other situations is discouraged. This method is - * overridable, but overridden versions must invoke {@code super} - * implementation to maintain guarantees. - * - * @param ex the exception to throw. If this exception is not a - * {@code RuntimeException} or {@code Error}, the actual exception - * thrown will be a {@code RuntimeException} with cause {@code ex}. - */ - public void completeExceptionally(Throwable ex) { - setExceptionalCompletion((ex instanceof RuntimeException) || - (ex instanceof Error) ? ex : - new RuntimeException(ex)); - } - - /** - * Completes this task, and if not already aborted or cancelled, - * returning the given value as the result of subsequent - * invocations of {@code join} and related operations. This method - * may be used to provide results for asynchronous tasks, or to - * provide alternative handling for tasks that would not otherwise - * complete normally. Its use in other situations is - * discouraged. This method is overridable, but overridden - * versions must invoke {@code super} implementation to maintain - * guarantees. - * - * @param value the result value for this task - */ - public void complete(V value) { - try { - setRawResult(value); - } catch (Throwable rex) { - setExceptionalCompletion(rex); - return; - } - setCompletion(NORMAL); - } - - /** - * Waits if necessary for the computation to complete, and then - * retrieves its result. 
- * - * @return the computed result - * @throws CancellationException if the computation was cancelled - * @throws ExecutionException if the computation threw an - * exception - * @throws InterruptedException if the current thread is not a - * member of a ForkJoinPool and was interrupted while waiting - */ - public final V get() throws InterruptedException, ExecutionException { - int s = (Thread.currentThread() instanceof ForkJoinWorkerThread) ? - doJoin() : externalInterruptibleAwaitDone(); - Throwable ex; - if ((s &= DONE_MASK) == CANCELLED) - throw new CancellationException(); - if (s == EXCEPTIONAL && (ex = getThrowableException()) != null) - throw new ExecutionException(ex); - return getRawResult(); - } - - /** - * Waits if necessary for at most the given time for the computation - * to complete, and then retrieves its result, if available. - * - * @param timeout the maximum time to wait - * @param unit the time unit of the timeout argument - * @return the computed result - * @throws CancellationException if the computation was cancelled - * @throws ExecutionException if the computation threw an - * exception - * @throws InterruptedException if the current thread is not a - * member of a ForkJoinPool and was interrupted while waiting - * @throws TimeoutException if the wait timed out - */ - public final V get(long timeout, TimeUnit unit) - throws InterruptedException, ExecutionException, TimeoutException { - if (Thread.interrupted()) - throw new InterruptedException(); - // Messy in part because we measure in nanosecs, but wait in millisecs - int s; long ns, ms; - if ((s = status) >= 0 && (ns = unit.toNanos(timeout)) > 0L) { - long deadline = System.nanoTime() + ns; - ForkJoinPool p = null; - ForkJoinPool.WorkQueue w = null; - Thread t = Thread.currentThread(); - if (t instanceof ForkJoinWorkerThread) { - ForkJoinWorkerThread wt = (ForkJoinWorkerThread)t; - p = wt.pool; - w = wt.workQueue; - s = p.helpJoinOnce(w, this); // no retries on failure - } - boolean canBlock = false; - boolean interrupted = false; - try { - while ((s = status) >= 0) { - if (w != null && w.runState < 0) - cancelIgnoringExceptions(this); - else if (!canBlock) { - if (p == null || p.tryCompensate(this, null)) - canBlock = true; - } - else { - if ((ms = TimeUnit.NANOSECONDS.toMillis(ns)) > 0L && - U.compareAndSwapInt(this, STATUS, s, s | SIGNAL)) { - synchronized (this) { - if (status >= 0) { - try { - wait(ms); - } catch (InterruptedException ie) { - if (p == null) - interrupted = true; - } - } - else - notifyAll(); - } - } - if ((s = status) < 0 || interrupted || - (ns = deadline - System.nanoTime()) <= 0L) - break; - } - } - } finally { - if (p != null && canBlock) - p.incrementActiveCount(); - } - if (interrupted) - throw new InterruptedException(); - } - if ((s &= DONE_MASK) != NORMAL) { - Throwable ex; - if (s == CANCELLED) - throw new CancellationException(); - if (s != EXCEPTIONAL) - throw new TimeoutException(); - if ((ex = getThrowableException()) != null) - throw new ExecutionException(ex); - } - return getRawResult(); - } - - /** - * Joins this task, without returning its result or throwing its - * exception. This method may be useful when processing - * collections of tasks when some have been cancelled or otherwise - * known to have aborted. - */ - public final void quietlyJoin() { - doJoin(); - } - - /** - * Commences performing this task and awaits its completion if - * necessary, without returning its result or throwing its - * exception. 
- */ - public final void quietlyInvoke() { - doInvoke(); - } - - /** - * Possibly executes tasks until the pool hosting the current task - * {@link ForkJoinPool#isQuiescent is quiescent}. This method may - * be of use in designs in which many tasks are forked, but none - * are explicitly joined, instead executing them until all are - * processed. - * - *

This method may be invoked only from within {@code - * ForkJoinPool} computations (as may be determined using method - * {@link #inForkJoinPool}). Attempts to invoke in other contexts - * result in exceptions or errors, possibly including {@code - * ClassCastException}. - */ - public static void helpQuiesce() { - ForkJoinWorkerThread wt = - (ForkJoinWorkerThread)Thread.currentThread(); - wt.pool.helpQuiescePool(wt.workQueue); - } - - /** - * Resets the internal bookkeeping state of this task, allowing a - * subsequent {@code fork}. This method allows repeated reuse of - * this task, but only if reuse occurs when this task has either - * never been forked, or has been forked, then completed and all - * outstanding joins of this task have also completed. Effects - * under any other usage conditions are not guaranteed. - * This method may be useful when executing - * pre-constructed trees of subtasks in loops. - * - *

Upon completion of this method, {@code isDone()} reports - * {@code false}, and {@code getException()} reports {@code - * null}. However, the value returned by {@code getRawResult} is - * unaffected. To clear this value, you can invoke {@code - * setRawResult(null)}. - */ - public void reinitialize() { - if ((status & DONE_MASK) == EXCEPTIONAL) - clearExceptionalCompletion(); - else - status = 0; - } - - /** - * Returns the pool hosting the current task execution, or null - * if this task is executing outside of any ForkJoinPool. - * - * @see #inForkJoinPool - * @return the pool, or {@code null} if none - */ - public static ForkJoinPool getPool() { - Thread t = Thread.currentThread(); - return (t instanceof ForkJoinWorkerThread) ? - ((ForkJoinWorkerThread) t).pool : null; - } - - /** - * Returns {@code true} if the current thread is a {@link - * ForkJoinWorkerThread} executing as a ForkJoinPool computation. - * - * @return {@code true} if the current thread is a {@link - * ForkJoinWorkerThread} executing as a ForkJoinPool computation, - * or {@code false} otherwise - */ - public static boolean inForkJoinPool() { - return Thread.currentThread() instanceof ForkJoinWorkerThread; - } - - /** - * Tries to unschedule this task for execution. This method will - * typically succeed if this task is the most recently forked task - * by the current thread, and has not commenced executing in - * another thread. This method may be useful when arranging - * alternative local processing of tasks that could have been, but - * were not, stolen. - * - *

This method may be invoked only from within {@code - * ForkJoinPool} computations (as may be determined using method - * {@link #inForkJoinPool}). Attempts to invoke in other contexts - * result in exceptions or errors, possibly including {@code - * ClassCastException}. - * - * @return {@code true} if unforked - */ - public boolean tryUnfork() { - return ((ForkJoinWorkerThread)Thread.currentThread()) - .workQueue.tryUnpush(this); - } - - /** - * Returns an estimate of the number of tasks that have been - * forked by the current worker thread but not yet executed. This - * value may be useful for heuristic decisions about whether to - * fork other tasks. - * - *

This method may be invoked only from within {@code - * ForkJoinPool} computations (as may be determined using method - * {@link #inForkJoinPool}). Attempts to invoke in other contexts - * result in exceptions or errors, possibly including {@code - * ClassCastException}. - * - * @return the number of tasks - */ - public static int getQueuedTaskCount() { - return ((ForkJoinWorkerThread) Thread.currentThread()) - .workQueue.queueSize(); - } - - /** - * Returns an estimate of how many more locally queued tasks are - * held by the current worker thread than there are other worker - * threads that might steal them. This value may be useful for - * heuristic decisions about whether to fork other tasks. In many - * usages of ForkJoinTasks, at steady state, each worker should - * aim to maintain a small constant surplus (for example, 3) of - * tasks, and to process computations locally if this threshold is - * exceeded. - * - *

This method may be invoked only from within {@code - * ForkJoinPool} computations (as may be determined using method - * {@link #inForkJoinPool}). Attempts to invoke in other contexts - * result in exceptions or errors, possibly including {@code - * ClassCastException}. - * - * @return the surplus number of tasks, which may be negative - */ - public static int getSurplusQueuedTaskCount() { - /* - * The aim of this method is to return a cheap heuristic guide - * for task partitioning when programmers, frameworks, tools, - * or languages have little or no idea about task granularity. - * In essence by offering this method, we ask users only about - * tradeoffs in overhead vs expected throughput and its - * variance, rather than how finely to partition tasks. - * - * In a steady state strict (tree-structured) computation, - * each thread makes available for stealing enough tasks for - * other threads to remain active. Inductively, if all threads - * play by the same rules, each thread should make available - * only a constant number of tasks. - * - * The minimum useful constant is just 1. But using a value of - * 1 would require immediate replenishment upon each steal to - * maintain enough tasks, which is infeasible. Further, - * partitionings/granularities of offered tasks should - * minimize steal rates, which in general means that threads - * nearer the top of computation tree should generate more - * than those nearer the bottom. In perfect steady state, each - * thread is at approximately the same level of computation - * tree. However, producing extra tasks amortizes the - * uncertainty of progress and diffusion assumptions. - * - * So, users will want to use values larger, but not much - * larger than 1 to both smooth over transient shortages and - * hedge against uneven progress; as traded off against the - * cost of extra task overhead. We leave the user to pick a - * threshold value to compare with the results of this call to - * guide decisions, but recommend values such as 3. - * - * When all threads are active, it is on average OK to - * estimate surplus strictly locally. In steady-state, if one - * thread is maintaining say 2 surplus tasks, then so are - * others. So we can just use estimated queue length. - * However, this strategy alone leads to serious mis-estimates - * in some non-steady-state conditions (ramp-up, ramp-down, - * other stalls). We can detect many of these by further - * considering the number of "idle" threads, that are known to - * have zero queued tasks, so compensate by a factor of - * (#idle/#active) threads. - */ - ForkJoinWorkerThread wt = - (ForkJoinWorkerThread)Thread.currentThread(); - return wt.workQueue.queueSize() - wt.pool.idlePerActive(); - } - - // Extension methods - - /** - * Returns the result that would be returned by {@link #join}, even - * if this task completed abnormally, or {@code null} if this task - * is not known to have been completed. This method is designed - * to aid debugging, as well as to support extensions. Its use in - * any other context is discouraged. - * - * @return the result, or {@code null} if not completed - */ - public abstract V getRawResult(); - - /** - * Forces the given value to be returned as a result. This method - * is designed to support extensions, and should not in general be - * called otherwise. - * - * @param value the value - */ - protected abstract void setRawResult(V value); - - /** - * Immediately performs the base action of this task. 
This method - * is designed to support extensions, and should not in general be - * called otherwise. The return value controls whether this task - * is considered to be done normally. It may return false in - * asynchronous actions that require explicit invocations of - * {@link #complete} to become joinable. It may also throw an - * (unchecked) exception to indicate abnormal exit. - * - * @return {@code true} if completed normally - */ - protected abstract boolean exec(); - - /** - * Returns, but does not unschedule or execute, a task queued by - * the current thread but not yet executed, if one is immediately - * available. There is no guarantee that this task will actually - * be polled or executed next. Conversely, this method may return - * null even if a task exists but cannot be accessed without - * contention with other threads. This method is designed - * primarily to support extensions, and is unlikely to be useful - * otherwise. - * - *

This method may be invoked only from within {@code - * ForkJoinPool} computations (as may be determined using method - * {@link #inForkJoinPool}). Attempts to invoke in other contexts - * result in exceptions or errors, possibly including {@code - * ClassCastException}. - * - * @return the next task, or {@code null} if none are available - */ - protected static ForkJoinTask peekNextLocalTask() { - return ((ForkJoinWorkerThread) Thread.currentThread()).workQueue.peek(); - } - - /** - * Unschedules and returns, without executing, the next task - * queued by the current thread but not yet executed. This method - * is designed primarily to support extensions, and is unlikely to - * be useful otherwise. - * - *

This method may be invoked only from within {@code - * ForkJoinPool} computations (as may be determined using method - * {@link #inForkJoinPool}). Attempts to invoke in other contexts - * result in exceptions or errors, possibly including {@code - * ClassCastException}. - * - * @return the next task, or {@code null} if none are available - */ - protected static ForkJoinTask pollNextLocalTask() { - return ((ForkJoinWorkerThread) Thread.currentThread()) - .workQueue.nextLocalTask(); - } - - /** - * Unschedules and returns, without executing, the next task - * queued by the current thread but not yet executed, if one is - * available, or if not available, a task that was forked by some - * other thread, if available. Availability may be transient, so a - * {@code null} result does not necessarily imply quiescence - * of the pool this task is operating in. This method is designed - * primarily to support extensions, and is unlikely to be useful - * otherwise. - * - *

This method may be invoked only from within {@code - * ForkJoinPool} computations (as may be determined using method - * {@link #inForkJoinPool}). Attempts to invoke in other contexts - * result in exceptions or errors, possibly including {@code - * ClassCastException}. - * - * @return a task, or {@code null} if none are available - */ - protected static ForkJoinTask pollTask() { - ForkJoinWorkerThread wt = - (ForkJoinWorkerThread)Thread.currentThread(); - return wt.pool.nextTaskFor(wt.workQueue); - } - - // Mark-bit operations - - /** - * Returns true if this task is marked. - * - * @return true if this task is marked - * @since 1.8 - */ - public final boolean isMarkedForkJoinTask() { - return (status & MARKED) != 0; - } - - /** - * Atomically sets the mark on this task. - * - * @return true if this task was previously unmarked - * @since 1.8 - */ - public final boolean markForkJoinTask() { - for (int s;;) { - if (((s = status) & MARKED) != 0) - return false; - if (U.compareAndSwapInt(this, STATUS, s, s | MARKED)) - return true; - } - } - - /** - * Atomically clears the mark on this task. - * - * @return true if this task was previously marked - * @since 1.8 - */ - public final boolean unmarkForkJoinTask() { - for (int s;;) { - if (((s = status) & MARKED) == 0) - return false; - if (U.compareAndSwapInt(this, STATUS, s, s & ~MARKED)) - return true; - } - } - - /** - * Adaptor for Runnables. This implements RunnableFuture - * to be compliant with AbstractExecutorService constraints - * when used in ForkJoinPool. - */ - static final class AdaptedRunnable extends ForkJoinTask - implements RunnableFuture { - final Runnable runnable; - T result; - AdaptedRunnable(Runnable runnable, T result) { - if (runnable == null) throw new NullPointerException(); - this.runnable = runnable; - this.result = result; // OK to set this even before completion - } - public final T getRawResult() { return result; } - public final void setRawResult(T v) { result = v; } - public final boolean exec() { runnable.run(); return true; } - public final void run() { invoke(); } - private static final long serialVersionUID = 5232453952276885070L; - } - - /** - * Adaptor for Runnables without results - */ - static final class AdaptedRunnableAction extends ForkJoinTask - implements RunnableFuture { - final Runnable runnable; - AdaptedRunnableAction(Runnable runnable) { - if (runnable == null) throw new NullPointerException(); - this.runnable = runnable; - } - public final Void getRawResult() { return null; } - public final void setRawResult(Void v) { } - public final boolean exec() { runnable.run(); return true; } - public final void run() { invoke(); } - private static final long serialVersionUID = 5232453952276885070L; - } - - /** - * Adaptor for Callables - */ - static final class AdaptedCallable extends ForkJoinTask - implements RunnableFuture { - final Callable callable; - T result; - AdaptedCallable(Callable callable) { - if (callable == null) throw new NullPointerException(); - this.callable = callable; - } - public final T getRawResult() { return result; } - public final void setRawResult(T v) { result = v; } - public final boolean exec() { - try { - result = callable.call(); - return true; - } catch (Error err) { - throw err; - } catch (RuntimeException rex) { - throw rex; - } catch (Exception ex) { - throw new RuntimeException(ex); - } - } - public final void run() { invoke(); } - private static final long serialVersionUID = 2838392045355241008L; - } - - /** - * Returns a new {@code ForkJoinTask} that performs the 
{@code run} - * method of the given {@code Runnable} as its action, and returns - * a null result upon {@link #join}. - * - * @param runnable the runnable action - * @return the task - */ - public static ForkJoinTask adapt(Runnable runnable) { - return new AdaptedRunnableAction(runnable); - } - - /** - * Returns a new {@code ForkJoinTask} that performs the {@code run} - * method of the given {@code Runnable} as its action, and returns - * the given result upon {@link #join}. - * - * @param runnable the runnable action - * @param result the result upon completion - * @return the task - */ - public static ForkJoinTask adapt(Runnable runnable, T result) { - return new AdaptedRunnable(runnable, result); - } - - /** - * Returns a new {@code ForkJoinTask} that performs the {@code call} - * method of the given {@code Callable} as its action, and returns - * its result upon {@link #join}, translating any checked exceptions - * encountered into {@code RuntimeException}. - * - * @param callable the callable action - * @return the task - */ - public static ForkJoinTask adapt(Callable callable) { - return new AdaptedCallable(callable); - } - - // Serialization support - - private static final long serialVersionUID = -7721805057305804111L; - - /** - * Saves this task to a stream (that is, serializes it). - * - * @serialData the current run status and the exception thrown - * during execution, or {@code null} if none - */ - private void writeObject(java.io.ObjectOutputStream s) - throws java.io.IOException { - s.defaultWriteObject(); - s.writeObject(getException()); - } - - /** - * Reconstitutes this task from a stream (that is, deserializes it). - */ - private void readObject(java.io.ObjectInputStream s) - throws java.io.IOException, ClassNotFoundException { - s.defaultReadObject(); - Object ex = s.readObject(); - if (ex != null) - setExceptionalCompletion((Throwable)ex); - } - - // Unsafe mechanics - private static final sun.misc.Unsafe U; - private static final long STATUS; - static { - exceptionTableLock = new ReentrantLock(); - exceptionTableRefQueue = new ReferenceQueue(); - exceptionTable = new ExceptionNode[EXCEPTION_MAP_CAPACITY]; - try { - U = getUnsafe(); - STATUS = U.objectFieldOffset - (ForkJoinTask.class.getDeclaredField("status")); - } catch (Exception e) { - throw new Error(e); - } - } - - /** - * Returns a sun.misc.Unsafe. Suitable for use in a 3rd party package. - * Replace with a simple call to Unsafe.getUnsafe when integrating - * into a jdk. - * - * @return a sun.misc.Unsafe - */ - private static sun.misc.Unsafe getUnsafe() { - return Unsafe.instance; - } -} diff --git a/akka-actor/src/main/java/akka/jsr166y/ForkJoinWorkerThread.java b/akka-actor/src/main/java/akka/jsr166y/ForkJoinWorkerThread.java deleted file mode 100644 index 4ff31f742d..0000000000 --- a/akka-actor/src/main/java/akka/jsr166y/ForkJoinWorkerThread.java +++ /dev/null @@ -1,119 +0,0 @@ -/* - * Written by Doug Lea with assistance from members of JCP JSR-166 - * Expert Group and released to the public domain, as explained at - * http://creativecommons.org/publicdomain/zero/1.0/ - */ - -package akka.jsr166y; - -/** - * A thread managed by a {@link ForkJoinPool}, which executes - * {@link ForkJoinTask}s. - * This class is subclassable solely for the sake of adding - * functionality -- there are no overridable methods dealing with - * scheduling or execution. However, you can override initialization - * and termination methods surrounding the main task processing loop. 
- * If you do create such a subclass, you will also need to supply a - * custom {@link ForkJoinPool.ForkJoinWorkerThreadFactory} to use it - * in a {@code ForkJoinPool}. - * - * @since 1.7 - * @author Doug Lea - */ -public class ForkJoinWorkerThread extends Thread { - /* - * ForkJoinWorkerThreads are managed by ForkJoinPools and perform - * ForkJoinTasks. For explanation, see the internal documentation - * of class ForkJoinPool. - */ - - final ForkJoinPool.WorkQueue workQueue; // Work-stealing mechanics - final ForkJoinPool pool; // the pool this thread works in - - /** - * Creates a ForkJoinWorkerThread operating in the given pool. - * - * @param pool the pool this thread works in - * @throws NullPointerException if pool is null - */ - protected ForkJoinWorkerThread(ForkJoinPool pool) { - super(pool.nextWorkerName()); - setDaemon(true); - Thread.UncaughtExceptionHandler ueh = pool.ueh; - if (ueh != null) - setUncaughtExceptionHandler(ueh); - this.pool = pool; - pool.registerWorker(this.workQueue = new ForkJoinPool.WorkQueue - (pool, this, pool.localMode)); - } - - /** - * Returns the pool hosting this thread. - * - * @return the pool - */ - public ForkJoinPool getPool() { - return pool; - } - - /** - * Returns the index number of this thread in its pool. The - * returned value ranges from zero to the maximum number of - * threads (minus one) that have ever been created in the pool. - * This method may be useful for applications that track status or - * collect results per-worker rather than per-task. - * - * @return the index number - */ - public int getPoolIndex() { - return workQueue.poolIndex; - } - - /** - * Initializes internal state after construction but before - * processing any tasks. If you override this method, you must - * invoke {@code super.onStart()} at the beginning of the method. - * Initialization requires care: Most fields must have legal - * default values, to ensure that attempted accesses from other - * threads work correctly even before this thread starts - * processing tasks. - */ - protected void onStart() { - } - - /** - * Performs cleanup associated with termination of this worker - * thread. If you override this method, you must invoke - * {@code super.onTermination} at the end of the overridden method. - * - * @param exception the exception causing this thread to abort due - * to an unrecoverable error, or {@code null} if completed normally - */ - protected void onTermination(Throwable exception) { - } - - /** - * This method is required to be public, but should never be - * called explicitly. It performs the main run loop to execute - * {@link ForkJoinTask}s. - */ - public void run() { - Throwable exception = null; - try { - onStart(); - pool.runWorker(workQueue); - } catch (Throwable ex) { - exception = ex; - } finally { - try { - onTermination(exception); - } catch (Throwable ex) { - if (exception == null) - exception = ex; - } finally { - pool.deregisterWorker(this, exception); - } - } - } -} - diff --git a/akka-actor/src/main/java/akka/jsr166y/RecursiveAction.java b/akka-actor/src/main/java/akka/jsr166y/RecursiveAction.java deleted file mode 100644 index c13c513171..0000000000 --- a/akka-actor/src/main/java/akka/jsr166y/RecursiveAction.java +++ /dev/null @@ -1,164 +0,0 @@ -/* - * Written by Doug Lea with assistance from members of JCP JSR-166 - * Expert Group and released to the public domain, as explained at - * http://creativecommons.org/publicdomain/zero/1.0/ - */ - -package akka.jsr166y; - -/** - * A recursive resultless {@link ForkJoinTask}. 
This class - * establishes conventions to parameterize resultless actions as - * {@code Void} {@code ForkJoinTask}s. Because {@code null} is the - * only valid value of type {@code Void}, methods such as {@code join} - * always return {@code null} upon completion. - * - *

Sample Usages. Here is a simple but complete ForkJoin - * sort that sorts a given {@code long[]} array: - * - *

 {@code
- * static class SortTask extends RecursiveAction {
- *   final long[] array; final int lo, hi;
- *   SortTask(long[] array, int lo, int hi) {
- *     this.array = array; this.lo = lo; this.hi = hi;
- *   }
- *   SortTask(long[] array) { this(array, 0, array.length); }
- *   protected void compute() {
- *     if (hi - lo < THRESHOLD)
- *       sortSequentially(lo, hi);
- *     else {
- *       int mid = (lo + hi) >>> 1;
- *       invokeAll(new SortTask(array, lo, mid),
- *                 new SortTask(array, mid, hi));
- *       merge(lo, mid, hi);
- *     }
- *   }
- *   // implementation details follow:
- *   final static int THRESHOLD = 1000;
- *   void sortSequentially(int lo, int hi) {
- *     Arrays.sort(array, lo, hi);
- *   }
- *   void merge(int lo, int mid, int hi) {
- *     long[] buf = Arrays.copyOfRange(array, lo, mid);
- *     for (int i = 0, j = lo, k = mid; i < buf.length; j++)
- *       array[j] = (k == hi || buf[i] < array[k]) ?
- *         buf[i++] : array[k++];
- *   }
- * }}
- * - * You could then sort {@code anArray} by creating {@code new - * SortTask(anArray)} and invoking it in a ForkJoinPool. As a more - * concrete simple example, the following task increments each element - * of an array: - *
 {@code
- * class IncrementTask extends RecursiveAction {
- *   final long[] array; final int lo, hi;
- *   IncrementTask(long[] array, int lo, int hi) {
- *     this.array = array; this.lo = lo; this.hi = hi;
- *   }
- *   protected void compute() {
- *     if (hi - lo < THRESHOLD) {
- *       for (int i = lo; i < hi; ++i)
- *         array[i]++;
- *     }
- *     else {
- *       int mid = (lo + hi) >>> 1;
- *       invokeAll(new IncrementTask(array, lo, mid),
- *                 new IncrementTask(array, mid, hi));
- *     }
- *   }
- * }}
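Both RecursiveAction examples above are driven the same way: build the root task and hand it to a ForkJoinPool's invoke(). A minimal, self-contained sketch of that driver follows; it is an illustration only, assuming the JDK 7+ java.util.concurrent classes (the akka.jsr166y copies removed in this diff have the same shape), and the class, field, and array-size choices here are assumptions, not part of the original javadoc.

import java.util.Arrays;
import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.RecursiveAction;

// Self-contained driver for the IncrementTask idea shown above.
public class IncrementDemo {
    static final int THRESHOLD = 1000;                       // assumed leaf cutoff

    static class IncrementTask extends RecursiveAction {
        final long[] array; final int lo, hi;
        IncrementTask(long[] array, int lo, int hi) {
            this.array = array; this.lo = lo; this.hi = hi;
        }
        protected void compute() {
            if (hi - lo < THRESHOLD) {
                for (int i = lo; i < hi; ++i) array[i]++;     // leaf: do the work directly
            } else {
                int mid = (lo + hi) >>> 1;                    // split the range in half
                invokeAll(new IncrementTask(array, lo, mid),
                          new IncrementTask(array, mid, hi));
            }
        }
    }

    public static void main(String[] args) {
        long[] data = new long[10_000];
        ForkJoinPool pool = new ForkJoinPool();               // default parallelism
        pool.invoke(new IncrementTask(data, 0, data.length)); // blocks until the tree completes
        System.out.println(Arrays.toString(Arrays.copyOf(data, 5))); // [1, 1, 1, 1, 1]
        pool.shutdown();
    }
}

invoke() blocks the calling thread until the whole task tree has completed; fork()/join() would be used instead when submitting work from inside another task already running in the pool.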
- * - *

The following example illustrates some refinements and idioms - * that may lead to better performance: RecursiveActions need not be - * fully recursive, so long as they maintain the basic - * divide-and-conquer approach. Here is a class that sums the squares - * of each element of a double array, by subdividing out only the - * right-hand-sides of repeated divisions by two, and keeping track of - * them with a chain of {@code next} references. It uses a dynamic - * threshold based on method {@code getSurplusQueuedTaskCount}, but - * counterbalances potential excess partitioning by directly - * performing leaf actions on unstolen tasks rather than further - * subdividing. - * - *

 {@code
- * double sumOfSquares(ForkJoinPool pool, double[] array) {
- *   int n = array.length;
- *   Applyer a = new Applyer(array, 0, n, null);
- *   pool.invoke(a);
- *   return a.result;
- * }
- *
- * class Applyer extends RecursiveAction {
- *   final double[] array;
- *   final int lo, hi;
- *   double result;
- *   Applyer next; // keeps track of right-hand-side tasks
- *   Applyer(double[] array, int lo, int hi, Applyer next) {
- *     this.array = array; this.lo = lo; this.hi = hi;
- *     this.next = next;
- *   }
- *
- *   double atLeaf(int l, int h) {
- *     double sum = 0;
- *     for (int i = l; i < h; ++i) // perform leftmost base step
- *       sum += array[i] * array[i];
- *     return sum;
- *   }
- *
- *   protected void compute() {
- *     int l = lo;
- *     int h = hi;
- *     Applyer right = null;
- *     while (h - l > 1 && getSurplusQueuedTaskCount() <= 3) {
- *        int mid = (l + h) >>> 1;
- *        right = new Applyer(array, mid, h, right);
- *        right.fork();
- *        h = mid;
- *     }
- *     double sum = atLeaf(l, h);
- *     while (right != null) {
- *        if (right.tryUnfork()) // directly calculate if not stolen
- *          sum += right.atLeaf(right.lo, right.hi);
- *        else {
- *          right.join();
- *          sum += right.result;
- *        }
- *        right = right.next;
- *      }
- *     result = sum;
- *   }
- * }}
- * - * @since 1.7 - * @author Doug Lea - */ -public abstract class RecursiveAction extends ForkJoinTask { - private static final long serialVersionUID = 5232453952276485070L; - - /** - * The main computation performed by this task. - */ - protected abstract void compute(); - - /** - * Always returns {@code null}. - * - * @return {@code null} always - */ - public final Void getRawResult() { return null; } - - /** - * Requires null completion value. - */ - protected final void setRawResult(Void mustBeNull) { } - - /** - * Implements execution conventions for RecursiveActions. - */ - protected final boolean exec() { - compute(); - return true; - } - -} diff --git a/akka-actor/src/main/java/akka/jsr166y/RecursiveTask.java b/akka-actor/src/main/java/akka/jsr166y/RecursiveTask.java deleted file mode 100644 index 12378ee6c8..0000000000 --- a/akka-actor/src/main/java/akka/jsr166y/RecursiveTask.java +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Written by Doug Lea with assistance from members of JCP JSR-166 - * Expert Group and released to the public domain, as explained at - * http://creativecommons.org/publicdomain/zero/1.0/ - */ - -package akka.jsr166y; - -/** - * A recursive result-bearing {@link ForkJoinTask}. - * - *

For a classic example, here is a task computing Fibonacci numbers: - * - *

 {@code
- * class Fibonacci extends RecursiveTask<Integer> {
- *   final int n;
- *   Fibonacci(int n) { this.n = n; }
- *   Integer compute() {
- *     if (n <= 1)
- *        return n;
- *     Fibonacci f1 = new Fibonacci(n - 1);
- *     f1.fork();
- *     Fibonacci f2 = new Fibonacci(n - 2);
- *     return f2.compute() + f1.join();
- *   }
- * }}
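The granularity caveat spelled out in the next paragraph (always solve sufficiently small subproblems sequentially instead of subdividing further) can be sketched as follows. This is a hedged illustration, not part of the original javadoc: the cutoff of 10 and the iterative seqFib fallback are assumed choices, and java.util.concurrent.RecursiveTask (JDK 7+) stands in for the class being removed here.

import java.util.concurrent.ForkJoinPool;
import java.util.concurrent.RecursiveTask;

public class FibDemo {
    static class Fib extends RecursiveTask<Integer> {
        final int n;
        Fib(int n) { this.n = n; }
        protected Integer compute() {
            if (n <= 10) return seqFib(n);          // sequential cutoff (assumed value)
            Fib f1 = new Fib(n - 1);
            f1.fork();                              // make n-1 available for stealing
            Fib f2 = new Fib(n - 2);
            return f2.compute() + f1.join();        // compute n-2 locally, then join
        }
        static int seqFib(int n) {                  // simple iterative fallback
            int a = 0, b = 1;
            for (int i = 0; i < n; i++) { int t = a + b; a = b; b = t; }
            return a;
        }
    }

    public static void main(String[] args) {
        System.out.println(new ForkJoinPool().invoke(new Fib(30)));  // prints 832040
    }
}

Forking one half and computing the other directly leaves at most one queued task behind per recursion level, which keeps steal traffic low while still exposing parallelism.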
- * - * However, besides being a dumb way to compute Fibonacci functions - * (there is a simple fast linear algorithm that you'd use in - * practice), this is likely to perform poorly because the smallest - * subtasks are too small to be worthwhile splitting up. Instead, as - * is the case for nearly all fork/join applications, you'd pick some - * minimum granularity size (for example 10 here) for which you always - * sequentially solve rather than subdividing. - * - * @since 1.7 - * @author Doug Lea - */ -public abstract class RecursiveTask extends ForkJoinTask { - private static final long serialVersionUID = 5232453952276485270L; - - /** - * The result of the computation. - */ - V result; - - /** - * The main computation performed by this task. - */ - protected abstract V compute(); - - public final V getRawResult() { - return result; - } - - protected final void setRawResult(V value) { - result = value; - } - - /** - * Implements execution conventions for RecursiveTask. - */ - protected final boolean exec() { - result = compute(); - return true; - } - -} diff --git a/akka-actor/src/main/java/akka/jsr166y/ThreadLocalRandom.java b/akka-actor/src/main/java/akka/jsr166y/ThreadLocalRandom.java deleted file mode 100644 index d2dbd58120..0000000000 --- a/akka-actor/src/main/java/akka/jsr166y/ThreadLocalRandom.java +++ /dev/null @@ -1,197 +0,0 @@ -/* - * Written by Doug Lea with assistance from members of JCP JSR-166 - * Expert Group and released to the public domain, as explained at - * http://creativecommons.org/publicdomain/zero/1.0/ - */ - -package akka.jsr166y; - -import java.util.Random; - -/** - * A random number generator isolated to the current thread. Like the - * global {@link java.util.Random} generator used by the {@link - * java.lang.Math} class, a {@code ThreadLocalRandom} is initialized - * with an internally generated seed that may not otherwise be - * modified. When applicable, use of {@code ThreadLocalRandom} rather - * than shared {@code Random} objects in concurrent programs will - * typically encounter much less overhead and contention. Use of - * {@code ThreadLocalRandom} is particularly appropriate when multiple - * tasks (for example, each a {@link ForkJoinTask}) use random numbers - * in parallel in thread pools. - * - *

Usages of this class should typically be of the form: - * {@code ThreadLocalRandom.current().nextX(...)} (where - * {@code X} is {@code Int}, {@code Long}, etc). - * When all usages are of this form, it is never possible to - * accidentally share a {@code ThreadLocalRandom} across multiple threads. - * - *
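A minimal sketch of the recommended ThreadLocalRandom.current().nextX(...) call pattern, assuming the JDK 7+ java.util.concurrent.ThreadLocalRandom (the copy removed in this diff exposes the same bounded methods); the variable names and bounds are illustrative only.

import java.util.concurrent.ThreadLocalRandom;

public class ThreadLocalRandomDemo {
    public static void main(String[] args) {
        // Always go through current(); never store the instance in a field
        // that other threads might read.
        int die = ThreadLocalRandom.current().nextInt(1, 7);          // uniform in [1, 7)
        long delay = ThreadLocalRandom.current().nextLong(100L);      // uniform in [0, 100)
        double jitter = ThreadLocalRandom.current().nextDouble(0.0, 1.0);
        System.out.println(die + " " + delay + " " + jitter);
    }
}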

This class also provides additional commonly used bounded random - * generation methods. - * - * @since 1.7 - * @author Doug Lea - */ -public class ThreadLocalRandom extends Random { - // same constants as Random, but must be redeclared because private - private static final long multiplier = 0x5DEECE66DL; - private static final long addend = 0xBL; - private static final long mask = (1L << 48) - 1; - - /** - * The random seed. We can't use super.seed. - */ - private long rnd; - - /** - * Initialization flag to permit calls to setSeed to succeed only - * while executing the Random constructor. We can't allow others - * since it would cause setting seed in one part of a program to - * unintentionally impact other usages by the thread. - */ - boolean initialized; - - // Padding to help avoid memory contention among seed updates in - // different TLRs in the common case that they are located near - // each other. - private long pad0, pad1, pad2, pad3, pad4, pad5, pad6, pad7; - - /** - * The actual ThreadLocal - */ - private static final ThreadLocal localRandom = - new ThreadLocal() { - protected ThreadLocalRandom initialValue() { - return new ThreadLocalRandom(); - } - }; - - - /** - * Constructor called only by localRandom.initialValue. - */ - ThreadLocalRandom() { - super(); - initialized = true; - } - - /** - * Returns the current thread's {@code ThreadLocalRandom}. - * - * @return the current thread's {@code ThreadLocalRandom} - */ - public static ThreadLocalRandom current() { - return localRandom.get(); - } - - /** - * Throws {@code UnsupportedOperationException}. Setting seeds in - * this generator is not supported. - * - * @throws UnsupportedOperationException always - */ - public void setSeed(long seed) { - if (initialized) - throw new UnsupportedOperationException(); - rnd = (seed ^ multiplier) & mask; - } - - protected int next(int bits) { - rnd = (rnd * multiplier + addend) & mask; - return (int) (rnd >>> (48-bits)); - } - - /** - * Returns a pseudorandom, uniformly distributed value between the - * given least value (inclusive) and bound (exclusive). - * - * @param least the least value returned - * @param bound the upper bound (exclusive) - * @throws IllegalArgumentException if least greater than or equal - * to bound - * @return the next value - */ - public int nextInt(int least, int bound) { - if (least >= bound) - throw new IllegalArgumentException(); - return nextInt(bound - least) + least; - } - - /** - * Returns a pseudorandom, uniformly distributed value - * between 0 (inclusive) and the specified value (exclusive). - * - * @param n the bound on the random number to be returned. Must be - * positive. - * @return the next value - * @throws IllegalArgumentException if n is not positive - */ - public long nextLong(long n) { - if (n <= 0) - throw new IllegalArgumentException("n must be positive"); - // Divide n by two until small enough for nextInt. On each - // iteration (at most 31 of them but usually much less), - // randomly choose both whether to include high bit in result - // (offset) and whether to continue with the lower vs upper - // half (which makes a difference only if odd). - long offset = 0; - while (n >= Integer.MAX_VALUE) { - int bits = next(2); - long half = n >>> 1; - long nextn = ((bits & 2) == 0) ? half : n - half; - if ((bits & 1) == 0) - offset += n - nextn; - n = nextn; - } - return offset + nextInt((int) n); - } - - /** - * Returns a pseudorandom, uniformly distributed value between the - * given least value (inclusive) and bound (exclusive). 
- * - * @param least the least value returned - * @param bound the upper bound (exclusive) - * @return the next value - * @throws IllegalArgumentException if least greater than or equal - * to bound - */ - public long nextLong(long least, long bound) { - if (least >= bound) - throw new IllegalArgumentException(); - return nextLong(bound - least) + least; - } - - /** - * Returns a pseudorandom, uniformly distributed {@code double} value - * between 0 (inclusive) and the specified value (exclusive). - * - * @param n the bound on the random number to be returned. Must be - * positive. - * @return the next value - * @throws IllegalArgumentException if n is not positive - */ - public double nextDouble(double n) { - if (n <= 0) - throw new IllegalArgumentException("n must be positive"); - return nextDouble() * n; - } - - /** - * Returns a pseudorandom, uniformly distributed value between the - * given least value (inclusive) and bound (exclusive). - * - * @param least the least value returned - * @param bound the upper bound (exclusive) - * @return the next value - * @throws IllegalArgumentException if least greater than or equal - * to bound - */ - public double nextDouble(double least, double bound) { - if (least >= bound) - throw new IllegalArgumentException(); - return nextDouble() * (bound - least) + least; - } - - private static final long serialVersionUID = -5851777807851030925L; -} diff --git a/akka-actor/src/main/java/akka/util/internal/HashedWheelTimer.java b/akka-actor/src/main/java/akka/util/internal/HashedWheelTimer.java index 7a497b8442..5bf04bd7c9 100644 --- a/akka-actor/src/main/java/akka/util/internal/HashedWheelTimer.java +++ b/akka-actor/src/main/java/akka/util/internal/HashedWheelTimer.java @@ -16,7 +16,7 @@ package akka.util.internal; import akka.event.LoggingAdapter; -import akka.util.Duration; +import scala.concurrent.util.Duration; import java.util.*; import java.util.concurrent.ThreadFactory; diff --git a/akka-actor/src/main/java/akka/util/internal/SystemPropertyUtil.java b/akka-actor/src/main/java/akka/util/internal/SystemPropertyUtil.java deleted file mode 100644 index affef54bfc..0000000000 --- a/akka-actor/src/main/java/akka/util/internal/SystemPropertyUtil.java +++ /dev/null @@ -1,89 +0,0 @@ -/* - * Copyright 2009 Red Hat, Inc. - * - * Red Hat licenses this file to you under the Apache License, version 2.0 - * (the "License"); you may not use this file except in compliance with the - * License. You may obtain a copy of the License at: - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - * License for the specific language governing permissions and limitations - * under the License. - */ -package akka.util.internal; - -import java.util.regex.Pattern; - -/** - * Accesses the system property swallowing a {@link SecurityException}. - * - * @author The Netty Project - * @author Trustin Lee - * - * @version $Rev: 2161 $, $Date: 2010-02-18 11:12:15 +0900 (Thu, 18 Feb 2010) $ - * - */ -public class SystemPropertyUtil { - - /** - * Returns the value of the Java system property with the specified - * {@code key}. - * - * @return the property value. - * {@code null} if there's no such property or if an access to the - * specified property is not allowed. 
- */ - public static String get(String key) { - try { - return System.getProperty(key); - } catch (Exception e) { - return null; - } - } - - /** - * Returns the value of the Java system property with the specified - * {@code key}, while falling back to the specified default value if - * the property access fails. - * - * @return the property value. - * {@code def} if there's no such property or if an access to the - * specified property is not allowed. - */ - public static String get(String key, String def) { - String value = get(key); - if (value == null) { - value = def; - } - return value; - } - - /** - * Returns the value of the Java system property with the specified - * {@code key}, while falling back to the specified default value if - * the property access fails. - * - * @return the property value. - * {@code def} if there's no such property or if an access to the - * specified property is not allowed. - */ - public static int get(String key, int def) { - String value = get(key); - if (value == null) { - return def; - } - - if (Pattern.matches("-?[0-9]+", value)) { - return Integer.parseInt(value); - } else { - return def; - } - } - - private SystemPropertyUtil() { - // Unused - } -} diff --git a/akka-actor/src/main/java/akka/util/internal/Timer.java b/akka-actor/src/main/java/akka/util/internal/Timer.java index 9cb02794de..7086aef9c6 100644 --- a/akka-actor/src/main/java/akka/util/internal/Timer.java +++ b/akka-actor/src/main/java/akka/util/internal/Timer.java @@ -15,7 +15,7 @@ */ package akka.util.internal; -import akka.util.Duration; +import scala.concurrent.util.Duration; import java.util.Set; import java.util.concurrent.TimeUnit; diff --git a/akka-actor/src/main/scala/akka/actor/Actor.scala b/akka-actor/src/main/scala/akka/actor/Actor.scala index 8b9476efe9..c1ae9c57bf 100644 --- a/akka-actor/src/main/scala/akka/actor/Actor.scala +++ b/akka-actor/src/main/scala/akka/actor/Actor.scala @@ -122,12 +122,36 @@ class InvalidActorNameException(message: String) extends AkkaException(message) /** * An ActorInitializationException is thrown when the the initialization logic for an Actor fails. */ -class ActorInitializationException private[akka] (actor: ActorRef, message: String, cause: Throwable) - extends AkkaException(message, cause) /*with NoStackTrace*/ { +class ActorInitializationException private[akka] (val actor: ActorRef, message: String, cause: Throwable) + extends AkkaException(message, cause) { def this(msg: String) = this(null, msg, null) def this(actor: ActorRef, msg: String) = this(actor, msg, null) } +/** + * A PreRestartException is thrown when the preRestart() method failed. + * + * @param actor is the actor whose preRestart() hook failed + * @param cause is the exception thrown by that actor within preRestart() + * @param origCause is the exception which caused the restart in the first place + * @param msg is the message which was optionally passed into preRestart() + */ +class PreRestartException private[akka] (actor: ActorRef, cause: Throwable, val origCause: Throwable, val msg: Option[Any]) + extends ActorInitializationException(actor, "exception in preRestart(" + origCause.getClass + ", " + msg.map(_.getClass) + ")", cause) { +} + +/** + * A PostRestartException is thrown when constructor or postRestart() method + * fails during a restart attempt. 
+ * + * @param actor is the actor whose constructor or postRestart() hook failed + * @param cause is the exception thrown by that actor within preRestart() + * @param origCause is the exception which caused the restart in the first place + */ +class PostRestartException private[akka] (actor: ActorRef, cause: Throwable, val origCause: Throwable) + extends ActorInitializationException(actor, "exception post restart (" + origCause.getClass + ")", cause) { +} + /** * InvalidMessageException is thrown when an invalid message is sent to an Actor. * Technically it's only "null" which is an InvalidMessageException but who knows, diff --git a/akka-actor/src/main/scala/akka/actor/ActorCell.scala b/akka-actor/src/main/scala/akka/actor/ActorCell.scala index f922daea70..6650fffa94 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorCell.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorCell.scala @@ -4,20 +4,17 @@ package akka.actor -import akka.dispatch._ -import scala.annotation.tailrec -import java.util.concurrent.TimeUnit -import java.util.concurrent.TimeUnit.MILLISECONDS -import akka.event.Logging.{ Debug, Warning, Error } -import akka.japi.Procedure -import java.io.{ NotSerializableException, ObjectOutputStream } -import akka.serialization.SerializationExtension -import akka.event.Logging.LogEventException -import collection.immutable.{ TreeSet, TreeMap } -import akka.util.{ Unsafe, Duration, Helpers, NonFatal } -import java.util.concurrent.atomic.AtomicLong +import java.io.{ ObjectOutputStream, NotSerializableException } -//TODO: everything here for current compatibility - could be limited more +import scala.annotation.tailrec +import scala.collection.immutable.TreeSet +import scala.concurrent.util.Duration +import scala.util.control.NonFatal + +import akka.actor.cell.ChildrenContainer +import akka.dispatch.{ Watch, Unwatch, Terminate, SystemMessage, Suspend, Supervise, Resume, Recreate, NoMessage, MessageDispatcher, Envelope, Create, ChildTerminated } +import akka.event.Logging.{ LogEvent, Debug } +import akka.japi.Procedure /** * The actor context - the view of the actor cell from the actor. @@ -93,9 +90,9 @@ trait ActorContext extends ActorRefFactory { def sender: ActorRef /** - * Returns all supervised children; this method returns a view onto the - * internal collection of children. Targeted lookups should be using - * `actorFor` instead for performance reasons: + * Returns all supervised children; this method returns a view (i.e. a lazy + * collection) onto the internal collection of children. Targeted lookups + * should be using `actorFor` instead for performance reasons: * * {{{ * val badLookup = context.children find (_.path.name == "kid") @@ -191,7 +188,7 @@ private[akka] trait Cell { /** * Recursively resume this actor and all its children. */ - def resume(): Unit + def resume(inResponseToFailure: Boolean): Unit /** * Restart this actor (will recursively restart or stop all children). */ @@ -212,7 +209,11 @@ private[akka] trait Cell { /** * All children of this actor, including only reserved-names. */ - def childrenRefs: ActorCell.ChildrenContainer + def childrenRefs: ChildrenContainer + /** + * Get the stats for the named child, if that exists. + */ + def getChildByName(name: String): Option[ChildRestartStats] /** * Enqueue a message to be sent to the actor; may or may not actually * schedule the actor to run, depending on which type of cell it is. 
@@ -255,436 +256,102 @@ private[akka] object ActorCell { def cancel() {} } - final val emptyReceiveTimeoutData: (Duration, Cancellable) = (Duration.Undefined, emptyCancellable) - final val emptyBehaviorStack: List[Actor.Receive] = Nil final val emptyActorRefSet: Set[ActorRef] = TreeSet.empty - - sealed trait SuspendReason - case object UserRequest extends SuspendReason - case class Recreation(cause: Throwable) extends SuspendReason - case object Termination extends SuspendReason - - trait ChildrenContainer { - def add(child: ActorRef): ChildrenContainer - def remove(child: ActorRef): ChildrenContainer - def getByName(name: String): Option[ChildRestartStats] - def getByRef(actor: ActorRef): Option[ChildRestartStats] - def children: Iterable[ActorRef] - def stats: Iterable[ChildRestartStats] - def shallDie(actor: ActorRef): ChildrenContainer - /** - * reserve that name or throw an exception - */ - def reserve(name: String): ChildrenContainer - /** - * cancel a reservation - */ - def unreserve(name: String): ChildrenContainer - } - - trait EmptyChildrenContainer extends ChildrenContainer { - val emptyStats = TreeMap.empty[String, ChildStats] - def add(child: ActorRef): ChildrenContainer = - new NormalChildrenContainer(emptyStats.updated(child.path.name, ChildRestartStats(child))) - def remove(child: ActorRef): ChildrenContainer = this - def getByName(name: String): Option[ChildRestartStats] = None - def getByRef(actor: ActorRef): Option[ChildRestartStats] = None - def children: Iterable[ActorRef] = Nil - def stats: Iterable[ChildRestartStats] = Nil - def shallDie(actor: ActorRef): ChildrenContainer = this - def reserve(name: String): ChildrenContainer = new NormalChildrenContainer(emptyStats.updated(name, ChildNameReserved)) - def unreserve(name: String): ChildrenContainer = this - override def toString = "no children" - } - - /** - * This is the empty container, shared among all leaf actors. - */ - object EmptyChildrenContainer extends EmptyChildrenContainer - - /** - * This is the empty container which is installed after the last child has - * terminated while stopping; it is necessary to distinguish from the normal - * empty state while calling handleChildTerminated() for the last time. - */ - object TerminatedChildrenContainer extends EmptyChildrenContainer { - override def add(child: ActorRef): ChildrenContainer = this - override def reserve(name: String): ChildrenContainer = - throw new IllegalStateException("cannot reserve actor name '" + name + "': already terminated") - } - - /** - * Normal children container: we do have at least one child, but none of our - * children are currently terminating (which is the time period between - * calling context.stop(child) and processing the ChildTerminated() system - * message). 
- */ - class NormalChildrenContainer(c: TreeMap[String, ChildStats]) extends ChildrenContainer { - - def add(child: ActorRef): ChildrenContainer = - new NormalChildrenContainer(c.updated(child.path.name, ChildRestartStats(child))) - - def remove(child: ActorRef): ChildrenContainer = NormalChildrenContainer(c - child.path.name) - - def getByName(name: String): Option[ChildRestartStats] = c.get(name) match { - case s @ Some(_: ChildRestartStats) ⇒ s.asInstanceOf[Option[ChildRestartStats]] - case _ ⇒ None - } - - def getByRef(actor: ActorRef): Option[ChildRestartStats] = c.get(actor.path.name) match { - case c @ Some(crs: ChildRestartStats) if (crs.child == actor) ⇒ c.asInstanceOf[Option[ChildRestartStats]] - case _ ⇒ None - } - - def children: Iterable[ActorRef] = c.values.view.collect { case ChildRestartStats(child, _, _) ⇒ child } - - def stats: Iterable[ChildRestartStats] = c.values.collect { case c: ChildRestartStats ⇒ c } - - def shallDie(actor: ActorRef): ChildrenContainer = TerminatingChildrenContainer(c, Set(actor), UserRequest) - - def reserve(name: String): ChildrenContainer = - if (c contains name) - throw new InvalidActorNameException("actor name " + name + " is not unique!") - else new NormalChildrenContainer(c.updated(name, ChildNameReserved)) - - def unreserve(name: String): ChildrenContainer = c.get(name) match { - case Some(ChildNameReserved) ⇒ NormalChildrenContainer(c - name) - case _ ⇒ this - } - - override def toString = - if (c.size > 20) c.size + " children" - else c.mkString("children:\n ", "\n ", "") - } - - object NormalChildrenContainer { - def apply(c: TreeMap[String, ChildStats]): ChildrenContainer = - if (c.isEmpty) EmptyChildrenContainer - else new NormalChildrenContainer(c) - } - - /** - * Waiting state: there are outstanding termination requests (i.e. context.stop(child) - * was called but the corresponding ChildTerminated() system message has not yet been - * processed). There could be no specific reason (UserRequested), we could be Restarting - * or Terminating. - * - * Removing the last child which was supposed to be terminating will return a different - * type of container, depending on whether or not children are left and whether or not - * the reason was “Terminating”. 
- */ - case class TerminatingChildrenContainer(c: TreeMap[String, ChildStats], toDie: Set[ActorRef], reason: SuspendReason) - extends ChildrenContainer { - - def add(child: ActorRef): ChildrenContainer = copy(c.updated(child.path.name, ChildRestartStats(child))) - - def remove(child: ActorRef): ChildrenContainer = { - val t = toDie - child - if (t.isEmpty) reason match { - case Termination ⇒ TerminatedChildrenContainer - case _ ⇒ NormalChildrenContainer(c - child.path.name) - } - else copy(c - child.path.name, t) - } - - def getByName(name: String): Option[ChildRestartStats] = c.get(name) match { - case s @ Some(_: ChildRestartStats) ⇒ s.asInstanceOf[Option[ChildRestartStats]] - case _ ⇒ None - } - - def getByRef(actor: ActorRef): Option[ChildRestartStats] = c.get(actor.path.name) match { - case c @ Some(crs: ChildRestartStats) if (crs.child == actor) ⇒ c.asInstanceOf[Option[ChildRestartStats]] - case _ ⇒ None - } - - def children: Iterable[ActorRef] = c.values.view.collect { case ChildRestartStats(child, _, _) ⇒ child } - - def stats: Iterable[ChildRestartStats] = c.values.collect { case c: ChildRestartStats ⇒ c } - - def shallDie(actor: ActorRef): ChildrenContainer = copy(toDie = toDie + actor) - - def reserve(name: String): ChildrenContainer = reason match { - case Termination ⇒ throw new IllegalStateException("cannot reserve actor name '" + name + "': terminating") - case _ ⇒ - if (c contains name) - throw new InvalidActorNameException("actor name " + name + " is not unique!") - else copy(c = c.updated(name, ChildNameReserved)) - } - - def unreserve(name: String): ChildrenContainer = c.get(name) match { - case Some(ChildNameReserved) ⇒ copy(c = c - name) - case _ ⇒ this - } - - override def toString = - if (c.size > 20) c.size + " children" - else c.mkString("children (" + toDie.size + " terminating):\n ", "\n ", "\n") + toDie - } } //ACTORCELL IS 64bytes and should stay that way unless very good reason not to (machine sympathy, cache line fit) //vars don't need volatile since it's protected with the mailbox status //Make sure that they are not read/written outside of a message processing (systemInvoke/invoke) +/** + * Everything in here is completely Akka PRIVATE. You will not find any + * supported APIs in this place. This is not the API you were looking + * for! 
(waves hand) + */ private[akka] class ActorCell( val system: ActorSystemImpl, val self: InternalActorRef, val props: Props, - @volatile var parent: InternalActorRef) extends UntypedActorContext with Cell { + val parent: InternalActorRef) + extends UntypedActorContext with Cell + with cell.ReceiveTimeout + with cell.Children + with cell.Dispatch + with cell.DeathWatch + with cell.FaultHandling { - import AbstractActorCell.{ mailboxOffset, childrenOffset, nextNameOffset } import ActorCell._ final def isLocal = true final def systemImpl = system - protected final def guardian = self - protected final def lookupRoot = self - final def provider = system.provider - override final def receiveTimeout: Option[Duration] = receiveTimeoutData._1 match { - case Duration.Undefined ⇒ None - case duration ⇒ Some(duration) - } - - final def setReceiveTimeout(timeout: Option[Duration]): Unit = setReceiveTimeout(timeout.getOrElse(Duration.Undefined)) - - override final def setReceiveTimeout(timeout: Duration): Unit = - receiveTimeoutData = ( - if (Duration.Undefined == timeout || timeout.toMillis < 1) Duration.Undefined else timeout, - receiveTimeoutData._2) - - final override def resetReceiveTimeout(): Unit = setReceiveTimeout(None) - - /** - * In milliseconds - */ - var receiveTimeoutData: (Duration, Cancellable) = emptyReceiveTimeoutData - - @volatile - private var _childrenRefsDoNotCallMeDirectly: ChildrenContainer = EmptyChildrenContainer - - def childrenRefs: ChildrenContainer = Unsafe.instance.getObjectVolatile(this, childrenOffset).asInstanceOf[ChildrenContainer] - - private def swapChildrenRefs(oldChildren: ChildrenContainer, newChildren: ChildrenContainer): Boolean = - Unsafe.instance.compareAndSwapObject(this, childrenOffset, oldChildren, newChildren) - - @tailrec private def reserveChild(name: String): Boolean = { - val c = childrenRefs - swapChildrenRefs(c, c.reserve(name)) || reserveChild(name) - } - - @tailrec private def unreserveChild(name: String): Boolean = { - val c = childrenRefs - swapChildrenRefs(c, c.unreserve(name)) || unreserveChild(name) - } - - @tailrec private def addChild(ref: ActorRef): Boolean = { - val c = childrenRefs - swapChildrenRefs(c, c.add(ref)) || addChild(ref) - } - - @tailrec private def shallDie(ref: ActorRef): Boolean = { - val c = childrenRefs - swapChildrenRefs(c, c.shallDie(ref)) || shallDie(ref) - } - - @tailrec private def removeChild(ref: ActorRef): ChildrenContainer = { - val c = childrenRefs - val n = c.remove(ref) - if (swapChildrenRefs(c, n)) n - else removeChild(ref) - } - - @tailrec private def setChildrenTerminationReason(reason: SuspendReason): Boolean = { - childrenRefs match { - case c: TerminatingChildrenContainer ⇒ swapChildrenRefs(c, c.copy(reason = reason)) || setChildrenTerminationReason(reason) - case _ ⇒ false - } - } - - private def isTerminating = childrenRefs match { - case TerminatingChildrenContainer(_, _, Termination) ⇒ true - case TerminatedChildrenContainer ⇒ true - case _ ⇒ false - } - private def isNormal = childrenRefs match { - case TerminatingChildrenContainer(_, _, Termination | _: Recreation) ⇒ false - case _ ⇒ true - } - - private def _actorOf(props: Props, name: String, async: Boolean): ActorRef = { - if (system.settings.SerializeAllCreators && !props.creator.isInstanceOf[NoSerializationVerificationNeeded]) { - val ser = SerializationExtension(system) - ser.serialize(props.creator) match { - case Left(t) ⇒ throw t - case Right(bytes) ⇒ ser.deserialize(bytes, props.creator.getClass) match { - case Left(t) ⇒ throw t - case _ ⇒ 
//All good - } - } - } - /* - * in case we are currently terminating, fail external attachChild requests - * (internal calls cannot happen anyway because we are suspended) - */ - if (isTerminating) throw new IllegalStateException("cannot create children while terminating or terminated") - else { - reserveChild(name) - // this name will either be unreserved or overwritten with a real child below - val actor = - try { - provider.actorOf(systemImpl, props, self, self.path / name, - systemService = false, deploy = None, lookupDeploy = true, async = async) - } catch { - case NonFatal(e) ⇒ - unreserveChild(name) - throw e - } - addChild(actor) - actor - } - } - - def actorOf(props: Props): ActorRef = _actorOf(props, randomName(), async = false) - - def actorOf(props: Props, name: String): ActorRef = _actorOf(props, checkName(name), async = false) - - private def checkName(name: String): String = { - import ActorPath.ElementRegex - name match { - case null ⇒ throw new InvalidActorNameException("actor name must not be null") - case "" ⇒ throw new InvalidActorNameException("actor name must not be empty") - case ElementRegex() ⇒ name - case _ ⇒ throw new InvalidActorNameException("illegal actor name '" + name + "', must conform to " + ElementRegex) - } - } - - private[akka] def attachChild(props: Props, name: String): ActorRef = - _actorOf(props, checkName(name), async = true) - - private[akka] def attachChild(props: Props): ActorRef = - _actorOf(props, randomName(), async = true) - - final def stop(actor: ActorRef): Unit = { - val started = actor match { - case r: RepointableRef ⇒ r.isStarted - case _ ⇒ true - } - if (childrenRefs.getByRef(actor).isDefined && started) shallDie(actor) - actor.asInstanceOf[InternalActorRef].stop() - } - + private[this] var _actor: Actor = _ + def actor: Actor = _actor + protected def actor_=(a: Actor): Unit = _actor = a var currentMessage: Envelope = _ - var actor: Actor = _ private var behaviorStack: List[Actor.Receive] = emptyBehaviorStack - var watching: Set[ActorRef] = emptyActorRefSet - var watchedBy: Set[ActorRef] = emptyActorRefSet - @volatile private var _nextNameDoNotCallMeDirectly = 0L - final protected def randomName(): String = { - @tailrec def inc(): Long = { - val current = Unsafe.instance.getLongVolatile(this, nextNameOffset) - if (Unsafe.instance.compareAndSwapLong(this, nextNameOffset, current, current + 1)) current - else inc() + /* + * MESSAGE PROCESSING + */ + //Memory consistency is handled by the Mailbox (reading mailbox status then processing messages, then writing mailbox status + final def systemInvoke(message: SystemMessage): Unit = try { + message match { + case Create() ⇒ create() + case Recreate(cause) ⇒ faultRecreate(cause) + case Watch(watchee, watcher) ⇒ addWatcher(watchee, watcher) + case Unwatch(watchee, watcher) ⇒ remWatcher(watchee, watcher) + case Suspend() ⇒ faultSuspend() + case Resume(inRespToFailure) ⇒ faultResume(inRespToFailure) + case Terminate() ⇒ terminate() + case Supervise(child) ⇒ supervise(child) + case ChildTerminated(child) ⇒ handleChildTerminated(child) + case NoMessage ⇒ // only here to suppress warning } - Helpers.base64(inc()) + } catch { + case e @ (_: InterruptedException | NonFatal(_)) ⇒ handleInvokeFailure(e, "error while processing " + message) } - @volatile private var _mailboxDoNotCallMeDirectly: Mailbox = _ //This must be volatile since it isn't protected by the mailbox status + //Memory consistency is handled by the Mailbox (reading mailbox status then processing messages, then writing mailbox status + 
final def invoke(messageHandle: Envelope): Unit = try { + currentMessage = messageHandle + cancelReceiveTimeout() // FIXME: leave this here??? + messageHandle.message match { + case msg: AutoReceivedMessage ⇒ autoReceiveMessage(messageHandle) + case msg ⇒ receiveMessage(msg) + } + currentMessage = null // reset current message after successful invocation + } catch { + case e @ (_: InterruptedException | NonFatal(_)) ⇒ handleInvokeFailure(e, e.getMessage) + } finally { + checkReceiveTimeout // Reschedule receive timeout + } - /** - * INTERNAL API - * - * Returns a reference to the current mailbox + def autoReceiveMessage(msg: Envelope): Unit = { + if (system.settings.DebugAutoReceive) + publish(Debug(self.path.toString, clazz(actor), "received AutoReceiveMessage " + msg)) + + msg.message match { + case Failed(cause) ⇒ handleFailure(sender, cause) + case t: Terminated ⇒ watchedActorTerminated(t.actor); receiveMessage(t) + case Kill ⇒ throw new ActorKilledException("Kill") + case PoisonPill ⇒ self.stop() + case SelectParent(m) ⇒ parent.tell(m, msg.sender) + case SelectChildName(name, m) ⇒ for (c ← getChildByName(name)) c.child.tell(m, msg.sender) + case SelectChildPattern(p, m) ⇒ for (c ← children if p.matcher(c.path.name).matches) c.tell(m, msg.sender) + } + } + + final def receiveMessage(msg: Any): Unit = behaviorStack.head.applyOrElse(msg, actor.unhandled) + + /* + * ACTOR CONTEXT IMPLEMENTATION */ - @inline final def mailbox: Mailbox = Unsafe.instance.getObjectVolatile(this, mailboxOffset).asInstanceOf[Mailbox] - - /** - * INTERNAL API - * - * replaces the current mailbox using getAndSet semantics - */ - @tailrec final def swapMailbox(newMailbox: Mailbox): Mailbox = { - val oldMailbox = mailbox - if (!Unsafe.instance.compareAndSwapObject(this, mailboxOffset, oldMailbox, newMailbox)) swapMailbox(newMailbox) - else oldMailbox - } - - final def hasMessages: Boolean = mailbox.hasMessages - - final def numberOfMessages: Int = mailbox.numberOfMessages - - val dispatcher: MessageDispatcher = system.dispatchers.lookup(props.dispatcher) - - /** - * UntypedActorContext impl - */ - final def getDispatcher(): MessageDispatcher = dispatcher - - final def isTerminated: Boolean = mailbox.isClosed - - final def start(): this.type = { - - /* - * Create the mailbox and enqueue the Create() message to ensure that - * this is processed before anything else. - */ - swapMailbox(dispatcher.createMailbox(this)) - mailbox.setActor(this) - - // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ - mailbox.systemEnqueue(self, Create()) - - // This call is expected to start off the actor by scheduling its mailbox. 
- dispatcher.attach(this) - - this - } - - // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ - final def suspend(): Unit = dispatcher.systemDispatch(this, Suspend()) - - // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ - final def resume(): Unit = dispatcher.systemDispatch(this, Resume()) - - // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ - final def stop(): Unit = dispatcher.systemDispatch(this, Terminate()) - - override final def watch(subject: ActorRef): ActorRef = subject match { - case a: InternalActorRef ⇒ - if (a != self && !watching.contains(a)) { - a.sendSystemMessage(Watch(a, self)) // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ - watching += a - } - a - } - - override final def unwatch(subject: ActorRef): ActorRef = subject match { - case a: InternalActorRef ⇒ - if (a != self && watching.contains(a)) { - a.sendSystemMessage(Unwatch(a, self)) // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ - watching -= a - } - a - } - - final def children: Iterable[ActorRef] = childrenRefs.children - - /** - * Impl UntypedActorContext - */ - final def getChildren(): java.lang.Iterable[ActorRef] = - scala.collection.JavaConverters.asJavaIterableConverter(children).asJava - - def tell(message: Any, sender: ActorRef): Unit = - dispatcher.dispatch(this, Envelope(message, if (sender eq null) system.deadLetters else sender, system)) - - override def sendSystemMessage(message: SystemMessage): Unit = dispatcher.systemDispatch(this, message) final def sender: ActorRef = currentMessage match { case null ⇒ system.deadLetters @@ -692,6 +359,25 @@ private[akka] class ActorCell( case _ ⇒ system.deadLetters } + def become(behavior: Actor.Receive, discardOld: Boolean = true): Unit = + behaviorStack = behavior :: (if (discardOld && behaviorStack.nonEmpty) behaviorStack.tail else behaviorStack) + + def become(behavior: Procedure[Any]): Unit = become(behavior, false) + + def become(behavior: Procedure[Any], discardOld: Boolean): Unit = + become({ case msg ⇒ behavior.apply(msg) }: Actor.Receive, discardOld) + + def unbecome(): Unit = { + val original = behaviorStack + behaviorStack = + if (original.isEmpty || original.tail.isEmpty) actor.receive :: emptyBehaviorStack + else original.tail + } + + /* + * ACTOR INSTANCE HANDLING + */ + //This method is in charge of setting up the contextStack and create a new instance of the Actor protected def newActor(): Actor = { contextStack.set(this :: contextStack.get) @@ -712,325 +398,43 @@ private[akka] class ActorCell( } } - //Memory consistency is handled by the Mailbox (reading mailbox status then processing messages, then writing mailbox status - final def systemInvoke(message: SystemMessage) { - - def create(): Unit = if (isNormal) { - try { - val created = newActor() - actor = created - created.preStart() - checkReceiveTimeout - if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, clazz(created), "started (" + created + ")")) - } catch { - case NonFatal(i: InstantiationException) ⇒ - throw new ActorInitializationException(self, - """exception during creation, this problem is likely to occur because the class of the Actor you tried to create is either, + private def create(): Unit = if (isNormal) { + try { + val created = newActor() + actor = created + created.preStart() + checkReceiveTimeout + if (system.settings.DebugLifecycle) publish(Debug(self.path.toString, clazz(created), "started (" + created + ")")) + } catch { + case NonFatal(i: 
InstantiationException) ⇒ + throw new ActorInitializationException(self, + """exception during creation, this problem is likely to occur because the class of the Actor you tried to create is either, a non-static inner class (in which case make it a static inner class or use Props(new ...) or Props( new UntypedActorFactory ... ) or is missing an appropriate, reachable no-args constructor. """, i.getCause) - case NonFatal(e) ⇒ throw new ActorInitializationException(self, "exception during creation", e) - } - } - - def recreate(cause: Throwable): Unit = if (isNormal) { - try { - val failedActor = actor - if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, clazz(failedActor), "restarting")) - if (failedActor ne null) { - val c = currentMessage //One read only plz - try { - if (failedActor.context ne null) failedActor.preRestart(cause, if (c ne null) Some(c.message) else None) - } finally { - clearActorFields(failedActor) - } - } - childrenRefs match { - case ct: TerminatingChildrenContainer ⇒ - setChildrenTerminationReason(Recreation(cause)) - dispatcher suspend this - case _ ⇒ - doRecreate(cause, failedActor) - } - } catch { - case NonFatal(e) ⇒ throw new ActorInitializationException(self, "exception during creation", e match { - case i: InstantiationException ⇒ i.getCause - case other ⇒ other - }) - } - } - - def suspend(): Unit = if (isNormal) dispatcher suspend this - - def resume(): Unit = if (isNormal) dispatcher resume this - - def addWatcher(watchee: ActorRef, watcher: ActorRef): Unit = { - val watcheeSelf = watchee == self - val watcherSelf = watcher == self - - if (watcheeSelf && !watcherSelf) { - if (!watchedBy.contains(watcher)) { - watchedBy += watcher - if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, clazz(actor), "now monitoring " + watcher)) - } - } else if (!watcheeSelf && watcherSelf) { - watch(watchee) - } else { - system.eventStream.publish(Warning(self.path.toString, clazz(actor), "BUG: illegal Watch(%s,%s) for %s".format(watchee, watcher, self))) - } - } - - def remWatcher(watchee: ActorRef, watcher: ActorRef): Unit = { - val watcheeSelf = watchee == self - val watcherSelf = watcher == self - - if (watcheeSelf && !watcherSelf) { - if (watchedBy.contains(watcher)) { - watchedBy -= watcher - if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, clazz(actor), "stopped monitoring " + watcher)) - } - } else if (!watcheeSelf && watcherSelf) { - unwatch(watchee) - } else { - system.eventStream.publish(Warning(self.path.toString, clazz(actor), "BUG: illegal Unwatch(%s,%s) for %s".format(watchee, watcher, self))) - } - } - - def terminate() { - setReceiveTimeout(None) - cancelReceiveTimeout - - // stop all children, which will turn childrenRefs into TerminatingChildrenContainer (if there are children) - children foreach stop - - childrenRefs match { - case ct: TerminatingChildrenContainer ⇒ - setChildrenTerminationReason(Termination) - // do not process normal messages while waiting for all children to terminate - dispatcher suspend this - if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, clazz(actor), "stopping")) - case _ ⇒ doTerminate() - } - } - - def supervise(child: ActorRef): Unit = if (!isTerminating) { - if (childrenRefs.getByRef(child).isEmpty) addChild(child) - handleSupervise(child) - if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, clazz(actor), "now supervising " + child)) - } - 
- try { - message match { - case Create() ⇒ create() - case Recreate(cause) ⇒ recreate(cause) - case Watch(watchee, watcher) ⇒ addWatcher(watchee, watcher) - case Unwatch(watchee, watcher) ⇒ remWatcher(watchee, watcher) - case Suspend() ⇒ suspend() - case Resume() ⇒ resume() - case Terminate() ⇒ terminate() - case Supervise(child) ⇒ supervise(child) - case ChildTerminated(child) ⇒ handleChildTerminated(child) - case NoMessage ⇒ // only here to suppress warning - } - } catch { - case e @ (_: InterruptedException | NonFatal(_)) ⇒ handleInvokeFailure(e, "error while processing " + message) + case NonFatal(e) ⇒ throw new ActorInitializationException(self, "exception during creation", e) } } - //Memory consistency is handled by the Mailbox (reading mailbox status then processing messages, then writing mailbox status - final def invoke(messageHandle: Envelope): Unit = try { - currentMessage = messageHandle - cancelReceiveTimeout() // FIXME: leave this here??? - messageHandle.message match { - case msg: AutoReceivedMessage ⇒ autoReceiveMessage(messageHandle) - case msg ⇒ receiveMessage(msg) - } - currentMessage = null // reset current message after successful invocation - } catch { - case e @ (_: InterruptedException | NonFatal(_)) ⇒ handleInvokeFailure(e, e.getMessage) - } finally { - checkReceiveTimeout // Reschedule receive timeout - } - - final def handleInvokeFailure(t: Throwable, message: String): Unit = try { - dispatcher.reportFailure(new LogEventException(Error(t, self.path.toString, clazz(actor), message), t)) - // prevent any further messages to be processed until the actor has been restarted - dispatcher.suspend(this) - if (actor ne null) actor.supervisorStrategy.handleSupervisorFailing(self, children) - } finally { - t match { // Wrap InterruptedExceptions and rethrow - case _: InterruptedException ⇒ parent.tell(Failed(new ActorInterruptedException(t)), self); throw t - case _ ⇒ parent.tell(Failed(t), self) - } - } - - def become(behavior: Actor.Receive, discardOld: Boolean = true): Unit = - behaviorStack = behavior :: (if (discardOld && behaviorStack.nonEmpty) behaviorStack.tail else behaviorStack) - - /** - * UntypedActorContext impl - */ - def become(behavior: Procedure[Any]): Unit = become(behavior, false) - - /* - * UntypedActorContext impl - */ - def become(behavior: Procedure[Any], discardOld: Boolean): Unit = - become({ case msg ⇒ behavior.apply(msg) }: Actor.Receive, discardOld) - - def unbecome(): Unit = { - val original = behaviorStack - behaviorStack = - if (original.isEmpty || original.tail.isEmpty) actor.receive :: emptyBehaviorStack - else original.tail - } - - def autoReceiveMessage(msg: Envelope): Unit = { - if (system.settings.DebugAutoReceive) - system.eventStream.publish(Debug(self.path.toString, clazz(actor), "received AutoReceiveMessage " + msg)) - - msg.message match { - case Failed(cause) ⇒ handleFailure(sender, cause) - case t: Terminated ⇒ watching -= t.actor; receiveMessage(t) - case Kill ⇒ throw new ActorKilledException("Kill") - case PoisonPill ⇒ self.stop() - case SelectParent(m) ⇒ parent.tell(m, msg.sender) - case SelectChildName(name, m) ⇒ for (c ← childrenRefs getByName name) c.child.tell(m, msg.sender) - case SelectChildPattern(p, m) ⇒ for (c ← children if p.matcher(c.path.name).matches) c.tell(m, msg.sender) - } - } - - final def receiveMessage(msg: Any): Unit = { - //FIXME replace with behaviorStack.head.applyOrElse(msg, unhandled) + "-optimize" - val head = behaviorStack.head - if (head.isDefinedAt(msg)) head.apply(msg) else actor.unhandled(msg) - 
} - - private def doTerminate() { - val a = actor - try { - try { - if (a ne null) a.postStop() - } finally { - dispatcher.detach(this) - } - } finally { - try { - parent.sendSystemMessage(ChildTerminated(self)) - - if (!watchedBy.isEmpty) { - val terminated = Terminated(self)(existenceConfirmed = true) - try { - watchedBy foreach { - watcher ⇒ - try watcher.tell(terminated, self) catch { - case NonFatal(t) ⇒ system.eventStream.publish(Error(t, self.path.toString, clazz(a), "deathwatch")) - } - } - } finally watchedBy = emptyActorRefSet - } - - if (!watching.isEmpty) { - try { - watching foreach { // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ - case watchee: InternalActorRef ⇒ try watchee.sendSystemMessage(Unwatch(watchee, self)) catch { - case NonFatal(t) ⇒ system.eventStream.publish(Error(t, self.path.toString, clazz(a), "deathwatch")) - } - } - } finally watching = emptyActorRefSet - } - if (system.settings.DebugLifecycle) - system.eventStream.publish(Debug(self.path.toString, clazz(a), "stopped")) - } finally { - behaviorStack = emptyBehaviorStack - clearActorFields(a) - actor = null - } - } - } - - private def doRecreate(cause: Throwable, failedActor: Actor): Unit = try { - // after all killed children have terminated, recreate the rest, then go on to start the new instance - actor.supervisorStrategy.handleSupervisorRestarted(cause, self, children) - val freshActor = newActor() - actor = freshActor // this must happen before postRestart has a chance to fail - if (freshActor eq failedActor) setActorFields(freshActor, this, self) // If the creator returns the same instance, we need to restore our nulled out fields. - - freshActor.postRestart(cause) - if (system.settings.DebugLifecycle) system.eventStream.publish(Debug(self.path.toString, clazz(freshActor), "restarted")) - - dispatcher.resume(this) - } catch { - case NonFatal(e) ⇒ try { - dispatcher.reportFailure(new LogEventException(Error(e, self.path.toString, clazz(actor), "error while creating actor"), e)) - // prevent any further messages to be processed until the actor has been restarted - dispatcher.suspend(this) - actor.supervisorStrategy.handleSupervisorFailing(self, children) // FIXME Should this be called on actor or failedActor? - clearActorFields(actor) // If this fails, we need to ensure that preRestart isn't called. 
- } finally { - parent.tell(Failed(new ActorInitializationException(self, "exception during re-creation", e)), self) - } - } - - final def handleFailure(child: ActorRef, cause: Throwable): Unit = childrenRefs.getByRef(child) match { - case Some(stats) ⇒ if (!actor.supervisorStrategy.handleFailure(this, child, cause, stats, childrenRefs.stats)) throw cause - case None ⇒ system.eventStream.publish(Warning(self.path.toString, clazz(actor), "dropping Failed(" + cause + ") from unknown child " + child)) - } - - final def handleChildTerminated(child: ActorRef): Unit = try { - childrenRefs match { - case tc @ TerminatingChildrenContainer(_, _, reason) ⇒ - val n = removeChild(child) - actor.supervisorStrategy.handleChildTerminated(this, child, children) - if (!n.isInstanceOf[TerminatingChildrenContainer]) reason match { - case Recreation(cause) ⇒ doRecreate(cause, actor) // doRecreate since this is the continuation of "recreate" - case Termination ⇒ doTerminate() - case _ ⇒ - } - case _ ⇒ - removeChild(child) - actor.supervisorStrategy.handleChildTerminated(this, child, children) - } - } catch { - case NonFatal(e) ⇒ - try { - dispatcher suspend this - actor.supervisorStrategy.handleSupervisorFailing(self, children) - } finally { - parent.tell(Failed(e), self) - } + private def supervise(child: ActorRef): Unit = if (!isTerminating) { + addChild(child) + handleSupervise(child) + if (system.settings.DebugLifecycle) publish(Debug(self.path.toString, clazz(actor), "now supervising " + child)) } + // future extension point protected def handleSupervise(child: ActorRef): Unit = child match { case r: RepointableActorRef ⇒ r.activate() case _ ⇒ } - // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ - final def restart(cause: Throwable): Unit = dispatcher.systemDispatch(this, Recreate(cause)) - - final def checkReceiveTimeout() { - val recvtimeout = receiveTimeoutData - if (Duration.Undefined != recvtimeout._1 && !mailbox.hasMessages) { - recvtimeout._2.cancel() //Cancel any ongoing future - //Only reschedule if desired and there are currently no more messages to be processed - receiveTimeoutData = (recvtimeout._1, system.scheduler.scheduleOnce(recvtimeout._1, self, ReceiveTimeout)) - } else cancelReceiveTimeout() - - } - - final def cancelReceiveTimeout(): Unit = - if (receiveTimeoutData._2 ne emptyCancellable) { - receiveTimeoutData._2.cancel() - receiveTimeoutData = (receiveTimeoutData._1, emptyCancellable) - } - - final def clearActorFields(actorInstance: Actor): Unit = { + final protected def clearActorFields(actorInstance: Actor): Unit = { setActorFields(actorInstance, context = null, self = system.deadLetters) currentMessage = null + behaviorStack = emptyBehaviorStack } - final def setActorFields(actorInstance: Actor, context: ActorContext, self: ActorRef) { + final protected def setActorFields(actorInstance: Actor, context: ActorContext, self: ActorRef) { @tailrec def lookupAndSetField(clazz: Class[_], actor: Actor, name: String, value: Any): Boolean = { val success = try { @@ -1044,7 +448,7 @@ private[akka] class ActorCell( if (success) true else { - val parent = clazz.getSuperclass + val parent: Class[_] = clazz.getSuperclass if (parent eq null) throw new IllegalActorStateException(toString + " is not an Actor since it have not mixed in the 'Actor' trait") lookupAndSetField(parent, actor, name, value) } @@ -1055,6 +459,9 @@ private[akka] class ActorCell( } } - private final def clazz(o: AnyRef): Class[_] = if (o eq null) this.getClass else o.getClass + // logging is not the main 
purpose, and if it fails there’s nothing we can do + protected final def publish(e: LogEvent): Unit = try system.eventStream.publish(e) catch { case NonFatal(_) ⇒ } + + protected final def clazz(o: AnyRef): Class[_] = if (o eq null) this.getClass else o.getClass } diff --git a/akka-actor/src/main/scala/akka/actor/ActorRef.scala b/akka-actor/src/main/scala/akka/actor/ActorRef.scala index 8d42714b00..00a84f956a 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRef.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRef.scala @@ -191,7 +191,7 @@ private[akka] abstract class InternalActorRef extends ActorRef with ScalaActorRe /* * Actor life-cycle management, invoked only internally (in response to user requests via ActorContext). */ - def resume(): Unit + def resume(inResponseToFailure: Boolean): Unit def suspend(): Unit def restart(cause: Throwable): Unit def stop(): Unit @@ -262,10 +262,7 @@ private[akka] class LocalActorRef private[akka] ( * that is reached). */ private val actorCell: ActorCell = newActorCell(_system, this, _props, _supervisor) - actorCell.start() - - // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ - _supervisor.sendSystemMessage(akka.dispatch.Supervise(this)) + actorCell.start(sendSupervise = true) protected def newActorCell(system: ActorSystemImpl, ref: InternalActorRef, props: Props, supervisor: InternalActorRef): ActorCell = new ActorCell(system, ref, props, supervisor) @@ -291,7 +288,7 @@ private[akka] class LocalActorRef private[akka] ( /** * Resumes a suspended actor. */ - override def resume(): Unit = actorCell.resume() + override def resume(inResponseToFailure: Boolean): Unit = actorCell.resume(inResponseToFailure) /** * Shuts down the actor and its message queue @@ -307,7 +304,7 @@ private[akka] class LocalActorRef private[akka] ( * to inject “synthetic” actor paths like “/temp”. */ protected def getSingleChild(name: String): InternalActorRef = - actorCell.childrenRefs.getByName(name) match { + actorCell.getChildByName(name) match { case Some(crs) ⇒ crs.child.asInstanceOf[InternalActorRef] case None ⇒ Nobody } @@ -391,7 +388,7 @@ private[akka] trait MinimalActorRef extends InternalActorRef with LocalRef { override def getChild(names: Iterator[String]): InternalActorRef = if (names.forall(_.isEmpty)) this else Nobody override def suspend(): Unit = () - override def resume(): Unit = () + override def resume(inResponseToFailure: Boolean): Unit = () override def stop(): Unit = () override def isTerminated = false diff --git a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala index a5cc26c467..fe9c1bca03 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorRefProvider.scala @@ -4,12 +4,13 @@ package akka.actor -import java.util.concurrent.atomic.AtomicLong import akka.dispatch._ import akka.routing._ -import akka.AkkaException import akka.event._ -import akka.util.{ NonFatal, Switch, Helpers } +import akka.util.{ Switch, Helpers } +import scala.util.control.NonFatal +import scala.concurrent.{ Future, Promise } +import java.util.concurrent.atomic.AtomicLong /** * Interface for all ActorRef providers to implement. 
@@ -361,9 +362,7 @@ class LocalActorRefProvider( def provider: ActorRefProvider = LocalActorRefProvider.this - override def stop(): Unit = stopped switchOn { - terminationFuture.complete(causeOfTermination.toLeft(())) - } + override def stop(): Unit = stopped switchOn { terminationPromise.complete(causeOfTermination.toLeft(())) } override def isTerminated: Boolean = stopped.isOn @@ -458,7 +457,9 @@ class LocalActorRefProvider( def dispatcher: MessageDispatcher = system.dispatcher - lazy val terminationFuture: Promise[Unit] = Promise[Unit]()(dispatcher) + lazy val terminationPromise: Promise[Unit] = Promise[Unit]() + + def terminationFuture: Future[Unit] = terminationPromise.future @volatile private var extraNames: Map[String, InternalActorRef] = Map() diff --git a/akka-actor/src/main/scala/akka/actor/ActorSelection.scala b/akka-actor/src/main/scala/akka/actor/ActorSelection.scala index 93a26c6289..0740d8724e 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorSelection.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorSelection.scala @@ -2,6 +2,9 @@ * Copyright (C) 2009-2012 Typesafe Inc. */ package akka.actor + +import language.implicitConversions + import java.util.regex.Pattern import akka.util.Helpers diff --git a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala index a75e646678..ef23305b83 100644 --- a/akka-actor/src/main/scala/akka/actor/ActorSystem.scala +++ b/akka-actor/src/main/scala/akka/actor/ActorSystem.scala @@ -9,12 +9,15 @@ import akka.dispatch._ import akka.pattern.ask import com.typesafe.config.{ Config, ConfigFactory } import scala.annotation.tailrec +import scala.concurrent.util.Duration import java.io.Closeable -import akka.dispatch.Await.{ Awaitable, CanAwait } +import scala.concurrent.{ Await, Awaitable, CanAwait, Future } +import scala.util.control.NonFatal import akka.util._ import akka.util.internal.{ HashedWheelTimer, ConcurrentIdentityHashMap } import java.util.concurrent.{ ThreadFactory, CountDownLatch, TimeoutException, RejectedExecutionException } import java.util.concurrent.TimeUnit.MILLISECONDS +import akka.actor.cell.ChildrenContainer object ActorSystem { @@ -506,13 +509,12 @@ private[akka] class ActorSystemImpl(val name: String, applicationConfig: Config, def actorOf(props: Props): ActorRef = guardian.underlying.attachChild(props) def stop(actor: ActorRef): Unit = { - implicit val timeout = settings.CreationTimeout val path = actor.path val guard = guardian.path val sys = systemGuardian.path path.parent match { - case `guard` ⇒ Await.result(guardian ? StopChild(actor), timeout.duration) - case `sys` ⇒ Await.result(systemGuardian ? StopChild(actor), timeout.duration) + case `guard` ⇒ guardian ! StopChild(actor) + case `sys` ⇒ systemGuardian ! StopChild(actor) case _ ⇒ actor.asInstanceOf[InternalActorRef].stop() } } @@ -586,6 +588,7 @@ private[akka] class ActorSystemImpl(val name: String, applicationConfig: Config, def start(): this.type = _start private lazy val terminationCallbacks = { + implicit val d = dispatcher val callbacks = new TerminationCallbacks terminationFuture onComplete (_ ⇒ callbacks.run) callbacks @@ -659,7 +662,7 @@ private[akka] class ActorSystemImpl(val name: String, applicationConfig: Config, instance //Profit! 
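With terminationFuture now exposed as a read-only view of terminationPromise and ActorSystemImpl.stop no longer awaiting StopChild, code that needs to observe shutdown should rely on the system's termination hooks rather than on stop() returning. A sketch under the assumption of the 2.1-era ActorSystem API (registerOnTermination, shutdown, awaitTermination):

import akka.actor.ActorSystem
import scala.concurrent.util.Duration
import java.util.concurrent.TimeUnit

object ShutdownHook extends App {
  val system = ActorSystem("termination-demo")
  // Runs when terminationFuture completes, i.e. after the guardians have stopped.
  system.registerOnTermination(println("actor system terminated"))
  system.shutdown()
  // Block the calling thread until the callbacks above have been run.
  system.awaitTermination(Duration(10, TimeUnit.SECONDS))
}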
} } catch { - case t ⇒ + case t: Throwable ⇒ extensions.remove(ext, inProcessOfRegistration) //In case shit hits the fan, remove the inProcess signal throw t //Escalate to caller } finally { @@ -698,19 +701,30 @@ private[akka] class ActorSystemImpl(val name: String, applicationConfig: Config, node match { case wc: ActorRefWithCell ⇒ val cell = wc.underlying - indent + "-> " + node.path.name + " " + Logging.simpleName(node) + " " + + (if (indent.isEmpty) "-> " else indent.dropRight(1) + "⌊-> ") + + node.path.name + " " + Logging.simpleName(node) + " " + (cell match { case real: ActorCell ⇒ if (real.actor ne null) real.actor.getClass else "null" case _ ⇒ Logging.simpleName(cell) }) + + (cell match { + case real: ActorCell ⇒ " status=" + real.mailbox.status + case _ ⇒ "" + }) + " " + (cell.childrenRefs match { - case ActorCell.TerminatingChildrenContainer(_, toDie, reason) ⇒ + case ChildrenContainer.TerminatingChildrenContainer(_, toDie, reason) ⇒ "Terminating(" + reason + ")" + - (toDie.toSeq.sorted mkString ("\n" + indent + " toDie: ", "\n" + indent + " ", "")) + (toDie.toSeq.sorted mkString ("\n" + indent + " | toDie: ", "\n" + indent + " | ", "")) + case x @ (ChildrenContainer.TerminatedChildrenContainer | ChildrenContainer.EmptyChildrenContainer) ⇒ x.toString + case n: ChildrenContainer.NormalChildrenContainer ⇒ n.c.size + " children" case x ⇒ Logging.simpleName(x) }) + (if (cell.childrenRefs.children.isEmpty) "" else "\n") + - (cell.childrenRefs.children.toSeq.sorted map (printNode(_, indent + " |")) mkString ("\n")) + ({ + val children = cell.childrenRefs.children.toSeq.sorted + val bulk = children.dropRight(1) map (printNode(_, indent + " |")) + bulk ++ (children.lastOption map (printNode(_, indent + " "))) + } mkString ("\n")) case _ ⇒ indent + node.path.name + " " + Logging.simpleName(node) } diff --git a/akka-actor/src/main/scala/akka/actor/Deployer.scala b/akka-actor/src/main/scala/akka/actor/Deployer.scala index 30118ae03a..0d87c849d1 100644 --- a/akka-actor/src/main/scala/akka/actor/Deployer.scala +++ b/akka-actor/src/main/scala/akka/actor/Deployer.scala @@ -4,7 +4,7 @@ package akka.actor -import akka.util.Duration +import scala.concurrent.util.Duration import com.typesafe.config._ import akka.routing._ import java.util.concurrent.{ TimeUnit } @@ -139,7 +139,7 @@ private[akka] class Deployer(val settings: ActorSystem.Settings, val dynamicAcce val deployment = config.withFallback(default) - val routees = deployment.getStringList("routees.paths").asScala.toSeq + val routees = Vector() ++ deployment.getStringList("routees.paths").asScala val nrOfInstances = deployment.getInt("nr-of-instances") diff --git a/akka-actor/src/main/scala/akka/actor/DynamicAccess.scala b/akka-actor/src/main/scala/akka/actor/DynamicAccess.scala index 72ffbbe76e..08de413717 100644 --- a/akka-actor/src/main/scala/akka/actor/DynamicAccess.scala +++ b/akka-actor/src/main/scala/akka/actor/DynamicAccess.scala @@ -3,8 +3,9 @@ */ package akka.actor -import akka.util.NonFatal +import scala.util.control.NonFatal import java.lang.reflect.InvocationTargetException +import scala.reflect.ClassTag /** * The DynamicAccess implementation is the class which is used for @@ -24,7 +25,7 @@ abstract class DynamicAccess { * val obj = DynamicAccess.createInstanceFor(clazz, Seq(classOf[Config] -> config, classOf[String] -> name)) * }}} */ - def createInstanceFor[T: ClassManifest](clazz: Class[_], args: Seq[(Class[_], AnyRef)]): Either[Throwable, T] = { + def createInstanceFor[T: ClassTag](clazz: Class[_], args: Seq[(Class[_], 
AnyRef)]): Either[Throwable, T] = { val types = args.map(_._1).toArray val values = args.map(_._2).toArray withErrorHandling { @@ -40,7 +41,7 @@ abstract class DynamicAccess { * Obtain a `Class[_]` object loaded with the right class loader (i.e. the one * returned by `classLoader`). */ - def getClassFor[T: ClassManifest](fqcn: String): Either[Throwable, Class[_ <: T]] + def getClassFor[T: ClassTag](fqcn: String): Either[Throwable, Class[_ <: T]] /** * Obtain an object conforming to the type T, which is expected to be @@ -49,12 +50,12 @@ abstract class DynamicAccess { * `args` argument. The exact usage of args depends on which type is requested, * see the relevant requesting code for details. */ - def createInstanceFor[T: ClassManifest](fqcn: String, args: Seq[(Class[_], AnyRef)]): Either[Throwable, T] + def createInstanceFor[T: ClassTag](fqcn: String, args: Seq[(Class[_], AnyRef)]): Either[Throwable, T] /** * Obtain the Scala “object” instance for the given fully-qualified class name, if there is one. */ - def getObjectFor[T: ClassManifest](fqcn: String): Either[Throwable, T] + def getObjectFor[T: ClassTag](fqcn: String): Either[Throwable, T] /** * This is the class loader to be used in those special cases where the @@ -89,7 +90,7 @@ abstract class DynamicAccess { */ class ReflectiveDynamicAccess(val classLoader: ClassLoader) extends DynamicAccess { //FIXME switch to Scala Reflection for 2.10 - override def getClassFor[T: ClassManifest](fqcn: String): Either[Throwable, Class[_ <: T]] = + override def getClassFor[T: ClassTag](fqcn: String): Either[Throwable, Class[_ <: T]] = try { val c = classLoader.loadClass(fqcn).asInstanceOf[Class[_ <: T]] val t = classManifest[T].erasure @@ -98,7 +99,7 @@ class ReflectiveDynamicAccess(val classLoader: ClassLoader) extends DynamicAcces case NonFatal(e) ⇒ Left(e) } - override def createInstanceFor[T: ClassManifest](fqcn: String, args: Seq[(Class[_], AnyRef)]): Either[Throwable, T] = + override def createInstanceFor[T: ClassTag](fqcn: String, args: Seq[(Class[_], AnyRef)]): Either[Throwable, T] = getClassFor(fqcn).fold(Left(_), { c ⇒ val types = args.map(_._1).toArray val values = args.map(_._2).toArray @@ -111,7 +112,7 @@ class ReflectiveDynamicAccess(val classLoader: ClassLoader) extends DynamicAcces } }) - override def getObjectFor[T: ClassManifest](fqcn: String): Either[Throwable, T] = { + override def getObjectFor[T: ClassTag](fqcn: String): Either[Throwable, T] = { getClassFor(fqcn).fold(Left(_), { c ⇒ withErrorHandling { val module = c.getDeclaredField("MODULE$") diff --git a/akka-actor/src/main/scala/akka/actor/Extension.scala b/akka-actor/src/main/scala/akka/actor/Extension.scala index 3adad6fd4b..da0e7e6769 100644 --- a/akka-actor/src/main/scala/akka/actor/Extension.scala +++ b/akka-actor/src/main/scala/akka/actor/Extension.scala @@ -3,6 +3,8 @@ */ package akka.actor +import scala.reflect.ClassTag + /** * The basic ActorSystem covers all that is needed for locally running actors, * using futures and so on. 
In addition, more features can hook into it and @@ -92,12 +94,12 @@ trait ExtensionIdProvider { * } * }}} */ -abstract class ExtensionKey[T <: Extension](implicit m: ClassManifest[T]) extends ExtensionId[T] with ExtensionIdProvider { - def this(clazz: Class[T]) = this()(ClassManifest.fromClass(clazz)) +abstract class ExtensionKey[T <: Extension](implicit m: ClassTag[T]) extends ExtensionId[T] with ExtensionIdProvider { + def this(clazz: Class[T]) = this()(ClassTag(clazz)) override def lookup(): ExtensionId[T] = this def createExtension(system: ExtendedActorSystem): T = - system.dynamicAccess.createInstanceFor[T](m.erasure, Seq(classOf[ExtendedActorSystem] -> system)) match { + system.dynamicAccess.createInstanceFor[T](m.runtimeClass, Seq(classOf[ExtendedActorSystem] -> system)) match { case Left(ex) ⇒ throw ex case Right(r) ⇒ r } diff --git a/akka-actor/src/main/scala/akka/actor/FSM.scala b/akka-actor/src/main/scala/akka/actor/FSM.scala index 50c769e156..04f9916ec1 100644 --- a/akka-actor/src/main/scala/akka/actor/FSM.scala +++ b/akka-actor/src/main/scala/akka/actor/FSM.scala @@ -3,8 +3,10 @@ */ package akka.actor -import akka.util._ +import language.implicitConversions +import akka.util._ +import scala.concurrent.util.Duration import scala.collection.mutable import akka.routing.{ Deafen, Listen, Listeners } diff --git a/akka-actor/src/main/scala/akka/actor/FaultHandling.scala b/akka-actor/src/main/scala/akka/actor/FaultHandling.scala index 76eed2eca9..bdeec7a8bb 100644 --- a/akka-actor/src/main/scala/akka/actor/FaultHandling.scala +++ b/akka-actor/src/main/scala/akka/actor/FaultHandling.scala @@ -3,12 +3,13 @@ */ package akka.actor +import language.implicitConversions + import java.util.concurrent.TimeUnit import scala.collection.mutable.ArrayBuffer import scala.collection.JavaConversions._ import java.lang.{ Iterable ⇒ JIterable } -import akka.util.Duration - +import scala.concurrent.util.Duration /** * INTERNAL API */ @@ -207,19 +208,31 @@ object SupervisorStrategy extends SupervisorStrategyLowPriorityImplicits { } /** - * An Akka SupervisorStrategy is the policy to apply for crashing children + * An Akka SupervisorStrategy is the policy to apply for crashing children. + * + * IMPORTANT: + * + * You should not normally need to create new subclasses, instead use the + * existing [[akka.actor.OneForOneStrategy]] or [[akka.actor.AllForOneStrategy]], + * but if you do, please read the docs of the methods below carefully, as + * incorrect implementations may lead to “blocked” actor systems (i.e. + * permanently suspended actors). */ abstract class SupervisorStrategy { import SupervisorStrategy._ /** - * Returns the Decider that is associated with this SupervisorStrategy + * Returns the Decider that is associated with this SupervisorStrategy. + * The Decider is invoked by the default implementation of `handleFailure` + * to obtain the Directive to be applied. */ def decider: Decider /** * This method is called after the child has been removed from the set of children. + * It does not need to do anything special. Exceptions thrown from this method + * do NOT make the actor fail if this happens during termination. 
*/ def handleChildTerminated(context: ActorContext, child: ActorRef, children: Iterable[ActorRef]): Unit @@ -228,27 +241,48 @@ abstract class SupervisorStrategy { */ def processFailure(context: ActorContext, restart: Boolean, child: ActorRef, cause: Throwable, stats: ChildRestartStats, children: Iterable[ChildRestartStats]): Unit - //FIXME docs - def handleSupervisorFailing(supervisor: ActorRef, children: Iterable[ActorRef]): Unit = - if (children.nonEmpty) children.foreach(_.asInstanceOf[InternalActorRef].suspend()) - - //FIXME docs - def handleSupervisorRestarted(cause: Throwable, supervisor: ActorRef, children: Iterable[ActorRef]): Unit = - if (children.nonEmpty) children.foreach(_.asInstanceOf[InternalActorRef].restart(cause)) - /** - * Returns whether it processed the failure or not + * This is the main entry point: in case of a child’s failure, this method + * must try to handle the failure by resuming, restarting or stopping the + * child (and returning `true`), or it returns `false` to escalate the + * failure, which will lead to this actor re-throwing the exception which + * caused the failure. The exception will not be wrapped. + * + * @param children is a lazy collection (a view) */ def handleFailure(context: ActorContext, child: ActorRef, cause: Throwable, stats: ChildRestartStats, children: Iterable[ChildRestartStats]): Boolean = { val directive = if (decider.isDefinedAt(cause)) decider(cause) else Escalate //FIXME applyOrElse in Scala 2.10 directive match { - case Resume ⇒ child.asInstanceOf[InternalActorRef].resume(); true + case Resume ⇒ resumeChild(child); true case Restart ⇒ processFailure(context, true, child, cause, stats, children); true case Stop ⇒ processFailure(context, false, child, cause, stats, children); true case Escalate ⇒ false } } + /** + * Resume the previously failed child: do never apply this to a child which + * is not the currently failing child. Suspend/resume needs to be done in + * matching pairs, otherwise actors will wake up too soon or never at all. + */ + final def resumeChild(child: ActorRef): Unit = child.asInstanceOf[InternalActorRef].resume(inResponseToFailure = true) + + /** + * Restart the given child, possibly suspending it first. + * + * IMPORTANT: + * + * If the child is the currently failing one, it will already have been + * suspended, hence `suspendFirst` is false. If the child is not the + * currently failing one, then it did not request this treatment and is + * therefore not prepared to be resumed without prior suspend. 
+ */ + final def restartChild(child: ActorRef, cause: Throwable, suspendFirst: Boolean): Unit = { + val c = child.asInstanceOf[InternalActorRef] + if (suspendFirst) c.suspend() + c.restart(cause) + } + } /** @@ -287,7 +321,7 @@ case class AllForOneStrategy(maxNrOfRetries: Int = -1, withinTimeRange: Duration def processFailure(context: ActorContext, restart: Boolean, child: ActorRef, cause: Throwable, stats: ChildRestartStats, children: Iterable[ChildRestartStats]): Unit = { if (children.nonEmpty) { if (restart && children.forall(_.requestRestartPermission(retriesWindow))) - children.foreach(_.child.asInstanceOf[InternalActorRef].restart(cause)) + children foreach (crs ⇒ restartChild(crs.child, cause, suspendFirst = (crs.child != child))) else for (c ← children) context.stop(c.child) } @@ -329,7 +363,7 @@ case class OneForOneStrategy(maxNrOfRetries: Int = -1, withinTimeRange: Duration def processFailure(context: ActorContext, restart: Boolean, child: ActorRef, cause: Throwable, stats: ChildRestartStats, children: Iterable[ChildRestartStats]): Unit = { if (restart && stats.requestRestartPermission(retriesWindow)) - child.asInstanceOf[InternalActorRef].restart(cause) + restartChild(child, cause, suspendFirst = false) else context.stop(child) //TODO optimization to drop child here already? } diff --git a/akka-actor/src/main/scala/akka/actor/IO.scala b/akka-actor/src/main/scala/akka/actor/IO.scala index 07af4213fc..ac9e09ec47 100644 --- a/akka-actor/src/main/scala/akka/actor/IO.scala +++ b/akka-actor/src/main/scala/akka/actor/IO.scala @@ -3,8 +3,13 @@ */ package akka.actor -import akka.dispatch.{ Future, ExecutionContext } -import akka.util.{ ByteString, Duration, NonFatal } +import language.higherKinds +import language.postfixOps + +import scala.concurrent.{ ExecutionContext, Future } +import scala.concurrent.util.Duration +import scala.util.control.NonFatal +import akka.util.ByteString import java.net.{ SocketAddress, InetSocketAddress } import java.io.IOException import java.nio.ByteBuffer @@ -559,7 +564,7 @@ object IO { * A mutable reference to an [[akka.actor.IO.Iteratee]]. Not thread safe. * * Designed for use within an [[akka.actor.Actor]], although all actions - * perfomed on the Iteratee are processed within a [[akka.dispatch.Future]] + * perfomed on the Iteratee are processed within a [[scala.concurrent.Future]] * so it is not safe to refer to the Actor's state from within this Iteratee. * Messages should instead be sent to the Actor in order to modify state. * diff --git a/akka-actor/src/main/scala/akka/actor/Props.scala b/akka-actor/src/main/scala/akka/actor/Props.scala index f48bbe9573..459ab4da9d 100644 --- a/akka-actor/src/main/scala/akka/actor/Props.scala +++ b/akka-actor/src/main/scala/akka/actor/Props.scala @@ -4,9 +4,13 @@ package akka.actor +import language.existentials + import akka.dispatch._ import akka.japi.Creator +import scala.reflect.ClassTag import akka.routing._ +import akka.util.Reflect /** * Factory for Props instances. @@ -48,8 +52,8 @@ object Props { * * Scala API. */ - def apply[T <: Actor: ClassManifest](): Props = - default.withCreator(implicitly[ClassManifest[T]].erasure.asInstanceOf[Class[_ <: Actor]]) + def apply[T <: Actor: ClassTag](): Props = + default.withCreator(implicitly[ClassTag[T]].runtimeClass.asInstanceOf[Class[_ <: Actor]]) /** * Returns a Props that has default values except for "creator" which will be a function that creates an instance @@ -185,10 +189,5 @@ case class Props( * able to optimize serialization. 
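The expanded SupervisorStrategy documentation above explains how the Decider feeds handleFailure and how restartChild/resumeChild are applied per child. For reference, a minimal sketch of declaring such a strategy in a supervising actor (Worker and the chosen exception types are illustrative, not part of this change):

import akka.actor.{ Actor, OneForOneStrategy, Props }
import akka.actor.SupervisorStrategy.{ Restart, Resume, Stop }
import scala.concurrent.util.Duration
import java.util.concurrent.TimeUnit

// Hypothetical child that may throw from receive.
class Worker extends Actor {
  def receive = { case msg ⇒ sender ! msg }
}

class Supervisor extends Actor {
  // The Decider maps a child's failure to a Directive; anything it does not
  // cover falls through to Escalate in the default handleFailure.
  override val supervisorStrategy =
    OneForOneStrategy(maxNrOfRetries = 3, withinTimeRange = Duration(1, TimeUnit.MINUTES)) {
      case _: ArithmeticException      ⇒ Resume
      case _: IllegalArgumentException ⇒ Stop
      case _: Exception                ⇒ Restart
    }

  val child = context.actorOf(Props[Worker], "worker")
  def receive = { case msg ⇒ child forward msg }
}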
*/ private[akka] case class FromClassCreator(clazz: Class[_ <: Actor]) extends Function0[Actor] { - def apply(): Actor = try clazz.newInstance catch { - case iae: IllegalAccessException ⇒ - val ctor = clazz.getDeclaredConstructor() - ctor.setAccessible(true) - ctor.newInstance() - } + def apply(): Actor = Reflect.instantiate(clazz) } diff --git a/akka-actor/src/main/scala/akka/actor/RepointableActorRef.scala b/akka-actor/src/main/scala/akka/actor/RepointableActorRef.scala index ad9a7cb0c4..caad67503a 100644 --- a/akka-actor/src/main/scala/akka/actor/RepointableActorRef.scala +++ b/akka-actor/src/main/scala/akka/actor/RepointableActorRef.scala @@ -16,6 +16,7 @@ import akka.dispatch.MessageDispatcher import java.util.concurrent.locks.ReentrantLock import akka.event.Logging.Warning import scala.collection.mutable.Queue +import akka.actor.cell.ChildrenContainer /** * This actor ref starts out with some dummy cell (by default just enqueuing @@ -76,11 +77,11 @@ private[akka] class RepointableActorRef( * This is called by activate() to obtain the cell which is to replace the * unstarted cell. The cell must be fully functional. */ - def newCell(): Cell = new ActorCell(system, this, props, supervisor).start() + def newCell(): Cell = new ActorCell(system, this, props, supervisor).start(sendSupervise = false) def suspend(): Unit = underlying.suspend() - def resume(): Unit = underlying.resume() + def resume(inResponseToFailure: Boolean): Unit = underlying.resume(inResponseToFailure) def stop(): Unit = underlying.stop() @@ -102,7 +103,7 @@ private[akka] class RepointableActorRef( case ".." ⇒ getParent.getChild(name) case "" ⇒ getChild(name) case other ⇒ - underlying.childrenRefs.getByName(other) match { + underlying.getChildByName(other) match { case Some(crs) ⇒ crs.child.asInstanceOf[InternalActorRef].getChild(name) case None ⇒ Nobody } @@ -129,6 +130,7 @@ private[akka] class UnstartedCell(val systemImpl: ActorSystemImpl, val self: Rep // use Envelope to keep on-send checks in the same place val queue: Queue[Envelope] = Queue() val systemQueue: Queue[SystemMessage] = Queue() + var suspendCount = 0 def replaceWith(cell: Cell): Unit = { lock.lock() @@ -161,18 +163,21 @@ private[akka] class UnstartedCell(val systemImpl: ActorSystemImpl, val self: Rep if (interrupted) throw new InterruptedException } finally try self.swapCell(cell) + finally try + for (_ ← 1 to suspendCount) cell.suspend() finally lock.unlock() } def system: ActorSystem = systemImpl - def suspend(): Unit = {} - def resume(): Unit = {} - def restart(cause: Throwable): Unit = {} + def suspend(): Unit = { lock.lock(); try suspendCount += 1 finally lock.unlock() } + def resume(inResponseToFailure: Boolean): Unit = { lock.lock(); try suspendCount -= 1 finally lock.unlock() } + def restart(cause: Throwable): Unit = { lock.lock(); try suspendCount -= 1 finally lock.unlock() } def stop(): Unit = sendSystemMessage(Terminate()) def isTerminated: Boolean = false def parent: InternalActorRef = supervisor - def childrenRefs: ActorCell.ChildrenContainer = ActorCell.EmptyChildrenContainer + def childrenRefs: ChildrenContainer = ChildrenContainer.EmptyChildrenContainer + def getChildByName(name: String): Option[ChildRestartStats] = None def tell(message: Any, sender: ActorRef): Unit = { lock.lock() try { diff --git a/akka-actor/src/main/scala/akka/actor/Scheduler.scala b/akka-actor/src/main/scala/akka/actor/Scheduler.scala index e1d36ab95d..c088fbce51 100644 --- a/akka-actor/src/main/scala/akka/actor/Scheduler.scala +++ 
b/akka-actor/src/main/scala/akka/actor/Scheduler.scala @@ -4,7 +4,7 @@ package akka.actor -import akka.util.Duration +import scala.concurrent.util.Duration import akka.util.internal.{ TimerTask, HashedWheelTimer, Timeout ⇒ HWTimeout, Timer } import akka.event.LoggingAdapter import akka.dispatch.MessageDispatcher diff --git a/akka-actor/src/main/scala/akka/actor/TypedActor.scala b/akka-actor/src/main/scala/akka/actor/TypedActor.scala index 1933015e88..ddc77a9627 100644 --- a/akka-actor/src/main/scala/akka/actor/TypedActor.scala +++ b/akka-actor/src/main/scala/akka/actor/TypedActor.scala @@ -1,17 +1,22 @@ -package akka.actor - /** * Copyright (C) 2009-2012 Typesafe Inc. */ +package akka.actor + +import language.existentials import akka.japi.{ Creator, Option ⇒ JOption } import java.lang.reflect.{ InvocationTargetException, Method, InvocationHandler, Proxy } -import akka.util.{ Timeout, NonFatal, Duration } -import java.util.concurrent.atomic.{ AtomicReference ⇒ AtomVar } +import akka.util.Timeout +import scala.util.control.NonFatal +import scala.concurrent.util.Duration +import scala.concurrent.{ Await, Future } +import akka.util.Reflect.instantiator import akka.dispatch._ +import java.util.concurrent.atomic.{ AtomicReference ⇒ AtomVar } import java.util.concurrent.TimeoutException import java.util.concurrent.TimeUnit.MILLISECONDS -import akka.actor.TypedActor.TypedActorInvocationHandler +import scala.reflect.ClassTag import akka.serialization.{ JavaSerializer, SerializationExtension } import java.io.ObjectStreamException @@ -403,9 +408,9 @@ object TypedActor extends ExtensionId[TypedActorExtension] with ExtensionIdProvi case m if m.returnsJOption_? || m.returnsOption_? ⇒ val f = ask(actor, m)(timeout) (try { Await.ready(f, timeout.duration).value } catch { case _: TimeoutException ⇒ None }) match { - case None | Some(Right(null)) ⇒ if (m.returnsJOption_?) JOption.none[Any] else None - case Some(Right(joption: AnyRef)) ⇒ joption - case Some(Left(ex)) ⇒ throw ex + case None | Some(Right(null)) ⇒ if (m.returnsJOption_?) 
JOption.none[Any] else None + case Some(Right(joption)) ⇒ joption.asInstanceOf[AnyRef] + case Some(Left(ex)) ⇒ throw ex } case m ⇒ Await.result(ask(actor, m)(timeout), timeout.duration).asInstanceOf[AnyRef] } @@ -462,7 +467,7 @@ object TypedProps { * Scala API */ def apply[T <: AnyRef](interface: Class[_ >: T], implementation: Class[T]): TypedProps[T] = - new TypedProps[T](extractInterfaces(interface), () ⇒ implementation.newInstance()) + new TypedProps[T](extractInterfaces(interface), instantiator(implementation)) /** * Uses the supplied thunk as the factory for the TypedActor implementation, @@ -481,8 +486,8 @@ object TypedProps { * * Scala API */ - def apply[T <: AnyRef: ClassManifest](): TypedProps[T] = - new TypedProps[T](implicitly[ClassManifest[T]].erasure.asInstanceOf[Class[T]]) + def apply[T <: AnyRef: ClassTag](): TypedProps[T] = + new TypedProps[T](implicitly[ClassTag[T]].runtimeClass.asInstanceOf[Class[T]]) } /** @@ -506,7 +511,7 @@ case class TypedProps[T <: AnyRef] protected[TypedProps] ( */ def this(implementation: Class[T]) = this(interfaces = TypedProps.extractInterfaces(implementation), - creator = () ⇒ implementation.newInstance()) + creator = instantiator(implementation)) /** * Uses the supplied Creator as the factory for the TypedActor implementation, @@ -518,7 +523,7 @@ case class TypedProps[T <: AnyRef] protected[TypedProps] ( */ def this(interface: Class[_ >: T], implementation: Creator[T]) = this(interfaces = TypedProps.extractInterfaces(interface), - creator = () ⇒ implementation.create()) + creator = implementation.create _) /** * Uses the supplied class as the factory for the TypedActor implementation, @@ -530,7 +535,7 @@ case class TypedProps[T <: AnyRef] protected[TypedProps] ( */ def this(interface: Class[_ >: T], implementation: Class[T]) = this(interfaces = TypedProps.extractInterfaces(interface), - creator = () ⇒ implementation.newInstance()) + creator = instantiator(implementation)) /** * Returns a new TypedProps with the specified dispatcher set. diff --git a/akka-actor/src/main/scala/akka/actor/cell/Children.scala b/akka-actor/src/main/scala/akka/actor/cell/Children.scala new file mode 100644 index 0000000000..eea7ed7508 --- /dev/null +++ b/akka-actor/src/main/scala/akka/actor/cell/Children.scala @@ -0,0 +1,188 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. 
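TypedProps now carries the implementation class via a ClassTag and instantiates it through Reflect.instantiator instead of ClassManifest and newInstance. A hedged sketch of creating and using a typed actor with that factory (Squarer/SquarerImpl are illustrative names, assuming the 2.1-era TypedActor extension API):

import akka.actor.{ ActorSystem, TypedActor, TypedProps }

// Hypothetical interface and implementation; the implementation needs a no-args constructor.
trait Squarer {
  def square(i: Int): Int
}
class SquarerImpl extends Squarer {
  def square(i: Int): Int = i * i
}

object TypedDemo extends App {
  val system = ActorSystem("typed-demo")
  // TypedProps[SquarerImpl]() resolves the runtime class from its ClassTag.
  val squarer: Squarer = TypedActor(system).typedActorOf(TypedProps[SquarerImpl]())
  println(squarer.square(4))                 // a synchronous proxy call, dispatched to the backing actor
  TypedActor(system).stop(squarer)
  system.shutdown()
}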
+ */ + +package akka.actor.cell + +import scala.annotation.tailrec +import scala.collection.JavaConverters.asJavaIterableConverter +import scala.util.control.NonFatal + +import akka.actor.{ RepointableRef, Props, NoSerializationVerificationNeeded, InvalidActorNameException, InternalActorRef, ChildRestartStats, ActorRef } +import akka.actor.ActorCell +import akka.actor.ActorPath.ElementRegex +import akka.serialization.SerializationExtension +import akka.util.{ Unsafe, Helpers } + +private[akka] trait Children { this: ActorCell ⇒ + + import ChildrenContainer._ + + @volatile + private var _childrenRefsDoNotCallMeDirectly: ChildrenContainer = EmptyChildrenContainer + + def childrenRefs: ChildrenContainer = + Unsafe.instance.getObjectVolatile(this, AbstractActorCell.childrenOffset).asInstanceOf[ChildrenContainer] + + final def children: Iterable[ActorRef] = childrenRefs.children + final def getChildren(): java.lang.Iterable[ActorRef] = children.asJava + + def actorOf(props: Props): ActorRef = makeChild(this, props, randomName(), async = false) + def actorOf(props: Props, name: String): ActorRef = makeChild(this, props, checkName(name), async = false) + private[akka] def attachChild(props: Props): ActorRef = makeChild(this, props, randomName(), async = true) + private[akka] def attachChild(props: Props, name: String): ActorRef = makeChild(this, props, checkName(name), async = true) + + @volatile private var _nextNameDoNotCallMeDirectly = 0L + final protected def randomName(): String = { + @tailrec def inc(): Long = { + val current = Unsafe.instance.getLongVolatile(this, AbstractActorCell.nextNameOffset) + if (Unsafe.instance.compareAndSwapLong(this, AbstractActorCell.nextNameOffset, current, current + 1)) current + else inc() + } + Helpers.base64(inc()) + } + + final def stop(actor: ActorRef): Unit = { + val started = actor match { + case r: RepointableRef ⇒ r.isStarted + case _ ⇒ true + } + if (childrenRefs.getByRef(actor).isDefined && started) shallDie(actor) + actor.asInstanceOf[InternalActorRef].stop() + } + + /* + * low level CAS helpers + */ + + @inline private def swapChildrenRefs(oldChildren: ChildrenContainer, newChildren: ChildrenContainer): Boolean = + Unsafe.instance.compareAndSwapObject(this, AbstractActorCell.childrenOffset, oldChildren, newChildren) + + @tailrec final protected def reserveChild(name: String): Boolean = { + val c = childrenRefs + swapChildrenRefs(c, c.reserve(name)) || reserveChild(name) + } + + @tailrec final protected def unreserveChild(name: String): Boolean = { + val c = childrenRefs + swapChildrenRefs(c, c.unreserve(name)) || unreserveChild(name) + } + + final protected def addChild(ref: ActorRef): Boolean = { + @tailrec def rec(): Boolean = { + val c = childrenRefs + swapChildrenRefs(c, c.add(ref)) || rec() + } + /* + * This does not need to check getByRef every tailcall, because the change + * cannot happen in that direction as a race: the only entity removing a + * child is the actor itself, and the only entity which could be racing is + * somebody who calls attachChild, and there we are guaranteed that that + * child cannot yet have died (since it has not yet been created). 
+ */ + if (childrenRefs.getByRef(ref).isEmpty) rec() else false + } + + @tailrec final protected def shallDie(ref: ActorRef): Boolean = { + val c = childrenRefs + swapChildrenRefs(c, c.shallDie(ref)) || shallDie(ref) + } + + @tailrec final private def removeChild(ref: ActorRef): ChildrenContainer = { + val c = childrenRefs + val n = c.remove(ref) + if (swapChildrenRefs(c, n)) n + else removeChild(ref) + } + + @tailrec final protected def setChildrenTerminationReason(reason: ChildrenContainer.SuspendReason): Boolean = { + childrenRefs match { + case c: ChildrenContainer.TerminatingChildrenContainer ⇒ + swapChildrenRefs(c, c.copy(reason = reason)) || setChildrenTerminationReason(reason) + case _ ⇒ false + } + } + + final protected def setTerminated(): Unit = Unsafe.instance.putObjectVolatile(this, AbstractActorCell.childrenOffset, TerminatedChildrenContainer) + + /* + * ActorCell-internal API + */ + + protected def isNormal = childrenRefs.isNormal + + protected def isTerminating = childrenRefs.isTerminating + + protected def suspendChildren(skip: Set[ActorRef] = Set.empty): Unit = + childrenRefs.stats foreach { + case ChildRestartStats(child, _, _) if !(skip contains child) ⇒ child.asInstanceOf[InternalActorRef].suspend() + case _ ⇒ + } + + protected def resumeChildren(): Unit = + childrenRefs.stats foreach (_.child.asInstanceOf[InternalActorRef].resume(inResponseToFailure = false)) + + def getChildByName(name: String): Option[ChildRestartStats] = childrenRefs.getByName(name) + + protected def getChildByRef(ref: ActorRef): Option[ChildRestartStats] = childrenRefs.getByRef(ref) + + protected def getAllChildStats: Iterable[ChildRestartStats] = childrenRefs.stats + + protected def removeChildAndGetStateChange(child: ActorRef): Option[SuspendReason] = { + childrenRefs match { + case TerminatingChildrenContainer(_, _, reason) ⇒ + val newContainer = removeChild(child) + if (!newContainer.isInstanceOf[TerminatingChildrenContainer]) Some(reason) else None + case _ ⇒ + removeChild(child) + None + } + } + + /* + * Private helpers + */ + + private def checkName(name: String): String = { + name match { + case null ⇒ throw new InvalidActorNameException("actor name must not be null") + case "" ⇒ throw new InvalidActorNameException("actor name must not be empty") + case ElementRegex() ⇒ name + case _ ⇒ throw new InvalidActorNameException("illegal actor name '" + name + "', must conform to " + ElementRegex) + } + } + + private def makeChild(cell: ActorCell, props: Props, name: String, async: Boolean): ActorRef = { + if (cell.system.settings.SerializeAllCreators && !props.creator.isInstanceOf[NoSerializationVerificationNeeded]) { + val ser = SerializationExtension(cell.system) + ser.serialize(props.creator) match { + case Left(t) ⇒ throw t + case Right(bytes) ⇒ ser.deserialize(bytes, props.creator.getClass) match { + case Left(t) ⇒ throw t + case _ ⇒ //All good + } + } + } + /* + * in case we are currently terminating, fail external attachChild requests + * (internal calls cannot happen anyway because we are suspended) + */ + if (cell.childrenRefs.isTerminating) throw new IllegalStateException("cannot create children while terminating or terminated") + else { + reserveChild(name) + // this name will either be unreserved or overwritten with a real child below + val actor = + try { + cell.provider.actorOf(cell.systemImpl, props, cell.self, cell.self.path / name, + systemService = false, deploy = None, lookupDeploy = true, async = async) + } catch { + case NonFatal(e) ⇒ + unreserveChild(name) + throw e + } + 
addChild(actor) + actor + } + } + +} \ No newline at end of file diff --git a/akka-actor/src/main/scala/akka/actor/cell/ChildrenContainer.scala b/akka-actor/src/main/scala/akka/actor/cell/ChildrenContainer.scala new file mode 100644 index 0000000000..98679862ba --- /dev/null +++ b/akka-actor/src/main/scala/akka/actor/cell/ChildrenContainer.scala @@ -0,0 +1,195 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ + +package akka.actor.cell + +import scala.collection.immutable.TreeMap + +import akka.actor.{ InvalidActorNameException, ChildStats, ChildRestartStats, ChildNameReserved, ActorRef } + +/** + * INTERNAL API + */ +private[akka] trait ChildrenContainer { + + def add(child: ActorRef): ChildrenContainer + def remove(child: ActorRef): ChildrenContainer + + def getByName(name: String): Option[ChildRestartStats] + def getByRef(actor: ActorRef): Option[ChildRestartStats] + + def children: Iterable[ActorRef] + def stats: Iterable[ChildRestartStats] + + def shallDie(actor: ActorRef): ChildrenContainer + + // reserve that name or throw an exception + def reserve(name: String): ChildrenContainer + // cancel a reservation + def unreserve(name: String): ChildrenContainer + + def isTerminating: Boolean = false + def isNormal: Boolean = true +} + +/** + * INTERNAL API + * + * This object holds the classes performing the logic of managing the children + * of an actor, hence they are intimately tied to ActorCell. + */ +private[akka] object ChildrenContainer { + + sealed trait SuspendReason + case object UserRequest extends SuspendReason + case class Recreation(cause: Throwable) extends SuspendReason + case object Termination extends SuspendReason + + trait EmptyChildrenContainer extends ChildrenContainer { + val emptyStats = TreeMap.empty[String, ChildStats] + override def add(child: ActorRef): ChildrenContainer = + new NormalChildrenContainer(emptyStats.updated(child.path.name, ChildRestartStats(child))) + override def remove(child: ActorRef): ChildrenContainer = this + override def getByName(name: String): Option[ChildRestartStats] = None + override def getByRef(actor: ActorRef): Option[ChildRestartStats] = None + override def children: Iterable[ActorRef] = Nil + override def stats: Iterable[ChildRestartStats] = Nil + override def shallDie(actor: ActorRef): ChildrenContainer = this + override def reserve(name: String): ChildrenContainer = new NormalChildrenContainer(emptyStats.updated(name, ChildNameReserved)) + override def unreserve(name: String): ChildrenContainer = this + } + + /** + * This is the empty container, shared among all leaf actors. + */ + object EmptyChildrenContainer extends EmptyChildrenContainer { + override def toString = "no children" + } + + /** + * This is the empty container which is installed after the last child has + * terminated while stopping; it is necessary to distinguish from the normal + * empty state while calling handleChildTerminated() for the last time. 
+ */ + object TerminatedChildrenContainer extends EmptyChildrenContainer { + override def add(child: ActorRef): ChildrenContainer = this + override def reserve(name: String): ChildrenContainer = + throw new IllegalStateException("cannot reserve actor name '" + name + "': already terminated") + override def isTerminating: Boolean = true + override def isNormal: Boolean = false + override def toString = "terminated" + } + + /** + * Normal children container: we do have at least one child, but none of our + * children are currently terminating (which is the time period between + * calling context.stop(child) and processing the ChildTerminated() system + * message). + */ + class NormalChildrenContainer(val c: TreeMap[String, ChildStats]) extends ChildrenContainer { + + override def add(child: ActorRef): ChildrenContainer = + new NormalChildrenContainer(c.updated(child.path.name, ChildRestartStats(child))) + + override def remove(child: ActorRef): ChildrenContainer = NormalChildrenContainer(c - child.path.name) + + override def getByName(name: String): Option[ChildRestartStats] = c.get(name) match { + case s @ Some(_: ChildRestartStats) ⇒ s.asInstanceOf[Option[ChildRestartStats]] + case _ ⇒ None + } + + override def getByRef(actor: ActorRef): Option[ChildRestartStats] = c.get(actor.path.name) match { + case c @ Some(crs: ChildRestartStats) if (crs.child == actor) ⇒ c.asInstanceOf[Option[ChildRestartStats]] + case _ ⇒ None + } + + override def children: Iterable[ActorRef] = c.values.view.collect { case ChildRestartStats(child, _, _) ⇒ child } + + override def stats: Iterable[ChildRestartStats] = c.values.view.collect { case c: ChildRestartStats ⇒ c } + + override def shallDie(actor: ActorRef): ChildrenContainer = TerminatingChildrenContainer(c, Set(actor), UserRequest) + + override def reserve(name: String): ChildrenContainer = + if (c contains name) + throw new InvalidActorNameException("actor name " + name + " is not unique!") + else new NormalChildrenContainer(c.updated(name, ChildNameReserved)) + + override def unreserve(name: String): ChildrenContainer = c.get(name) match { + case Some(ChildNameReserved) ⇒ NormalChildrenContainer(c - name) + case _ ⇒ this + } + + override def toString = + if (c.size > 20) c.size + " children" + else c.mkString("children:\n ", "\n ", "") + } + + object NormalChildrenContainer { + def apply(c: TreeMap[String, ChildStats]): ChildrenContainer = + if (c.isEmpty) EmptyChildrenContainer + else new NormalChildrenContainer(c) + } + + /** + * Waiting state: there are outstanding termination requests (i.e. context.stop(child) + * was called but the corresponding ChildTerminated() system message has not yet been + * processed). There could be no specific reason (UserRequested), we could be Restarting + * or Terminating. + * + * Removing the last child which was supposed to be terminating will return a different + * type of container, depending on whether or not children are left and whether or not + * the reason was “Terminating”. 
+ */ + case class TerminatingChildrenContainer(c: TreeMap[String, ChildStats], toDie: Set[ActorRef], reason: SuspendReason) + extends ChildrenContainer { + + override def add(child: ActorRef): ChildrenContainer = copy(c.updated(child.path.name, ChildRestartStats(child))) + + override def remove(child: ActorRef): ChildrenContainer = { + val t = toDie - child + if (t.isEmpty) reason match { + case Termination ⇒ TerminatedChildrenContainer + case _ ⇒ NormalChildrenContainer(c - child.path.name) + } + else copy(c - child.path.name, t) + } + + override def getByName(name: String): Option[ChildRestartStats] = c.get(name) match { + case s @ Some(_: ChildRestartStats) ⇒ s.asInstanceOf[Option[ChildRestartStats]] + case _ ⇒ None + } + + override def getByRef(actor: ActorRef): Option[ChildRestartStats] = c.get(actor.path.name) match { + case c @ Some(crs: ChildRestartStats) if (crs.child == actor) ⇒ c.asInstanceOf[Option[ChildRestartStats]] + case _ ⇒ None + } + + override def children: Iterable[ActorRef] = c.values.view.collect { case ChildRestartStats(child, _, _) ⇒ child } + + override def stats: Iterable[ChildRestartStats] = c.values.view.collect { case c: ChildRestartStats ⇒ c } + + override def shallDie(actor: ActorRef): ChildrenContainer = copy(toDie = toDie + actor) + + override def reserve(name: String): ChildrenContainer = reason match { + case Termination ⇒ throw new IllegalStateException("cannot reserve actor name '" + name + "': terminating") + case _ ⇒ + if (c contains name) + throw new InvalidActorNameException("actor name " + name + " is not unique!") + else copy(c = c.updated(name, ChildNameReserved)) + } + + override def unreserve(name: String): ChildrenContainer = c.get(name) match { + case Some(ChildNameReserved) ⇒ copy(c = c - name) + case _ ⇒ this + } + + override def isTerminating: Boolean = reason == Termination + override def isNormal: Boolean = reason == UserRequest + + override def toString = + if (c.size > 20) c.size + " children" + else c.mkString("children (" + toDie.size + " terminating):\n ", "\n ", "\n") + toDie + } + +} \ No newline at end of file diff --git a/akka-actor/src/main/scala/akka/actor/cell/DeathWatch.scala b/akka-actor/src/main/scala/akka/actor/cell/DeathWatch.scala new file mode 100644 index 0000000000..031019f3f6 --- /dev/null +++ b/akka-actor/src/main/scala/akka/actor/cell/DeathWatch.scala @@ -0,0 +1,95 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. 
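The containers above form a small state machine: empty → normal (children and/or reserved names) → terminating (outstanding stop requests) → terminated or back to normal. Since this is an INTERNAL API (private[akka]), the following walk-through only compiles inside the akka source tree; it is a sketch of the reserve/unreserve transitions used by makeChild:

package akka.actor.cell

// Compiles only within the akka package because ChildrenContainer is private[akka].
object ContainerStatesDemo extends App {
  val empty = ChildrenContainer.EmptyChildrenContainer
  val reserved = empty.reserve("worker")       // empty → NormalChildrenContainer holding ChildNameReserved
  println(reserved.getByName("worker"))        // None: a reservation is not yet ChildRestartStats
  val back = reserved.unreserve("worker")      // removing the last entry collapses back to the shared empty container
  println(back eq ChildrenContainer.EmptyChildrenContainer)  // true
}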
+ */ + +package akka.actor.cell + +import akka.actor.{ Terminated, InternalActorRef, ActorRef, ActorCell, Actor } +import akka.dispatch.{ Watch, Unwatch } +import akka.event.Logging.{ Warning, Error, Debug } +import scala.util.control.NonFatal + +private[akka] trait DeathWatch { this: ActorCell ⇒ + + private var watching: Set[ActorRef] = ActorCell.emptyActorRefSet + private var watchedBy: Set[ActorRef] = ActorCell.emptyActorRefSet + + override final def watch(subject: ActorRef): ActorRef = subject match { + case a: InternalActorRef ⇒ + if (a != self && !watching.contains(a)) { + a.sendSystemMessage(Watch(a, self)) // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ + watching += a + } + a + } + + override final def unwatch(subject: ActorRef): ActorRef = subject match { + case a: InternalActorRef ⇒ + if (a != self && watching.contains(a)) { + a.sendSystemMessage(Unwatch(a, self)) // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ + watching -= a + } + a + } + + protected def watchedActorTerminated(ref: ActorRef): Unit = watching -= ref + + protected def tellWatchersWeDied(actor: Actor): Unit = { + if (!watchedBy.isEmpty) { + val terminated = Terminated(self)(existenceConfirmed = true) + try { + watchedBy foreach { + watcher ⇒ + try watcher.tell(terminated, self) catch { + case NonFatal(t) ⇒ publish(Error(t, self.path.toString, clazz(actor), "deathwatch")) + } + } + } finally watchedBy = ActorCell.emptyActorRefSet + } + } + + protected def unwatchWatchedActors(actor: Actor): Unit = { + if (!watching.isEmpty) { + try { + watching foreach { // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ + case watchee: InternalActorRef ⇒ try watchee.sendSystemMessage(Unwatch(watchee, self)) catch { + case NonFatal(t) ⇒ publish(Error(t, self.path.toString, clazz(actor), "deathwatch")) + } + } + } finally watching = ActorCell.emptyActorRefSet + } + } + + protected def addWatcher(watchee: ActorRef, watcher: ActorRef): Unit = { + val watcheeSelf = watchee == self + val watcherSelf = watcher == self + + if (watcheeSelf && !watcherSelf) { + if (!watchedBy.contains(watcher)) { + watchedBy += watcher + if (system.settings.DebugLifecycle) publish(Debug(self.path.toString, clazz(actor), "now monitoring " + watcher)) + } + } else if (!watcheeSelf && watcherSelf) { + watch(watchee) + } else { + publish(Warning(self.path.toString, clazz(actor), "BUG: illegal Watch(%s,%s) for %s".format(watchee, watcher, self))) + } + } + + protected def remWatcher(watchee: ActorRef, watcher: ActorRef): Unit = { + val watcheeSelf = watchee == self + val watcherSelf = watcher == self + + if (watcheeSelf && !watcherSelf) { + if (watchedBy.contains(watcher)) { + watchedBy -= watcher + if (system.settings.DebugLifecycle) publish(Debug(self.path.toString, clazz(actor), "stopped monitoring " + watcher)) + } + } else if (!watcheeSelf && watcherSelf) { + unwatch(watchee) + } else { + publish(Warning(self.path.toString, clazz(actor), "BUG: illegal Unwatch(%s,%s) for %s".format(watchee, watcher, self))) + } + } + +} \ No newline at end of file diff --git a/akka-actor/src/main/scala/akka/actor/cell/Dispatch.scala b/akka-actor/src/main/scala/akka/actor/cell/Dispatch.scala new file mode 100644 index 0000000000..8c849366d8 --- /dev/null +++ b/akka-actor/src/main/scala/akka/actor/cell/Dispatch.scala @@ -0,0 +1,78 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. 
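The DeathWatch trait factored out above backs the public context.watch/unwatch API: watching another actor results in a Terminated message when it stops, delivered through tellWatchersWeDied. A minimal user-level sketch (Watcher/Watched are illustrative names):

import akka.actor.{ Actor, ActorSystem, PoisonPill, Props, Terminated }

// Hypothetical child that is stopped shortly after creation.
class Watched extends Actor {
  def receive = { case _ ⇒ () }
}

class Watcher extends Actor {
  val child = context.actorOf(Props[Watched], "watched")
  context.watch(child)                         // registers this actor via the internal Watch system message
  child ! PoisonPill

  def receive = {
    case Terminated(ref) ⇒                     // published by the dying child to everyone in watchedBy
      println("observed termination of " + ref)
      context.stop(self)
  }
}

object DeathWatchDemo extends App {
  val system = ActorSystem("watch-demo")
  system.actorOf(Props[Watcher], "watcher")
  Thread.sleep(1000)                           // crude wait for the Terminated round-trip in this sketch
  system.shutdown()
}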
+ */ + +package akka.actor.cell + +import scala.annotation.tailrec + +import akka.actor.{ ActorRef, ActorCell } +import akka.dispatch.{ Terminate, SystemMessage, Suspend, Resume, Recreate, MessageDispatcher, Mailbox, Envelope, Create } +import akka.util.Unsafe + +private[akka] trait Dispatch { this: ActorCell ⇒ + + @volatile private var _mailboxDoNotCallMeDirectly: Mailbox = _ //This must be volatile since it isn't protected by the mailbox status + + @inline final def mailbox: Mailbox = Unsafe.instance.getObjectVolatile(this, AbstractActorCell.mailboxOffset).asInstanceOf[Mailbox] + + @tailrec final def swapMailbox(newMailbox: Mailbox): Mailbox = { + val oldMailbox = mailbox + if (!Unsafe.instance.compareAndSwapObject(this, AbstractActorCell.mailboxOffset, oldMailbox, newMailbox)) swapMailbox(newMailbox) + else oldMailbox + } + + final def hasMessages: Boolean = mailbox.hasMessages + + final def numberOfMessages: Int = mailbox.numberOfMessages + + val dispatcher: MessageDispatcher = system.dispatchers.lookup(props.dispatcher) + + /** + * UntypedActorContext impl + */ + final def getDispatcher(): MessageDispatcher = dispatcher + + final def isTerminated: Boolean = mailbox.isClosed + + final def start(sendSupervise: Boolean): this.type = { + + /* + * Create the mailbox and enqueue the Create() message to ensure that + * this is processed before anything else. + */ + swapMailbox(dispatcher.createMailbox(this)) + mailbox.setActor(this) + + // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ + mailbox.systemEnqueue(self, Create()) + + if (sendSupervise) { + // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ + parent.sendSystemMessage(akka.dispatch.Supervise(self)) + } + + // This call is expected to start off the actor by scheduling its mailbox. + dispatcher.attach(this) + + this + } + + // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ + final def suspend(): Unit = dispatcher.systemDispatch(this, Suspend()) + + // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ + final def resume(inResponseToFailure: Boolean): Unit = dispatcher.systemDispatch(this, Resume(inResponseToFailure)) + + // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ + final def restart(cause: Throwable): Unit = dispatcher.systemDispatch(this, Recreate(cause)) + + // ➡➡➡ NEVER SEND THE SAME SYSTEM MESSAGE OBJECT TO TWO ACTORS ⬅⬅⬅ + final def stop(): Unit = dispatcher.systemDispatch(this, Terminate()) + + def tell(message: Any, sender: ActorRef): Unit = + dispatcher.dispatch(this, Envelope(message, if (sender eq null) system.deadLetters else sender, system)) + + override def sendSystemMessage(message: SystemMessage): Unit = dispatcher.systemDispatch(this, message) + +} \ No newline at end of file diff --git a/akka-actor/src/main/scala/akka/actor/cell/FaultHandling.scala b/akka-actor/src/main/scala/akka/actor/cell/FaultHandling.scala new file mode 100644 index 0000000000..fce6a22e6f --- /dev/null +++ b/akka-actor/src/main/scala/akka/actor/cell/FaultHandling.scala @@ -0,0 +1,210 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. 
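Since the cell resolves its dispatcher with system.dispatchers.lookup(props.dispatcher), which dispatcher an actor runs on is still selected per Props. A minimal sketch, assuming a dispatcher named "my-dispatcher" has been defined in configuration (the dispatcher name and the Echo class are made up for the example):

    import akka.actor.{ Actor, ActorSystem, Props }

    class Echo extends Actor {
      def receive = { case msg ⇒ sender ! msg }
    }

    object DispatcherSelection extends App {
      val system = ActorSystem("example")
      // props.dispatcher carries the configured key that lookup() resolves in start()
      val echo = system.actorOf(Props[Echo].withDispatcher("my-dispatcher"), "echo")
      system.shutdown()
    }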
+ */ + +package akka.actor.cell + +import scala.annotation.tailrec + +import akka.actor.{ PreRestartException, PostRestartException, InternalActorRef, Failed, ActorRef, ActorInterruptedException, ActorCell, Actor } +import akka.dispatch.{ Envelope, ChildTerminated } +import akka.event.Logging.{ Warning, Error, Debug } +import scala.util.control.NonFatal + +private[akka] trait FaultHandling { this: ActorCell ⇒ + + /* ================= + * T H E R U L E S + * ================= + * + * Actors can be suspended for two reasons: + * - they fail + * - their supervisor gets suspended + * + * In particular they are not suspended multiple times because of cascading + * own failures, i.e. while currentlyFailed() they do not fail again. In case + * of a restart, failures in constructor/preStart count as new failures. + */ + + private def suspendNonRecursive(): Unit = dispatcher suspend this + + private def resumeNonRecursive(): Unit = dispatcher resume this + + /* + * have we told our supervisor that we Failed() and have not yet heard back? + * (actually: we might have heard back but not yet acted upon it, in case of + * a restart with dying children) + * might well be replaced by ref to a Cancellable in the future (see #2299) + */ + private var _failed = false + private def isFailed: Boolean = _failed + private def setFailed(): Unit = _failed = true + private def clearFailed(): Unit = _failed = false + + /** + * Do re-create the actor in response to a failure. + */ + protected def faultRecreate(cause: Throwable): Unit = + if (isNormal) { + val failedActor = actor + if (system.settings.DebugLifecycle) publish(Debug(self.path.toString, clazz(failedActor), "restarting")) + if (failedActor ne null) { + val optionalMessage = if (currentMessage ne null) Some(currentMessage.message) else None + try { + // if the actor fails in preRestart, we can do nothing but log it: it’s best-effort + if (failedActor.context ne null) failedActor.preRestart(cause, optionalMessage) + } catch { + case NonFatal(e) ⇒ + val ex = new PreRestartException(self, e, cause, optionalMessage) + publish(Error(ex, self.path.toString, clazz(failedActor), e.getMessage)) + } finally { + clearActorFields(failedActor) + } + } + assert(mailbox.isSuspended, "mailbox must be suspended during restart, status=" + mailbox.status) + if (!setChildrenTerminationReason(ChildrenContainer.Recreation(cause))) finishRecreate(cause, failedActor) + } else { + // need to keep that suspend counter balanced + faultResume(inResponseToFailure = false) + } + + /** + * Do suspend the actor in response to a failure of a parent (i.e. the + * “recursive suspend” feature). + */ + protected def faultSuspend(): Unit = { + // done always to keep that suspend counter balanced + suspendNonRecursive() + suspendChildren() + } + + /** + * Do resume the actor in response to a failure. + * + * @param inResponseToFailure signifies if it was our own failure which + * prompted this action. 
+ */ + protected def faultResume(inResponseToFailure: Boolean): Unit = { + // done always to keep that suspend counter balanced + // must happen “atomically” + try resumeNonRecursive() + finally if (inResponseToFailure) clearFailed() + resumeChildren() + } + + protected def terminate() { + setReceiveTimeout(None) + cancelReceiveTimeout + + // stop all children, which will turn childrenRefs into TerminatingChildrenContainer (if there are children) + children foreach stop + + val wasTerminating = isTerminating + + if (setChildrenTerminationReason(ChildrenContainer.Termination)) { + if (!wasTerminating) { + // do not process normal messages while waiting for all children to terminate + suspendNonRecursive() + // do not propagate failures during shutdown to the supervisor + setFailed() + if (system.settings.DebugLifecycle) publish(Debug(self.path.toString, clazz(actor), "stopping")) + } + } else { + setTerminated() + finishTerminate() + } + } + + final def handleInvokeFailure(t: Throwable, message: String): Unit = { + publish(Error(t, self.path.toString, clazz(actor), message)) + // prevent any further messages to be processed until the actor has been restarted + if (!isFailed) try { + suspendNonRecursive() + setFailed() + // suspend children + val skip: Set[ActorRef] = currentMessage match { + case Envelope(Failed(`t`), child) ⇒ Set(child) + case _ ⇒ Set.empty + } + suspendChildren(skip) + // tell supervisor + t match { // Wrap InterruptedExceptions and rethrow + case _: InterruptedException ⇒ parent.tell(Failed(new ActorInterruptedException(t)), self); throw t + case _ ⇒ parent.tell(Failed(t), self) + } + } catch { + case NonFatal(e) ⇒ + publish(Error(e, self.path.toString, clazz(actor), "emergency stop: exception in failure handling")) + try children foreach stop + finally finishTerminate() + } + } + + private def finishTerminate() { + val a = actor + try if (a ne null) a.postStop() + finally try dispatcher.detach(this) + finally try parent.sendSystemMessage(ChildTerminated(self)) + finally try tellWatchersWeDied(a) + finally try unwatchWatchedActors(a) + finally { + if (system.settings.DebugLifecycle) + publish(Debug(self.path.toString, clazz(a), "stopped")) + clearActorFields(a) + actor = null + } + } + + private def finishRecreate(cause: Throwable, failedActor: Actor): Unit = try { + try resumeNonRecursive() + finally clearFailed() // must happen in any case, so that failure is propagated + + // need to keep a snapshot of the surviving children before the new actor instance creates new ones + val survivors = children + + val freshActor = newActor() + actor = freshActor // this must happen before postRestart has a chance to fail + if (freshActor eq failedActor) setActorFields(freshActor, this, self) // If the creator returns the same instance, we need to restore our nulled out fields. 
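+    // note: the fresh instance is installed before postRestart runs, so a failure thrown from postRestart surfaces as the PostRestartException handled in the catch clause below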
+ + freshActor.postRestart(cause) + if (system.settings.DebugLifecycle) publish(Debug(self.path.toString, clazz(freshActor), "restarted")) + + // only after parent is up and running again do restart the children which were not stopped + survivors foreach (child ⇒ + try child.asInstanceOf[InternalActorRef].restart(cause) + catch { + case NonFatal(e) ⇒ publish(Error(e, self.path.toString, clazz(freshActor), "restarting " + child)) + }) + } catch { + case NonFatal(e) ⇒ + clearActorFields(actor) // in order to prevent preRestart() from happening again + handleInvokeFailure(new PostRestartException(self, e, cause), e.getMessage) + } + + final protected def handleFailure(child: ActorRef, cause: Throwable): Unit = getChildByRef(child) match { + case Some(stats) ⇒ if (!actor.supervisorStrategy.handleFailure(this, child, cause, stats, getAllChildStats)) throw cause + case None ⇒ publish(Warning(self.path.toString, clazz(actor), "dropping Failed(" + cause + ") from unknown child " + child)) + } + + final protected def handleChildTerminated(child: ActorRef): Unit = { + val status = removeChildAndGetStateChange(child) + /* + * if this fails, we do nothing in case of terminating/restarting state, + * otherwise tell the supervisor etc. (in that second case, the match + * below will hit the empty default case, too) + */ + try actor.supervisorStrategy.handleChildTerminated(this, child, children) + catch { + case NonFatal(e) ⇒ handleInvokeFailure(e, "handleChildTerminated failed") + } + /* + * if the removal changed the state of the (terminating) children container, + * then we are continuing the previously suspended recreate/terminate action + */ + status match { + case Some(ChildrenContainer.Recreation(cause)) ⇒ finishRecreate(cause, actor) + case Some(ChildrenContainer.Termination) ⇒ finishTerminate() + case _ ⇒ + } + } +} diff --git a/akka-actor/src/main/scala/akka/actor/cell/ReceiveTimeout.scala b/akka-actor/src/main/scala/akka/actor/cell/ReceiveTimeout.scala new file mode 100644 index 0000000000..4fd46413ce --- /dev/null +++ b/akka-actor/src/main/scala/akka/actor/cell/ReceiveTimeout.scala @@ -0,0 +1,54 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. 
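handleFailure above hands the actual decision to the parent's supervisorStrategy. A minimal parent-side sketch, assuming the standard Akka 2.x OneForOneStrategy and directives (Parent and Worker are made-up names):

    import java.util.concurrent.TimeUnit
    import scala.concurrent.util.Duration
    import akka.actor.{ Actor, OneForOneStrategy, Props }
    import akka.actor.SupervisorStrategy.{ Restart, Stop }

    class Worker extends Actor {
      def receive = { case n: Int ⇒ sender ! (100 / n) } // n == 0 throws ArithmeticException
    }

    class Parent extends Actor {
      // consulted by handleFailure when a child sends Failed(cause)
      override val supervisorStrategy =
        OneForOneStrategy(maxNrOfRetries = 3, withinTimeRange = Duration.create(1, TimeUnit.MINUTES)) {
          case _: ArithmeticException   ⇒ Restart // goes through the faultRecreate path
          case _: IllegalStateException ⇒ Stop
        }

      val worker = context.actorOf(Props[Worker], "worker")
      def receive = { case n: Int ⇒ worker forward n }
    }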
+ */ + +package akka.actor.cell + +import ReceiveTimeout.emptyReceiveTimeoutData +import akka.actor.ActorCell +import akka.actor.ActorCell.emptyCancellable +import akka.actor.Cancellable +import scala.concurrent.util.Duration + +private[akka] object ReceiveTimeout { + final val emptyReceiveTimeoutData: (Duration, Cancellable) = (Duration.Undefined, ActorCell.emptyCancellable) +} + +private[akka] trait ReceiveTimeout { this: ActorCell ⇒ + + import ReceiveTimeout._ + import ActorCell._ + + private var receiveTimeoutData: (Duration, Cancellable) = emptyReceiveTimeoutData + + final def receiveTimeout: Option[Duration] = receiveTimeoutData._1 match { + case Duration.Undefined ⇒ None + case duration ⇒ Some(duration) + } + + final def setReceiveTimeout(timeout: Option[Duration]): Unit = setReceiveTimeout(timeout.getOrElse(Duration.Undefined)) + + final def setReceiveTimeout(timeout: Duration): Unit = + receiveTimeoutData = ( + if (Duration.Undefined == timeout || timeout.toMillis < 1) Duration.Undefined else timeout, + receiveTimeoutData._2) + + final def resetReceiveTimeout(): Unit = setReceiveTimeout(None) + + final def checkReceiveTimeout() { + val recvtimeout = receiveTimeoutData + if (Duration.Undefined != recvtimeout._1 && !mailbox.hasMessages) { + recvtimeout._2.cancel() //Cancel any ongoing future + //Only reschedule if desired and there are currently no more messages to be processed + receiveTimeoutData = (recvtimeout._1, system.scheduler.scheduleOnce(recvtimeout._1, self, akka.actor.ReceiveTimeout)) + } else cancelReceiveTimeout() + + } + + final def cancelReceiveTimeout(): Unit = + if (receiveTimeoutData._2 ne emptyCancellable) { + receiveTimeoutData._2.cancel() + receiveTimeoutData = (receiveTimeoutData._1, emptyCancellable) + } + +} \ No newline at end of file diff --git a/akka-actor/src/main/scala/akka/actor/package.scala b/akka-actor/src/main/scala/akka/actor/package.scala index 3bf56b8bc4..deb9f0e7a9 100644 --- a/akka-actor/src/main/scala/akka/actor/package.scala +++ b/akka-actor/src/main/scala/akka/actor/package.scala @@ -4,6 +4,8 @@ package akka +import language.implicitConversions + package object actor { implicit def actorRef2Scala(ref: ActorRef): ScalaActorRef = ref.asInstanceOf[ScalaActorRef] implicit def scala2ActorRef(ref: ScalaActorRef): ActorRef = ref.asInstanceOf[ActorRef] diff --git a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala index 546373c33f..fff56a3776 100644 --- a/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/AbstractDispatcher.scala @@ -5,16 +5,17 @@ package akka.dispatch import java.util.concurrent._ -import akka.event.Logging.Error +import akka.event.Logging.{ Error, LogEventException } import akka.actor._ -import akka.actor.ActorSystem -import scala.annotation.tailrec import akka.event.EventStream import com.typesafe.config.Config import akka.serialization.SerializationExtension -import akka.event.Logging.LogEventException -import akka.jsr166y.{ ForkJoinTask, ForkJoinPool } -import akka.util.{ Unsafe, Duration, NonFatal, Index } +import akka.util.{ Unsafe, Index } +import scala.annotation.tailrec +import scala.concurrent.forkjoin.{ ForkJoinTask, ForkJoinPool } +import scala.concurrent.util.Duration +import scala.concurrent.{ ExecutionContext, Await, Awaitable } +import scala.util.control.NonFatal final case class Envelope private (val message: Any, val sender: ActorRef) @@ -89,7 +90,7 @@ private[akka] case 
class Suspend() extends SystemMessage // sent to self from Ac /** * INTERNAL API */ -private[akka] case class Resume() extends SystemMessage // sent to self from ActorCell.resume +private[akka] case class Resume(inResponseToFailure: Boolean) extends SystemMessage // sent to self from ActorCell.resume /** * INTERNAL API */ @@ -122,88 +123,6 @@ final case class TaskInvocation(eventStream: EventStream, runnable: Runnable, cl } finally cleanup() } -/** - * Java API to create ExecutionContexts - */ -object ExecutionContexts { - - /** - * Creates an ExecutionContext from the given ExecutorService - */ - def fromExecutorService(e: ExecutorService): ExecutionContextExecutorService = - new ExecutionContext.WrappedExecutorService(e) - - /** - * Creates an ExecutionContext from the given Executor - */ - def fromExecutor(e: Executor): ExecutionContextExecutor = - new ExecutionContext.WrappedExecutor(e) -} - -object ExecutionContext { - implicit def defaultExecutionContext(implicit system: ActorSystem): ExecutionContext = system.dispatcher - - /** - * Creates an ExecutionContext from the given ExecutorService - */ - def fromExecutorService(e: ExecutorService): ExecutionContext with ExecutorService = new WrappedExecutorService(e) - - /** - * Creates an ExecutionContext from the given Executor - */ - def fromExecutor(e: Executor): ExecutionContext with Executor = new WrappedExecutor(e) - - /** - * Internal Akka use only - */ - private[akka] class WrappedExecutorService(val executor: ExecutorService) extends ExecutorServiceDelegate with ExecutionContextExecutorService { - override def reportFailure(t: Throwable): Unit = t match { - case e: LogEventException ⇒ e.getCause.printStackTrace() - case _ ⇒ t.printStackTrace() - } - } - - /** - * Internal Akka use only - */ - private[akka] class WrappedExecutor(val executor: Executor) extends ExecutionContextExecutor { - override final def execute(runnable: Runnable): Unit = executor.execute(runnable) - override def reportFailure(t: Throwable): Unit = t match { - case e: LogEventException ⇒ e.getCause.printStackTrace() - case _ ⇒ t.printStackTrace() - } - } -} - -/** - * Union interface since Java does not support union types - */ -trait ExecutionContextExecutor extends ExecutionContext with Executor - -/** - * Union interface since Java does not support union types - */ -trait ExecutionContextExecutorService extends ExecutionContextExecutor with ExecutorService - -/** - * An ExecutionContext is essentially the same thing as a java.util.concurrent.Executor - * This interface/trait exists to decouple the concept of execution from Actors & MessageDispatchers - * It is also needed to provide a fallback implicit default instance (in the companion object). - */ -trait ExecutionContext { - - /** - * Submits the runnable for execution - */ - def execute(runnable: Runnable): Unit - - /** - * Failed tasks should call reportFailure to let the ExecutionContext - * log the problem or whatever is appropriate for the implementation. 
- */ - def reportFailure(t: Throwable): Unit -} - /** * INTERNAL API */ @@ -226,8 +145,7 @@ private[akka] object MessageDispatcher { def printActors: Unit = if (debug) { for { d ← actors.keys - val c = println(d + " inhabitants: " + d.inhabitants) - a ← actors.valueIterator(d) + a ← { println(d + " inhabitants: " + d.inhabitants); actors.valueIterator(d) } } { val status = if (a.isTerminated) " (terminated)" else " (alive)" val messages = a match { @@ -289,25 +207,21 @@ abstract class MessageDispatcher(val prerequisites: DispatcherPrerequisites) ext /** * Detaches the specified actor instance from this dispatcher */ - final def detach(actor: ActorCell): Unit = try { - unregister(actor) - } finally { - ifSensibleToDoSoThenScheduleShutdown() - } + final def detach(actor: ActorCell): Unit = try unregister(actor) finally ifSensibleToDoSoThenScheduleShutdown() - final def execute(runnable: Runnable): Unit = { + final override def execute(runnable: Runnable): Unit = { val invocation = TaskInvocation(eventStream, runnable, taskCleanup) addInhabitants(+1) try { executeTask(invocation) } catch { - case t ⇒ + case t: Throwable ⇒ addInhabitants(-1) throw t } } - def reportFailure(t: Throwable): Unit = t match { + override def reportFailure(t: Throwable): Unit = t match { case e: LogEventException ⇒ prerequisites.eventStream.publish(e.event) case _ ⇒ prerequisites.eventStream.publish(Error(t, getClass.getName, getClass, t.getMessage)) } @@ -392,7 +306,7 @@ abstract class MessageDispatcher(val prerequisites: DispatcherPrerequisites) ext def suspend(actor: ActorCell): Unit = { val mbox = actor.mailbox if ((mbox.actor eq actor) && (mbox.dispatcher eq this)) - mbox.becomeSuspended() + mbox.suspend() } /* @@ -400,7 +314,7 @@ abstract class MessageDispatcher(val prerequisites: DispatcherPrerequisites) ext */ def resume(actor: ActorCell): Unit = { val mbox = actor.mailbox - if ((mbox.actor eq actor) && (mbox.dispatcher eq this) && mbox.becomeOpen()) + if ((mbox.actor eq actor) && (mbox.dispatcher eq this) && mbox.resume()) registerForExecution(mbox, false, false) } @@ -576,7 +490,7 @@ object ForkJoinExecutorConfigurator { final override def setRawResult(u: Unit): Unit = () final override def getRawResult(): Unit = () final override def exec(): Boolean = try { mailbox.run; true } catch { - case anything ⇒ + case anything: Throwable ⇒ val t = Thread.currentThread t.getUncaughtExceptionHandler match { case null ⇒ diff --git a/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala index 5b8c5209b0..3897027d9b 100644 --- a/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/BalancingDispatcher.scala @@ -5,8 +5,9 @@ package akka.dispatch import akka.actor.{ ActorCell, ActorRef } -import annotation.tailrec -import akka.util.{ Duration, Helpers } +import scala.annotation.tailrec +import scala.concurrent.util.Duration +import akka.util.Helpers import java.util.{ Comparator, Iterator } import java.util.concurrent.{ Executor, LinkedBlockingQueue, ConcurrentLinkedQueue, ConcurrentSkipListSet } import akka.actor.ActorSystemImpl diff --git a/akka-actor/src/main/scala/akka/dispatch/BatchingExecutor.scala b/akka-actor/src/main/scala/akka/dispatch/BatchingExecutor.scala new file mode 100644 index 0000000000..d0092d77e0 --- /dev/null +++ b/akka-actor/src/main/scala/akka/dispatch/BatchingExecutor.scala @@ -0,0 +1,116 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. 
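With execute and reportFailure now overriding members of scala.concurrent.ExecutionContext, a dispatcher can be handed to the standard library directly. A small sketch, assuming an ordinary ActorSystem (object and value names are illustrative):

    import java.util.concurrent.TimeUnit
    import scala.concurrent.{ Await, ExecutionContext, Future }
    import scala.concurrent.util.Duration
    import akka.actor.ActorSystem

    object DispatcherAsExecutionContext extends App {
      val system = ActorSystem("example")
      implicit val ec: ExecutionContext = system.dispatcher // a MessageDispatcher is an ExecutionContext

      val f: Future[Int] = Future { 21 * 2 } // scheduled via the dispatcher's execute()
      println(Await.result(f, Duration.create(3, TimeUnit.SECONDS))) // blocking here only for illustration
      system.shutdown()
    }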
+ */ + +package akka.dispatch + +import java.util.concurrent.{ Executor } +import scala.concurrent._ +import scala.annotation.tailrec + +/** + * All Batchables are automatically batched when submitted to a BatchingExecutor + */ +private[akka] trait Batchable extends Runnable + +/** + * Mixin trait for an Executor + * which groups multiple nested `Runnable.run()` calls + * into a single Runnable passed to the original + * Executor. This can be a useful optimization + * because it bypasses the original context's task + * queue and keeps related (nested) code on a single + * thread which may improve CPU affinity. However, + * if tasks passed to the Executor are blocking + * or expensive, this optimization can prevent work-stealing + * and make performance worse. Also, some ExecutionContext + * may be fast enough natively that this optimization just + * adds overhead. + * The default ExecutionContext.global is already batching + * or fast enough not to benefit from it; while + * `fromExecutor` and `fromExecutorService` do NOT add + * this optimization since they don't know whether the underlying + * executor will benefit from it. + * A batching executor can create deadlocks if code does + * not use `scala.concurrent.blocking` when it should, + * because tasks created within other tasks will block + * on the outer task completing. + * This executor may run tasks in any order, including LIFO order. + * There are no ordering guarantees. + * + * WARNING: The underlying Executor's execute-method must not execute the submitted Runnable + * in the calling thread synchronously. It must enqueue/handoff the Runnable. + */ +private[akka] trait BatchingExecutor extends Executor { + + // invariant: if "_tasksLocal.get ne null" then we are inside BatchingRunnable.run; if it is null, we are outside + private val _tasksLocal = new ThreadLocal[List[Runnable]]() + + private class Batch(val initial: List[Runnable]) extends Runnable with BlockContext { + private var parentBlockContext: BlockContext = _ + // this method runs in the delegate ExecutionContext's thread + override def run(): Unit = { + require(_tasksLocal.get eq null) + + val prevBlockContext = BlockContext.current + BlockContext.withBlockContext(this) { + try { + parentBlockContext = prevBlockContext + + @tailrec def processBatch(batch: List[Runnable]): Unit = batch match { + case Nil ⇒ () + case head :: tail ⇒ + _tasksLocal set tail + try { + head.run() + } catch { + case t: Throwable ⇒ + // if one task throws, move the + // remaining tasks to another thread + // so we can throw the exception + // up to the invoking executor + val remaining = _tasksLocal.get + _tasksLocal set Nil + unbatchedExecute(new Batch(remaining)) //TODO what if this submission fails? + throw t // rethrow + } + processBatch(_tasksLocal.get) // since head.run() can add entries, always do _tasksLocal.get here + } + + processBatch(initial) + } finally { + _tasksLocal.remove() + parentBlockContext = null + } + } + } + + override def blockOn[T](thunk: ⇒ T)(implicit permission: CanAwait): T = { + // if we know there will be blocking, we don't want to keep tasks queued up because it could deadlock. 
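+      // (illustration) user code that may block inside a batched task should mark it, e.g.
+      //   scala.concurrent.blocking { someBlockingCall() }   // someBlockingCall is a placeholder
+      // which routes through this blockOn and hands any queued batch off before the thread parks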
+ { + val tasks = _tasksLocal.get + _tasksLocal set Nil + if ((tasks ne null) && tasks.nonEmpty) + unbatchedExecute(new Batch(tasks)) + } + + // now delegate the blocking to the previous BC + require(parentBlockContext ne null) + parentBlockContext.blockOn(thunk) + } + } + + protected def unbatchedExecute(r: Runnable): Unit = super.execute(r) + + abstract override def execute(runnable: Runnable): Unit = { + if (batchable(runnable)) { // If we can batch the runnable + _tasksLocal.get match { + case null ⇒ unbatchedExecute(new Batch(List(runnable))) // If we aren't in batching mode yet, enqueue batch + case some ⇒ _tasksLocal.set(runnable :: some) // If we are already in batching mode, add to batch + } + } else unbatchedExecute(runnable) // If not batchable, just delegate to underlying + } + + /** Override this to define which runnables will be batched. */ + def batchable(runnable: Runnable): Boolean = runnable.isInstanceOf[Batchable] +} diff --git a/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala index d382cc5ecc..21f4612750 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Dispatcher.scala @@ -5,11 +5,13 @@ package akka.dispatch import akka.event.Logging.Error -import java.util.concurrent.atomic.AtomicReference import akka.actor.ActorCell -import akka.util.Duration -import java.util.concurrent._ import akka.event.Logging +import java.util.concurrent.atomic.AtomicReference +import java.util.concurrent.{ ExecutorService, RejectedExecutionException } +import scala.concurrent.forkjoin.ForkJoinPool +import scala.concurrent.util.Duration +import scala.concurrent.Awaitable /** * The event-based ``Dispatcher`` binds a set of Actors to a thread pool backed up by a diff --git a/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala b/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala index e148129bce..1e6dbc8546 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Dispatchers.scala @@ -9,7 +9,7 @@ import com.typesafe.config.{ ConfigFactory, Config } import akka.actor.{ Scheduler, DynamicAccess, ActorSystem } import akka.event.Logging.Warning import akka.event.EventStream -import akka.util.Duration +import scala.concurrent.util.Duration /** * DispatcherPrerequisites represents useful contextual pieces when constructing a MessageDispatcher diff --git a/akka-actor/src/main/scala/akka/dispatch/Future.scala b/akka-actor/src/main/scala/akka/dispatch/Future.scala index e29f1b8dae..80d57a6dca 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Future.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Future.scala @@ -4,75 +4,63 @@ package akka.dispatch -import akka.event.Logging.Error -import scala.Option -import akka.japi.{ Function ⇒ JFunc, Option ⇒ JOption } -import scala.util.continuations._ +import scala.runtime.{ BoxedUnit, AbstractPartialFunction } +import akka.japi.{ Function ⇒ JFunc, Option ⇒ JOption, Procedure } +import scala.concurrent.{ Future, Promise, ExecutionContext, ExecutionContextExecutor, ExecutionContextExecutorService } import java.lang.{ Iterable ⇒ JIterable } import java.util.{ LinkedList ⇒ JLinkedList } -import scala.annotation.tailrec -import scala.collection.mutable.Stack -import akka.util.{ Duration, BoxedType } -import akka.dispatch.Await.CanAwait -import akka.util.NonFatal -import akka.event.Logging.LogEventException -import akka.event.Logging.Debug -import 
java.util.concurrent.TimeUnit.NANOSECONDS -import java.util.concurrent.{ ExecutionException, Callable, TimeoutException } -import java.util.concurrent.atomic.{ AtomicInteger } -import akka.pattern.AskTimeoutException -import scala.util.DynamicVariable -import scala.runtime.BoxedUnit - -object Await { +import java.util.concurrent.{ Executor, ExecutorService, ExecutionException, Callable, TimeoutException } +/** + * ExecutionContexts is the Java API for ExecutionContexts + */ +object ExecutionContexts { /** - * Internal Akka use only - */ - sealed trait CanAwait - - /** - * Classes that implement Awaitable can be used with Await, - * this is used to do blocking operations (blocking in the "pause this thread" sense) - */ - trait Awaitable[+T] { - /** - * Should throw [[java.util.concurrent.TimeoutException]] if times out - * This method should not be called directly. - */ - @throws(classOf[TimeoutException]) - def ready(atMost: Duration)(implicit permit: CanAwait): this.type - - /** - * Throws exceptions if cannot produce a T within the specified time - * This method should not be called directly. - */ - @throws(classOf[Exception]) - def result(atMost: Duration)(implicit permit: CanAwait): T - } - - private[this] implicit final val permit = new CanAwait {} - - /** - * Blocks the current Thread to wait for the given awaitable to be ready. - * WARNING: Blocking operation, use with caution. + * Returns a new ExecutionContextExecutor which will delegate execution to the underlying Executor, + * and which will use the default error reporter. * - * @throws [[java.util.concurrent.TimeoutException]] if times out - * @return The returned value as returned by Awaitable.ready + * @param executor the Executor which will be used for the ExecutionContext + * @return a new ExecutionContext */ - @throws(classOf[TimeoutException]) - def ready[T <: Awaitable[_]](awaitable: T, atMost: Duration): T = awaitable.ready(atMost) + def fromExecutor(executor: Executor): ExecutionContextExecutor = + ExecutionContext.fromExecutor(executor) /** - * Blocks the current Thread to wait for the given awaitable to have a result. - * WARNING: Blocking operation, use with caution. + * Returns a new ExecutionContextExecutor which will delegate execution to the underlying Executor, + * and which will use the provided error reporter. * - * @throws [[java.util.concurrent.TimeoutException]] if times out - * @throws [[java.lang.Throwable]] (throws clause is Exception due to Java) if there was a problem - * @return The returned value as returned by Awaitable.result + * @param executor the Executor which will be used for the ExecutionContext + * @param errorReporter a Procedure that will log any exceptions passed to it + * @return a new ExecutionContext */ - @throws(classOf[Exception]) - def result[T](awaitable: Awaitable[T], atMost: Duration): T = awaitable.result(atMost) + def fromExecutor(executor: Executor, errorReporter: Procedure[Throwable]): ExecutionContextExecutor = + ExecutionContext.fromExecutor(executor, errorReporter.apply) + + /** + * Returns a new ExecutionContextExecutorService which will delegate execution to the underlying ExecutorService, + * and which will use the default error reporter. 
+ * + * @param executor the ExecutorService which will be used for the ExecutionContext + * @return a new ExecutionContext + */ + def fromExecutorService(executorService: ExecutorService): ExecutionContextExecutorService = + ExecutionContext.fromExecutorService(executorService) + + /** + * Returns a new ExecutionContextExecutorService which will delegate execution to the underlying ExecutorService, + * and which will use the provided error reporter. + * + * @param executor the ExecutorService which will be used for the ExecutionContext + * @param errorReporter a Procedure that will log any exceptions passed to it + * @return a new ExecutionContext + */ + def fromExecutorService(executorService: ExecutorService, errorReporter: Procedure[Throwable]): ExecutionContextExecutorService = + ExecutionContext.fromExecutorService(executorService, errorReporter.apply) + + /** + * @return a reference to the global ExecutionContext + */ + def global(): ExecutionContext = ExecutionContext.global } /** @@ -88,23 +76,24 @@ object Futures { /** * Java API, equivalent to Promise.apply */ - def promise[T](executor: ExecutionContext): Promise[T] = Promise[T]()(executor) + def promise[T](): Promise[T] = Promise[T]() /** * Java API, creates an already completed Promise with the specified exception */ - def failed[T](exception: Throwable, executor: ExecutionContext): Promise[T] = Promise.failed(exception)(executor) + def failed[T](exception: Throwable): Future[T] = Future.failed(exception) /** * Java API, Creates an already completed Promise with the specified result */ - def successful[T](result: T, executor: ExecutionContext): Promise[T] = Promise.successful(result)(executor) + def successful[T](result: T): Future[T] = Future.successful(result) /** * Java API. * Returns a Future that will hold the optional result of the first Future with a result that matches the predicate */ def find[T <: AnyRef](futures: JIterable[Future[T]], predicate: JFunc[T, java.lang.Boolean], executor: ExecutionContext): Future[JOption[T]] = { + implicit val ec = executor Future.find[T]((scala.collection.JavaConversions.iterableAsScalaIterable(futures)))(predicate.apply(_))(executor).map(JOption.fromScalaOption(_)) } @@ -159,786 +148,6 @@ object Futures { for (r ← fr; b ← fb) yield { r add b; r } } } - - /** - * Signals that the current thread of execution will potentially engage - * an action that will take a non-trivial amount of time, perhaps by using blocking.IO or using a lot of CPU time, - * giving the system a chance to spawn new threads, reuse old threads or otherwise, - * to prevent starvation and/or unfairness. - * - * Assures that any Future tasks initiated in the current thread will be - * executed asynchronously, including any tasks currently queued to be - * executed in the current thread. This is needed if the current task may - * block, causing delays in executing the remaining tasks which in some - * cases may cause a deadlock. - * - * Usage: Call this method in a callback (map, flatMap etc also count) to a Future, - * if you will be doing blocking in the callback. - * - * Note: Calling 'Await.result(future)' or 'Await.ready(future)' will automatically trigger this method. - * - */ - def blocking(): Unit = Future.blocking() -} - -object Future { - - /** - * This method constructs and returns a Future that will eventually hold the result of the execution of the supplied body - * The execution is performed by the specified Dispatcher. 
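For callers of the Java API above, the visible effect is that Futures.promise, Futures.failed and Futures.successful no longer take an executor, and the read side of a promise is obtained explicitly now that Promise no longer extends Future. A brief sketch of the new shape (object and value names are illustrative):

    import akka.dispatch.Futures
    import scala.concurrent.{ Future, Promise }

    object PromiseSketch {
      val p: Promise[String] = Futures.promise[String]() // no executor argument any more
      val f: Future[String]  = p.future                  // Promise and Future are now distinct
      p.success("done")
    }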
- */ - def apply[T](body: ⇒ T)(implicit executor: ExecutionContext): Future[T] = { - val promise = Promise[T]() - executor.execute(new Runnable { - def run = - promise complete { - try { - Right(body) - } catch { - case NonFatal(e) ⇒ - executor.reportFailure(new LogEventException(Debug("Future", getClass, e.getMessage), e)) - Left(e) - } - } - }) - promise - } - - import scala.collection.mutable.Builder - import scala.collection.generic.CanBuildFrom - - /** - * Simple version of Futures.traverse. Transforms a Traversable[Future[A]] into a Future[Traversable[A]]. - * Useful for reducing many Futures into a single Future. - */ - def sequence[A, M[_] <: Traversable[_]](in: M[Future[A]])(implicit cbf: CanBuildFrom[M[Future[A]], A, M[A]], executor: ExecutionContext): Future[M[A]] = - in.foldLeft(Promise.successful(cbf(in)): Future[Builder[A, M[A]]])((fr, fa) ⇒ for (r ← fr; a ← fa.asInstanceOf[Future[A]]) yield (r += a)).map(_.result) - - /** - * Returns a Future to the result of the first future in the list that is completed - */ - def firstCompletedOf[T](futures: Traversable[Future[T]])(implicit executor: ExecutionContext): Future[T] = { - val futureResult = Promise[T]() - - val completeFirst: Either[Throwable, T] ⇒ Unit = futureResult tryComplete _ - futures.foreach(_ onComplete completeFirst) - - futureResult - } - - /** - * Returns a Future that will hold the optional result of the first Future with a result that matches the predicate - */ - def find[T](futures: Traversable[Future[T]])(predicate: T ⇒ Boolean)(implicit executor: ExecutionContext): Future[Option[T]] = { - if (futures.isEmpty) Promise.successful[Option[T]](None) - else { - val result = Promise[Option[T]]() - val ref = new AtomicInteger(futures.size) - val search: Either[Throwable, T] ⇒ Unit = v ⇒ try { - v match { - case Right(r) ⇒ if (predicate(r)) result tryComplete Right(Some(r)) - case _ ⇒ - } - } finally { - if (ref.decrementAndGet == 0) - result tryComplete Right(None) - } - - futures.foreach(_ onComplete search) - - result - } - } - - /** - * A non-blocking fold over the specified futures, with the start value of the given zero. - * The fold is performed on the thread where the last future is completed, - * the result will be the first failure of any of the futures, or any failure in the actual fold, - * or the result of the fold. - * Example: - *

-   *   val result = Await.result(Future.fold(futures)(0)(_ + _), 5 seconds)
-   * 
- */ - def fold[T, R](futures: Traversable[Future[T]])(zero: R)(foldFun: (R, T) ⇒ R)(implicit executor: ExecutionContext): Future[R] = { - if (futures.isEmpty) Promise.successful(zero) - else sequence(futures).map(_.foldLeft(zero)(foldFun)) - } - - /** - * Reduces the results of the supplied futures and binary operation. - * Example: - *
-   *   val result = Await.result(Futures.reduce(futures)(_ + _), 5 seconds)
-   * 
- */ - def reduce[T, R >: T](futures: Traversable[Future[T]])(op: (R, T) ⇒ R)(implicit executor: ExecutionContext): Future[R] = { - if (futures.isEmpty) Promise[R].failure(new NoSuchElementException("reduce attempted on empty collection")) - else sequence(futures).map(_ reduceLeft op) - } - /** - * Transforms a Traversable[A] into a Future[Traversable[B]] using the provided Function A ⇒ Future[B]. - * This is useful for performing a parallel map. For example, to apply a function to all items of a list - * in parallel: - *
-   * val myFutureList = Future.traverse(myList)(x ⇒ Future(myFunc(x)))
-   * 
- */ - def traverse[A, B, M[_] <: Traversable[_]](in: M[A])(fn: A ⇒ Future[B])(implicit cbf: CanBuildFrom[M[A], B, M[B]], executor: ExecutionContext): Future[M[B]] = - in.foldLeft(Promise.successful(cbf(in)): Future[Builder[B, M[B]]]) { (fr, a) ⇒ - val fb = fn(a.asInstanceOf[A]) - for (r ← fr; b ← fb) yield (r += b) - }.map(_.result) - - /** - * Captures a block that will be transformed into 'Continuation Passing Style' using Scala's Delimited - * Continuations plugin. - * - * Within the block, the result of a Future may be accessed by calling Future.apply. At that point - * execution is suspended with the rest of the block being stored in a continuation until the result - * of the Future is available. If an Exception is thrown while processing, it will be contained - * within the resulting Future. - * - * This allows working with Futures in an imperative style without blocking for each result. - * - * Completing a Future using 'Promise << Future' will also suspend execution until the - * value of the other Future is available. - * - * The Delimited Continuations compiler plugin must be enabled in order to use this method. - */ - def flow[A](body: ⇒ A @cps[Future[Any]])(implicit executor: ExecutionContext): Future[A] = { - val p = Promise[A] - dispatchTask({ () ⇒ - try { - (reify(body) foreachFull (p success, p failure): Future[Any]) onFailure { - case NonFatal(e) ⇒ p tryComplete Left(e) - } - } catch { - case NonFatal(e) ⇒ p tryComplete Left(e) - } - }, true) - p.future - } - - /** - * Signals that the current thread of execution will potentially engage - * an action that will take a non-trivial amount of time, perhaps by using blocking.IO or using a lot of CPU time, - * giving the system a chance to spawn new threads, reuse old threads or otherwise, - * to prevent starvation and/or unfairness. - * - * Assures that any Future tasks initiated in the current thread will be - * executed asynchronously, including any tasks currently queued to be - * executed in the current thread. This is needed if the current task may - * block, causing delays in executing the remaining tasks which in some - * cases may cause a deadlock. - * - * Note: Calling 'Await.result(future)' or 'Await.ready(future)' will automatically trigger this method. - * - * For example, in the following block of code the call to 'latch.open' - * might not be executed until after the call to 'latch.await', causing - * a deadlock. By adding 'Future.blocking()' the call to 'latch.open' - * will instead be dispatched separately from the current block, allowing - * it to be run in parallel: - *
-   * val latch = new StandardLatch
-   * val future = Future() map { _ ⇒
-   *   Future.blocking()
-   *   val nested = Future()
-   *   nested foreach (_ ⇒ latch.open)
-   *   latch.await
-   * }
-   * 
- */ - def blocking(): Unit = - _taskStack.get match { - case stack if (stack ne null) && stack.nonEmpty ⇒ - val executionContext = _executionContext.value match { - case null ⇒ throw new IllegalStateException("'blocking' needs to be invoked inside a Future callback.") - case some ⇒ some - } - val tasks = stack.elems - stack.clear() - _taskStack.remove() - dispatchTask(() ⇒ _taskStack.get.elems = tasks, true)(executionContext) - case _ ⇒ _taskStack.remove() - } - - private val _taskStack = new ThreadLocal[Stack[() ⇒ Unit]]() - private val _executionContext = new DynamicVariable[ExecutionContext](null) - - /** - * Internal API, do not call - */ - private[akka] def dispatchTask(task: () ⇒ Unit, force: Boolean = false)(implicit executor: ExecutionContext): Unit = - _taskStack.get match { - case stack if (stack ne null) && (executor eq _executionContext.value) && !force ⇒ stack push task - case _ ⇒ executor.execute( - new Runnable { - def run = - try { - _executionContext.withValue(executor) { - val taskStack = Stack.empty[() ⇒ Unit] - taskStack push task - _taskStack set taskStack - - while (taskStack.nonEmpty) { - val next = taskStack.pop() - try { - next.apply() - } catch { - case NonFatal(e) ⇒ executor.reportFailure(e) - } - } - } - } finally { - _taskStack.remove() - } - }) - } - -} - -/** - * Trait representing a value that may not have been computed yet. - * - * @define asyncCallbackWarning - * - * Note: the callback function may (and probably will) run in another thread, - * and therefore should not refer to any unsynchronized state. In - * particular, if using this method from an actor, do not access - * the state of the actor from the callback function. - * [[akka.dispatch.Promise]].`completeWith`, - * [[akka.pattern.PipeToSupport.PipeableFuture]].`pipeTo`, - * and [[akka.dispatch.Future]].`fallbackTo` are some methods to consider - * using when possible, to avoid concurrent callbacks. - */ -sealed trait Future[+T] extends Await.Awaitable[T] { - - protected implicit def executor: ExecutionContext - - protected final def resolve[X](source: Either[Throwable, X]): Either[Throwable, X] = source match { - case Left(t: scala.runtime.NonLocalReturnControl[_]) ⇒ Right(t.value.asInstanceOf[X]) - case Left(t: InterruptedException) ⇒ Left(new RuntimeException("Boxed InterruptedException", t)) - case _ ⇒ source - } - - /** - * @return a new Future that will contain a tuple containing the successful result of this and that Future. - * If this or that fail, they will race to complete the returned Future with their failure. - * The returned Future will not be completed if neither this nor that are completed. - */ - def zip[U](that: Future[U]): Future[(T, U)] = { - val p = Promise[(T, U)]() - onComplete { - case Left(t) ⇒ p failure t - case Right(r) ⇒ that onSuccess { case r2 ⇒ p success ((r, r2)) } - } - that onFailure { case f ⇒ p tryComplete Left(f) } - p.future - } - - /** - * For use only within a Future.flow block or another compatible Delimited Continuations reset block. - * - * Returns the result of this Future without blocking, by suspending execution and storing it as a - * continuation until the result is available. - */ - def apply(): T @cps[Future[Any]] = shift(this flatMap (_: T ⇒ Future[Any])) - - /** - * Tests whether this Future has been completed. - */ - def isCompleted: Boolean - - /** - * The contained value of this Future. Before this Future is completed - * the value will be None. 
After completion the value will be Some(Right(t)) - * if it contains a valid result, or Some(Left(error)) if it contains - * an exception. - */ - def value: Option[Either[Throwable, T]] - - /** - * When this Future is completed, apply the provided function to the - * Future. If the Future has already been completed, this will apply - * immediately. Multiple - * callbacks may be registered; there is no guarantee that they will be - * executed in a particular order. - * - * $asyncCallbackWarning - */ - def onComplete[U](func: Either[Throwable, T] ⇒ U): this.type - - /** - * When the future is completed with a valid result, apply the provided - * PartialFunction to the result. See `onComplete` for more details. - *
-   *   future onSuccess {
-   *     case Foo ⇒ target ! "foo"
-   *     case Bar ⇒ target ! "bar"
-   *   }
-   * 
- * - * $asyncCallbackWarning - */ - final def onSuccess[U](pf: PartialFunction[T, U]): this.type = onComplete { - case Right(r) if pf isDefinedAt r ⇒ pf(r) - case _ ⇒ - } - - /** - * When the future is completed with an exception, apply the provided - * PartialFunction to the exception. See `onComplete` for more details. - *
-   *   future onFailure {
-   *     case NumberFormatException ⇒ target ! "wrong format"
-   *   }
-   * 
- * - * $asyncCallbackWarning - */ - final def onFailure[U](pf: PartialFunction[Throwable, U]): this.type = onComplete { - case Left(ex) if pf isDefinedAt ex ⇒ pf(ex) - case _ ⇒ - } - - /** - * Returns a failure projection of this Future - * If `this` becomes completed with a failure, that failure will be the success of the returned Future - * If `this` becomes completed with a result, then the returned future will fail with a NoSuchElementException - */ - final def failed: Future[Throwable] = { - val p = Promise[Throwable]() - this.onComplete { - case Left(t) ⇒ p success t - case Right(r) ⇒ p failure new NoSuchElementException("Future.failed not completed with a throwable. Instead completed with: " + r) - } - p.future - } - - /** - * Returns a new Future that will either hold the successful value of this Future, - * or, it this Future fails, it will hold the result of "that" Future. - */ - def fallbackTo[U >: T](that: Future[U]): Future[U] = { - val p = Promise[U]() - onComplete { - case r @ Right(_) ⇒ p complete r - case _ ⇒ p completeWith that - } - p.future - } - - /** - * Creates a new Future that will handle any matching Throwable that this - * Future might contain. If there is no match, or if this Future contains - * a valid result then the new Future will contain the same. - * Example: - *
-   * Future(6 / 0) recover { case e: ArithmeticException ⇒ 0 } // result: 0
-   * Future(6 / 0) recover { case e: NotFoundException   ⇒ 0 } // result: exception
-   * Future(6 / 2) recover { case e: ArithmeticException ⇒ 0 } // result: 3
-   * 
- * - * $asyncCallbackWarning - */ - final def recover[A >: T](pf: PartialFunction[Throwable, A]): Future[A] = { - val p = Promise[A]() - onComplete { - case Left(e) if pf isDefinedAt e ⇒ p.complete(try { Right(pf(e)) } catch { case NonFatal(x) ⇒ Left(x) }) - case otherwise ⇒ p complete otherwise - } - p.future - } - - /** - * Returns a new Future that will, in case this future fails, - * be completed with the resulting Future of the given PartialFunction, - * if the given PartialFunction matches the failure of the original Future. - * - * If the PartialFunction throws, that Throwable will be propagated to the returned Future. - * - * Example: - * - * {{{ - * val f = Future { Int.MaxValue } - * Future (6 / 0) recoverWith { case e: ArithmeticException => f } // result: Int.MaxValue - * }}} - * - * $asyncCallbackWarning - */ - def recoverWith[U >: T](pf: PartialFunction[Throwable, Future[U]]): Future[U] = { - val p = Promise[U]() - - onComplete { - case Left(t) if pf isDefinedAt t ⇒ - try { p completeWith pf(t) } catch { case NonFatal(t) ⇒ p complete resolve(Left(t)) } - case otherwise ⇒ p complete otherwise - } - - p.future - } - - /** - * Returns a new Future that will contain the completed result of this Future, - * and which will invoke the supplied PartialFunction when completed. - * - * This allows for establishing order of side-effects. - * - * {{{ - * Future { 5 } andThen { - * case something => assert(something is awesome) - * } andThen { - * case Left(t) => handleProblem(t) - * case Right(v) => dealWithSuccess(v) - * } - * }}} - * - * $asyncCallbackWarning - */ - def andThen[U](pf: PartialFunction[Either[Throwable, T], U]): Future[T] = { - val p = Promise[T]() - onComplete { case r ⇒ try if (pf isDefinedAt r) pf(r) finally p complete r } - p.future - } - - /** - * Creates a new Future by applying a function to the successful result of - * this Future. If this Future is completed with an exception then the new - * Future will also contain this exception. - * Example: - *
-   * val future1 = for {
-   *   a: Int    <- actor ? "Hello" // returns 5
-   *   b: String <- actor ? a       // returns "10"
-   *   c: String <- actor ? 7       // returns "14"
-   * } yield b + "-" + c
-   * 
- * - * $asyncCallbackWarning - */ - final def map[A](f: T ⇒ A): Future[A] = { - val future = Promise[A]() - onComplete { - case l: Left[_, _] ⇒ future complete l.asInstanceOf[Either[Throwable, A]] - case Right(res) ⇒ - future complete (try { - Right(f(res)) - } catch { - case NonFatal(e) ⇒ - executor.reportFailure(new LogEventException(Debug("Future", getClass, e.getMessage), e)) - Left(e) - }) - } - future - } - - /** - * Creates a new Future[A] which is completed with this Future's result if - * that conforms to A's erased type or a ClassCastException otherwise. - * - * When used from Java, to create the Manifest, use: - * import static akka.japi.Util.manifest; - * future.mapTo(manifest(MyClass.class)); - */ - final def mapTo[A](implicit m: Manifest[A]): Future[A] = { - val fa = Promise[A]() - onComplete { - case l: Left[_, _] ⇒ fa complete l.asInstanceOf[Either[Throwable, A]] - case Right(t) ⇒ - fa complete (try { - Right(BoxedType(m.erasure).cast(t).asInstanceOf[A]) - } catch { - case e: ClassCastException ⇒ Left(e) - }) - } - fa.future - } - - /** - * Creates a new Future by applying a function to the successful result of - * this Future, and returns the result of the function as the new Future. - * If this Future is completed with an exception then the new Future will - * also contain this exception. - * Example: - *
-   * val future1 = for {
-   *   a: Int    <- actor ? "Hello" // returns 5
-   *   b: String <- actor ? a       // returns "10"
-   *   c: String <- actor ? 7       // returns "14"
-   * } yield b + "-" + c
-   * 
- * - * $asyncCallbackWarning - */ - final def flatMap[A](f: T ⇒ Future[A]): Future[A] = { - val p = Promise[A]() - - onComplete { - case l: Left[_, _] ⇒ p complete l.asInstanceOf[Either[Throwable, A]] - case Right(r) ⇒ - try { - p completeWith f(r) - } catch { - case NonFatal(e) ⇒ - executor.reportFailure(new LogEventException(Debug("Future", getClass, e.getMessage), e)) - p complete Left(e) - case t ⇒ - p complete Left(new ExecutionException(t)); throw t - } - } - p.future - } - - /** - * Same as onSuccess { case r => f(r) } but is also used in for-comprehensions - * - * $asyncCallbackWarning - */ - final def foreach[U](f: T ⇒ U): Unit = onComplete { - case Right(r) ⇒ f(r) - case _ ⇒ - } - - /** - * Used by for-comprehensions - * - * $asyncCallbackWarning - */ - final def withFilter(p: T ⇒ Boolean) = new FutureWithFilter[T](this, p) - - final class FutureWithFilter[+A](self: Future[A], p: A ⇒ Boolean) { - def foreach(f: A ⇒ Unit): Unit = self filter p foreach f - def map[B](f: A ⇒ B): Future[B] = self filter p map f - def flatMap[B](f: A ⇒ Future[B]): Future[B] = self filter p flatMap f - def withFilter(q: A ⇒ Boolean): FutureWithFilter[A] = new FutureWithFilter[A](self, x ⇒ p(x) && q(x)) - } - - /** - * Returns a new Future that will hold the successful result of this Future if it matches - * the given predicate, if it doesn't match, the resulting Future will be a failed Future - * with a MatchError, of if this Future fails, that failure will be propagated to the returned Future - * - * $asyncCallbackWarning - */ - final def filter(pred: T ⇒ Boolean): Future[T] = { - val p = Promise[T]() - onComplete { - case l: Left[_, _] ⇒ p complete l.asInstanceOf[Either[Throwable, T]] - case r @ Right(res) ⇒ p complete (try { - if (pred(res)) r else Left(new MatchError(res)) - } catch { - case NonFatal(e) ⇒ - executor.reportFailure(new LogEventException(Debug("Future", getClass, e.getMessage), e)) - Left(e) - }) - } - p.future - } - -} - -object Promise { - /** - * Creates a non-completed Promise - * - * Scala API - */ - def apply[A]()(implicit executor: ExecutionContext): Promise[A] = new DefaultPromise[A]() - - /** - * Creates an already completed Promise with the specified exception - */ - def failed[T](exception: Throwable)(implicit executor: ExecutionContext): Promise[T] = new KeptPromise[T](Left(exception)) - - /** - * Creates an already completed Promise with the specified result - */ - def successful[T](result: T)(implicit executor: ExecutionContext): Promise[T] = new KeptPromise[T](Right(result)) -} - -/** - * Essentially this is the Promise (or write-side) of a Future (read-side). - */ -trait Promise[T] extends Future[T] { - - /** - * Returns the Future associated with this Promise - */ - def future: Future[T] = this - - /** - * Completes this Promise with the specified result, if not already completed. - * @return whether this call completed the Promise - */ - def tryComplete(value: Either[Throwable, T]): Boolean - - /** - * Completes this Promise with the specified result, if not already completed. - * @throws IllegalStateException if already completed, this is to aid in debugging of complete-races, - * use tryComplete to do a conditional complete. - * @return this - */ - final def complete(value: Either[Throwable, T]): this.type = - if (tryComplete(value)) this else throw new IllegalStateException("Promise already completed: " + this + " tried to complete with " + value) - - /** - * Completes this Promise with the specified result, if not already completed. 
- * @return this - */ - final def success(result: T): this.type = complete(Right(result)) - - /** - * Completes this Promise with the specified exception, if not already completed. - * @return this - */ - final def failure(exception: Throwable): this.type = complete(Left(exception)) - - /** - * Completes this Promise with the specified other Future, when that Future is completed, - * unless this Promise has already been completed. - * @return this. - */ - final def completeWith(other: Future[T]): this.type = { - other onComplete { tryComplete(_) } - this - } - - final def <<(value: T): Future[T] @cps[Future[Any]] = shift { cont: (Future[T] ⇒ Future[Any]) ⇒ cont(complete(Right(value))) } - - final def <<(other: Future[T]): Future[T] @cps[Future[Any]] = shift { cont: (Future[T] ⇒ Future[Any]) ⇒ - val fr = Promise[Any]() - val thisPromise = this - thisPromise completeWith other onComplete { v ⇒ - try { - fr completeWith cont(thisPromise) - } catch { - case NonFatal(e) ⇒ - executor.reportFailure(new LogEventException(Debug("Future", getClass, e.getMessage), e)) - fr failure e - } - } - fr - } -} - -//Companion object to FState, just to provide a cheap, immutable default entry -private[dispatch] object DefaultPromise { - def EmptyPending[T](): List[T] = Nil -} - -/** - * The default concrete Future implementation. - */ -class DefaultPromise[T](implicit val executor: ExecutionContext) extends AbstractPromise with Promise[T] { - self ⇒ - - protected final def tryAwait(atMost: Duration): Boolean = { - Future.blocking - - @tailrec - def awaitUnsafe(waitTimeNanos: Long): Boolean = { - if (!isCompleted && waitTimeNanos > 0) { - val ms = NANOSECONDS.toMillis(waitTimeNanos) - val ns = (waitTimeNanos % 1000000l).toInt //As per object.wait spec - val start = System.nanoTime() - try { synchronized { if (!isCompleted) wait(ms, ns) } } catch { case e: InterruptedException ⇒ } - - awaitUnsafe(waitTimeNanos - (System.nanoTime() - start)) - } else isCompleted - } - awaitUnsafe(if (atMost.isFinite) atMost.toNanos else Long.MaxValue) - } - - @throws(classOf[TimeoutException]) - def ready(atMost: Duration)(implicit permit: CanAwait): this.type = - if (isCompleted || tryAwait(atMost)) this - else throw new TimeoutException("Futures timed out after [" + atMost + "]") - - @throws(classOf[Exception]) - def result(atMost: Duration)(implicit permit: CanAwait): T = - ready(atMost).value.get match { - case Left(e: AskTimeoutException) ⇒ throw new AskTimeoutException(e.getMessage, e) // to get meaningful stack trace - case Left(e) ⇒ throw e - case Right(r) ⇒ r - } - - def value: Option[Either[Throwable, T]] = getState match { - case _: List[_] ⇒ None - case c: Either[_, _] ⇒ Some(c.asInstanceOf[Either[Throwable, T]]) - } - - def isCompleted(): Boolean = getState match { - case _: Either[_, _] ⇒ true - case _ ⇒ false - } - - def tryComplete(value: Either[Throwable, T]): Boolean = { - val callbacks: List[Either[Throwable, T] ⇒ Unit] = { - try { - @tailrec - def tryComplete(v: Either[Throwable, T]): List[Either[Throwable, T] ⇒ Unit] = { - getState match { - case raw: List[_] ⇒ - val cur = raw.asInstanceOf[List[Either[Throwable, T] ⇒ Unit]] - if (updateState(cur, v)) cur else tryComplete(v) - case _ ⇒ null - } - } - tryComplete(resolve(value)) - } finally { - synchronized { notifyAll() } //Notify any evil blockers - } - } - - callbacks match { - case null ⇒ false - case cs if cs.isEmpty ⇒ true - case cs ⇒ Future.dispatchTask(() ⇒ cs.foreach(f ⇒ notifyCompleted(f, value))); true - } - } - - def onComplete[U](func: 
Either[Throwable, T] ⇒ U): this.type = { - @tailrec //Returns whether the future has already been completed or not - def tryAddCallback(): Either[Throwable, T] = { - val cur = getState - cur match { - case r: Either[_, _] ⇒ r.asInstanceOf[Either[Throwable, T]] - case listeners: List[_] ⇒ if (updateState(listeners, func :: listeners)) null else tryAddCallback() - } - } - - tryAddCallback() match { - case null ⇒ this - case completed ⇒ - Future.dispatchTask(() ⇒ notifyCompleted(func, completed)) - this - } - } - - private final def notifyCompleted[U](func: Either[Throwable, T] ⇒ U, result: Either[Throwable, T]): Unit = - try func(result) catch { case NonFatal(e) ⇒ executor reportFailure e } -} - -/** - * An already completed Future is seeded with it's result at creation, is useful for when you are participating in - * a Future-composition but you already have a value to contribute. - */ -final class KeptPromise[T](suppliedValue: Either[Throwable, T])(implicit val executor: ExecutionContext) extends Promise[T] { - val value = Some(resolve(suppliedValue)) - - def tryComplete(value: Either[Throwable, T]): Boolean = false - def onComplete[U](func: Either[Throwable, T] ⇒ U): this.type = { - val completedAs = value.get - Future dispatchTask (() ⇒ func(completedAs)) - this - } - def isCompleted(): Boolean = true - def ready(atMost: Duration)(implicit permit: CanAwait): this.type = this - def result(atMost: Duration)(implicit permit: CanAwait): T = value.get match { - case Left(e) ⇒ throw e - case Right(r) ⇒ r - } } /** @@ -947,7 +156,7 @@ final class KeptPromise[T](suppliedValue: Either[Throwable, T])(implicit val exe */ object japi { @deprecated("Do not use this directly, use subclasses of this", "2.0") - class CallbackBridge[-T] extends PartialFunction[T, BoxedUnit] { + class CallbackBridge[-T] extends AbstractPartialFunction[T, BoxedUnit] { override final def isDefinedAt(t: T): Boolean = true override final def apply(t: T): BoxedUnit = { internal(t) @@ -957,7 +166,7 @@ object japi { } @deprecated("Do not use this directly, use 'Recover'", "2.0") - class RecoverBridge[+T] extends PartialFunction[Throwable, T] { + class RecoverBridge[+T] extends AbstractPartialFunction[Throwable, T] { override final def isDefinedAt(t: Throwable): Boolean = true override final def apply(t: Throwable): T = internal(t) protected def internal(result: Throwable): T = null.asInstanceOf[T] @@ -971,10 +180,11 @@ object japi { @deprecated("Do not use this directly, use subclasses of this", "2.0") class UnitFunctionBridge[-T] extends (T ⇒ BoxedUnit) { - override final def apply(t: T): BoxedUnit = { - internal(t) - BoxedUnit.UNIT - } + final def apply$mcLJ$sp(l: Long): BoxedUnit = { internal(l.asInstanceOf[T]); BoxedUnit.UNIT } + final def apply$mcLI$sp(i: Int): BoxedUnit = { internal(i.asInstanceOf[T]); BoxedUnit.UNIT } + final def apply$mcLF$sp(f: Float): BoxedUnit = { internal(f.asInstanceOf[T]); BoxedUnit.UNIT } + final def apply$mcLD$sp(d: Double): BoxedUnit = { internal(d.asInstanceOf[T]); BoxedUnit.UNIT } + override final def apply(t: T): BoxedUnit = { internal(t); BoxedUnit.UNIT } protected def internal(result: T): Unit = () } } diff --git a/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala b/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala index 36b386cef1..baf7b682c2 100644 --- a/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala +++ b/akka-actor/src/main/scala/akka/dispatch/Mailbox.scala @@ -3,15 +3,16 @@ */ package akka.dispatch -import akka.AkkaException import java.util.{ Comparator, PriorityQueue, 
Queue, Deque } -import akka.util._ -import akka.actor.{ ActorCell, ActorRef, Cell } import java.util.concurrent._ -import annotation.tailrec +import akka.AkkaException +import akka.actor.{ ActorCell, ActorRef, Cell, ActorSystem, InternalActorRef, DeadLetter } +import akka.util.{ Unsafe, BoundedBlockingQueue } import akka.event.Logging.Error +import scala.concurrent.util.Duration +import scala.annotation.tailrec +import scala.util.control.NonFatal import com.typesafe.config.Config -import akka.actor._ /** * INTERNAL API @@ -24,12 +25,16 @@ private[akka] object Mailbox { * the following assigned numbers CANNOT be changed without looking at the code which uses them! */ - // primary status: only first three + // primary status final val Open = 0 // _status is not initialized in AbstractMailbox, so default must be zero! Deliberately without type ascription to make it a compile-time constant - final val Suspended = 1 // Deliberately without type ascription to make it a compile-time constant - final val Closed = 2 // Deliberately without type ascription to make it a compile-time constant + final val Closed = 1 // Deliberately without type ascription to make it a compile-time constant // secondary status: Scheduled bit may be added to Open/Suspended - final val Scheduled = 4 // Deliberately without type ascription to make it a compile-time constant + final val Scheduled = 2 // Deliberately without type ascription to make it a compile-time constant + // shifted by 2: the suspend count! + final val shouldScheduleMask = 3 + final val shouldNotProcessMask = ~2 + final val suspendMask = ~3 + final val suspendUnit = 4 // mailbox debugging helper using println (see below) // since this is a compile-time constant, scalac will elide code behind if (Mailbox.debug) (RK checked with 2.9.1) @@ -100,10 +105,10 @@ private[akka] abstract class Mailbox(val messageQueue: MessageQueue) final def status: Mailbox.Status = Unsafe.instance.getIntVolatile(this, AbstractMailbox.mailboxStatusOffset) @inline - final def shouldProcessMessage: Boolean = (status & 3) == Open + final def shouldProcessMessage: Boolean = (status & shouldNotProcessMask) == 0 @inline - final def isSuspended: Boolean = (status & 3) == Suspended + final def isSuspended: Boolean = (status & suspendMask) != 0 @inline final def isClosed: Boolean = status == Closed @@ -120,23 +125,32 @@ private[akka] abstract class Mailbox(val messageQueue: MessageQueue) Unsafe.instance.putIntVolatile(this, AbstractMailbox.mailboxStatusOffset, newStatus) /** - * set new primary status Open. Caller does not need to worry about whether + * Reduce the suspend count by one. Caller does not need to worry about whether * status was Scheduled or not. + * + * @returns true if the suspend count reached zero */ @tailrec - final def becomeOpen(): Boolean = status match { + final def resume(): Boolean = status match { case Closed ⇒ setStatus(Closed); false - case s ⇒ updateStatus(s, Open | s & Scheduled) || becomeOpen() + case s ⇒ + val next = if (s < suspendUnit) s else s - suspendUnit + if (updateStatus(s, next)) next < suspendUnit + else resume() } /** - * set new primary status Suspended. Caller does not need to worry about whether + * Increment the suspend count by one. Caller does not need to worry about whether * status was Scheduled or not. 
+ * + * @returns true if the previous suspend count was zero */ @tailrec - final def becomeSuspended(): Boolean = status match { + final def suspend(): Boolean = status match { case Closed ⇒ setStatus(Closed); false - case s ⇒ updateStatus(s, Suspended | s & Scheduled) || becomeSuspended() + case s ⇒ + if (updateStatus(s, s + suspendUnit)) s < suspendUnit + else suspend() } /** @@ -157,11 +171,10 @@ private[akka] abstract class Mailbox(val messageQueue: MessageQueue) val s = status /* * only try to add Scheduled bit if pure Open/Suspended, not Closed or with - * Scheduled bit already set (this is one of the reasons why the numbers - * cannot be changed in object Mailbox above) + * Scheduled bit already set */ - if (s <= Suspended) updateStatus(s, s | Scheduled) || setAsScheduled() - else false + if ((s & shouldScheduleMask) != Open) false + else updateStatus(s, s | Scheduled) || setAsScheduled() } /** @@ -170,12 +183,6 @@ private[akka] abstract class Mailbox(val messageQueue: MessageQueue) @tailrec final def setAsIdle(): Boolean = { val s = status - /* - * only try to remove Scheduled bit if currently Scheduled, not Closed or - * without Scheduled bit set (this is one of the reasons why the numbers - * cannot be changed in object Mailbox above) - */ - updateStatus(s, s & ~Scheduled) || setAsIdle() } diff --git a/akka-actor/src/main/scala/akka/dispatch/PinnedDispatcher.scala b/akka-actor/src/main/scala/akka/dispatch/PinnedDispatcher.scala index 246b1f591a..8fdde39cb3 100644 --- a/akka-actor/src/main/scala/akka/dispatch/PinnedDispatcher.scala +++ b/akka-actor/src/main/scala/akka/dispatch/PinnedDispatcher.scala @@ -5,7 +5,7 @@ package akka.dispatch import akka.actor.ActorCell -import akka.util.Duration +import scala.concurrent.util.Duration /** * Dedicates a unique thread for each actor passed in as reference. Served through its messageQueue. 
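 The Mailbox.scala hunk above replaces the old Open/Suspended/Closed primary states with a status word that keeps Closed and the Scheduled bit in the two low bits and a suspend count in the bits above them, so nested suspend/resume calls balance out. The following stand-alone sketch models that encoding; the object name and the AtomicInteger carrier are assumptions of this illustration (the real mailbox stores the word via Unsafe): import java.util.concurrent.atomic.AtomicInteger import scala.annotation.tailrec object MailboxStatusSketch { final val Open = 0 // no flags set, suspend count is zero final val Closed = 1 // terminal state, never left once entered final val Scheduled = 2 // "already scheduled on the dispatcher" bit final val suspendUnit = 4 // suspend count lives in the bits above Closed and Scheduled private val status = new AtomicInteger(Open) def shouldProcessMessage: Boolean = (status.get & ~Scheduled) == 0 // open, not suspended, not closed def isSuspended: Boolean = (status.get & ~(Closed | Scheduled)) != 0 // suspend count > 0 // increment the suspend count; returns true if the previous count was zero @tailrec def suspend(): Boolean = status.get match { case Closed ⇒ false case s ⇒ if (status.compareAndSet(s, s + suspendUnit)) s < suspendUnit else suspend() } // decrement the suspend count; returns true if the count dropped to zero @tailrec def resume(): Boolean = status.get match { case Closed ⇒ false case s ⇒ val next = if (s < suspendUnit) s else s - suspendUnit if (status.compareAndSet(s, next)) next < suspendUnit else resume() } } With this scheme a mailbox suspended n times must be resumed n times before shouldProcessMessage is true again, which is the behavioural difference from the single Suspended flag it replaces. 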
diff --git a/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala b/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala index 963299debc..2d38128abe 100644 --- a/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala +++ b/akka-actor/src/main/scala/akka/dispatch/ThreadPoolBuilder.scala @@ -5,20 +5,23 @@ package akka.dispatch import java.util.Collection -import akka.util.Duration -import akka.jsr166y._ -import java.util.concurrent.atomic.AtomicLong -import java.util.concurrent.ArrayBlockingQueue -import java.util.concurrent.BlockingQueue -import java.util.concurrent.Callable -import java.util.concurrent.ExecutorService -import java.util.concurrent.LinkedBlockingQueue -import java.util.concurrent.RejectedExecutionHandler -import java.util.concurrent.RejectedExecutionException -import java.util.concurrent.SynchronousQueue -import java.util.concurrent.TimeUnit -import java.util.concurrent.ThreadFactory -import java.util.concurrent.ThreadPoolExecutor +import scala.concurrent.{ Awaitable, BlockContext, CanAwait } +import scala.concurrent.util.Duration +import scala.concurrent.forkjoin._ +import java.util.concurrent.{ + ArrayBlockingQueue, + BlockingQueue, + Callable, + ExecutorService, + LinkedBlockingQueue, + RejectedExecutionHandler, + RejectedExecutionException, + SynchronousQueue, + TimeUnit, + ThreadFactory, + ThreadPoolExecutor +} +import java.util.concurrent.atomic.{ AtomicReference, AtomicLong } object ThreadPoolConfig { type QueueFactory = () ⇒ BlockingQueue[Runnable] @@ -154,6 +157,20 @@ case class ThreadPoolConfigBuilder(config: ThreadPoolConfig) { object MonitorableThreadFactory { val doNothing: Thread.UncaughtExceptionHandler = new Thread.UncaughtExceptionHandler() { def uncaughtException(thread: Thread, cause: Throwable) = () } + + private[akka] class AkkaForkJoinWorkerThread(_pool: ForkJoinPool) extends ForkJoinWorkerThread(_pool) with BlockContext { + override def blockOn[T](thunk: ⇒ T)(implicit permission: CanAwait): T = { + val result = new AtomicReference[Option[T]](None) + ForkJoinPool.managedBlock(new ForkJoinPool.ManagedBlocker { + def block(): Boolean = { + result.set(Some(thunk)) + true + } + def isReleasable = result.get.isDefined + }) + result.get.get // Exception intended if None + } + } } case class MonitorableThreadFactory(name: String, @@ -164,7 +181,7 @@ case class MonitorableThreadFactory(name: String, protected val counter = new AtomicLong def newThread(pool: ForkJoinPool): ForkJoinWorkerThread = { - val t = wire(ForkJoinPool.defaultForkJoinWorkerThreadFactory.newThread(pool)) + val t = wire(new MonitorableThreadFactory.AkkaForkJoinWorkerThread(pool)) // Name of the threads for the ForkJoinPool are not customizable. Change it here. 
t.setName(name + "-" + counter.incrementAndGet()) t diff --git a/akka-actor/src/main/scala/akka/event/EventStream.scala b/akka-actor/src/main/scala/akka/event/EventStream.scala index 2cc9bf8c2b..7fa6c8a5cf 100644 --- a/akka-actor/src/main/scala/akka/event/EventStream.scala +++ b/akka-actor/src/main/scala/akka/event/EventStream.scala @@ -3,6 +3,8 @@ */ package akka.event +import language.implicitConversions + import akka.actor.{ ActorRef, ActorSystem } import akka.event.Logging.simpleName import akka.util.Subclassification diff --git a/akka-actor/src/main/scala/akka/event/Logging.scala b/akka-actor/src/main/scala/akka/event/Logging.scala index 111d5d5dd8..05e8ce3ca0 100644 --- a/akka-actor/src/main/scala/akka/event/Logging.scala +++ b/akka-actor/src/main/scala/akka/event/Logging.scala @@ -3,15 +3,17 @@ */ package akka.event +import language.existentials + import akka.actor._ import akka.{ ConfigurationException, AkkaException } import akka.actor.ActorSystem.Settings import akka.util.{ Timeout, ReentrantGuard } -import akka.util.duration._ +import scala.concurrent.util.duration._ import java.util.concurrent.atomic.AtomicInteger import scala.util.control.NoStackTrace import java.util.concurrent.TimeoutException -import akka.dispatch.Await +import scala.concurrent.Await import annotation.implicitNotFound /** @@ -398,7 +400,12 @@ object Logging { /** * Marker trait for annotating LogLevel, which must be Int after erasure. */ - trait LogLevelType + case class LogLevel(asInt: Int) extends AnyVal { + @inline final def >=(other: LogLevel): Boolean = asInt >= other.asInt + @inline final def <=(other: LogLevel): Boolean = asInt <= other.asInt + @inline final def >(other: LogLevel): Boolean = asInt > other.asInt + @inline final def <(other: LogLevel): Boolean = asInt < other.asInt + } /** * Log level in numeric form, used when deciding whether a certain log @@ -406,11 +413,10 @@ object Logging { * to DebugLevel (4). In case you want to add more levels, loggers need to * be subscribed to their event bus channels manually. */ - type LogLevel = Int with LogLevelType - final val ErrorLevel = 1.asInstanceOf[Int with LogLevelType] - final val WarningLevel = 2.asInstanceOf[Int with LogLevelType] - final val InfoLevel = 3.asInstanceOf[Int with LogLevelType] - final val DebugLevel = 4.asInstanceOf[Int with LogLevelType] + final val ErrorLevel = LogLevel(1) + final val WarningLevel = LogLevel(2) + final val InfoLevel = LogLevel(3) + final val DebugLevel = LogLevel(4) /** * Returns the LogLevel associated with the given string, @@ -448,7 +454,7 @@ object Logging { } // these type ascriptions/casts are necessary to avoid CCEs during construction while retaining correct type - val AllLogLevels = Seq(ErrorLevel: AnyRef, WarningLevel, InfoLevel, DebugLevel).asInstanceOf[Seq[LogLevel]] + val AllLogLevels: Seq[LogLevel] = Seq(ErrorLevel, WarningLevel, InfoLevel, DebugLevel) /** * Obtain LoggingAdapter for the given actor system and source object. This @@ -719,7 +725,7 @@ object Logging { * logger. */ class DefaultLogger extends Actor with StdOutLogger { - def receive = { + override def receive: Receive = { case InitializeLogger(_) ⇒ sender ! 
LoggerInitialized case event: LogEvent ⇒ print(event) } diff --git a/akka-actor/src/main/scala/akka/event/LoggingReceive.scala b/akka-actor/src/main/scala/akka/event/LoggingReceive.scala index 337815eed1..03739de894 100644 --- a/akka-actor/src/main/scala/akka/event/LoggingReceive.scala +++ b/akka-actor/src/main/scala/akka/event/LoggingReceive.scala @@ -3,6 +3,8 @@ */ package akka.event +import language.existentials + import akka.actor.Actor.Receive import akka.actor.ActorContext import akka.actor.ActorCell diff --git a/akka-actor/src/main/scala/akka/japi/JavaAPI.scala b/akka-actor/src/main/scala/akka/japi/JavaAPI.scala index b5a53d1fe5..99670252dc 100644 --- a/akka-actor/src/main/scala/akka/japi/JavaAPI.scala +++ b/akka-actor/src/main/scala/akka/japi/JavaAPI.scala @@ -4,7 +4,10 @@ package akka.japi +import language.implicitConversions + import scala.Some +import scala.reflect.ClassTag import scala.util.control.NoStackTrace /** @@ -184,10 +187,7 @@ object Option { * This class hold common utilities for Java */ object Util { - /** - * Given a Class returns a Scala Manifest of that Class - */ - def manifest[T](clazz: Class[T]): Manifest[T] = Manifest.classType(clazz) + def classTag[T](clazz: Class[T]): ClassTag[T] = ClassTag(clazz) def arrayToSeq[T](arr: Array[T]): Seq[T] = arr.toSeq diff --git a/akka-actor/src/main/scala/akka/pattern/AskSupport.scala b/akka-actor/src/main/scala/akka/pattern/AskSupport.scala index c66fa4178d..d7ac4e0ce1 100644 --- a/akka-actor/src/main/scala/akka/pattern/AskSupport.scala +++ b/akka-actor/src/main/scala/akka/pattern/AskSupport.scala @@ -3,11 +3,15 @@ */ package akka.pattern +import language.implicitConversions + import java.util.concurrent.TimeoutException -import annotation.tailrec import akka.actor._ import akka.dispatch._ -import akka.util.{ NonFatal, Timeout, Unsafe } +import scala.annotation.tailrec +import scala.util.control.NonFatal +import scala.concurrent.{ Future, Promise, ExecutionContext } +import akka.util.{ Timeout, Unsafe } /** * This is what is used to complete a Future that is returned from an ask/? call, @@ -41,7 +45,7 @@ trait AskSupport { implicit def ask(actorRef: ActorRef): AskableActorRef = new AskableActorRef(actorRef) /** - * Sends a message asynchronously and returns a [[akka.dispatch.Future]] + * Sends a message asynchronously and returns a [[scala.concurrent.Future]] * holding the eventual reply message; this means that the target actor * needs to send the result to the `sender` reference provided. The Future * will be completed with an [[akka.pattern.AskTimeoutException]] after the @@ -66,23 +70,22 @@ trait AskSupport { * } pipeTo nextActor * }}} * - * [see [[akka.dispatch.Future]] for a description of `flow`] + * [see [[scala.concurrent.Future]] for a description of `flow`] */ def ask(actorRef: ActorRef, message: Any)(implicit timeout: Timeout): Future[Any] = actorRef match { case ref: InternalActorRef if ref.isTerminated ⇒ actorRef.tell(message) - Promise.failed(new AskTimeoutException("sending to terminated ref breaks promises"))(ref.provider.dispatcher) + Future.failed[Any](new AskTimeoutException("Recipient[%s] had already been terminated." format actorRef)) case ref: InternalActorRef ⇒ - val provider = ref.provider - if (timeout.duration.length <= 0) { - actorRef.tell(message) - Promise.failed(new AskTimeoutException("not asking with negative timeout"))(provider.dispatcher) - } else { + if (!timeout.duration.isFinite) Future.failed[Any](new IllegalArgumentException("Timeouts to `ask` must be finite. 
Question not sent to [%s]" format actorRef)) + else if (timeout.duration.length <= 0) Future.failed[Any](new IllegalArgumentException("Timeout length for an `ask` must be greater or equal to 1. Question not sent to [%s]" format actorRef)) + else { + val provider = ref.provider val a = PromiseActorRef(provider, timeout) actorRef.tell(message, a) - a.result + a.result.future } - case _ ⇒ throw new IllegalArgumentException("incompatible ActorRef " + actorRef) + case _ ⇒ Future.failed[Any](new IllegalArgumentException("Unsupported type of ActorRef for the recipient. Question not sent to [%s]" format actorRef)) } /** @@ -91,7 +94,7 @@ trait AskSupport { private[akka] final class AskableActorRef(val actorRef: ActorRef) { /** - * Sends a message asynchronously and returns a [[akka.dispatch.Future]] + * Sends a message asynchronously and returns a [[scala.concurrent.Future]] * holding the eventual reply message; this means that the target actor * needs to send the result to the `sender` reference provided. The Future * will be completed with an [[akka.pattern.AskTimeoutException]] after the @@ -116,12 +119,12 @@ trait AskSupport { * } pipeTo nextActor * }}} * - * [see the [[akka.dispatch.Future]] companion object for a description of `flow`] + * [see the [[scala.concurrent.Future]] companion object for a description of `flow`] */ def ask(message: Any)(implicit timeout: Timeout): Future[Any] = akka.pattern.ask(actorRef, message)(timeout) /** - * Sends a message asynchronously and returns a [[akka.dispatch.Future]] + * Sends a message asynchronously and returns a [[scala.concurrent.Future]] * holding the eventual reply message; this means that the target actor * needs to send the result to the `sender` reference provided. The Future * will be completed with an [[akka.pattern.AskTimeoutException]] after the @@ -146,7 +149,7 @@ trait AskSupport { * } pipeTo nextActor * }}} * - * [see the [[akka.dispatch.Future]] companion object for a description of `flow`] + * [see the [[scala.concurrent.Future]] companion object for a description of `flow`] */ def ?(message: Any)(implicit timeout: Timeout): Future[Any] = akka.pattern.ask(actorRef, message)(timeout) } @@ -275,7 +278,7 @@ private[akka] final class PromiseActorRef private (val provider: ActorRefProvide @tailrec override def stop(): Unit = { def ensureCompleted(): Unit = { - if (!result.isCompleted) result.tryComplete(Left(new ActorKilledException("Stopped"))) + result.tryComplete(Left(new ActorKilledException("Stopped"))) val watchers = clearWatchers() if (!watchers.isEmpty) { val termination = Terminated(this)(existenceConfirmed = true) @@ -302,10 +305,11 @@ private[akka] object PromiseActorRef { private case class StoppedWithPath(path: ActorPath) def apply(provider: ActorRefProvider, timeout: Timeout): PromiseActorRef = { - val result = Promise[Any]()(provider.dispatcher) + implicit val ec = provider.dispatcher // TODO should we take an ExecutionContext in the method signature? 
+ val result = Promise[Any]() val a = new PromiseActorRef(provider, result) val f = provider.scheduler.scheduleOnce(timeout.duration) { result.tryComplete(Left(new AskTimeoutException("Timed out"))) } - result onComplete { _ ⇒ try a.stop() finally f.cancel() } + result.future onComplete { _ ⇒ try a.stop() finally f.cancel() } a } } \ No newline at end of file diff --git a/akka-actor/src/main/scala/akka/pattern/CircuitBreaker.scala b/akka-actor/src/main/scala/akka/pattern/CircuitBreaker.scala index f8daae1cbc..2bd4dfb71c 100644 --- a/akka-actor/src/main/scala/akka/pattern/CircuitBreaker.scala +++ b/akka-actor/src/main/scala/akka/pattern/CircuitBreaker.scala @@ -6,11 +6,13 @@ package akka.pattern import java.util.concurrent.atomic.{ AtomicInteger, AtomicLong, AtomicBoolean } import akka.AkkaException import akka.actor.Scheduler -import akka.dispatch.{ Future, ExecutionContext, Await, Promise } -import akka.util.{ Deadline, Duration, NonFatal, Unsafe } -import akka.util.duration._ -import util.control.NoStackTrace +import akka.util.Unsafe +import scala.util.control.NoStackTrace import java.util.concurrent.{ Callable, CopyOnWriteArrayList } +import scala.concurrent.{ ExecutionContext, Future, Promise, Await } +import scala.concurrent.util.{ Duration, Deadline } +import scala.concurrent.util.duration._ +import scala.util.control.NonFatal /** * Companion object providing factory methods for Circuit Breaker which runs callbacks in caller's thread @@ -21,9 +23,8 @@ object CircuitBreaker { * Synchronous execution context to run in caller's thread - used by companion object factory methods */ private[CircuitBreaker] val syncExecutionContext = new ExecutionContext { - def execute(runnable: Runnable): Unit = runnable.run() - - def reportFailure(t: Throwable): Unit = () + override def execute(runnable: Runnable): Unit = runnable.run() + override def reportFailure(t: Throwable): Unit = () } /** @@ -40,6 +41,9 @@ object CircuitBreaker { new CircuitBreaker(scheduler: Scheduler, maxFailures: Int, callTimeout: Duration, resetTimeout: Duration)(syncExecutionContext) /** + * Callbacks run in caller's thread when using withSyncCircuitBreaker, and in same ExecutionContext as the passed + * in Future when using withCircuitBreaker. To use another ExecutionContext for the callbacks you can specify the + * executor in the constructor. 
* Java API alias for apply * * @param scheduler Reference to Akka scheduler @@ -108,22 +112,18 @@ class CircuitBreaker(scheduler: Scheduler, maxFailures: Int, callTimeout: Durati * * @param body Call needing protected * @tparam T return type from call - * @return [[akka.dispatch.Future]] containing the call result + * @return [[scala.concurrent.Future]] containing the call result */ - def withCircuitBreaker[T](body: ⇒ Future[T]): Future[T] = { - currentState.invoke(body) - } + def withCircuitBreaker[T](body: ⇒ Future[T]): Future[T] = currentState.invoke(body) /** * Java API for withCircuitBreaker * * @param body Call needing protected * @tparam T return type from call - * @return [[akka.dispatch.Future]] containing the call result + * @return [[scala.concurrent.Future]] containing the call result */ - def callWithCircuitBreaker[T](body: Callable[Future[T]]): Future[T] = { - withCircuitBreaker(body.call) - } + def callWithCircuitBreaker[T](body: Callable[Future[T]]): Future[T] = withCircuitBreaker(body.call) /** * Wraps invocations of synchronous calls that need to be protected @@ -134,15 +134,10 @@ class CircuitBreaker(scheduler: Scheduler, maxFailures: Int, callTimeout: Durati * @tparam T return type from call * @return The result of the call */ - def withSyncCircuitBreaker[T](body: ⇒ T): T = { - Await.result(withCircuitBreaker({ - try - Promise.successful(body)(CircuitBreaker.syncExecutionContext) - catch { - case NonFatal(t) ⇒ Promise.failed(t)(CircuitBreaker.syncExecutionContext) - } - }), callTimeout) - } + def withSyncCircuitBreaker[T](body: ⇒ T): T = + Await.result( + withCircuitBreaker(try Future.successful(body) catch { case NonFatal(t) ⇒ Future.failed(t) }), + callTimeout) /** * Java API for withSyncCircuitBreaker @@ -152,9 +147,7 @@ class CircuitBreaker(scheduler: Scheduler, maxFailures: Int, callTimeout: Durati * @return The result of the call */ - def callWithSyncCircuitBreaker[T](body: Callable[T]): T = { - withSyncCircuitBreaker(body.call) - } + def callWithSyncCircuitBreaker[T](body: Callable[T]): T = withSyncCircuitBreaker(body.call) /** * Adds a callback to execute when circuit breaker opens @@ -177,9 +170,7 @@ class CircuitBreaker(scheduler: Scheduler, maxFailures: Int, callTimeout: Durati * @tparam T Type supplied to assist with type inference, otherwise ignored by implementation * @return CircuitBreaker for fluent usage */ - def onOpen[T](callback: Callable[T]): CircuitBreaker = { - onOpen(callback.call) - } + def onOpen[T](callback: Callable[T]): CircuitBreaker = onOpen(callback.call) /** * Adds a callback to execute when circuit breaker transitions to half-open @@ -202,9 +193,7 @@ class CircuitBreaker(scheduler: Scheduler, maxFailures: Int, callTimeout: Durati * @tparam T Type supplied to assist with type inference, otherwise ignored by implementation * @return CircuitBreaker for fluent usage */ - def onHalfOpen[T](callback: Callable[T]): CircuitBreaker = { - onHalfOpen(callback.call) - } + def onHalfOpen[T](callback: Callable[T]): CircuitBreaker = onHalfOpen(callback.call) /** * Adds a callback to execute when circuit breaker state closes @@ -227,9 +216,7 @@ class CircuitBreaker(scheduler: Scheduler, maxFailures: Int, callTimeout: Durati * @tparam T Type supplied to assist with type inference, otherwise ignored by implementation * @return CircuitBreaker for fluent usage */ - def onClose[T](callback: Callable[T]): CircuitBreaker = { - onClose(callback.call) - } + def onClose[T](callback: Callable[T]): CircuitBreaker = onClose(callback.call) /** * Retrieves current 
failure count. @@ -245,37 +232,30 @@ class CircuitBreaker(scheduler: Scheduler, maxFailures: Int, callTimeout: Durati * @param toState State being transitioning from * @throws IllegalStateException if an invalid transition is attempted */ - private def transition(fromState: State, toState: State): Unit = { + private def transition(fromState: State, toState: State): Unit = if (swapState(fromState, toState)) toState.enter() else throw new IllegalStateException("Illegal transition attempted from: " + fromState + " to " + toState) - } /** * Trips breaker to an open state. This is valid from Closed or Half-Open states. * * @param fromState State we're coming from (Closed or Half-Open) */ - private def tripBreaker(fromState: State): Unit = { - transition(fromState, Open) - } + private def tripBreaker(fromState: State): Unit = transition(fromState, Open) /** * Resets breaker to a closed state. This is valid from an Half-Open state only. * */ - private def resetBreaker(): Unit = { - transition(HalfOpen, Closed) - } + private def resetBreaker(): Unit = transition(HalfOpen, Closed) /** * Attempts to reset breaker by transitioning to a half-open state. This is valid from an Open state only. * */ - private def attemptReset(): Unit = { - transition(Open, HalfOpen) - } + private def attemptReset(): Unit = transition(Open, HalfOpen) /** * Internal state abstraction @@ -289,9 +269,7 @@ class CircuitBreaker(scheduler: Scheduler, maxFailures: Int, callTimeout: Durati * @param listener listener implementation * @tparam T return type of listener, not used - but supplied for type inference purposes */ - def addListener[T](listener: () ⇒ T) { - listeners add listener - } + def addListener[T](listener: () ⇒ T): Unit = listeners add listener /** * Test for whether listeners exist @@ -311,7 +289,7 @@ class CircuitBreaker(scheduler: Scheduler, maxFailures: Int, callTimeout: Durati while (iterator.hasNext) { val listener = iterator.next //FIXME per @viktorklang: it's a bit wasteful to create Futures for one-offs, just use EC.execute instead - Future(listener()) + Future(listener())(executor) } } } @@ -326,16 +304,12 @@ class CircuitBreaker(scheduler: Scheduler, maxFailures: Int, callTimeout: Durati */ def callThrough[T](body: ⇒ Future[T]): Future[T] = { val deadline = callTimeout.fromNow - val bodyFuture = try body catch { - case NonFatal(t) ⇒ Promise.failed(t) - } - bodyFuture onFailure { - case _ ⇒ callFails() - } onSuccess { - case _ ⇒ - if (deadline.isOverdue()) callFails() - else callSucceeds() - } + val bodyFuture = try body catch { case NonFatal(t) ⇒ Future.failed(t) } + bodyFuture.onComplete({ + case Right(_) if !deadline.isOverdue() ⇒ callSucceeds() + case _ ⇒ callFails() + })(CircuitBreaker.syncExecutionContext) + bodyFuture } /** @@ -388,16 +362,14 @@ class CircuitBreaker(scheduler: Scheduler, maxFailures: Int, callTimeout: Durati * @tparam T Return type of protected call * @return Future containing result of protected call */ - override def invoke[T](body: ⇒ Future[T]): Future[T] = { - callThrough(body) - } + override def invoke[T](body: ⇒ Future[T]): Future[T] = callThrough(body) /** * On successful call, the failure count is reset to 0 * * @return */ - override def callSucceeds(): Unit = { set(0) } + override def callSucceeds(): Unit = set(0) /** * On failed call, the failure count is incremented. 
The count is checked against the configured maxFailures, and @@ -405,27 +377,21 @@ class CircuitBreaker(scheduler: Scheduler, maxFailures: Int, callTimeout: Durati * * @return */ - override def callFails(): Unit = { - if (incrementAndGet() == maxFailures) tripBreaker(Closed) - } + override def callFails(): Unit = if (incrementAndGet() == maxFailures) tripBreaker(Closed) /** * On entry of this state, failure count is reset. * * @return */ - override def _enter(): Unit = { - set(0) - } + override def _enter(): Unit = set(0) /** * Override for more descriptive toString * * @return */ - override def toString: String = { - "Closed with failure count = " + get() - } + override def toString: String = "Closed with failure count = " + get() } /** @@ -441,44 +407,36 @@ class CircuitBreaker(scheduler: Scheduler, maxFailures: Int, callTimeout: Durati * @tparam T Return type of protected call * @return Future containing result of protected call */ - override def invoke[T](body: ⇒ Future[T]): Future[T] = { - if (compareAndSet(true, false)) - callThrough(body) - else - Promise.failed[T](new CircuitBreakerOpenException(Duration.Zero)) - } + override def invoke[T](body: ⇒ Future[T]): Future[T] = + if (compareAndSet(true, false)) callThrough(body) else Promise.failed[T](new CircuitBreakerOpenException(Duration.Zero)).future /** * Reset breaker on successful call. * * @return */ - override def callSucceeds(): Unit = { resetBreaker() } + override def callSucceeds(): Unit = resetBreaker() /** * Reopen breaker on failed call. * * @return */ - override def callFails(): Unit = { tripBreaker(HalfOpen) } + override def callFails(): Unit = tripBreaker(HalfOpen) /** * On entry, guard should be reset for that first call to get in * * @return */ - override def _enter(): Unit = { - set(true) - } + override def _enter(): Unit = set(true) /** * Override for more descriptive toString * * @return */ - override def toString: String = { - "Half-Open currently testing call for success = " + get() - } + override def toString: String = "Half-Open currently testing call for success = " + get() } /** @@ -493,9 +451,8 @@ class CircuitBreaker(scheduler: Scheduler, maxFailures: Int, callTimeout: Durati * @tparam T Return type of protected call * @return Future containing result of protected call */ - override def invoke[T](body: ⇒ Future[T]): Future[T] = { - Promise.failed[T](new CircuitBreakerOpenException(remainingTimeout().timeLeft)) - } + override def invoke[T](body: ⇒ Future[T]): Future[T] = + Promise.failed[T](new CircuitBreakerOpenException(remainingTimeout().timeLeft)).future /** * Calculate remaining timeout to inform the caller in case a backoff algorithm is useful @@ -512,14 +469,14 @@ class CircuitBreaker(scheduler: Scheduler, maxFailures: Int, callTimeout: Durati * * @return */ - override def callSucceeds(): Unit = {} + override def callSucceeds(): Unit = () /** * No-op for open, calls are never executed so cannot succeed or fail * * @return */ - override def callFails(): Unit = {} + override def callFails(): Unit = () /** * On entering this state, schedule an attempted reset via [[akka.actor.Scheduler]] and store the entry time to @@ -539,9 +496,7 @@ class CircuitBreaker(scheduler: Scheduler, maxFailures: Int, callTimeout: Durati * * @return */ - override def toString: String = { - "Open" - } + override def toString: String = "Open" } } diff --git a/akka-actor/src/main/scala/akka/pattern/FutureTimeoutSupport.scala b/akka-actor/src/main/scala/akka/pattern/FutureTimeoutSupport.scala index 25bc199ff5..72335d810b 100644 --- 
a/akka-actor/src/main/scala/akka/pattern/FutureTimeoutSupport.scala +++ b/akka-actor/src/main/scala/akka/pattern/FutureTimeoutSupport.scala @@ -4,20 +4,22 @@ package akka.pattern * Copyright (C) 2009-2012 Typesafe Inc. */ -import akka.util.Duration +import scala.concurrent.util.Duration +import scala.concurrent.{ ExecutionContext, Promise, Future } import akka.actor._ -import akka.dispatch.{ ExecutionContext, Promise, Future } +import scala.util.control.NonFatal trait FutureTimeoutSupport { /** - * Returns a [[akka.dispatch.Future]] that will be completed with the success or failure of the provided value + * Returns a [[scala.concurrent.Future]] that will be completed with the success or failure of the provided value * after the specified duration. */ def after[T](duration: Duration, using: Scheduler)(value: ⇒ Future[T])(implicit ec: ExecutionContext): Future[T] = - if (duration.isFinite() && duration.length < 1) value else { + if (duration.isFinite() && duration.length < 1) { + try value catch { case NonFatal(t) ⇒ Future.failed(t) } + } else { val p = Promise[T]() - val c = using.scheduleOnce(duration) { p completeWith value } - p onComplete { _ ⇒ c.cancel() } - p + using.scheduleOnce(duration) { p completeWith { try value catch { case NonFatal(t) ⇒ Future.failed(t) } } } + p.future } } \ No newline at end of file diff --git a/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala b/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala index 91293cb0d1..4a11da219a 100644 --- a/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala +++ b/akka-actor/src/main/scala/akka/pattern/GracefulStopSupport.scala @@ -5,12 +5,14 @@ package akka.pattern import akka.actor._ -import akka.util.{ Timeout, Duration } -import akka.dispatch.{ Unwatch, Watch, Promise, Future } +import akka.util.{ Timeout } +import akka.dispatch.{ Unwatch, Watch } +import scala.concurrent.Future +import scala.concurrent.util.Duration trait GracefulStopSupport { /** - * Returns a [[akka.dispatch.Future]] that will be completed with success (value `true`) when + * Returns a [[scala.concurrent.Future]] that will be completed with success (value `true`) when * existing messages of the target actor has been processed and the actor has been * terminated. * @@ -30,22 +32,24 @@ trait GracefulStopSupport { * } * }}} * - * If the target actor isn't terminated within the timeout the [[akka.dispatch.Future]] + * If the target actor isn't terminated within the timeout the [[scala.concurrent.Future]] * is completed with failure [[akka.pattern.AskTimeoutException]]. */ def gracefulStop(target: ActorRef, timeout: Duration)(implicit system: ActorSystem): Future[Boolean] = { - if (target.isTerminated) Promise.successful(true) + if (target.isTerminated) Future successful true else system match { case e: ExtendedActorSystem ⇒ + import e.dispatcher // TODO take implicit ExecutionContext/MessageDispatcher in method signature? val internalTarget = target.asInstanceOf[InternalActorRef] val ref = PromiseActorRef(e.provider, Timeout(timeout)) internalTarget.sendSystemMessage(Watch(target, ref)) - ref.result onComplete { // Just making sure we're not leaking here + val f = ref.result.future + f onComplete { // Just making sure we're not leaking here case Right(Terminated(`target`)) ⇒ () case _ ⇒ internalTarget.sendSystemMessage(Unwatch(target, ref)) } target ! 
PoisonPill - ref.result map { + f map { case Terminated(`target`) ⇒ true case _ ⇒ false } diff --git a/akka-actor/src/main/scala/akka/pattern/Patterns.scala b/akka-actor/src/main/scala/akka/pattern/Patterns.scala index 853b46e318..8dda900e35 100644 --- a/akka-actor/src/main/scala/akka/pattern/Patterns.scala +++ b/akka-actor/src/main/scala/akka/pattern/Patterns.scala @@ -4,18 +4,19 @@ package akka.pattern import akka.actor.Scheduler -import akka.dispatch.ExecutionContext +import scala.concurrent.ExecutionContext import java.util.concurrent.Callable object Patterns { import akka.actor.{ ActorRef, ActorSystem } - import akka.dispatch.Future import akka.pattern.{ ask ⇒ scalaAsk, pipe ⇒ scalaPipe, gracefulStop ⇒ scalaGracefulStop, after ⇒ scalaAfter } - import akka.util.{ Timeout, Duration } + import akka.util.Timeout + import scala.concurrent.Future + import scala.concurrent.util.Duration /** * Java API for `akka.pattern.ask`: - * Sends a message asynchronously and returns a [[akka.dispatch.Future]] + * Sends a message asynchronously and returns a [[scala.concurrent.Future]] * holding the eventual reply message; this means that the target actor * needs to send the result to the `sender` reference provided. The Future * will be completed with an [[akka.pattern.AskTimeoutException]] after the @@ -46,7 +47,7 @@ object Patterns { /** * Java API for `akka.pattern.ask`: - * Sends a message asynchronously and returns a [[akka.dispatch.Future]] + * Sends a message asynchronously and returns a [[scala.concurrent.Future]] * holding the eventual reply message; this means that the target actor * needs to send the result to the `sender` reference provided. The Future * will be completed with an [[akka.pattern.AskTimeoutException]] after the @@ -76,7 +77,7 @@ object Patterns { def ask(actor: ActorRef, message: Any, timeoutMillis: Long): Future[AnyRef] = scalaAsk(actor, message)(new Timeout(timeoutMillis)).asInstanceOf[Future[AnyRef]] /** - * Register an onComplete callback on this [[akka.dispatch.Future]] to send + * Register an onComplete callback on this [[scala.concurrent.Future]] to send * the result to the given actor reference. Returns the original Future to * allow method chaining. * @@ -90,30 +91,30 @@ object Patterns { * Patterns.pipe(transformed).to(nextActor); * }}} */ - def pipe[T](future: Future[T]): PipeableFuture[T] = scalaPipe(future) + def pipe[T](future: Future[T], context: ExecutionContext): PipeableFuture[T] = scalaPipe(future)(context) /** - * Returns a [[akka.dispatch.Future]] that will be completed with success (value `true`) when + * Returns a [[scala.concurrent.Future]] that will be completed with success (value `true`) when * existing messages of the target actor has been processed and the actor has been * terminated. * * Useful when you need to wait for termination or compose ordered termination of several actors. * - * If the target actor isn't terminated within the timeout the [[akka.dispatch.Future]] + * If the target actor isn't terminated within the timeout the [[scala.concurrent.Future]] * is completed with failure [[akka.pattern.AskTimeoutException]]. 
*/ def gracefulStop(target: ActorRef, timeout: Duration, system: ActorSystem): Future[java.lang.Boolean] = scalaGracefulStop(target, timeout)(system).asInstanceOf[Future[java.lang.Boolean]] /** - * Returns a [[akka.dispatch.Future]] that will be completed with the success or failure of the provided Callable + * Returns a [[scala.concurrent.Future]] that will be completed with the success or failure of the provided Callable * after the specified duration. */ def after[T](duration: Duration, scheduler: Scheduler, context: ExecutionContext, value: Callable[Future[T]]): Future[T] = scalaAfter(duration, scheduler)(value.call())(context) /** - * Returns a [[akka.dispatch.Future]] that will be completed with the success or failure of the provided value + * Returns a [[scala.concurrent.Future]] that will be completed with the success or failure of the provided value * after the specified duration. */ def after[T](duration: Duration, scheduler: Scheduler, context: ExecutionContext, value: Future[T]): Future[T] = diff --git a/akka-actor/src/main/scala/akka/pattern/PipeToSupport.scala b/akka-actor/src/main/scala/akka/pattern/PipeToSupport.scala index d73e146dfb..8eb3859f12 100644 --- a/akka-actor/src/main/scala/akka/pattern/PipeToSupport.scala +++ b/akka-actor/src/main/scala/akka/pattern/PipeToSupport.scala @@ -3,17 +3,21 @@ */ package akka.pattern -import akka.dispatch.Future +import language.implicitConversions + +import scala.concurrent.{ Future, ExecutionContext } import akka.actor.{ Status, ActorRef } trait PipeToSupport { - final class PipeableFuture[T](val future: Future[T]) { - def pipeTo(recipient: ActorRef)(implicit sender: ActorRef = null): Future[T] = + final class PipeableFuture[T](val future: Future[T])(implicit executionContext: ExecutionContext) { + def pipeTo(recipient: ActorRef)(implicit sender: ActorRef = null): Future[T] = { future onComplete { case Right(r) ⇒ recipient ! r case Left(f) ⇒ recipient ! Status.Failure(f) } + future + } def to(recipient: ActorRef): PipeableFuture[T] = to(recipient, null) def to(recipient: ActorRef, sender: ActorRef): PipeableFuture[T] = { pipeTo(recipient)(sender) @@ -22,7 +26,7 @@ trait PipeToSupport { } /** - * Import this implicit conversion to gain the `pipeTo` method on [[akka.dispatch.Future]]: + * Import this implicit conversion to gain the `pipeTo` method on [[scala.concurrent.Future]]: * * {{{ * import akka.pattern.pipe @@ -35,5 +39,5 @@ trait PipeToSupport { * * }}} */ - implicit def pipe[T](future: Future[T]): PipeableFuture[T] = new PipeableFuture(future) + implicit def pipe[T](future: Future[T])(implicit executionContext: ExecutionContext): PipeableFuture[T] = new PipeableFuture(future) } \ No newline at end of file diff --git a/akka-actor/src/main/scala/akka/pattern/package.scala b/akka-actor/src/main/scala/akka/pattern/package.scala index f467ca72f2..b2f232de8f 100644 --- a/akka-actor/src/main/scala/akka/pattern/package.scala +++ b/akka-actor/src/main/scala/akka/pattern/package.scala @@ -4,8 +4,6 @@ package akka import akka.actor._ -import akka.dispatch.{ Future, Promise } -import akka.util.{ Timeout, Duration } /** * == Commonly Used Patterns With Akka == @@ -17,7 +15,7 @@ import akka.util.{ Timeout, Duration } * *
 *
                                                                  • ask: create a temporary one-off actor for receiving a reply to a - * message and complete a [[akka.dispatch.Future]] with it; returns said + * message and complete a [[scala.concurrent.Future]] with it; returns said * Future.
                                                                  • *
                                                                  • pipeTo: feed eventually computed value of a future to an actor as * a message.
                                                                  • 
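 Several hunks in this patch (PipeToSupport, Patterns, AskSupport) make the ExecutionContext explicit: the pipe/pipeTo conversion now takes an implicit ExecutionContext instead of reusing the one baked into the Future. A minimal usage sketch; the Forwarder actor and the worker/nextActor references are hypothetical and only serve to show where the implicits come from: import akka.actor.{ Actor, ActorRef } import akka.pattern.{ ask, pipe } import akka.util.Timeout import scala.concurrent.util.duration._ class Forwarder(worker: ActorRef, nextActor: ActorRef) extends Actor { import context.dispatcher // implicit ExecutionContext for ask and pipeTo implicit val timeout: Timeout = Timeout(5.seconds) // implicit Timeout for the ask below def receive = { case msg ⇒ (worker ? msg) pipeTo nextActor // reply (or Status.Failure) is sent on when the future completes } } 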
  • diff --git a/akka-actor/src/main/scala/akka/routing/Routing.scala b/akka-actor/src/main/scala/akka/routing/Routing.scala index cb0f5ee09b..cca90d6543 100644 --- a/akka-actor/src/main/scala/akka/routing/Routing.scala +++ b/akka-actor/src/main/scala/akka/routing/Routing.scala @@ -3,20 +3,21 @@ */ package akka.routing +import language.implicitConversions +import language.postfixOps + import akka.actor._ -import akka.util.Duration -import akka.util.duration._ +import scala.concurrent.util.Duration +import scala.concurrent.util.duration._ import akka.ConfigurationException import akka.pattern.pipe import com.typesafe.config.Config import scala.collection.JavaConversions.iterableAsScalaIterable import java.util.concurrent.atomic.{ AtomicLong, AtomicBoolean } import java.util.concurrent.TimeUnit -import akka.jsr166y.ThreadLocalRandom -import akka.util.Unsafe +import scala.concurrent.forkjoin.ThreadLocalRandom import akka.dispatch.Dispatchers import scala.annotation.tailrec -import scala.runtime.ScalaRunTime /** * A RoutedActorRef is an ActorRef that has a set of connected ActorRef and it uses a Router to @@ -72,7 +73,7 @@ private[akka] class RoutedActorCell(_system: ActorSystemImpl, _ref: InternalActo if (routerConfig.resizer.isEmpty && _routees.isEmpty) throw new ActorInitializationException("router " + routerConfig + " did not register routees!") - start() + start(sendSupervise = false) /* * end of construction @@ -1074,8 +1075,9 @@ trait ScatterGatherFirstCompletedLike { this: RouterConfig ⇒ { case (sender, message) ⇒ val provider: ActorRefProvider = routeeProvider.context.asInstanceOf[ActorCell].systemImpl.provider + implicit val ec = provider.dispatcher val asker = akka.pattern.PromiseActorRef(provider, within) - asker.result.pipeTo(sender) + asker.result.future.pipeTo(sender) toAll(asker, routeeProvider.routees) } } diff --git a/akka-actor/src/main/scala/akka/serialization/Serialization.scala b/akka-actor/src/main/scala/akka/serialization/Serialization.scala index 7355e4f7fb..ee5e87466b 100644 --- a/akka-actor/src/main/scala/akka/serialization/Serialization.scala +++ b/akka-actor/src/main/scala/akka/serialization/Serialization.scala @@ -10,7 +10,7 @@ import com.typesafe.config.Config import akka.actor.{ Extension, ExtendedActorSystem, Address, DynamicAccess } import akka.event.Logging import java.util.concurrent.ConcurrentHashMap -import akka.util.NonFatal +import scala.util.control.NonFatal import scala.collection.mutable.ArrayBuffer import java.io.NotSerializableException @@ -145,7 +145,7 @@ class Serialization(val system: ExtendedActorSystem) extends Extension { */ private[akka] val bindings: Seq[ClassSerializer] = { val configuredBindings = for ((k: String, v: String) ← settings.SerializationBindings if v != "none") yield { - val c = system.dynamicAccess.getClassFor(k).fold(throw _, identity[Class[_]]) + val c = system.dynamicAccess.getClassFor[Any](k).fold(throw _, identity[Class[_]]) (c, serializers(v)) } sort(configuredBindings) @@ -168,11 +168,8 @@ class Serialization(val system: ExtendedActorSystem) extends Extension { * serializerMap is a Map whose keys is the class that is serializable and values is the serializer * to be used for that class. 
*/ - private val serializerMap: ConcurrentHashMap[Class[_], Serializer] = { - val serializerMap = new ConcurrentHashMap[Class[_], Serializer] - for ((c, s) ← bindings) serializerMap.put(c, s) - serializerMap - } + private val serializerMap: ConcurrentHashMap[Class[_], Serializer] = + (new ConcurrentHashMap[Class[_], Serializer] /: bindings) { case (map, (c, s)) ⇒ map.put(c, s); map } /** * Maps from a Serializer Identity (Int) to a Serializer instance (optimization) diff --git a/akka-actor/src/main/scala/akka/util/BoundedBlockingQueue.scala b/akka-actor/src/main/scala/akka/util/BoundedBlockingQueue.scala index c7c8308de0..613599fa8e 100644 --- a/akka-actor/src/main/scala/akka/util/BoundedBlockingQueue.scala +++ b/akka-actor/src/main/scala/akka/util/BoundedBlockingQueue.scala @@ -7,6 +7,7 @@ package akka.util import java.util.concurrent.locks.ReentrantLock import java.util.concurrent.{ TimeUnit, BlockingQueue } import java.util.{ AbstractQueue, Queue, Collection, Iterator } +import annotation.tailrec /** * BoundedBlockingQueue wraps any Queue and turns the result into a BlockingQueue with a limited capacity @@ -80,15 +81,17 @@ class BoundedBlockingQueue[E <: AnyRef]( var nanos = unit.toNanos(timeout) lock.lockInterruptibly() try { - while (backing.size() == maxCapacity) { - if (nanos <= 0) - return false - else - nanos = notFull.awaitNanos(nanos) - } - require(backing.offer(e)) //Should never fail - notEmpty.signal() - true + @tailrec def awaitNotFull(ns: Long): Boolean = + if (backing.size() == maxCapacity) { + if (ns > 0) awaitNotFull(notFull.awaitNanos(ns)) + else false + } else true + + if (awaitNotFull(nanos)) { + require(backing.offer(e)) //Should never fail + notEmpty.signal() + true + } else false } finally { lock.unlock() } @@ -208,17 +211,14 @@ class BoundedBlockingQueue[E <: AnyRef]( else { lock.lock() try { - var n = 0 - var e: E = null.asInstanceOf[E] - while (n < maxElements) { - backing.poll() match { - case null ⇒ return n - case e ⇒ - c add e - n += 1 - } - } - n + @tailrec def drainOne(n: Int): Int = + if (n < maxElements) { + backing.poll() match { + case null ⇒ n + case e ⇒ c add e; drainOne(n + 1) + } + } else n + drainOne(0) } finally { lock.unlock() } @@ -285,14 +285,14 @@ class BoundedBlockingQueue[E <: AnyRef]( last = -1 //To avoid 2 subsequent removes without a next in between lock.lock() try { - val i = backing.iterator() - while (i.hasNext) { + @tailrec def removeTarget(i: Iterator[E] = backing.iterator()): Unit = if (i.hasNext) { if (i.next eq target) { i.remove() notFull.signal() - return () - } + } else removeTarget(i) } + + removeTarget() } finally { lock.unlock() } diff --git a/akka-actor/src/main/scala/akka/util/ByteIterator.scala b/akka-actor/src/main/scala/akka/util/ByteIterator.scala index dc5e4a3d5c..ef4c0c49bc 100644 --- a/akka-actor/src/main/scala/akka/util/ByteIterator.scala +++ b/akka-actor/src/main/scala/akka/util/ByteIterator.scala @@ -12,8 +12,7 @@ import scala.collection.immutable.{ IndexedSeq, VectorBuilder } import scala.collection.generic.CanBuildFrom import scala.collection.mutable.{ ListBuffer } import scala.annotation.tailrec - -import java.nio.ByteBuffer +import scala.reflect.ClassTag object ByteIterator { object ByteArrayIterator { @@ -302,7 +301,7 @@ object ByteIterator { } final override def toByteString: ByteString = { - if (iterators.tail isEmpty) iterators.head.toByteString + if (iterators.tail.isEmpty) iterators.head.toByteString else { val result = iterators.foldLeft(ByteString.empty) { _ ++ _.toByteString } clear() @@ -455,7 
+454,7 @@ abstract class ByteIterator extends BufferedIterator[Byte] { acc } - override def toArray[B >: Byte](implicit arg0: ClassManifest[B]): Array[B] = { + override def toArray[B >: Byte](implicit arg0: ClassTag[B]): Array[B] = { val target = Array.ofDim[B](len) copyToArray(target) target diff --git a/akka-actor/src/main/scala/akka/util/ByteString.scala b/akka-actor/src/main/scala/akka/util/ByteString.scala index c2a690aa26..ac2af5c1b5 100644 --- a/akka-actor/src/main/scala/akka/util/ByteString.scala +++ b/akka-actor/src/main/scala/akka/util/ByteString.scala @@ -10,6 +10,7 @@ import scala.collection.IndexedSeqOptimized import scala.collection.mutable.{ Builder, WrappedArray } import scala.collection.immutable.{ IndexedSeq, VectorBuilder } import scala.collection.generic.CanBuildFrom +import scala.reflect.ClassTag object ByteString { @@ -281,7 +282,7 @@ sealed abstract class ByteString extends IndexedSeq[Byte] with IndexedSeqOptimiz override def indexWhere(p: Byte ⇒ Boolean): Int = iterator.indexWhere(p) override def indexOf[B >: Byte](elem: B): Int = iterator.indexOf(elem) - override def toArray[B >: Byte](implicit arg0: ClassManifest[B]): Array[B] = iterator.toArray + override def toArray[B >: Byte](implicit arg0: ClassTag[B]): Array[B] = iterator.toArray override def copyToArray[B >: Byte](xs: Array[B], start: Int, len: Int): Unit = iterator.copyToArray(xs, start, len) @@ -349,10 +350,8 @@ object CompactByteString { /** * Creates a new CompactByteString by copying a byte array. */ - def apply(bytes: Array[Byte]): CompactByteString = { - if (bytes.isEmpty) empty - else ByteString.ByteString1C(bytes.clone) - } + def apply(bytes: Array[Byte]): CompactByteString = + if (bytes.isEmpty) empty else ByteString.ByteString1C(bytes.clone) /** * Creates a new CompactByteString by copying bytes. @@ -394,10 +393,8 @@ object CompactByteString { /** * Creates a new CompactByteString by encoding a String with a charset. 
*/ - def apply(string: String, charset: String): CompactByteString = { - if (string.isEmpty) empty - else ByteString.ByteString1C(string.getBytes(charset)) - } + def apply(string: String, charset: String): CompactByteString = + if (string.isEmpty) empty else ByteString.ByteString1C(string.getBytes(charset)) /** * Creates a new CompactByteString by copying length bytes starting at offset from @@ -451,7 +448,7 @@ final class ByteStringBuilder extends Builder[Byte, ByteString] { this } - protected def fillByteBuffer(len: Int, byteOrder: ByteOrder)(fill: ByteBuffer ⇒ Unit): this.type = { + @inline protected final def fillByteBuffer(len: Int, byteOrder: ByteOrder)(fill: ByteBuffer ⇒ Unit): this.type = { fillArray(len) { case (array, start) ⇒ val buffer = ByteBuffer.wrap(array, start, len) diff --git a/akka-actor/src/main/scala/akka/util/Duration.scala b/akka-actor/src/main/scala/akka/util/Duration.scala index 3a1c2e80c8..b8de5a62bf 100644 --- a/akka-actor/src/main/scala/akka/util/Duration.scala +++ b/akka-actor/src/main/scala/akka/util/Duration.scala @@ -4,451 +4,11 @@ package akka.util +import language.implicitConversions + import java.util.concurrent.TimeUnit -import TimeUnit._ import java.lang.{ Double ⇒ JDouble } - -//TODO add @SerialVersionUID(1L) when SI-4804 is fixed -case class Deadline private (time: Duration) extends Ordered[Deadline] { - def +(other: Duration): Deadline = copy(time = time + other) - def -(other: Duration): Deadline = copy(time = time - other) - def -(other: Deadline): Duration = time - other.time - def timeLeft: Duration = this - Deadline.now - def hasTimeLeft(): Boolean = !isOverdue() //Code reuse FTW - def isOverdue(): Boolean = (time.toNanos - System.nanoTime()) < 0 - def compare(that: Deadline) = this.time compare that.time -} - -object Deadline { - def now: Deadline = Deadline(Duration(System.nanoTime, NANOSECONDS)) - - implicit object DeadlineIsOrdered extends Ordering[Deadline] { - def compare(a: Deadline, b: Deadline) = a compare b - } -} - -object Duration { - implicit def timeLeft(implicit d: Deadline): Duration = d.timeLeft - - def apply(length: Long, unit: TimeUnit): FiniteDuration = new FiniteDuration(length, unit) - def apply(length: Double, unit: TimeUnit): FiniteDuration = fromNanos(unit.toNanos(1) * length) - def apply(length: Long, unit: String): FiniteDuration = new FiniteDuration(length, timeUnit(unit)) - - def fromNanos(nanos: Long): FiniteDuration = { - if (nanos % 86400000000000L == 0) { - Duration(nanos / 86400000000000L, DAYS) - } else if (nanos % 3600000000000L == 0) { - Duration(nanos / 3600000000000L, HOURS) - } else if (nanos % 60000000000L == 0) { - Duration(nanos / 60000000000L, MINUTES) - } else if (nanos % 1000000000L == 0) { - Duration(nanos / 1000000000L, SECONDS) - } else if (nanos % 1000000L == 0) { - Duration(nanos / 1000000L, MILLISECONDS) - } else if (nanos % 1000L == 0) { - Duration(nanos / 1000L, MICROSECONDS) - } else { - Duration(nanos, NANOSECONDS) - } - } - - def fromNanos(nanos: Double): FiniteDuration = { - if (nanos > Long.MaxValue || nanos < Long.MinValue) - throw new IllegalArgumentException("trying to construct too large duration with " + nanos + "ns") - fromNanos((nanos + 0.5).asInstanceOf[Long]) - } - - /** - * Construct a Duration by parsing a String. In case of a format error, a - * RuntimeException is thrown. See `unapply(String)` for more information. - */ - def apply(s: String): Duration = unapply(s) getOrElse sys.error("format error") - - /** - * Deconstruct a Duration into length and unit if it is finite. 
- */ - def unapply(d: Duration): Option[(Long, TimeUnit)] = { - if (d.finite_?) { - Some((d.length, d.unit)) - } else { - None - } - } - - private val RE = ("""^\s*(-?\d+(?:\.\d+)?)\s*""" + // length part - "(?:" + // units are distinguished in separate match groups - "(d|day|days)|" + - "(h|hour|hours)|" + - "(min|minute|minutes)|" + - "(s|sec|second|seconds)|" + - "(ms|milli|millis|millisecond|milliseconds)|" + - "(µs|micro|micros|microsecond|microseconds)|" + - "(ns|nano|nanos|nanosecond|nanoseconds)" + - """)\s*$""").r // close the non-capturing group - private val REinf = """^\s*Inf\s*$""".r - private val REminf = """^\s*(?:-\s*|Minus)Inf\s*""".r - - /** - * Parse String, return None if no match. Format is `""`, where - * whitespace is allowed before, between and after the parts. Infinities are - * designated by `"Inf"` and `"-Inf"` or `"MinusInf"`. - */ - def unapply(s: String): Option[Duration] = s match { - case RE(length, d, h, m, s, ms, mus, ns) ⇒ - if (d ne null) Some(Duration(JDouble.parseDouble(length), DAYS)) else if (h ne null) Some(Duration(JDouble.parseDouble(length), HOURS)) else if (m ne null) Some(Duration(JDouble.parseDouble(length), MINUTES)) else if (s ne null) Some(Duration(JDouble.parseDouble(length), SECONDS)) else if (ms ne null) Some(Duration(JDouble.parseDouble(length), MILLISECONDS)) else if (mus ne null) Some(Duration(JDouble.parseDouble(length), MICROSECONDS)) else if (ns ne null) Some(Duration(JDouble.parseDouble(length), NANOSECONDS)) else - sys.error("made some error in regex (should not be possible)") - case REinf() ⇒ Some(Inf) - case REminf() ⇒ Some(MinusInf) - case _ ⇒ None - } - - /** - * Parse TimeUnit from string representation. - */ - def timeUnit(unit: String) = unit.toLowerCase match { - case "d" | "day" | "days" ⇒ DAYS - case "h" | "hour" | "hours" ⇒ HOURS - case "min" | "minute" | "minutes" ⇒ MINUTES - case "s" | "sec" | "second" | "seconds" ⇒ SECONDS - case "ms" | "milli" | "millis" | "millisecond" | "milliseconds" ⇒ MILLISECONDS - case "µs" | "micro" | "micros" | "microsecond" | "microseconds" ⇒ MICROSECONDS - case "ns" | "nano" | "nanos" | "nanosecond" | "nanoseconds" ⇒ NANOSECONDS - } - - val Zero: FiniteDuration = new FiniteDuration(0, NANOSECONDS) - - val Undefined: Duration = new Duration with Infinite { - override def toString = "Duration.Undefined" - override def equals(other: Any) = other.asInstanceOf[AnyRef] eq this - override def +(other: Duration): Duration = throw new IllegalArgumentException("cannot add Undefined duration") - override def -(other: Duration): Duration = throw new IllegalArgumentException("cannot subtract Undefined duration") - override def *(factor: Double): Duration = throw new IllegalArgumentException("cannot multiply Undefined duration") - override def /(factor: Double): Duration = throw new IllegalArgumentException("cannot divide Undefined duration") - override def /(other: Duration): Double = throw new IllegalArgumentException("cannot divide Undefined duration") - def compare(other: Duration) = throw new IllegalArgumentException("cannot compare Undefined duration") - def unary_- : Duration = throw new IllegalArgumentException("cannot negate Undefined duration") - } - - trait Infinite { - this: Duration ⇒ - - def +(other: Duration): Duration = - other match { - case _: this.type ⇒ this - case _: Infinite ⇒ throw new IllegalArgumentException("illegal addition of infinities") - case _ ⇒ this - } - def -(other: Duration): Duration = - other match { - case _: this.type ⇒ throw new IllegalArgumentException("illegal 
subtraction of infinities") - case _ ⇒ this - } - def *(factor: Double): Duration = this - def /(factor: Double): Duration = this - def /(other: Duration): Double = - other match { - case _: Infinite ⇒ throw new IllegalArgumentException("illegal division of infinities") - // maybe questionable but pragmatic: Inf / 0 => Inf - case x ⇒ Double.PositiveInfinity * (if ((this > Zero) ^ (other >= Zero)) -1 else 1) - } - - def finite_? = false - - def length: Long = throw new IllegalArgumentException("length not allowed on infinite Durations") - def unit: TimeUnit = throw new IllegalArgumentException("unit not allowed on infinite Durations") - def toNanos: Long = throw new IllegalArgumentException("toNanos not allowed on infinite Durations") - def toMicros: Long = throw new IllegalArgumentException("toMicros not allowed on infinite Durations") - def toMillis: Long = throw new IllegalArgumentException("toMillis not allowed on infinite Durations") - def toSeconds: Long = throw new IllegalArgumentException("toSeconds not allowed on infinite Durations") - def toMinutes: Long = throw new IllegalArgumentException("toMinutes not allowed on infinite Durations") - def toHours: Long = throw new IllegalArgumentException("toHours not allowed on infinite Durations") - def toDays: Long = throw new IllegalArgumentException("toDays not allowed on infinite Durations") - def toUnit(unit: TimeUnit): Double = throw new IllegalArgumentException("toUnit not allowed on infinite Durations") - - def printHMS = toString - } - - /** - * Infinite duration: greater than any other and not equal to any other, - * including itself. - */ - val Inf: Duration = new Duration with Infinite { - override def toString: String = "Duration.Inf" - def compare(other: Duration): Int = if (other eq this) 0 else 1 - def unary_- : Duration = MinusInf - } - - /** - * Infinite negative duration: lesser than any other and not equal to any other, - * including itself. - */ - val MinusInf: Duration = new Duration with Infinite { - override def toString = "Duration.MinusInf" - def compare(other: Duration): Int = if (other eq this) 0 else -1 - def unary_- : Duration = Inf - } - - // Java Factories - def create(length: Long, unit: TimeUnit): FiniteDuration = apply(length, unit) - def create(length: Double, unit: TimeUnit): FiniteDuration = apply(length, unit) - def create(length: Long, unit: String): FiniteDuration = apply(length, unit) - def parse(s: String): Duration = unapply(s).get - - implicit object DurationIsOrdered extends Ordering[Duration] { - def compare(a: Duration, b: Duration): Int = a compare b - } -} - -/** - * Utility for working with java.util.concurrent.TimeUnit durations. - * - *

- * Examples of usage from Java:
- * <pre>
- * import akka.util.FiniteDuration;
- * import java.util.concurrent.TimeUnit;
- *
- * Duration duration = new FiniteDuration(100, MILLISECONDS);
- * Duration duration = new FiniteDuration(5, "seconds");
- *
- * duration.toNanos();
- * </pre>
- *
- * Examples of usage from Scala:
- * <pre>
- * import akka.util.Duration
- * import java.util.concurrent.TimeUnit
- *
- * val duration = Duration(100, MILLISECONDS)
- * val duration = Duration(100, "millis")
- *
- * duration.toNanos
- * duration < 1.second
- * duration <= Duration.Inf
- * </pre>
- *
- * Implicits are also provided for Int, Long and Double. Example usage:
- * <pre>
- * import akka.util.duration._
- *
- * val duration = 100 millis
- * </pre>
- *
- * Extractors, parsing and arithmetic are also included:
- * <pre>
- * val d = Duration("1.2 µs")
- * val Duration(length, unit) = 5 millis
- * val d2 = d * 2.5
- * val d3 = d2 + 1.millisecond
- * </pre>
    - */ -//TODO add @SerialVersionUID(1L) when SI-4804 is fixed -abstract class Duration extends Serializable with Ordered[Duration] { - def length: Long - def unit: TimeUnit - def toNanos: Long - def toMicros: Long - def toMillis: Long - def toSeconds: Long - def toMinutes: Long - def toHours: Long - def toDays: Long - def toUnit(unit: TimeUnit): Double - def printHMS: String - def +(other: Duration): Duration - def -(other: Duration): Duration - def *(factor: Double): Duration - def /(factor: Double): Duration - def /(other: Duration): Double - def unary_- : Duration - def finite_? : Boolean - def min(other: Duration): Duration = if (this < other) this else other - def max(other: Duration): Duration = if (this > other) this else other - def sleep(): Unit = Thread.sleep(toMillis) - def fromNow: Deadline = Deadline.now + this - - // Java API - def lt(other: Duration): Boolean = this < other - def lteq(other: Duration): Boolean = this <= other - def gt(other: Duration): Boolean = this > other - def gteq(other: Duration): Boolean = this >= other - def plus(other: Duration): Duration = this + other - def minus(other: Duration): Duration = this - other - def mul(factor: Double): Duration = this * factor - def div(factor: Double): Duration = this / factor - def div(other: Duration): Double = this / other - def neg(): Duration = -this - def isFinite(): Boolean = finite_? -} - -object FiniteDuration { - implicit object FiniteDurationIsOrdered extends Ordering[FiniteDuration] { - def compare(a: FiniteDuration, b: FiniteDuration) = a compare b - } -} - -//TODO add @SerialVersionUID(1L) when SI-4804 is fixed -class FiniteDuration(val length: Long, val unit: TimeUnit) extends Duration { - import Duration._ - - require { - unit match { - /* - * sorted so that the first cases should be most-used ones, because enum - * is checked one after the other. 
- */ - case NANOSECONDS ⇒ true - case MICROSECONDS ⇒ length <= 9223372036854775L && length >= -9223372036854775L - case MILLISECONDS ⇒ length <= 9223372036854L && length >= -9223372036854L - case SECONDS ⇒ length <= 9223372036L && length >= -9223372036L - case MINUTES ⇒ length <= 153722867L && length >= -153722867L - case HOURS ⇒ length <= 2562047L && length >= -2562047L - case DAYS ⇒ length <= 106751L && length >= -106751L - case _ ⇒ - val v = unit.convert(length, DAYS) - v <= 106751L && v >= -106751L - } - } - - def this(length: Long, unit: String) = this(length, Duration.timeUnit(unit)) - - def toNanos = unit.toNanos(length) - def toMicros = unit.toMicros(length) - def toMillis = unit.toMillis(length) - def toSeconds = unit.toSeconds(length) - def toMinutes = unit.toMinutes(length) - def toHours = unit.toHours(length) - def toDays = unit.toDays(length) - def toUnit(u: TimeUnit) = long2double(toNanos) / NANOSECONDS.convert(1, u) - - override def toString = this match { - case Duration(1, DAYS) ⇒ "1 day" - case Duration(x, DAYS) ⇒ x + " days" - case Duration(1, HOURS) ⇒ "1 hour" - case Duration(x, HOURS) ⇒ x + " hours" - case Duration(1, MINUTES) ⇒ "1 minute" - case Duration(x, MINUTES) ⇒ x + " minutes" - case Duration(1, SECONDS) ⇒ "1 second" - case Duration(x, SECONDS) ⇒ x + " seconds" - case Duration(1, MILLISECONDS) ⇒ "1 millisecond" - case Duration(x, MILLISECONDS) ⇒ x + " milliseconds" - case Duration(1, MICROSECONDS) ⇒ "1 microsecond" - case Duration(x, MICROSECONDS) ⇒ x + " microseconds" - case Duration(1, NANOSECONDS) ⇒ "1 nanosecond" - case Duration(x, NANOSECONDS) ⇒ x + " nanoseconds" - } - - def printHMS = "%02d:%02d:%06.3f".format(toHours, toMinutes % 60, toMillis / 1000d % 60) - - def compare(other: Duration) = - if (other.finite_?) { - val me = toNanos - val o = other.toNanos - if (me > o) 1 else if (me < o) -1 else 0 - } else -other.compare(this) - - private def add(a: Long, b: Long): Long = { - val c = a + b - // check if the signs of the top bit of both summands differ from the sum - if (((a ^ c) & (b ^ c)) < 0) throw new IllegalArgumentException("") - else c - } - - def +(other: Duration): Duration = if (!other.finite_?) other else fromNanos(add(toNanos, other.toNanos)) - - def -(other: Duration): Duration = if (!other.finite_?) other else fromNanos(add(toNanos, -other.toNanos)) - - def *(factor: Double): FiniteDuration = fromNanos(long2double(toNanos) * factor) - - def /(factor: Double): FiniteDuration = fromNanos(long2double(toNanos) / factor) - - def /(other: Duration): Double = if (other.finite_?) long2double(toNanos) / other.toNanos else 0 - - def unary_- : FiniteDuration = Duration(-length, unit) - - def finite_? 
: Boolean = true - - override def equals(other: Any) = - (other.asInstanceOf[AnyRef] eq this) || other.isInstanceOf[FiniteDuration] && - toNanos == other.asInstanceOf[FiniteDuration].toNanos - - override def hashCode = { - val nanos = toNanos - (nanos ^ (nanos >> 32)).asInstanceOf[Int] - } -} - -private[akka] trait DurationOps { - import duration.Classifier - protected def from(timeUnit: TimeUnit): FiniteDuration - def nanoseconds: FiniteDuration = from(NANOSECONDS) - def nanos: FiniteDuration = from(NANOSECONDS) - def nanosecond: FiniteDuration = from(NANOSECONDS) - def nano: FiniteDuration = from(NANOSECONDS) - - def microseconds: FiniteDuration = from(MICROSECONDS) - def micros: FiniteDuration = from(MICROSECONDS) - def microsecond: FiniteDuration = from(MICROSECONDS) - def micro: FiniteDuration = from(MICROSECONDS) - - def milliseconds: FiniteDuration = from(MILLISECONDS) - def millis: FiniteDuration = from(MILLISECONDS) - def millisecond: FiniteDuration = from(MILLISECONDS) - def milli: FiniteDuration = from(MILLISECONDS) - - def seconds: FiniteDuration = from(SECONDS) - def second: FiniteDuration = from(SECONDS) - - def minutes: FiniteDuration = from(MINUTES) - def minute: FiniteDuration = from(MINUTES) - - def hours: FiniteDuration = from(HOURS) - def hour: FiniteDuration = from(HOURS) - - def days: FiniteDuration = from(DAYS) - def day: FiniteDuration = from(DAYS) - - def nanoseconds[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(from(NANOSECONDS)) - def nanos[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(from(NANOSECONDS)) - def nanosecond[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(from(NANOSECONDS)) - def nano[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(from(NANOSECONDS)) - - def microseconds[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(from(MICROSECONDS)) - def micros[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(from(MICROSECONDS)) - def microsecond[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(from(MICROSECONDS)) - def micro[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(from(MICROSECONDS)) - - def milliseconds[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(from(MILLISECONDS)) - def millis[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(from(MILLISECONDS)) - def millisecond[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(from(MILLISECONDS)) - def milli[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(from(MILLISECONDS)) - - def seconds[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(from(SECONDS)) - def second[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(from(SECONDS)) - - def minutes[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(from(MINUTES)) - def minute[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(from(MINUTES)) - - def hours[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(from(HOURS)) - def hour[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(from(HOURS)) - - def days[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(from(DAYS)) - def day[C, CC <: Classifier[C]](c: C)(implicit ev: CC): CC#R = ev.convert(from(DAYS)) -} - -class DurationInt(n: Int) extends DurationOps { - override protected def from(timeUnit: TimeUnit): FiniteDuration = Duration(n, timeUnit) -} - -class DurationLong(n: Long) extends DurationOps { 
- override protected def from(timeUnit: TimeUnit): FiniteDuration = Duration(n, timeUnit) -} - -class DurationDouble(d: Double) extends DurationOps { - override protected def from(timeUnit: TimeUnit): FiniteDuration = Duration(d, timeUnit) -} +import scala.concurrent.util.Duration //TODO add @SerialVersionUID(1L) when SI-4804 is fixed case class Timeout(duration: Duration) { diff --git a/akka-actor/src/main/scala/akka/util/Helpers.scala b/akka-actor/src/main/scala/akka/util/Helpers.scala index a3618359ac..8c0cfec86c 100644 --- a/akka-actor/src/main/scala/akka/util/Helpers.scala +++ b/akka-actor/src/main/scala/akka/util/Helpers.scala @@ -44,21 +44,4 @@ object Helpers { if (next == 0) sb.toString else base64(next, sb) } - - //FIXME docs - def ignore[E: Manifest](body: ⇒ Unit): Unit = - try body catch { case e if manifest[E].erasure.isAssignableFrom(e.getClass) ⇒ () } - - //FIXME docs - def withPrintStackTraceOnError(body: ⇒ Unit): Unit = { - try body catch { - case e: Throwable ⇒ - val sw = new java.io.StringWriter() - var root = e - while (root.getCause ne null) root = e.getCause - root.printStackTrace(new java.io.PrintWriter(sw)) - System.err.println(sw.toString) - throw e - } - } } diff --git a/akka-actor/src/main/scala/akka/util/LockUtil.scala b/akka-actor/src/main/scala/akka/util/LockUtil.scala index c465002b70..91c837063a 100644 --- a/akka-actor/src/main/scala/akka/util/LockUtil.scala +++ b/akka-actor/src/main/scala/akka/util/LockUtil.scala @@ -25,9 +25,9 @@ class Switch(startAsOn: Boolean = false) { protected def transcend(from: Boolean, action: ⇒ Unit): Boolean = synchronized { if (switch.compareAndSet(from, !from)) { try action catch { - case e ⇒ + case t: Throwable ⇒ switch.compareAndSet(!from, from) // revert status - throw e + throw t } true } else false diff --git a/akka-actor/src/main/scala/akka/util/NonFatal.scala b/akka-actor/src/main/scala/akka/util/NonFatal.scala deleted file mode 100644 index e14a491910..0000000000 --- a/akka-actor/src/main/scala/akka/util/NonFatal.scala +++ /dev/null @@ -1,29 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. - */ -package akka.util - -/** - * Extractor of non-fatal Throwables. Will not match fatal errors - * like VirtualMachineError (OutOfMemoryError) - * ThreadDeath, LinkageError and InterruptedException. - * StackOverflowError is matched, i.e. considered non-fatal. - * - * Usage to catch all harmless throwables: - * {{{ - * try { - * // dangerous stuff - * } catch { - * case NonFatal(e) => log.error(e, "Something not that bad") - * } - * }}} - */ -object NonFatal { - def unapply(t: Throwable): Option[Throwable] = t match { - case e: StackOverflowError ⇒ Some(e) // StackOverflowError ok even though it is a VirtualMachineError - // VirtualMachineError includes OutOfMemoryError and other fatal errors - case _: VirtualMachineError | _: ThreadDeath | _: InterruptedException | _: LinkageError ⇒ None - case e ⇒ Some(e) - } -} - diff --git a/akka-actor/src/main/scala/akka/util/Reflect.scala b/akka-actor/src/main/scala/akka/util/Reflect.scala index 3a46edeab1..f62ea5fb8c 100644 --- a/akka-actor/src/main/scala/akka/util/Reflect.scala +++ b/akka-actor/src/main/scala/akka/util/Reflect.scala @@ -2,6 +2,7 @@ * Copyright (C) 2009-2012 Typesafe Inc. 
*/ package akka.util +import scala.util.control.NonFatal /** * Collection of internal reflection utilities which may or may not be @@ -29,4 +30,24 @@ private[akka] object Reflect { } } + /** + * INTERNAL API + * @param clazz the class which to instantiate an instance of + * @tparam T the type of the instance that will be created + * @return a new instance from the default constructor of the given class + */ + private[akka] def instantiate[T](clazz: Class[T]): T = try clazz.newInstance catch { + case iae: IllegalAccessException ⇒ + val ctor = clazz.getDeclaredConstructor() + ctor.setAccessible(true) + ctor.newInstance() + } + + /** + * INTERNAL API + * @param clazz the class which to instantiate an instance of + * @tparam T the type of the instance that will be created + * @return a function which when applied will create a new instance from the default constructor of the given class + */ + private[akka] def instantiator[T](clazz: Class[T]): () ⇒ T = () ⇒ instantiate(clazz) } \ No newline at end of file diff --git a/akka-actor/src/main/scala/akka/util/SubclassifiedIndex.scala b/akka-actor/src/main/scala/akka/util/SubclassifiedIndex.scala index 4149a0b0b1..d07fff3a32 100644 --- a/akka-actor/src/main/scala/akka/util/SubclassifiedIndex.scala +++ b/akka-actor/src/main/scala/akka/util/SubclassifiedIndex.scala @@ -79,14 +79,14 @@ class SubclassifiedIndex[K, V] private (private var values: Set[V])(implicit sc: * Add key to this index which inherits its value set from the most specific * super-class which is known. */ - def addKey(key: K): Changes = { - for (n ← subkeys) { - if (sc.isEqual(n.key, key)) return Nil - else if (sc.isSubclass(key, n.key)) return n.addKey(key) + def addKey(key: K): Changes = + subkeys collectFirst { + case n if sc.isEqual(n.key, key) ⇒ Nil + case n if sc.isSubclass(key, n.key) ⇒ n.addKey(key) + } getOrElse { + integrate(new Nonroot(key, values)) + List((key, values)) } - integrate(new Nonroot(key, values)) - (key, values) :: Nil - } /** * Add value to all keys which are subclasses of the given key. If the key diff --git a/akka-actor/src/main/scala/akka/util/duration/package.scala b/akka-actor/src/main/scala/akka/util/duration/package.scala deleted file mode 100644 index 6a7d28a6e6..0000000000 --- a/akka-actor/src/main/scala/akka/util/duration/package.scala +++ /dev/null @@ -1,46 +0,0 @@ -/** - * Copyright (C) 2009-2012 Typesafe Inc. 
- */ - -package akka.util - -import java.util.concurrent.TimeUnit -//FIXME Needs docs -package object duration { - trait Classifier[C] { - type R - def convert(d: FiniteDuration): R - } - - object span - implicit object spanConvert extends Classifier[span.type] { - type R = FiniteDuration - def convert(d: FiniteDuration): FiniteDuration = d - } - - object fromNow - implicit object fromNowConvert extends Classifier[fromNow.type] { - type R = Deadline - def convert(d: FiniteDuration): Deadline = Deadline.now + d - } - - implicit def intToDurationInt(n: Int): DurationInt = new DurationInt(n) - implicit def longToDurationLong(n: Long): DurationLong = new DurationLong(n) - implicit def doubleToDurationDouble(d: Double): DurationDouble = new DurationDouble(d) - - implicit def pairIntToDuration(p: (Int, TimeUnit)): FiniteDuration = Duration(p._1, p._2) - implicit def pairLongToDuration(p: (Long, TimeUnit)): FiniteDuration = Duration(p._1, p._2) - implicit def durationToPair(d: Duration): (Long, TimeUnit) = (d.length, d.unit) - - /* - * avoid reflection based invocation by using non-duck type - */ - class IntMult(i: Int) { def *(d: Duration): Duration = d * i } - implicit def intMult(i: Int): IntMult = new IntMult(i) - - class LongMult(l: Long) { def *(d: Duration): Duration = d * l } - implicit def longMult(l: Long): LongMult = new LongMult(l) - - class DoubleMult(f: Double) { def *(d: Duration): Duration = d * f } - implicit def doubleMult(f: Double): DoubleMult = new DoubleMult(f) -} diff --git a/akka-agent/src/main/scala/akka/agent/Agent.scala b/akka-agent/src/main/scala/akka/agent/Agent.scala index ea3d8719cd..bd77c1d9ff 100644 --- a/akka-agent/src/main/scala/akka/agent/Agent.scala +++ b/akka-agent/src/main/scala/akka/agent/Agent.scala @@ -6,10 +6,10 @@ package akka.agent import akka.actor._ import akka.japi.{ Function ⇒ JFunc, Procedure ⇒ JProc } -import akka.dispatch._ import akka.pattern.ask import akka.util.Timeout import scala.concurrent.stm._ +import scala.concurrent.{ Future, Promise, Await } /** * Used internally to send functions. @@ -127,9 +127,9 @@ class Agent[T](initialValue: T, system: ActorSystem) { def dispatch = ask(updater, Alter(f))(timeout).asInstanceOf[Future[T]] val txn = Txn.findCurrent if (txn.isDefined) { - val result = Promise[T]()(system.dispatcher) + val result = Promise[T]() Txn.afterCommit(status ⇒ result completeWith dispatch)(txn.get) - result + result.future } else dispatch } @@ -168,14 +168,14 @@ class Agent[T](initialValue: T, system: ActorSystem) { * still be executed in order. */ def alterOff(f: T ⇒ T)(timeout: Timeout): Future[T] = { - val result = Promise[T]()(system.dispatcher) + val result = Promise[T]() send((value: T) ⇒ { suspend() val threadBased = system.actorOf(Props(new ThreadBasedAgentUpdater(this, ref)).withDispatcher("akka.agent.alter-off-dispatcher")) result completeWith ask(threadBased, Alter(f))(timeout).asInstanceOf[Future[T]] value }) - result + result.future } /** @@ -214,7 +214,7 @@ class Agent[T](initialValue: T, system: ActorSystem) { /** * Resumes processing of `send` actions for the agent. */ - def resume(): Unit = updater.resume() + def resume(): Unit = updater.resume(inResponseToFailure = false) /** * Closes the agents and makes it eligible for garbage collection. 
diff --git a/akka-agent/src/test/scala/akka/agent/AgentSpec.scala b/akka-agent/src/test/scala/akka/agent/AgentSpec.scala index eb33dd2e23..dd57abe33f 100644 --- a/akka-agent/src/test/scala/akka/agent/AgentSpec.scala +++ b/akka-agent/src/test/scala/akka/agent/AgentSpec.scala @@ -1,8 +1,11 @@ package akka.agent -import akka.dispatch.{ Await, Future } -import akka.util.{ Duration, Timeout } -import akka.util.duration._ +import language.postfixOps + +import scala.concurrent.{ Await, Future } +import scala.concurrent.util.Duration +import scala.concurrent.util.duration._ +import akka.util.Timeout import akka.testkit._ import scala.concurrent.stm._ import java.util.concurrent.{ CountDownLatch, TimeUnit } @@ -51,6 +54,7 @@ class AgentSpec extends AkkaSpec { } "maintain order between alter and alterOff" in { + import system.dispatcher val l1, l2 = new CountDownLatch(1) val agent = Agent("a") diff --git a/akka-camel/src/main/scala/akka/camel/Activation.scala b/akka-camel/src/main/scala/akka/camel/Activation.scala index d01c990136..768ea22b3a 100644 --- a/akka-camel/src/main/scala/akka/camel/Activation.scala +++ b/akka-camel/src/main/scala/akka/camel/Activation.scala @@ -4,19 +4,20 @@ package akka.camel -import internal._ -import akka.util.{ Timeout, Duration } -import akka.dispatch.Future +import akka.camel.internal._ +import akka.util.Timeout +import scala.concurrent.Future import java.util.concurrent.TimeoutException import akka.actor.{ ActorSystem, Props, ActorRef } import akka.pattern._ +import scala.concurrent.util.Duration /** * Activation trait that can be used to wait on activation or de-activation of Camel endpoints. * The Camel endpoints are activated asynchronously. This trait can signal when an endpoint is activated or de-activated. */ trait Activation { - import akka.dispatch.Await + import scala.concurrent.Await def system: ActorSystem //FIXME Why is this here, what's it needed for and who should use it? @@ -51,10 +52,10 @@ trait Activation { * @param timeout the timeout for the Future */ def activationFutureFor(endpoint: ActorRef, timeout: Duration): Future[ActorRef] = - (activationTracker.ask(AwaitActivation(endpoint))(Timeout(timeout))).map[ActorRef] { + (activationTracker.ask(AwaitActivation(endpoint))(Timeout(timeout))).map[ActorRef]({ case EndpointActivated(_) ⇒ endpoint case EndpointFailedToActivate(_, cause) ⇒ throw cause - } + })(system.dispatcher) /** * Similar to awaitDeactivation but returns a future instead. @@ -62,10 +63,10 @@ trait Activation { * @param timeout the timeout of the Future */ def deactivationFutureFor(endpoint: ActorRef, timeout: Duration): Future[Unit] = - (activationTracker.ask(AwaitDeActivation(endpoint))(Timeout(timeout))).map[Unit] { + (activationTracker.ask(AwaitDeActivation(endpoint))(Timeout(timeout))).map[Unit]({ case EndpointDeActivated(_) ⇒ () case EndpointFailedToDeActivate(_, cause) ⇒ throw cause - } + })(system.dispatcher) } /** diff --git a/akka-camel/src/main/scala/akka/camel/ActorRouteDefinition.scala b/akka-camel/src/main/scala/akka/camel/ActorRouteDefinition.scala index ce8d19bec6..ebc99c7a92 100644 --- a/akka-camel/src/main/scala/akka/camel/ActorRouteDefinition.scala +++ b/akka-camel/src/main/scala/akka/camel/ActorRouteDefinition.scala @@ -7,7 +7,7 @@ package akka.camel import internal.component.CamelPath import akka.actor.ActorRef import org.apache.camel.model.ProcessorDefinition -import akka.util.Duration +import scala.concurrent.util.Duration /** * Wraps a [[org.apache.camel.model.ProcessorDefinition]]. 
diff --git a/akka-camel/src/main/scala/akka/camel/Camel.scala b/akka-camel/src/main/scala/akka/camel/Camel.scala index fb80b530e3..0ba241590b 100644 --- a/akka-camel/src/main/scala/akka/camel/Camel.scala +++ b/akka-camel/src/main/scala/akka/camel/Camel.scala @@ -8,7 +8,7 @@ import internal._ import akka.actor._ import org.apache.camel.{ ProducerTemplate, CamelContext } import com.typesafe.config.Config -import akka.util.Duration +import scala.concurrent.util.Duration import java.util.concurrent.TimeUnit._ /** diff --git a/akka-camel/src/main/scala/akka/camel/CamelSupport.scala b/akka-camel/src/main/scala/akka/camel/CamelSupport.scala index decdd720ed..84cd23e339 100644 --- a/akka-camel/src/main/scala/akka/camel/CamelSupport.scala +++ b/akka-camel/src/main/scala/akka/camel/CamelSupport.scala @@ -2,7 +2,7 @@ package akka.camel import akka.actor.Actor import com.typesafe.config.Config -import akka.util.Duration +import scala.concurrent.util.Duration import java.util.concurrent.TimeUnit._ private[camel] trait CamelSupport { this: Actor ⇒ diff --git a/akka-camel/src/main/scala/akka/camel/Consumer.scala b/akka-camel/src/main/scala/akka/camel/Consumer.scala index a354ae190c..72daa89da0 100644 --- a/akka-camel/src/main/scala/akka/camel/Consumer.scala +++ b/akka-camel/src/main/scala/akka/camel/Consumer.scala @@ -4,11 +4,13 @@ package akka.camel +import language.postfixOps + +import internal.component.DurationTypeConverter import org.apache.camel.model.{ RouteDefinition, ProcessorDefinition } import akka.actor._ -import akka.util.Duration -import akka.util.duration._ -import java.util.concurrent.TimeUnit.MILLISECONDS +import scala.concurrent.util.Duration +import scala.concurrent.util.duration._ /** * Mixed in by Actor implementations that consume message from Camel endpoints. @@ -35,11 +37,6 @@ trait Consumer extends Actor with CamelSupport with ConsumerConfig { } } -case object DefaultConsumerParameters { - val replyTimeout = 1 minute - val autoAck = true -} - trait ConsumerConfig { this: CamelSupport ⇒ /** * How long the actor should wait for activation before it fails. diff --git a/akka-camel/src/main/scala/akka/camel/internal/ConsumerRegistry.scala b/akka-camel/src/main/scala/akka/camel/internal/ConsumerRegistry.scala index ec24832396..e172598b57 100644 --- a/akka-camel/src/main/scala/akka/camel/internal/ConsumerRegistry.scala +++ b/akka-camel/src/main/scala/akka/camel/internal/ConsumerRegistry.scala @@ -14,7 +14,7 @@ import akka.actor._ import collection.mutable import org.apache.camel.model.RouteDefinition import org.apache.camel.CamelContext -import akka.util.Duration +import scala.concurrent.util.Duration /** * For internal use only. 
diff --git a/akka-camel/src/main/scala/akka/camel/internal/DefaultCamel.scala b/akka-camel/src/main/scala/akka/camel/internal/DefaultCamel.scala index 8d207f3b7d..59752526f4 100644 --- a/akka-camel/src/main/scala/akka/camel/internal/DefaultCamel.scala +++ b/akka-camel/src/main/scala/akka/camel/internal/DefaultCamel.scala @@ -1,12 +1,14 @@ package akka.camel.internal import akka.actor.ActorSystem -import component.{ DurationTypeConverter, ActorComponent } +import akka.camel.internal.component.{ DurationTypeConverter, ActorComponent } import org.apache.camel.impl.DefaultCamelContext import scala.Predef._ import akka.event.Logging import akka.camel.{ CamelSettings, Camel } -import akka.util.{ NonFatal, Duration } +import scala.util.control.NonFatal +import scala.concurrent.util.Duration + import org.apache.camel.{ ProducerTemplate, CamelContext } /** diff --git a/akka-camel/src/main/scala/akka/camel/internal/ProducerRegistry.scala b/akka-camel/src/main/scala/akka/camel/internal/ProducerRegistry.scala index d338dbfdea..8631039a11 100644 --- a/akka-camel/src/main/scala/akka/camel/internal/ProducerRegistry.scala +++ b/akka-camel/src/main/scala/akka/camel/internal/ProducerRegistry.scala @@ -5,7 +5,7 @@ import org.apache.camel.processor.SendProcessor import akka.actor.{ Props, ActorRef, Terminated, Actor } import org.apache.camel.Endpoint import akka.camel._ -import akka.util.NonFatal +import scala.util.control.NonFatal /** * Watches the end of life of Producers. diff --git a/akka-camel/src/main/scala/akka/camel/internal/component/ActorComponent.scala b/akka-camel/src/main/scala/akka/camel/internal/component/ActorComponent.scala index 3c7f6325b7..e12456d2cf 100644 --- a/akka-camel/src/main/scala/akka/camel/internal/component/ActorComponent.scala +++ b/akka-camel/src/main/scala/akka/camel/internal/component/ActorComponent.scala @@ -4,6 +4,8 @@ package akka.camel.internal.component +import language.postfixOps + import java.util.{ Map ⇒ JMap } import org.apache.camel._ @@ -13,11 +15,14 @@ import akka.actor._ import akka.pattern._ import scala.reflect.BeanProperty +import scala.concurrent.util.duration._ +import scala.concurrent.util.Duration +import scala.concurrent.{ ExecutionContext, Future } +import scala.util.control.NonFatal import java.util.concurrent.{ TimeoutException, CountDownLatch } +import akka.util.Timeout import akka.camel.internal.CamelExchangeAdapter -import akka.util.{ NonFatal, Duration, Timeout } import akka.camel.{ ActorNotRegisteredException, Camel, Ack, FailureResult, CamelMessage } -import java.util.concurrent.TimeUnit.MILLISECONDS /** * For internal use only. * Creates Camel [[org.apache.camel.Endpoint]]s that send messages to [[akka.camel.Consumer]] actors through an [[akka.camel.internal.component.ActorProducer]]. 
@@ -143,49 +148,31 @@ private[camel] class ActorProducer(val endpoint: ActorEndpoint, camel: Camel) ex * @return (doneSync) true to continue execute synchronously, false to continue being executed asynchronously */ private[camel] def processExchangeAdapter(exchange: CamelExchangeAdapter, callback: AsyncCallback): Boolean = { - - // these notify methods are just a syntax sugar - def notifyDoneSynchronously[A](a: A = null): Unit = callback.done(true) - def notifyDoneAsynchronously[A](a: A = null): Unit = callback.done(false) - - def message: CamelMessage = messageFor(exchange) - - if (exchange.isOutCapable) { //InOut - sendAsync(message, onComplete = forwardResponseTo(exchange) andThen notifyDoneAsynchronously) - } else { // inOnly - if (endpoint.autoAck) { //autoAck - fireAndForget(message, exchange) - notifyDoneSynchronously() - true // done sync - } else { //manualAck - sendAsync(message, onComplete = forwardAckTo(exchange) andThen notifyDoneAsynchronously) - } + if (!exchange.isOutCapable && endpoint.autoAck) { + fireAndForget(messageFor(exchange), exchange) + callback.done(true) + true // done sync + } else { + val action: PartialFunction[Either[Throwable, Any], Unit] = + if (exchange.isOutCapable) { + case Right(failure: FailureResult) ⇒ exchange.setFailure(failure) + case Right(msg) ⇒ exchange.setResponse(CamelMessage.canonicalize(msg)) + case Left(e: TimeoutException) ⇒ exchange.setFailure(FailureResult(new TimeoutException("Failed to get response from the actor [%s] within timeout [%s]. Check replyTimeout and blocking settings [%s]" format (endpoint.path, endpoint.replyTimeout, endpoint)))) + case Left(throwable) ⇒ exchange.setFailure(FailureResult(throwable)) + } else { + case Right(Ack) ⇒ () /* no response message to set */ + case Right(failure: FailureResult) ⇒ exchange.setFailure(failure) + case Right(msg) ⇒ exchange.setFailure(FailureResult(new IllegalArgumentException("Expected Ack or Failure message, but got: [%s] from actor [%s]" format (msg, endpoint.path)))) + case Left(e: TimeoutException) ⇒ exchange.setFailure(FailureResult(new TimeoutException("Failed to get Ack or Failure response from the actor [%s] within timeout [%s]. Check replyTimeout and blocking settings [%s]" format (endpoint.path, endpoint.replyTimeout, endpoint)))) + case Left(throwable) ⇒ exchange.setFailure(FailureResult(throwable)) + } + val async = try actorFor(endpoint.path).ask(messageFor(exchange))(Timeout(endpoint.replyTimeout)) catch { case NonFatal(e) ⇒ Future.failed(e) } + implicit val ec = camel.system.dispatcher // FIXME which ExecutionContext should be used here? + async.onComplete(action andThen { _ ⇒ callback.done(false) }) + false } } - private def forwardResponseTo(exchange: CamelExchangeAdapter): PartialFunction[Either[Throwable, Any], Unit] = { - case Right(failure: FailureResult) ⇒ exchange.setFailure(failure) - case Right(msg) ⇒ exchange.setResponse(CamelMessage.canonicalize(msg)) - case Left(e: TimeoutException) ⇒ exchange.setFailure(FailureResult(new TimeoutException("Failed to get response from the actor [%s] within timeout [%s]. 
Check replyTimeout [%s]" format (endpoint.path, endpoint.replyTimeout, endpoint)))) - case Left(throwable) ⇒ exchange.setFailure(FailureResult(throwable)) - } - - private def forwardAckTo(exchange: CamelExchangeAdapter): PartialFunction[Either[Throwable, Any], Unit] = { - case Right(Ack) ⇒ { /* no response message to set */ } - case Right(failure: FailureResult) ⇒ exchange.setFailure(failure) - case Right(msg) ⇒ exchange.setFailure(FailureResult(new IllegalArgumentException("Expected Ack or Failure message, but got: [%s] from actor [%s]" format (msg, endpoint.path)))) - case Left(e: TimeoutException) ⇒ exchange.setFailure(FailureResult(new TimeoutException("Failed to get Ack or Failure response from the actor [%s] within timeout [%s]. Check replyTimeout [%s]" format (endpoint.path, endpoint.replyTimeout, endpoint)))) - case Left(throwable) ⇒ exchange.setFailure(FailureResult(throwable)) - } - - private def sendAsync(message: CamelMessage, onComplete: PartialFunction[Either[Throwable, Any], Unit]): Boolean = { - try { - actorFor(endpoint.path).ask(message)(Timeout(endpoint.replyTimeout)).onComplete(onComplete) - } catch { - case NonFatal(e) ⇒ onComplete(Left(e)) - } - false // Done async - } private def fireAndForget(message: CamelMessage, exchange: CamelExchangeAdapter): Unit = try { actorFor(endpoint.path) ! message } catch { case NonFatal(e) ⇒ exchange.setFailure(new FailureResult(e)) } diff --git a/akka-camel/src/main/scala/akka/package.scala b/akka-camel/src/main/scala/akka/package.scala index f8cbc7d069..e1f2c0756e 100644 --- a/akka-camel/src/main/scala/akka/package.scala +++ b/akka-camel/src/main/scala/akka/package.scala @@ -4,6 +4,8 @@ package akka +import language.implicitConversions + import org.apache.camel.model.ProcessorDefinition package object camel { diff --git a/akka-camel/src/test/java/akka/camel/ConsumerJavaTestBase.java b/akka-camel/src/test/java/akka/camel/ConsumerJavaTestBase.java index fec093d108..6a1eec27de 100644 --- a/akka-camel/src/test/java/akka/camel/ConsumerJavaTestBase.java +++ b/akka-camel/src/test/java/akka/camel/ConsumerJavaTestBase.java @@ -7,7 +7,7 @@ package akka.camel; import akka.actor.ActorRef; import akka.actor.ActorSystem; import akka.actor.Props; -import akka.util.FiniteDuration; +import scala.concurrent.util.FiniteDuration; import org.junit.AfterClass; import org.junit.Test; diff --git a/akka-camel/src/test/java/akka/camel/CustomRouteTestBase.java b/akka-camel/src/test/java/akka/camel/CustomRouteTestBase.java index 8e7361506d..517557f0a7 100644 --- a/akka-camel/src/test/java/akka/camel/CustomRouteTestBase.java +++ b/akka-camel/src/test/java/akka/camel/CustomRouteTestBase.java @@ -4,7 +4,7 @@ import akka.actor.*; import akka.camel.internal.component.CamelPath; import akka.camel.javaapi.UntypedConsumerActor; import akka.camel.javaapi.UntypedProducerActor; -import akka.util.FiniteDuration; +import scala.concurrent.util.FiniteDuration; import org.apache.camel.CamelExecutionException; import org.apache.camel.Exchange; import org.apache.camel.Predicate; diff --git a/akka-camel/src/test/java/akka/camel/SampleErrorHandlingConsumer.java b/akka-camel/src/test/java/akka/camel/SampleErrorHandlingConsumer.java index 5bde5f8976..f0bd9ebf3f 100644 --- a/akka-camel/src/test/java/akka/camel/SampleErrorHandlingConsumer.java +++ b/akka-camel/src/test/java/akka/camel/SampleErrorHandlingConsumer.java @@ -6,7 +6,7 @@ package akka.camel; import akka.actor.Status; import akka.camel.javaapi.UntypedConsumerActor; -import akka.util.Duration; +import 
scala.concurrent.util.Duration; import org.apache.camel.builder.Builder; import org.apache.camel.model.ProcessorDefinition; import org.apache.camel.model.RouteDefinition; diff --git a/akka-camel/src/test/scala/akka/camel/ActivationIntegrationTest.scala b/akka-camel/src/test/scala/akka/camel/ActivationIntegrationTest.scala index e36a3e1afe..c4788bb113 100644 --- a/akka-camel/src/test/scala/akka/camel/ActivationIntegrationTest.scala +++ b/akka-camel/src/test/scala/akka/camel/ActivationIntegrationTest.scala @@ -4,15 +4,17 @@ package akka.camel +import language.postfixOps + import org.scalatest.matchers.MustMatchers -import akka.util.duration._ +import scala.concurrent.util.duration._ import org.apache.camel.ProducerTemplate import akka.actor._ import akka.util.Timeout import TestSupport._ import org.scalatest.WordSpec import akka.testkit.TestLatch -import akka.dispatch.Await +import scala.concurrent.Await class ActivationIntegrationTest extends WordSpec with MustMatchers with SharedCamelSystem { implicit val timeout = Timeout(10 seconds) diff --git a/akka-camel/src/test/scala/akka/camel/CamelExchangeAdapterTest.scala b/akka-camel/src/test/scala/akka/camel/CamelExchangeAdapterTest.scala index 1b08df493c..ca7d57a0aa 100644 --- a/akka-camel/src/test/scala/akka/camel/CamelExchangeAdapterTest.scala +++ b/akka-camel/src/test/scala/akka/camel/CamelExchangeAdapterTest.scala @@ -4,6 +4,8 @@ package akka.camel +import language.implicitConversions + import internal.CamelExchangeAdapter import org.apache.camel.impl.DefaultExchange import org.apache.camel.{ Exchange, ExchangePattern } diff --git a/akka-camel/src/test/scala/akka/camel/ConsumerIntegrationTest.scala b/akka-camel/src/test/scala/akka/camel/ConsumerIntegrationTest.scala index 383405db61..2d8d780264 100644 --- a/akka-camel/src/test/scala/akka/camel/ConsumerIntegrationTest.scala +++ b/akka-camel/src/test/scala/akka/camel/ConsumerIntegrationTest.scala @@ -4,9 +4,12 @@ package akka.camel +import language.postfixOps +import language.existentials + import akka.actor._ import org.scalatest.matchers.MustMatchers -import akka.util.duration._ +import scala.concurrent.util.duration._ import TestSupport._ import org.scalatest.WordSpec import org.apache.camel.model.RouteDefinition @@ -14,7 +17,7 @@ import org.apache.camel.builder.Builder import org.apache.camel.{ FailedToCreateRouteException, CamelExecutionException } import java.util.concurrent.{ ExecutionException, TimeUnit, TimeoutException } import akka.testkit.TestLatch -import akka.dispatch.Await +import scala.concurrent.Await import akka.actor.Status.Failure class ConsumerIntegrationTest extends WordSpec with MustMatchers with NonSharedCamelSystem { diff --git a/akka-camel/src/test/scala/akka/camel/ProducerFeatureTest.scala b/akka-camel/src/test/scala/akka/camel/ProducerFeatureTest.scala index a7e5b9e4cb..3de8055875 100644 --- a/akka-camel/src/test/scala/akka/camel/ProducerFeatureTest.scala +++ b/akka-camel/src/test/scala/akka/camel/ProducerFeatureTest.scala @@ -4,16 +4,18 @@ package akka.camel +import language.postfixOps + import org.apache.camel.{ Exchange, Processor } import org.apache.camel.builder.RouteBuilder import org.apache.camel.component.mock.MockEndpoint -import akka.dispatch.Await +import scala.concurrent.Await import akka.camel.TestSupport.SharedCamelSystem import akka.actor.SupervisorStrategy.Stop import org.scalatest.{ BeforeAndAfterEach, BeforeAndAfterAll, WordSpec } import akka.actor._ import akka.pattern._ -import akka.util.duration._ +import scala.concurrent.util.duration._ import 
akka.util.Timeout import org.scalatest.matchers.MustMatchers import akka.testkit.TestLatch diff --git a/akka-camel/src/test/scala/akka/camel/ProducerRegistryTest.scala b/akka-camel/src/test/scala/akka/camel/ProducerRegistryTest.scala index 876e0cc47a..43f9498bdd 100644 --- a/akka-camel/src/test/scala/akka/camel/ProducerRegistryTest.scala +++ b/akka-camel/src/test/scala/akka/camel/ProducerRegistryTest.scala @@ -4,10 +4,12 @@ package akka.camel +import language.postfixOps + import org.scalatest.matchers.MustMatchers import org.scalatest.WordSpec import akka.camel.TestSupport.SharedCamelSystem -import akka.util.duration._ +import scala.concurrent.util.duration._ import akka.actor.{ ActorRef, Props } class ProducerRegistryTest extends WordSpec with MustMatchers with SharedCamelSystem { diff --git a/akka-camel/src/test/scala/akka/camel/TestSupport.scala b/akka-camel/src/test/scala/akka/camel/TestSupport.scala index cbc6f43fd7..045f75e0ef 100644 --- a/akka-camel/src/test/scala/akka/camel/TestSupport.scala +++ b/akka-camel/src/test/scala/akka/camel/TestSupport.scala @@ -4,12 +4,15 @@ package akka.camel +import language.postfixOps +import language.implicitConversions + import akka.actor.{ Props, ActorSystem, Actor } -import akka.util.duration._ +import scala.concurrent.util.duration._ import java.util.concurrent.{ TimeoutException, ExecutionException, TimeUnit } import org.scalatest.{ BeforeAndAfterEach, BeforeAndAfterAll, Suite } import org.scalatest.matchers.{ BePropertyMatcher, BePropertyMatchResult } -import akka.util.{ FiniteDuration, Duration } +import scala.concurrent.util.{ FiniteDuration, Duration } private[camel] object TestSupport { diff --git a/akka-camel/src/test/scala/akka/camel/UntypedProducerTest.scala b/akka-camel/src/test/scala/akka/camel/UntypedProducerTest.scala index 411aa0b938..f7c68fa791 100644 --- a/akka-camel/src/test/scala/akka/camel/UntypedProducerTest.scala +++ b/akka-camel/src/test/scala/akka/camel/UntypedProducerTest.scala @@ -4,6 +4,8 @@ package akka.camel +import language.postfixOps + import org.apache.camel.{ Exchange, Processor } import org.apache.camel.builder.RouteBuilder import org.apache.camel.component.mock.MockEndpoint @@ -11,8 +13,8 @@ import org.apache.camel.component.mock.MockEndpoint import akka.camel.TestSupport.SharedCamelSystem import akka.actor.Props import akka.pattern._ -import akka.dispatch.Await -import akka.util.duration._ +import scala.concurrent.Await +import scala.concurrent.util.duration._ import org.scalatest._ import matchers.MustMatchers diff --git a/akka-camel/src/test/scala/akka/camel/internal/ActivationTrackerTest.scala b/akka-camel/src/test/scala/akka/camel/internal/ActivationTrackerTest.scala index fef69ec6db..1b763706ed 100644 --- a/akka-camel/src/test/scala/akka/camel/internal/ActivationTrackerTest.scala +++ b/akka-camel/src/test/scala/akka/camel/internal/ActivationTrackerTest.scala @@ -1,10 +1,12 @@ package akka.camel.internal +import language.postfixOps + import org.scalatest.matchers.MustMatchers -import akka.util.duration._ +import scala.concurrent.util.duration._ import org.scalatest.{ GivenWhenThen, BeforeAndAfterEach, BeforeAndAfterAll, WordSpec } import akka.actor.{ Props, ActorSystem } -import akka.util.Duration +import scala.concurrent.util.Duration import akka.camel._ import akka.testkit.{ TimingTest, TestProbe, TestKit } diff --git a/akka-camel/src/test/scala/akka/camel/internal/component/ActorComponentConfigurationTest.scala b/akka-camel/src/test/scala/akka/camel/internal/component/ActorComponentConfigurationTest.scala 
index c057b83cba..09f9c1aa62 100644 --- a/akka-camel/src/test/scala/akka/camel/internal/component/ActorComponentConfigurationTest.scala +++ b/akka-camel/src/test/scala/akka/camel/internal/component/ActorComponentConfigurationTest.scala @@ -4,8 +4,10 @@ package akka.camel.internal.component +import language.postfixOps + import org.scalatest.matchers.MustMatchers -import akka.util.duration._ +import scala.concurrent.util.duration._ import akka.camel.TestSupport.SharedCamelSystem import org.apache.camel.Component import org.scalatest.WordSpec diff --git a/akka-camel/src/test/scala/akka/camel/internal/component/ActorProducerTest.scala b/akka-camel/src/test/scala/akka/camel/internal/component/ActorProducerTest.scala index d40dab2a56..b874849fd5 100644 --- a/akka-camel/src/test/scala/akka/camel/internal/component/ActorProducerTest.scala +++ b/akka-camel/src/test/scala/akka/camel/internal/component/ActorProducerTest.scala @@ -4,13 +4,15 @@ package akka.camel.internal.component +import language.postfixOps + import org.scalatest.mock.MockitoSugar import org.mockito.Matchers.any import org.mockito.Mockito._ import org.apache.camel.{ CamelContext, ProducerTemplate, AsyncCallback } import java.util.concurrent.atomic.AtomicBoolean -import akka.util.duration._ -import akka.util.Duration +import scala.concurrent.util.duration._ +import scala.concurrent.util.Duration import akka.testkit.{ TestKit, TestProbe } import java.lang.String import akka.actor.{ ActorRef, Props, ActorSystem, Actor } @@ -275,6 +277,7 @@ trait ActorProducerFixture extends MockitoSugar with BeforeAndAfterAll with Befo val sys = mock[ActorSystem] val config = ConfigFactory.defaultReference() + when(sys.dispatcher) thenReturn system.dispatcher when(sys.settings) thenReturn (new Settings(this.getClass.getClassLoader, config, "mocksystem")) when(sys.name) thenReturn ("mocksystem") @@ -322,17 +325,12 @@ trait ActorProducerFixture extends MockitoSugar with BeforeAndAfterAll with Befo callbackReceived.countDown() } - private[this] def valueWithin(implicit timeout: Duration) = { + private[this] def valueWithin(implicit timeout: Duration) = if (!callbackReceived.await(timeout.toNanos, TimeUnit.NANOSECONDS)) fail("Callback not received!") - callbackValue.get - } + else callbackValue.get - def expectDoneSyncWithin(implicit timeout: Duration) { - if (!valueWithin(timeout)) fail("Expected to be done Synchronously") - } - def expectDoneAsyncWithin(implicit timeout: Duration) { - if (valueWithin(timeout)) fail("Expected to be done Asynchronously") - } + def expectDoneSyncWithin(implicit timeout: Duration): Unit = if (!valueWithin(timeout)) fail("Expected to be done Synchronously") + def expectDoneAsyncWithin(implicit timeout: Duration): Unit = if (valueWithin(timeout)) fail("Expected to be done Asynchronously") } diff --git a/akka-camel/src/test/scala/akka/camel/internal/component/DurationConverterTest.scala b/akka-camel/src/test/scala/akka/camel/internal/component/DurationConverterTest.scala index 53729a0b6f..a77f7dea77 100644 --- a/akka-camel/src/test/scala/akka/camel/internal/component/DurationConverterTest.scala +++ b/akka-camel/src/test/scala/akka/camel/internal/component/DurationConverterTest.scala @@ -4,9 +4,11 @@ package akka.camel.internal.component +import language.postfixOps + import org.scalatest.matchers.MustMatchers -import akka.util.duration._ -import akka.util.Duration +import scala.concurrent.util.duration._ +import scala.concurrent.util.Duration import org.scalatest.WordSpec import 
org.apache.camel.NoTypeConversionAvailableException diff --git a/akka-camel/src/main/scala/akka/camelexamples/ExamplesSupport.scala b/akka-camel/src/test/scala/akka/camelexamples/ExamplesSupport.scala similarity index 94% rename from akka-camel/src/main/scala/akka/camelexamples/ExamplesSupport.scala rename to akka-camel/src/test/scala/akka/camelexamples/ExamplesSupport.scala index df5b0e5508..ff84b5d085 100644 --- a/akka-camel/src/main/scala/akka/camelexamples/ExamplesSupport.scala +++ b/akka-camel/src/test/scala/akka/camelexamples/ExamplesSupport.scala @@ -4,8 +4,10 @@ package akka.camelexamples +import language.postfixOps + import akka.camel._ -import akka.util.duration._ +import scala.concurrent.util.duration._ import akka.actor.{ Actor, OneForOneStrategy } import akka.actor.SupervisorStrategy._ diff --git a/akka-camel/src/main/scala/akka/camelexamples/README.txt b/akka-camel/src/test/scala/akka/camelexamples/README.txt similarity index 100% rename from akka-camel/src/main/scala/akka/camelexamples/README.txt rename to akka-camel/src/test/scala/akka/camelexamples/README.txt diff --git a/akka-camel/src/main/scala/akka/camelexamples/RichString.scala b/akka-camel/src/test/scala/akka/camelexamples/RichString.scala similarity index 94% rename from akka-camel/src/main/scala/akka/camelexamples/RichString.scala rename to akka-camel/src/test/scala/akka/camelexamples/RichString.scala index 66c844e091..1c4443c465 100644 --- a/akka-camel/src/main/scala/akka/camelexamples/RichString.scala +++ b/akka-camel/src/test/scala/akka/camelexamples/RichString.scala @@ -4,6 +4,8 @@ package akka.camelexamples +import language.implicitConversions + import java.io.FileWriter private[camelexamples] object RichString { diff --git a/akka-camel/src/main/scala/akka/camelexamples/_1_SimpleConsumer.scala b/akka-camel/src/test/scala/akka/camelexamples/_1_SimpleConsumer.scala similarity index 100% rename from akka-camel/src/main/scala/akka/camelexamples/_1_SimpleConsumer.scala rename to akka-camel/src/test/scala/akka/camelexamples/_1_SimpleConsumer.scala diff --git a/akka-camel/src/main/scala/akka/camelexamples/_2_SupervisedConsumers.scala b/akka-camel/src/test/scala/akka/camelexamples/_2_SupervisedConsumers.scala similarity index 100% rename from akka-camel/src/main/scala/akka/camelexamples/_2_SupervisedConsumers.scala rename to akka-camel/src/test/scala/akka/camelexamples/_2_SupervisedConsumers.scala diff --git a/akka-camel/src/main/scala/akka/camelexamples/_3_SimpleActorEndpoint.scala b/akka-camel/src/test/scala/akka/camelexamples/_3_SimpleActorEndpoint.scala similarity index 100% rename from akka-camel/src/main/scala/akka/camelexamples/_3_SimpleActorEndpoint.scala rename to akka-camel/src/test/scala/akka/camelexamples/_3_SimpleActorEndpoint.scala diff --git a/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala b/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala index 76c9595a59..202eab4dd7 100644 --- a/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala +++ b/akka-cluster/src/main/scala/akka/cluster/AccrualFailureDetector.scala @@ -11,8 +11,8 @@ import scala.collection.immutable.Map import scala.annotation.tailrec import java.util.concurrent.atomic.AtomicReference import java.util.concurrent.TimeUnit.NANOSECONDS -import akka.util.Duration -import akka.util.duration._ +import scala.concurrent.util.Duration +import scala.concurrent.util.duration._ object AccrualFailureDetector { private def realClock: () ⇒ Long = () ⇒ NANOSECONDS.toMillis(System.nanoTime) diff --git 
a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala index a2c64b75cd..796b39af52 100644 --- a/akka-cluster/src/main/scala/akka/cluster/Cluster.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Cluster.scala @@ -4,32 +4,28 @@ package akka.cluster +import language.implicitConversions + +import akka.actor._ +import akka.actor.Status._ +import akka.ConfigurationException +import akka.dispatch.MonitorableThreadFactory +import akka.event.Logging +import akka.pattern._ +import akka.remote._ +import akka.routing._ +import akka.util._ +import scala.concurrent.Await +import scala.concurrent.util.duration._ +import scala.concurrent.util.{ Duration, Deadline } +import scala.concurrent.forkjoin.ThreadLocalRandom +import scala.annotation.tailrec +import scala.collection.immutable.SortedSet + import java.io.Closeable import java.util.concurrent.atomic.AtomicBoolean import java.util.concurrent.atomic.AtomicReference -import scala.annotation.tailrec -import scala.collection.immutable.SortedSet - -import akka.ConfigurationException -import akka.actor.ActorRef -import akka.actor.ActorSystem -import akka.actor.ActorSystemImpl -import akka.actor.Address -import akka.actor.Cancellable -import akka.actor.DefaultScheduler -import akka.actor.ExtendedActorSystem -import akka.actor.Extension -import akka.actor.ExtensionId -import akka.actor.ExtensionIdProvider -import akka.actor.Props -import akka.actor.Scheduler -import akka.dispatch.Await -import akka.dispatch.MonitorableThreadFactory -import akka.event.Logging -import akka.pattern.ask -import akka.remote.RemoteActorRefProvider -import akka.util.Duration import akka.util.internal.HashedWheelTimer /** @@ -347,4 +343,4 @@ trait MembershipChangeListener { */ trait MetaDataChangeListener { def notify(meta: Map[String, Array[Byte]]): Unit -} \ No newline at end of file +} diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala index c23d345401..5e3e54561e 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterDaemon.scala @@ -4,25 +4,13 @@ package akka.cluster import scala.collection.immutable.SortedSet - -import akka.actor.Actor -import akka.actor.ActorLogging -import akka.actor.ActorRef -import akka.actor.Address -import akka.actor.Cancellable -import akka.actor.Props -import akka.actor.RootActorPath +import scala.concurrent.util.{ Deadline, Duration } +import scala.concurrent.forkjoin.ThreadLocalRandom +import akka.actor.{ Actor, ActorLogging, ActorRef, Address, Cancellable, Props, RootActorPath, PoisonPill, Scheduler } import akka.actor.Status.Failure -import akka.actor.PoisonPill -import akka.actor.Scheduler import akka.routing.ScatterGatherFirstCompletedRouter -import akka.util.Deadline -import akka.util.Duration import akka.util.Timeout -import akka.jsr166y.ThreadLocalRandom -import akka.pattern.AskTimeoutException -import akka.pattern.ask -import akka.pattern.pipe +import akka.pattern.{ AskTimeoutException, ask, pipe } import MemberStatus._ /** @@ -557,7 +545,7 @@ private[cluster] final class ClusterCoreDaemon(environment: ClusterEnvironment) gossipToRandomNodeOf( if (preferredGossipTargets.nonEmpty) preferredGossipTargets - else localGossip.members.toIndexedSeq[Member].map(_.address) // Fall back to localGossip; important to not accidentally use `map` of the SortedSet, since the original order is not preserved) + else 
localGossip.members.toIndexedSeq.map(_.address) // Fall back to localGossip; important to not accidentally use `map` of the SortedSet, since the original order is not preserved) ) } } diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterHeartbeat.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterHeartbeat.scala index 5780f3eda0..932be34d2f 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterHeartbeat.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterHeartbeat.scala @@ -3,18 +3,13 @@ */ package akka.cluster -import akka.actor.ReceiveTimeout -import akka.actor.ActorLogging +import language.postfixOps + +import akka.actor.{ ReceiveTimeout, ActorLogging, ActorRef, Address, Actor, RootActorPath, Props } import java.security.MessageDigest -import akka.pattern.CircuitBreaker -import akka.actor.ActorRef -import akka.pattern.CircuitBreakerOpenException -import akka.actor.Address -import akka.actor.Actor -import akka.actor.RootActorPath -import akka.actor.Props -import akka.util.duration._ -import akka.util.Deadline +import akka.pattern.{ CircuitBreaker, CircuitBreakerOpenException } +import scala.concurrent.util.duration._ +import scala.concurrent.util.Deadline /** * Sent at regular intervals for failure detection. diff --git a/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala b/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala index d48db5446c..b8f5463529 100644 --- a/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala +++ b/akka-cluster/src/main/scala/akka/cluster/ClusterSettings.scala @@ -4,7 +4,7 @@ package akka.cluster import com.typesafe.config.Config -import akka.util.Duration +import scala.concurrent.util.Duration import java.util.concurrent.TimeUnit.MILLISECONDS import akka.ConfigurationException import scala.collection.JavaConverters._ @@ -50,4 +50,4 @@ class ClusterSettings(val config: Config, val systemName: String) { resetTimeout = Duration(getMilliseconds("akka.cluster.send-circuit-breaker.reset-timeout"), MILLISECONDS)) } -case class CircuitBreakerSettings(maxFailures: Int, callTimeout: Duration, resetTimeout: Duration) \ No newline at end of file +case class CircuitBreakerSettings(maxFailures: Int, callTimeout: Duration, resetTimeout: Duration) diff --git a/akka-cluster/src/main/scala/akka/cluster/FixedRateTask.scala b/akka-cluster/src/main/scala/akka/cluster/FixedRateTask.scala index c7799fc5c8..118785ef18 100644 --- a/akka-cluster/src/main/scala/akka/cluster/FixedRateTask.scala +++ b/akka-cluster/src/main/scala/akka/cluster/FixedRateTask.scala @@ -5,19 +5,16 @@ package akka.cluster import java.util.concurrent.TimeUnit -import java.util.concurrent.atomic.AtomicBoolean -import java.util.concurrent.atomic.AtomicLong -import akka.actor.Scheduler -import akka.util.Duration -import akka.actor.Cancellable +import java.util.concurrent.atomic.{ AtomicBoolean, AtomicLong } +import akka.actor.{ Scheduler, Cancellable } +import scala.concurrent.util.Duration /** * INTERNAL API */ private[akka] object FixedRateTask { - def apply(scheduler: Scheduler, initalDelay: Duration, delay: Duration)(f: ⇒ Unit): FixedRateTask = { + def apply(scheduler: Scheduler, initalDelay: Duration, delay: Duration)(f: ⇒ Unit): FixedRateTask = new FixedRateTask(scheduler, initalDelay, delay, new Runnable { def run(): Unit = f }) - } } /** diff --git a/akka-cluster/src/main/scala/akka/cluster/Member.scala b/akka-cluster/src/main/scala/akka/cluster/Member.scala index a10899cf37..d50131c85d 100644 --- 
a/akka-cluster/src/main/scala/akka/cluster/Member.scala +++ b/akka-cluster/src/main/scala/akka/cluster/Member.scala @@ -4,6 +4,8 @@ package akka.cluster +import language.implicitConversions + import scala.collection.immutable.SortedSet import scala.collection.GenTraversableOnce import akka.actor.Address diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterAccrualFailureDetectorSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterAccrualFailureDetectorSpec.scala index d5d41b52aa..a43ee4f8b9 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterAccrualFailureDetectorSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ClusterAccrualFailureDetectorSpec.scala @@ -6,7 +6,7 @@ package akka.cluster import com.typesafe.config.ConfigFactory import akka.remote.testkit.MultiNodeConfig import akka.remote.testkit.MultiNodeSpec -import akka.util.duration._ +import scala.concurrent.util.duration._ import akka.testkit._ object ClusterAccrualFailureDetectorMultiJvmSpec extends MultiNodeConfig { diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala index 6d92a6f094..fec36c5229 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/ConvergenceSpec.scala @@ -7,7 +7,7 @@ import com.typesafe.config.ConfigFactory import akka.remote.testkit.MultiNodeConfig import akka.remote.testkit.MultiNodeSpec import akka.testkit._ -import akka.util.duration._ +import scala.concurrent.util.duration._ import akka.actor.Address object ConvergenceMultiJvmSpec extends MultiNodeConfig { diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinInProgressSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinInProgressSpec.scala index 256b7d563d..7c70f37bd5 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinInProgressSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinInProgressSpec.scala @@ -8,7 +8,7 @@ import org.scalatest.BeforeAndAfter import akka.remote.testkit.MultiNodeConfig import akka.remote.testkit.MultiNodeSpec import akka.testkit._ -import akka.util.duration._ +import scala.concurrent.util.duration._ import akka.util.Deadline object JoinInProgressMultiJvmSpec extends MultiNodeConfig { diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinSeedNodeSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinSeedNodeSpec.scala index ef52d9e131..f71ebe3cc3 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinSeedNodeSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/JoinSeedNodeSpec.scala @@ -8,7 +8,7 @@ import org.scalatest.BeforeAndAfter import akka.remote.testkit.MultiNodeConfig import akka.remote.testkit.MultiNodeSpec import akka.testkit._ -import akka.util.duration._ +import scala.concurrent.util.duration._ object JoinSeedNodeMultiJvmSpec extends MultiNodeConfig { val seed1 = role("seed1") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LargeClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LargeClusterSpec.scala index 08f7ca10fa..da612b57bf 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LargeClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LargeClusterSpec.scala @@ -7,7 +7,7 @@ import com.typesafe.config.ConfigFactory import akka.remote.testkit.MultiNodeConfig import akka.remote.testkit.MultiNodeSpec import akka.testkit._ -import akka.util.duration._ +import 
scala.concurrent.util.duration._ import akka.actor.ActorSystem import akka.util.Deadline import java.util.concurrent.TimeoutException diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala index d1640be511..0ac9fa8344 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderDowningNodeThatIsUnreachableSpec.scala @@ -8,7 +8,7 @@ import akka.remote.testkit.MultiNodeConfig import akka.remote.testkit.MultiNodeSpec import akka.testkit._ import akka.actor._ -import akka.util.duration._ +import scala.concurrent.util.duration._ object LeaderDowningNodeThatIsUnreachableMultiJvmSpec extends MultiNodeConfig { val first = role("first") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderLeavingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderLeavingSpec.scala index e232802eeb..9e45b1529b 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderLeavingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/LeaderLeavingSpec.scala @@ -8,7 +8,7 @@ import com.typesafe.config.ConfigFactory import akka.remote.testkit.MultiNodeConfig import akka.remote.testkit.MultiNodeSpec import akka.testkit._ -import akka.util.duration._ +import scala.concurrent.util.duration._ object LeaderLeavingMultiJvmSpec extends MultiNodeConfig { val first = role("first") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala index ee74584953..3bf49a538b 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerExitingSpec.scala @@ -9,7 +9,7 @@ import com.typesafe.config.ConfigFactory import akka.remote.testkit.MultiNodeConfig import akka.remote.testkit.MultiNodeSpec import akka.testkit._ -import akka.util.duration._ +import scala.concurrent.util.duration._ object MembershipChangeListenerExitingMultiJvmSpec extends MultiNodeConfig { val first = role("first") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinSpec.scala index eacec24109..441ecc4528 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MembershipChangeListenerJoinSpec.scala @@ -9,7 +9,7 @@ import org.scalatest.BeforeAndAfter import akka.remote.testkit.MultiNodeConfig import akka.remote.testkit.MultiNodeSpec import akka.testkit._ -import akka.util.duration._ +import scala.concurrent.util.duration._ object MembershipChangeListenerJoinMultiJvmSpec extends MultiNodeConfig { val first = role("first") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala index af0b38d447..8f280d07bf 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/MultiNodeClusterSpec.scala @@ -9,8 +9,8 @@ import akka.actor.{ Address, ExtendedActorSystem } import akka.remote.testconductor.RoleName import akka.remote.testkit.MultiNodeSpec import akka.testkit._ 
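The gossip fallback in the ClusterCoreDaemon hunk above converts the member SortedSet to an IndexedSeq before mapping, because `map` on a SortedSet re-sorts by the result type's ordering and so loses the original order. A minimal, self-contained Scala sketch of that pitfall (illustrative case class, not Akka's Member):

    import scala.collection.immutable.SortedSet

    object SortedSetMapOrder extends App {
      // stand-in for Member: sorted by age, descending
      final case class M(age: Int, addr: String)
      implicit val byAgeDesc: Ordering[M] = Ordering.by[M, Int](_.age).reverse

      val members = SortedSet(M(3, "b"), M(1, "a"), M(2, "c"))

      println(members.map(_.addr))              // TreeSet(a, b, c) -- re-sorted by String ordering
      println(members.toIndexedSeq.map(_.addr)) // Vector(b, c, a)  -- original order preserved
    }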
-import akka.util.duration._ -import akka.util.Duration +import scala.concurrent.util.duration._ +import scala.concurrent.util.Duration import org.scalatest.Suite import org.scalatest.TestFailedException import java.util.concurrent.ConcurrentHashMap diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinSpec.scala index 50656a6a9d..1a35af6411 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeJoinSpec.scala @@ -8,7 +8,7 @@ import org.scalatest.BeforeAndAfter import akka.remote.testkit.MultiNodeConfig import akka.remote.testkit.MultiNodeSpec import akka.testkit._ -import akka.util.duration._ +import scala.concurrent.util.duration._ object NodeJoinMultiJvmSpec extends MultiNodeConfig { val first = role("first") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala index 64f52c4549..d5c374ba64 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingAndBeingRemovedSpec.scala @@ -8,7 +8,7 @@ import com.typesafe.config.ConfigFactory import akka.remote.testkit.MultiNodeConfig import akka.remote.testkit.MultiNodeSpec import akka.testkit._ -import akka.util.duration._ +import scala.concurrent.util.duration._ object NodeLeavingAndExitingAndBeingRemovedMultiJvmSpec extends MultiNodeConfig { val first = role("first") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala index 565d78e9d8..19c81ecb28 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeLeavingAndExitingSpec.scala @@ -8,7 +8,7 @@ import com.typesafe.config.ConfigFactory import akka.remote.testkit.MultiNodeConfig import akka.remote.testkit.MultiNodeSpec import akka.testkit._ -import akka.util.duration._ +import scala.concurrent.util.duration._ object NodeLeavingAndExitingMultiJvmSpec extends MultiNodeConfig { val first = role("first") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala index f951da0801..34b2e00590 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/NodeUpSpec.scala @@ -8,7 +8,7 @@ import org.scalatest.BeforeAndAfter import akka.remote.testkit.MultiNodeConfig import akka.remote.testkit.MultiNodeSpec import akka.testkit._ -import akka.util.duration._ +import scala.concurrent.util.duration._ import scala.collection.immutable.SortedSet import java.util.concurrent.atomic.AtomicReference diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/SingletonClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/SingletonClusterSpec.scala index 3c35e95333..dddeac8816 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/SingletonClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/SingletonClusterSpec.scala @@ -7,7 +7,7 @@ import com.typesafe.config.ConfigFactory import akka.remote.testkit.MultiNodeConfig import akka.remote.testkit.MultiNodeSpec import akka.testkit._ -import akka.util.duration._ +import 
scala.concurrent.util.duration._ object SingletonClusterMultiJvmSpec extends MultiNodeConfig { val first = role("first") diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/SplitBrainSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/SplitBrainSpec.scala index 24e94f715d..4f26b55ff5 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/SplitBrainSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/SplitBrainSpec.scala @@ -7,7 +7,7 @@ import com.typesafe.config.ConfigFactory import akka.remote.testkit.MultiNodeConfig import akka.remote.testkit.MultiNodeSpec import akka.testkit._ -import akka.util.duration._ +import scala.concurrent.util.duration._ import akka.actor.Address import akka.remote.testconductor.Direction diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala index ddacf668e0..215b9f24e4 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/SunnyWeatherSpec.scala @@ -8,7 +8,7 @@ import org.scalatest.BeforeAndAfter import akka.remote.testkit.MultiNodeConfig import akka.remote.testkit.MultiNodeSpec import akka.testkit._ -import akka.util.duration._ +import scala.concurrent.util.duration._ import java.util.concurrent.atomic.AtomicReference import scala.collection.immutable.SortedSet diff --git a/akka-cluster/src/multi-jvm/scala/akka/cluster/UnreachableNodeRejoinsClusterSpec.scala b/akka-cluster/src/multi-jvm/scala/akka/cluster/UnreachableNodeRejoinsClusterSpec.scala index 65a36080ff..1244727d3f 100644 --- a/akka-cluster/src/multi-jvm/scala/akka/cluster/UnreachableNodeRejoinsClusterSpec.scala +++ b/akka-cluster/src/multi-jvm/scala/akka/cluster/UnreachableNodeRejoinsClusterSpec.scala @@ -10,7 +10,7 @@ import akka.testkit._ import com.typesafe.config.ConfigFactory import akka.actor.Address import akka.remote.testconductor.{ RoleName, Direction } -import akka.util.duration._ +import scala.concurrent.util.duration._ object UnreachableNodeRejoinsClusterMultiJvmSpec extends MultiNodeConfig { val first = role("first") diff --git a/akka-cluster/src/test/scala/akka/cluster/AccrualFailureDetectorSpec.scala b/akka-cluster/src/test/scala/akka/cluster/AccrualFailureDetectorSpec.scala index df69a52e19..908b7298fd 100644 --- a/akka-cluster/src/test/scala/akka/cluster/AccrualFailureDetectorSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/AccrualFailureDetectorSpec.scala @@ -7,8 +7,8 @@ package akka.cluster import akka.actor.Address import akka.testkit.{ LongRunningTest, AkkaSpec } import scala.collection.immutable.TreeMap -import akka.util.duration._ -import akka.util.Duration +import scala.concurrent.util.duration._ +import scala.concurrent.util.Duration @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) class AccrualFailureDetectorSpec extends AkkaSpec(""" diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala index 71504e6b2b..8d420dc021 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterConfigSpec.scala @@ -4,10 +4,12 @@ package akka.cluster +import language.postfixOps + import akka.testkit.AkkaSpec -import akka.util.duration._ -import akka.util.Duration import akka.dispatch.Dispatchers +import scala.concurrent.util.duration._ +import scala.concurrent.util.Duration 
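For reference, the package moves applied over and over in these hunks, collected into one compilable, imports-only summary (old locations noted in comments):

    import scala.concurrent.Await                        // was akka.dispatch.Await
    import scala.concurrent.{ Future, Promise }          // was akka.dispatch.Future / akka.dispatch.Promise
    import scala.concurrent.util.{ Deadline, Duration }  // was akka.util.Deadline / akka.util.Duration
    import scala.concurrent.util.duration._              // was akka.util.duration._
    import scala.concurrent.forkjoin.ThreadLocalRandom   // was akka.jsr166y.ThreadLocalRandom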
@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) class ClusterConfigSpec extends AkkaSpec { diff --git a/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala b/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala index f660af3763..dd3fe83de9 100644 --- a/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/ClusterSpec.scala @@ -4,15 +4,18 @@ package akka.cluster +import language.postfixOps +import language.reflectiveCalls + +import scala.concurrent.util.duration._ +import scala.concurrent.util.Duration + import akka.testkit.AkkaSpec import akka.testkit.ImplicitSender -import akka.util.duration._ -import akka.util.Duration import akka.actor.ExtendedActorSystem import akka.actor.Address -import java.util.concurrent.atomic.AtomicInteger +import akka.cluster.InternalClusterAction._ import akka.remote.RemoteActorRefProvider -import InternalClusterAction._ import java.lang.management.ManagementFactory import javax.management.ObjectName diff --git a/akka-cluster/src/test/scala/akka/cluster/FixedRateTaskSpec.scala b/akka-cluster/src/test/scala/akka/cluster/FixedRateTaskSpec.scala index d259a5310b..98634b0787 100644 --- a/akka-cluster/src/test/scala/akka/cluster/FixedRateTaskSpec.scala +++ b/akka-cluster/src/test/scala/akka/cluster/FixedRateTaskSpec.scala @@ -5,10 +5,10 @@ package akka.cluster import akka.testkit.AkkaSpec -import akka.util.duration._ +import scala.concurrent.util.duration._ import akka.testkit.TimingTest import akka.testkit.TestLatch -import akka.dispatch.Await +import scala.concurrent.Await @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) class FixedRateTaskSpec extends AkkaSpec { @@ -31,7 +31,7 @@ class FixedRateTaskSpec extends AkkaSpec { val n = 22 val latch = new TestLatch(n) FixedRateTask(system.scheduler, 225.millis, 225.millis) { - 80.millis.sleep() + Thread.sleep(80) latch.countDown() } Await.ready(latch, 6.seconds) diff --git a/akka-docs/common/code/docs/circuitbreaker/CircuitBreakerDocSpec.scala b/akka-docs/common/code/docs/circuitbreaker/CircuitBreakerDocSpec.scala index c4603017e3..6e942a0160 100644 --- a/akka-docs/common/code/docs/circuitbreaker/CircuitBreakerDocSpec.scala +++ b/akka-docs/common/code/docs/circuitbreaker/CircuitBreakerDocSpec.scala @@ -5,10 +5,10 @@ package docs.circuitbreaker //#imports1 -import akka.util.duration._ // small d is important here +import scala.concurrent.util.duration._ // small d is important here import akka.pattern.CircuitBreaker import akka.actor.Actor -import akka.dispatch.Future +import scala.concurrent.Future import akka.event.Logging //#imports1 diff --git a/akka-docs/common/code/docs/circuitbreaker/DangerousJavaActor.java b/akka-docs/common/code/docs/circuitbreaker/DangerousJavaActor.java index 1562338e04..f8df40f30a 100644 --- a/akka-docs/common/code/docs/circuitbreaker/DangerousJavaActor.java +++ b/akka-docs/common/code/docs/circuitbreaker/DangerousJavaActor.java @@ -6,9 +6,9 @@ package docs.circuitbreaker; //#imports1 import akka.actor.UntypedActor; -import akka.dispatch.Future; +import scala.concurrent.Future; import akka.event.LoggingAdapter; -import akka.util.Duration; +import scala.concurrent.util.Duration; import akka.pattern.CircuitBreaker; import akka.event.Logging; diff --git a/akka-docs/common/duration.rst b/akka-docs/common/duration.rst index fd25dc5128..62cfb75a91 100644 --- a/akka-docs/common/duration.rst +++ b/akka-docs/common/duration.rst @@ -16,7 +16,7 @@ In Scala durations are constructable using a mini-DSL 
and support all expected o .. code-block:: scala - import akka.util.duration._ // notice the small d + import scala.concurrent.util.duration._ // notice the small d val fivesec = 5.seconds val threemillis = 3.millis diff --git a/akka-docs/java/code/docs/actor/FaultHandlingTestBase.java b/akka-docs/java/code/docs/actor/FaultHandlingTestBase.java index 2d40071fe8..a2aab6e6c7 100644 --- a/akka-docs/java/code/docs/actor/FaultHandlingTestBase.java +++ b/akka-docs/java/code/docs/actor/FaultHandlingTestBase.java @@ -12,9 +12,9 @@ import akka.actor.OneForOneStrategy; import akka.actor.Props; import akka.actor.Terminated; import akka.actor.UntypedActor; -import akka.dispatch.Await; +import scala.concurrent.Await; import static akka.pattern.Patterns.ask; -import akka.util.Duration; +import scala.concurrent.util.Duration; import akka.testkit.AkkaSpec; import akka.testkit.TestProbe; diff --git a/akka-docs/java/code/docs/actor/MyReceivedTimeoutUntypedActor.java b/akka-docs/java/code/docs/actor/MyReceivedTimeoutUntypedActor.java index 025d634b09..3f24e9cb1f 100644 --- a/akka-docs/java/code/docs/actor/MyReceivedTimeoutUntypedActor.java +++ b/akka-docs/java/code/docs/actor/MyReceivedTimeoutUntypedActor.java @@ -6,7 +6,7 @@ package docs.actor; //#receive-timeout import akka.actor.ReceiveTimeout; import akka.actor.UntypedActor; -import akka.util.Duration; +import scala.concurrent.util.Duration; public class MyReceivedTimeoutUntypedActor extends UntypedActor { diff --git a/akka-docs/java/code/docs/actor/SchedulerDocTestBase.java b/akka-docs/java/code/docs/actor/SchedulerDocTestBase.java index d7e8fa644f..4a98d66338 100644 --- a/akka-docs/java/code/docs/actor/SchedulerDocTestBase.java +++ b/akka-docs/java/code/docs/actor/SchedulerDocTestBase.java @@ -5,7 +5,7 @@ package docs.actor; //#imports1 import akka.actor.Props; -import akka.util.Duration; +import scala.concurrent.util.Duration; import java.util.concurrent.TimeUnit; //#imports1 diff --git a/akka-docs/java/code/docs/actor/TypedActorDocTestBase.java b/akka-docs/java/code/docs/actor/TypedActorDocTestBase.java index fdd677c78b..ab2acd4db2 100644 --- a/akka-docs/java/code/docs/actor/TypedActorDocTestBase.java +++ b/akka-docs/java/code/docs/actor/TypedActorDocTestBase.java @@ -6,10 +6,12 @@ package docs.actor; //#imports import akka.actor.TypedActor; -import akka.dispatch.*; import akka.actor.*; import akka.japi.*; -import akka.util.Duration; +import akka.dispatch.Futures; +import scala.concurrent.Await; +import scala.concurrent.Future; +import scala.concurrent.util.Duration; import java.util.concurrent.TimeUnit; //#imports @@ -54,7 +56,7 @@ public class TypedActorDocTestBase { } public Future square(int i) { - return Futures.successful(i * i, TypedActor.dispatcher()); + return Futures.successful(i * i); } public Option squareNowPlease(int i) { diff --git a/akka-docs/java/code/docs/actor/UntypedActorDocTestBase.java b/akka-docs/java/code/docs/actor/UntypedActorDocTestBase.java index d5f44c6000..a64fa48615 100644 --- a/akka-docs/java/code/docs/actor/UntypedActorDocTestBase.java +++ b/akka-docs/java/code/docs/actor/UntypedActorDocTestBase.java @@ -10,11 +10,11 @@ import akka.actor.Props; //#imports //#import-future -import akka.dispatch.Future; +import scala.concurrent.Future; import akka.dispatch.Futures; import akka.dispatch.Mapper; -import akka.dispatch.Await; -import akka.util.Duration; +import scala.concurrent.Await; +import scala.concurrent.util.Duration; import akka.util.Timeout; //#import-future @@ -33,18 +33,18 @@ import akka.actor.Terminated; 
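A compilable sketch of the duration mini-DSL under its new home, as duration.rst describes above; package names are as of this pre-2.10 snapshot (later Scala releases ship it as scala.concurrent.duration):

    import java.util.concurrent.TimeUnit.MILLISECONDS
    import scala.concurrent.util.Duration
    import scala.concurrent.util.duration._   // notice the small d

    object DurationDslSketch extends App {
      val fivesec = 5.seconds
      val threemillis = 3.millis
      println(fivesec + threemillis)        // durations support arithmetic
      println(fivesec > threemillis)        // and comparison
      println(Duration(500, MILLISECONDS))  // explicit factory; the Java samples use Duration.create
    }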
//#import-gracefulStop import static akka.pattern.Patterns.gracefulStop; -import akka.dispatch.Future; -import akka.dispatch.Await; -import akka.util.Duration; +import scala.concurrent.Future; +import scala.concurrent.Await; +import scala.concurrent.util.Duration; import akka.pattern.AskTimeoutException; //#import-gracefulStop //#import-askPipe import static akka.pattern.Patterns.ask; import static akka.pattern.Patterns.pipe; -import akka.dispatch.Future; +import scala.concurrent.Future; import akka.dispatch.Futures; -import akka.util.Duration; +import scala.concurrent.util.Duration; import akka.util.Timeout; import java.util.concurrent.TimeUnit; import java.util.ArrayList; @@ -206,7 +206,6 @@ public class UntypedActorDocTestBase { ActorSystem system = ActorSystem.create("MySystem"); ActorRef actorRef = system.actorOf(new Props(MyUntypedActor.class)); //#gracefulStop - try { Future stopped = gracefulStop(actorRef, Duration.create(5, TimeUnit.SECONDS), system); Await.result(stopped, Duration.create(6, TimeUnit.SECONDS)); @@ -250,9 +249,9 @@ public class UntypedActorDocTestBase { final int x = (Integer) it.next(); return new Result(x, s); } - }); + }, system.dispatcher()); - pipe(transformed).to(actorC); + pipe(transformed, system.dispatcher()).to(actorC); //#ask-pipe system.shutdown(); } diff --git a/akka-docs/java/code/docs/actor/japi/FaultHandlingDocSample.java b/akka-docs/java/code/docs/actor/japi/FaultHandlingDocSample.java index 1213ab5949..b7338830e4 100644 --- a/akka-docs/java/code/docs/actor/japi/FaultHandlingDocSample.java +++ b/akka-docs/java/code/docs/actor/japi/FaultHandlingDocSample.java @@ -13,14 +13,14 @@ import java.util.Map; import akka.actor.*; import akka.dispatch.Mapper; import akka.japi.Function; -import akka.util.Duration; +import scala.concurrent.util.Duration; import akka.util.Timeout; import akka.event.Logging; import akka.event.LoggingAdapter; import com.typesafe.config.Config; import com.typesafe.config.ConfigFactory; -import static akka.japi.Util.manifest; +import static akka.japi.Util.classTag; import static akka.actor.SupervisorStrategy.*; import static akka.pattern.Patterns.ask; @@ -146,12 +146,12 @@ public class FaultHandlingDocSample { // Send current progress to the initial sender pipe(ask(counterService, GetCurrentCount, askTimeout) - .mapTo(manifest(CurrentCount.class)) + .mapTo(classTag(CurrentCount.class)) .map(new Mapper() { public Progress apply(CurrentCount c) { return new Progress(100.0 * c.count / totalCount); } - })) + }, getContext().dispatcher()), getContext().dispatcher()) .to(progressListener); } else { unhandled(msg); diff --git a/akka-docs/java/code/docs/camel/ActivationTestBase.java b/akka-docs/java/code/docs/camel/ActivationTestBase.java index e85afa08b4..96e6ff6af0 100644 --- a/akka-docs/java/code/docs/camel/ActivationTestBase.java +++ b/akka-docs/java/code/docs/camel/ActivationTestBase.java @@ -6,9 +6,9 @@ package docs.camel; import akka.camel.Camel; import akka.camel.CamelExtension; import akka.camel.javaapi.UntypedConsumerActor; - import akka.dispatch.Future; - import akka.util.Duration; - import akka.util.FiniteDuration; + import scala.concurrent.Future; + import scala.concurrent.util.Duration; + import scala.concurrent.util.FiniteDuration; import static java.util.concurrent.TimeUnit.SECONDS; //#CamelActivation diff --git a/akka-docs/java/code/docs/camel/Consumer4.java b/akka-docs/java/code/docs/camel/Consumer4.java index b6ca904f7c..144d79965b 100644 --- a/akka-docs/java/code/docs/camel/Consumer4.java +++ 
b/akka-docs/java/code/docs/camel/Consumer4.java @@ -2,8 +2,8 @@ package docs.camel; //#Consumer4 import akka.camel.CamelMessage; import akka.camel.javaapi.UntypedConsumerActor; -import akka.util.Duration; -import akka.util.FiniteDuration; +import scala.concurrent.util.Duration; +import scala.concurrent.util.FiniteDuration; import java.util.concurrent.TimeUnit; diff --git a/akka-docs/java/code/docs/camel/ProducerTestBase.java b/akka-docs/java/code/docs/camel/ProducerTestBase.java index de7853a282..2cab47d02c 100644 --- a/akka-docs/java/code/docs/camel/ProducerTestBase.java +++ b/akka-docs/java/code/docs/camel/ProducerTestBase.java @@ -4,10 +4,10 @@ import akka.actor.*; import akka.camel.Camel; import akka.camel.CamelExtension; import akka.camel.CamelMessage; -import akka.dispatch.Future; import akka.pattern.Patterns; -import akka.util.Duration; -import akka.util.FiniteDuration; +import scala.concurrent.Future; +import scala.concurrent.util.Duration; +import scala.concurrent.util.FiniteDuration; import org.apache.camel.CamelContext; import org.apache.camel.ProducerTemplate; import org.junit.Test; diff --git a/akka-docs/java/code/docs/extension/SettingsExtensionDocTestBase.java b/akka-docs/java/code/docs/extension/SettingsExtensionDocTestBase.java index 265c91b206..112668597d 100644 --- a/akka-docs/java/code/docs/extension/SettingsExtensionDocTestBase.java +++ b/akka-docs/java/code/docs/extension/SettingsExtensionDocTestBase.java @@ -9,7 +9,7 @@ import akka.actor.AbstractExtensionId; import akka.actor.ExtensionIdProvider; import akka.actor.ActorSystem; import akka.actor.ExtendedActorSystem; -import akka.util.Duration; +import scala.concurrent.util.Duration; import com.typesafe.config.Config; import java.util.concurrent.TimeUnit; diff --git a/akka-docs/java/code/docs/future/FutureDocTestBase.java b/akka-docs/java/code/docs/future/FutureDocTestBase.java index 2fe2220223..e6c482a66d 100644 --- a/akka-docs/java/code/docs/future/FutureDocTestBase.java +++ b/akka-docs/java/code/docs/future/FutureDocTestBase.java @@ -5,12 +5,15 @@ package docs.future; //#imports1 import akka.dispatch.*; +import scala.concurrent.ExecutionContext; +import scala.concurrent.Future; +import scala.concurrent.Await; import akka.util.Timeout; //#imports1 //#imports2 -import akka.util.Duration; +import scala.concurrent.util.Duration; import akka.japi.Function; import java.util.concurrent.Callable; import static akka.dispatch.Futures.future; @@ -40,8 +43,8 @@ import static akka.dispatch.Futures.reduce; //#imports6 //#imports7 -import akka.dispatch.ExecutionContexts; -import akka.dispatch.ExecutionContextExecutorService; +import scala.concurrent.ExecutionContext; +import scala.concurrent.ExecutionContext$; //#imports7 @@ -79,17 +82,17 @@ public class FutureDocTestBase { system.shutdown(); } - @Test public void useCustomExecutionContext() throws Exception { + @SuppressWarnings("unchecked") @Test public void useCustomExecutionContext() throws Exception { ExecutorService yourExecutorServiceGoesHere = Executors.newSingleThreadExecutor(); //#diy-execution-context - ExecutionContextExecutorService ec = - ExecutionContexts.fromExecutorService(yourExecutorServiceGoesHere); + ExecutionContext ec = + ExecutionContext$.MODULE$.fromExecutorService(yourExecutorServiceGoesHere); //Use ec with your Futures - Future f1 = Futures.successful("foo", ec); + Future f1 = Futures.successful("foo"); - // Then you shut the ec down somewhere at the end of your program/application. 
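A Scala-flavoured sketch of the "bring your own ExecutionContext" sample being rewritten here: wrap an ExecutorService, use the resulting context implicitly, and shut the underlying executor down yourself (a minimal sketch, not the doc's exact code):

    import java.util.concurrent.Executors
    import scala.concurrent.{ Await, ExecutionContext, Future }
    import scala.concurrent.util.duration._

    object DiyExecutionContextSketch extends App {
      val pool = Executors.newSingleThreadExecutor()
      implicit val ec = ExecutionContext.fromExecutorService(pool)  // wrap the ExecutorService

      val f = Future { "foo" }            // runs on the wrapped pool
      println(Await.result(f, 1.second))

      pool.shutdown()                     // the caller owns the ExecutorService lifecycle
    }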
- ec.shutdown(); + // Then you shut the ExecutorService down somewhere at the end of your program/application. + yourExecutorServiceGoesHere.shutdown(); //#diy-execution-context } @@ -121,17 +124,19 @@ public class FutureDocTestBase { @Test public void useMap() throws Exception { //#map + final ExecutionContext ec = system.dispatcher(); + Future f1 = future(new Callable() { public String call() { return "Hello" + "World"; } - }, system.dispatcher()); + }, ec); Future f2 = f1.map(new Mapper() { public Integer apply(String s) { return s.length(); } - }); + }, ec); int result = Await.result(f2, Duration.create(1, SECONDS)); assertEquals(10, result); @@ -141,18 +146,20 @@ public class FutureDocTestBase { @Test public void useMap2() throws Exception { //#map2 + final ExecutionContext ec = system.dispatcher(); + Future f1 = future(new Callable() { public String call() throws Exception { Thread.sleep(100); return "Hello" + "World"; } - }, system.dispatcher()); + }, ec); Future f2 = f1.map(new Mapper() { public Integer apply(String s) { return s.length(); } - }); + }, ec); //#map2 int result = Await.result(f2, Duration.create(1, SECONDS)); @@ -162,11 +169,13 @@ public class FutureDocTestBase { @Test public void useMap3() throws Exception { //#map3 + final ExecutionContext ec = system.dispatcher(); + Future f1 = future(new Callable() { public String call() { return "Hello" + "World"; } - }, system.dispatcher()); + }, ec); Thread.sleep(100); @@ -174,7 +183,7 @@ public class FutureDocTestBase { public Integer apply(String s) { return s.length(); } - }); + }, ec); //#map3 int result = Await.result(f2, Duration.create(1, SECONDS)); @@ -184,11 +193,13 @@ public class FutureDocTestBase { @Test public void useFlatMap() throws Exception { //#flat-map + final ExecutionContext ec = system.dispatcher(); + Future f1 = future(new Callable() { public String call() { return "Hello" + "World"; } - }, system.dispatcher()); + }, ec); Future f2 = f1.flatMap(new Mapper>() { public Future apply(final String s) { @@ -196,9 +207,9 @@ public class FutureDocTestBase { public Integer call() { return s.length(); } - }, system.dispatcher()); + }, ec); } - }); + }, ec); //#flat-map int result = Await.result(f2, Duration.create(1, SECONDS)); @@ -208,15 +219,16 @@ public class FutureDocTestBase { @Test public void useSequence() throws Exception { List> source = new ArrayList>(); - source.add(Futures.successful(1, system.dispatcher())); - source.add(Futures.successful(2, system.dispatcher())); + source.add(Futures.successful(1)); + source.add(Futures.successful(2)); //#sequence + final ExecutionContext ec = system.dispatcher(); //Some source generating a sequence of Future:s Iterable> listOfFutureInts = source; // now we have a Future[Iterable[Integer]] - Future> futureListOfInts = sequence(listOfFutureInts, system.dispatcher()); + Future> futureListOfInts = sequence(listOfFutureInts, ec); // Find the sum of the odd numbers Future futureSum = futureListOfInts.map(new Mapper, Long>() { @@ -226,7 +238,7 @@ public class FutureDocTestBase { sum += i; return sum; } - }); + }, ec); long result = Await.result(futureSum, Duration.create(1, SECONDS)); //#sequence @@ -236,6 +248,7 @@ public class FutureDocTestBase { @Test public void useTraverse() throws Exception { //#traverse + final ExecutionContext ec = system.dispatcher(); //Just a sequence of Strings Iterable listStrings = Arrays.asList("a", "b", "c"); @@ -245,9 +258,9 @@ public class FutureDocTestBase { public String call() { return r.toUpperCase(); } - }, system.dispatcher()); + }, 
ec); } - }, system.dispatcher()); + }, ec); //Returns the sequence of strings as upper case Iterable result = Await.result(futureResult, Duration.create(1, SECONDS)); @@ -258,10 +271,12 @@ public class FutureDocTestBase { @Test public void useFold() throws Exception { List> source = new ArrayList>(); - source.add(Futures.successful("a", system.dispatcher())); - source.add(Futures.successful("b", system.dispatcher())); + source.add(Futures.successful("a")); + source.add(Futures.successful("b")); //#fold + final ExecutionContext ec = system.dispatcher(); + //A sequence of Futures, in this case Strings Iterable> futures = source; @@ -270,7 +285,7 @@ public class FutureDocTestBase { public String apply(String r, String t) { return r + t; //Just concatenate } - }, system.dispatcher()); + }, ec); String result = Await.result(resultFuture, Duration.create(1, SECONDS)); //#fold @@ -280,10 +295,11 @@ public class FutureDocTestBase { @Test public void useReduce() throws Exception { List> source = new ArrayList>(); - source.add(Futures.successful("a", system.dispatcher())); - source.add(Futures.successful("b", system.dispatcher())); + source.add(Futures.successful("a")); + source.add(Futures.successful("b")); //#reduce + final ExecutionContext ec = system.dispatcher(); //A sequence of Futures, in this case Strings Iterable> futures = source; @@ -291,7 +307,7 @@ public class FutureDocTestBase { public Object apply(Object r, String t) { return r + t; //Just concatenate } - }, system.dispatcher()); + }, ec); Object result = Await.result(resultFuture, Duration.create(1, SECONDS)); //#reduce @@ -301,11 +317,12 @@ public class FutureDocTestBase { @Test public void useSuccessfulAndFailed() throws Exception { + final ExecutionContext ec = system.dispatcher(); //#successful - Future future = Futures.successful("Yay!", system.dispatcher()); + Future future = Futures.successful("Yay!"); //#successful //#failed - Future otherFuture = Futures.failed(new IllegalArgumentException("Bang!"), system.dispatcher()); + Future otherFuture = Futures.failed(new IllegalArgumentException("Bang!")); //#failed Object result = Await.result(future, Duration.create(1, SECONDS)); assertEquals("Yay!", result); @@ -316,18 +333,19 @@ public class FutureDocTestBase { @Test public void useFilter() throws Exception { //#filter - Future future1 = Futures.successful(4, system.dispatcher()); + final ExecutionContext ec = system.dispatcher(); + Future future1 = Futures.successful(4); Future successfulFilter = future1.filter(Filter.filterOf(new Function() { public Boolean apply(Integer i) { return i % 2 == 0; } - })); + }), ec); Future failedFilter = future1.filter(Filter.filterOf(new Function() { public Boolean apply(Integer i) { return i % 2 != 0; } - })); + }), ec); //When filter fails, the returned Future will be failed with a scala.MatchError //#filter } @@ -343,35 +361,38 @@ public class FutureDocTestBase { @Test public void useAndThen() { //#and-then - Future future1 = Futures.successful("value", system.dispatcher()).andThen(new OnComplete() { + final ExecutionContext ec = system.dispatcher(); + Future future1 = Futures.successful("value").andThen(new OnComplete() { public void onComplete(Throwable failure, String result) { if (failure != null) sendToIssueTracker(failure); } - }).andThen(new OnComplete() { + }, ec).andThen(new OnComplete() { public void onComplete(Throwable failure, String result) { if (result != null) sendToTheInternetz(result); } - }); + }, ec); //#and-then } @Test public void useRecover() throws Exception { 
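On the Scala side the same requirement is met by an implicit ExecutionContext in scope; a minimal sketch (system name illustrative) of map and flatMap, the operations the Java samples above now receive `ec` for explicitly:

    import akka.actor.ActorSystem
    import scala.concurrent.{ Await, Future }
    import scala.concurrent.util.duration._

    object TransformFuturesSketch extends App {
      val system = ActorSystem("doc-sketch")
      import system.dispatcher                         // implicit ExecutionContext

      val f1 = Future { "Hello" + "World" }            // runs on the dispatcher
      val f2 = f1.map(_.length)                        // map needs the ExecutionContext
      val f3 = f1.flatMap(s ⇒ Future(s.toUpperCase))   // so does flatMap

      println(Await.result(f2, 1.second))              // 10
      println(Await.result(f3, 1.second))              // HELLOWORLD
      system.shutdown()
    }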
//#recover + final ExecutionContext ec = system.dispatcher(); + Future future = future(new Callable() { public Integer call() { return 1 / 0; } - }, system.dispatcher()).recover(new Recover() { + }, ec).recover(new Recover() { public Integer recover(Throwable problem) throws Throwable { if (problem instanceof ArithmeticException) return 0; else throw problem; } - }); + }, ec); int result = Await.result(future, Duration.create(1, SECONDS)); assertEquals(result, 0); //#recover @@ -380,22 +401,24 @@ public class FutureDocTestBase { @Test public void useTryRecover() throws Exception { //#try-recover + final ExecutionContext ec = system.dispatcher(); + Future future = future(new Callable() { public Integer call() { return 1 / 0; } - }, system.dispatcher()).recoverWith(new Recover>() { + }, ec).recoverWith(new Recover>() { public Future recover(Throwable problem) throws Throwable { if (problem instanceof ArithmeticException) { return future(new Callable() { public Integer call() { return 0; } - }, system.dispatcher()); + }, ec); } else throw problem; } - }); + }, ec); int result = Await.result(future, Duration.create(1, SECONDS)); assertEquals(result, 0); //#try-recover @@ -404,9 +427,11 @@ public class FutureDocTestBase { @Test public void useOnSuccessOnFailureAndOnComplete() throws Exception { { - Future future = Futures.successful("foo", system.dispatcher()); + Future future = Futures.successful("foo"); //#onSuccess + final ExecutionContext ec = system.dispatcher(); + future.onSuccess(new OnSuccess() { public void onSuccess(String result) { if ("bar" == result) { @@ -415,12 +440,14 @@ public class FutureDocTestBase { //Do something if it was some other String } } - }); + }, ec); //#onSuccess } { - Future future = Futures.failed(new IllegalStateException("OHNOES"), system.dispatcher()); + Future future = Futures.failed(new IllegalStateException("OHNOES")); //#onFailure + final ExecutionContext ec = system.dispatcher(); + future.onFailure(new OnFailure() { public void onFailure(Throwable failure) { if (failure instanceof IllegalStateException) { @@ -429,12 +456,14 @@ public class FutureDocTestBase { //Do something if it was some other failure } } - }); + }, ec); //#onFailure } { - Future future = Futures.successful("foo", system.dispatcher()); + Future future = Futures.successful("foo"); //#onComplete + final ExecutionContext ec = system.dispatcher(); + future.onComplete(new OnComplete() { public void onComplete(Throwable failure, String result) { if (failure != null) { @@ -443,7 +472,7 @@ public class FutureDocTestBase { // We got a result, do something with it } } - }); + }, ec); //#onComplete } } @@ -452,13 +481,14 @@ public class FutureDocTestBase { public void useOrAndZip() throws Exception { { //#zip - Future future1 = Futures.successful("foo", system.dispatcher()); - Future future2 = Futures.successful("bar", system.dispatcher()); + final ExecutionContext ec = system.dispatcher(); + Future future1 = Futures.successful("foo"); + Future future2 = Futures.successful("bar"); Future future3 = future1.zip(future2).map(new Mapper, String>() { public String apply(scala.Tuple2 zipped) { return zipped._1() + " " + zipped._2(); } - }); + }, ec); String result = Await.result(future3, Duration.create(1, SECONDS)); assertEquals("foo bar", result); @@ -467,9 +497,9 @@ public class FutureDocTestBase { { //#fallback-to - Future future1 = Futures.failed(new IllegalStateException("OHNOES1"), system.dispatcher()); - Future future2 = Futures.failed(new IllegalStateException("OHNOES2"), system.dispatcher()); 
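The Scala equivalents of the recover / recoverWith samples above, again with the dispatcher as the implicit ExecutionContext (a sketch; the failing division mirrors the Java example):

    import akka.actor.ActorSystem
    import scala.concurrent.{ Await, Future }
    import scala.concurrent.util.duration._

    object RecoverSketch extends App {
      val system = ActorSystem("doc-sketch")
      import system.dispatcher

      val risky = Future { 1 / 0 }                                        // fails with ArithmeticException
      val recovered = risky.recover { case _: ArithmeticException ⇒ 0 }   // replace the failure with a value
      val recoveredWith = risky.recoverWith {                             // or with another Future
        case _: ArithmeticException ⇒ Future(0)
      }

      println(Await.result(recovered, 1.second))      // 0
      println(Await.result(recoveredWith, 1.second))  // 0
      system.shutdown()
    }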
- Future future3 = Futures.successful("bar", system.dispatcher()); + Future future1 = Futures.failed(new IllegalStateException("OHNOES1")); + Future future2 = Futures.failed(new IllegalStateException("OHNOES2")); + Future future3 = Futures.successful("bar"); Future future4 = future1.fallbackTo(future2).fallbackTo(future3); // Will have "bar" in this case String result = Await.result(future4, Duration.create(1, SECONDS)); assertEquals("bar", result); diff --git a/akka-docs/java/code/docs/jrouting/CustomRouterDocTestBase.java b/akka-docs/java/code/docs/jrouting/CustomRouterDocTestBase.java index 74e7759b62..b1df65f61a 100644 --- a/akka-docs/java/code/docs/jrouting/CustomRouterDocTestBase.java +++ b/akka-docs/java/code/docs/jrouting/CustomRouterDocTestBase.java @@ -13,10 +13,10 @@ import static org.junit.Assert.assertEquals; import akka.actor.*; import akka.routing.*; -import akka.util.Duration; +import scala.concurrent.util.Duration; import akka.util.Timeout; -import akka.dispatch.Await; -import akka.dispatch.Future; +import scala.concurrent.Await; +import scala.concurrent.Future; import akka.dispatch.Dispatchers; import akka.testkit.AkkaSpec; import com.typesafe.config.ConfigFactory; diff --git a/akka-docs/java/code/docs/jrouting/ParentActor.java b/akka-docs/java/code/docs/jrouting/ParentActor.java index ada9e92138..c21aed2dc6 100644 --- a/akka-docs/java/code/docs/jrouting/ParentActor.java +++ b/akka-docs/java/code/docs/jrouting/ParentActor.java @@ -11,10 +11,10 @@ import akka.routing.SmallestMailboxRouter; import akka.actor.UntypedActor; import akka.actor.ActorRef; import akka.actor.Props; -import akka.util.Duration; +import scala.concurrent.util.Duration; import akka.util.Timeout; -import akka.dispatch.Future; -import akka.dispatch.Await; +import scala.concurrent.Future; +import scala.concurrent.Await; //#parentActor public class ParentActor extends UntypedActor { diff --git a/akka-docs/java/code/docs/testkit/TestKitDocTest.java b/akka-docs/java/code/docs/testkit/TestKitDocTest.java index a5f85019ea..0b8b5c7a4e 100644 --- a/akka-docs/java/code/docs/testkit/TestKitDocTest.java +++ b/akka-docs/java/code/docs/testkit/TestKitDocTest.java @@ -19,14 +19,14 @@ import akka.actor.Kill; import akka.actor.Props; import akka.actor.UntypedActor; import akka.actor.UntypedActorFactory; -import akka.dispatch.Await; -import akka.dispatch.Future; +import scala.concurrent.Await; +import scala.concurrent.Future; import akka.testkit.CallingThreadDispatcher; import akka.testkit.TestActor; import akka.testkit.TestActor.AutoPilot; import akka.testkit.TestActorRef; import akka.testkit.JavaTestKit; -import akka.util.Duration; +import scala.concurrent.util.Duration; public class TestKitDocTest { diff --git a/akka-docs/java/code/docs/testkit/TestKitSampleTest.java b/akka-docs/java/code/docs/testkit/TestKitSampleTest.java index ba235fad15..e09daae2fd 100644 --- a/akka-docs/java/code/docs/testkit/TestKitSampleTest.java +++ b/akka-docs/java/code/docs/testkit/TestKitSampleTest.java @@ -14,7 +14,7 @@ import akka.actor.ActorSystem; import akka.actor.Props; import akka.actor.UntypedActor; import akka.testkit.JavaTestKit; -import akka.util.Duration; +import scala.concurrent.util.Duration; public class TestKitSampleTest { diff --git a/akka-docs/java/code/docs/transactor/TransactorDocTest.java b/akka-docs/java/code/docs/transactor/TransactorDocTest.java index 4eaaa0bb31..f0b15da925 100644 --- a/akka-docs/java/code/docs/transactor/TransactorDocTest.java +++ b/akka-docs/java/code/docs/transactor/TransactorDocTest.java @@ -9,10 
+9,10 @@ import org.junit.Test; //#imports import akka.actor.*; -import akka.dispatch.Await; +import scala.concurrent.Await; import static akka.pattern.Patterns.ask; import akka.transactor.Coordinated; -import akka.util.Duration; +import scala.concurrent.util.Duration; import akka.util.Timeout; import static java.util.concurrent.TimeUnit.SECONDS; //#imports diff --git a/akka-docs/java/code/docs/zeromq/ZeromqDocTestBase.java b/akka-docs/java/code/docs/zeromq/ZeromqDocTestBase.java index 1a311c9529..a24e1680cd 100644 --- a/akka-docs/java/code/docs/zeromq/ZeromqDocTestBase.java +++ b/akka-docs/java/code/docs/zeromq/ZeromqDocTestBase.java @@ -34,7 +34,7 @@ import akka.actor.UntypedActor; import akka.actor.Props; import akka.event.Logging; import akka.event.LoggingAdapter; -import akka.util.Duration; +import scala.concurrent.util.Duration; import akka.serialization.SerializationExtension; import akka.serialization.Serialization; import java.io.Serializable; diff --git a/akka-docs/java/typed-actors.rst b/akka-docs/java/typed-actors.rst index 4d36872f1a..a2b4ff9a9d 100644 --- a/akka-docs/java/typed-actors.rst +++ b/akka-docs/java/typed-actors.rst @@ -97,7 +97,7 @@ Method dispatch semantics Methods returning: * ``void`` will be dispatched with ``fire-and-forget`` semantics, exactly like ``ActorRef.tell`` - * ``akka.dispatch.Future`` will use ``send-request-reply`` semantics, exactly like ``ActorRef.ask`` + * ``scala.concurrent.Future`` will use ``send-request-reply`` semantics, exactly like ``ActorRef.ask`` * ``scala.Option`` or ``akka.japi.Option`` will use ``send-request-reply`` semantics, but *will* block to wait for an answer, and return None if no answer was produced within the timeout, or scala.Some/akka.japi.Some containing the result otherwise. Any exception that was thrown during this call will be rethrown. diff --git a/akka-docs/java/zeromq.rst b/akka-docs/java/zeromq.rst index 08d1a9541f..05123f2bc5 100644 --- a/akka-docs/java/zeromq.rst +++ b/akka-docs/java/zeromq.rst @@ -21,7 +21,7 @@ Sockets are always created using the ``akka.zeromq.ZeroMQExtension``, for exampl .. includecode:: code/docs/zeromq/ZeromqDocTestBase.java#pub-socket -Above examples will create a ZeroMQ Publisher socket that is Bound to the port 1233 on localhost. +Above examples will create a ZeroMQ Publisher socket that is Bound to the port 21231 on localhost. 
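A compact Scala sketch of the typed-actor dispatch rules listed in typed-actors.rst above, using the new scala.concurrent.Future (method bodies are illustrative):

    import akka.actor.TypedActor
    import scala.concurrent.{ Future, Promise }

    trait Squarer {
      def squareDontCare(i: Int): Unit          // Unit/void: fire-and-forget, like ActorRef.tell
      def square(i: Int): Future[Int]           // Future: send-request-reply, like ActorRef.ask
      def squareNowPlease(i: Int): Option[Int]  // Option: send-request-reply, but blocks for the answer
    }

    class SquarerImpl extends Squarer {
      import TypedActor.dispatcher              // implicit dispatcher for the Promise
      def squareDontCare(i: Int): Unit = ()
      def square(i: Int): Future[Int] = Promise.successful(i * i).future
      def squareNowPlease(i: Int): Option[Int] = Some(i * i)
    }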
Similarly you can create a subscription socket, with a listener, that subscribes to all messages from the publisher using: diff --git a/akka-docs/modules/code/docs/actor/mailbox/DurableMailboxDocSpec.scala b/akka-docs/modules/code/docs/actor/mailbox/DurableMailboxDocSpec.scala index fc62cd940d..1d4f714f7c 100644 --- a/akka-docs/modules/code/docs/actor/mailbox/DurableMailboxDocSpec.scala +++ b/akka-docs/modules/code/docs/actor/mailbox/DurableMailboxDocSpec.scala @@ -3,6 +3,8 @@ */ package docs.actor.mailbox +import language.postfixOps + //#imports import akka.actor.Props @@ -51,12 +53,12 @@ import akka.dispatch.MessageQueue import akka.actor.mailbox.DurableMessageQueue import akka.actor.mailbox.DurableMessageSerialization import akka.pattern.CircuitBreaker -import akka.util.duration._ +import scala.concurrent.util.duration._ class MyMailboxType(systemSettings: ActorSystem.Settings, config: Config) extends MailboxType { - override def create(owner: Option[ActorRef], system: Option[ActorSystem]): MessageQueue = owner zip system headOption match { + override def create(owner: Option[ActorRef], system: Option[ActorSystem]): MessageQueue = (owner zip system) headOption match { case Some((o, s: ExtendedActorSystem)) ⇒ new MyMessageQueue(o, s) case None ⇒ throw new IllegalArgumentException( "requires an owner (i.e. does not work with BalancingDispatcher)") diff --git a/akka-docs/scala/code/docs/actor/ActorDocSpec.scala b/akka-docs/scala/code/docs/actor/ActorDocSpec.scala index 108aba33b2..a5021bf525 100644 --- a/akka-docs/scala/code/docs/actor/ActorDocSpec.scala +++ b/akka-docs/scala/code/docs/actor/ActorDocSpec.scala @@ -3,6 +3,8 @@ */ package docs.actor +import language.postfixOps + //#imports1 import akka.actor.Actor import akka.actor.Props @@ -10,15 +12,15 @@ import akka.event.Logging //#imports1 -import akka.dispatch.Future +import scala.concurrent.Future import akka.actor.{ ActorRef, ActorSystem } import org.scalatest.{ BeforeAndAfterAll, WordSpec } import org.scalatest.matchers.MustMatchers import akka.testkit._ import akka.util._ -import akka.util.duration._ +import scala.concurrent.util.duration._ import akka.actor.Actor.Receive -import akka.dispatch.Await +import scala.concurrent.Await //#my-actor class MyActor extends Actor { @@ -242,7 +244,7 @@ class ActorDocSpec extends AkkaSpec(Map("akka.loglevel" -> "INFO")) { "using implicit timeout" in { val myActor = system.actorOf(Props(new FirstActor)) //#using-implicit-timeout - import akka.util.duration._ + import scala.concurrent.util.duration._ import akka.util.Timeout import akka.pattern.ask implicit val timeout = Timeout(5 seconds) @@ -255,7 +257,7 @@ class ActorDocSpec extends AkkaSpec(Map("akka.loglevel" -> "INFO")) { "using explicit timeout" in { val myActor = system.actorOf(Props(new FirstActor)) //#using-explicit-timeout - import akka.util.duration._ + import scala.concurrent.util.duration._ import akka.pattern.ask val future = myActor.ask("hello")(5 seconds) //#using-explicit-timeout @@ -265,7 +267,7 @@ class ActorDocSpec extends AkkaSpec(Map("akka.loglevel" -> "INFO")) { "using receiveTimeout" in { //#receive-timeout import akka.actor.ReceiveTimeout - import akka.util.duration._ + import scala.concurrent.util.duration._ class MyActor extends Actor { context.setReceiveTimeout(30 milliseconds) def receive = { @@ -345,7 +347,7 @@ class ActorDocSpec extends AkkaSpec(Map("akka.loglevel" -> "INFO")) { val actorRef = system.actorOf(Props[MyActor]) //#gracefulStop import akka.pattern.gracefulStop - import akka.dispatch.Await + import 
scala.concurrent.Await try { val stopped: Future[Boolean] = gracefulStop(actorRef, 5 seconds)(system) @@ -361,7 +363,7 @@ class ActorDocSpec extends AkkaSpec(Map("akka.loglevel" -> "INFO")) { val actorA, actorB, actorC, actorD = system.actorOf(Props.empty) //#ask-pipeTo import akka.pattern.{ ask, pipe } - + import system.dispatcher // The ExecutionContext that will be used case class Result(x: Int, s: String, d: Double) case object Request diff --git a/akka-docs/scala/code/docs/actor/FSMDocSpec.scala b/akka-docs/scala/code/docs/actor/FSMDocSpec.scala index 75b0309a42..ff9ae0cc14 100644 --- a/akka-docs/scala/code/docs/actor/FSMDocSpec.scala +++ b/akka-docs/scala/code/docs/actor/FSMDocSpec.scala @@ -3,6 +3,8 @@ */ package docs.actor +import language.postfixOps + //#test-code import akka.testkit.AkkaSpec import akka.actor.Props @@ -13,7 +15,7 @@ class FSMDocSpec extends AkkaSpec { //#fsm-code-elided //#simple-imports import akka.actor.{ Actor, ActorRef, FSM } - import akka.util.duration._ + import scala.concurrent.util.duration._ //#simple-imports //#simple-events // received events diff --git a/akka-docs/scala/code/docs/actor/FaultHandlingDocSample.scala b/akka-docs/scala/code/docs/actor/FaultHandlingDocSample.scala index 79f5a5d084..1e4dc4f6ab 100644 --- a/akka-docs/scala/code/docs/actor/FaultHandlingDocSample.scala +++ b/akka-docs/scala/code/docs/actor/FaultHandlingDocSample.scala @@ -3,12 +3,14 @@ */ package docs.actor +import language.postfixOps + //#all //#imports import akka.actor._ import akka.actor.SupervisorStrategy._ -import akka.util.duration._ -import akka.util.Duration +import scala.concurrent.util.duration._ +import scala.concurrent.util.Duration import akka.util.Timeout import akka.event.LoggingReceive import akka.pattern.{ ask, pipe } @@ -101,6 +103,7 @@ class Worker extends Actor with ActorLogging { counterService ! Increment(1) // Send current progress to the initial sender + import context.dispatcher // Use this Actors' Dispatcher as ExecutionContext counterService ? 
GetCurrentCount map { case CurrentCount(_, count) ⇒ Progress(100.0 * count / totalCount) } pipeTo progressListener.get diff --git a/akka-docs/scala/code/docs/actor/FaultHandlingDocSpec.scala b/akka-docs/scala/code/docs/actor/FaultHandlingDocSpec.scala index 65e03bd2ea..f291eb0132 100644 --- a/akka-docs/scala/code/docs/actor/FaultHandlingDocSpec.scala +++ b/akka-docs/scala/code/docs/actor/FaultHandlingDocSpec.scala @@ -3,6 +3,8 @@ */ package docs.actor +import language.postfixOps + //#testkit import akka.testkit.{ AkkaSpec, ImplicitSender, EventFilter } import akka.actor.{ ActorRef, Props, Terminated } @@ -20,7 +22,7 @@ object FaultHandlingDocSpec { //#strategy import akka.actor.OneForOneStrategy import akka.actor.SupervisorStrategy._ - import akka.util.duration._ + import scala.concurrent.util.duration._ override val supervisorStrategy = OneForOneStrategy(maxNrOfRetries = 10, withinTimeRange = 1 minute) { case _: ArithmeticException ⇒ Resume @@ -41,7 +43,7 @@ object FaultHandlingDocSpec { //#strategy2 import akka.actor.OneForOneStrategy import akka.actor.SupervisorStrategy._ - import akka.util.duration._ + import scala.concurrent.util.duration._ override val supervisorStrategy = OneForOneStrategy(maxNrOfRetries = 10, withinTimeRange = 1 minute) { case _: ArithmeticException ⇒ Resume diff --git a/akka-docs/scala/code/docs/actor/SchedulerDocSpec.scala b/akka-docs/scala/code/docs/actor/SchedulerDocSpec.scala index f711d85129..3d3755bd5d 100644 --- a/akka-docs/scala/code/docs/actor/SchedulerDocSpec.scala +++ b/akka-docs/scala/code/docs/actor/SchedulerDocSpec.scala @@ -3,10 +3,12 @@ */ package docs.actor +import language.postfixOps + //#imports1 import akka.actor.Actor import akka.actor.Props -import akka.util.duration._ +import scala.concurrent.util.duration._ //#imports1 diff --git a/akka-docs/scala/code/docs/actor/TypedActorDocSpec.scala b/akka-docs/scala/code/docs/actor/TypedActorDocSpec.scala index 0c2f3bd5b8..3078f96f4c 100644 --- a/akka-docs/scala/code/docs/actor/TypedActorDocSpec.scala +++ b/akka-docs/scala/code/docs/actor/TypedActorDocSpec.scala @@ -3,9 +3,11 @@ */ package docs.actor +import language.postfixOps + //#imports -import akka.dispatch.{ Promise, Future, Await } -import akka.util.duration._ +import scala.concurrent.{ Promise, Future, Await } +import scala.concurrent.util.duration._ import akka.actor.{ ActorContext, TypedActor, TypedProps } //#imports @@ -37,7 +39,7 @@ class SquarerImpl(val name: String) extends Squarer { def squareDontCare(i: Int): Unit = i * i //Nobody cares :( - def square(i: Int): Future[Int] = Promise successful i * i + def square(i: Int): Future[Int] = Promise.successful(i * i).future def squareNowPlease(i: Int): Option[Int] = Some(i * i) @@ -53,7 +55,7 @@ trait Foo { trait Bar { import TypedActor.dispatcher //So we have an implicit dispatcher for our Promise - def doBar(str: String): Future[String] = Promise successful str.toUpperCase + def doBar(str: String): Future[String] = Promise.successful(str.toUpperCase).future } class FooBar extends Foo with Bar diff --git a/akka-docs/scala/code/docs/agent/AgentDocSpec.scala b/akka-docs/scala/code/docs/agent/AgentDocSpec.scala index 418159f638..6c259bb34b 100644 --- a/akka-docs/scala/code/docs/agent/AgentDocSpec.scala +++ b/akka-docs/scala/code/docs/agent/AgentDocSpec.scala @@ -3,8 +3,10 @@ */ package docs.agent +import language.postfixOps + import akka.agent.Agent -import akka.util.duration._ +import scala.concurrent.util.duration._ import akka.util.Timeout import akka.testkit._ @@ -97,7 +99,7 @@ class 
AgentDocSpec extends AkkaSpec { val agent = Agent(0) //#read-await - import akka.util.duration._ + import scala.concurrent.util.duration._ import akka.util.Timeout implicit val timeout = Timeout(5 seconds) @@ -111,7 +113,7 @@ class AgentDocSpec extends AkkaSpec { val agent = Agent(0) //#read-future - import akka.dispatch.Await + import scala.concurrent.Await implicit val timeout = Timeout(5 seconds) val future = agent.future @@ -124,7 +126,7 @@ class AgentDocSpec extends AkkaSpec { "transfer example" in { //#transfer-example import akka.agent.Agent - import akka.util.duration._ + import scala.concurrent.util.duration._ import akka.util.Timeout import scala.concurrent.stm._ diff --git a/akka-docs/scala/code/docs/camel/Consumers.scala b/akka-docs/scala/code/docs/camel/Consumers.scala index abb43a048a..1d500cf04c 100644 --- a/akka-docs/scala/code/docs/camel/Consumers.scala +++ b/akka-docs/scala/code/docs/camel/Consumers.scala @@ -1,7 +1,13 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ + package docs.camel +import language.postfixOps + object Consumers { - { + object Sample1 { //#Consumer1 import akka.camel.{ CamelMessage, Consumer } @@ -14,7 +20,7 @@ object Consumers { } //#Consumer1 } - { + object Sample2 { //#Consumer2 import akka.camel.{ CamelMessage, Consumer } @@ -27,7 +33,7 @@ object Consumers { } //#Consumer2 } - { + object Sample3 { //#Consumer3 import akka.camel.{ CamelMessage, Consumer } import akka.camel.Ack @@ -50,10 +56,10 @@ object Consumers { } //#Consumer3 } - { + object Sample4 { //#Consumer4 import akka.camel.{ CamelMessage, Consumer } - import akka.util.duration._ + import scala.concurrent.util.duration._ class Consumer4 extends Consumer { def endpointUri = "jetty:http://localhost:8877/camel/default" diff --git a/akka-docs/scala/code/docs/camel/CustomRoute.scala b/akka-docs/scala/code/docs/camel/CustomRoute.scala index c51d3e1fc4..5895a16ef1 100644 --- a/akka-docs/scala/code/docs/camel/CustomRoute.scala +++ b/akka-docs/scala/code/docs/camel/CustomRoute.scala @@ -1,10 +1,16 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. 
+ */ + package docs.camel import akka.camel.CamelMessage import akka.actor.Status.Failure +import language.existentials + object CustomRoute { - { + object Sample1 { //#CustomRoute import akka.actor.{ Props, ActorSystem, Actor, ActorRef } import akka.camel.{ CamelMessage, CamelExtension } @@ -31,7 +37,7 @@ object CustomRoute { //#CustomRoute } - { + object Sample2 { //#ErrorThrowingConsumer import akka.camel.Consumer diff --git a/akka-docs/scala/code/docs/camel/Introduction.scala b/akka-docs/scala/code/docs/camel/Introduction.scala index 3c9561d22c..ad4fbadf1a 100644 --- a/akka-docs/scala/code/docs/camel/Introduction.scala +++ b/akka-docs/scala/code/docs/camel/Introduction.scala @@ -3,8 +3,10 @@ package docs.camel import akka.actor.{ Props, ActorSystem } import akka.camel.CamelExtension +import language.postfixOps + object Introduction { - { + def foo = { //#Consumer-mina import akka.camel.{ CamelMessage, Consumer } @@ -24,7 +26,7 @@ object Introduction { val mina = system.actorOf(Props[MyEndpoint]) //#Consumer-mina } - { + def bar = { //#Consumer import akka.camel.{ CamelMessage, Consumer } @@ -38,7 +40,7 @@ object Introduction { } //#Consumer } - { + def baz = { //#Producer import akka.actor.Actor import akka.camel.{ Producer, Oneway } @@ -75,7 +77,7 @@ object Introduction { { //#CamelActivation import akka.camel.{ CamelMessage, Consumer } - import akka.util.duration._ + import scala.concurrent.util.duration._ class MyEndpoint extends Consumer { def endpointUri = "mina:tcp://localhost:6200?textline=true" diff --git a/akka-docs/scala/code/docs/camel/Producers.scala b/akka-docs/scala/code/docs/camel/Producers.scala index 2953ef2269..b78f67420e 100644 --- a/akka-docs/scala/code/docs/camel/Producers.scala +++ b/akka-docs/scala/code/docs/camel/Producers.scala @@ -1,9 +1,10 @@ package docs.camel import akka.camel.CamelExtension +import language.postfixOps object Producers { - { + object Sample1 { //#Producer1 import akka.actor.Actor import akka.actor.{ Props, ActorSystem } @@ -16,7 +17,7 @@ object Producers { //#Producer1 //#AskProducer import akka.pattern.ask - import akka.util.duration._ + import scala.concurrent.util.duration._ implicit val timeout = Timeout(10 seconds) val system = ActorSystem("some-system") @@ -24,7 +25,7 @@ object Producers { val future = producer.ask("some request").mapTo[CamelMessage] //#AskProducer } - { + object Sample2 { //#RouteResponse import akka.actor.{ Actor, ActorRef } import akka.camel.{ Producer, CamelMessage } @@ -50,7 +51,7 @@ object Producers { forwardResponse ! "some request" //#RouteResponse } - { + object Sample3 { //#TransformOutgoingMessage import akka.actor.Actor import akka.camel.{ Producer, CamelMessage } @@ -68,7 +69,7 @@ object Producers { } //#TransformOutgoingMessage } - { + object Sample4 { //#Oneway import akka.actor.{ Actor, Props, ActorSystem } import akka.camel.Producer @@ -84,7 +85,7 @@ object Producers { //#Oneway } - { + object Sample5 { //#Correlate import akka.camel.{ Producer, CamelMessage } import akka.actor.Actor @@ -99,7 +100,7 @@ object Producers { producer ! 
CamelMessage("bar", Map(CamelMessage.MessageExchangeId -> "123")) //#Correlate } - { + object Sample6 { //#ProducerTemplate import akka.actor.Actor class MyActor extends Actor { @@ -111,7 +112,7 @@ object Producers { } //#ProducerTemplate } - { + object Sample7 { //#RequestProducerTemplate import akka.actor.Actor class MyActor extends Actor { diff --git a/akka-docs/scala/code/docs/camel/QuartzExample.scala b/akka-docs/scala/code/docs/camel/QuartzExample.scala index 2735511d3c..f0ad04be57 100644 --- a/akka-docs/scala/code/docs/camel/QuartzExample.scala +++ b/akka-docs/scala/code/docs/camel/QuartzExample.scala @@ -1,34 +1,30 @@ package docs.camel object QuartzExample { + //#Quartz + import akka.actor.{ ActorSystem, Props } - { - //#Quartz - import akka.actor.{ ActorSystem, Props } + import akka.camel.{ Consumer } - import akka.camel.{ Consumer } + class MyQuartzActor extends Consumer { - class MyQuartzActor extends Consumer { + def endpointUri = "quartz://example?cron=0/2+*+*+*+*+?" - def endpointUri = "quartz://example?cron=0/2+*+*+*+*+?" + def receive = { - def receive = { + case msg ⇒ println("==============> received %s " format msg) - case msg ⇒ println("==============> received %s " format msg) + } // end receive - } // end receive + } // end MyQuartzActor - } // end MyQuartzActor + object MyQuartzActor { - object MyQuartzActor { - - def main(str: Array[String]) { - val system = ActorSystem("my-quartz-system") - system.actorOf(Props[MyQuartzActor]) - } // end main - - } // end MyQuartzActor - //#Quartz - } + def main(str: Array[String]) { + val system = ActorSystem("my-quartz-system") + system.actorOf(Props[MyQuartzActor]) + } // end main + } // end MyQuartzActor + //#Quartz } diff --git a/akka-docs/scala/code/docs/dispatcher/DispatcherDocSpec.scala b/akka-docs/scala/code/docs/dispatcher/DispatcherDocSpec.scala index 7fdd0cd9bf..47a776c2ed 100644 --- a/akka-docs/scala/code/docs/dispatcher/DispatcherDocSpec.scala +++ b/akka-docs/scala/code/docs/dispatcher/DispatcherDocSpec.scala @@ -3,12 +3,14 @@ */ package docs.dispatcher +import language.postfixOps + import org.scalatest.{ BeforeAndAfterAll, WordSpec } import org.scalatest.matchers.MustMatchers import akka.testkit.AkkaSpec import akka.event.Logging import akka.event.LoggingAdapter -import akka.util.duration._ +import scala.concurrent.util.duration._ import akka.actor.{ Props, Actor, PoisonPill, ActorSystem } object DispatcherDocSpec { diff --git a/akka-docs/scala/code/docs/extension/SettingsExtensionDocSpec.scala b/akka-docs/scala/code/docs/extension/SettingsExtensionDocSpec.scala index a1e033e386..247bc61044 100644 --- a/akka-docs/scala/code/docs/extension/SettingsExtensionDocSpec.scala +++ b/akka-docs/scala/code/docs/extension/SettingsExtensionDocSpec.scala @@ -8,7 +8,7 @@ import akka.actor.Extension import akka.actor.ExtensionId import akka.actor.ExtensionIdProvider import akka.actor.ExtendedActorSystem -import akka.util.Duration +import scala.concurrent.util.Duration import com.typesafe.config.Config import java.util.concurrent.TimeUnit diff --git a/akka-docs/scala/code/docs/future/FutureDocSpec.scala b/akka-docs/scala/code/docs/future/FutureDocSpec.scala index 66e80578fd..4f3d8e2fd3 100644 --- a/akka-docs/scala/code/docs/future/FutureDocSpec.scala +++ b/akka-docs/scala/code/docs/future/FutureDocSpec.scala @@ -3,16 +3,15 @@ */ package docs.future -import org.scalatest.{ BeforeAndAfterAll, WordSpec } -import org.scalatest.matchers.MustMatchers +import language.postfixOps + import akka.testkit._ -import akka.actor.Actor -import 
akka.actor.Props +import akka.actor.{ Actor, Props } import akka.actor.Status.Failure import akka.util.Timeout -import akka.util.duration._ +import scala.concurrent.util.duration._ import java.lang.IllegalStateException -import akka.dispatch.{ ExecutionContext, Future, Await, Promise } +import scala.concurrent.{ Await, ExecutionContext, Future, Promise } object FutureDocSpec { @@ -38,11 +37,11 @@ object FutureDocSpec { class FutureDocSpec extends AkkaSpec { import FutureDocSpec._ - + import system.dispatcher "demonstrate usage custom ExecutionContext" in { val yourExecutorServiceGoesHere = java.util.concurrent.Executors.newSingleThreadExecutor() //#diy-execution-context - import akka.dispatch.{ ExecutionContext, Promise } + import scala.concurrent.{ ExecutionContext, Promise } implicit val ec = ExecutionContext.fromExecutorService(yourExecutorServiceGoesHere) @@ -59,10 +58,10 @@ class FutureDocSpec extends AkkaSpec { val actor = system.actorOf(Props[MyActor]) val msg = "hello" //#ask-blocking - import akka.dispatch.Await + import scala.concurrent.Await import akka.pattern.ask import akka.util.Timeout - import akka.util.duration._ + import scala.concurrent.util.duration._ implicit val timeout = Timeout(5 seconds) val future = actor ? msg // enabled by the “ask” import @@ -76,7 +75,7 @@ class FutureDocSpec extends AkkaSpec { val msg = "hello" implicit val timeout = Timeout(5 seconds) //#map-to - import akka.dispatch.Future + import scala.concurrent.Future import akka.pattern.ask val future: Future[String] = ask(actor, msg).mapTo[String] @@ -86,9 +85,9 @@ class FutureDocSpec extends AkkaSpec { "demonstrate usage of simple future eval" in { //#future-eval - import akka.dispatch.Await - import akka.dispatch.Future - import akka.util.duration._ + import scala.concurrent.Await + import scala.concurrent.Future + import scala.concurrent.util.duration._ val future = Future { "Hello" + "World" @@ -117,7 +116,7 @@ class FutureDocSpec extends AkkaSpec { val f1 = Future { "Hello" + "World" } - val f2 = Promise.successful(3) + val f2 = Promise.successful(3).future val f3 = f1 map { x ⇒ f2 map { y ⇒ x.length * y @@ -132,7 +131,7 @@ class FutureDocSpec extends AkkaSpec { val f1 = Future { "Hello" + "World" } - val f2 = Promise.successful(3) + val f2 = Promise.successful(3).future val f3 = f1 flatMap { x ⇒ f2 map { y ⇒ x.length * y @@ -145,13 +144,13 @@ class FutureDocSpec extends AkkaSpec { "demonstrate usage of filter" in { //#filter - val future1 = Promise.successful(4) + val future1 = Promise.successful(4).future val future2 = future1.filter(_ % 2 == 0) val result = Await.result(future2, 1 second) result must be(4) val failedFilter = future1.filter(_ % 2 == 1).recover { - case m: MatchError ⇒ 0 //When filter fails, it will have a MatchError + case m: NoSuchElementException ⇒ 0 //When filter fails, it will have a java.util.NoSuchElementException } val result2 = Await.result(failedFilter, 1 second) result2 must be(0) //Can only be 0 when there was a MatchError @@ -182,7 +181,7 @@ class FutureDocSpec extends AkkaSpec { val msg1 = 1 val msg2 = 2 implicit val timeout = Timeout(5 seconds) - import akka.dispatch.Await + import scala.concurrent.Await import akka.pattern.ask //#composing-wrong @@ -206,7 +205,7 @@ class FutureDocSpec extends AkkaSpec { val msg1 = 1 val msg2 = 2 implicit val timeout = Timeout(5 seconds) - import akka.dispatch.Await + import scala.concurrent.Await import akka.pattern.ask //#composing @@ -290,8 +289,8 @@ class FutureDocSpec extends AkkaSpec { val msg1 = -1 //#try-recover val future 
= akka.pattern.ask(actor, msg1) recoverWith { - case e: ArithmeticException ⇒ Promise.successful(0) - case foo: IllegalArgumentException ⇒ Promise.failed[Int](new IllegalStateException("All br0ken!")) + case e: ArithmeticException ⇒ Promise.successful(0).future + case foo: IllegalArgumentException ⇒ Promise.failed[Int](new IllegalStateException("All br0ken!")).future } //#try-recover Await.result(future, 1 second) must be(0) @@ -343,7 +342,7 @@ class FutureDocSpec extends AkkaSpec { Await.result(future, 1 second) must be("foo") } { - val future = Promise.failed[String](new IllegalStateException("OHNOES")) + val future = Promise.failed[String](new IllegalStateException("OHNOES")).future //#onFailure future onFailure { case ise: IllegalStateException if ise.getMessage == "OHNOES" ⇒ @@ -369,10 +368,10 @@ class FutureDocSpec extends AkkaSpec { "demonstrate usage of Promise.success & Promise.failed" in { //#successful - val future = Promise.successful("Yay!") + val future = Promise.successful("Yay!").future //#successful //#failed - val otherFuture = Promise.failed[String](new IllegalArgumentException("Bang!")) + val otherFuture = Promise.failed[String](new IllegalArgumentException("Bang!")).future //#failed Await.result(future, 1 second) must be("Yay!") intercept[IllegalArgumentException] { Await.result(otherFuture, 1 second) } diff --git a/akka-docs/scala/code/docs/io/HTTPServer.scala b/akka-docs/scala/code/docs/io/HTTPServer.scala index b6b80aa27f..5c63eac3c2 100644 --- a/akka-docs/scala/code/docs/io/HTTPServer.scala +++ b/akka-docs/scala/code/docs/io/HTTPServer.scala @@ -3,6 +3,8 @@ */ package docs.io +import language.postfixOps + //#imports import akka.actor._ import akka.util.{ ByteString, ByteStringBuilder } diff --git a/akka-docs/scala/code/docs/routing/RouterTypeExample.scala b/akka-docs/scala/code/docs/routing/RouterTypeExample.scala index 421c7af3bb..db711b565a 100644 --- a/akka-docs/scala/code/docs/routing/RouterTypeExample.scala +++ b/akka-docs/scala/code/docs/routing/RouterTypeExample.scala @@ -3,12 +3,14 @@ */ package docs.routing +import language.postfixOps + import akka.routing.{ ScatterGatherFirstCompletedRouter, BroadcastRouter, RandomRouter, RoundRobinRouter } import annotation.tailrec import akka.actor.{ Props, Actor } -import akka.util.duration._ +import scala.concurrent.util.duration._ import akka.util.Timeout -import akka.dispatch.Await +import scala.concurrent.Await import akka.pattern.ask import akka.routing.SmallestMailboxRouter diff --git a/akka-docs/scala/code/docs/testkit/Specs2DemoAcceptance.scala b/akka-docs/scala/code/docs/testkit/Specs2DemoAcceptance.scala deleted file mode 100644 index ab8bac9bf3..0000000000 --- a/akka-docs/scala/code/docs/testkit/Specs2DemoAcceptance.scala +++ /dev/null @@ -1,36 +0,0 @@ -package docs.testkit - -import org.specs2.Specification -import org.specs2.specification.{ Step, Scope } - -import akka.actor.{ Props, ActorSystem, Actor } -import akka.testkit.{ TestKit, ImplicitSender } - -class Specs2DemoAcceptance extends Specification { - def is = - - "This is a specification of basic TestKit interop" ^ - p ^ - "A TestKit should" ^ - "work properly with Specs2 acceptance tests" ! e1 ^ - "correctly convert durations" ! e2 ^ - Step(system.shutdown()) ^ end // do not forget to shutdown! 
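A minimal sketch (editorial, not part of the patch) of the Promise-to-Future change that the FutureDocSpec hunks above keep applying: a Promise is no longer itself a Future, so the pre-completed helpers need an explicit `.future`; the names below are illustrative:

import language.postfixOps
import scala.concurrent.{ Await, Promise }
import scala.concurrent.util.duration._

val done   = Promise.successful("Yay!").future                               // already-completed Future[String]
val failed = Promise.failed[String](new IllegalArgumentException("Bang!")).future

Await.result(done, 1 second)     // "Yay!"
// Await.result(failed, 1 second) would rethrow the IllegalArgumentException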
- - val system = ActorSystem() - - // an alternative to mixing in NoTimeConversions - implicit def d2d(d: org.specs2.time.Duration): akka.util.FiniteDuration = - akka.util.Duration(d.inMilliseconds, "millis") - - def e1 = new TestKit(system) with Scope with ImplicitSender { - within(1 second) { - system.actorOf(Props(new Actor { - def receive = { case x ⇒ sender ! x } - })) ! "hallo" - - expectMsgType[String] must be equalTo "hallo" - } - } - - def e2 = ((1 second): akka.util.Duration).toMillis must be equalTo 1000 -} diff --git a/akka-docs/scala/code/docs/testkit/Specs2DemoSpec.scala b/akka-docs/scala/code/docs/testkit/Specs2DemoSpec.scala deleted file mode 100644 index a620c5139b..0000000000 --- a/akka-docs/scala/code/docs/testkit/Specs2DemoSpec.scala +++ /dev/null @@ -1,35 +0,0 @@ -package docs.testkit - -import org.specs2.mutable.Specification -import org.specs2.specification.Scope -import org.specs2.time.NoTimeConversions - -import akka.actor.{ Props, ActorSystem, Actor } -import akka.testkit.{ TestKit, ImplicitSender } -import akka.util.duration._ - -class Specs2DemoUnitSpec extends Specification with NoTimeConversions { - - val system = ActorSystem() - - /* - * this is needed if different test cases would clash when run concurrently, - * e.g. when creating specifically named top-level actors; leave out otherwise - */ - sequential - - "A TestKit" should { - "work properly with Specs2 unit tests" in - new TestKit(system) with Scope with ImplicitSender { - within(1 second) { - system.actorOf(Props(new Actor { - def receive = { case x ⇒ sender ! x } - })) ! "hallo" - - expectMsgType[String] must be equalTo "hallo" - } - } - } - - step(system.shutdown) // do not forget to shutdown! -} diff --git a/akka-docs/scala/code/docs/testkit/TestKitUsageSpec.scala b/akka-docs/scala/code/docs/testkit/TestKitUsageSpec.scala index 2ca1dbcef8..7d793c2eed 100644 --- a/akka-docs/scala/code/docs/testkit/TestKitUsageSpec.scala +++ b/akka-docs/scala/code/docs/testkit/TestKitUsageSpec.scala @@ -3,6 +3,8 @@ */ package docs.testkit +import language.postfixOps + //#testkit-usage import scala.util.Random @@ -19,7 +21,7 @@ import akka.actor.Props import akka.testkit.DefaultTimeout import akka.testkit.ImplicitSender import akka.testkit.TestKit -import akka.util.duration._ +import scala.concurrent.util.duration._ /** * a Test to show some TestKit examples diff --git a/akka-docs/scala/code/docs/testkit/TestkitDocSpec.scala b/akka-docs/scala/code/docs/testkit/TestkitDocSpec.scala index 1e42b2e8ac..94ddceab2f 100644 --- a/akka-docs/scala/code/docs/testkit/TestkitDocSpec.scala +++ b/akka-docs/scala/code/docs/testkit/TestkitDocSpec.scala @@ -3,18 +3,20 @@ */ package docs.testkit +import language.postfixOps + //#imports-test-probe import akka.testkit.TestProbe -import akka.util.duration._ +import scala.concurrent.util.duration._ import akka.actor._ -import akka.dispatch.Futures +import scala.concurrent.Future //#imports-test-probe import akka.testkit.AkkaSpec import akka.testkit.DefaultTimeout import akka.testkit.ImplicitSender -import akka.util.NonFatal +import scala.util.control.NonFatal object TestkitDocSpec { case object Say42 @@ -86,7 +88,7 @@ class TestkitDocSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { //#test-fsm-ref import akka.testkit.TestFSMRef import akka.actor.FSM - import akka.util.duration._ + import scala.concurrent.util.duration._ val fsm = TestFSMRef(new Actor with FSM[Int, String] { startWith(1, "") @@ -119,8 +121,8 @@ class TestkitDocSpec extends AkkaSpec with DefaultTimeout with 
ImplicitSender { //#test-behavior import akka.testkit.TestActorRef - import akka.util.duration._ - import akka.dispatch.Await + import scala.concurrent.util.duration._ + import scala.concurrent.Await import akka.pattern.ask val actorRef = TestActorRef(new MyActor) @@ -160,7 +162,7 @@ class TestkitDocSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { type Worker = MyActor //#test-within import akka.actor.Props - import akka.util.duration._ + import scala.concurrent.util.duration._ val worker = system.actorOf(Props[Worker]) within(200 millis) { @@ -174,7 +176,7 @@ class TestkitDocSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { "demonstrate dilated duration" in { //#duration-dilation - import akka.util.duration._ + import scala.concurrent.util.duration._ import akka.testkit._ 10.milliseconds.dilated //#duration-dilation @@ -207,7 +209,7 @@ class TestkitDocSpec extends AkkaSpec with DefaultTimeout with ImplicitSender { "demonstrate probe reply" in { import akka.testkit.TestProbe - import akka.util.duration._ + import scala.concurrent.util.duration._ import akka.pattern.ask //#test-probe-reply val probe = TestProbe() diff --git a/akka-docs/scala/code/docs/transactor/TransactorDocSpec.scala b/akka-docs/scala/code/docs/transactor/TransactorDocSpec.scala index c1556b837d..2faa1a9703 100644 --- a/akka-docs/scala/code/docs/transactor/TransactorDocSpec.scala +++ b/akka-docs/scala/code/docs/transactor/TransactorDocSpec.scala @@ -4,9 +4,11 @@ package docs.transactor +import language.postfixOps + import akka.actor._ import akka.transactor._ -import akka.util.duration._ +import scala.concurrent.util.duration._ import akka.util.Timeout import akka.testkit._ import scala.concurrent.stm._ @@ -138,8 +140,8 @@ class TransactorDocSpec extends AkkaSpec { import CoordinatedExample._ //#run-coordinated-example - import akka.dispatch.Await - import akka.util.duration._ + import scala.concurrent.Await + import scala.concurrent.util.duration._ import akka.util.Timeout import akka.pattern.ask @@ -166,7 +168,7 @@ class TransactorDocSpec extends AkkaSpec { import CoordinatedApi._ //#implicit-timeout - import akka.util.duration._ + import scala.concurrent.util.duration._ import akka.util.Timeout implicit val timeout = Timeout(5 seconds) diff --git a/akka-docs/scala/code/docs/zeromq/ZeromqDocSpec.scala b/akka-docs/scala/code/docs/zeromq/ZeromqDocSpec.scala index 812e0edaaa..1702a8e89a 100644 --- a/akka-docs/scala/code/docs/zeromq/ZeromqDocSpec.scala +++ b/akka-docs/scala/code/docs/zeromq/ZeromqDocSpec.scala @@ -3,16 +3,15 @@ */ package docs.zeromq -import akka.actor.Actor -import akka.actor.Props -import akka.util.duration._ +import language.postfixOps + +import akka.actor.{ Actor, Props } +import scala.concurrent.util.duration._ import akka.testkit._ -import akka.zeromq.ZeroMQVersion -import akka.zeromq.ZeroMQExtension +import akka.zeromq.{ ZeroMQVersion, ZeroMQExtension } import java.text.SimpleDateFormat import java.util.Date -import akka.zeromq.SocketType -import akka.zeromq.Bind +import akka.zeromq.{ SocketType, Bind } object ZeromqDocSpec { @@ -30,7 +29,7 @@ object ZeromqDocSpec { class HealthProbe extends Actor { - val pubSocket = context.system.newSocket(SocketType.Pub, Bind("tcp://127.0.0.1:1235")) + val pubSocket = ZeroMQExtension(context.system).newSocket(SocketType.Pub, Bind("tcp://127.0.0.1:1235")) val memory = ManagementFactory.getMemoryMXBean val os = ManagementFactory.getOperatingSystemMXBean val ser = SerializationExtension(context.system) @@ -64,7 +63,7 @@ object 
ZeromqDocSpec { //#logger class Logger extends Actor with ActorLogging { - context.system.newSocket(SocketType.Sub, Listener(self), Connect("tcp://127.0.0.1:1235"), Subscribe("health")) + ZeroMQExtension(context.system).newSocket(SocketType.Sub, Listener(self), Connect("tcp://127.0.0.1:1235"), Subscribe("health")) val ser = SerializationExtension(context.system) val timestampFormat = new SimpleDateFormat("HH:mm:ss.SSS") @@ -90,7 +89,7 @@ object ZeromqDocSpec { //#alerter class HeapAlerter extends Actor with ActorLogging { - context.system.newSocket(SocketType.Sub, Listener(self), Connect("tcp://127.0.0.1:1235"), Subscribe("health.heap")) + ZeroMQExtension(context.system).newSocket(SocketType.Sub, Listener(self), Connect("tcp://127.0.0.1:1235"), Subscribe("health.heap")) val ser = SerializationExtension(context.system) var count = 0 @@ -121,11 +120,6 @@ class ZeromqDocSpec extends AkkaSpec("akka.loglevel=INFO") { val pubSocket = ZeroMQExtension(system).newSocket(SocketType.Pub, Bind("tcp://127.0.0.1:21231")) //#pub-socket - //#pub-socket2 - import akka.zeromq._ - val pubSocket2 = system.newSocket(SocketType.Pub, Bind("tcp://127.0.0.1:21232")) - //#pub-socket2 - //#sub-socket import akka.zeromq._ val listener = system.actorOf(Props(new Actor { @@ -135,11 +129,11 @@ class ZeromqDocSpec extends AkkaSpec("akka.loglevel=INFO") { case _ ⇒ //... } })) - val subSocket = system.newSocket(SocketType.Sub, Listener(listener), Connect("tcp://127.0.0.1:21231"), SubscribeAll) + val subSocket = ZeroMQExtension(system).newSocket(SocketType.Sub, Listener(listener), Connect("tcp://127.0.0.1:21231"), SubscribeAll) //#sub-socket //#sub-topic-socket - val subTopicSocket = system.newSocket(SocketType.Sub, Listener(listener), Connect("tcp://127.0.0.1:21231"), Subscribe("foo.bar")) + val subTopicSocket = ZeroMQExtension(system).newSocket(SocketType.Sub, Listener(listener), Connect("tcp://127.0.0.1:21231"), Subscribe("foo.bar")) //#sub-topic-socket //#unsub-topic-socket @@ -155,7 +149,7 @@ class ZeromqDocSpec extends AkkaSpec("akka.loglevel=INFO") { system.stop(subTopicSocket) //#high-watermark - val highWatermarkSocket = system.newSocket( + val highWatermarkSocket = ZeroMQExtension(system).newSocket( SocketType.Router, Listener(listener), Bind("tcp://127.0.0.1:21233"), @@ -183,7 +177,7 @@ class ZeromqDocSpec extends AkkaSpec("akka.loglevel=INFO") { // Let it run for a while to see some output. // Don't do like this in real tests, this is only doc demonstration. - 3.seconds.sleep() + Thread.sleep(3.seconds.toMillis) } diff --git a/akka-docs/scala/testing.rst b/akka-docs/scala/testing.rst index 8a05280580..0ca0373f5f 100644 --- a/akka-docs/scala/testing.rst +++ b/akka-docs/scala/testing.rst @@ -707,10 +707,3 @@ Some `Specs2 `_ users have contributed examples of how to wor * Specifications are by default executed concurrently, which requires some care when writing the tests or alternatively the ``sequential`` keyword. - -You can use the following two examples as guidelines: - -.. includecode:: code/docs/testkit/Specs2DemoSpec.scala - -.. 
includecode:: code/docs/testkit/Specs2DemoAcceptance.scala - diff --git a/akka-docs/scala/typed-actors.rst b/akka-docs/scala/typed-actors.rst index bd7d92f924..ce9c608e4e 100644 --- a/akka-docs/scala/typed-actors.rst +++ b/akka-docs/scala/typed-actors.rst @@ -97,7 +97,7 @@ Method dispatch semantics Methods returning: * ``Unit`` will be dispatched with ``fire-and-forget`` semantics, exactly like ``ActorRef.tell`` - * ``akka.dispatch.Future[_]`` will use ``send-request-reply`` semantics, exactly like ``ActorRef.ask`` + * ``scala.concurrent.Future[_]`` will use ``send-request-reply`` semantics, exactly like ``ActorRef.ask`` * ``scala.Option[_]`` or ``akka.japi.Option`` will use ``send-request-reply`` semantics, but *will* block to wait for an answer, and return None if no answer was produced within the timeout, or scala.Some/akka.japi.Some containing the result otherwise. Any exception that was thrown during this call will be rethrown. diff --git a/akka-docs/scala/zeromq.rst b/akka-docs/scala/zeromq.rst index d94ee81270..b9780b70d6 100644 --- a/akka-docs/scala/zeromq.rst +++ b/akka-docs/scala/zeromq.rst @@ -21,12 +21,8 @@ Sockets are always created using the ``akka.zeromq.ZeroMQExtension``, for exampl .. includecode:: code/docs/zeromq/ZeromqDocSpec.scala#pub-socket -or by importing the ``akka.zeromq._`` package to make newSocket method available on system, via an implicit conversion. -.. includecode:: code/docs/zeromq/ZeromqDocSpec.scala#pub-socket2 - - -Above examples will create a ZeroMQ Publisher socket that is Bound to the port 1234 on localhost. +Above examples will create a ZeroMQ Publisher socket that is Bound to the port 1233 on localhost. Similarly you can create a subscription socket, with a listener, that subscribes to all messages from the publisher using: diff --git a/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/FileBasedMailbox.scala b/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/FileBasedMailbox.scala index c703bf0b49..2341f47fd5 100644 --- a/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/FileBasedMailbox.scala +++ b/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/FileBasedMailbox.scala @@ -4,22 +4,20 @@ package akka.actor.mailbox -import akka.actor.ActorContext +import akka.actor.{ ActorContext, ActorRef, ActorSystem, ExtendedActorSystem } import akka.event.Logging -import akka.actor.ActorRef import com.typesafe.config.Config import akka.ConfigurationException -import akka.actor.ActorSystem import akka.dispatch._ -import akka.util.{ Duration, NonFatal } +import scala.util.control.NonFatal import akka.pattern.{ CircuitBreakerOpenException, CircuitBreaker } -import akka.actor.ExtendedActorSystem +import scala.concurrent.util.Duration class FileBasedMailboxType(systemSettings: ActorSystem.Settings, config: Config) extends MailboxType { private val settings = new FileBasedMailboxSettings(systemSettings, config) - override def create(owner: Option[ActorRef], system: Option[ActorSystem]): MessageQueue = owner zip system headOption match { + override def create(owner: Option[ActorRef], system: Option[ActorSystem]): MessageQueue = (owner zip system).headOption match { case Some((o, s: ExtendedActorSystem)) ⇒ new FileBasedMessageQueue(o, s, settings) - case None ⇒ throw new ConfigurationException("creating a durable mailbox requires an owner (i.e. 
does not work with BalancingDispatcher)") + case _ ⇒ throw new ConfigurationException("creating a durable mailbox requires an owner (i.e. does not work with BalancingDispatcher)") } } diff --git a/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/FileBasedMailboxSettings.scala b/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/FileBasedMailboxSettings.scala index 27088dfc92..47dc3d89bd 100644 --- a/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/FileBasedMailboxSettings.scala +++ b/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/FileBasedMailboxSettings.scala @@ -4,7 +4,7 @@ package akka.actor.mailbox import com.typesafe.config.Config -import akka.util.Duration +import scala.concurrent.util.Duration import java.util.concurrent.TimeUnit.MILLISECONDS import akka.actor.ActorSystem diff --git a/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/filequeue/Journal.scala b/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/filequeue/Journal.scala index 65910fb158..5d5ed54ae7 100644 --- a/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/filequeue/Journal.scala +++ b/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/filequeue/Journal.scala @@ -21,7 +21,7 @@ import java.io._ import java.nio.{ ByteBuffer, ByteOrder } import java.nio.channels.FileChannel import akka.event.LoggingAdapter -import akka.util.NonFatal +import scala.util.control.NonFatal // returned from journal replay sealed trait JournalItem diff --git a/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/filequeue/PersistentQueue.scala b/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/filequeue/PersistentQueue.scala index 152b29406c..9b012e34f0 100644 --- a/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/filequeue/PersistentQueue.scala +++ b/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/filequeue/PersistentQueue.scala @@ -20,7 +20,7 @@ package akka.actor.mailbox.filequeue import java.io._ import scala.collection.mutable import akka.event.LoggingAdapter -import akka.util.Duration +import scala.concurrent.util.Duration import java.util.concurrent.TimeUnit import akka.actor.mailbox.FileBasedMailboxSettings @@ -34,6 +34,10 @@ class OverlaySetting[T](base: ⇒ T) { def apply() = local.getOrElse(base) } +trait Prependable[T] { + def prepend(t: T): Unit +} + class PersistentQueue(persistencePath: String, val name: String, val settings: FileBasedMailboxSettings, log: LoggingAdapter) { private case object ItemArrived @@ -56,9 +60,9 @@ class PersistentQueue(persistencePath: String, val name: String, val settings: F // # of items in the queue (including those not in memory) private var queueLength: Long = 0 - private var queue = new mutable.Queue[QItem] { + private var queue: mutable.Queue[QItem] with Prependable[QItem] = new mutable.Queue[QItem] with Prependable[QItem] { // scala's Queue doesn't (yet?) have a way to put back. 
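The surrounding PersistentQueue hunk replaces a structural `unget` member with a small `Prependable` trait mixed into the queue; a stand-alone sketch of that pattern (illustrative only, using Int instead of QItem):

import scala.collection.mutable

trait Prependable[T] {
  def prepend(t: T): Unit
}

// mixing the trait into Queue exposes prepend without reflective structural calls
val queue: mutable.Queue[Int] with Prependable[Int] =
  new mutable.Queue[Int] with Prependable[Int] {
    def prepend(item: Int) = prependElem(item)   // put an item back at the front
  }

queue.enqueue(1, 2)
queue.prepend(0)
// queue.dequeue() now returns 0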
- def unget(item: QItem) = prependElem(item) + def prepend(item: QItem) = prependElem(item) } private var _memoryBytes: Long = 0 @@ -435,7 +439,7 @@ class PersistentQueue(persistencePath: String, val name: String, val settings: F openTransactions.remove(xid) map { item ⇒ queueLength += 1 queueSize += item.data.length - queue unget item + queue prepend item _memoryBytes += item.data.length } } diff --git a/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/filequeue/tools/QDumper.scala b/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/filequeue/tools/QDumper.scala index 06f151d84a..1ede4428f1 100644 --- a/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/filequeue/tools/QDumper.scala +++ b/akka-durable-mailboxes/akka-file-mailbox/src/main/scala/akka/actor/mailbox/filequeue/tools/QDumper.scala @@ -17,6 +17,8 @@ package akka.actor.mailbox.filequeue.tools +import language.reflectiveCalls + import java.io.{ FileNotFoundException, IOException } import scala.collection.mutable import akka.actor.mailbox.filequeue._ diff --git a/akka-durable-mailboxes/akka-file-mailbox/src/test/scala/akka/actor/mailbox/FileBasedMailboxSpec.scala b/akka-durable-mailboxes/akka-file-mailbox/src/test/scala/akka/actor/mailbox/FileBasedMailboxSpec.scala index e3ad811b52..0e10c3c7ad 100644 --- a/akka-durable-mailboxes/akka-file-mailbox/src/test/scala/akka/actor/mailbox/FileBasedMailboxSpec.scala +++ b/akka-durable-mailboxes/akka-file-mailbox/src/test/scala/akka/actor/mailbox/FileBasedMailboxSpec.scala @@ -1,5 +1,7 @@ package akka.actor.mailbox +import language.postfixOps + import org.apache.commons.io.FileUtils import akka.dispatch.Mailbox @@ -25,7 +27,7 @@ class FileBasedMailboxSpec extends DurableMailboxSpec("File", FileBasedMailboxSp settings.QueuePath must be("file-based") settings.CircuitBreakerMaxFailures must be(5) - import akka.util.duration._ + import scala.concurrent.util.duration._ settings.CircuitBreakerCallTimeout must be(5 seconds) } diff --git a/akka-durable-mailboxes/akka-mailboxes-common/src/test/scala/akka/actor/mailbox/DurableMailboxSpec.scala b/akka-durable-mailboxes/akka-mailboxes-common/src/test/scala/akka/actor/mailbox/DurableMailboxSpec.scala index 8264bd0348..0e156a5632 100644 --- a/akka-durable-mailboxes/akka-mailboxes-common/src/test/scala/akka/actor/mailbox/DurableMailboxSpec.scala +++ b/akka-durable-mailboxes/akka-mailboxes-common/src/test/scala/akka/actor/mailbox/DurableMailboxSpec.scala @@ -3,6 +3,8 @@ */ package akka.actor.mailbox +import language.postfixOps + import java.io.InputStream import java.util.concurrent.TimeoutException @@ -17,7 +19,7 @@ import DurableMailboxSpecActorFactory.{ MailboxTestActor, AccumulatorActor } import akka.actor.{ RepointableRef, Props, ActorSystem, ActorRefWithCell, ActorRef, ActorCell, Actor } import akka.dispatch.Mailbox import akka.testkit.TestKit -import akka.util.duration.intToDurationInt +import scala.concurrent.util.duration.intToDurationInt object DurableMailboxSpecActorFactory { diff --git a/akka-osgi-aries/src/main/scala/akka/osgi/aries/blueprint/BlueprintActorSystemFactory.scala b/akka-osgi-aries/src/main/scala/akka/osgi/aries/blueprint/BlueprintActorSystemFactory.scala index f45f29b82a..30720a230c 100644 --- a/akka-osgi-aries/src/main/scala/akka/osgi/aries/blueprint/BlueprintActorSystemFactory.scala +++ b/akka-osgi-aries/src/main/scala/akka/osgi/aries/blueprint/BlueprintActorSystemFactory.scala @@ -1,3 +1,6 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. 
+ */ package akka.osgi.aries.blueprint import org.osgi.framework.BundleContext diff --git a/akka-osgi-aries/src/test/scala/akka/osgi/aries/blueprint/ActorSystemAwareBean.scala b/akka-osgi-aries/src/test/scala/akka/osgi/aries/blueprint/ActorSystemAwareBean.scala index 6e4bac39dd..ade4a17bda 100644 --- a/akka-osgi-aries/src/test/scala/akka/osgi/aries/blueprint/ActorSystemAwareBean.scala +++ b/akka-osgi-aries/src/test/scala/akka/osgi/aries/blueprint/ActorSystemAwareBean.scala @@ -1,3 +1,6 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ package akka.osgi.aries.blueprint import akka.actor.ActorSystem diff --git a/akka-osgi-aries/src/test/scala/akka/osgi/aries/blueprint/NamespaceHandlerTest.scala b/akka-osgi-aries/src/test/scala/akka/osgi/aries/blueprint/NamespaceHandlerTest.scala index cfe9ce2e94..03338c6b24 100644 --- a/akka-osgi-aries/src/test/scala/akka/osgi/aries/blueprint/NamespaceHandlerTest.scala +++ b/akka-osgi-aries/src/test/scala/akka/osgi/aries/blueprint/NamespaceHandlerTest.scala @@ -1,3 +1,6 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ package akka.osgi.aries.blueprint import org.scalatest.WordSpec diff --git a/akka-osgi/src/main/scala/akka/osgi/ActorSystemActivator.scala b/akka-osgi/src/main/scala/akka/osgi/ActorSystemActivator.scala index fcd7d9aa9b..4318ebc44f 100644 --- a/akka-osgi/src/main/scala/akka/osgi/ActorSystemActivator.scala +++ b/akka-osgi/src/main/scala/akka/osgi/ActorSystemActivator.scala @@ -1,3 +1,6 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ package akka.osgi import akka.actor.ActorSystem diff --git a/akka-osgi/src/main/scala/akka/osgi/OsgiActorSystemFactory.scala b/akka-osgi/src/main/scala/akka/osgi/OsgiActorSystemFactory.scala index 6acb766bfb..608b80403b 100644 --- a/akka-osgi/src/main/scala/akka/osgi/OsgiActorSystemFactory.scala +++ b/akka-osgi/src/main/scala/akka/osgi/OsgiActorSystemFactory.scala @@ -1,3 +1,6 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ package akka.osgi import impl.BundleDelegatingClassLoader diff --git a/akka-osgi/src/main/scala/akka/osgi/impl/BundleDelegatingClassLoader.scala b/akka-osgi/src/main/scala/akka/osgi/impl/BundleDelegatingClassLoader.scala index 5d89e234d9..47476e6a82 100644 --- a/akka-osgi/src/main/scala/akka/osgi/impl/BundleDelegatingClassLoader.scala +++ b/akka-osgi/src/main/scala/akka/osgi/impl/BundleDelegatingClassLoader.scala @@ -1,8 +1,12 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ package akka.osgi.impl +import language.existentials + import java.net.URL import java.util.Enumeration - import org.osgi.framework.{ BundleContext, Bundle } /* diff --git a/akka-osgi/src/test/scala/akka/osgi/ActorSystemActivatorTest.scala b/akka-osgi/src/test/scala/akka/osgi/ActorSystemActivatorTest.scala index 6fa89886dd..e1781b4a80 100644 --- a/akka-osgi/src/test/scala/akka/osgi/ActorSystemActivatorTest.scala +++ b/akka-osgi/src/test/scala/akka/osgi/ActorSystemActivatorTest.scala @@ -1,10 +1,15 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. 
+ */ package akka.osgi +import language.postfixOps + import org.scalatest.WordSpec import akka.actor.ActorSystem import akka.pattern.ask -import akka.dispatch.Await -import akka.util.duration._ +import scala.concurrent.Await +import scala.concurrent.util.duration._ import akka.util.Timeout import de.kalpatec.pojosr.framework.launch.BundleDescriptor import test.{ RuntimeNameActorSystemActivator, TestActivators, PingPongActorSystemActivator } diff --git a/akka-osgi/src/test/scala/akka/osgi/PojoSRTestSupport.scala b/akka-osgi/src/test/scala/akka/osgi/PojoSRTestSupport.scala index df96601d79..e928f42c53 100644 --- a/akka-osgi/src/test/scala/akka/osgi/PojoSRTestSupport.scala +++ b/akka-osgi/src/test/scala/akka/osgi/PojoSRTestSupport.scala @@ -1,3 +1,6 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ package akka.osgi import de.kalpatec.pojosr.framework.launch.{ BundleDescriptor, PojoServiceRegistryFactory, ClasspathScanner } diff --git a/akka-osgi/src/test/scala/akka/osgi/test/PingPong.scala b/akka-osgi/src/test/scala/akka/osgi/test/PingPong.scala index 6a7409c667..cd2294f3b0 100644 --- a/akka-osgi/src/test/scala/akka/osgi/test/PingPong.scala +++ b/akka-osgi/src/test/scala/akka/osgi/test/PingPong.scala @@ -1,3 +1,6 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ package akka.osgi.test import akka.actor.Actor diff --git a/akka-osgi/src/test/scala/akka/osgi/test/TestActivators.scala b/akka-osgi/src/test/scala/akka/osgi/test/TestActivators.scala index 54369d88ca..eec07b99e1 100644 --- a/akka-osgi/src/test/scala/akka/osgi/test/TestActivators.scala +++ b/akka-osgi/src/test/scala/akka/osgi/test/TestActivators.scala @@ -1,3 +1,6 @@ +/** + * Copyright (C) 2009-2012 Typesafe Inc. + */ package akka.osgi.test import akka.osgi.ActorSystemActivator diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala index 24377d54a1..fba915dd46 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Conductor.scala @@ -3,28 +3,26 @@ */ package akka.remote.testconductor +import language.postfixOps + import akka.actor.{ Actor, ActorRef, ActorSystem, LoggingFSM, Props } import RemoteConnection.getAddrString import TestConductorProtocol._ import org.jboss.netty.channel.{ Channel, SimpleChannelUpstreamHandler, ChannelHandlerContext, ChannelStateEvent, MessageEvent } import com.typesafe.config.ConfigFactory -import akka.util.duration._ +import scala.concurrent.util.duration._ import akka.pattern.ask -import java.util.concurrent.TimeUnit.MILLISECONDS -import akka.dispatch.Await -import akka.event.LoggingAdapter -import akka.actor.PoisonPill -import akka.event.Logging +import scala.concurrent.Await +import akka.event.{ LoggingAdapter, Logging } import scala.util.control.NoStackTrace import akka.event.LoggingReceive -import akka.actor.Address import java.net.InetSocketAddress -import akka.dispatch.Future -import akka.actor.OneForOneStrategy -import akka.actor.SupervisorStrategy +import scala.concurrent.Future +import akka.actor.{ OneForOneStrategy, SupervisorStrategy, Status, Address, PoisonPill } import java.util.concurrent.ConcurrentHashMap -import akka.actor.Status -import akka.util.{ Deadline, Timeout, Duration } +import java.util.concurrent.TimeUnit.MILLISECONDS +import akka.util.{ Timeout } +import scala.concurrent.util.{ Deadline, Duration } sealed trait Direction { def includes(other: Direction): Boolean 
@@ -88,6 +86,7 @@ trait Conductor { this: TestConductorExt ⇒ if (_controller ne null) throw new RuntimeException("TestConductorServer was already started") _controller = system.actorOf(Props(new Controller(participants, controllerPort)), "controller") import Settings.BarrierTimeout + import system.dispatcher controller ? GetSockAddr flatMap { case sockAddr: InetSocketAddress ⇒ startClient(name, sockAddr) map (_ ⇒ sockAddr) } } @@ -444,6 +443,7 @@ private[akka] class Controller(private var initialParticipants: Int, controllerP case GetAddress(node) ⇒ if (nodes contains node) sender ! ToClient(AddressReply(node, nodes(node).addr)) else addrInterest += node -> ((addrInterest get node getOrElse Set()) + sender) + case _: Done ⇒ //FIXME what should happen? } case op: CommandOp ⇒ op match { @@ -569,7 +569,7 @@ private[akka] class BarrierCoordinator extends Actor with LoggingFSM[BarrierCoor val together = if (clients.exists(_.fsm == sender)) sender :: arrived else arrived val enterDeadline = getDeadline(timeout) // we only allow the deadlines to get shorter - if (enterDeadline < deadline) { + if (enterDeadline.timeLeft < deadline.timeLeft) { setTimer("Timeout", StateTimeout, enterDeadline.timeLeft, false) handleBarrier(d.copy(arrived = together, deadline = enterDeadline)) } else diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/DataTypes.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/DataTypes.scala index 830b32e485..d265998c5e 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/DataTypes.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/DataTypes.scala @@ -3,6 +3,8 @@ */ package akka.remote.testconductor +import language.implicitConversions + import org.jboss.netty.handler.codec.oneone.OneToOneEncoder import org.jboss.netty.channel.ChannelHandlerContext import org.jboss.netty.channel.Channel @@ -10,7 +12,7 @@ import akka.remote.testconductor.{ TestConductorProtocol ⇒ TCP } import com.google.protobuf.Message import akka.actor.Address import org.jboss.netty.handler.codec.oneone.OneToOneDecoder -import akka.util.Duration +import scala.concurrent.util.Duration import akka.remote.testconductor.TestConductorProtocol.BarrierOp case class RoleName(name: String) diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Extension.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Extension.scala index 48f3983a78..4469ce308a 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Extension.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Extension.scala @@ -1,17 +1,11 @@ package akka.remote.testconductor -import akka.actor.ExtensionKey -import akka.actor.Extension -import akka.actor.ExtendedActorSystem +import akka.actor.{ ExtensionKey, Extension, ExtendedActorSystem, ActorContext, ActorRef, Address, ActorSystemImpl, Props } import akka.remote.RemoteActorRefProvider -import akka.actor.ActorContext -import akka.util.{ Duration, Timeout } +import akka.util.Timeout import java.util.concurrent.TimeUnit.MILLISECONDS -import akka.actor.ActorRef import java.util.concurrent.ConcurrentHashMap -import akka.actor.Address -import akka.actor.ActorSystemImpl -import akka.actor.Props +import scala.concurrent.util.Duration /** * Access to the [[akka.remote.testconductor.TestConductorExt]] extension: diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala 
b/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala index 2d5b73216e..6c8352d880 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/NetworkFailureInjector.scala @@ -3,6 +3,8 @@ */ package akka.remote.testconductor +import language.postfixOps + import java.net.InetSocketAddress import scala.annotation.tailrec @@ -14,8 +16,8 @@ import org.jboss.netty.channel.{ SimpleChannelHandler, MessageEvent, Channels, C import akka.actor.{ Props, LoggingFSM, Address, ActorSystem, ActorRef, ActorLogging, Actor, FSM } import akka.event.Logging import akka.remote.netty.ChannelAddress -import akka.util.Duration -import akka.util.duration._ +import scala.concurrent.util.Duration +import scala.concurrent.util.duration._ /** * INTERNAL API. diff --git a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Player.scala b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Player.scala index c7f69091cf..f102d3b700 100644 --- a/akka-remote-tests/src/main/scala/akka/remote/testconductor/Player.scala +++ b/akka-remote-tests/src/main/scala/akka/remote/testconductor/Player.scala @@ -3,30 +3,23 @@ */ package akka.remote.testconductor -import akka.actor.{ Actor, ActorRef, ActorSystem, LoggingFSM, Props } +import language.postfixOps + +import akka.actor.{ Actor, ActorRef, ActorSystem, LoggingFSM, Props, PoisonPill, Status, Address, Scheduler } import RemoteConnection.getAddrString -import akka.util.duration._ -import org.jboss.netty.channel.{ Channel, SimpleChannelUpstreamHandler, ChannelHandlerContext, ChannelStateEvent, MessageEvent } -import com.typesafe.config.ConfigFactory +import scala.concurrent.util.{ Duration, Deadline } +import scala.concurrent.Await +import scala.concurrent.util.duration._ import akka.util.Timeout -import akka.util.Duration +import org.jboss.netty.channel.{ Channel, SimpleChannelUpstreamHandler, ChannelHandlerContext, ChannelStateEvent, MessageEvent, WriteCompletionEvent, ExceptionEvent } +import com.typesafe.config.ConfigFactory import java.util.concurrent.TimeUnit.MILLISECONDS -import akka.pattern.{ ask, pipe, AskTimeoutException } -import akka.dispatch.Await -import scala.util.control.NoStackTrace -import akka.actor.Status -import akka.event.LoggingAdapter -import akka.actor.PoisonPill -import akka.event.Logging -import akka.dispatch.Future -import java.net.InetSocketAddress -import akka.actor.Address -import org.jboss.netty.channel.ExceptionEvent -import org.jboss.netty.channel.WriteCompletionEvent -import java.net.ConnectException -import akka.util.Deadline -import akka.actor.Scheduler import java.util.concurrent.TimeoutException +import akka.pattern.{ ask, pipe, AskTimeoutException } +import scala.concurrent.Future +import scala.util.control.NoStackTrace +import akka.event.{ LoggingAdapter, Logging } +import java.net.{ InetSocketAddress, ConnectException } /** * The Player is the client component of the @@ -184,7 +177,7 @@ private[akka] class ClientFSM(name: RoleName, controllerAddr: InetSocketAddress) case Event(Disconnected, _) ⇒ log.info("disconnected from TestConductor") throw new ConnectionFailure("disconnect") - case Event(ToServer(Done), Data(Some(channel), _)) ⇒ + case Event(ToServer(_: Done), Data(Some(channel), _)) ⇒ channel.write(Done) stay case Event(ToServer(msg), d @ Data(Some(channel), None)) ⇒ @@ -202,36 +195,35 @@ private[akka] class ClientFSM(name: RoleName, controllerAddr: InetSocketAddress) case 
BarrierResult(b, success) ⇒ runningOp match { case Some((barrier, requester)) ⇒ - if (b != barrier) { - requester ! Status.Failure(new RuntimeException("wrong barrier " + b + " received while waiting for " + barrier)) - } else if (!success) { - requester ! Status.Failure(new RuntimeException("barrier failed: " + b)) - } else { - requester ! b - } + val response = + if (b != barrier) Status.Failure(new RuntimeException("wrong barrier " + b + " received while waiting for " + barrier)) + else if (!success) Status.Failure(new RuntimeException("barrier failed: " + b)) + else b + requester ! response case None ⇒ log.warning("did not expect {}", op) } stay using d.copy(runningOp = None) case AddressReply(node, addr) ⇒ runningOp match { - case Some((_, requester)) ⇒ - requester ! addr - case None ⇒ - log.warning("did not expect {}", op) + case Some((_, requester)) ⇒ requester ! addr + case None ⇒ log.warning("did not expect {}", op) } stay using d.copy(runningOp = None) case t: ThrottleMsg ⇒ import settings.QueryTimeout + import context.dispatcher // FIXME is this the right EC for the future below? TestConductor().failureInjector ? t map (_ ⇒ ToServer(Done)) pipeTo self stay case d: DisconnectMsg ⇒ import settings.QueryTimeout + import context.dispatcher // FIXME is this the right EC for the future below? TestConductor().failureInjector ? d map (_ ⇒ ToServer(Done)) pipeTo self stay case TerminateMsg(exit) ⇒ System.exit(exit) stay // needed because Java doesn’t have Nothing + case _: Done ⇒ stay //FIXME what should happen? } } diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/RandomRoutedRemoteActorSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/RandomRoutedRemoteActorSpec.scala index 44c7ae5047..26ae39088a 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/RandomRoutedRemoteActorSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/RandomRoutedRemoteActorSpec.scala @@ -8,7 +8,7 @@ import akka.actor.ActorRef import akka.actor.Props import akka.actor.PoisonPill import akka.actor.Address -import akka.dispatch.Await +import scala.concurrent.Await import akka.pattern.ask import akka.remote.testkit.MultiNodeConfig import akka.remote.testkit.MultiNodeSpec @@ -16,7 +16,7 @@ import akka.routing.Broadcast import akka.routing.RandomRouter import akka.routing.RoutedActorRef import akka.testkit._ -import akka.util.duration._ +import scala.concurrent.util.duration._ object RandomRoutedRemoteActorMultiJvmSpec extends MultiNodeConfig { diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/RoundRobinRoutedRemoteActorSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/RoundRobinRoutedRemoteActorSpec.scala index 76a7e41ad1..2107a40f4d 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/RoundRobinRoutedRemoteActorSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/RoundRobinRoutedRemoteActorSpec.scala @@ -8,7 +8,7 @@ import akka.actor.ActorRef import akka.actor.Props import akka.actor.PoisonPill import akka.actor.Address -import akka.dispatch.Await +import scala.concurrent.Await import akka.pattern.ask import akka.remote.testkit.MultiNodeConfig import akka.remote.testkit.MultiNodeSpec @@ -16,7 +16,7 @@ import akka.routing.Broadcast import akka.routing.RoundRobinRouter import akka.routing.RoutedActorRef import akka.testkit._ -import akka.util.duration._ +import scala.concurrent.util.duration._ object RoundRobinRoutedRemoteActorMultiJvmSpec extends MultiNodeConfig { 
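The router multi-jvm spec hunks above only swap `akka.dispatch.Await` and `akka.util.duration` for their `scala.concurrent` counterparts; the ask-and-await pattern those specs rely on looks like this afterwards (a sketch assuming some `actorRef` is in scope; names are illustrative):

import language.postfixOps
import akka.pattern.ask
import akka.util.Timeout
import scala.concurrent.Await
import scala.concurrent.util.duration._

implicit val timeout = Timeout(5 seconds)
val future = actorRef ? "hit"                        // scala.concurrent.Future[Any]
val reply  = Await.result(future, timeout.duration)  // blocks only the calling thread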
diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/ScatterGatherRoutedRemoteActorSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/ScatterGatherRoutedRemoteActorSpec.scala index b77b0c196e..cfcf7a393d 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/ScatterGatherRoutedRemoteActorSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/router/ScatterGatherRoutedRemoteActorSpec.scala @@ -6,7 +6,7 @@ package akka.remote.router import akka.actor.Actor import akka.actor.ActorRef import akka.actor.Props -import akka.dispatch.Await +import scala.concurrent.Await import akka.pattern.ask import akka.remote.testkit.MultiNodeConfig import akka.remote.testkit.MultiNodeSpec @@ -14,7 +14,7 @@ import akka.routing.Broadcast import akka.routing.ScatterGatherFirstCompletedRouter import akka.routing.RoutedActorRef import akka.testkit._ -import akka.util.duration._ +import scala.concurrent.util.duration._ import akka.actor.PoisonPill import akka.actor.Address diff --git a/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala b/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala index 86fabc489d..c208011f32 100644 --- a/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala +++ b/akka-remote-tests/src/multi-jvm/scala/akka/remote/testconductor/TestConductorSpec.scala @@ -6,10 +6,10 @@ package akka.remote.testconductor import com.typesafe.config.ConfigFactory import akka.actor.Props import akka.actor.Actor -import akka.dispatch.Await -import akka.dispatch.Await.Awaitable -import akka.util.Duration -import akka.util.duration._ +import scala.concurrent.Await +import scala.concurrent.Await.Awaitable +import scala.concurrent.util.Duration +import scala.concurrent.util.duration._ import akka.testkit.ImplicitSender import akka.testkit.LongRunningTest import java.net.InetSocketAddress diff --git a/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala b/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala index 8ff95d0831..f306477a28 100644 --- a/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala +++ b/akka-remote-tests/src/test/scala/akka/remote/testconductor/BarrierSpec.scala @@ -3,23 +3,16 @@ */ package akka.remote.testconductor -import akka.testkit.AkkaSpec -import akka.actor.Props -import akka.actor.AddressFromURIString -import akka.actor.ActorRef -import akka.testkit.ImplicitSender -import akka.actor.Actor -import akka.actor.OneForOneStrategy -import akka.actor.SupervisorStrategy -import akka.testkit.EventFilter -import akka.testkit.TestProbe -import akka.util.duration._ +import language.postfixOps + +import akka.actor.{ Props, AddressFromURIString, ActorRef, Actor, OneForOneStrategy, SupervisorStrategy } +import akka.testkit.{ AkkaSpec, ImplicitSender, EventFilter, TestProbe, TimingTest } +import scala.concurrent.util.Duration +import scala.concurrent.util.duration._ import akka.event.Logging +import akka.util.Timeout import org.scalatest.BeforeAndAfterEach -import java.net.InetSocketAddress -import java.net.InetAddress -import akka.testkit.TimingTest -import akka.util.{ Timeout, Duration } +import java.net.{ InetSocketAddress, InetAddress } object BarrierSpec { case class Failed(ref: ActorRef, thr: Throwable) diff --git a/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala b/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala 
index 838681b071..29f404e1f9 100644 --- a/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala +++ b/akka-remote-tests/src/test/scala/akka/remote/testkit/MultiNodeSpec.scala @@ -3,16 +3,19 @@ */ package akka.remote.testkit +import language.implicitConversions + import java.net.InetSocketAddress import com.typesafe.config.{ ConfigObject, ConfigFactory, Config } import akka.actor.{ RootActorPath, ActorPath, ActorSystem, ExtendedActorSystem } -import akka.dispatch.Await -import akka.dispatch.Await.Awaitable +import akka.util.Timeout import akka.remote.testconductor.{ TestConductorExt, TestConductor, RoleName } -import akka.testkit.AkkaSpec -import akka.util.{ Timeout, NonFatal } -import akka.util.duration._ import akka.remote.RemoteActorRefProvider +import akka.testkit.AkkaSpec +import scala.concurrent.{ Await, Awaitable } +import scala.util.control.NonFatal +import scala.concurrent.util.Duration +import scala.concurrent.util.duration._ /** * Configure the role names and participants of the test, including configuration settings. diff --git a/akka-remote/src/main/scala/akka/remote/MessageSerializer.scala b/akka-remote/src/main/scala/akka/remote/MessageSerializer.scala index 6bd61dd812..b2413da3f9 100644 --- a/akka-remote/src/main/scala/akka/remote/MessageSerializer.scala +++ b/akka-remote/src/main/scala/akka/remote/MessageSerializer.scala @@ -4,6 +4,8 @@ package akka.remote +import language.existentials + import akka.remote.RemoteProtocol._ import com.google.protobuf.ByteString import akka.actor.ExtendedActorSystem diff --git a/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala b/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala index cdf9ad9d70..cff9dda274 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteActorRefProvider.scala @@ -6,10 +6,9 @@ package akka.remote import akka.actor._ import akka.dispatch._ -import akka.event.{ Logging, LoggingAdapter } -import akka.event.EventStream -import akka.serialization.Serialization -import akka.serialization.SerializationExtension +import akka.event.{ Logging, LoggingAdapter, EventStream } +import akka.serialization.{ Serialization, SerializationExtension } +import scala.concurrent.Future /** * Remote ActorRefProvider. Starts up actor on remote node and creates a RemoteActorRef representing it. 
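Alongside the package moves, many of the hunks above also add scala.language feature imports (`postfixOps`, `implicitConversions`, `existentials`); a small illustrative sketch (not part of the patch) of why `postfixOps` accompanies the new duration DSL:

import language.postfixOps                 // allows the postfix form below without a feature warning
import scala.concurrent.util.duration._

val a = 5 seconds      // postfix call, needs the feature import
val b = 5.seconds      // dotted form, no feature import required
val c = 200 millis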
@@ -38,7 +37,7 @@ private[akka] class RemoteActorRefProvider( override def rootGuardian: InternalActorRef = local.rootGuardian override def guardian: LocalActorRef = local.guardian override def systemGuardian: LocalActorRef = local.systemGuardian - override def terminationFuture: Promise[Unit] = local.terminationFuture + override def terminationFuture: Future[Unit] = local.terminationFuture override def dispatcher: MessageDispatcher = local.dispatcher override def registerTempActor(actorRef: InternalActorRef, path: ActorPath): Unit = local.registerTempActor(actorRef, path) override def unregisterTempActor(path: ActorPath): Unit = local.unregisterTempActor(path) @@ -92,7 +91,7 @@ private[akka] class RemoteActorRefProvider( system.eventStream.subscribe(remoteClientLifeCycleHandler, classOf[RemoteLifeCycleEvent]) - terminationFuture.onComplete(_ ⇒ transport.shutdown()) + system.registerOnTermination(transport.shutdown()) } def actorOf(system: ActorSystemImpl, props: Props, supervisor: InternalActorRef, path: ActorPath, @@ -236,7 +235,7 @@ private[akka] class RemoteActorRef private[akka] ( def suspend(): Unit = sendSystemMessage(Suspend()) - def resume(): Unit = sendSystemMessage(Resume()) + def resume(inResponseToFailure: Boolean): Unit = sendSystemMessage(Resume(inResponseToFailure)) def stop(): Unit = sendSystemMessage(Terminate()) diff --git a/akka-remote/src/main/scala/akka/remote/RemoteDeployer.scala b/akka-remote/src/main/scala/akka/remote/RemoteDeployer.scala index 58a3b8452d..f5589b3f72 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteDeployer.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteDeployer.scala @@ -22,7 +22,7 @@ private[akka] class RemoteDeployer(_settings: ActorSystem.Settings, _pm: Dynamic case AddressFromURIString(r) ⇒ Some(deploy.copy(scope = RemoteScope(r))) case str ⇒ if (!str.isEmpty) throw new ConfigurationException("unparseable remote node name " + str) - val nodes = deploy.config.getStringList("target.nodes").asScala map (AddressFromURIString(_)) + val nodes = deploy.config.getStringList("target.nodes").asScala.toIndexedSeq map (AddressFromURIString(_)) if (nodes.isEmpty || deploy.routerConfig == NoRouter) d else Some(deploy.copy(routerConfig = RemoteRouterConfig(deploy.routerConfig, nodes))) } diff --git a/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala b/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala index 88a7003309..c18635f1ca 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteSettings.scala @@ -4,7 +4,7 @@ package akka.remote import com.typesafe.config.Config -import akka.util.Duration +import scala.concurrent.util.Duration import java.util.concurrent.TimeUnit.MILLISECONDS class RemoteSettings(val config: Config, val systemName: String) { diff --git a/akka-remote/src/main/scala/akka/remote/RemoteTransport.scala b/akka-remote/src/main/scala/akka/remote/RemoteTransport.scala index f91c5b03d0..3aa3818de7 100644 --- a/akka-remote/src/main/scala/akka/remote/RemoteTransport.scala +++ b/akka-remote/src/main/scala/akka/remote/RemoteTransport.scala @@ -34,7 +34,7 @@ case class RemoteClientError( @transient @BeanProperty remote: RemoteTransport, @BeanProperty remoteAddress: Address) extends RemoteClientLifeCycleEvent { override def logLevel: Logging.LogLevel = Logging.ErrorLevel - override def toString: String = "RemoteClientError@" + remoteAddress + ": Error[" + cause + "]" + override def toString: String = "RemoteClientError@" + remoteAddress + ": 
Error[" + Logging.stackTraceFor(cause) + "]" } /** diff --git a/akka-remote/src/main/scala/akka/remote/netty/Client.scala b/akka-remote/src/main/scala/akka/remote/netty/Client.scala index 35c0674d23..0b66cd3b7c 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Client.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Client.scala @@ -17,7 +17,8 @@ import akka.remote.{ RemoteProtocol, RemoteMessage, RemoteLifeCycleEvent, Remote import akka.AkkaException import akka.event.Logging import akka.actor.{ DeadLetter, Address, ActorRef } -import akka.util.{ NonFatal, Switch } +import akka.util.Switch +import scala.util.control.NonFatal import org.jboss.netty.handler.ssl.SslHandler /** diff --git a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala index b87f873cde..6e36c63024 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/NettyRemoteSupport.scala @@ -20,7 +20,7 @@ import org.jboss.netty.util.{ DefaultObjectSizeEstimator, HashedWheelTimer } import akka.event.Logging import akka.remote.RemoteProtocol.AkkaRemoteProtocol import akka.remote.{ RemoteTransportException, RemoteTransport, RemoteActorRefProvider, RemoteActorRef, RemoteServerStarted } -import akka.util.NonFatal +import scala.util.control.NonFatal import akka.actor.{ ExtendedActorSystem, Address, ActorRef } import com.google.protobuf.MessageLite diff --git a/akka-remote/src/main/scala/akka/remote/netty/Settings.scala b/akka-remote/src/main/scala/akka/remote/netty/Settings.scala index 9babf6005c..16df3c0c1c 100644 --- a/akka-remote/src/main/scala/akka/remote/netty/Settings.scala +++ b/akka-remote/src/main/scala/akka/remote/netty/Settings.scala @@ -4,7 +4,7 @@ package akka.remote.netty import com.typesafe.config.Config -import akka.util.Duration +import scala.concurrent.util.Duration import java.util.concurrent.TimeUnit._ import java.net.InetAddress import akka.ConfigurationException diff --git a/akka-remote/src/main/scala/akka/serialization/DaemonMsgCreateSerializer.scala b/akka-remote/src/main/scala/akka/serialization/DaemonMsgCreateSerializer.scala index 2905c3ef3b..8f25021253 100644 --- a/akka-remote/src/main/scala/akka/serialization/DaemonMsgCreateSerializer.scala +++ b/akka-remote/src/main/scala/akka/serialization/DaemonMsgCreateSerializer.scala @@ -6,23 +6,13 @@ package akka.serialization import java.io.Serializable import com.google.protobuf.ByteString -import com.typesafe.config.Config -import com.typesafe.config.ConfigFactory -import akka.actor.Actor -import akka.actor.ActorRef -import akka.actor.Deploy -import akka.actor.ExtendedActorSystem -import akka.actor.NoScopeGiven -import akka.actor.Props -import akka.actor.Scope +import com.typesafe.config.{ Config, ConfigFactory } +import akka.actor.{ Actor, ActorRef, Deploy, ExtendedActorSystem, NoScopeGiven, Props, Scope } import akka.remote.DaemonMsgCreate -import akka.remote.RemoteProtocol.ActorRefProtocol -import akka.remote.RemoteProtocol.DaemonMsgCreateProtocol -import akka.remote.RemoteProtocol.DeployProtocol -import akka.remote.RemoteProtocol.PropsProtocol -import akka.routing.NoRouter -import akka.routing.RouterConfig +import akka.remote.RemoteProtocol.{ DaemonMsgCreateProtocol, DeployProtocol, PropsProtocol } +import akka.routing.{ NoRouter, RouterConfig } import akka.actor.FromClassCreator +import scala.reflect.ClassTag /** * Serializes akka's internal DaemonMsgCreate using protobuf @@ -99,7 
+89,7 @@ private[akka] class DaemonMsgCreateSerializer(val system: ExtendedActorSystem) e def props = { val creator = if (proto.getProps.hasFromClassCreator) { - system.dynamicAccess.getClassFor(proto.getProps.getFromClassCreator) match { + system.dynamicAccess.getClassFor[Actor](proto.getProps.getFromClassCreator) match { case Right(clazz) ⇒ FromClassCreator(clazz) case Left(e) ⇒ throw e } @@ -131,12 +121,11 @@ private[akka] class DaemonMsgCreateSerializer(val system: ExtendedActorSystem) e case Left(e) ⇒ throw e } - protected def deserialize[T: ClassManifest](data: ByteString, clazz: Class[T]): T = { + protected def deserialize[T: ClassTag](data: ByteString, clazz: Class[T]): T = { val bytes = data.toByteArray serialization.deserialize(bytes, clazz) match { - case Right(x) if classManifest[T].erasure.isInstance(x) ⇒ x.asInstanceOf[T] - case Right(other) ⇒ throw new IllegalArgumentException("Can't deserialize to [%s], got [%s]". - format(clazz.getName, other)) + case Right(x: T) ⇒ x + case Right(other) ⇒ throw new IllegalArgumentException("Can't deserialize to [%s], got [%s]".format(clazz.getName, other)) case Left(e) ⇒ // Fallback to the java serializer, because some interfaces don't implement java.io.Serializable, // but the impl instance does. This could be optimized by adding java serializers in reference.conf: @@ -144,8 +133,8 @@ private[akka] class DaemonMsgCreateSerializer(val system: ExtendedActorSystem) e // akka.routing.RouterConfig // akka.actor.Scope serialization.deserialize(bytes, classOf[java.io.Serializable]) match { - case Right(x) if classManifest[T].erasure.isInstance(x) ⇒ x.asInstanceOf[T] - case _ ⇒ throw e // the first exception + case Right(x: T) ⇒ x + case _ ⇒ throw e // the first exception } } } diff --git a/akka-remote/src/test/scala/akka/remote/NetworkFailureSpec.scala b/akka-remote/src/test/scala/akka/remote/NetworkFailureSpec.scala index 966120583a..053c9a93b6 100644 --- a/akka-remote/src/test/scala/akka/remote/NetworkFailureSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/NetworkFailureSpec.scala @@ -10,14 +10,16 @@ import akka.remote.netty.NettyRemoteTransport import akka.actor.Actor import akka.testkit.AkkaSpec import akka.testkit.DefaultTimeout -import akka.dispatch.Future import java.util.concurrent.{ TimeUnit, CountDownLatch } import java.util.concurrent.atomic.AtomicBoolean +import scala.concurrent.{ ExecutionContext, Future } trait NetworkFailureSpec extends DefaultTimeout { self: AkkaSpec ⇒ import Actor._ - import akka.util.Duration + import scala.concurrent.util.Duration + + import system.dispatcher val BytesPerSecond = "60KByte/s" val DelayMillis = "350ms" @@ -31,7 +33,7 @@ trait NetworkFailureSpec extends DefaultTimeout { self: AkkaSpec ⇒ Thread.sleep(duration.toMillis) restoreIP } catch { - case e ⇒ + case e: Throwable ⇒ dead.set(true) e.printStackTrace } @@ -46,7 +48,7 @@ trait NetworkFailureSpec extends DefaultTimeout { self: AkkaSpec ⇒ Thread.sleep(duration.toMillis) restoreIP } catch { - case e ⇒ + case e: Throwable ⇒ dead.set(true) e.printStackTrace } @@ -61,7 +63,7 @@ trait NetworkFailureSpec extends DefaultTimeout { self: AkkaSpec ⇒ Thread.sleep(duration.toMillis) restoreIP } catch { - case e ⇒ + case e: Throwable ⇒ dead.set(true) e.printStackTrace } diff --git a/akka-remote/src/test/scala/akka/remote/RemoteCommunicationSpec.scala b/akka-remote/src/test/scala/akka/remote/RemoteCommunicationSpec.scala index 7f92e3089b..1f0badebd2 100644 --- a/akka-remote/src/test/scala/akka/remote/RemoteCommunicationSpec.scala +++ 
b/akka-remote/src/test/scala/akka/remote/RemoteCommunicationSpec.scala @@ -6,7 +6,8 @@ package akka.remote import akka.testkit._ import akka.actor._ import com.typesafe.config._ -import akka.dispatch.{ Await, Future } +import scala.concurrent.Future +import scala.concurrent.Await import akka.pattern.ask object RemoteCommunicationSpec { @@ -131,6 +132,7 @@ akka { } "not fail ask across node boundaries" in { + import system.dispatcher val f = for (_ ← 1 to 1000) yield here ? "ping" mapTo manifest[(String, ActorRef)] Await.result(Future.sequence(f), remaining).map(_._1).toSet must be(Set("pong")) } diff --git a/akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala b/akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala index e35cd42cd8..38d3c29fa8 100644 --- a/akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/RemoteConfigSpec.scala @@ -3,10 +3,12 @@ */ package akka.remote +import language.postfixOps + import akka.testkit.AkkaSpec import akka.actor.ExtendedActorSystem -import akka.util.duration._ -import akka.util.Duration +import scala.concurrent.util.duration._ +import scala.concurrent.util.Duration import akka.remote.netty.NettyRemoteTransport @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) diff --git a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala index b4bf6fa56b..dfa47665de 100644 --- a/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/Ticket1978CommunicationSpec.scala @@ -3,17 +3,21 @@ */ package akka.remote +import language.postfixOps + import akka.testkit._ import akka.actor._ import com.typesafe.config._ -import akka.dispatch.{ Await, Future } +import scala.concurrent.Future import akka.pattern.ask import java.io.File import java.security.{ NoSuchAlgorithmException, SecureRandom, PrivilegedAction, AccessController } -import netty.{ NettySettings, NettySSLSupport } +import akka.remote.netty.{ NettySettings, NettySSLSupport } import javax.net.ssl.SSLException -import akka.util.{ Timeout, Duration } -import akka.util.duration._ +import akka.util.Timeout +import scala.concurrent.Await +import scala.concurrent.util.duration._ +import scala.concurrent.util.Duration import akka.event.{ Logging, NoLogging, LoggingAdapter } object Configuration { @@ -137,6 +141,7 @@ abstract class Ticket1978CommunicationSpec(val cipherConfig: CipherConfig) exten } "support ask" in { + import system.dispatcher val here = system.actorFor(otherAddress.toString + "/user/echo") val f = for (i ← 1 to 1000) yield here ? 
(("ping", i)) mapTo manifest[((String, Int), ActorRef)] diff --git a/akka-remote/src/test/scala/akka/remote/Ticket1978ConfigSpec.scala b/akka-remote/src/test/scala/akka/remote/Ticket1978ConfigSpec.scala index 0a39d20a9a..be172a563b 100644 --- a/akka-remote/src/test/scala/akka/remote/Ticket1978ConfigSpec.scala +++ b/akka-remote/src/test/scala/akka/remote/Ticket1978ConfigSpec.scala @@ -3,9 +3,8 @@ package akka.remote import akka.testkit._ import akka.actor._ import com.typesafe.config._ -import akka.actor.ExtendedActorSystem -import akka.util.duration._ -import akka.util.Duration +import scala.concurrent.util.duration._ +import scala.concurrent.util.Duration import akka.remote.netty.NettyRemoteTransport import java.util.ArrayList diff --git a/akka-remote/src/test/scala/akka/serialization/DaemonMsgCreateSerializerSpec.scala b/akka-remote/src/test/scala/akka/serialization/DaemonMsgCreateSerializerSpec.scala index e38a3e1d1f..89532bea91 100644 --- a/akka-remote/src/test/scala/akka/serialization/DaemonMsgCreateSerializerSpec.scala +++ b/akka-remote/src/test/scala/akka/serialization/DaemonMsgCreateSerializerSpec.scala @@ -4,20 +4,14 @@ package akka.serialization +import language.postfixOps + import com.typesafe.config.ConfigFactory import akka.testkit.AkkaSpec -import akka.actor.Actor -import akka.actor.Address -import akka.actor.Props -import akka.actor.Deploy -import akka.actor.OneForOneStrategy -import akka.actor.SupervisorStrategy -import akka.remote.DaemonMsgCreate -import akka.remote.RemoteScope -import akka.routing.RoundRobinRouter -import akka.routing.FromConfig -import akka.util.duration._ -import akka.actor.FromClassCreator +import akka.actor.{ Actor, Address, Props, Deploy, OneForOneStrategy, SupervisorStrategy, FromClassCreator } +import akka.remote.{ DaemonMsgCreate, RemoteScope } +import akka.routing.{ RoundRobinRouter, FromConfig } +import scala.concurrent.util.duration._ object DaemonMsgCreateSerializerSpec { class MyActor extends Actor { @@ -92,6 +86,7 @@ class DaemonMsgCreateSerializerSpec extends AkkaSpec { ser.deserialize(bytes.asInstanceOf[Array[Byte]], classOf[DaemonMsgCreate]) match { case Left(exception) ⇒ fail(exception) case Right(m: DaemonMsgCreate) ⇒ assertDaemonMsgCreate(msg, m) + case other ⇒ throw new MatchError(other) } } diff --git a/akka-samples/akka-sample-fsm/src/main/scala/Buncher.scala b/akka-samples/akka-sample-fsm/src/main/scala/Buncher.scala index e13f7e6a98..fe1b31ce45 100644 --- a/akka-samples/akka-sample-fsm/src/main/scala/Buncher.scala +++ b/akka-samples/akka-sample-fsm/src/main/scala/Buncher.scala @@ -4,8 +4,8 @@ package sample.fsm.buncher import akka.actor.ActorRefFactory -import scala.reflect.ClassManifest -import akka.util.Duration +import scala.reflect.ClassTag +import scala.concurrent.util.Duration import akka.actor.{ FSM, Actor, ActorRef } /* @@ -26,18 +26,16 @@ object GenericBuncher { case object Flush // send out current queue immediately case object Stop // poison pill - class MsgExtractor[A: Manifest] { - def unapply(m: AnyRef): Option[A] = { - if (ClassManifest.fromClass(m.getClass) <:< manifest[A]) { + class MsgExtractor[A: ClassTag] { + def unapply(m: AnyRef): Option[A] = + if (implicitly[ClassTag[A]].runtimeClass isAssignableFrom m.getClass) Some(m.asInstanceOf[A]) - } else { + else None - } - } } } -abstract class GenericBuncher[A: Manifest, B](val singleTimeout: Duration, val multiTimeout: Duration) +abstract class GenericBuncher[A: ClassTag, B](val singleTimeout: Duration, val multiTimeout: Duration) extends Actor with 
FSM[GenericBuncher.State, B] { import GenericBuncher._ import FSM._ @@ -87,7 +85,7 @@ object Buncher { val Flush = GenericBuncher.Flush } -class Buncher[A: Manifest](singleTimeout: Duration, multiTimeout: Duration) +class Buncher[A: ClassTag](singleTimeout: Duration, multiTimeout: Duration) extends GenericBuncher[A, List[A]](singleTimeout, multiTimeout) { import Buncher._ diff --git a/akka-samples/akka-sample-fsm/src/main/scala/DiningHakkersOnBecome.scala b/akka-samples/akka-sample-fsm/src/main/scala/DiningHakkersOnBecome.scala index 65d7d7c23c..9fc39ec2a6 100644 --- a/akka-samples/akka-sample-fsm/src/main/scala/DiningHakkersOnBecome.scala +++ b/akka-samples/akka-sample-fsm/src/main/scala/DiningHakkersOnBecome.scala @@ -3,11 +3,13 @@ */ package sample.fsm.dining.become +import language.postfixOps + //Akka adaptation of //http://www.dalnefre.com/wp/2010/08/dining-philosophers-in-humus/ import akka.actor._ -import akka.util.duration._ +import scala.concurrent.util.duration._ /* * First we define our messages, they basically speak for themselves diff --git a/akka-samples/akka-sample-fsm/src/main/scala/DiningHakkersOnFsm.scala b/akka-samples/akka-sample-fsm/src/main/scala/DiningHakkersOnFsm.scala index 7928a85334..e97c76e6af 100644 --- a/akka-samples/akka-sample-fsm/src/main/scala/DiningHakkersOnFsm.scala +++ b/akka-samples/akka-sample-fsm/src/main/scala/DiningHakkersOnFsm.scala @@ -3,10 +3,12 @@ */ package sample.fsm.dining.fsm +import language.postfixOps + import akka.actor._ import akka.actor.FSM._ -import akka.util.Duration -import akka.util.duration._ +import scala.concurrent.util.Duration +import scala.concurrent.util.duration._ /* * Some messages for the chopstick diff --git a/akka-slf4j/src/test/scala/akka/event/slf4j/Slf4jEventHandlerSpec.scala b/akka-slf4j/src/test/scala/akka/event/slf4j/Slf4jEventHandlerSpec.scala index cac488c77e..77b10039ad 100644 --- a/akka-slf4j/src/test/scala/akka/event/slf4j/Slf4jEventHandlerSpec.scala +++ b/akka-slf4j/src/test/scala/akka/event/slf4j/Slf4jEventHandlerSpec.scala @@ -3,10 +3,12 @@ */ package akka.event.slf4j +import language.postfixOps + import akka.testkit.AkkaSpec import akka.actor.Actor import akka.actor.ActorLogging -import akka.util.duration._ +import scala.concurrent.util.duration._ import akka.event.Logging import akka.actor.Props import ch.qos.logback.core.OutputStreamAppender diff --git a/akka-testkit/src/main/java/akka/testkit/JavaTestKit.java b/akka-testkit/src/main/java/akka/testkit/JavaTestKit.java index 08846a4ad4..fda0eaa538 100644 --- a/akka-testkit/src/main/java/akka/testkit/JavaTestKit.java +++ b/akka-testkit/src/main/java/akka/testkit/JavaTestKit.java @@ -11,7 +11,7 @@ import akka.event.Logging.LogEvent; import akka.japi.PurePartialFunction; import akka.japi.CachingPartialFunction; import akka.japi.Util; -import akka.util.Duration; +import scala.concurrent.util.Duration; /** * Java API for the TestProbe. Proper JavaDocs to come once JavaDoccing is implemented. @@ -191,12 +191,12 @@ public class JavaTestKit { public Object[] expectMsgAllOf(Object... msgs) { return (Object[]) p.expectMsgAllOf(Util.arrayToSeq(msgs)).toArray( - Util.manifest(Object.class)); + Util.classTag(Object.class)); } public Object[] expectMsgAllOf(Duration max, Object... 
msgs) { return (Object[]) p.expectMsgAllOf(max, Util.arrayToSeq(msgs)).toArray( - Util.manifest(Object.class)); + Util.classTag(Object.class)); } @SuppressWarnings("unchecked") @@ -241,7 +241,7 @@ public class JavaTestKit { public T match(Object msg) { return ReceiveWhile.this.match(msg); } - }).toArray(Util.manifest(clazz)); + }).toArray(Util.classTag(clazz)); } protected RuntimeException noMatch() { diff --git a/akka-testkit/src/main/scala/akka/testkit/CallingThreadDispatcher.scala b/akka-testkit/src/main/scala/akka/testkit/CallingThreadDispatcher.scala index 1732d5faf3..26efaef84d 100644 --- a/akka-testkit/src/main/scala/akka/testkit/CallingThreadDispatcher.scala +++ b/akka-testkit/src/main/scala/akka/testkit/CallingThreadDispatcher.scala @@ -3,18 +3,21 @@ */ package akka.testkit +import language.postfixOps + import java.lang.ref.WeakReference import java.util.concurrent.locks.ReentrantLock import java.util.LinkedList import scala.annotation.tailrec import com.typesafe.config.Config import akka.actor.{ ActorInitializationException, ExtensionIdProvider, ExtensionId, Extension, ExtendedActorSystem, ActorRef, ActorCell } -import akka.dispatch.{ MailboxType, TaskInvocation, SystemMessage, Suspend, Resume, MessageDispatcherConfigurator, MessageDispatcher, Mailbox, Envelope, DispatcherPrerequisites, DefaultSystemMessageQueue } -import akka.util.duration.intToDurationInt -import akka.util.{ Switch, Duration } -import akka.util.NonFatal +import akka.dispatch.{ MessageQueue, MailboxType, TaskInvocation, SystemMessage, Suspend, Resume, MessageDispatcherConfigurator, MessageDispatcher, Mailbox, Envelope, DispatcherPrerequisites, DefaultSystemMessageQueue } +import scala.concurrent.util.duration.intToDurationInt +import akka.util.Switch +import scala.concurrent.util.Duration +import scala.concurrent.Awaitable import akka.actor.ActorContext -import akka.dispatch.MessageQueue +import scala.util.control.NonFatal /* * Locking rules: @@ -74,7 +77,7 @@ private[testkit] class CallingThreadDispatcherQueues extends Extension { if (queues contains mbox) { for { ref ← queues(mbox) - val q = ref.get + q = ref.get if (q ne null) && (q ne own) } { val owner = mbox.actor.self @@ -151,7 +154,7 @@ class CallingThreadDispatcher( override def suspend(actor: ActorCell) { actor.mailbox match { - case m: CallingThreadMailbox ⇒ m.suspendSwitch.switchOn + case m: CallingThreadMailbox ⇒ m.suspendSwitch.switchOn; m.suspend() case m ⇒ m.systemEnqueue(actor.self, Suspend()) } } @@ -163,11 +166,12 @@ class CallingThreadDispatcher( val wasActive = queue.isActive val switched = mbox.suspendSwitch.switchOff { CallingThreadDispatcherQueues(actor.system).gatherFromAllOtherQueues(mbox, queue) + mbox.resume() } if (switched && !wasActive) { runQueue(mbox, queue) } - case m ⇒ m.systemEnqueue(actor.self, Resume()) + case m ⇒ m.systemEnqueue(actor.self, Resume(false)) } } diff --git a/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala b/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala index a5b6bbbcd2..b073cca23d 100644 --- a/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala +++ b/akka-testkit/src/main/scala/akka/testkit/TestActorRef.scala @@ -7,6 +7,7 @@ package akka.testkit import akka.actor._ import java.util.concurrent.atomic.AtomicLong import akka.dispatch._ +import scala.concurrent.Await import akka.pattern.ask /** diff --git a/akka-testkit/src/main/scala/akka/testkit/TestBarrier.scala b/akka-testkit/src/main/scala/akka/testkit/TestBarrier.scala index f2457c437c..dd13c22309 100644 --- 
a/akka-testkit/src/main/scala/akka/testkit/TestBarrier.scala +++ b/akka-testkit/src/main/scala/akka/testkit/TestBarrier.scala @@ -4,7 +4,7 @@ package akka.testkit -import akka.util.Duration +import scala.concurrent.util.Duration import java.util.concurrent.{ CyclicBarrier, TimeUnit, TimeoutException } import akka.actor.ActorSystem diff --git a/akka-testkit/src/main/scala/akka/testkit/TestEventListener.scala b/akka-testkit/src/main/scala/akka/testkit/TestEventListener.scala index fac3739d15..5d6d7ad0f3 100644 --- a/akka-testkit/src/main/scala/akka/testkit/TestEventListener.scala +++ b/akka-testkit/src/main/scala/akka/testkit/TestEventListener.scala @@ -3,6 +3,8 @@ */ package akka.testkit +import language.existentials + import scala.util.matching.Regex import akka.actor.{ DeadLetter, ActorSystem, Terminated, UnhandledMessage } import akka.dispatch.{ SystemMessage, Terminate } @@ -10,7 +12,7 @@ import akka.event.Logging.{ Warning, LogEvent, InitializeLogger, Info, Error, De import akka.event.Logging import java.lang.{ Iterable ⇒ JIterable } import scala.collection.JavaConverters -import akka.util.Duration +import scala.concurrent.util.Duration /** * Implementation helpers of the EventFilter facilities: send `Mute` diff --git a/akka-testkit/src/main/scala/akka/testkit/TestFSMRef.scala b/akka-testkit/src/main/scala/akka/testkit/TestFSMRef.scala index f6d0ecfbce..bab4601587 100644 --- a/akka-testkit/src/main/scala/akka/testkit/TestFSMRef.scala +++ b/akka-testkit/src/main/scala/akka/testkit/TestFSMRef.scala @@ -5,7 +5,7 @@ package akka.testkit import akka.actor._ -import akka.util._ +import scala.concurrent.util.Duration import akka.dispatch.DispatcherPrerequisites /** diff --git a/akka-testkit/src/main/scala/akka/testkit/TestKit.scala b/akka-testkit/src/main/scala/akka/testkit/TestKit.scala index d8916167f4..8bbb18bc64 100644 --- a/akka-testkit/src/main/scala/akka/testkit/TestKit.scala +++ b/akka-testkit/src/main/scala/akka/testkit/TestKit.scala @@ -3,16 +3,16 @@ */ package akka.testkit +import language.postfixOps + import akka.actor._ -import Actor._ -import akka.util.Duration -import akka.util.duration._ +import akka.actor.Actor._ +import scala.concurrent.util.Duration +import scala.concurrent.util.duration._ import java.util.concurrent.{ BlockingDeque, LinkedBlockingDeque, TimeUnit, atomic } import atomic.AtomicInteger import scala.annotation.tailrec -import akka.actor.ActorSystem -import akka.util.Timeout -import akka.util.BoxedType +import akka.util.{ Timeout, BoxedType } import scala.annotation.varargs import akka.japi.PurePartialFunction diff --git a/akka-testkit/src/main/scala/akka/testkit/TestKitExtension.scala b/akka-testkit/src/main/scala/akka/testkit/TestKitExtension.scala index 71ba8d0eac..964641cf19 100644 --- a/akka-testkit/src/main/scala/akka/testkit/TestKitExtension.scala +++ b/akka-testkit/src/main/scala/akka/testkit/TestKitExtension.scala @@ -4,7 +4,7 @@ package akka.testkit import com.typesafe.config.Config -import akka.util.Duration +import scala.concurrent.util.Duration import akka.util.Timeout import java.util.concurrent.TimeUnit.MILLISECONDS import akka.actor.{ ExtensionId, ActorSystem, Extension, ExtendedActorSystem } diff --git a/akka-testkit/src/main/scala/akka/testkit/TestLatch.scala b/akka-testkit/src/main/scala/akka/testkit/TestLatch.scala index f9e426e20e..f045552d44 100644 --- a/akka-testkit/src/main/scala/akka/testkit/TestLatch.scala +++ b/akka-testkit/src/main/scala/akka/testkit/TestLatch.scala @@ -4,9 +4,9 @@ package akka.testkit -import akka.util.Duration 
+import scala.concurrent.util.Duration import akka.actor.ActorSystem -import akka.dispatch.Await.{ CanAwait, Awaitable } +import scala.concurrent.{ Await, CanAwait, Awaitable } import java.util.concurrent.{ TimeoutException, CountDownLatch, TimeUnit } /** diff --git a/akka-testkit/src/main/scala/akka/testkit/package.scala b/akka-testkit/src/main/scala/akka/testkit/package.scala index 6f7bf965ca..26bdc49e5b 100644 --- a/akka-testkit/src/main/scala/akka/testkit/package.scala +++ b/akka-testkit/src/main/scala/akka/testkit/package.scala @@ -1,7 +1,9 @@ package akka +import language.implicitConversions + import akka.actor.ActorSystem -import akka.util.Duration +import scala.concurrent.util.Duration import java.util.concurrent.TimeUnit.MILLISECONDS package object testkit { @@ -32,7 +34,7 @@ package object testkit { * Scala API. Scale timeouts (durations) during tests with the configured * 'akka.test.timefactor'. * Implicit conversion to add dilated function to Duration. - * import akka.util.duration._ + * import scala.concurrent.util.duration._ * import akka.testkit._ * 10.milliseconds.dilated * diff --git a/akka-testkit/src/test/scala/akka/testkit/AkkaSpec.scala b/akka-testkit/src/test/scala/akka/testkit/AkkaSpec.scala index f381e53013..67478b35e3 100644 --- a/akka-testkit/src/test/scala/akka/testkit/AkkaSpec.scala +++ b/akka-testkit/src/test/scala/akka/testkit/AkkaSpec.scala @@ -3,20 +3,19 @@ */ package akka.testkit +import language.{ postfixOps, reflectiveCalls } + import org.scalatest.{ WordSpec, BeforeAndAfterAll, Tag } import org.scalatest.matchers.MustMatchers -import akka.actor.ActorSystem -import akka.actor.{ Actor, ActorRef, Props } +import akka.actor.{ Actor, ActorRef, Props, ActorSystem, PoisonPill, DeadLetter } import akka.event.{ Logging, LoggingAdapter } -import akka.util.duration._ -import com.typesafe.config.Config -import com.typesafe.config.ConfigFactory -import akka.actor.PoisonPill -import akka.actor.DeadLetter +import scala.concurrent.util.duration._ +import scala.concurrent.Await +import com.typesafe.config.{ Config, ConfigFactory } import java.util.concurrent.TimeoutException -import akka.dispatch.{ Await, MessageDispatcher } -import akka.dispatch.Dispatchers +import akka.dispatch.{ MessageDispatcher, Dispatchers } import akka.pattern.ask +import akka.actor.ActorSystemImpl object TimingTest extends Tag("timing") object LongRunningTest extends Tag("long-running") @@ -78,7 +77,9 @@ abstract class AkkaSpec(_system: ActorSystem) beforeShutdown() system.shutdown() try system.awaitTermination(5 seconds) catch { - case _: TimeoutException ⇒ system.log.warning("Failed to stop [{}] within 5 seconds", system.name) + case _: TimeoutException ⇒ + system.log.warning("Failed to stop [{}] within 5 seconds", system.name) + println(system.asInstanceOf[ActorSystemImpl].printTree) } atTermination() } diff --git a/akka-testkit/src/test/scala/akka/testkit/TestActorRefSpec.scala b/akka-testkit/src/test/scala/akka/testkit/TestActorRefSpec.scala index 492c44408c..7df30be4c7 100644 --- a/akka-testkit/src/test/scala/akka/testkit/TestActorRefSpec.scala +++ b/akka-testkit/src/test/scala/akka/testkit/TestActorRefSpec.scala @@ -3,12 +3,14 @@ */ package akka.testkit +import language.{ postfixOps, reflectiveCalls } + import org.scalatest.matchers.MustMatchers import org.scalatest.{ BeforeAndAfterEach, WordSpec } import akka.actor._ import akka.event.Logging.Warning -import akka.dispatch.{ Future, Promise, Await } -import akka.util.duration._ +import scala.concurrent.{ Future, Promise, Await } +import 
scala.concurrent.util.duration._ import akka.actor.ActorSystem import akka.pattern.ask import akka.dispatch.Dispatcher diff --git a/akka-testkit/src/test/scala/akka/testkit/TestFSMRefSpec.scala b/akka-testkit/src/test/scala/akka/testkit/TestFSMRefSpec.scala index 86c6a8c7c5..256273bc1f 100644 --- a/akka-testkit/src/test/scala/akka/testkit/TestFSMRefSpec.scala +++ b/akka-testkit/src/test/scala/akka/testkit/TestFSMRefSpec.scala @@ -4,10 +4,12 @@ package akka.testkit +import language.postfixOps + import org.scalatest.matchers.MustMatchers import org.scalatest.{ BeforeAndAfterEach, WordSpec } import akka.actor._ -import akka.util.duration._ +import scala.concurrent.util.duration._ @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) class TestFSMRefSpec extends AkkaSpec { diff --git a/akka-testkit/src/test/scala/akka/testkit/TestProbeSpec.scala b/akka-testkit/src/test/scala/akka/testkit/TestProbeSpec.scala index 6e764c96dc..0d1e139e81 100644 --- a/akka-testkit/src/test/scala/akka/testkit/TestProbeSpec.scala +++ b/akka-testkit/src/test/scala/akka/testkit/TestProbeSpec.scala @@ -1,11 +1,13 @@ package akka.testkit +import language.postfixOps + import org.scalatest.WordSpec import org.scalatest.matchers.MustMatchers import org.scalatest.{ BeforeAndAfterEach, WordSpec } import akka.actor._ -import akka.util.duration._ -import akka.dispatch.{ Await, Future } +import scala.concurrent.{ Future, Await } +import scala.concurrent.util.duration._ import akka.pattern.ask @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) diff --git a/akka-testkit/src/test/scala/akka/testkit/TestTimeSpec.scala b/akka-testkit/src/test/scala/akka/testkit/TestTimeSpec.scala index 0d0bab20b6..aac0f490b0 100644 --- a/akka-testkit/src/test/scala/akka/testkit/TestTimeSpec.scala +++ b/akka-testkit/src/test/scala/akka/testkit/TestTimeSpec.scala @@ -2,7 +2,7 @@ package akka.testkit import org.scalatest.matchers.MustMatchers import org.scalatest.{ BeforeAndAfterEach, WordSpec } -import akka.util.Duration +import scala.concurrent.util.Duration import com.typesafe.config.Config @org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner]) diff --git a/akka-transactor/src/main/scala/akka/transactor/Transactor.scala b/akka-transactor/src/main/scala/akka/transactor/Transactor.scala index fd802e1f21..3df222fe4d 100644 --- a/akka-transactor/src/main/scala/akka/transactor/Transactor.scala +++ b/akka-transactor/src/main/scala/akka/transactor/Transactor.scala @@ -4,6 +4,8 @@ package akka.transactor +import language.postfixOps + import akka.actor.{ Actor, ActorRef } import scala.concurrent.stm.InTxn diff --git a/akka-transactor/src/main/scala/akka/transactor/TransactorExtension.scala b/akka-transactor/src/main/scala/akka/transactor/TransactorExtension.scala index 85cb8c46fd..6b4a0157bc 100644 --- a/akka-transactor/src/main/scala/akka/transactor/TransactorExtension.scala +++ b/akka-transactor/src/main/scala/akka/transactor/TransactorExtension.scala @@ -7,7 +7,7 @@ import akka.actor.{ ActorSystem, ExtensionId, ExtensionIdProvider, ExtendedActor import akka.actor.Extension import com.typesafe.config.Config import akka.util.Timeout -import akka.util.Duration +import scala.concurrent.util.Duration import java.util.concurrent.TimeUnit.MILLISECONDS /** diff --git a/akka-transactor/src/test/java/akka/transactor/UntypedCoordinatedIncrementTest.java b/akka-transactor/src/test/java/akka/transactor/UntypedCoordinatedIncrementTest.java index 36c063feaa..60c4873b27 100644 --- 
a/akka-transactor/src/test/java/akka/transactor/UntypedCoordinatedIncrementTest.java +++ b/akka-transactor/src/test/java/akka/transactor/UntypedCoordinatedIncrementTest.java @@ -16,8 +16,8 @@ import akka.actor.ActorRef; import akka.actor.Props; import akka.actor.UntypedActor; import akka.actor.UntypedActorFactory; -import akka.dispatch.Await; -import akka.dispatch.Future; +import scala.concurrent.Await; +import scala.concurrent.Future; import static akka.pattern.Patterns.ask; import akka.testkit.AkkaSpec; import akka.testkit.EventFilter; diff --git a/akka-transactor/src/test/java/akka/transactor/UntypedTransactorTest.java b/akka-transactor/src/test/java/akka/transactor/UntypedTransactorTest.java index b7dc99389a..b24d000ced 100644 --- a/akka-transactor/src/test/java/akka/transactor/UntypedTransactorTest.java +++ b/akka-transactor/src/test/java/akka/transactor/UntypedTransactorTest.java @@ -16,8 +16,8 @@ import akka.actor.ActorRef; import akka.actor.Props; import akka.actor.UntypedActor; import akka.actor.UntypedActorFactory; -import akka.dispatch.Await; -import akka.dispatch.Future; +import scala.concurrent.Await; +import scala.concurrent.Future; import static akka.pattern.Patterns.ask; import akka.testkit.AkkaSpec; import akka.testkit.EventFilter; diff --git a/akka-transactor/src/test/scala/akka/transactor/CoordinatedIncrementSpec.scala b/akka-transactor/src/test/scala/akka/transactor/CoordinatedIncrementSpec.scala index c76a5a701c..ecaa47fe96 100644 --- a/akka-transactor/src/test/scala/akka/transactor/CoordinatedIncrementSpec.scala +++ b/akka-transactor/src/test/scala/akka/transactor/CoordinatedIncrementSpec.scala @@ -7,8 +7,8 @@ package akka.transactor import org.scalatest.BeforeAndAfterAll import akka.actor._ -import akka.dispatch.Await -import akka.util.duration._ +import scala.concurrent.Await +import scala.concurrent.util.duration._ import akka.util.Timeout import akka.testkit._ import scala.concurrent.stm._ diff --git a/akka-transactor/src/test/scala/akka/transactor/FickleFriendsSpec.scala b/akka-transactor/src/test/scala/akka/transactor/FickleFriendsSpec.scala index 9deee7b9cc..4e1219324e 100644 --- a/akka-transactor/src/test/scala/akka/transactor/FickleFriendsSpec.scala +++ b/akka-transactor/src/test/scala/akka/transactor/FickleFriendsSpec.scala @@ -4,18 +4,21 @@ package akka.transactor +import language.postfixOps + import org.scalatest.BeforeAndAfterAll import akka.actor._ -import akka.dispatch.Await -import akka.util.duration._ -import akka.util.Timeout +import scala.concurrent.Await +import scala.concurrent.util.duration._ import akka.testkit._ import akka.testkit.TestEvent.Mute import scala.concurrent.stm._ import scala.util.Random.{ nextInt ⇒ random } import java.util.concurrent.CountDownLatch import akka.pattern.{ AskTimeoutException, ask } +import akka.util.Timeout +import scala.util.control.NonFatal object FickleFriends { case class FriendlyIncrement(friends: Seq[ActorRef], timeout: Timeout, latch: CountDownLatch) @@ -49,7 +52,7 @@ object FickleFriends { } } } catch { - case _ ⇒ () // swallow exceptions + case NonFatal(_) ⇒ () // swallow exceptions } } } diff --git a/akka-transactor/src/test/scala/akka/transactor/TransactorSpec.scala b/akka-transactor/src/test/scala/akka/transactor/TransactorSpec.scala index df9723ffd2..cb4d2d633b 100644 --- a/akka-transactor/src/test/scala/akka/transactor/TransactorSpec.scala +++ b/akka-transactor/src/test/scala/akka/transactor/TransactorSpec.scala @@ -4,9 +4,11 @@ package akka.transactor +import language.postfixOps + import akka.actor._ 
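In the FickleFriendsSpec hunk above, the catch-all `case _ ⇒ ()` becomes `case NonFatal(_) ⇒ ()`, so fatal JVM errors (VirtualMachineErrors such as OutOfMemoryError) are no longer silently swallowed. A small illustrative wrapper showing the same pattern (hypothetical, not part of the patch):

    import scala.util.control.NonFatal

    object NonFatalSketch {
      // Runs the block and swallows only non-fatal failures; fatal errors still propagate.
      def quietly(body: ⇒ Unit): Unit =
        try body
        catch { case NonFatal(_) ⇒ () }
    }
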
-import akka.dispatch.Await -import akka.util.duration._ +import scala.concurrent.Await +import scala.concurrent.util.duration._ import akka.util.Timeout import akka.testkit._ import scala.concurrent.stm._ diff --git a/akka-zeromq/src/main/scala/akka/zeromq/ConcurrentSocketActor.scala b/akka-zeromq/src/main/scala/akka/zeromq/ConcurrentSocketActor.scala index 1a54cbeb29..945f0b6df3 100644 --- a/akka-zeromq/src/main/scala/akka/zeromq/ConcurrentSocketActor.scala +++ b/akka-zeromq/src/main/scala/akka/zeromq/ConcurrentSocketActor.scala @@ -6,12 +6,13 @@ package akka.zeromq import org.zeromq.ZMQ.{ Socket, Poller } import org.zeromq.{ ZMQ ⇒ JZMQ } import akka.actor._ -import akka.dispatch.{ Promise, Future } +import scala.concurrent.{ Promise, Future } +import scala.concurrent.util.Duration +import scala.annotation.tailrec +import scala.collection.mutable.ListBuffer import akka.event.Logging -import annotation.tailrec import java.util.concurrent.TimeUnit -import collection.mutable.ListBuffer -import akka.util.{ NonFatal, Duration } +import scala.util.control.NonFatal private[zeromq] object ConcurrentSocketActor { private sealed trait PollMsg diff --git a/akka-zeromq/src/main/scala/akka/zeromq/SocketOption.scala b/akka-zeromq/src/main/scala/akka/zeromq/SocketOption.scala index f5c782e849..1d393afef0 100644 --- a/akka-zeromq/src/main/scala/akka/zeromq/SocketOption.scala +++ b/akka-zeromq/src/main/scala/akka/zeromq/SocketOption.scala @@ -6,8 +6,8 @@ package akka.zeromq import com.google.protobuf.Message import org.zeromq.{ ZMQ ⇒ JZMQ } import akka.actor.ActorRef -import akka.util.duration._ -import akka.util.Duration +import scala.concurrent.util.duration._ +import scala.concurrent.util.Duration import org.zeromq.ZMQ.{ Poller, Socket } /** diff --git a/akka-zeromq/src/main/scala/akka/zeromq/ZeroMQExtension.scala b/akka-zeromq/src/main/scala/akka/zeromq/ZeroMQExtension.scala index b5a5e29310..ed2cc2a596 100644 --- a/akka-zeromq/src/main/scala/akka/zeromq/ZeroMQExtension.scala +++ b/akka-zeromq/src/main/scala/akka/zeromq/ZeroMQExtension.scala @@ -6,9 +6,9 @@ package akka.zeromq import org.zeromq.{ ZMQ ⇒ JZMQ } import org.zeromq.ZMQ.Poller import akka.actor._ -import akka.dispatch.{ Await } import akka.pattern.ask -import akka.util.Duration +import scala.concurrent.Await +import scala.concurrent.util.Duration import java.util.concurrent.TimeUnit import akka.util.Timeout import org.zeromq.ZMQException @@ -33,9 +33,6 @@ object ZeroMQExtension extends ExtensionId[ZeroMQExtension] with ExtensionIdProv private val minVersionString = "2.1.0" private val minVersion = JZMQ.makeVersion(2, 1, 0) - - private[zeromq] def check[TOption <: SocketOption: Manifest](parameters: Seq[SocketOption]) = - parameters exists { p ⇒ ClassManifest.singleType(p) <:< manifest[TOption] } } /** @@ -64,7 +61,10 @@ class ZeroMQExtension(system: ActorSystem) extends Extension { */ def newSocketProps(socketParameters: SocketOption*): Props = { verifyZeroMQVersion - require(ZeroMQExtension.check[SocketType.ZMQSocketType](socketParameters), "A socket type is required") + require(socketParameters exists { + case s: SocketType.ZMQSocketType ⇒ true + case _ ⇒ false + }, "A socket type is required") Props(new ConcurrentSocketActor(socketParameters)).withDispatcher("akka.zeromq.socket-dispatcher") } diff --git a/akka-zeromq/src/main/scala/akka/zeromq/package.scala b/akka-zeromq/src/main/scala/akka/zeromq/package.scala index 1241700fcb..2795505fa0 100644 --- a/akka-zeromq/src/main/scala/akka/zeromq/package.scala +++ 
b/akka-zeromq/src/main/scala/akka/zeromq/package.scala @@ -3,20 +3,14 @@ */ package akka +import language.implicitConversions + import actor.ActorSystem /** * A package object with an implicit conversion for the actor system as a convenience */ package object zeromq { - - /** - * Creates a zeromq actor system implicitly - * @param system - * @return An augmented [[akka.actor.ActorSystem]] - */ - implicit def zeromqSystem(system: ActorSystem): ZeroMQExtension = ZeroMQExtension(system) - /** * Convenience accessor to subscribe to all events */ diff --git a/akka-zeromq/src/test/scala/akka/zeromq/ConcurrentSocketActorSpec.scala b/akka-zeromq/src/test/scala/akka/zeromq/ConcurrentSocketActorSpec.scala index e075ca2158..4a0864ea1b 100644 --- a/akka-zeromq/src/test/scala/akka/zeromq/ConcurrentSocketActorSpec.scala +++ b/akka-zeromq/src/test/scala/akka/zeromq/ConcurrentSocketActorSpec.scala @@ -3,9 +3,11 @@ */ package akka.zeromq +import language.postfixOps + import org.scalatest.matchers.MustMatchers import akka.testkit.{ TestProbe, DefaultTimeout, AkkaSpec } -import akka.util.duration._ +import scala.concurrent.util.duration._ import akka.actor.{ Cancellable, Actor, Props, ActorRef } import akka.util.Timeout diff --git a/project/AkkaBuild.scala b/project/AkkaBuild.scala index ea36644120..478d54c685 100644 --- a/project/AkkaBuild.scala +++ b/project/AkkaBuild.scala @@ -20,10 +20,17 @@ import Sphinx.{ sphinxDocs, sphinxHtml, sphinxLatex, sphinxPdf, sphinxPygments, object AkkaBuild extends Build { System.setProperty("akka.mode", "test") // Is there better place for this? + lazy val desiredScalaVersion = "2.10.0-M5" + lazy val buildSettings = Seq( organization := "com.typesafe.akka", version := "2.1-SNAPSHOT", - scalaVersion := "2.9.2" + //scalaVersion := desiredScalaVersion + scalaVersion := "2.10.0-SNAPSHOT", + scalaVersion in update <<= (scalaVersion) apply { + case "2.10.0-SNAPSHOT" => desiredScalaVersion + case x => x + } ) lazy val akka = Project( @@ -40,7 +47,7 @@ object AkkaBuild extends Build { """|import akka.actor._ |import akka.dispatch._ |import com.typesafe.config.ConfigFactory - |import akka.util.duration._ + |import scala.concurrent.util.duration._ |import akka.util.Timeout |val config = ConfigFactory.parseString("akka.stdout-loglevel=INFO,akka.loglevel=DEBUG") |val remoteConfig = ConfigFactory.parseString("akka.remote.netty{port=0,use-dispatcher-for-io=akka.actor.default-dispatcher,execution-pool-size=0},akka.actor.provider=RemoteActorRefProvider").withFallback(config) @@ -57,7 +64,7 @@ object AkkaBuild extends Build { sphinxLatex <<= sphinxLatex in LocalProject(docs.id), sphinxPdf <<= sphinxPdf in LocalProject(docs.id) ), - aggregate = Seq(actor, testkit, actorTests, remote, remoteTests, camel, cluster, slf4j, agent, transactor, mailboxes, zeroMQ, kernel, akkaSbtPlugin, samples, tutorials, osgi, osgiAries, docs) + aggregate = Seq(actor, testkit, actorTests, remote, remoteTests, camel, cluster, slf4j, agent, transactor, mailboxes, zeroMQ, kernel, /*akkaSbtPlugin,*/ samples, tutorials, osgi, osgiAries, docs) ) lazy val actor = Project( @@ -65,8 +72,7 @@ object AkkaBuild extends Build { base = file("akka-actor"), settings = defaultSettings ++ OSGi.actor ++ Seq( autoCompilerPlugins := true, - libraryDependencies <+= scalaVersion { v => compilerPlugin("org.scala-lang.plugins" % "continuations" % v) }, - scalacOptions += "-P:continuations:enable", + libraryDependencies <+= scalaVersion { v => "org.scala-lang" % "scala-reflect" % v }, packagedArtifact in (Compile, packageBin) <<= (artifact 
in (Compile, packageBin), OsgiKeys.bundle).identityMap, artifact in (Compile, packageBin) ~= (_.copy(`type` = "bundle")), // to fix scaladoc generation @@ -93,8 +99,6 @@ object AkkaBuild extends Build { dependencies = Seq(testkit % "compile;test->test"), settings = defaultSettings ++ Seq( autoCompilerPlugins := true, - libraryDependencies <+= scalaVersion { v => compilerPlugin("org.scala-lang.plugins" % "continuations" % v) }, - scalacOptions += "-P:continuations:enable", libraryDependencies ++= Dependencies.actorTests ) ) @@ -329,6 +333,8 @@ object AkkaBuild extends Build { override lazy val settings = super.settings ++ buildSettings ++ Seq( resolvers += "Sonatype Snapshot Repo" at "https://oss.sonatype.org/content/repositories/snapshots/", + //resolvers += "Sonatype Releases Repo" at "https://oss.sonatype.org/content/repositories/releases/", + resolvers += "Typesafe 2.10 Freshness" at "http://typesafe.artifactoryonline.com/typesafe/scala-fresh-2.10.x/", shellPrompt := { s => Project.extract(s).currentProject.id + " > " } ) @@ -397,7 +403,7 @@ object AkkaBuild extends Build { resolvers += "Typesafe Repo" at "http://repo.typesafe.com/typesafe/releases/", // compile options - scalacOptions ++= Seq("-encoding", "UTF-8", "-deprecation", "-unchecked") ++ ( + scalacOptions ++= Seq("-encoding", "UTF-8", "-target:jvm-1.6", /*"-deprecation",*/ "-feature", "-unchecked", "-Xlog-reflective-calls") ++ ( if (true || (System getProperty "java.runtime.version" startsWith "1.7")) Seq() else Seq("-optimize")), // -optimize fails with jdk7 javacOptions ++= Seq("-Xlint:unchecked", "-Xlint:deprecation"), @@ -481,22 +487,17 @@ object Dependencies { val testkit = Seq(Test.scalatest, Test.junit) - val actorTests = Seq( - Test.junit, Test.scalatest, Test.commonsMath, Test.mockito, - Test.scalacheck, protobuf - ) + val actorTests = Seq(Test.junit, Test.scalatest, Test.commonsMath, Test.mockito, Test.scalacheck, protobuf) - val remote = Seq( - netty, protobuf, uncommonsMath, Test.junit, Test.scalatest - ) + val remote = Seq(netty, protobuf, uncommonsMath, Test.junit, Test.scalatest) val cluster = Seq(Test.junit, Test.scalatest) val slf4j = Seq(slf4jApi, Test.logback) - val agent = Seq(scalaStm, Test.scalatest, Test.junit) + val agent = Seq(scalaStm, scalaActors, Test.scalatest, Test.junit) - val transactor = Seq(scalaStm, Test.scalatest, Test.junit) + val transactor = Seq(scalaStm, scalaActors, Test.scalatest, Test.junit) val mailboxes = Seq(Test.scalatest, Test.junit) @@ -512,55 +513,44 @@ object Dependencies { val tutorials = Seq(Test.scalatest, Test.junit) - val docs = Seq(Test.scalatest, Test.junit, Test.specs2, Test.junitIntf) + val docs = Seq(Test.scalatest, Test.junit, Test.junitIntf) val zeroMQ = Seq(protobuf, Dependency.zeroMQ, Test.scalatest, Test.junit) } object Dependency { - // Versions - - object V { - val Camel = "2.8.0" - val Logback = "1.0.4" - val Netty = "3.5.2.Final" - val OSGi = "4.2.0" - val Protobuf = "2.4.1" - val ScalaStm = "0.5" - val Scalatest = "1.6.1" - val Slf4j = "1.6.4" - val UncommonsMath = "1.2.2a" - } + def v(a: String): String = a+"_"+AkkaBuild.desiredScalaVersion // Compile - val ariesBlueprint = "org.apache.aries.blueprint" % "org.apache.aries.blueprint" % "0.3.2" // ApacheV2 - val config = "com.typesafe" % "config" % "0.4.1" // ApacheV2 - val camelCore = "org.apache.camel" % "camel-core" % V.Camel // ApacheV2 - val netty = "io.netty" % "netty" % V.Netty // ApacheV2 - val osgiCore = "org.osgi" % "org.osgi.core" % V.OSGi // ApacheV2 - val protobuf = "com.google.protobuf" % 
"protobuf-java" % V.Protobuf // New BSD - val scalaStm = "org.scala-tools" % "scala-stm_2.9.1" % V.ScalaStm // Modified BSD (Scala) - val slf4jApi = "org.slf4j" % "slf4j-api" % V.Slf4j // MIT - val uncommonsMath = "org.uncommons.maths" % "uncommons-maths" % V.UncommonsMath // ApacheV2 - val zeroMQ = "org.zeromq" % "zeromq-scala-binding_2.9.1" % "0.0.6" // ApacheV2 + val config = "com.typesafe" % "config" % "0.4.1" // ApacheV2 + val camelCore = "org.apache.camel" % "camel-core" % "2.8.0" // ApacheV2 + val netty = "io.netty" % "netty" % "3.5.1.Final" // ApacheV2 + val protobuf = "com.google.protobuf" % "protobuf-java" % "2.4.1" // New BSD + //val scalaStm = "org.scala-tools" % "scala-stm" % "0.5" // Modified BSD (Scala) + val scalaStm = "scala-stm" % "scala-stm" % "0.6-SNAPSHOT" //"0.5" // Modified BSD (Scala) + val scalaActors = "org.scala-lang" % "scala-actors" % "2.10.0-SNAPSHOT" + val slf4jApi = "org.slf4j" % "slf4j-api" % "1.6.4" // MIT + val zeroMQ = "org.zeromq" % v("zeromq-scala-binding") % "0.0.6" // ApacheV2 + val uncommonsMath = "org.uncommons.maths" % "uncommons-maths" % "1.2.2a" // ApacheV2 + val ariesBlueprint = "org.apache.aries.blueprint" % "org.apache.aries.blueprint" % "0.3.2" // ApacheV2 + val osgiCore = "org.osgi" % "org.osgi.core" % "4.2.0" // ApacheV2 // Test object Test { - val ariesProxy = "org.apache.aries.proxy" % "org.apache.aries.proxy.impl" % "0.3" % "test" // ApacheV2 - val commonsMath = "org.apache.commons" % "commons-math" % "2.1" % "test" // ApacheV2 - val commonsIo = "commons-io" % "commons-io" % "2.0.1" % "test"// ApacheV2 - val junit = "junit" % "junit" % "4.5" % "test" // Common Public License 1.0 - val logback = "ch.qos.logback" % "logback-classic" % V.Logback % "test" // EPL 1.0 / LGPL 2.1 - val mockito = "org.mockito" % "mockito-all" % "1.8.1" % "test" // MIT - val pojosr = "com.googlecode.pojosr" % "de.kalpatec.pojosr.framework" % "0.1.4" % "test" // ApacheV2 - val scalatest = "org.scalatest" % "scalatest_2.9.1" % V.Scalatest % "test" // ApacheV2 - val scalacheck = "org.scala-tools.testing" % "scalacheck_2.9.1" % "1.9" % "test" // New BSD - val specs2 = "org.specs2" % "specs2_2.9.1" % "1.9" % "test" // Modified BSD / ApacheV2 - val tinybundles = "org.ops4j.pax.tinybundles" % "tinybundles" % "1.0.0" % "test" // ApacheV2 - val log4j = "log4j" % "log4j" % "1.2.14" % "test" // ApacheV2 - val junitIntf = "com.novocode" % "junit-interface" % "0.8" % "test" // MIT + val commonsMath = "org.apache.commons" % "commons-math" % "2.1" % "test" // ApacheV2 + val commonsIo = "commons-io" % "commons-io" % "2.0.1" % "test" // ApacheV2 + val junit = "junit" % "junit" % "4.10" % "test" // Common Public License 1.0 + val logback = "ch.qos.logback" % "logback-classic" % "1.0.4" % "test" // EPL 1.0 / LGPL 2.1 + val mockito = "org.mockito" % "mockito-all" % "1.8.1" % "test" // MIT + val scalatest = "org.scalatest" % v("scalatest") % "1.9-2.10.0-M5-B2" % "test" // ApacheV2 + val scalacheck = "org.scalacheck" % v("scalacheck") % "1.10.0" % "test" // New BSD + val ariesProxy = "org.apache.aries.proxy" % "org.apache.aries.proxy.impl" % "0.3" % "test" // ApacheV2 + val pojosr = "com.googlecode.pojosr" % "de.kalpatec.pojosr.framework" % "0.1.4" % "test" // ApacheV2 + val tinybundles = "org.ops4j.pax.tinybundles" % "tinybundles" % "1.0.0" % "test" // ApacheV2 + val log4j = "log4j" % "log4j" % "1.2.14" % "test" // ApacheV2 + val junitIntf = "com.novocode" % "junit-interface" % "0.8" % "test" // MIT } } @@ -572,7 +562,7 @@ object OSGi { val agent = exports(Seq("akka.agent.*")) - val 
camel = exports(Seq("akka.camel.*", "akka.camelexamples")) + val camel = exports(Seq("akka.camel.*")) val cluster = exports(Seq("akka.cluster.*"))