Update to a working version of Scalariform

Björn Antonsson 2016-06-02 14:06:57 +02:00
parent cae070bd93
commit c66ce62d63
616 changed files with 5966 additions and 5436 deletions
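The hunks below are mechanical reformatting from the updated Scalariform run: argument lists are wrapped after the opening parenthesis instead of being aligned under the first argument, default-parameter columns lose their forced alignment, spaces appear inside import braces, and ASCII arrows are rewritten to their Unicode forms (→, ⇒, ←). For orientation, here is a minimal sketch of the kind of sbt-scalariform preference block that drives formatting of this sort; the actual settings in Akka's build are not part of this diff, so the values chosen below are assumptions, though the preference names are real Scalariform options.

import com.typesafe.sbt.SbtScalariform.ScalariformKeys
import scalariform.formatter.preferences._

// Hypothetical sketch, not taken from this commit: preferences of this kind produce
// reformatting like the hunks below (no forced parameter alignment, Unicode arrows).
ScalariformKeys.preferences := ScalariformKeys.preferences.value
  .setPreference(RewriteArrowSymbols, true) // => and <- are rendered as ⇒ and ←
  .setPreference(AlignParameters, false) // drop column alignment of parameter lists
  .setPreference(AlignSingleLineCaseStatements, false)
  .setPreference(DanglingCloseParenthesis, Preserve)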

View file

@ -101,7 +101,8 @@ class ActorLifeCycleSpec extends AkkaSpec("akka.actor.serialize-messages=off") w
"not invoke preRestart and postRestart when never restarted using OneForOneStrategy" in {
val id = newUuid().toString
val supervisor = system.actorOf(Props(classOf[Supervisor],
val supervisor = system.actorOf(Props(
classOf[Supervisor],
OneForOneStrategy(maxNrOfRetries = 3)(List(classOf[Exception]))))
val gen = new AtomicInteger(0)
val props = Props(classOf[LifeCycleTestActor], testActor, id, gen)

View file

@ -249,14 +249,14 @@ class ActorLookupSpec extends AkkaSpec with DefaultTimeout {
val lookname = looker.path.elements.mkString("", "/", "/")
for (
(l, r) ← Seq(
LookupString("a/b/c") -> empty(lookname + "a/b/c"),
LookupString("") -> system.deadLetters,
LookupString("akka://all-systems/Nobody") -> system.deadLetters,
LookupPath(system / "hallo") -> empty("user/hallo"),
LookupPath(looker.path child "hallo") -> empty(lookname + "hallo"), // test Java API
LookupPath(looker.path descendant Seq("a", "b").asJava) -> empty(lookname + "a/b"), // test Java API
LookupElems(Seq()) -> system.deadLetters,
LookupElems(Seq("a")) -> empty(lookname + "a"))
LookupString("a/b/c") → empty(lookname + "a/b/c"),
LookupString("") → system.deadLetters,
LookupString("akka://all-systems/Nobody") → system.deadLetters,
LookupPath(system / "hallo") → empty("user/hallo"),
LookupPath(looker.path child "hallo") → empty(lookname + "hallo"), // test Java API
LookupPath(looker.path descendant Seq("a", "b").asJava) → empty(lookname + "a/b"), // test Java API
LookupElems(Seq()) → system.deadLetters,
LookupElems(Seq("a")) → empty(lookname + "a"))
) checkOne(looker, l, r)
}
for (looker ← all) check(looker)

View file

@ -210,7 +210,8 @@ object ActorMailboxSpec {
final case class MCBoundedMailbox(val capacity: Int, val pushTimeOut: FiniteDuration)
extends MailboxType with ProducesMessageQueue[MCBoundedMessageQueueSemantics] {
def this(settings: ActorSystem.Settings, config: Config) = this(config.getInt("mailbox-capacity"),
def this(settings: ActorSystem.Settings, config: Config) = this(
config.getInt("mailbox-capacity"),
config.getNanosDuration("mailbox-push-timeout-time"))
final override def create(owner: Option[ActorRef], system: Option[ActorSystem]): MessageQueue =
@ -241,23 +242,29 @@ class ActorMailboxSpec(conf: Config) extends AkkaSpec(conf) with DefaultTimeout
}
"get an unbounded deque message queue when it is only configured on the props" in {
checkMailboxQueue(Props[QueueReportingActor].withMailbox("akka.actor.mailbox.unbounded-deque-based"),
checkMailboxQueue(
Props[QueueReportingActor].withMailbox("akka.actor.mailbox.unbounded-deque-based"),
"default-override-from-props", UnboundedDeqMailboxTypes)
}
"get an bounded message queue when it's only configured with RequiresMailbox" in {
checkMailboxQueue(Props[BoundedQueueReportingActor],
checkMailboxQueue(
Props[BoundedQueueReportingActor],
"default-override-from-trait", BoundedMailboxTypes)
}
"get an unbounded deque message queue when it's only mixed with Stash" in {
checkMailboxQueue(Props[StashQueueReportingActor],
checkMailboxQueue(
Props[StashQueueReportingActor],
"default-override-from-stash", UnboundedDeqMailboxTypes)
checkMailboxQueue(Props(new StashQueueReportingActor),
checkMailboxQueue(
Props(new StashQueueReportingActor),
"default-override-from-stash2", UnboundedDeqMailboxTypes)
checkMailboxQueue(Props(classOf[StashQueueReportingActorWithParams], 17, "hello"),
checkMailboxQueue(
Props(classOf[StashQueueReportingActorWithParams], 17, "hello"),
"default-override-from-stash3", UnboundedDeqMailboxTypes)
checkMailboxQueue(Props(new StashQueueReportingActorWithParams(17, "hello")),
checkMailboxQueue(
Props(new StashQueueReportingActorWithParams(17, "hello")),
"default-override-from-stash4", UnboundedDeqMailboxTypes)
}
@ -278,12 +285,14 @@ class ActorMailboxSpec(conf: Config) extends AkkaSpec(conf) with DefaultTimeout
}
"get an bounded control aware message queue when it's only configured with RequiresMailbox" in {
checkMailboxQueue(Props[BoundedControlAwareQueueReportingActor],
checkMailboxQueue(
Props[BoundedControlAwareQueueReportingActor],
"default-override-from-trait-bounded-control-aware", BoundedControlAwareMailboxTypes)
}
"get an unbounded control aware message queue when it's only configured with RequiresMailbox" in {
checkMailboxQueue(Props[UnboundedControlAwareQueueReportingActor],
checkMailboxQueue(
Props[UnboundedControlAwareQueueReportingActor],
"default-override-from-trait-unbounded-control-aware", UnboundedControlAwareMailboxTypes)
}
@ -317,7 +326,8 @@ class ActorMailboxSpec(conf: Config) extends AkkaSpec(conf) with DefaultTimeout
}
"get an unbounded message queue overriding configuration on the props" in {
checkMailboxQueue(Props[QueueReportingActor].withMailbox("akka.actor.mailbox.unbounded-deque-based"),
checkMailboxQueue(
Props[QueueReportingActor].withMailbox("akka.actor.mailbox.unbounded-deque-based"),
"bounded-unbounded-override-props", UnboundedMailboxTypes)
}
@ -401,17 +411,20 @@ class ActorMailboxSpec(conf: Config) extends AkkaSpec(conf) with DefaultTimeout
}
"get an unbounded message queue with a balancing dispatcher" in {
checkMailboxQueue(Props[QueueReportingActor].withDispatcher("balancing-dispatcher"),
checkMailboxQueue(
Props[QueueReportingActor].withDispatcher("balancing-dispatcher"),
"unbounded-balancing", UnboundedMailboxTypes)
}
"get a bounded message queue with a balancing bounded dispatcher" in {
checkMailboxQueue(Props[QueueReportingActor].withDispatcher("balancing-bounded-dispatcher"),
checkMailboxQueue(
Props[QueueReportingActor].withDispatcher("balancing-bounded-dispatcher"),
"bounded-balancing", BoundedMailboxTypes)
}
"get a bounded message queue with a requiring balancing bounded dispatcher" in {
checkMailboxQueue(Props[QueueReportingActor].withDispatcher("requiring-balancing-bounded-dispatcher"),
checkMailboxQueue(
Props[QueueReportingActor].withDispatcher("requiring-balancing-bounded-dispatcher"),
"requiring-bounded-balancing", BoundedMailboxTypes)
}
}

View file

@ -65,7 +65,8 @@ class ActorSelectionSpec extends AkkaSpec("akka.loglevel=DEBUG") with DefaultTim
asked.correlationId should ===(selection)
implicit val ec = system.dispatcher
val resolved = Await.result(selection.resolveOne(timeout.duration).mapTo[ActorRef] recover { case _ ⇒ null },
val resolved = Await.result(
selection.resolveOne(timeout.duration).mapTo[ActorRef] recover { case _ ⇒ null },
timeout.duration)
Option(resolved) should ===(result)
@ -248,11 +249,11 @@ class ActorSelectionSpec extends AkkaSpec("akka.loglevel=DEBUG") with DefaultTim
val lookname = looker.path.elements.mkString("", "/", "/")
for (
(l, r) ← Seq(
SelectString("a/b/c") -> None,
SelectString("akka://all-systems/Nobody") -> None,
SelectPath(system / "hallo") -> None,
SelectPath(looker.path child "hallo") -> None, // test Java API
SelectPath(looker.path descendant Seq("a", "b").asJava) -> None) // test Java API
SelectString("a/b/c") → None,
SelectString("akka://all-systems/Nobody") → None,
SelectPath(system / "hallo") → None,
SelectPath(looker.path child "hallo") → None, // test Java API
SelectPath(looker.path descendant Seq("a", "b").asJava) → None) // test Java API
) checkOne(looker, l, r)
}
for (looker ← all) check(looker)

View file

@ -273,7 +273,8 @@ class ActorSystemSpec extends AkkaSpec(ActorSystemSpec.config) with ImplicitSend
}
"allow configuration of guardian supervisor strategy" in {
implicit val system = ActorSystem("Stop",
implicit val system = ActorSystem(
"Stop",
ConfigFactory.parseString("akka.actor.guardian-supervisor-strategy=akka.actor.StoppingSupervisorStrategy")
.withFallback(AkkaSpec.testConf))
val a = system.actorOf(Props(new Actor {
@ -293,7 +294,8 @@ class ActorSystemSpec extends AkkaSpec(ActorSystemSpec.config) with ImplicitSend
}
"shut down when /user escalates" in {
implicit val system = ActorSystem("Stop",
implicit val system = ActorSystem(
"Stop",
ConfigFactory.parseString("akka.actor.guardian-supervisor-strategy=\"akka.actor.ActorSystemSpec$Strategy\"")
.withFallback(AkkaSpec.testConf))
val a = system.actorOf(Props(new Actor {

View file

@ -8,12 +8,11 @@ import java.util.concurrent.atomic.AtomicInteger
import akka.testkit.EventFilter
import akka.testkit.TestKit._
import com.typesafe.config.ConfigFactory
import org.scalatest.{Matchers, WordSpec}
import org.scalatest.{ Matchers, WordSpec }
import org.scalatest.junit.JUnitSuiteLike
import scala.util.control.NoStackTrace
class JavaExtensionSpec extends JavaExtension with JUnitSuiteLike
object TestExtension extends ExtensionId[TestExtension] with ExtensionIdProvider {
@ -52,7 +51,6 @@ class FailingTestExtension(val system: ExtendedActorSystem) extends Extension {
throw new FailingTestExtension.TestException
}
class ExtensionSpec extends WordSpec with Matchers {
"The ActorSystem extensions support" should {
@ -83,9 +81,8 @@ class ExtensionSpec extends WordSpec with Matchers {
shutdownActorSystem(system)
}
"fail the actor system if an extension listed in akka.extensions fails to start" in {
intercept[RuntimeException]{
intercept[RuntimeException] {
val system = ActorSystem("failing", ConfigFactory.parseString(
"""
akka.extensions = ["akka.actor.FailingTestExtension"]
@ -134,7 +131,6 @@ class ExtensionSpec extends WordSpec with Matchers {
}
}
}
}

View file

@ -34,6 +34,7 @@ object FSMActorSpec {
class Lock(code: String, timeout: FiniteDuration, latches: Latches) extends Actor with FSM[LockState, CodeState] {
import latches._
import FSM.`→`
startWith(Locked, CodeState("", code))
@ -71,7 +72,7 @@ object FSMActorSpec {
}
onTransition {
case Locked -> Open ⇒ transitionLatch.open
case Locked → Open ⇒ transitionLatch.open
}
// verify that old-style does still compile
@ -98,8 +99,9 @@ object FSMActorSpec {
final case class CodeState(soFar: String, code: String)
}
class FSMActorSpec extends AkkaSpec(Map("akka.actor.debug.fsm" -> true)) with ImplicitSender {
class FSMActorSpec extends AkkaSpec(Map("akka.actor.debug.fsm" → true)) with ImplicitSender {
import FSMActorSpec._
import FSM.`→`
val timeout = Timeout(2 seconds)
@ -222,7 +224,7 @@ class FSMActorSpec extends AkkaSpec(Map("akka.actor.debug.fsm" -> true)) with Im
case Event("stop", _) ⇒ stop()
}
onTransition {
case "not-started" -> "started" ⇒
case "not-started" → "started" ⇒
for (timerName ← timerNames) setTimer(timerName, (), 10 seconds, false)
}
onTermination {
@ -250,8 +252,8 @@ class FSMActorSpec extends AkkaSpec(Map("akka.actor.debug.fsm" -> true)) with Im
"log events and transitions if asked to do so" in {
import scala.collection.JavaConverters._
val config = ConfigFactory.parseMap(Map("akka.loglevel" -> "DEBUG", "akka.actor.serialize-messages" -> "off",
"akka.actor.debug.fsm" -> true).asJava).withFallback(system.settings.config)
val config = ConfigFactory.parseMap(Map("akka.loglevel" → "DEBUG", "akka.actor.serialize-messages" → "off",
"akka.actor.debug.fsm" → true).asJava).withFallback(system.settings.config)
val fsmEventSystem = ActorSystem("fsmEvent", config)
try {
new TestKit(fsmEventSystem) {

View file

@ -129,7 +129,8 @@ class FSMTimingSpec extends AkkaSpec with ImplicitSender {
}
"notify unhandled messages" taggedAs TimingTest in {
filterEvents(EventFilter.warning("unhandled event Tick in state TestUnhandled", source = fsm.path.toString, occurrences = 1),
filterEvents(
EventFilter.warning("unhandled event Tick in state TestUnhandled", source = fsm.path.toString, occurrences = 1),
EventFilter.warning("unhandled event Unhandled(test) in state TestUnhandled", source = fsm.path.toString, occurrences = 1)) {
fsm ! TestUnhandled
within(3 second) {
@ -208,7 +209,7 @@ object FSMTimingSpec {
goto(Initial)
}
onTransition {
case Initial -> TestSingleTimerResubmit ⇒ setTimer("blah", Tick, 500.millis.dilated)
case Initial → TestSingleTimerResubmit ⇒ setTimer("blah", Tick, 500.millis.dilated)
}
when(TestSingleTimerResubmit) {
case Event(Tick, _) ⇒

View file

@ -9,6 +9,7 @@ import scala.concurrent.duration._
import scala.language.postfixOps
object FSMTransitionSpec {
import FSM.`→`
class Supervisor extends Actor {
def receive = { case _ ⇒ }
@ -20,7 +21,7 @@ object FSMTransitionSpec {
case Event("stay", _) ⇒ stay()
case Event(_, _) ⇒ goto(0)
}
onTransition { case from -> to ⇒ target ! (from -> to) }
onTransition { case from → to ⇒ target ! (from → to) }
initialize()
}
@ -50,8 +51,8 @@ object FSMTransitionSpec {
case _ ⇒ goto(1)
}
onTransition {
case 0 -> 1 ⇒ target ! ((stateData, nextStateData))
case 1 -> 1 ⇒ target ! ((stateData, nextStateData))
case 0 → 1 ⇒ target ! ((stateData, nextStateData))
case 1 → 1 ⇒ target ! ((stateData, nextStateData))
}
}
@ -64,16 +65,17 @@ object FSMTransitionSpec {
class FSMTransitionSpec extends AkkaSpec with ImplicitSender {
import FSMTransitionSpec._
import FSM.`→`
"A FSM transition notifier" must {
"not trigger onTransition for stay" in {
val fsm = system.actorOf(Props(new SendAnyTransitionFSM(testActor)))
expectMsg(0 -> 0) // caused by initialize(), OK.
expectMsg(0 → 0) // caused by initialize(), OK.
fsm ! "stay" // no transition event
expectNoMsg(500.millis)
fsm ! "goto" // goto(current state)
expectMsg(0 -> 0)
expectMsg(0 → 0)
}
"notify listeners" in {
@ -150,7 +152,7 @@ class FSMTransitionSpec extends AkkaSpec with ImplicitSender {
case Event("switch", _) ⇒ goto(1) using sender()
}
onTransition {
case x -> y ⇒ nextStateData ! (x -> y)
case x → y ⇒ nextStateData ! (x → y)
}
when(1) {
case Event("test", _) ⇒

View file

@ -26,6 +26,8 @@ import java.lang.System.identityHashCode
import akka.util.Helpers.ConfigOps
object SupervisorHierarchySpec {
import FSM.`→`
class FireWorkerException(msg: String) extends Exception(msg)
/**
@ -79,7 +81,8 @@ object SupervisorHierarchySpec {
extends DispatcherConfigurator(config, prerequisites) {
private val instance: MessageDispatcher =
new Dispatcher(this,
new Dispatcher(
this,
config.getString("id"),
config.getInt("throughput"),
config.getNanosDuration("throughput-deadline-time"),
@ -467,7 +470,7 @@ object SupervisorHierarchySpec {
}
onTransition {
case Init -> Stress ⇒
case Init → Stress ⇒
self ! Work
idleChildren = children
activeChildren = children
@ -532,7 +535,7 @@ object SupervisorHierarchySpec {
}
onTransition {
case Stress -> Finishing ⇒ ignoreFailConstr = true
case Stress → Finishing ⇒ ignoreFailConstr = true
}
when(Finishing) {
@ -546,7 +549,7 @@ object SupervisorHierarchySpec {
}
onTransition {
case _ -> LastPing ⇒
case _ → LastPing ⇒
idleChildren foreach (_ ! "ping")
pingChildren ++= idleChildren
idleChildren = Vector.empty
@ -563,7 +566,7 @@ object SupervisorHierarchySpec {
}
onTransition {
case _ -> Stopping ⇒
case _ → Stopping ⇒
ignoreNotResumedLogs = false
hierarchy ! PingOfDeath
}
@ -596,7 +599,7 @@ object SupervisorHierarchySpec {
stop
}
case Event(StateTimeout, _) ⇒
errors :+= self -> ErrorLog("timeout while Stopping", Vector.empty)
errors :+= self → ErrorLog("timeout while Stopping", Vector.empty)
println(system.asInstanceOf[ActorSystemImpl].printTree)
getErrors(hierarchy, 10)
printErrors()
@ -604,7 +607,7 @@ object SupervisorHierarchySpec {
testActor ! "timeout in Stopping"
stop
case Event(e: ErrorLog, _) ⇒
errors :+= sender() -> e
errors :+= sender() → e
goto(Failed)
}
@ -630,7 +633,7 @@ object SupervisorHierarchySpec {
when(Failed, stateTimeout = 5.seconds.dilated) {
case Event(e: ErrorLog, _) ⇒
if (!e.msg.startsWith("not resumed") || !ignoreNotResumedLogs)
errors :+= sender() -> e
errors :+= sender() → e
stay
case Event(Terminated(r), _) if r == hierarchy ⇒
printErrors()
@ -650,8 +653,8 @@ object SupervisorHierarchySpec {
target match {
case l: LocalActorRef ⇒
l.underlying.actor match {
case h: Hierarchy ⇒ errors :+= target -> ErrorLog("forced", h.log)
case _ ⇒ errors :+= target -> ErrorLog("fetched", stateCache.get(target.path).log)
case h: Hierarchy ⇒ errors :+= target → ErrorLog("forced", h.log)
case _ ⇒ errors :+= target → ErrorLog("fetched", stateCache.get(target.path).log)
}
if (depth > 0) {
l.underlying.children foreach (getErrors(_, depth - 1))
@ -663,8 +666,8 @@ object SupervisorHierarchySpec {
target match {
case l: LocalActorRef ⇒
l.underlying.actor match {
case h: Hierarchy ⇒ errors :+= target -> ErrorLog("forced", h.log)
case _ ⇒ errors :+= target -> ErrorLog("fetched", stateCache.get(target.path).log)
case h: Hierarchy ⇒ errors :+= target → ErrorLog("forced", h.log)
case _ ⇒ errors :+= target → ErrorLog("fetched", stateCache.get(target.path).log)
}
if (target != hierarchy) getErrorsUp(l.getParent)
}
@ -693,7 +696,7 @@ object SupervisorHierarchySpec {
case Event(e: ErrorLog, _) ⇒
if (e.msg.startsWith("not resumed")) stay
else {
errors :+= sender() -> e
errors :+= sender() → e
// dont stop the hierarchy, that is going to happen all by itself and in the right order
goto(Failed)
}

View file

@ -58,7 +58,7 @@ class SupervisorMiscSpec extends AkkaSpec(SupervisorMiscSpec.config) with Defaul
countDownLatch.await(10, TimeUnit.SECONDS)
Seq("actor1" -> actor1, "actor2" -> actor2, "actor3" -> actor3, "actor4" -> actor4) map {
Seq("actor1" → actor1, "actor2" → actor2, "actor3" → actor3, "actor4" → actor4) map {
case (id, ref) ⇒ (id, ref ? "status")
} foreach {
case (id, f) ⇒ (id, Await.result(f, timeout.duration)) should ===((id, "OK"))

View file

@ -16,9 +16,10 @@ object UidClashTest {
@volatile var oldActor: ActorRef = _
private[akka] class EvilCollidingActorRef(override val provider: ActorRefProvider,
override val path: ActorPath,
val eventStream: EventStream) extends MinimalActorRef {
private[akka] class EvilCollidingActorRef(
override val provider: ActorRefProvider,
override val path: ActorPath,
val eventStream: EventStream) extends MinimalActorRef {
//Ignore everything
override def isTerminated: Boolean = true

View file

@ -181,13 +181,13 @@ object ActorModelSpec {
dispatcher.asInstanceOf[MessageDispatcherInterceptor].getStats(actorRef)
def assertRefDefaultZero(actorRef: ActorRef, dispatcher: MessageDispatcher = null)(
suspensions: Long = 0,
resumes: Long = 0,
registers: Long = 0,
unregisters: Long = 0,
msgsReceived: Long = 0,
suspensions: Long = 0,
resumes: Long = 0,
registers: Long = 0,
unregisters: Long = 0,
msgsReceived: Long = 0,
msgsProcessed: Long = 0,
restarts: Long = 0)(implicit system: ActorSystem) {
restarts: Long = 0)(implicit system: ActorSystem) {
assertRef(actorRef, dispatcher)(
suspensions,
resumes,
@ -199,13 +199,13 @@ object ActorModelSpec {
}
def assertRef(actorRef: ActorRef, dispatcher: MessageDispatcher = null)(
suspensions: Long = statsFor(actorRef, dispatcher).suspensions.get(),
resumes: Long = statsFor(actorRef, dispatcher).resumes.get(),
registers: Long = statsFor(actorRef, dispatcher).registers.get(),
unregisters: Long = statsFor(actorRef, dispatcher).unregisters.get(),
msgsReceived: Long = statsFor(actorRef, dispatcher).msgsReceived.get(),
suspensions: Long = statsFor(actorRef, dispatcher).suspensions.get(),
resumes: Long = statsFor(actorRef, dispatcher).resumes.get(),
registers: Long = statsFor(actorRef, dispatcher).registers.get(),
unregisters: Long = statsFor(actorRef, dispatcher).unregisters.get(),
msgsReceived: Long = statsFor(actorRef, dispatcher).msgsReceived.get(),
msgsProcessed: Long = statsFor(actorRef, dispatcher).msgsProcessed.get(),
restarts: Long = statsFor(actorRef, dispatcher).restarts.get())(implicit system: ActorSystem) {
restarts: Long = statsFor(actorRef, dispatcher).restarts.get())(implicit system: ActorSystem) {
val stats = statsFor(actorRef, Option(dispatcher).getOrElse(actorRef.asInstanceOf[ActorRefWithCell].underlying.asInstanceOf[ActorCell].dispatcher))
val deadline = System.currentTimeMillis + 1000
try {
@ -218,7 +218,8 @@ object ActorModelSpec {
await(deadline)(stats.restarts.get() == restarts)
} catch {
case e: Throwable ⇒
system.eventStream.publish(Error(e,
system.eventStream.publish(Error(
e,
Option(dispatcher).toString,
(Option(dispatcher) getOrElse this).getClass,
"actual: " + stats + ", required: InterceptorStats(susp=" + suspensions +
@ -529,7 +530,8 @@ object DispatcherModelSpec {
import akka.util.Helpers.ConfigOps
private val instance: MessageDispatcher =
new Dispatcher(this,
new Dispatcher(
this,
config.getString("id"),
config.getInt("throughput"),
config.getNanosDuration("throughput-deadline-time"),
@ -602,7 +604,8 @@ object BalancingDispatcherModelSpec {
import akka.util.Helpers.ConfigOps
override protected def create(mailboxType: MailboxType): BalancingDispatcher =
new BalancingDispatcher(this,
new BalancingDispatcher(
this,
config.getString("id"),
config.getInt("throughput"),
config.getNanosDuration("throughput-deadline-time"),

View file

@ -104,15 +104,15 @@ class DispatchersSpec extends AkkaSpec(DispatchersSpec.config) with ImplicitSend
def ofType[T <: MessageDispatcher: ClassTag]: (MessageDispatcher) ⇒ Boolean = _.getClass == implicitly[ClassTag[T]].runtimeClass
def typesAndValidators: Map[String, (MessageDispatcher) ⇒ Boolean] = Map(
"PinnedDispatcher" -> ofType[PinnedDispatcher],
"Dispatcher" -> ofType[Dispatcher])
"PinnedDispatcher" → ofType[PinnedDispatcher],
"Dispatcher" → ofType[Dispatcher])
def validTypes = typesAndValidators.keys.toList
val defaultDispatcherConfig = settings.config.getConfig("akka.actor.default-dispatcher")
lazy val allDispatchers: Map[String, MessageDispatcher] = {
validTypes.map(t ⇒ (t, from(ConfigFactory.parseMap(Map(tipe -> t, id -> t).asJava).
validTypes.map(t ⇒ (t, from(ConfigFactory.parseMap(Map(tipe → t, id → t).asJava).
withFallback(defaultDispatcherConfig)))).toMap
}
@ -150,7 +150,7 @@ class DispatchersSpec extends AkkaSpec(DispatchersSpec.config) with ImplicitSend
"throw ConfigurationException if type does not exist" in {
intercept[ConfigurationException] {
from(ConfigFactory.parseMap(Map(tipe -> "typedoesntexist", id -> "invalid-dispatcher").asJava).
from(ConfigFactory.parseMap(Map(tipe → "typedoesntexist", id → "invalid-dispatcher").asJava).
withFallback(defaultDispatcherConfig))
}
}

View file

@ -125,55 +125,57 @@ abstract class MailboxSpec extends AkkaSpec with BeforeAndAfterAll with BeforeAn
q.hasMessages should ===(false)
}
def testEnqueueDequeue(config: MailboxType,
enqueueN: Int = 10000,
dequeueN: Int = 10000,
parallel: Boolean = true): Unit = within(10 seconds) {
def testEnqueueDequeue(
config: MailboxType,
enqueueN: Int = 10000,
dequeueN: Int = 10000,
parallel: Boolean = true): Unit = within(10 seconds) {
val q = factory(config)
ensureInitialMailboxState(config, q)
EventFilter.warning(pattern = ".*received dead letter from Actor.*MailboxSpec/deadLetters.*",
EventFilter.warning(
pattern = ".*received dead letter from Actor.*MailboxSpec/deadLetters.*",
occurrences = (enqueueN - dequeueN)) intercept {
def createProducer(fromNum: Int, toNum: Int): Future[Vector[Envelope]] = spawn {
val messages = Vector() ++ (for (i ← fromNum to toNum) yield createMessageInvocation(i))
for (i ← messages) q.enqueue(testActor, i)
messages
}
val producers = {
val step = 500
val ps = for (i ← (1 to enqueueN by step).toList) yield createProducer(i, Math.min(enqueueN, i + step - 1))
if (parallel == false)
ps foreach { Await.ready(_, remainingOrDefault) }
ps
}
def createConsumer: Future[Vector[Envelope]] = spawn {
var r = Vector[Envelope]()
while (producers.exists(_.isCompleted == false) || q.hasMessages)
Option(q.dequeue) foreach { message ⇒ r = r :+ message }
r
}
val consumers = List.fill(maxConsumers)(createConsumer)
val ps = producers.map(Await.result(_, remainingOrDefault))
val cs = consumers.map(Await.result(_, remainingOrDefault))
ps.map(_.size).sum should ===(enqueueN) //Must have produced 1000 messages
cs.map(_.size).sum should ===(dequeueN) //Must have consumed all produced messages
//No message is allowed to be consumed by more than one consumer
cs.flatten.distinct.size should ===(dequeueN)
//All consumed messages should have been produced
(cs.flatten diff ps.flatten).size should ===(0)
//The ones that were produced and not consumed
(ps.flatten diff cs.flatten).size should ===(enqueueN - dequeueN)
def createProducer(fromNum: Int, toNum: Int): Future[Vector[Envelope]] = spawn {
val messages = Vector() ++ (for (i ← fromNum to toNum) yield createMessageInvocation(i))
for (i ← messages) q.enqueue(testActor, i)
messages
}
val producers = {
val step = 500
val ps = for (i ← (1 to enqueueN by step).toList) yield createProducer(i, Math.min(enqueueN, i + step - 1))
if (parallel == false)
ps foreach { Await.ready(_, remainingOrDefault) }
ps
}
def createConsumer: Future[Vector[Envelope]] = spawn {
var r = Vector[Envelope]()
while (producers.exists(_.isCompleted == false) || q.hasMessages)
Option(q.dequeue) foreach { message ⇒ r = r :+ message }
r
}
val consumers = List.fill(maxConsumers)(createConsumer)
val ps = producers.map(Await.result(_, remainingOrDefault))
val cs = consumers.map(Await.result(_, remainingOrDefault))
ps.map(_.size).sum should ===(enqueueN) //Must have produced 1000 messages
cs.map(_.size).sum should ===(dequeueN) //Must have consumed all produced messages
//No message is allowed to be consumed by more than one consumer
cs.flatten.distinct.size should ===(dequeueN)
//All consumed messages should have been produced
(cs.flatten diff ps.flatten).size should ===(0)
//The ones that were produced and not consumed
(ps.flatten diff cs.flatten).size should ===(enqueueN - dequeueN)
}
}
}

View file

@ -10,8 +10,8 @@ import org.scalatest.BeforeAndAfterEach
import akka.testkit._
import scala.concurrent.duration._
import akka.actor.{ Props, Actor, ActorRef, ActorSystem, PoisonPill}
import akka.japi.{ Procedure}
import akka.actor.{ Props, Actor, ActorRef, ActorSystem, PoisonPill }
import akka.japi.{ Procedure }
import com.typesafe.config.{ Config, ConfigFactory }
object EventBusSpec {

View file

@ -117,10 +117,10 @@ object LoggerSpec {
override def mdc(currentMessage: Any): MDC = {
reqId += 1
val always = Map("requestId" -> reqId)
val always = Map("requestId" → reqId)
val cmim = "Current Message in MDC"
val perMessage = currentMessage match {
case `cmim` Map[String, Any]("currentMsg" -> cmim, "currentMsgLength" -> cmim.length)
case `cmim` ⇒ Map[String, Any]("currentMsg" → cmim, "currentMsgLength" → cmim.length)
case _ ⇒ Map()
}
always ++ perMessage

View file

@ -28,9 +28,9 @@ class LoggingReceiveSpec extends WordSpec with BeforeAndAfterAll {
akka.loglevel=DEBUG
akka.actor.serialize-messages = off # debug noise from serialization
""").withFallback(AkkaSpec.testConf)
val appLogging = ActorSystem("logging", ConfigFactory.parseMap(Map("akka.actor.debug.receive" -> true).asJava).withFallback(config))
val appAuto = ActorSystem("autoreceive", ConfigFactory.parseMap(Map("akka.actor.debug.autoreceive" -> true).asJava).withFallback(config))
val appLifecycle = ActorSystem("lifecycle", ConfigFactory.parseMap(Map("akka.actor.debug.lifecycle" -> true).asJava).withFallback(config))
val appLogging = ActorSystem("logging", ConfigFactory.parseMap(Map("akka.actor.debug.receive" → true).asJava).withFallback(config))
val appAuto = ActorSystem("autoreceive", ConfigFactory.parseMap(Map("akka.actor.debug.autoreceive" → true).asJava).withFallback(config))
val appLifecycle = ActorSystem("lifecycle", ConfigFactory.parseMap(Map("akka.actor.debug.lifecycle" → true).asJava).withFallback(config))
val filter = TestEvent.Mute(EventFilter.custom {
case _: Logging.Debug ⇒ true

View file

@ -886,10 +886,11 @@ class TcpConnectionSpec extends AkkaSpec("""
def setServerSocketOptions() = ()
def createConnectionActor(serverAddress: InetSocketAddress = serverAddress,
options: immutable.Seq[SocketOption] = Nil,
timeout: Option[FiniteDuration] = None,
pullMode: Boolean = false): TestActorRef[TcpOutgoingConnection] = {
def createConnectionActor(
serverAddress: InetSocketAddress = serverAddress,
options: immutable.Seq[SocketOption] = Nil,
timeout: Option[FiniteDuration] = None,
pullMode: Boolean = false): TestActorRef[TcpOutgoingConnection] = {
val ref = createConnectionActorWithoutRegistration(serverAddress, options, timeout, pullMode)
ref ! newChannelRegistration
ref
@ -901,10 +902,11 @@ class TcpConnectionSpec extends AkkaSpec("""
def disableInterest(op: Int): Unit = interestCallReceiver.ref ! -op
}
def createConnectionActorWithoutRegistration(serverAddress: InetSocketAddress = serverAddress,
options: immutable.Seq[SocketOption] = Nil,
timeout: Option[FiniteDuration] = None,
pullMode: Boolean = false): TestActorRef[TcpOutgoingConnection] =
def createConnectionActorWithoutRegistration(
serverAddress: InetSocketAddress = serverAddress,
options: immutable.Seq[SocketOption] = Nil,
timeout: Option[FiniteDuration] = None,
pullMode: Boolean = false): TestActorRef[TcpOutgoingConnection] =
TestActorRef(
new TcpOutgoingConnection(Tcp(system), this, userHandler.ref,
Connect(serverAddress, options = options, timeout = timeout, pullMode = pullMode)) {
@ -931,8 +933,8 @@ class TcpConnectionSpec extends AkkaSpec("""
abstract class EstablishedConnectionTest(
keepOpenOnPeerClosed: Boolean = false,
useResumeWriting: Boolean = true,
pullMode: Boolean = false)
useResumeWriting: Boolean = true,
pullMode: Boolean = false)
extends UnacceptedConnectionTest(pullMode) {
// lazy init since potential exceptions should not be triggered in the constructor but during execution of `run`
@ -1074,7 +1076,7 @@ class TcpConnectionSpec extends AkkaSpec("""
}
val interestsNames =
Seq(OP_ACCEPT -> "accepting", OP_CONNECT -> "connecting", OP_READ -> "reading", OP_WRITE -> "writing")
Seq(OP_ACCEPT → "accepting", OP_CONNECT → "connecting", OP_READ → "reading", OP_WRITE → "writing")
def interestsDesc(interests: Int): String =
interestsNames.filter(i ⇒ (i._1 & interests) != 0).map(_._2).mkString(", ")

View file

@ -185,11 +185,11 @@ class TcpIntegrationSpec extends AkkaSpec("""
}
def chitchat(
clientHandler: TestProbe,
clientHandler: TestProbe,
clientConnection: ActorRef,
serverHandler: TestProbe,
serverHandler: TestProbe,
serverConnection: ActorRef,
rounds: Int = 100) = {
rounds: Int = 100) = {
val testData = ByteString(0)
(1 to rounds) foreach { _ ⇒

View file

@ -213,7 +213,7 @@ class AskSpec extends AkkaSpec {
val act = system.actorOf(Props(new Actor {
def receive = {
case msg ⇒ p.ref ! sender() -> msg
case msg ⇒ p.ref ! sender() → msg
}
}))

View file

@ -44,9 +44,10 @@ object MetricsBasedResizerSpec {
var msgs: Set[TestLatch] = Set()
def mockSend(await: Boolean,
l: TestLatch = TestLatch(),
routeeIdx: Int = Random.nextInt(routees.length)): Latches = {
def mockSend(
await: Boolean,
l: TestLatch = TestLatch(),
routeeIdx: Int = Random.nextInt(routees.length)): Latches = {
val target = routees(routeeIdx)
val first = TestLatch()
val latches = Latches(first, l)

View file

@ -50,7 +50,7 @@ class RandomSpec extends AkkaSpec with DefaultTimeout with ImplicitSender {
val counter = new AtomicInteger
var replies = Map.empty[Int, Int]
for (i ← 0 until connectionCount) {
replies = replies + (i -> 0)
replies = replies + (i → 0)
}
val actor = system.actorOf(RandomPool(connectionCount).props(routeeProps =
@ -65,7 +65,7 @@ class RandomSpec extends AkkaSpec with DefaultTimeout with ImplicitSender {
for (i ← 0 until iterationCount) {
for (k ← 0 until connectionCount) {
val id = Await.result((actor ? "hit").mapTo[Int], timeout.duration)
replies = replies + (id -> (replies(id) + 1))
replies = replies + (id → (replies(id) + 1))
}
}

View file

@ -64,7 +64,7 @@ class RoundRobinSpec extends AkkaSpec with DefaultTimeout with ImplicitSender {
for (_ ← 1 to iterationCount; _ ← 1 to connectionCount) {
val id = Await.result((actor ? "hit").mapTo[Int], timeout.duration)
replies = replies + (id -> (replies(id) + 1))
replies = replies + (id → (replies(id) + 1))
}
counter.get should ===(connectionCount)
@ -138,7 +138,7 @@ class RoundRobinSpec extends AkkaSpec with DefaultTimeout with ImplicitSender {
for (_ ← 1 to iterationCount; _ ← 1 to connectionCount) {
val id = Await.result((actor ? "hit").mapTo[String], timeout.duration)
replies = replies + (id -> (replies(id) + 1))
replies = replies + (id → (replies(id) + 1))
}
actor ! akka.routing.Broadcast("end")
@ -184,7 +184,7 @@ class RoundRobinSpec extends AkkaSpec with DefaultTimeout with ImplicitSender {
for (_ ← 1 to iterationCount; _ ← 1 to connectionCount) {
val id = Await.result((actor ? "hit").mapTo[String], timeout.duration)
replies = replies + (id -> (replies(id) + 1))
replies = replies + (id → (replies(id) + 1))
}
watch(actor)

View file

@ -323,7 +323,8 @@ class SerializationCompatibilitySpec extends AkkaSpec(SerializationTests.mostlyR
"be preserved for the Create SystemMessage" in {
// Using null as the cause to avoid a large serialized message and JDK differences
verify(Create(Some(null)),
verify(
Create(Some(null)),
if (scala.util.Properties.versionNumberString.startsWith("2.10.")) {
"aced00057372001b616b6b612e64697370617463682e7379736d73672e4372656174650000000000" +
"0000010200014c00076661696c75726574000e4c7363616c612f4f7074696f6e3b78707372000a73" +
@ -337,53 +338,62 @@ class SerializationCompatibilitySpec extends AkkaSpec(SerializationTests.mostlyR
})
}
"be preserved for the Recreate SystemMessage" in {
verify(Recreate(null),
verify(
Recreate(null),
"aced00057372001d616b6b612e64697370617463682e7379736d73672e5265637265617465000000" +
"00000000010200014c000563617573657400154c6a6176612f6c616e672f5468726f7761626c653b" +
"787070")
}
"be preserved for the Suspend SystemMessage" in {
verify(Suspend(),
verify(
Suspend(),
"aced00057372001c616b6b612e64697370617463682e7379736d73672e53757370656e6400000000" +
"000000010200007870")
}
"be preserved for the Resume SystemMessage" in {
verify(Resume(null),
verify(
Resume(null),
"aced00057372001b616b6b612e64697370617463682e7379736d73672e526573756d650000000000" +
"0000010200014c000f63617573656442794661696c7572657400154c6a6176612f6c616e672f5468" +
"726f7761626c653b787070")
}
"be preserved for the Terminate SystemMessage" in {
verify(Terminate(),
verify(
Terminate(),
"aced00057372001e616b6b612e64697370617463682e7379736d73672e5465726d696e6174650000" +
"0000000000010200007870")
}
"be preserved for the Supervise SystemMessage" in {
verify(Supervise(null, true),
verify(
Supervise(null, true),
"aced00057372001e616b6b612e64697370617463682e7379736d73672e5375706572766973650000" +
"0000000000010200025a00056173796e634c00056368696c647400154c616b6b612f6163746f722f" +
"4163746f725265663b78700170")
}
"be preserved for the Watch SystemMessage" in {
verify(Watch(null, null),
verify(
Watch(null, null),
"aced00057372001a616b6b612e64697370617463682e7379736d73672e5761746368000000000000" +
"00010200024c00077761746368656574001d4c616b6b612f6163746f722f496e7465726e616c4163" +
"746f725265663b4c00077761746368657271007e000178707070")
}
"be preserved for the Unwatch SystemMessage" in {
verify(Unwatch(null, null),
verify(
Unwatch(null, null),
"aced00057372001c616b6b612e64697370617463682e7379736d73672e556e776174636800000000" +
"000000010200024c0007776174636865657400154c616b6b612f6163746f722f4163746f72526566" +
"3b4c00077761746368657271007e000178707070")
}
"be preserved for the NoMessage SystemMessage" in {
verify(NoMessage,
verify(
NoMessage,
"aced00057372001f616b6b612e64697370617463682e7379736d73672e4e6f4d6573736167652400" +
"000000000000010200007870")
}
"be preserved for the Failed SystemMessage" in {
// Using null as the cause to avoid a large serialized message and JDK differences
verify(Failed(null, cause = null, uid = 0),
verify(
Failed(null, cause = null, uid = 0),
"aced00057372001b616b6b612e64697370617463682e7379736d73672e4661696c65640000000000" +
"0000010200034900037569644c000563617573657400154c6a6176612f6c616e672f5468726f7761" +
"626c653b4c00056368696c647400154c616b6b612f6163746f722f4163746f725265663b78700000" +

View file

@ -121,7 +121,7 @@ class ByteStringSpec extends WordSpec with Matchers with Checkers {
val (bsAIt, bsBIt) = (a.iterator, b.iterator)
val (vecAIt, vecBIt) = (Vector(a: _*).iterator.buffered, Vector(b: _*).iterator.buffered)
(body(bsAIt, bsBIt) == body(vecAIt, vecBIt)) &&
(!strict || (bsAIt.toSeq -> bsBIt.toSeq) == (vecAIt.toSeq -> vecBIt.toSeq))
(!strict || (bsAIt.toSeq → bsBIt.toSeq) == (vecAIt.toSeq → vecBIt.toSeq))
}
def likeVecBld(body: Builder[Byte, _] ⇒ Unit): Boolean = {

View file

@ -15,16 +15,16 @@ class PrettyDurationSpec extends FlatSpec with Matchers {
import scala.concurrent.duration._
val cases: Seq[(Duration, String)] =
9.nanos -> "9.000 ns" ::
95.nanos -> "95.00 ns" ::
999.nanos -> "999.0 ns" ::
1000.nanos -> "1.000 μs" ::
9500.nanos -> "9.500 μs" ::
9500.micros -> "9.500 ms" ::
9500.millis -> "9.500 s" ::
95.seconds -> "1.583 min" ::
95.minutes -> "1.583 h" ::
95.hours -> "3.958 d" ::
9.nanos → "9.000 ns" ::
95.nanos → "95.00 ns" ::
999.nanos → "999.0 ns" ::
1000.nanos → "1.000 μs" ::
9500.nanos → "9.500 μs" ::
9500.micros → "9.500 ms" ::
9500.millis → "9.500 s" ::
95.seconds → "1.583 min" ::
95.minutes → "1.583 h" ::
95.hours → "3.958 d" ::
Nil
cases foreach {

View file

@ -66,9 +66,10 @@ abstract class AbstractFSM[S, D] extends FSM[S, D] {
* @param stateTimeout default state timeout for this state
* @param stateFunctionBuilder partial function builder describing response to input
*/
final def when(stateName: S,
stateTimeout: FiniteDuration,
stateFunctionBuilder: FSMStateFunctionBuilder[S, D]): Unit =
final def when(
stateName: S,
stateTimeout: FiniteDuration,
stateFunctionBuilder: FSMStateFunctionBuilder[S, D]): Unit =
when(stateName, stateTimeout)(stateFunctionBuilder.build())
/**

View file

@ -96,7 +96,7 @@ final case class ActorIdentity(correlationId: Any, ref: Option[ActorRef]) {
@SerialVersionUID(1L)
final case class Terminated private[akka] (@BeanProperty actor: ActorRef)(
@BeanProperty val existenceConfirmed: Boolean,
@BeanProperty val addressTerminated: Boolean)
@BeanProperty val addressTerminated: Boolean)
extends AutoReceivedMessage with PossiblyHarmful with DeadLetterSuppression
/**
@ -189,7 +189,8 @@ object ActorInitializationException {
*/
@SerialVersionUID(1L)
final case class PreRestartException private[akka] (actor: ActorRef, cause: Throwable, originalCause: Throwable, messageOption: Option[Any])
extends ActorInitializationException(actor,
extends ActorInitializationException(
actor,
"exception in preRestart(" +
(if (originalCause == null) "null" else originalCause.getClass) + ", " +
(messageOption match { case Some(m: AnyRef) ⇒ m.getClass; case _ ⇒ "None" }) +
@ -205,7 +206,8 @@ final case class PreRestartException private[akka] (actor: ActorRef, cause: Thro
*/
@SerialVersionUID(1L)
final case class PostRestartException private[akka] (actor: ActorRef, cause: Throwable, originalCause: Throwable)
extends ActorInitializationException(actor,
extends ActorInitializationException(
actor,
"exception post restart (" + (if (originalCause == null) "null" else originalCause.getClass) + ")", cause)
/**

View file

@ -372,11 +372,11 @@ private[akka] object ActorCell {
* for! (waves hand)
*/
private[akka] class ActorCell(
val system: ActorSystemImpl,
val self: InternalActorRef,
val system: ActorSystemImpl,
val self: InternalActorRef,
final val props: Props, // Must be final so that it can be properly cleared in clearActorCellFields
val dispatcher: MessageDispatcher,
val parent: InternalActorRef)
val dispatcher: MessageDispatcher,
val parent: InternalActorRef)
extends UntypedActorContext with AbstractActorContext with Cell
with dungeon.ReceiveTimeout
with dungeon.Children
@ -598,7 +598,8 @@ private[akka] class ActorCell(
case NonFatal(e) ⇒
clearOutActorIfNonNull()
e match {
case i: InstantiationException ⇒ throw ActorInitializationException(self,
case i: InstantiationException ⇒ throw ActorInitializationException(
self,
"""exception during creation, this problem is likely to occur because the class of the Actor you tried to create is either,
a non-static inner class (in which case make it a static inner class or use Props(new ...) or Props( new Creator ... )
or is missing an appropriate, reachable no-args constructor.

View file

@ -254,7 +254,8 @@ sealed trait ActorPath extends Comparable[ActorPath] with Serializable {
*/
@SerialVersionUID(1L)
final case class RootActorPath(address: Address, name: String = "/") extends ActorPath {
require(name.length == 1 || name.indexOf('/', 1) == -1,
require(
name.length == 1 || name.indexOf('/', 1) == -1,
"/ may only exist at the beginning of the root actors name, " +
"it is a path separator and is not legal in ActorPath names: [%s]" format name)
require(name.indexOf('#') == -1, "# is a fragment separator and is not legal in ActorPath names: [%s]" format name)

View file

@ -302,11 +302,11 @@ private[akka] case object Nobody extends MinimalActorRef {
* INTERNAL API
*/
private[akka] class LocalActorRef private[akka] (
_system: ActorSystemImpl,
_props: Props,
_dispatcher: MessageDispatcher,
_mailboxType: MailboxType,
_supervisor: InternalActorRef,
_system: ActorSystemImpl,
_props: Props,
_dispatcher: MessageDispatcher,
_mailboxType: MailboxType,
_supervisor: InternalActorRef,
override val path: ActorPath)
extends ActorRefWithCell with LocalRef {
@ -518,9 +518,10 @@ private[akka] object DeadLetterActorRef {
*
* INTERNAL API
*/
private[akka] class EmptyLocalActorRef(override val provider: ActorRefProvider,
override val path: ActorPath,
val eventStream: EventStream) extends MinimalActorRef {
private[akka] class EmptyLocalActorRef(
override val provider: ActorRefProvider,
override val path: ActorPath,
val eventStream: EventStream) extends MinimalActorRef {
@deprecated("Use context.watch(actor) and receive Terminated(actor)", "2.2")
override private[akka] def isTerminated = true
@ -570,9 +571,10 @@ private[akka] class EmptyLocalActorRef(override val provider: ActorRefProvider,
*
* INTERNAL API
*/
private[akka] class DeadLetterActorRef(_provider: ActorRefProvider,
_path: ActorPath,
_eventStream: EventStream) extends EmptyLocalActorRef(_provider, _path, _eventStream) {
private[akka] class DeadLetterActorRef(
_provider: ActorRefProvider,
_path: ActorPath,
_eventStream: EventStream) extends EmptyLocalActorRef(_provider, _path, _eventStream) {
override def !(message: Any)(implicit sender: ActorRef = this): Unit = message match {
case null ⇒ throw new InvalidMessageException("Message is null")
@ -601,10 +603,10 @@ private[akka] class DeadLetterActorRef(_provider: ActorRefProvider,
* INTERNAL API
*/
private[akka] class VirtualPathContainer(
override val provider: ActorRefProvider,
override val path: ActorPath,
override val provider: ActorRefProvider,
override val path: ActorPath,
override val getParent: InternalActorRef,
val log: LoggingAdapter) extends MinimalActorRef {
val log: LoggingAdapter) extends MinimalActorRef {
private val children = new ConcurrentHashMap[String, InternalActorRef]
@ -705,10 +707,11 @@ private[akka] class VirtualPathContainer(
* When using the watch() feature you must ensure that upon reception of the
* Terminated message the watched actorRef is unwatch()ed.
*/
private[akka] final class FunctionRef(override val path: ActorPath,
override val provider: ActorRefProvider,
val eventStream: EventStream,
f: (ActorRef, Any) ⇒ Unit) extends MinimalActorRef {
private[akka] final class FunctionRef(
override val path: ActorPath,
override val provider: ActorRefProvider,
val eventStream: EventStream,
f: (ActorRef, Any) ⇒ Unit) extends MinimalActorRef {
override def !(message: Any)(implicit sender: ActorRef = Actor.noSender): Unit = {
f(sender, message)

View file

@ -105,14 +105,14 @@ trait ActorRefProvider {
* the latter can be suppressed by setting ``lookupDeploy`` to ``false``.
*/
def actorOf(
system: ActorSystemImpl,
props: Props,
supervisor: InternalActorRef,
path: ActorPath,
system: ActorSystemImpl,
props: Props,
supervisor: InternalActorRef,
path: ActorPath,
systemService: Boolean,
deploy: Option[Deploy],
lookupDeploy: Boolean,
async: Boolean): InternalActorRef
deploy: Option[Deploy],
lookupDeploy: Boolean,
async: Boolean): InternalActorRef
/**
* INTERNAL API
@ -475,20 +475,22 @@ private[akka] object LocalActorRefProvider {
* Depending on this class is not supported, only the [[ActorRefProvider]] interface is supported.
*/
private[akka] class LocalActorRefProvider private[akka] (
_systemName: String,
_systemName: String,
override val settings: ActorSystem.Settings,
val eventStream: EventStream,
val dynamicAccess: DynamicAccess,
val eventStream: EventStream,
val dynamicAccess: DynamicAccess,
override val deployer: Deployer,
_deadLetters: Option[ActorPath ⇒ InternalActorRef])
_deadLetters: Option[ActorPath ⇒ InternalActorRef])
extends ActorRefProvider {
// this is the constructor needed for reflectively instantiating the provider
def this(_systemName: String,
settings: ActorSystem.Settings,
eventStream: EventStream,
dynamicAccess: DynamicAccess) =
this(_systemName,
def this(
_systemName: String,
settings: ActorSystem.Settings,
eventStream: EventStream,
dynamicAccess: DynamicAccess) =
this(
_systemName,
settings,
eventStream,
dynamicAccess,
@ -776,7 +778,8 @@ private[akka] class LocalActorRefProvider private[akka] (
if (!system.dispatchers.hasDispatcher(r.routerDispatcher))
throw new ConfigurationException(s"Dispatcher [${p.dispatcher}] not configured for router of $path")
val routerProps = Props(p.deploy.copy(dispatcher = p.routerConfig.routerDispatcher),
val routerProps = Props(
p.deploy.copy(dispatcher = p.routerConfig.routerDispatcher),
classOf[RoutedActorCell.RouterActorCreator], Vector(p.routerConfig))
val routeeProps = p.withRouter(NoRouter)

View file

@ -218,7 +218,8 @@ object ActorSelection {
if (matchingChildren.isEmpty && !sel.wildcardFanOut)
emptyRef.tell(sel, sender)
else {
val m = sel.copy(elements = iter.toVector,
val m = sel.copy(
elements = iter.toVector,
wildcardFanOut = sel.wildcardFanOut || matchingChildren.size > 1)
matchingChildren.foreach(c ⇒ deliverSelection(c.asInstanceOf[InternalActorRef], sender, m))
}
@ -253,8 +254,8 @@ trait ScalaActorSelection {
*/
@SerialVersionUID(2L) // it has protobuf serialization in akka-remote
private[akka] final case class ActorSelectionMessage(
msg: Any,
elements: immutable.Iterable[SelectionPathElement],
msg: Any,
elements: immutable.Iterable[SelectionPathElement],
wildcardFanOut: Boolean)
extends AutoReceivedMessage with PossiblyHarmful {

View file

@ -505,11 +505,11 @@ abstract class ExtendedActorSystem extends ActorSystem {
}
private[akka] class ActorSystemImpl(
val name: String,
applicationConfig: Config,
classLoader: ClassLoader,
val name: String,
applicationConfig: Config,
classLoader: ClassLoader,
defaultExecutionContext: Option[ExecutionContext],
val guardianProps: Option[Props]) extends ExtendedActorSystem {
val guardianProps: Option[Props]) extends ExtendedActorSystem {
if (!name.matches("""^[a-zA-Z0-9][a-zA-Z0-9-_]*$"""))
throw new IllegalArgumentException(
@ -593,7 +593,7 @@ private[akka] class ActorSystemImpl(
eventStream.startStdoutLogger(settings)
val logFilter: LoggingFilter = {
val arguments = Vector(classOf[Settings] -> settings, classOf[EventStream] -> eventStream)
val arguments = Vector(classOf[Settings] → settings, classOf[EventStream] → eventStream)
dynamicAccess.createInstanceFor[LoggingFilter](LoggingFilter, arguments).get
}
@ -603,10 +603,10 @@ private[akka] class ActorSystemImpl(
val provider: ActorRefProvider = try {
val arguments = Vector(
classOf[String] -> name,
classOf[Settings] -> settings,
classOf[EventStream] -> eventStream,
classOf[DynamicAccess] -> dynamicAccess)
classOf[String] → name,
classOf[Settings] → settings,
classOf[EventStream] → eventStream,
classOf[DynamicAccess] → dynamicAccess)
dynamicAccess.createInstanceFor[ActorRefProvider](ProviderClass, arguments).get
} catch {
@ -698,9 +698,9 @@ private[akka] class ActorSystemImpl(
*/
protected def createScheduler(): Scheduler =
dynamicAccess.createInstanceFor[Scheduler](settings.SchedulerClass, immutable.Seq(
classOf[Config] -> settings.config,
classOf[LoggingAdapter] -> log,
classOf[ThreadFactory] -> threadFactory.withName(threadFactory.name + "-scheduler"))).get
classOf[Config] → settings.config,
classOf[LoggingAdapter] → log,
classOf[ThreadFactory] → threadFactory.withName(threadFactory.name + "-scheduler"))).get
//#create-scheduler
/*
@ -767,12 +767,12 @@ private[akka] class ActorSystemImpl(
def loadExtensions(key: String, throwOnLoadFail: Boolean): Unit = {
immutableSeq(settings.config.getStringList(key)) foreach { fqcn ⇒
dynamicAccess.getObjectFor[AnyRef](fqcn) recoverWith { case _ ⇒ dynamicAccess.createInstanceFor[AnyRef](fqcn, Nil) } match {
case Success(p: ExtensionIdProvider) ⇒ registerExtension(p.lookup())
case Success(p: ExtensionId[_]) ⇒ registerExtension(p)
case Success(other) ⇒
case Success(p: ExtensionIdProvider) ⇒ registerExtension(p.lookup())
case Success(p: ExtensionId[_]) ⇒ registerExtension(p)
case Success(other) ⇒
if (!throwOnLoadFail) log.error("[{}] is not an 'ExtensionIdProvider' or 'ExtensionId', skipping...", fqcn)
else throw new RuntimeException(s"[$fqcn] is not an 'ExtensionIdProvider' or 'ExtensionId'")
case Failure(problem) ⇒
case Failure(problem) ⇒
if (!throwOnLoadFail) log.error(problem, "While trying to load extension [{}], skipping...", fqcn)
else throw new RuntimeException(s"While trying to load extension [$fqcn]", problem)
}

View file

@ -35,12 +35,12 @@ object Deploy {
*/
@SerialVersionUID(2L)
final case class Deploy(
path: String = "",
config: Config = ConfigFactory.empty,
path: String = "",
config: Config = ConfigFactory.empty,
routerConfig: RouterConfig = NoRouter,
scope: Scope = NoScopeGiven,
dispatcher: String = Deploy.NoDispatcherGiven,
mailbox: String = Deploy.NoMailboxGiven) {
scope: Scope = NoScopeGiven,
dispatcher: String = Deploy.NoDispatcherGiven,
mailbox: String = Deploy.NoMailboxGiven) {
/**
* Java API to create a Deploy with the given RouterConfig
@ -137,7 +137,7 @@ private[akka] class Deployer(val settings: ActorSystem.Settings, val dynamicAcce
protected val default = config.getConfig("default")
val routerTypeMapping: Map[String, String] =
settings.config.getConfig("akka.actor.router.type-mapping").root.unwrapped.asScala.collect {
case (key, value: String) (key -> value)
case (key, value: String) ⇒ (key → value)
}.toMap
config.root.asScala flatMap {
@ -198,8 +198,8 @@ private[akka] class Deployer(val settings: ActorSystem.Settings, val dynamicAcce
s"[${args(0)._1.getName}] and optional [${args(1)._1.getName}] parameter", cause)
// first try with Config param, and then with Config and DynamicAccess parameters
val args1 = List(classOf[Config] -> deployment2)
val args2 = List(classOf[Config] -> deployment2, classOf[DynamicAccess] -> dynamicAccess)
val args1 = List(classOf[Config] → deployment2)
val args2 = List(classOf[Config] → deployment2, classOf[DynamicAccess] → dynamicAccess)
dynamicAccess.createInstanceFor[RouterConfig](fqn, args1).recover({
case e @ (_: IllegalArgumentException | _: ConfigException) ⇒ throw e
case e: NoSuchMethodException ⇒

View file

@ -150,5 +150,5 @@ abstract class ExtensionKey[T <: Extension](implicit m: ClassTag[T]) extends Ext
def this(clazz: Class[T]) = this()(ClassTag(clazz))
override def lookup(): ExtensionId[T] = this
def createExtension(system: ExtendedActorSystem): T = system.dynamicAccess.createInstanceFor[T](m.runtimeClass, List(classOf[ExtendedActorSystem] -> system)).get
def createExtension(system: ExtendedActorSystem): T = system.dynamicAccess.createInstanceFor[T](m.runtimeClass, List(classOf[ExtendedActorSystem] → system)).get
}

View file

@ -110,9 +110,10 @@ object FSM {
* This extractor is just convenience for matching a (S, S) pair, including a
* reminder what the new state is.
*/
object -> {
object `->` {
def unapply[S](in: (S, S)) = Some(in)
}
val `→` = `->`
/**
* Log Entry of the [[akka.actor.LoggingFSM]], can be obtained by calling `getLog`.
@ -319,7 +320,7 @@ trait FSM[S, D] extends Actor with Listeners with ActorLogging {
* This extractor is just convenience for matching a (S, S) pair, including a
* reminder what the new state is.
*/
val -> = FSM.->
val `->` = FSM.`->`
/**
* This case object is received in case of a state timeout.

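The `→` alias added above is what the spec files import (import FSM.`→`), so transition matches can be written with the Unicode arrow. Below is a minimal, purely illustrative FSM using it; the Door/Locked/Open names are hypothetical and not taken from this commit.

import akka.actor.{ Actor, FSM }

object Door {
  sealed trait State
  case object Locked extends State
  case object Open extends State
}

class Door extends Actor with FSM[Door.State, Unit] {
  import Door._
  import FSM.`→` // the alias introduced above; plain `->` matches identically

  startWith(Locked, ())

  when(Locked) { case Event("unlock", _) ⇒ goto(Open) }
  when(Open) { case Event("lock", _) ⇒ goto(Locked) }

  onTransition {
    case Locked → Open ⇒ log.info("unlocked") // `→` extracts the (from, to) state pair
  }

  initialize()
}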
View file

@ -380,9 +380,9 @@ abstract class SupervisorStrategy {
* @param loggingEnabled the strategy logs the failure if this is enabled (true), by default it is enabled
*/
case class AllForOneStrategy(
maxNrOfRetries: Int = -1,
withinTimeRange: Duration = Duration.Inf,
override val loggingEnabled: Boolean = true)(val decider: SupervisorStrategy.Decider)
maxNrOfRetries: Int = -1,
withinTimeRange: Duration = Duration.Inf,
override val loggingEnabled: Boolean = true)(val decider: SupervisorStrategy.Decider)
extends SupervisorStrategy {
import SupervisorStrategy._
@ -458,9 +458,9 @@ case class AllForOneStrategy(
* @param loggingEnabled the strategy logs the failure if this is enabled (true), by default it is enabled
*/
case class OneForOneStrategy(
maxNrOfRetries: Int = -1,
withinTimeRange: Duration = Duration.Inf,
override val loggingEnabled: Boolean = true)(val decider: SupervisorStrategy.Decider)
maxNrOfRetries: Int = -1,
withinTimeRange: Duration = Duration.Inf,
override val loggingEnabled: Boolean = true)(val decider: SupervisorStrategy.Decider)
extends SupervisorStrategy {
/**

View file

@ -34,9 +34,10 @@ import akka.dispatch.AbstractNodeQueue
* scheduled possibly one tick later than they could be (if checking that
* now() + delay &lt;= nextTick were done).
*/
class LightArrayRevolverScheduler(config: Config,
log: LoggingAdapter,
threadFactory: ThreadFactory)
class LightArrayRevolverScheduler(
config: Config,
log: LoggingAdapter,
threadFactory: ThreadFactory)
extends Scheduler with Closeable {
import Helpers.Requiring
@ -88,9 +89,10 @@ class LightArrayRevolverScheduler(config: Config,
}
}
override def schedule(initialDelay: FiniteDuration,
delay: FiniteDuration,
runnable: Runnable)(implicit executor: ExecutionContext): Cancellable = {
override def schedule(
initialDelay: FiniteDuration,
delay: FiniteDuration,
runnable: Runnable)(implicit executor: ExecutionContext): Cancellable = {
checkMaxDelay(roundUp(delay).toNanos)
val preparedEC = executor.prepare()
try new AtomicReference[Cancellable](InitialRepeatMarker) with Cancellable { self ⇒
@ -221,7 +223,7 @@ class LightArrayRevolverScheduler(config: Config,
time - start + // calculate the nanos since timer start
(ticks * tickNanos) + // adding the desired delay
tickNanos - 1 // rounding up
) / tickNanos).toInt // and converting to slot number
) / tickNanos).toInt // and converting to slot number
// tick is an Int that will wrap around, but toInt of futureTick gives us modulo operations
// and the difference (offset) will be correct in any case
val offset = futureTick - tick

View file

@ -24,12 +24,12 @@ import scala.util.control.NonFatal
* and swap out the cell ref.
*/
private[akka] class RepointableActorRef(
val system: ActorSystemImpl,
val props: Props,
val dispatcher: MessageDispatcher,
val system: ActorSystemImpl,
val props: Props,
val dispatcher: MessageDispatcher,
val mailboxType: MailboxType,
val supervisor: InternalActorRef,
val path: ActorPath)
val supervisor: InternalActorRef,
val path: ActorPath)
extends ActorRefWithCell with RepointableRef {
import AbstractActorRef.{ cellOffset, lookupOffset }
@ -176,10 +176,11 @@ private[akka] class RepointableActorRef(
protected def writeReplace(): AnyRef = SerializedActorRef(this)
}
private[akka] class UnstartedCell(val systemImpl: ActorSystemImpl,
val self: RepointableActorRef,
val props: Props,
val supervisor: InternalActorRef) extends Cell {
private[akka] class UnstartedCell(
val systemImpl: ActorSystemImpl,
val self: RepointableActorRef,
val props: Props,
val supervisor: InternalActorRef) extends Cell {
/*
* This lock protects all accesses to this cells queues. It also ensures

View file

@ -42,10 +42,11 @@ trait Scheduler {
*/
final def schedule(
initialDelay: FiniteDuration,
interval: FiniteDuration,
receiver: ActorRef,
message: Any)(implicit executor: ExecutionContext,
sender: ActorRef = Actor.noSender): Cancellable =
interval: FiniteDuration,
receiver: ActorRef,
message: Any)(implicit
executor: ExecutionContext,
sender: ActorRef = Actor.noSender): Cancellable =
schedule(initialDelay, interval, new Runnable {
def run = {
receiver ! message
@ -71,8 +72,9 @@ trait Scheduler {
*/
final def schedule(
initialDelay: FiniteDuration,
interval: FiniteDuration)(f: Unit)(
implicit executor: ExecutionContext): Cancellable =
interval: FiniteDuration)(f: Unit)(
implicit
executor: ExecutionContext): Cancellable =
schedule(initialDelay, interval, new Runnable { override def run = f })
/**
@ -93,8 +95,8 @@ trait Scheduler {
*/
def schedule(
initialDelay: FiniteDuration,
interval: FiniteDuration,
runnable: Runnable)(implicit executor: ExecutionContext): Cancellable
interval: FiniteDuration,
runnable: Runnable)(implicit executor: ExecutionContext): Cancellable
/**
* Schedules a message to be sent once with a delay, i.e. a time period that has
@ -103,10 +105,11 @@ trait Scheduler {
* Java & Scala API
*/
final def scheduleOnce(
delay: FiniteDuration,
delay: FiniteDuration,
receiver: ActorRef,
message: Any)(implicit executor: ExecutionContext,
sender: ActorRef = Actor.noSender): Cancellable =
message: Any)(implicit
executor: ExecutionContext,
sender: ActorRef = Actor.noSender): Cancellable =
scheduleOnce(delay, new Runnable {
override def run = receiver ! message
})
@ -118,7 +121,8 @@ trait Scheduler {
* Scala API
*/
final def scheduleOnce(delay: FiniteDuration)(f: Unit)(
implicit executor: ExecutionContext): Cancellable =
implicit
executor: ExecutionContext): Cancellable =
scheduleOnce(delay, new Runnable { override def run = f })
/**
@ -128,7 +132,7 @@ trait Scheduler {
* Java & Scala API
*/
def scheduleOnce(
delay: FiniteDuration,
delay: FiniteDuration,
runnable: Runnable)(implicit executor: ExecutionContext): Cancellable
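The scaladoc in this file covers both the periodic `schedule` overloads and the one-shot `scheduleOnce` overloads. A minimal usage sketch, assuming a throwaway actor system; the actor, the messages and the delays are invented:

import akka.actor.{ Actor, ActorSystem, Props }
import scala.concurrent.duration._

object SchedulerSketch extends App {
  val system = ActorSystem("scheduler-demo")
  import system.dispatcher // implicit ExecutionContext required by schedule/scheduleOnce

  val echo = system.actorOf(Props(new Actor {
    def receive = { case msg => println(s"got: $msg") }
  }), "echo")

  // periodic: send "tick" to the echo actor every 500 ms after a 1 s initial delay
  val ticking = system.scheduler.schedule(1.second, 500.millis, echo, "tick")

  // one-shot: after 3 s cancel the periodic task and shut the system down
  system.scheduler.scheduleOnce(3.seconds) {
    ticking.cancel()
    system.terminate()
  }
}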
/**

View file

@ -523,11 +523,11 @@ object TypedProps {
@SerialVersionUID(1L)
final case class TypedProps[T <: AnyRef] protected[TypedProps] (
interfaces: immutable.Seq[Class[_]],
creator: () ⇒ T,
dispatcher: String = TypedProps.defaultDispatcherId,
deploy: Deploy = Props.defaultDeploy,
timeout: Option[Timeout] = TypedProps.defaultTimeout,
loader: Option[ClassLoader] = TypedProps.defaultLoader) {
creator: () ⇒ T,
dispatcher: String = TypedProps.defaultDispatcherId,
deploy: Deploy = Props.defaultDeploy,
timeout: Option[Timeout] = TypedProps.defaultTimeout,
loader: Option[ClassLoader] = TypedProps.defaultLoader) {
/**
* Uses the supplied class as the factory for the TypedActor implementation,
@ -536,7 +536,8 @@ final case class TypedProps[T <: AnyRef] protected[TypedProps] (
* appended in the sequence of interfaces.
*/
def this(implementation: Class[T]) =
this(interfaces = TypedProps.extractInterfaces(implementation),
this(
interfaces = TypedProps.extractInterfaces(implementation),
creator = instantiator(implementation))
/**
@ -546,7 +547,8 @@ final case class TypedProps[T <: AnyRef] protected[TypedProps] (
* appended in the sequence of interfaces.
*/
def this(interface: Class[_ >: T], implementation: Creator[T]) =
this(interfaces = TypedProps.extractInterfaces(interface),
this(
interfaces = TypedProps.extractInterfaces(interface),
creator = implementation.create _)
/**
@ -556,7 +558,8 @@ final case class TypedProps[T <: AnyRef] protected[TypedProps] (
* appended in the sequence of interfaces.
*/
def this(interface: Class[_ >: T], implementation: Class[T]) =
this(interfaces = TypedProps.extractInterfaces(interface),
this(
interfaces = TypedProps.extractInterfaces(interface),
creator = instantiator(implementation))
/**

View file

@ -62,7 +62,8 @@ private[akka] trait Dispatch { this: ActorCell ⇒
if (req isInstance mbox.messageQueue) Create(None)
else {
val gotType = if (mbox.messageQueue == null) "null" else mbox.messageQueue.getClass.getName
Create(Some(ActorInitializationException(self,
Create(Some(ActorInitializationException(
self,
s"Actor [$self] requires mailbox type [$req] got [$gotType]")))
}
case _ ⇒ Create(None)

View file

@ -324,8 +324,8 @@ abstract class MessageDispatcherConfigurator(_config: Config, val prerequisites:
case "thread-pool-executor" new ThreadPoolExecutorConfigurator(config.getConfig("thread-pool-executor"), prerequisites)
case fqcn
val args = List(
classOf[Config] -> config,
classOf[DispatcherPrerequisites] -> prerequisites)
classOf[Config] → config,
classOf[DispatcherPrerequisites] → prerequisites)
prerequisites.dynamicAccess.createInstanceFor[ExecutorServiceConfigurator](fqcn, args).recover({
case exception ⇒ throw new IllegalArgumentException(
("""Cannot instantiate ExecutorServiceConfigurator ("executor = [%s]"), defined in [%s],
@ -379,14 +379,16 @@ object ForkJoinExecutorConfigurator {
/**
* INTERNAL AKKA USAGE ONLY
*/
final class AkkaForkJoinPool(parallelism: Int,
threadFactory: ForkJoinPool.ForkJoinWorkerThreadFactory,
unhandledExceptionHandler: Thread.UncaughtExceptionHandler,
asyncMode: Boolean)
final class AkkaForkJoinPool(
parallelism: Int,
threadFactory: ForkJoinPool.ForkJoinWorkerThreadFactory,
unhandledExceptionHandler: Thread.UncaughtExceptionHandler,
asyncMode: Boolean)
extends ForkJoinPool(parallelism, threadFactory, unhandledExceptionHandler, asyncMode) with LoadMetrics {
def this(parallelism: Int,
threadFactory: ForkJoinPool.ForkJoinWorkerThreadFactory,
unhandledExceptionHandler: Thread.UncaughtExceptionHandler) = this(parallelism, threadFactory, unhandledExceptionHandler, asyncMode = true)
def this(
parallelism: Int,
threadFactory: ForkJoinPool.ForkJoinWorkerThreadFactory,
unhandledExceptionHandler: Thread.UncaughtExceptionHandler) = this(parallelism, threadFactory, unhandledExceptionHandler, asyncMode = true)
override def execute(r: Runnable): Unit =
if (r ne null)
@ -427,9 +429,10 @@ class ForkJoinExecutorConfigurator(config: Config, prerequisites: DispatcherPrer
case x throw new IllegalStateException("The prerequisites for the ForkJoinExecutorConfigurator is a ForkJoinPool.ForkJoinWorkerThreadFactory!")
}
class ForkJoinExecutorServiceFactory(val threadFactory: ForkJoinPool.ForkJoinWorkerThreadFactory,
val parallelism: Int,
val asyncMode: Boolean) extends ExecutorServiceFactory {
class ForkJoinExecutorServiceFactory(
val threadFactory: ForkJoinPool.ForkJoinWorkerThreadFactory,
val parallelism: Int,
val asyncMode: Boolean) extends ExecutorServiceFactory {
def this(threadFactory: ForkJoinPool.ForkJoinWorkerThreadFactory, parallelism: Int) = this(threadFactory, parallelism, asyncMode = true)
def createExecutorService: ExecutorService = new AkkaForkJoinPool(parallelism, threadFactory, MonitorableThreadFactory.doNothing, asyncMode)
}

View file

@ -30,14 +30,14 @@ import scala.concurrent.duration.FiniteDuration
*/
@deprecated("Use BalancingPool instead of BalancingDispatcher", "2.3")
class BalancingDispatcher(
_configurator: MessageDispatcherConfigurator,
_id: String,
throughput: Int,
throughputDeadlineTime: Duration,
_mailboxType: MailboxType,
_configurator: MessageDispatcherConfigurator,
_id: String,
throughput: Int,
throughputDeadlineTime: Duration,
_mailboxType: MailboxType,
_executorServiceFactoryProvider: ExecutorServiceFactoryProvider,
_shutdownTimeout: FiniteDuration,
attemptTeamWork: Boolean)
_shutdownTimeout: FiniteDuration,
attemptTeamWork: Boolean)
extends Dispatcher(_configurator, _id, throughput, throughputDeadlineTime, _executorServiceFactoryProvider, _shutdownTimeout) {
/**

View file

@ -26,12 +26,12 @@ import java.util.concurrent.atomic.AtomicReferenceFieldUpdater
* Larger values (or zero or negative) increase throughput, smaller values increase fairness
*/
class Dispatcher(
_configurator: MessageDispatcherConfigurator,
val id: String,
val throughput: Int,
val throughputDeadlineTime: Duration,
_configurator: MessageDispatcherConfigurator,
val id: String,
val throughput: Int,
val throughputDeadlineTime: Duration,
executorServiceFactoryProvider: ExecutorServiceFactoryProvider,
val shutdownTimeout: FiniteDuration)
val shutdownTimeout: FiniteDuration)
extends MessageDispatcher(_configurator) {
import configurator.prerequisites._

View file

@ -30,12 +30,12 @@ trait DispatcherPrerequisites {
* INTERNAL API
*/
private[akka] final case class DefaultDispatcherPrerequisites(
val threadFactory: ThreadFactory,
val eventStream: EventStream,
val scheduler: Scheduler,
val dynamicAccess: DynamicAccess,
val settings: ActorSystem.Settings,
val mailboxes: Mailboxes,
val threadFactory: ThreadFactory,
val eventStream: EventStream,
val scheduler: Scheduler,
val dynamicAccess: DynamicAccess,
val settings: ActorSystem.Settings,
val mailboxes: Mailboxes,
val defaultExecutionContext: Option[ExecutionContext]) extends DispatcherPrerequisites
object Dispatchers {
@ -135,13 +135,13 @@ class Dispatchers(val settings: ActorSystem.Settings, val prerequisites: Dispatc
def simpleName = id.substring(id.lastIndexOf('.') + 1)
idConfig(id)
.withFallback(appConfig)
.withFallback(ConfigFactory.parseMap(Map("name" -> simpleName).asJava))
.withFallback(ConfigFactory.parseMap(Map("name" → simpleName).asJava))
.withFallback(defaultDispatcherConfig)
}
private def idConfig(id: String): Config = {
import scala.collection.JavaConverters._
ConfigFactory.parseMap(Map("id" -> id).asJava)
ConfigFactory.parseMap(Map("id" → id).asJava)
}
/**
@ -180,7 +180,7 @@ class Dispatchers(val settings: ActorSystem.Settings, val prerequisites: Dispatc
classOf[BalancingDispatcherConfigurator].getName)
case "PinnedDispatcher" new PinnedDispatcherConfigurator(cfg, prerequisites)
case fqn
val args = List(classOf[Config] -> cfg, classOf[DispatcherPrerequisites] -> prerequisites)
val args = List(classOf[Config] → cfg, classOf[DispatcherPrerequisites] → prerequisites)
prerequisites.dynamicAccess.createInstanceFor[MessageDispatcherConfigurator](fqn, args).recover({
case exception ⇒
throw new ConfigurationException(
@ -288,7 +288,8 @@ class PinnedDispatcherConfigurator(config: Config, prerequisites: DispatcherPrer
case e: ThreadPoolExecutorConfigurator ⇒ e.threadPoolConfig
case other ⇒
prerequisites.eventStream.publish(
Warning("PinnedDispatcherConfigurator",
Warning(
"PinnedDispatcherConfigurator",
this.getClass,
"PinnedDispatcher [%s] not configured to use ThreadPoolExecutor, falling back to default config.".format(
config.getString("id"))))

View file

@ -9,7 +9,7 @@ import akka.japi.{ Function ⇒ JFunc, Option ⇒ JOption, Procedure }
import scala.concurrent.{ Future, Promise, ExecutionContext, ExecutionContextExecutor, ExecutionContextExecutorService }
import java.lang.{ Iterable ⇒ JIterable }
import java.util.{ LinkedList ⇒ JLinkedList }
import java.util.concurrent.{ Executor, ExecutorService, Callable}
import java.util.concurrent.{ Executor, ExecutorService, Callable }
import scala.util.{ Try, Success, Failure }
import java.util.concurrent.CompletionStage
import java.util.concurrent.CompletableFuture

View file

@ -54,7 +54,7 @@ private[akka] object Mailbox {
* INTERNAL API
*/
private[akka] abstract class Mailbox(val messageQueue: MessageQueue)
extends ForkJoinTask[Unit] with SystemMessageQueue with Runnable {
extends ForkJoinTask[Unit] with SystemMessageQueue with Runnable {
import Mailbox._
@ -248,7 +248,7 @@ private[akka] abstract class Mailbox(val messageQueue: MessageQueue)
* Process the messages in the mailbox
*/
@tailrec private final def processMailbox(
left: Int = java.lang.Math.max(dispatcher.throughput, 1),
left: Int = java.lang.Math.max(dispatcher.throughput, 1),
deadlineNs: Long = if (dispatcher.isThroughputDeadlineTimeDefined == true) System.nanoTime + dispatcher.throughputDeadlineTime.toNanos else 0L): Unit =
if (shouldProcessMessage) {
val next = dequeue()
@ -391,7 +391,7 @@ class NodeMessageQueue extends AbstractNodeQueue[Envelope] with MessageQueue wit
* Discards overflowing messages into DeadLetters.
*/
class BoundedNodeMessageQueue(capacity: Int) extends AbstractBoundedNodeQueue[Envelope](capacity)
with MessageQueue with BoundedMessageQueueSemantics with MultipleConsumerSemantics {
with MessageQueue with BoundedMessageQueueSemantics with MultipleConsumerSemantics {
final def pushTimeOut: Duration = Duration.Undefined
final def enqueue(receiver: ActorRef, handle: Envelope): Unit =
@ -654,10 +654,11 @@ case class NonBlockingBoundedMailbox(val capacity: Int) extends MailboxType with
* BoundedMailbox is the default bounded MailboxType used by Akka Actors.
*/
final case class BoundedMailbox(val capacity: Int, override val pushTimeOut: FiniteDuration)
extends MailboxType with ProducesMessageQueue[BoundedMailbox.MessageQueue]
with ProducesPushTimeoutSemanticsMailbox {
extends MailboxType with ProducesMessageQueue[BoundedMailbox.MessageQueue]
with ProducesPushTimeoutSemanticsMailbox {
def this(settings: ActorSystem.Settings, config: Config) = this(config.getInt("mailbox-capacity"),
def this(settings: ActorSystem.Settings, config: Config) = this(
config.getInt("mailbox-capacity"),
config.getNanosDuration("mailbox-push-timeout-time"))
if (capacity < 0) throw new IllegalArgumentException("The capacity for BoundedMailbox can not be negative")
@ -669,7 +670,7 @@ final case class BoundedMailbox(val capacity: Int, override val pushTimeOut: Fin
object BoundedMailbox {
class MessageQueue(capacity: Int, final val pushTimeOut: FiniteDuration)
extends LinkedBlockingQueue[Envelope](capacity) with BoundedQueueBasedMessageQueue {
extends LinkedBlockingQueue[Envelope](capacity) with BoundedQueueBasedMessageQueue {
final def queue: BlockingQueue[Envelope] = this
}
}
@ -679,7 +680,7 @@ object BoundedMailbox {
* Extend this class and provide the Comparator in the constructor.
*/
class UnboundedPriorityMailbox(val cmp: Comparator[Envelope], val initialCapacity: Int)
extends MailboxType with ProducesMessageQueue[UnboundedPriorityMailbox.MessageQueue] {
extends MailboxType with ProducesMessageQueue[UnboundedPriorityMailbox.MessageQueue] {
def this(cmp: Comparator[Envelope]) = this(cmp, 11)
final override def create(owner: Option[ActorRef], system: Option[ActorSystem]): MessageQueue =
new UnboundedPriorityMailbox.MessageQueue(initialCapacity, cmp)
@ -687,7 +688,7 @@ class UnboundedPriorityMailbox(val cmp: Comparator[Envelope], val initialCapacit
object UnboundedPriorityMailbox {
class MessageQueue(initialCapacity: Int, cmp: Comparator[Envelope])
extends PriorityBlockingQueue[Envelope](initialCapacity, cmp) with UnboundedQueueBasedMessageQueue {
extends PriorityBlockingQueue[Envelope](initialCapacity, cmp) with UnboundedQueueBasedMessageQueue {
final def queue: Queue[Envelope] = this
}
}
@ -697,8 +698,8 @@ object UnboundedPriorityMailbox {
* Extend this class and provide the Comparator in the constructor.
*/
class BoundedPriorityMailbox( final val cmp: Comparator[Envelope], final val capacity: Int, override final val pushTimeOut: Duration)
extends MailboxType with ProducesMessageQueue[BoundedPriorityMailbox.MessageQueue]
with ProducesPushTimeoutSemanticsMailbox {
extends MailboxType with ProducesMessageQueue[BoundedPriorityMailbox.MessageQueue]
with ProducesPushTimeoutSemanticsMailbox {
if (capacity < 0) throw new IllegalArgumentException("The capacity for BoundedMailbox can not be negative")
if (pushTimeOut eq null) throw new IllegalArgumentException("The push time-out for BoundedMailbox can not be null")
@ -709,8 +710,8 @@ class BoundedPriorityMailbox( final val cmp: Comparator[Envelope], final val cap
object BoundedPriorityMailbox {
class MessageQueue(capacity: Int, cmp: Comparator[Envelope], val pushTimeOut: Duration)
extends BoundedBlockingQueue[Envelope](capacity, new PriorityQueue[Envelope](11, cmp))
with BoundedQueueBasedMessageQueue {
extends BoundedBlockingQueue[Envelope](capacity, new PriorityQueue[Envelope](11, cmp))
with BoundedQueueBasedMessageQueue {
final def queue: BlockingQueue[Envelope] = this
}
}
@ -721,7 +722,7 @@ object BoundedPriorityMailbox {
* Extend this class and provide the Comparator in the constructor.
*/
class UnboundedStablePriorityMailbox(val cmp: Comparator[Envelope], val initialCapacity: Int)
extends MailboxType with ProducesMessageQueue[UnboundedStablePriorityMailbox.MessageQueue] {
extends MailboxType with ProducesMessageQueue[UnboundedStablePriorityMailbox.MessageQueue] {
def this(cmp: Comparator[Envelope]) = this(cmp, 11)
final override def create(owner: Option[ActorRef], system: Option[ActorSystem]): MessageQueue =
new UnboundedStablePriorityMailbox.MessageQueue(initialCapacity, cmp)
@ -729,7 +730,7 @@ class UnboundedStablePriorityMailbox(val cmp: Comparator[Envelope], val initialC
object UnboundedStablePriorityMailbox {
class MessageQueue(initialCapacity: Int, cmp: Comparator[Envelope])
extends StablePriorityBlockingQueue[Envelope](initialCapacity, cmp) with UnboundedQueueBasedMessageQueue {
extends StablePriorityBlockingQueue[Envelope](initialCapacity, cmp) with UnboundedQueueBasedMessageQueue {
final def queue: Queue[Envelope] = this
}
}
@ -740,8 +741,8 @@ object UnboundedStablePriorityMailbox {
* Extend this class and provide the Comparator in the constructor.
*/
class BoundedStablePriorityMailbox( final val cmp: Comparator[Envelope], final val capacity: Int, override final val pushTimeOut: Duration)
extends MailboxType with ProducesMessageQueue[BoundedStablePriorityMailbox.MessageQueue]
with ProducesPushTimeoutSemanticsMailbox {
extends MailboxType with ProducesMessageQueue[BoundedStablePriorityMailbox.MessageQueue]
with ProducesPushTimeoutSemanticsMailbox {
if (capacity < 0) throw new IllegalArgumentException("The capacity for BoundedMailbox can not be negative")
if (pushTimeOut eq null) throw new IllegalArgumentException("The push time-out for BoundedMailbox can not be null")
@ -752,8 +753,8 @@ class BoundedStablePriorityMailbox( final val cmp: Comparator[Envelope], final v
object BoundedStablePriorityMailbox {
class MessageQueue(capacity: Int, cmp: Comparator[Envelope], val pushTimeOut: Duration)
extends BoundedBlockingQueue[Envelope](capacity, new StablePriorityQueue[Envelope](11, cmp))
with BoundedQueueBasedMessageQueue {
extends BoundedBlockingQueue[Envelope](capacity, new StablePriorityQueue[Envelope](11, cmp))
with BoundedQueueBasedMessageQueue {
final def queue: BlockingQueue[Envelope] = this
}
}
@ -779,10 +780,11 @@ object UnboundedDequeBasedMailbox {
* BoundedDequeBasedMailbox is an bounded MailboxType, backed by a Deque.
*/
case class BoundedDequeBasedMailbox( final val capacity: Int, override final val pushTimeOut: FiniteDuration)
extends MailboxType with ProducesMessageQueue[BoundedDequeBasedMailbox.MessageQueue]
with ProducesPushTimeoutSemanticsMailbox {
extends MailboxType with ProducesMessageQueue[BoundedDequeBasedMailbox.MessageQueue]
with ProducesPushTimeoutSemanticsMailbox {
def this(settings: ActorSystem.Settings, config: Config) = this(config.getInt("mailbox-capacity"),
def this(settings: ActorSystem.Settings, config: Config) = this(
config.getInt("mailbox-capacity"),
config.getNanosDuration("mailbox-push-timeout-time"))
if (capacity < 0) throw new IllegalArgumentException("The capacity for BoundedDequeBasedMailbox can not be negative")
@ -794,7 +796,7 @@ case class BoundedDequeBasedMailbox( final val capacity: Int, override final val
object BoundedDequeBasedMailbox {
class MessageQueue(capacity: Int, val pushTimeOut: FiniteDuration)
extends LinkedBlockingDeque[Envelope](capacity) with BoundedDequeBasedMessageQueue {
extends LinkedBlockingDeque[Envelope](capacity) with BoundedDequeBasedMessageQueue {
final val queue = this
}
}
@ -856,9 +858,10 @@ object UnboundedControlAwareMailbox {
* to allow messages that extend [[akka.dispatch.ControlMessage]] to be delivered with priority.
*/
final case class BoundedControlAwareMailbox(capacity: Int, override final val pushTimeOut: FiniteDuration) extends MailboxType
with ProducesMessageQueue[BoundedControlAwareMailbox.MessageQueue]
with ProducesPushTimeoutSemanticsMailbox {
def this(settings: ActorSystem.Settings, config: Config) = this(config.getInt("mailbox-capacity"),
with ProducesMessageQueue[BoundedControlAwareMailbox.MessageQueue]
with ProducesPushTimeoutSemanticsMailbox {
def this(settings: ActorSystem.Settings, config: Config) = this(
config.getInt("mailbox-capacity"),
config.getNanosDuration("mailbox-push-timeout-time"))
def create(owner: Option[ActorRef], system: Option[ActorSystem]): MessageQueue = new BoundedControlAwareMailbox.MessageQueue(capacity, pushTimeOut)
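The bounded mailbox types above share one pattern: a `(settings, config)` constructor that reads `mailbox-capacity` and `mailbox-push-timeout-time` from the mailbox's config section. A sketch of wiring one up, assuming an invented mailbox id `demo-bounded-mailbox` and a throwaway actor:

import akka.actor.{ Actor, ActorSystem, Props }
import com.typesafe.config.ConfigFactory

object BoundedMailboxSketch extends App {
  // the two settings read by BoundedMailbox(settings, config)
  val config = ConfigFactory.parseString("""
    demo-bounded-mailbox {
      mailbox-type = "akka.dispatch.BoundedMailbox"
      mailbox-capacity = 1000
      mailbox-push-timeout-time = 10s
    }
  """).withFallback(ConfigFactory.load())

  val system = ActorSystem("mailbox-demo", config)

  class Printer extends Actor {
    def receive = { case msg => println(msg) }
  }

  // attach the bounded mailbox to an actor through its Props
  val printer = system.actorOf(Props(new Printer).withMailbox("demo-bounded-mailbox"), "printer")
  printer ! "hello"
}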

View file

@ -23,10 +23,10 @@ object Mailboxes {
}
private[akka] class Mailboxes(
val settings: ActorSystem.Settings,
val settings: ActorSystem.Settings,
val eventStream: EventStream,
dynamicAccess: DynamicAccess,
deadLetters: ActorRef) {
dynamicAccess: DynamicAccess,
deadLetters: ActorRef) {
import Mailboxes._
@ -187,7 +187,7 @@ private[akka] class Mailboxes(
val mailboxType = conf.getString("mailbox-type") match {
case "" throw new ConfigurationException(s"The setting mailbox-type, defined in [$id] is empty")
case fqcn
val args = List(classOf[ActorSystem.Settings] -> settings, classOf[Config] -> conf)
val args = List(classOf[ActorSystem.Settings] settings, classOf[Config] conf)
dynamicAccess.createInstanceFor[MailboxType](fqcn, args).recover({
case exception ⇒
throw new IllegalArgumentException(
@ -228,7 +228,7 @@ private[akka] class Mailboxes(
//INTERNAL API
private def config(id: String): Config = {
import scala.collection.JavaConverters._
ConfigFactory.parseMap(Map("id" -> id).asJava)
ConfigFactory.parseMap(Map("id" → id).asJava)
.withFallback(settings.config.getConfig(id))
.withFallback(defaultMailboxConfig)
}

View file

@ -15,12 +15,13 @@ import scala.concurrent.duration.FiniteDuration
* the `lookup` method in [[akka.dispatch.Dispatchers]].
*/
class PinnedDispatcher(
_configurator: MessageDispatcherConfigurator,
_actor: ActorCell,
_id: String,
_shutdownTimeout: FiniteDuration,
_configurator: MessageDispatcherConfigurator,
_actor: ActorCell,
_id: String,
_shutdownTimeout: FiniteDuration,
_threadPoolConfig: ThreadPoolConfig)
extends Dispatcher(_configurator,
extends Dispatcher(
_configurator,
_id,
Int.MaxValue,
Duration.Zero,

View file

@ -65,12 +65,13 @@ trait ExecutorServiceFactoryProvider {
/**
* A small configuration DSL to create ThreadPoolExecutors that can be provided as an ExecutorServiceFactoryProvider to Dispatcher
*/
final case class ThreadPoolConfig(allowCorePoolTimeout: Boolean = ThreadPoolConfig.defaultAllowCoreThreadTimeout,
corePoolSize: Int = ThreadPoolConfig.defaultCorePoolSize,
maxPoolSize: Int = ThreadPoolConfig.defaultMaxPoolSize,
threadTimeout: Duration = ThreadPoolConfig.defaultTimeout,
queueFactory: ThreadPoolConfig.QueueFactory = ThreadPoolConfig.linkedBlockingQueue(),
rejectionPolicy: RejectedExecutionHandler = ThreadPoolConfig.defaultRejectionPolicy)
final case class ThreadPoolConfig(
allowCorePoolTimeout: Boolean = ThreadPoolConfig.defaultAllowCoreThreadTimeout,
corePoolSize: Int = ThreadPoolConfig.defaultCorePoolSize,
maxPoolSize: Int = ThreadPoolConfig.defaultMaxPoolSize,
threadTimeout: Duration = ThreadPoolConfig.defaultTimeout,
queueFactory: ThreadPoolConfig.QueueFactory = ThreadPoolConfig.linkedBlockingQueue(),
rejectionPolicy: RejectedExecutionHandler = ThreadPoolConfig.defaultRejectionPolicy)
extends ExecutorServiceFactoryProvider {
class ThreadPoolExecutorServiceFactory(val threadFactory: ThreadFactory) extends ExecutorServiceFactory {
def createExecutorService: ExecutorService = {
@ -173,11 +174,12 @@ object MonitorableThreadFactory {
}
}
final case class MonitorableThreadFactory(name: String,
daemonic: Boolean,
contextClassLoader: Option[ClassLoader],
exceptionHandler: Thread.UncaughtExceptionHandler = MonitorableThreadFactory.doNothing,
protected val counter: AtomicLong = new AtomicLong)
final case class MonitorableThreadFactory(
name: String,
daemonic: Boolean,
contextClassLoader: Option[ClassLoader],
exceptionHandler: Thread.UncaughtExceptionHandler = MonitorableThreadFactory.doNothing,
protected val counter: AtomicLong = new AtomicLong)
extends ThreadFactory with ForkJoinPool.ForkJoinWorkerThreadFactory {
def newThread(pool: ForkJoinPool): ForkJoinWorkerThread = {

View file

@ -261,6 +261,6 @@ private[akka] final case class Failed(child: ActorRef, cause: Throwable, uid: In
@SerialVersionUID(1L)
private[akka] final case class DeathWatchNotification(
actor: ActorRef,
actor: ActorRef,
existenceConfirmed: Boolean,
addressTerminated: Boolean) extends SystemMessage with DeadLetterSuppression
addressTerminated: Boolean) extends SystemMessage with DeadLetterSuppression

View file

@ -572,9 +572,9 @@ object Logging {
}
/**
* Obtain LoggingAdapter with MDC support for the given actor.
* Don't use it outside its specific Actor as it isn't thread safe
*/
* Obtain LoggingAdapter with MDC support for the given actor.
* Don't use it outside its specific Actor as it isn't thread safe
*/
def getLogger(logSource: Actor): DiagnosticLoggingAdapter = apply(logSource)
/**

View file

@ -58,7 +58,7 @@ object SimpleDnsCache {
new Cache(
queue + new ExpiryEntry(answer.name, until),
cache + (answer.name -> CacheEntry(answer, until)),
cache + (answer.name → CacheEntry(answer, until)),
clock)
}

View file

@ -110,11 +110,12 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider {
* @param localAddress optionally specifies a specific address to bind to
* @param options Please refer to the `Tcp.SO` object for a list of all supported options.
*/
final case class Connect(remoteAddress: InetSocketAddress,
localAddress: Option[InetSocketAddress] = None,
options: immutable.Traversable[SocketOption] = Nil,
timeout: Option[FiniteDuration] = None,
pullMode: Boolean = false) extends Command
final case class Connect(
remoteAddress: InetSocketAddress,
localAddress: Option[InetSocketAddress] = None,
options: immutable.Traversable[SocketOption] = Nil,
timeout: Option[FiniteDuration] = None,
pullMode: Boolean = false) extends Command
/**
* The Bind message is send to the TCP manager actor, which is obtained via
@ -135,11 +136,12 @@ object Tcp extends ExtensionId[TcpExt] with ExtensionIdProvider {
*
* @param options Please refer to the `Tcp.SO` object for a list of all supported options.
*/
final case class Bind(handler: ActorRef,
localAddress: InetSocketAddress,
backlog: Int = 100,
options: immutable.Traversable[SocketOption] = Nil,
pullMode: Boolean = false) extends Command
final case class Bind(
handler: ActorRef,
localAddress: InetSocketAddress,
backlog: Int = 100,
options: immutable.Traversable[SocketOption] = Nil,
pullMode: Boolean = false) extends Command
/**
* This message must be sent to a TCP connection actor after receiving the
@ -624,11 +626,12 @@ object TcpMessage {
* @param timeout is the desired connection timeout, `null` means "no timeout"
* @param pullMode enables pull based reading from the connection
*/
def connect(remoteAddress: InetSocketAddress,
localAddress: InetSocketAddress,
options: JIterable[SocketOption],
timeout: FiniteDuration,
pullMode: Boolean): Command = Connect(remoteAddress, Option(localAddress), options, Option(timeout), pullMode)
def connect(
remoteAddress: InetSocketAddress,
localAddress: InetSocketAddress,
options: JIterable[SocketOption],
timeout: FiniteDuration,
pullMode: Boolean): Command = Connect(remoteAddress, Option(localAddress), options, Option(timeout), pullMode)
/**
* Connect to the given `remoteAddress` without binding to a local address and without
@ -658,17 +661,19 @@ object TcpMessage {
* @param pullMode enables pull based accepting and of connections and pull
* based reading from the accepted connections.
*/
def bind(handler: ActorRef,
endpoint: InetSocketAddress,
backlog: Int,
options: JIterable[SocketOption],
pullMode: Boolean): Command = Bind(handler, endpoint, backlog, options, pullMode)
def bind(
handler: ActorRef,
endpoint: InetSocketAddress,
backlog: Int,
options: JIterable[SocketOption],
pullMode: Boolean): Command = Bind(handler, endpoint, backlog, options, pullMode)
/**
* Open a listening socket without specifying options.
*/
def bind(handler: ActorRef,
endpoint: InetSocketAddress,
backlog: Int): Command = Bind(handler, endpoint, backlog, Nil)
def bind(
handler: ActorRef,
endpoint: InetSocketAddress,
backlog: Int): Command = Bind(handler, endpoint, backlog, Nil)
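`Connect` and `Bind` above are plain messages sent to the TCP manager: the listener answers with `Bound` or `CommandFailed`, and each accepted connection is announced with `Connected`. A minimal echo-server sketch, assuming an invented host and port:

import java.net.InetSocketAddress
import akka.actor.Actor
import akka.io.{ IO, Tcp }

class EchoServer extends Actor {
  import Tcp._
  import context.system

  // ask the TCP manager to bind; it answers with Bound or CommandFailed
  IO(Tcp) ! Bind(self, new InetSocketAddress("localhost", 8080))

  def receive = {
    case Bound(localAddress) =>
      println(s"listening on $localAddress")
    case CommandFailed(_: Bind) =>
      context.stop(self)
    case Connected(remote, local) =>
      // register ourselves as handler for the new connection actor (the sender)
      sender() ! Register(self)
    case Received(data) =>
      sender() ! Write(data) // echo the bytes back
    case _: ConnectionClosed =>
      () // per-connection cleanup would go here
  }
}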
/**
* This message must be sent to a TCP connection actor after receiving the

View file

@ -388,9 +388,9 @@ private[io] abstract class TcpConnection(val tcp: TcpExt, val channel: SocketCha
class PendingBufferWrite(
val commander: ActorRef,
remainingData: ByteString,
ack: Any,
buffer: ByteBuffer,
tail: WriteCommand) extends PendingWrite {
ack: Any,
buffer: ByteBuffer,
tail: WriteCommand) extends PendingWrite {
def doWrite(info: ConnectionInfo): PendingWrite = {
@tailrec def writeToChannel(data: ByteString): PendingWrite = {
@ -429,11 +429,11 @@ private[io] abstract class TcpConnection(val tcp: TcpExt, val channel: SocketCha
class PendingWriteFile(
val commander: ActorRef,
fileChannel: FileChannel,
offset: Long,
remaining: Long,
ack: Event,
tail: WriteCommand) extends PendingWrite with Runnable {
fileChannel: FileChannel,
offset: Long,
remaining: Long,
ack: Event,
tail: WriteCommand) extends PendingWrite with Runnable {
def doWrite(info: ConnectionInfo): PendingWrite = {
tcp.fileIoDispatcher.execute(this)
@ -479,10 +479,11 @@ private[io] object TcpConnection {
/**
* Groups required connection-related data that are only available once the connection has been fully established.
*/
final case class ConnectionInfo(registration: ChannelRegistration,
handler: ActorRef,
keepOpenOnPeerClosed: Boolean,
useResumeWriting: Boolean)
final case class ConnectionInfo(
registration: ChannelRegistration,
handler: ActorRef,
keepOpenOnPeerClosed: Boolean,
useResumeWriting: Boolean)
// INTERNAL MESSAGES

View file

@ -15,12 +15,13 @@ import akka.io.Inet.SocketOption
*
* INTERNAL API
*/
private[io] class TcpIncomingConnection(_tcp: TcpExt,
_channel: SocketChannel,
registry: ChannelRegistry,
bindHandler: ActorRef,
options: immutable.Traversable[SocketOption],
readThrottling: Boolean)
private[io] class TcpIncomingConnection(
_tcp: TcpExt,
_channel: SocketChannel,
registry: ChannelRegistry,
bindHandler: ActorRef,
options: immutable.Traversable[SocketOption],
readThrottling: Boolean)
extends TcpConnection(_tcp, _channel, readThrottling) {
signDeathPact(bindHandler)

View file

@ -31,11 +31,12 @@ private[io] object TcpListener {
/**
* INTERNAL API
*/
private[io] class TcpListener(selectorRouter: ActorRef,
tcp: TcpExt,
channelRegistry: ChannelRegistry,
bindCommander: ActorRef,
bind: Bind)
private[io] class TcpListener(
selectorRouter: ActorRef,
tcp: TcpExt,
channelRegistry: ChannelRegistry,
bindCommander: ActorRef,
bind: Bind)
extends Actor with ActorLogging with RequiresMessageQueue[UnboundedMessageQueueSemantics] {
import TcpListener._

View file

@ -19,10 +19,11 @@ import akka.io.Tcp._
*
* INTERNAL API
*/
private[io] class TcpOutgoingConnection(_tcp: TcpExt,
channelRegistry: ChannelRegistry,
commander: ActorRef,
connect: Connect)
private[io] class TcpOutgoingConnection(
_tcp: TcpExt,
channelRegistry: ChannelRegistry,
commander: ActorRef,
connect: Connect)
extends TcpConnection(_tcp, SocketChannel.open().configureBlocking(false).asInstanceOf[SocketChannel], connect.pullMode) {
import context._

View file

@ -92,9 +92,10 @@ object Udp extends ExtensionId[UdpExt] with ExtensionIdProvider {
* The listener actor for the newly bound port will reply with a [[Bound]]
* message, or the manager will reply with a [[CommandFailed]] message.
*/
final case class Bind(handler: ActorRef,
localAddress: InetSocketAddress,
options: immutable.Traversable[SocketOption] = Nil) extends Command
final case class Bind(
handler: ActorRef,
localAddress: InetSocketAddress,
options: immutable.Traversable[SocketOption] = Nil) extends Command
/**
* Send this message to the listener actor that previously sent a [[Bound]]

View file

@ -84,10 +84,11 @@ object UdpConnected extends ExtensionId[UdpConnectedExt] with ExtensionIdProvide
* which is restricted to sending to and receiving from the given `remoteAddress`.
* All received datagrams will be sent to the designated `handler` actor.
*/
final case class Connect(handler: ActorRef,
remoteAddress: InetSocketAddress,
localAddress: Option[InetSocketAddress] = None,
options: immutable.Traversable[SocketOption] = Nil) extends Command
final case class Connect(
handler: ActorRef,
remoteAddress: InetSocketAddress,
localAddress: Option[InetSocketAddress] = None,
options: immutable.Traversable[SocketOption] = Nil) extends Command
/**
* Send this message to a connection actor (which had previously sent the
@ -176,21 +177,24 @@ object UdpConnectedMessage {
* which is restricted to sending to and receiving from the given `remoteAddress`.
* All received datagrams will be sent to the designated `handler` actor.
*/
def connect(handler: ActorRef,
remoteAddress: InetSocketAddress,
localAddress: InetSocketAddress,
options: JIterable[SocketOption]): Command = Connect(handler, remoteAddress, Some(localAddress), options)
def connect(
handler: ActorRef,
remoteAddress: InetSocketAddress,
localAddress: InetSocketAddress,
options: JIterable[SocketOption]): Command = Connect(handler, remoteAddress, Some(localAddress), options)
/**
* Connect without specifying the `localAddress`.
*/
def connect(handler: ActorRef,
remoteAddress: InetSocketAddress,
options: JIterable[SocketOption]): Command = Connect(handler, remoteAddress, None, options)
def connect(
handler: ActorRef,
remoteAddress: InetSocketAddress,
options: JIterable[SocketOption]): Command = Connect(handler, remoteAddress, None, options)
/**
* Connect without specifying the `localAddress` or `options`.
*/
def connect(handler: ActorRef,
remoteAddress: InetSocketAddress): Command = Connect(handler, remoteAddress, None, Nil)
def connect(
handler: ActorRef,
remoteAddress: InetSocketAddress): Command = Connect(handler, remoteAddress, None, Nil)
/**
* This message is understood by the connection actors to send data to their

View file

@ -18,10 +18,11 @@ import akka.io.UdpConnected._
/**
* INTERNAL API
*/
private[io] class UdpConnection(udpConn: UdpConnectedExt,
channelRegistry: ChannelRegistry,
commander: ActorRef,
connect: Connect)
private[io] class UdpConnection(
udpConn: UdpConnectedExt,
channelRegistry: ChannelRegistry,
commander: ActorRef,
connect: Connect)
extends Actor with ActorLogging with RequiresMessageQueue[UnboundedMessageQueueSemantics] {
import connect._
@ -153,7 +154,8 @@ private[io] class UdpConnection(udpConn: UdpConnectedExt,
thunk
} catch {
case NonFatal(e) ⇒
log.debug("Failure while connecting UDP channel to remote address [{}] local address [{}]: {}",
log.debug(
"Failure while connecting UDP channel to remote address [{}] local address [{}]: {}",
remoteAddress, localAddress.getOrElse("undefined"), e)
commander ! CommandFailed(connect)
context.stop(self)

View file

@ -19,10 +19,11 @@ import akka.io.Udp._
/**
* INTERNAL API
*/
private[io] class UdpListener(val udp: UdpExt,
channelRegistry: ChannelRegistry,
bindCommander: ActorRef,
bind: Bind)
private[io] class UdpListener(
val udp: UdpExt,
channelRegistry: ChannelRegistry,
bindCommander: ActorRef,
bind: Bind)
extends Actor with ActorLogging with WithUdpSend with RequiresMessageQueue[UnboundedMessageQueueSemantics] {
import udp.bufferPool

View file

@ -14,10 +14,11 @@ import akka.actor._
/**
* INTERNAL API
*/
private[io] class UdpSender(val udp: UdpExt,
channelRegistry: ChannelRegistry,
commander: ActorRef,
options: immutable.Traversable[SocketOption])
private[io] class UdpSender(
val udp: UdpExt,
channelRegistry: ChannelRegistry,
commander: ActorRef,
options: immutable.Traversable[SocketOption])
extends Actor with ActorLogging with WithUdpSend with RequiresMessageQueue[UnboundedMessageQueueSemantics] {
val channel = {

View file

@ -51,7 +51,8 @@ private[io] trait WithUdpSend {
} catch {
case NonFatal(e) ⇒
sender() ! CommandFailed(send)
log.debug("Failure while sending UDP datagram to remote address [{}]: {}",
log.debug(
"Failure while sending UDP datagram to remote address [{}]: {}",
send.target, e)
retriedSend = false
pendingSend = null

View file

@ -16,12 +16,12 @@ import akka.actor.SupervisorStrategy._
*/
private class BackoffOnRestartSupervisor(
val childProps: Props,
val childName: String,
minBackoff: FiniteDuration,
maxBackoff: FiniteDuration,
val reset: BackoffReset,
randomFactor: Double,
strategy: OneForOneStrategy)
val childName: String,
minBackoff: FiniteDuration,
maxBackoff: FiniteDuration,
val reset: BackoffReset,
randomFactor: Double,
strategy: OneForOneStrategy)
extends Actor with HandleBackoff
with ActorLogging {

View file

@ -70,10 +70,10 @@ object Backoff {
* In order to skip this additional delay pass in `0`.
*/
def onFailure(
childProps: Props,
childName: String,
minBackoff: FiniteDuration,
maxBackoff: FiniteDuration,
childProps: Props,
childName: String,
minBackoff: FiniteDuration,
maxBackoff: FiniteDuration,
randomFactor: Double): BackoffOptions =
BackoffOptionsImpl(RestartImpliesFailure, childProps, childName, minBackoff, maxBackoff, randomFactor)
@ -131,10 +131,10 @@ object Backoff {
* In order to skip this additional delay pass in `0`.
*/
def onStop(
childProps: Props,
childName: String,
minBackoff: FiniteDuration,
maxBackoff: FiniteDuration,
childProps: Props,
childName: String,
minBackoff: FiniteDuration,
maxBackoff: FiniteDuration,
randomFactor: Double): BackoffOptions =
BackoffOptionsImpl(StopImpliesFailure, childProps, childName, minBackoff, maxBackoff, randomFactor)
}
@ -183,14 +183,14 @@ trait BackoffOptions {
}
private final case class BackoffOptionsImpl(
backoffType: BackoffType = RestartImpliesFailure,
childProps: Props,
childName: String,
minBackoff: FiniteDuration,
maxBackoff: FiniteDuration,
randomFactor: Double,
reset: Option[BackoffReset] = None,
supervisorStrategy: OneForOneStrategy = OneForOneStrategy()(SupervisorStrategy.defaultStrategy.decider)) extends BackoffOptions {
backoffType: BackoffType = RestartImpliesFailure,
childProps: Props,
childName: String,
minBackoff: FiniteDuration,
maxBackoff: FiniteDuration,
randomFactor: Double,
reset: Option[BackoffReset] = None,
supervisorStrategy: OneForOneStrategy = OneForOneStrategy()(SupervisorStrategy.defaultStrategy.decider)) extends BackoffOptions {
val backoffReset = reset.getOrElse(AutoReset(minBackoff))

View file

@ -37,10 +37,10 @@ object BackoffSupervisor {
* In order to skip this additional delay pass in `0`.
*/
def props(
childProps: Props,
childName: String,
minBackoff: FiniteDuration,
maxBackoff: FiniteDuration,
childProps: Props,
childName: String,
minBackoff: FiniteDuration,
maxBackoff: FiniteDuration,
randomFactor: Double): Props = {
propsWithSupervisorStrategy(childProps, childName, minBackoff, maxBackoff, randomFactor, SupervisorStrategy.defaultStrategy)
}
@ -66,12 +66,12 @@ object BackoffSupervisor {
* in the child
*/
def propsWithSupervisorStrategy(
childProps: Props,
childName: String,
minBackoff: FiniteDuration,
maxBackoff: FiniteDuration,
childProps: Props,
childName: String,
minBackoff: FiniteDuration,
maxBackoff: FiniteDuration,
randomFactor: Double,
strategy: SupervisorStrategy): Props = {
strategy: SupervisorStrategy): Props = {
require(minBackoff > Duration.Zero, "minBackoff must be > 0")
require(maxBackoff >= minBackoff, "maxBackoff must be >= minBackoff")
require(0.0 <= randomFactor && randomFactor <= 1.0, "randomFactor must be between 0.0 and 1.0")
@ -145,8 +145,8 @@ object BackoffSupervisor {
*/
private[akka] def calculateDelay(
restartCount: Int,
minBackoff: FiniteDuration,
maxBackoff: FiniteDuration,
minBackoff: FiniteDuration,
maxBackoff: FiniteDuration,
randomFactor: Double): FiniteDuration = {
val rnd = 1.0 + ThreadLocalRandom.current().nextDouble() * randomFactor
if (restartCount >= 30) // Duration overflow protection (> 100 years)
@ -166,12 +166,12 @@ object BackoffSupervisor {
*/
final class BackoffSupervisor(
val childProps: Props,
val childName: String,
minBackoff: FiniteDuration,
maxBackoff: FiniteDuration,
val reset: BackoffReset,
randomFactor: Double,
strategy: SupervisorStrategy)
val childName: String,
minBackoff: FiniteDuration,
maxBackoff: FiniteDuration,
val reset: BackoffReset,
randomFactor: Double,
strategy: SupervisorStrategy)
extends Actor with HandleBackoff {
import BackoffSupervisor._
@ -192,20 +192,20 @@ final class BackoffSupervisor(
// for binary compatibility with 2.4.1
def this(
childProps: Props,
childName: String,
minBackoff: FiniteDuration,
maxBackoff: FiniteDuration,
randomFactor: Double,
childProps: Props,
childName: String,
minBackoff: FiniteDuration,
maxBackoff: FiniteDuration,
randomFactor: Double,
supervisorStrategy: SupervisorStrategy) =
this(childProps, childName, minBackoff, maxBackoff, AutoReset(minBackoff), randomFactor, supervisorStrategy)
// for binary compatibility with 2.4.0
def this(
childProps: Props,
childName: String,
minBackoff: FiniteDuration,
maxBackoff: FiniteDuration,
childProps: Props,
childName: String,
minBackoff: FiniteDuration,
maxBackoff: FiniteDuration,
randomFactor: Double) =
this(childProps, childName, minBackoff, maxBackoff, randomFactor, SupervisorStrategy.defaultStrategy)
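The props factories above wrap a child's `Props` so that it is recreated after an exponentially growing, slightly randomized delay (`calculateDelay` doubles `minBackoff` per restart, capped at `maxBackoff`). A sketch of the common pattern, with an invented child actor and names:

import akka.actor.{ Actor, ActorSystem, Props }
import akka.pattern.{ Backoff, BackoffSupervisor }
import scala.concurrent.duration._

object BackoffSketch extends App {
  class FlakyWorker extends Actor {
    def receive = { case msg => println(msg) }
  }

  val system = ActorSystem("backoff-demo")

  // when the child stops, recreate it after 3s, 6s, 12s, ... capped at 30s,
  // with 20% random noise so many instances do not restart in lockstep
  val supervisorProps = BackoffSupervisor.props(
    Backoff.onStop(
      childProps = Props(new FlakyWorker),
      childName = "worker",
      minBackoff = 3.seconds,
      maxBackoff = 30.seconds,
      randomFactor = 0.2))

  system.actorOf(supervisorProps, "workerSupervisor")
}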

View file

@ -515,5 +515,5 @@ class CircuitBreaker(scheduler: Scheduler, maxFailures: Int, callTimeout: Finite
*/
class CircuitBreakerOpenException(
val remainingDuration: FiniteDuration,
message: String = "Circuit Breaker is open; calls are failing fast")
message: String = "Circuit Breaker is open; calls are failing fast")
extends AkkaException(message) with NoStackTrace

View file

@ -66,9 +66,9 @@ private[akka] final class BalancingRoutingLogic extends RoutingLogic {
*/
@SerialVersionUID(1L)
final case class BalancingPool(
override val nrOfInstances: Int,
override val nrOfInstances: Int,
override val supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy,
override val routerDispatcher: String = Dispatchers.DefaultDispatcherId)
override val routerDispatcher: String = Dispatchers.DefaultDispatcherId)
extends Pool {
def this(config: Config) =
@ -112,12 +112,14 @@ final case class BalancingPool(
// dispatcher of this pool
val deployDispatcherConfigPath = s"akka.actor.deployment.$deployPath.pool-dispatcher"
val systemConfig = context.system.settings.config
val dispatcherConfig = context.system.dispatchers.config(dispatcherId,
val dispatcherConfig = context.system.dispatchers.config(
dispatcherId,
// use the user defined 'pool-dispatcher' config as fallback, if any
if (systemConfig.hasPath(deployDispatcherConfigPath)) systemConfig.getConfig(deployDispatcherConfigPath)
else ConfigFactory.empty)
dispatchers.registerConfigurator(dispatcherId, new BalancingDispatcherConfigurator(dispatcherConfig,
dispatchers.registerConfigurator(dispatcherId, new BalancingDispatcherConfigurator(
dispatcherConfig,
dispatchers.prerequisites))
}

View file

@ -58,8 +58,8 @@ final class BroadcastRoutingLogic extends RoutingLogic {
final case class BroadcastPool(
override val nrOfInstances: Int, override val resizer: Option[Resizer] = None,
override val supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy,
override val routerDispatcher: String = Dispatchers.DefaultDispatcherId,
override val usePoolDispatcher: Boolean = false)
override val routerDispatcher: String = Dispatchers.DefaultDispatcherId,
override val usePoolDispatcher: Boolean = false)
extends Pool with PoolOverrideUnsetConfig[BroadcastPool] {
def this(config: Config) =
@ -118,8 +118,8 @@ final case class BroadcastPool(
*/
@SerialVersionUID(1L)
final case class BroadcastGroup(
override val paths: immutable.Iterable[String],
override val routerDispatcher: String = Dispatchers.DefaultDispatcherId)
override val paths: immutable.Iterable[String],
override val routerDispatcher: String = Dispatchers.DefaultDispatcherId)
extends Group {
def this(config: Config) =

View file

@ -39,7 +39,8 @@ class ConsistentHash[T: ClassTag] private (nodes: immutable.SortedMap[Int, T], v
*/
def :+(node: T): ConsistentHash[T] = {
val nodeHash = hashFor(node.toString)
new ConsistentHash(nodes ++ ((1 to virtualNodesFactor) map { r ⇒ (concatenateNodeHash(nodeHash, r) -> node) }),
new ConsistentHash(
nodes ++ ((1 to virtualNodesFactor) map { r ⇒ (concatenateNodeHash(nodeHash, r) → node) }),
virtualNodesFactor)
}
@ -57,7 +58,8 @@ class ConsistentHash[T: ClassTag] private (nodes: immutable.SortedMap[Int, T], v
*/
def :-(node: T): ConsistentHash[T] = {
val nodeHash = hashFor(node.toString)
new ConsistentHash(nodes -- ((1 to virtualNodesFactor) map { r ⇒ concatenateNodeHash(nodeHash, r) }),
new ConsistentHash(
nodes -- ((1 to virtualNodesFactor) map { r ⇒ concatenateNodeHash(nodeHash, r) }),
virtualNodesFactor)
}
@ -110,12 +112,13 @@ class ConsistentHash[T: ClassTag] private (nodes: immutable.SortedMap[Int, T], v
object ConsistentHash {
def apply[T: ClassTag](nodes: Iterable[T], virtualNodesFactor: Int): ConsistentHash[T] = {
new ConsistentHash(immutable.SortedMap.empty[Int, T] ++
(for {
node ← nodes
nodeHash = hashFor(node.toString)
vnode ← 1 to virtualNodesFactor
} yield (concatenateNodeHash(nodeHash, vnode) -> node)),
new ConsistentHash(
immutable.SortedMap.empty[Int, T] ++
(for {
node ← nodes
nodeHash = hashFor(node.toString)
vnode ← 1 to virtualNodesFactor
} yield (concatenateNodeHash(nodeHash, vnode) → node)),
virtualNodesFactor)
}
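`:+` and `:-` above return new rings with a node's virtual nodes added or removed, and `nodeFor` picks the node owning a key. A small sketch with invented node names and key:

import akka.routing.ConsistentHash

object ConsistentHashSketch extends App {
  // each node is expanded into 10 virtual nodes to even out the distribution
  val ring = ConsistentHash(List("node-a", "node-b", "node-c"), virtualNodesFactor = 10)

  println(ring.nodeFor("user-42")) // the node owning this key

  // rings are immutable: adding or removing a node yields a new ring,
  // and only keys near the changed virtual nodes move
  val grown = ring :+ "node-d"
  val shrunk = grown :- "node-a"
  println(shrunk.nodeFor("user-42"))
}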

View file

@ -135,9 +135,9 @@ object ConsistentHashingRoutingLogic {
*/
@SerialVersionUID(1L)
final case class ConsistentHashingRoutingLogic(
system: ActorSystem,
virtualNodesFactor: Int = 0,
hashMapping: ConsistentHashingRouter.ConsistentHashMapping = ConsistentHashingRouter.emptyConsistentHashMapping)
system: ActorSystem,
virtualNodesFactor: Int = 0,
hashMapping: ConsistentHashingRouter.ConsistentHashMapping = ConsistentHashingRouter.emptyConsistentHashMapping)
extends RoutingLogic {
import ConsistentHashingRouter._
@ -219,7 +219,8 @@ final case class ConsistentHashingRoutingLogic(
case _ if hashMapping.isDefinedAt(message) ⇒ target(hashMapping(message))
case hashable: ConsistentHashable ⇒ target(hashable.consistentHashKey)
case other ⇒
log.warning("Message [{}] must be handled by hashMapping, or implement [{}] or be wrapped in [{}]",
log.warning(
"Message [{}] must be handled by hashMapping, or implement [{}] or be wrapped in [{}]",
message.getClass.getName, classOf[ConsistentHashable].getName,
classOf[ConsistentHashableEnvelope].getName)
NoRoutee
@ -266,13 +267,13 @@ final case class ConsistentHashingRoutingLogic(
*/
@SerialVersionUID(1L)
final case class ConsistentHashingPool(
override val nrOfInstances: Int,
override val resizer: Option[Resizer] = None,
val virtualNodesFactor: Int = 0,
val hashMapping: ConsistentHashingRouter.ConsistentHashMapping = ConsistentHashingRouter.emptyConsistentHashMapping,
override val supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy,
override val routerDispatcher: String = Dispatchers.DefaultDispatcherId,
override val usePoolDispatcher: Boolean = false)
override val nrOfInstances: Int,
override val resizer: Option[Resizer] = None,
val virtualNodesFactor: Int = 0,
val hashMapping: ConsistentHashingRouter.ConsistentHashMapping = ConsistentHashingRouter.emptyConsistentHashMapping,
override val supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy,
override val routerDispatcher: String = Dispatchers.DefaultDispatcherId,
override val usePoolDispatcher: Boolean = false)
extends Pool with PoolOverrideUnsetConfig[ConsistentHashingPool] {
def this(config: Config) =
@ -354,10 +355,10 @@ final case class ConsistentHashingPool(
*/
@SerialVersionUID(1L)
final case class ConsistentHashingGroup(
override val paths: immutable.Iterable[String],
val virtualNodesFactor: Int = 0,
val hashMapping: ConsistentHashingRouter.ConsistentHashMapping = ConsistentHashingRouter.emptyConsistentHashMapping,
override val routerDispatcher: String = Dispatchers.DefaultDispatcherId)
override val paths: immutable.Iterable[String],
val virtualNodesFactor: Int = 0,
val hashMapping: ConsistentHashingRouter.ConsistentHashMapping = ConsistentHashingRouter.emptyConsistentHashMapping,
override val routerDispatcher: String = Dispatchers.DefaultDispatcherId)
extends Group {
def this(config: Config) =
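The warning logged above lists the three ways a routed message can supply its hash key: through the pool's `hashMapping`, by implementing `ConsistentHashable`, or by being wrapped in a `ConsistentHashableEnvelope`. A sketch of the envelope approach, with an invented routee and messages:

import akka.actor.{ Actor, ActorSystem, Props }
import akka.routing.ConsistentHashingPool
import akka.routing.ConsistentHashingRouter.ConsistentHashableEnvelope

object HashingRouterSketch extends App {
  class Cache extends Actor {
    var store = Map.empty[String, String]
    def receive = {
      case (key: String, value: String) => store += (key -> value)
      case key: String                  => sender() ! store.get(key)
    }
  }

  val system = ActorSystem("hashing-demo")
  val router = system.actorOf(ConsistentHashingPool(10).props(Props(new Cache)), "cache")

  // the envelope carries the hash key, so the write and the read for "user-42"
  // are routed to the same routee
  router ! ConsistentHashableEnvelope(message = ("user-42", "Alice"), hashKey = "user-42")
  router ! ConsistentHashableEnvelope(message = "user-42", hashKey = "user-42")
}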

View file

@ -44,9 +44,9 @@ case object OptimalSizeExploringResizer {
*/
private[routing] case class ResizeRecord(
underutilizationStreak: Option[UnderUtilizationStreak] = None,
messageCount: Long = 0,
totalQueueLength: Int = 0,
checkTime: Long = 0)
messageCount: Long = 0,
totalQueueLength: Int = 0,
checkTime: Long = 0)
/**
* INTERNAL API
@ -115,16 +115,16 @@ case object OptimalSizeExploringResizer {
*/
@SerialVersionUID(1L)
case class DefaultOptimalSizeExploringResizer(
lowerBound: PoolSize = 1,
upperBound: PoolSize = 30,
chanceOfScalingDownWhenFull: Double = 0.2,
actionInterval: Duration = 5.seconds,
numOfAdjacentSizesToConsiderDuringOptimization: Int = 16,
exploreStepSize: Double = 0.1,
downsizeRatio: Double = 0.8,
downsizeAfterUnderutilizedFor: Duration = 72.hours,
explorationProbability: Double = 0.4,
weightOfLatestMetric: Double = 0.5) extends OptimalSizeExploringResizer {
lowerBound: PoolSize = 1,
upperBound: PoolSize = 30,
chanceOfScalingDownWhenFull: Double = 0.2,
actionInterval: Duration = 5.seconds,
numOfAdjacentSizesToConsiderDuringOptimization: Int = 16,
exploreStepSize: Double = 0.1,
downsizeRatio: Double = 0.8,
downsizeAfterUnderutilizedFor: Duration = 72.hours,
explorationProbability: Double = 0.4,
weightOfLatestMetric: Double = 0.5) extends OptimalSizeExploringResizer {
/**
* Leave package accessible for testing purpose
*/

View file

@ -59,8 +59,8 @@ final class RandomRoutingLogic extends RoutingLogic {
final case class RandomPool(
override val nrOfInstances: Int, override val resizer: Option[Resizer] = None,
override val supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy,
override val routerDispatcher: String = Dispatchers.DefaultDispatcherId,
override val usePoolDispatcher: Boolean = false)
override val routerDispatcher: String = Dispatchers.DefaultDispatcherId,
override val usePoolDispatcher: Boolean = false)
extends Pool with PoolOverrideUnsetConfig[RandomPool] {
def this(config: Config) =
@ -119,8 +119,8 @@ final case class RandomPool(
*/
@SerialVersionUID(1L)
final case class RandomGroup(
override val paths: immutable.Iterable[String],
override val routerDispatcher: String = Dispatchers.DefaultDispatcherId)
override val paths: immutable.Iterable[String],
override val routerDispatcher: String = Dispatchers.DefaultDispatcherId)
extends Group {
def this(config: Config) =

View file

@ -126,13 +126,13 @@ case object DefaultResizer {
*/
@SerialVersionUID(1L)
case class DefaultResizer(
val lowerBound: Int = 1,
val upperBound: Int = 10,
val pressureThreshold: Int = 1,
val rampupRate: Double = 0.2,
val backoffThreshold: Double = 0.3,
val backoffRate: Double = 0.1,
val messagesPerResize: Int = 10) extends Resizer {
val lowerBound: Int = 1,
val upperBound: Int = 10,
val pressureThreshold: Int = 1,
val rampupRate: Double = 0.2,
val backoffThreshold: Double = 0.3,
val backoffRate: Double = 0.1,
val messagesPerResize: Int = 10) extends Resizer {
/**
* Java API constructor for default values except bounds.
@ -246,13 +246,13 @@ case class DefaultResizer(
* INTERNAL API
*/
private[akka] final class ResizablePoolCell(
_system: ActorSystemImpl,
_ref: InternalActorRef,
_routerProps: Props,
_system: ActorSystemImpl,
_ref: InternalActorRef,
_routerProps: Props,
_routerDispatcher: MessageDispatcher,
_routeeProps: Props,
_supervisor: InternalActorRef,
val pool: Pool)
_routeeProps: Props,
_supervisor: InternalActorRef,
val pool: Pool)
extends RoutedActorCell(_system, _ref, _routerProps, _routerDispatcher, _routeeProps, _supervisor) {
require(pool.resizer.isDefined, "RouterConfig must be a Pool with defined resizer")

View file

@ -67,12 +67,13 @@ final class RoundRobinRoutingLogic extends RoutingLogic {
final case class RoundRobinPool(
override val nrOfInstances: Int, override val resizer: Option[Resizer] = None,
override val supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy,
override val routerDispatcher: String = Dispatchers.DefaultDispatcherId,
override val usePoolDispatcher: Boolean = false)
override val routerDispatcher: String = Dispatchers.DefaultDispatcherId,
override val usePoolDispatcher: Boolean = false)
extends Pool with PoolOverrideUnsetConfig[RoundRobinPool] {
def this(config: Config) =
this(nrOfInstances = config.getInt("nr-of-instances"),
this(
nrOfInstances = config.getInt("nr-of-instances"),
resizer = Resizer.fromConfig(config),
usePoolDispatcher = config.hasPath("pool-dispatcher"))
@ -127,8 +128,8 @@ final case class RoundRobinPool(
*/
@SerialVersionUID(1L)
final case class RoundRobinGroup(
override val paths: immutable.Iterable[String],
override val routerDispatcher: String = Dispatchers.DefaultDispatcherId)
override val paths: immutable.Iterable[String],
override val routerDispatcher: String = Dispatchers.DefaultDispatcherId)
extends Group {
def this(config: Config) =

View file

@ -35,12 +35,12 @@ private[akka] object RoutedActorCell {
* INTERNAL API
*/
private[akka] class RoutedActorCell(
_system: ActorSystemImpl,
_ref: InternalActorRef,
_routerProps: Props,
_system: ActorSystemImpl,
_ref: InternalActorRef,
_routerProps: Props,
_routerDispatcher: MessageDispatcher,
val routeeProps: Props,
_supervisor: InternalActorRef)
val routeeProps: Props,
_supervisor: InternalActorRef)
extends ActorCell(_system, _ref, _routerProps, _routerDispatcher, _supervisor) {
private[akka] val routerConfig = _routerProps.routerConfig
@ -154,8 +154,9 @@ private[akka] class RouterActor extends Actor {
}
val routingLogicController: Option[ActorRef] = cell.routerConfig.routingLogicController(
cell.router.logic).map(props ⇒ context.actorOf(props.withDispatcher(context.props.dispatcher),
name = "routingLogicController"))
cell.router.logic).map(props ⇒ context.actorOf(
props.withDispatcher(context.props.dispatcher),
name = "routingLogicController"))
def receive = {
case GetRoutees ⇒

View file

@ -22,13 +22,13 @@ import akka.dispatch.MessageDispatcher
* send a message to one (or more) of these actors.
*/
private[akka] class RoutedActorRef(
_system: ActorSystemImpl,
_routerProps: Props,
_system: ActorSystemImpl,
_routerProps: Props,
_routerDispatcher: MessageDispatcher,
_routerMailbox: MailboxType,
_routeeProps: Props,
_supervisor: InternalActorRef,
_path: ActorPath)
_routerMailbox: MailboxType,
_routeeProps: Props,
_supervisor: InternalActorRef,
_path: ActorPath)
extends RepointableActorRef(_system, _routerProps, _routerDispatcher, _routerMailbox, _supervisor, _path) {
// verify that a BalancingDispatcher is not used with a Router

View file

@ -3,7 +3,6 @@
*/
package akka.routing
import scala.collection.immutable
import akka.ConfigurationException
import akka.actor.ActorContext
@ -282,9 +281,9 @@ case object FromConfig extends FromConfig {
*/
def getInstance = this
@inline final def apply(
resizer: Option[Resizer] = None,
resizer: Option[Resizer] = None,
supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy,
routerDispatcher: String = Dispatchers.DefaultDispatcherId) =
routerDispatcher: String = Dispatchers.DefaultDispatcherId) =
new FromConfig(resizer, supervisorStrategy, routerDispatcher)
@inline final def unapply(fc: FromConfig): Option[String] = Some(fc.routerDispatcher)
@ -297,9 +296,10 @@ case object FromConfig extends FromConfig {
* (defaults to default-dispatcher).
*/
@SerialVersionUID(1L)
class FromConfig(override val resizer: Option[Resizer],
override val supervisorStrategy: SupervisorStrategy,
override val routerDispatcher: String) extends Pool {
class FromConfig(
override val resizer: Option[Resizer],
override val supervisorStrategy: SupervisorStrategy,
override val routerDispatcher: String) extends Pool {
def this() = this(None, Pool.defaultSupervisorStrategy, Dispatchers.DefaultDispatcherId)

View file

@ -97,10 +97,10 @@ private[akka] final case class ScatterGatherFirstCompletedRoutees(
@SerialVersionUID(1L)
final case class ScatterGatherFirstCompletedPool(
override val nrOfInstances: Int, override val resizer: Option[Resizer] = None,
within: FiniteDuration,
within: FiniteDuration,
override val supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy,
override val routerDispatcher: String = Dispatchers.DefaultDispatcherId,
override val usePoolDispatcher: Boolean = false)
override val routerDispatcher: String = Dispatchers.DefaultDispatcherId,
override val usePoolDispatcher: Boolean = false)
extends Pool with PoolOverrideUnsetConfig[ScatterGatherFirstCompletedPool] {
def this(config: Config) =
@ -165,9 +165,9 @@ final case class ScatterGatherFirstCompletedPool(
*/
@SerialVersionUID(1L)
final case class ScatterGatherFirstCompletedGroup(
override val paths: immutable.Iterable[String],
within: FiniteDuration,
override val routerDispatcher: String = Dispatchers.DefaultDispatcherId)
override val paths: immutable.Iterable[String],
within: FiniteDuration,
override val routerDispatcher: String = Dispatchers.DefaultDispatcherId)
extends Group {
def this(config: Config) =

View file

@ -45,11 +45,12 @@ class SmallestMailboxRoutingLogic extends RoutingLogic {
// 4. An ActorRef with unknown mailbox size that isn't processing anything
// 5. An ActorRef with a known mailbox size
// 6. An ActorRef without any messages
@tailrec private def selectNext(targets: immutable.IndexedSeq[Routee],
proposedTarget: Routee = NoRoutee,
currentScore: Long = Long.MaxValue,
at: Int = 0,
deep: Boolean = false): Routee = {
@tailrec private def selectNext(
targets: immutable.IndexedSeq[Routee],
proposedTarget: Routee = NoRoutee,
currentScore: Long = Long.MaxValue,
at: Int = 0,
deep: Boolean = false): Routee = {
if (targets.isEmpty)
NoRoutee
else if (at >= targets.size) {
@ -174,8 +175,8 @@ class SmallestMailboxRoutingLogic extends RoutingLogic {
final case class SmallestMailboxPool(
override val nrOfInstances: Int, override val resizer: Option[Resizer] = None,
override val supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy,
override val routerDispatcher: String = Dispatchers.DefaultDispatcherId,
override val usePoolDispatcher: Boolean = false)
override val routerDispatcher: String = Dispatchers.DefaultDispatcherId,
override val usePoolDispatcher: Boolean = false)
extends Pool with PoolOverrideUnsetConfig[SmallestMailboxPool] {
def this(config: Config) =

View file

@ -142,11 +142,11 @@ private[akka] final case class TailChoppingRoutees(
@SerialVersionUID(1L)
final case class TailChoppingPool(
override val nrOfInstances: Int, override val resizer: Option[Resizer] = None,
within: FiniteDuration,
interval: FiniteDuration,
within: FiniteDuration,
interval: FiniteDuration,
override val supervisorStrategy: SupervisorStrategy = Pool.defaultSupervisorStrategy,
override val routerDispatcher: String = Dispatchers.DefaultDispatcherId,
override val usePoolDispatcher: Boolean = false)
override val routerDispatcher: String = Dispatchers.DefaultDispatcherId,
override val usePoolDispatcher: Boolean = false)
extends Pool with PoolOverrideUnsetConfig[TailChoppingPool] {
def this(config: Config) =
@@ -227,10 +227,10 @@ final case class TailChoppingPool(
* router management messages
*/
final case class TailChoppingGroup(
override val paths: immutable.Iterable[String],
within: FiniteDuration,
interval: FiniteDuration,
override val routerDispatcher: String = Dispatchers.DefaultDispatcherId) extends Group {
override val paths: immutable.Iterable[String],
within: FiniteDuration,
interval: FiniteDuration,
override val routerDispatcher: String = Dispatchers.DefaultDispatcherId) extends Group {
def this(config: Config) =
this(

View file

@@ -35,7 +35,7 @@ object Serialization {
private final def configToMap(path: String): Map[String, String] = {
import scala.collection.JavaConverters._
config.getConfig(path).root.unwrapped.asScala.toMap map { case (k, v) ⇒ (k -> v.toString) }
config.getConfig(path).root.unwrapped.asScala.toMap map { case (k, v) ⇒ (k → v.toString) }
}
}
@@ -194,7 +194,7 @@ class Serialization(val system: ExtendedActorSystem) extends Extension {
* loading is performed by the systems [[akka.actor.DynamicAccess]].
*/
def serializerOf(serializerFQN: String): Try[Serializer] =
system.dynamicAccess.createInstanceFor[Serializer](serializerFQN, List(classOf[ExtendedActorSystem] -> system)) recoverWith {
system.dynamicAccess.createInstanceFor[Serializer](serializerFQN, List(classOf[ExtendedActorSystem] → system)) recoverWith {
case _: NoSuchMethodException ⇒ system.dynamicAccess.createInstanceFor[Serializer](serializerFQN, Nil)
}
@@ -203,7 +203,7 @@ class Serialization(val system: ExtendedActorSystem) extends Extension {
* By default always contains the following mapping: "java" -> akka.serialization.JavaSerializer
*/
private val serializers: Map[String, Serializer] =
for ((k: String, v: String) ← settings.Serializers) yield k -> serializerOf(v).get
for ((k: String, v: String) ← settings.Serializers) yield k → serializerOf(v).get
/**
* bindings is a Seq of tuple representing the mapping from Class to Serializer.
@@ -244,7 +244,7 @@ class Serialization(val system: ExtendedActorSystem) extends Extension {
* Maps from a Serializer Identity (Int) to a Serializer instance (optimization)
*/
val serializerByIdentity: Map[Int, Serializer] =
Map(NullSerializer.identifier -> NullSerializer) ++ serializers map { case (_, v) ⇒ (v.identifier, v) }
Map(NullSerializer.identifier → NullSerializer) ++ serializers map { case (_, v) ⇒ (v.identifier, v) }
private val isJavaSerializationWarningEnabled = settings.config.getBoolean("akka.actor.warn-about-java-serializer-usage")

View file

@@ -7,15 +7,15 @@ object BoxedType {
import java.{ lang ⇒ jl }
private val toBoxed = Map[Class[_], Class[_]](
classOf[Boolean] -> classOf[jl.Boolean],
classOf[Byte] -> classOf[jl.Byte],
classOf[Char] -> classOf[jl.Character],
classOf[Short] -> classOf[jl.Short],
classOf[Int] -> classOf[jl.Integer],
classOf[Long] -> classOf[jl.Long],
classOf[Float] -> classOf[jl.Float],
classOf[Double] -> classOf[jl.Double],
classOf[Unit] -> classOf[scala.runtime.BoxedUnit])
classOf[Boolean] → classOf[jl.Boolean],
classOf[Byte] → classOf[jl.Byte],
classOf[Char] → classOf[jl.Character],
classOf[Short] → classOf[jl.Short],
classOf[Int] → classOf[jl.Integer],
classOf[Long] → classOf[jl.Long],
classOf[Float] → classOf[jl.Float],
classOf[Double] → classOf[jl.Double],
classOf[Unit] → classOf[scala.runtime.BoxedUnit])
final def apply(c: Class[_]): Class[_] = if (c.isPrimitive) toBoxed(c) else c
}

View file

@@ -357,7 +357,7 @@ object ByteString {
private[akka] object Companion {
private val companionMap = Seq(ByteString1, ByteString1C, ByteStrings).
map(x ⇒ x.SerializationIdentity -> x).toMap.
map(x ⇒ x.SerializationIdentity → x).toMap.
withDefault(x ⇒ throw new IllegalArgumentException("Invalid serialization id " + x))
def apply(from: Byte): Companion = companionMap(from)

View file

@@ -187,7 +187,7 @@ object LineNumbers {
val cl = c.getClassLoader
val r = cl.getResourceAsStream(resource)
if (debug) println(s"LNB: resource '$resource' resolved to stream $r")
Option(r).map(_ -> None)
Option(r).map(_ → None)
}
private def getStreamForLambda(l: AnyRef): Option[(InputStream, Some[String])] =
@@ -269,7 +269,7 @@ object LineNumbers {
val count = d.readUnsignedShort()
if (debug) println(s"LNB: reading $count methods")
if (c.contains("Code") && c.contains("LineNumberTable")) {
(1 to count).map(_ ⇒ readMethod(d, c("Code"), c("LineNumberTable"), filter)).flatten.foldLeft(Int.MaxValue -> 0) {
(1 to count).map(_ ⇒ readMethod(d, c("Code"), c("LineNumberTable"), filter)).flatten.foldLeft(Int.MaxValue → 0) {
case ((low, high), (start, end)) ⇒ (Math.min(low, start), Math.max(high, end))
} match {
case (Int.MaxValue, 0) ⇒ None
@@ -282,10 +282,11 @@
}
}
private def readMethod(d: DataInputStream,
codeTag: Int,
lineNumberTableTag: Int,
filter: Option[String])(implicit c: Constants): Option[(Int, Int)] = {
private def readMethod(
d: DataInputStream,
codeTag: Int,
lineNumberTableTag: Int,
filter: Option[String])(implicit c: Constants): Option[(Int, Int)] = {
skip(d, 2) // access flags
val name = d.readUnsignedShort() // name
skip(d, 2) // signature
@@ -315,7 +316,7 @@ object LineNumbers {
skip(d, 2) // start PC
d.readUnsignedShort() // finally: the line number
}
Some(lines.min -> lines.max)
Some(lines.min → lines.max)
}
}
if (debug) println(s"LNB: nested attributes yielded: $possibleLines")

View file

@@ -127,7 +127,7 @@ private[akka] class SubclassifiedIndex[K, V] private (protected var values: Set[
if (!found) {
val v = values + value
val n = new Nonroot(root, key, v)
integrate(n) ++ n.innerAddValue(key, value) :+ (key -> v)
integrate(n) ++ n.innerAddValue(key, value) :+ (key → v)
} else ch
}

View file

@@ -34,7 +34,7 @@ class ActorCreationBenchmark {
}
@TearDown(Level.Trial)
def shutdown():Unit = {
def shutdown(): Unit = {
system.terminate()
Await.ready(system.whenTerminated, 15.seconds)
}

View file

@@ -28,7 +28,7 @@ class ForkJoinActorBenchmark {
implicit var system: ActorSystem = _
@Setup(Level.Trial)
def setup():Unit = {
def setup(): Unit = {
system = ActorSystem("ForkJoinActorBenchmark", ConfigFactory.parseString(
s"""| akka {
| log-dead-letters = off
@@ -44,11 +44,12 @@ class ForkJoinActorBenchmark {
| }
| }
| }
""".stripMargin))
""".stripMargin
))
}
@TearDown(Level.Trial)
def shutdown():Unit = {
def shutdown(): Unit = {
system.terminate()
Await.ready(system.whenTerminated, 15.seconds)
}
@@ -56,7 +57,7 @@ class ForkJoinActorBenchmark {
@Benchmark
@Measurement(timeUnit = TimeUnit.MILLISECONDS)
@OperationsPerInvocation(messages)
def pingPong():Unit = {
def pingPong(): Unit = {
val ping = system.actorOf(Props[ForkJoinActorBenchmark.PingPong])
val pong = system.actorOf(Props[ForkJoinActorBenchmark.PingPong])
@@ -72,7 +73,7 @@ class ForkJoinActorBenchmark {
@Benchmark
@Measurement(timeUnit = TimeUnit.MILLISECONDS)
@OperationsPerInvocation(messages)
def floodPipe():Unit = {
def floodPipe(): Unit = {
val end = system.actorOf(Props(classOf[ForkJoinActorBenchmark.Pipe], None))
val middle = system.actorOf(Props(classOf[ForkJoinActorBenchmark.Pipe], Some(end)))

View file

@@ -26,7 +26,7 @@ class RouterPoolCreationBenchmark {
var size = 0
@TearDown(Level.Trial)
def shutdown():Unit = {
def shutdown(): Unit = {
system.terminate()
Await.ready(system.whenTerminated, 15.seconds)
}

View file

@@ -56,13 +56,13 @@ class ScheduleBenchmark {
var promise: Promise[Any] = _
@Setup(Level.Iteration)
def setup():Unit = {
def setup(): Unit = {
winner = (to * ratio + 1).toInt
promise = Promise[Any]()
}
@TearDown
def shutdown():Unit = {
def shutdown(): Unit = {
system.terminate()
Await.ready(system.whenTerminated, 15.seconds)
}
@@ -70,7 +70,7 @@ class ScheduleBenchmark {
def op(idx: Int) = if (idx == winner) promise.trySuccess(idx) else idx
@Benchmark
def oneSchedule():Unit = {
def oneSchedule(): Unit = {
val aIdx = new AtomicInteger(1)
val tryWithNext = scheduler.schedule(0.millis, interval) {
val idx = aIdx.getAndIncrement
@@ -84,7 +84,7 @@ class ScheduleBenchmark {
}
@Benchmark
def multipleScheduleOnce():Unit = {
def multipleScheduleOnce(): Unit = {
val tryWithNext = (1 to to).foldLeft(0.millis -> List[Cancellable]()) {
case ((interv, c), idx) ⇒
(interv + interval, scheduler.scheduleOnce(interv) {

View file

@@ -35,7 +35,7 @@ class StashCreationBenchmark {
val probe = TestProbe()
@TearDown(Level.Trial)
def shutdown():Unit = {
def shutdown(): Unit = {
system.terminate()
Await.ready(system.whenTerminated, 15.seconds)
}

View file

@@ -25,7 +25,7 @@ class TellOnlyBenchmark {
implicit var system: ActorSystem = _
@Setup(Level.Trial)
def setup():Unit = {
def setup(): Unit = {
system = ActorSystem("TellOnlyBenchmark", ConfigFactory.parseString(
s"""| akka {
| log-dead-letters = off
@@ -46,11 +46,12 @@ class TellOnlyBenchmark {
| type = "akka.actor.TellOnlyBenchmark$$DroppingDispatcherConfigurator"
| mailbox-type = "akka.actor.TellOnlyBenchmark$$UnboundedDroppingMailbox"
| }
| """.stripMargin))
| """.stripMargin
))
}
@TearDown(Level.Trial)
def shutdown():Unit = {
def shutdown(): Unit = {
system.terminate()
Await.ready(system.whenTerminated, 15.seconds)
}
@@ -59,7 +60,7 @@ class TellOnlyBenchmark {
var probe: TestProbe = _
@Setup(Level.Iteration)
def setupIteration():Unit = {
def setupIteration(): Unit = {
actor = system.actorOf(Props[TellOnlyBenchmark.Echo].withDispatcher("dropping-dispatcher"))
probe = TestProbe()
probe.watch(actor)
@@ -71,7 +72,7 @@ class TellOnlyBenchmark {
}
@TearDown(Level.Iteration)
def shutdownIteration():Unit = {
def shutdownIteration(): Unit = {
probe.send(actor, flipDrop)
probe.expectNoMsg(200.millis)
actor ! stop
@@ -82,7 +83,7 @@ class TellOnlyBenchmark {
@Benchmark
@OutputTimeUnit(TimeUnit.MICROSECONDS)
def tell():Unit = {
def tell(): Unit = {
probe.send(actor, message)
}
}
@@ -105,7 +106,7 @@ object TellOnlyBenchmark {
class DroppingMessageQueue extends UnboundedMailbox.MessageQueue {
@volatile var dropping = false
override def enqueue(receiver: ActorRef, handle: Envelope):Unit = {
override def enqueue(receiver: ActorRef, handle: Envelope): Unit = {
if (handle.message == flipDrop) dropping = !dropping
else if (!dropping) super.enqueue(receiver, handle)
}
@@ -125,21 +126,22 @@ object TellOnlyBenchmark {
_throughput: Int,
_throughputDeadlineTime: Duration,
_executorServiceFactoryProvider: ExecutorServiceFactoryProvider,
_shutdownTimeout: FiniteDuration)
extends Dispatcher(_configurator, _id, _throughput, _throughputDeadlineTime, _executorServiceFactoryProvider, _shutdownTimeout) {
_shutdownTimeout: FiniteDuration
)
extends Dispatcher(_configurator, _id, _throughput, _throughputDeadlineTime, _executorServiceFactoryProvider, _shutdownTimeout) {
override protected[akka] def dispatch(receiver: ActorCell, invocation: Envelope): Unit = {
val mbox = receiver.mailbox
mbox.enqueue(receiver.self, invocation)
mbox.messageQueue match {
case mb: DroppingMessageQueue if mb.dropping ⇒ // do nothing
case _ ⇒ registerForExecution(mbox, true, false)
case _ ⇒ registerForExecution(mbox, true, false)
}
}
}
class DroppingDispatcherConfigurator(config: Config, prerequisites: DispatcherPrerequisites)
extends MessageDispatcherConfigurator(config, prerequisites) {
extends MessageDispatcherConfigurator(config, prerequisites) {
override def dispatcher(): MessageDispatcher = new DroppingDispatcher(
this,
@@ -147,6 +149,7 @@ object TellOnlyBenchmark {
config.getInt("throughput"),
config.getNanosDuration("throughput-deadline-time"),
configureExecutor(),
config.getMillisDuration("shutdown-timeout"))
config.getMillisDuration("shutdown-timeout")
)
}
}

View file

@@ -48,7 +48,7 @@ class ORSetMergeBenchmark {
var elem2: String = _
@Setup(Level.Trial)
def setup():Unit = {
def setup(): Unit = {
set1 = (1 to set1Size).foldLeft(ORSet.empty[String])((s, n) => s.add(nextNode(), "elem" + n))
addFromSameNode = set1.add(nodeA, "elem" + set1Size + 1).merge(set1)
addFromOtherNode = set1.add(nodeB, "elem" + set1Size + 1).merge(set1)

View file

@@ -45,7 +45,7 @@ class VersionVectorBenchmark {
var dot1: VersionVector = _
@Setup(Level.Trial)
def setup():Unit = {
def setup(): Unit = {
vv1 = (1 to size).foldLeft(VersionVector.empty)((vv, n) => vv + nextNode())
vv2 = vv1 + nextNode()
vv3 = vv1 + nextNode()

View file

@@ -21,7 +21,7 @@ class CachingConfigBenchmark {
val deepConfig = ConfigFactory.parseString(deepConfigString)
val deepCaching = new CachingConfig(deepConfig)
@Benchmark def deep_config = deepConfig.hasPath(deepKey)
@Benchmark def deep_config = deepConfig.hasPath(deepKey)
@Benchmark def deep_caching = deepCaching.hasPath(deepKey)
}

View file

@@ -42,7 +42,7 @@ mailbox {
val ref = sys.actorOf(Props(new Actor {
def receive = {
case Stop => sender() ! Stop
case _ =>
case _ =>
}
}).withDispatcher("dispatcher").withMailbox("mailbox"), "receiver")

Some files were not shown because too many files have changed in this diff